| | |
|---|---|
| author | Lorry Tar Creator <lorry-tar-importer@lorry> 2016-05-24 08:28:08 +0000 |
| committer | Lorry Tar Creator <lorry-tar-importer@lorry> 2016-05-24 08:28:08 +0000 |
| commit | a4e969f4965059196ca948db781e52f7cfebf19e (patch) |
| tree | 6ca352808c8fdc52006a0f33f6ae3c593b23867d /Source/JavaScriptCore/llint |
| parent | 41386e9cb918eed93b3f13648cbef387e371e451 (diff) |
| download | WebKitGtk-tarball-a4e969f4965059196ca948db781e52f7cfebf19e.tar.gz |
| tag | webkitgtk-2.12.3 |
Diffstat (limited to 'Source/JavaScriptCore/llint')
21 files changed, 3697 insertions, 2673 deletions
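A recurring change in the LLIntData.cpp hunks below is the replacement of runtime ASSERTs about stack-frame and JSValue layout with a STATIC_ASSERT wrapper, so the assumptions are checked at compile time even in release builds. A minimal standalone sketch of that pattern follows; the constants and the Register stand-in are illustrative placeholders, not the real JSC layout:

```cpp
// Sketch of the STATIC_ASSERT pattern introduced in LLIntData.cpp.
// The wrapper stringifies the condition into the failure message, so a
// violated layout assumption names itself in the compile error.
#include <cstddef>

#define STATIC_ASSERT(cond) static_assert(cond, "LLInt assumes " #cond)

namespace {

// Illustrative stand-ins for the layout constants the real code checks.
constexpr std::ptrdiff_t PtrSize = sizeof(void*);
constexpr std::ptrdiff_t SlotSize = 8;

struct Register { double u; }; // placeholder 8-byte register slot

STATIC_ASSERT(sizeof(void*) == PtrSize);
STATIC_ASSERT(sizeof(Register) == SlotSize);

} // namespace

int main() { return 0; }
```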
diff --git a/Source/JavaScriptCore/llint/LLIntCLoop.cpp b/Source/JavaScriptCore/llint/LLIntCLoop.cpp index 18c2b99c1..e3c6c6ce9 100644 --- a/Source/JavaScriptCore/llint/LLIntCLoop.cpp +++ b/Source/JavaScriptCore/llint/LLIntCLoop.cpp @@ -26,7 +26,7 @@ #include "config.h" #include "LLIntCLoop.h" -#if ENABLE(LLINT_C_LOOP) +#if !ENABLE(JIT) #include "LLIntData.h" @@ -35,10 +35,10 @@ namespace LLInt { void CLoop::initialize() { - execute(0, getOpcode(llint_unused), true); + execute(llint_entry, 0, 0, 0, true); } } // namespace LLInt } // namespace JSC -#endif // ENABLE(LLINT_C_LOOP) +#endif // !ENABLE(JIT) diff --git a/Source/JavaScriptCore/llint/LLIntCLoop.h b/Source/JavaScriptCore/llint/LLIntCLoop.h index 8759571f3..8782b369c 100644 --- a/Source/JavaScriptCore/llint/LLIntCLoop.h +++ b/Source/JavaScriptCore/llint/LLIntCLoop.h @@ -26,27 +26,26 @@ #ifndef LLIntCLoop_h #define LLIntCLoop_h -#if ENABLE(LLINT_C_LOOP) +#if !ENABLE(JIT) #include "CallFrame.h" #include "JSCJSValue.h" #include "Opcode.h" +#include "ProtoCallFrame.h" namespace JSC { namespace LLInt { -const OpcodeID llint_unused = llint_end; - class CLoop { public: static void initialize(); - static JSValue execute(CallFrame*, Opcode entryOpcode, bool isInitializationPass = false); + static JSValue execute(OpcodeID entryOpcodeID, void* executableAddress, VM*, ProtoCallFrame*, bool isInitializationPass = false); }; } } // namespace JSC::LLInt using JSC::LLInt::CLoop; -#endif // ENABLE(LLINT_C_LOOP) +#endif // !ENABLE(JIT) #endif // LLIntCLoop_h diff --git a/Source/JavaScriptCore/llint/LLIntData.cpp b/Source/JavaScriptCore/llint/LLIntData.cpp index ac77836c0..77b5614fc 100644 --- a/Source/JavaScriptCore/llint/LLIntData.cpp +++ b/Source/JavaScriptCore/llint/LLIntData.cpp @@ -25,39 +25,42 @@ #include "config.h" #include "LLIntData.h" - -#if ENABLE(LLINT) - #include "BytecodeConventions.h" +#include "CodeBlock.h" #include "CodeType.h" #include "Instruction.h" #include "JSScope.h" #include "LLIntCLoop.h" +#include "MaxFrameExtentForSlowPathCall.h" #include "Opcode.h" #include "PropertyOffset.h" +#include "WriteBarrier.h" + +#define STATIC_ASSERT(cond) static_assert(cond, "LLInt assumes " #cond) namespace JSC { namespace LLInt { Instruction* Data::s_exceptionInstructions = 0; -Opcode* Data::s_opcodeMap = 0; +Opcode Data::s_opcodeMap[numOpcodeIDs] = { }; + +#if ENABLE(JIT) +extern "C" void llint_entry(void*); +#endif void initialize() { Data::s_exceptionInstructions = new Instruction[maxOpcodeLength + 1]; - Data::s_opcodeMap = new Opcode[numOpcodeIDs]; - #if ENABLE(LLINT_C_LOOP) +#if !ENABLE(JIT) CLoop::initialize(); - #else // !ENABLE(LLINT_C_LOOP) +#else // ENABLE(JIT) + llint_entry(&Data::s_opcodeMap); + for (int i = 0; i < maxOpcodeLength + 1; ++i) Data::s_exceptionInstructions[i].u.pointer = LLInt::getCodePtr(llint_throw_from_slow_path_trampoline); - #define OPCODE_ENTRY(opcode, length) \ - Data::s_opcodeMap[opcode] = LLInt::getCodePtr(llint_##opcode); - FOR_EACH_OPCODE_ID(OPCODE_ENTRY); - #undef OPCODE_ENTRY - #endif // !ENABLE(LLINT_C_LOOP) +#endif // ENABLE(JIT) } #if COMPILER(CLANG) @@ -71,29 +74,27 @@ void Data::performAssertions(VM& vm) // Assertions to match LowLevelInterpreter.asm. If you change any of this code, be // prepared to change LowLevelInterpreter.asm as well!! -#ifndef NDEBUG #if USE(JSVALUE64) const ptrdiff_t PtrSize = 8; - const ptrdiff_t CallFrameHeaderSlots = 6; + const ptrdiff_t CallFrameHeaderSlots = 5; #else // USE(JSVALUE64) // i.e. 
32-bit version const ptrdiff_t PtrSize = 4; - const ptrdiff_t CallFrameHeaderSlots = 5; + const ptrdiff_t CallFrameHeaderSlots = 4; #endif const ptrdiff_t SlotSize = 8; -#endif - ASSERT(sizeof(void*) == PtrSize); - ASSERT(sizeof(Register) == SlotSize); - ASSERT(JSStack::CallFrameHeaderSize == CallFrameHeaderSlots); + STATIC_ASSERT(sizeof(void*) == PtrSize); + STATIC_ASSERT(sizeof(Register) == SlotSize); + STATIC_ASSERT(JSStack::CallFrameHeaderSize == CallFrameHeaderSlots); ASSERT(!CallFrame::callerFrameOffset()); + STATIC_ASSERT(JSStack::CallerFrameAndPCSize == (PtrSize * 2) / SlotSize); ASSERT(CallFrame::returnPCOffset() == CallFrame::callerFrameOffset() + PtrSize); ASSERT(JSStack::CodeBlock * sizeof(Register) == CallFrame::returnPCOffset() + PtrSize); - ASSERT(JSStack::ScopeChain * sizeof(Register) == JSStack::CodeBlock * sizeof(Register) + SlotSize); - ASSERT(JSStack::Callee * sizeof(Register) == JSStack::ScopeChain * sizeof(Register) + SlotSize); - ASSERT(JSStack::ArgumentCount * sizeof(Register) == JSStack::Callee * sizeof(Register) + SlotSize); - ASSERT(JSStack::ThisArgument * sizeof(Register) == JSStack::ArgumentCount * sizeof(Register) + SlotSize); - ASSERT(JSStack::CallFrameHeaderSize == JSStack::ThisArgument); + STATIC_ASSERT(JSStack::Callee * sizeof(Register) == JSStack::CodeBlock * sizeof(Register) + SlotSize); + STATIC_ASSERT(JSStack::ArgumentCount * sizeof(Register) == JSStack::Callee * sizeof(Register) + SlotSize); + STATIC_ASSERT(JSStack::ThisArgument * sizeof(Register) == JSStack::ArgumentCount * sizeof(Register) + SlotSize); + STATIC_ASSERT(JSStack::CallFrameHeaderSize == JSStack::ThisArgument); ASSERT(CallFrame::argumentOffsetIncludingThis(0) == JSStack::ThisArgument); @@ -105,46 +106,89 @@ void Data::performAssertions(VM& vm) ASSERT(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload) == 0); #endif #if USE(JSVALUE32_64) - ASSERT(JSValue::Int32Tag == static_cast<unsigned>(-1)); - ASSERT(JSValue::BooleanTag == static_cast<unsigned>(-2)); - ASSERT(JSValue::NullTag == static_cast<unsigned>(-3)); - ASSERT(JSValue::UndefinedTag == static_cast<unsigned>(-4)); - ASSERT(JSValue::CellTag == static_cast<unsigned>(-5)); - ASSERT(JSValue::EmptyValueTag == static_cast<unsigned>(-6)); - ASSERT(JSValue::DeletedValueTag == static_cast<unsigned>(-7)); - ASSERT(JSValue::LowestTag == static_cast<unsigned>(-7)); + STATIC_ASSERT(JSValue::Int32Tag == static_cast<unsigned>(-1)); + STATIC_ASSERT(JSValue::BooleanTag == static_cast<unsigned>(-2)); + STATIC_ASSERT(JSValue::NullTag == static_cast<unsigned>(-3)); + STATIC_ASSERT(JSValue::UndefinedTag == static_cast<unsigned>(-4)); + STATIC_ASSERT(JSValue::CellTag == static_cast<unsigned>(-5)); + STATIC_ASSERT(JSValue::EmptyValueTag == static_cast<unsigned>(-6)); + STATIC_ASSERT(JSValue::DeletedValueTag == static_cast<unsigned>(-7)); + STATIC_ASSERT(JSValue::LowestTag == static_cast<unsigned>(-7)); #else - ASSERT(TagBitTypeOther == 0x2); - ASSERT(TagBitBool == 0x4); - ASSERT(TagBitUndefined == 0x8); - ASSERT(ValueEmpty == 0x0); - ASSERT(ValueFalse == (TagBitTypeOther | TagBitBool)); - ASSERT(ValueTrue == (TagBitTypeOther | TagBitBool | 1)); - ASSERT(ValueUndefined == (TagBitTypeOther | TagBitUndefined)); - ASSERT(ValueNull == TagBitTypeOther); + STATIC_ASSERT(TagBitTypeOther == 0x2); + STATIC_ASSERT(TagBitBool == 0x4); + STATIC_ASSERT(TagBitUndefined == 0x8); + STATIC_ASSERT(ValueEmpty == 0x0); + STATIC_ASSERT(ValueFalse == (TagBitTypeOther | TagBitBool)); + STATIC_ASSERT(ValueTrue == (TagBitTypeOther | TagBitBool | 1)); + 
STATIC_ASSERT(ValueUndefined == (TagBitTypeOther | TagBitUndefined)); + STATIC_ASSERT(ValueNull == TagBitTypeOther); #endif - ASSERT(StringType == 5); - ASSERT(ObjectType == 17); - ASSERT(FinalObjectType == 18); - ASSERT(MasqueradesAsUndefined == 1); - ASSERT(ImplementsHasInstance == 2); - ASSERT(ImplementsDefaultHasInstance == 8); - ASSERT(FirstConstantRegisterIndex == 0x40000000); - ASSERT(GlobalCode == 0); - ASSERT(EvalCode == 1); - ASSERT(FunctionCode == 2); - - ASSERT(GlobalProperty == 0); - ASSERT(GlobalVar == 1); - ASSERT(ClosureVar == 2); - ASSERT(GlobalPropertyWithVarInjectionChecks == 3); - ASSERT(GlobalVarWithVarInjectionChecks == 4); - ASSERT(ClosureVarWithVarInjectionChecks == 5); - ASSERT(Dynamic == 6); +#if (CPU(X86_64) && !OS(WINDOWS)) || CPU(ARM64) || !ENABLE(JIT) + STATIC_ASSERT(!maxFrameExtentForSlowPathCall); +#elif CPU(ARM) || CPU(SH4) + STATIC_ASSERT(maxFrameExtentForSlowPathCall == 24); +#elif CPU(X86) || CPU(MIPS) + STATIC_ASSERT(maxFrameExtentForSlowPathCall == 40); +#elif CPU(X86_64) && OS(WINDOWS) + STATIC_ASSERT(maxFrameExtentForSlowPathCall == 64); +#endif + +#if !ENABLE(JIT) || USE(JSVALUE32_64) + ASSERT(!CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters()); +#elif (CPU(X86_64) && !OS(WINDOWS)) || CPU(ARM64) + ASSERT(CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters() == 3); +#elif (CPU(X86_64) && OS(WINDOWS)) + ASSERT(CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters() == 3); +#endif + + STATIC_ASSERT(StringType == 6); + STATIC_ASSERT(SymbolType == 7); + STATIC_ASSERT(ObjectType == 21); + STATIC_ASSERT(FinalObjectType == 22); + STATIC_ASSERT(MasqueradesAsUndefined == 1); + STATIC_ASSERT(ImplementsDefaultHasInstance == 2); + STATIC_ASSERT(FirstConstantRegisterIndex == 0x40000000); + STATIC_ASSERT(GlobalCode == 0); + STATIC_ASSERT(EvalCode == 1); + STATIC_ASSERT(FunctionCode == 2); + STATIC_ASSERT(ModuleCode == 3); + + ASSERT(!(reinterpret_cast<ptrdiff_t>((reinterpret_cast<WriteBarrier<JSCell>*>(0x4000)->slot())) - 0x4000)); + static_assert(PutByIdPrimaryTypeMask == 0x6, "LLInt assumes PutByIdPrimaryTypeMask is == 0x6"); + static_assert(PutByIdPrimaryTypeSecondary == 0x0, "LLInt assumes PutByIdPrimaryTypeSecondary is == 0x0"); + static_assert(PutByIdPrimaryTypeObjectWithStructure == 0x2, "LLInt assumes PutByIdPrimaryTypeObjectWithStructure is == 0x2"); + static_assert(PutByIdPrimaryTypeObjectWithStructureOrOther == 0x4, "LLInt assumes PutByIdPrimaryTypeObjectWithStructureOrOther is == 0x4"); + static_assert(PutByIdSecondaryTypeMask == -0x8, "LLInt assumes PutByIdSecondaryTypeMask is == -0x8"); + static_assert(PutByIdSecondaryTypeBottom == 0x0, "LLInt assumes PutByIdSecondaryTypeBottom is == 0x0"); + static_assert(PutByIdSecondaryTypeBoolean == 0x8, "LLInt assumes PutByIdSecondaryTypeBoolean is == 0x8"); + static_assert(PutByIdSecondaryTypeOther == 0x10, "LLInt assumes PutByIdSecondaryTypeOther is == 0x10"); + static_assert(PutByIdSecondaryTypeInt32 == 0x18, "LLInt assumes PutByIdSecondaryTypeInt32 is == 0x18"); + static_assert(PutByIdSecondaryTypeNumber == 0x20, "LLInt assumes PutByIdSecondaryTypeNumber is == 0x20"); + static_assert(PutByIdSecondaryTypeString == 0x28, "LLInt assumes PutByIdSecondaryTypeString is == 0x28"); + static_assert(PutByIdSecondaryTypeSymbol == 0x30, "LLInt assumes PutByIdSecondaryTypeSymbol is == 0x30"); + static_assert(PutByIdSecondaryTypeObject == 0x38, "LLInt assumes PutByIdSecondaryTypeObject is == 0x38"); + static_assert(PutByIdSecondaryTypeObjectOrOther == 0x40, "LLInt assumes 
PutByIdSecondaryTypeObjectOrOther is == 0x40"); + static_assert(PutByIdSecondaryTypeTop == 0x48, "LLInt assumes PutByIdSecondaryTypeTop is == 0x48"); + + static_assert(GlobalProperty == 0, "LLInt assumes GlobalProperty ResultType is == 0"); + static_assert(GlobalVar == 1, "LLInt assumes GlobalVar ResultType is == 1"); + static_assert(GlobalLexicalVar == 2, "LLInt assumes GlobalLexicalVar ResultType is == 2"); + static_assert(ClosureVar == 3, "LLInt assumes ClosureVar ResultType is == 3"); + static_assert(LocalClosureVar == 4, "LLInt assumes LocalClosureVar ResultType is == 4"); + static_assert(ModuleVar == 5, "LLInt assumes ModuleVar ResultType is == 5"); + static_assert(GlobalPropertyWithVarInjectionChecks == 6, "LLInt assumes GlobalPropertyWithVarInjectionChecks ResultType is == 6"); + static_assert(GlobalVarWithVarInjectionChecks == 7, "LLInt assumes GlobalVarWithVarInjectionChecks ResultType is == 7"); + static_assert(GlobalLexicalVarWithVarInjectionChecks == 8, "LLInt assumes GlobalLexicalVarWithVarInjectionChecks ResultType is == 8"); + static_assert(ClosureVarWithVarInjectionChecks == 9, "LLInt assumes ClosureVarWithVarInjectionChecks ResultType is == 9"); + + static_assert(InitializationMode::Initialization == 0, "LLInt assumes that InitializationMode::Initialization is 0"); - ASSERT(ResolveModeAndType::mask == 0xffff); + STATIC_ASSERT(GetPutInfo::typeBits == 0x3ff); + STATIC_ASSERT(GetPutInfo::initializationShift == 10); + STATIC_ASSERT(GetPutInfo::initializationBits == 0xffc00); - ASSERT(MarkedBlock::blockMask == ~static_cast<decltype(MarkedBlock::blockMask)>(0xffff)); + STATIC_ASSERT(MarkedBlock::blockMask == ~static_cast<decltype(MarkedBlock::blockMask)>(0x3fff)); // FIXME: make these assertions less horrible. #if !ASSERT_DISABLED @@ -154,12 +198,10 @@ void Data::performAssertions(VM& vm) ASSERT(bitwise_cast<int**>(&testVector)[0] == testVector.begin()); #endif - ASSERT(StringImpl::s_hashFlag8BitBuffer == 32); + ASSERT(StringImpl::s_hashFlag8BitBuffer == 8); } #if COMPILER(CLANG) #pragma clang diagnostic pop #endif } } // namespace JSC::LLInt - -#endif // ENABLE(LLINT) diff --git a/Source/JavaScriptCore/llint/LLIntData.h b/Source/JavaScriptCore/llint/LLIntData.h index 8ed2bceda..7e7794b14 100644 --- a/Source/JavaScriptCore/llint/LLIntData.h +++ b/Source/JavaScriptCore/llint/LLIntData.h @@ -28,14 +28,13 @@ #include "JSCJSValue.h" #include "Opcode.h" -#include <wtf/Platform.h> namespace JSC { class VM; struct Instruction; -#if ENABLE(LLINT_C_LOOP) +#if !ENABLE(JIT) typedef OpcodeID LLIntCode; #else typedef void (*LLIntCode)(); @@ -43,15 +42,13 @@ typedef void (*LLIntCode)(); namespace LLInt { -#if ENABLE(LLINT) - class Data { public: static void performAssertions(VM&); private: static Instruction* s_exceptionInstructions; - static Opcode* s_opcodeMap; + static Opcode s_opcodeMap[numOpcodeIDs]; friend void initialize(); @@ -87,33 +84,12 @@ ALWAYS_INLINE void* getCodePtr(OpcodeID id) return reinterpret_cast<void*>(getOpcode(id)); } -#else // !ENABLE(LLINT) - -#if COMPILER(CLANG) -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wmissing-noreturn" -#endif - -class Data { -public: - static void performAssertions(VM&) { } -}; - -#if COMPILER(CLANG) -#pragma clang diagnostic pop -#endif - -#endif // !ENABLE(LLINT) - -ALWAYS_INLINE void* getOpcode(void llintOpcode()) -{ - return bitwise_cast<void*>(llintOpcode); -} - -ALWAYS_INLINE void* getCodePtr(void glueHelper()) +#if ENABLE(JIT) +ALWAYS_INLINE LLIntCode getCodeFunctionPtr(OpcodeID codeId) { - return 
bitwise_cast<void*>(glueHelper); + return reinterpret_cast<LLIntCode>(getCodePtr(codeId)); } +#endif ALWAYS_INLINE void* getCodePtr(JSC::EncodedJSValue glueHelper()) { diff --git a/Source/JavaScriptCore/llint/LLIntEntrypoint.cpp b/Source/JavaScriptCore/llint/LLIntEntrypoint.cpp index 993ec67b9..f5918b721 100644 --- a/Source/JavaScriptCore/llint/LLIntEntrypoint.cpp +++ b/Source/JavaScriptCore/llint/LLIntEntrypoint.cpp @@ -25,14 +25,15 @@ #include "config.h" #include "LLIntEntrypoint.h" - -#if ENABLE(LLINT) - #include "CodeBlock.h" +#include "HeapInlines.h" #include "JITCode.h" +#include "JSCellInlines.h" #include "JSObject.h" #include "LLIntThunks.h" #include "LowLevelInterpreter.h" +#include "MaxFrameExtentForSlowPathCall.h" +#include "StackAlignment.h" #include "VM.h" namespace JSC { namespace LLInt { @@ -45,14 +46,12 @@ static void setFunctionEntrypoint(VM& vm, CodeBlock* codeBlock) if (vm.canUseJIT()) { if (kind == CodeForCall) { codeBlock->setJITCode( - adoptRef(new DirectJITCode(vm.getCTIStub(functionForCallEntryThunkGenerator), JITCode::InterpreterThunk)), - vm.getCTIStub(functionForCallArityCheckThunkGenerator).code()); + adoptRef(new DirectJITCode(vm.getCTIStub(functionForCallEntryThunkGenerator), vm.getCTIStub(functionForCallArityCheckThunkGenerator).code(), JITCode::InterpreterThunk))); return; } ASSERT(kind == CodeForConstruct); codeBlock->setJITCode( - adoptRef(new DirectJITCode(vm.getCTIStub(functionForConstructEntryThunkGenerator), JITCode::InterpreterThunk)), - vm.getCTIStub(functionForConstructArityCheckThunkGenerator).code()); + adoptRef(new DirectJITCode(vm.getCTIStub(functionForConstructEntryThunkGenerator), vm.getCTIStub(functionForConstructArityCheckThunkGenerator).code(), JITCode::InterpreterThunk))); return; } #endif // ENABLE(JIT) @@ -60,14 +59,12 @@ static void setFunctionEntrypoint(VM& vm, CodeBlock* codeBlock) UNUSED_PARAM(vm); if (kind == CodeForCall) { codeBlock->setJITCode( - adoptRef(new DirectJITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_function_for_call_prologue), JITCode::InterpreterThunk)), - MacroAssemblerCodePtr::createLLIntCodePtr(llint_function_for_call_arity_check)); + adoptRef(new DirectJITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_function_for_call_prologue), MacroAssemblerCodePtr::createLLIntCodePtr(llint_function_for_call_arity_check), JITCode::InterpreterThunk))); return; } ASSERT(kind == CodeForConstruct); codeBlock->setJITCode( - adoptRef(new DirectJITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_function_for_construct_prologue), JITCode::InterpreterThunk)), - MacroAssemblerCodePtr::createLLIntCodePtr(llint_function_for_construct_arity_check)); + adoptRef(new DirectJITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_function_for_construct_prologue), MacroAssemblerCodePtr::createLLIntCodePtr(llint_function_for_construct_arity_check), JITCode::InterpreterThunk))); } static void setEvalEntrypoint(VM& vm, CodeBlock* codeBlock) @@ -75,16 +72,14 @@ static void setEvalEntrypoint(VM& vm, CodeBlock* codeBlock) #if ENABLE(JIT) if (vm.canUseJIT()) { codeBlock->setJITCode( - adoptRef(new DirectJITCode(vm.getCTIStub(evalEntryThunkGenerator), JITCode::InterpreterThunk)), - MacroAssemblerCodePtr()); + adoptRef(new DirectJITCode(vm.getCTIStub(evalEntryThunkGenerator), MacroAssemblerCodePtr(), JITCode::InterpreterThunk))); return; } #endif // ENABLE(JIT) UNUSED_PARAM(vm); codeBlock->setJITCode( - adoptRef(new DirectJITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_eval_prologue), JITCode::InterpreterThunk)), - 
MacroAssemblerCodePtr()); + adoptRef(new DirectJITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_eval_prologue), MacroAssemblerCodePtr(), JITCode::InterpreterThunk))); } static void setProgramEntrypoint(VM& vm, CodeBlock* codeBlock) @@ -92,16 +87,29 @@ static void setProgramEntrypoint(VM& vm, CodeBlock* codeBlock) #if ENABLE(JIT) if (vm.canUseJIT()) { codeBlock->setJITCode( - adoptRef(new DirectJITCode(vm.getCTIStub(programEntryThunkGenerator), JITCode::InterpreterThunk)), - MacroAssemblerCodePtr()); + adoptRef(new DirectJITCode(vm.getCTIStub(programEntryThunkGenerator), MacroAssemblerCodePtr(), JITCode::InterpreterThunk))); return; } #endif // ENABLE(JIT) UNUSED_PARAM(vm); codeBlock->setJITCode( - adoptRef(new DirectJITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_program_prologue), JITCode::InterpreterThunk)), - MacroAssemblerCodePtr()); + adoptRef(new DirectJITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_program_prologue), MacroAssemblerCodePtr(), JITCode::InterpreterThunk))); +} + +static void setModuleProgramEntrypoint(VM& vm, CodeBlock* codeBlock) +{ +#if ENABLE(JIT) + if (vm.canUseJIT()) { + codeBlock->setJITCode( + adoptRef(new DirectJITCode(vm.getCTIStub(moduleProgramEntryThunkGenerator), MacroAssemblerCodePtr(), JITCode::InterpreterThunk))); + return; + } +#endif // ENABLE(JIT) + + UNUSED_PARAM(vm); + codeBlock->setJITCode( + adoptRef(new DirectJITCode(MacroAssemblerCodeRef::createLLIntCodeRef(llint_module_program_prologue), MacroAssemblerCodePtr(), JITCode::InterpreterThunk))); } void setEntrypoint(VM& vm, CodeBlock* codeBlock) @@ -110,6 +118,9 @@ void setEntrypoint(VM& vm, CodeBlock* codeBlock) case GlobalCode: setProgramEntrypoint(vm, codeBlock); return; + case ModuleCode: + setModuleProgramEntrypoint(vm, codeBlock); + return; case EvalCode: setEvalEntrypoint(vm, codeBlock); return; @@ -123,9 +134,9 @@ void setEntrypoint(VM& vm, CodeBlock* codeBlock) unsigned frameRegisterCountFor(CodeBlock* codeBlock) { - return codeBlock->m_numCalleeRegisters; + ASSERT(static_cast<unsigned>(codeBlock->m_numCalleeLocals) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast<unsigned>(codeBlock->m_numCalleeLocals))); + + return roundLocalRegisterCountForFramePointerOffset(codeBlock->m_numCalleeLocals + maxFrameExtentForSlowPathCallInRegisters); } } } // namespace JSC::LLInt - -#endif // ENABLE(LLINT) diff --git a/Source/JavaScriptCore/llint/LLIntEntrypoint.h b/Source/JavaScriptCore/llint/LLIntEntrypoint.h index 4b687c6de..5b8fd51cd 100644 --- a/Source/JavaScriptCore/llint/LLIntEntrypoint.h +++ b/Source/JavaScriptCore/llint/LLIntEntrypoint.h @@ -26,10 +26,6 @@ #ifndef LLIntEntrypoint_h #define LLIntEntrypoint_h -#include <wtf/Platform.h> - -#if ENABLE(LLINT) - #include "CodeSpecializationKind.h" namespace JSC { @@ -45,6 +41,4 @@ unsigned frameRegisterCountFor(CodeBlock*); } } // namespace JSC::LLInt -#endif // ENABLE(LLINT) - #endif // LLIntEntrypoint_h diff --git a/Source/JavaScriptCore/llint/LLIntExceptions.cpp b/Source/JavaScriptCore/llint/LLIntExceptions.cpp index ddacb5016..039936e73 100644 --- a/Source/JavaScriptCore/llint/LLIntExceptions.cpp +++ b/Source/JavaScriptCore/llint/LLIntExceptions.cpp @@ -25,15 +25,12 @@ #include "config.h" #include "LLIntExceptions.h" - -#if ENABLE(LLINT) - #include "CallFrame.h" #include "CodeBlock.h" #include "Instruction.h" #include "LLIntCommon.h" #include "LowLevelInterpreter.h" -#include "Operations.h" +#include "JSCInlines.h" namespace JSC { namespace LLInt { @@ -64,5 +61,3 @@ void* callToThrow(ExecState* exec) } } } // 
namespace JSC::LLInt - -#endif // ENABLE(LLINT) diff --git a/Source/JavaScriptCore/llint/LLIntExceptions.h b/Source/JavaScriptCore/llint/LLIntExceptions.h index bf18feed3..bdeb5e4a7 100644 --- a/Source/JavaScriptCore/llint/LLIntExceptions.h +++ b/Source/JavaScriptCore/llint/LLIntExceptions.h @@ -26,11 +26,7 @@ #ifndef LLIntExceptions_h #define LLIntExceptions_h -#include <wtf/Platform.h> #include <wtf/StdLibExtras.h> - -#if ENABLE(LLINT) - #include "MacroAssemblerCodeRef.h" namespace JSC { @@ -54,6 +50,4 @@ void* callToThrow(ExecState*); } } // namespace JSC::LLInt -#endif // ENABLE(LLINT) - #endif // LLIntExceptions_h diff --git a/Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h b/Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h index 07a91bb73..e9c64d90c 100644 --- a/Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h +++ b/Source/JavaScriptCore/llint/LLIntOfflineAsmConfig.h @@ -29,31 +29,44 @@ #include "LLIntCommon.h" #include <wtf/Assertions.h> #include <wtf/InlineASM.h> -#include <wtf/Platform.h> - -#if ENABLE(LLINT_C_LOOP) +#if !ENABLE(JIT) #define OFFLINE_ASM_C_LOOP 1 #define OFFLINE_ASM_X86 0 +#define OFFLINE_ASM_X86_WIN 0 #define OFFLINE_ASM_ARM 0 #define OFFLINE_ASM_ARMv7 0 #define OFFLINE_ASM_ARMv7_TRADITIONAL 0 #define OFFLINE_ASM_ARM64 0 #define OFFLINE_ASM_X86_64 0 +#define OFFLINE_ASM_X86_64_WIN 0 +#define OFFLINE_ASM_ARMv7k 0 #define OFFLINE_ASM_ARMv7s 0 #define OFFLINE_ASM_MIPS 0 #define OFFLINE_ASM_SH4 0 -#else // !ENABLE(LLINT_C_LOOP) +#else // ENABLE(JIT) #define OFFLINE_ASM_C_LOOP 0 -#if CPU(X86) +#if CPU(X86) && !PLATFORM(WIN) #define OFFLINE_ASM_X86 1 #else #define OFFLINE_ASM_X86 0 #endif +#if CPU(X86) && PLATFORM(WIN) +#define OFFLINE_ASM_X86_WIN 1 +#else +#define OFFLINE_ASM_X86_WIN 0 +#endif + +#ifdef __ARM_ARCH_7K__ +#define OFFLINE_ASM_ARMv7k 1 +#else +#define OFFLINE_ASM_ARMv7k 0 +#endif + #ifdef __ARM_ARCH_7S__ #define OFFLINE_ASM_ARMv7s 1 #else @@ -79,12 +92,18 @@ #define OFFLINE_ASM_ARM 0 #endif -#if CPU(X86_64) +#if CPU(X86_64) && !PLATFORM(WIN) #define OFFLINE_ASM_X86_64 1 #else #define OFFLINE_ASM_X86_64 0 #endif +#if CPU(X86_64) && PLATFORM(WIN) +#define OFFLINE_ASM_X86_64_WIN 1 +#else +#define OFFLINE_ASM_X86_64_WIN 0 +#endif + #if CPU(MIPS) #define OFFLINE_ASM_MIPS 1 #else @@ -116,7 +135,7 @@ #endif #endif -#endif // !ENABLE(LLINT_C_LOOP) +#endif // ENABLE(JIT) #if USE(JSVALUE64) #define OFFLINE_ASM_JSVALUE64 1 @@ -142,16 +161,4 @@ #define OFFLINE_ASM_EXECUTION_TRACING 0 #endif -#if LLINT_ALWAYS_ALLOCATE_SLOW -#define OFFLINE_ASM_ALWAYS_ALLOCATE_SLOW 1 -#else -#define OFFLINE_ASM_ALWAYS_ALLOCATE_SLOW 0 -#endif - -#if ENABLE(GGC) -#define OFFLINE_ASM_GGC 1 -#else -#define OFFLINE_ASM_GGC 0 -#endif - #endif // LLIntOfflineAsmConfig_h diff --git a/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp b/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp index a0cbfa0ba..2b4e61986 100644 --- a/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp +++ b/Source/JavaScriptCore/llint/LLIntOffsetsExtractor.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -27,31 +27,38 @@ #include "ArrayProfile.h" #include "CodeBlock.h" +#include "CommonSlowPaths.h" #include "Debugger.h" +#include "DirectArguments.h" +#include "Exception.h" #include "Executable.h" #include "Heap.h" #include "Interpreter.h" -#include "JITStubs.h" #include "JSArray.h" #include "JSCell.h" #include "JSFunction.h" #include "VM.h" +#include "JSEnvironmentRecord.h" #include "JSGlobalObject.h" +#include "JSModuleRecord.h" #include "JSObject.h" -#include "JSPropertyNameIterator.h" #include "JSStack.h" #include "JSString.h" #include "JSTypeInfo.h" -#include "JSVariableObject.h" #include "JumpTable.h" #include "LLIntOfflineAsmConfig.h" #include "MarkedSpace.h" #include "ProtoCallFrame.h" #include "Structure.h" #include "StructureChain.h" +#include "TypeProfiler.h" +#include "TypeProfilerLog.h" +#include "VMEntryRecord.h" #include "ValueProfile.h" +#include "Watchdog.h" #include <wtf/text/StringImpl.h> + namespace JSC { #define OFFLINE_ASM_OFFSETOF(clazz, field) (static_cast<unsigned>(OBJECT_OFFSETOF(clazz, field))) @@ -63,7 +70,6 @@ public: const unsigned* LLIntOffsetsExtractor::dummy() { -#if ENABLE(LLINT) // This is a file generated by offlineasm/generate_offsets_extractor.rb, and contains code // to create a table of offsets, sizes, and a header identifying what combination of // Platform.h macros we have set. We include it inside of a method on LLIntOffsetsExtractor @@ -72,9 +78,6 @@ const unsigned* LLIntOffsetsExtractor::dummy() // compiler to kindly step aside and yield to our best intentions. #include "LLIntDesiredOffsets.h" return extractorTable; -#else - return 0; -#endif } } // namespace JSC diff --git a/Source/JavaScriptCore/llint/LLIntOpcode.h b/Source/JavaScriptCore/llint/LLIntOpcode.h index 7ee53df82..9b26676c4 100644 --- a/Source/JavaScriptCore/llint/LLIntOpcode.h +++ b/Source/JavaScriptCore/llint/LLIntOpcode.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2013, 2014 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,56 +26,26 @@ #ifndef LLIntOpcode_h #define LLIntOpcode_h -#include <wtf/Platform.h> - -#if ENABLE(LLINT) - -#if ENABLE(LLINT_C_LOOP) +#if !ENABLE(JIT) #define FOR_EACH_LLINT_NOJIT_NATIVE_HELPER(macro) \ - macro(getHostCallReturnValue, 1) \ - macro(returnFromJavaScript, 1) + FOR_EACH_CLOOP_BYTECODE_HELPER_ID(macro) -#else // !ENABLE(LLINT_C_LOOP) +#else // ENABLE(JIT) #define FOR_EACH_LLINT_NOJIT_NATIVE_HELPER(macro) \ // Nothing to do here. Use the JIT impl instead. 
-#endif // !ENABLE(LLINT_C_LOOP) +#endif // !ENABLE(JIT) #define FOR_EACH_LLINT_NATIVE_HELPER(macro) \ FOR_EACH_LLINT_NOJIT_NATIVE_HELPER(macro) \ \ - macro(llint_begin, 1) \ - \ - macro(llint_program_prologue, 1) \ - macro(llint_eval_prologue, 1) \ - macro(llint_function_for_call_prologue, 1) \ - macro(llint_function_for_construct_prologue, 1) \ - macro(llint_function_for_call_arity_check, 1) \ - macro(llint_function_for_construct_arity_check, 1) \ - macro(llint_generic_return_point, 1) \ - macro(llint_throw_from_slow_path_trampoline, 1) \ - macro(llint_throw_during_call_trampoline, 1) \ - \ - /* Native call trampolines */ \ - macro(llint_native_call_trampoline, 1) \ - macro(llint_native_construct_trampoline, 1) \ - \ - macro(llint_end, 1) + FOR_EACH_BYTECODE_HELPER_ID(macro) -#if ENABLE(LLINT_C_LOOP) -#define FOR_EACH_LLINT_OPCODE_EXTENSION(macro) FOR_EACH_LLINT_NATIVE_HELPER(macro) -#else -#define FOR_EACH_LLINT_OPCODE_EXTENSION(macro) // Nothing to add. -#endif - -#else // !ENABLE(LLINT) -#define FOR_EACH_LLINT_OPCODE_EXTENSION(macro) // Nothing to add. - -#endif // !ENABLE(LLINT) +#define FOR_EACH_LLINT_OPCODE_EXTENSION(macro) FOR_EACH_LLINT_NATIVE_HELPER(macro) #endif // LLIntOpcode_h diff --git a/Source/JavaScriptCore/llint/LLIntPCRanges.h b/Source/JavaScriptCore/llint/LLIntPCRanges.h new file mode 100644 index 000000000..fdb48598f --- /dev/null +++ b/Source/JavaScriptCore/llint/LLIntPCRanges.h @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef LLIntPCRanges_h +#define LLIntPCRanges_h + +namespace JSC { + +namespace LLInt { + +// These are used just to denote where LLInt code begins and where it ends. 
+extern "C" { + void llintPCRangeStart(); + void llintPCRangeEnd(); +} + +ALWAYS_INLINE bool isLLIntPC(void* pc) +{ + uintptr_t pcAsInt = bitwise_cast<uintptr_t>(pc); + uintptr_t llintStart = bitwise_cast<uintptr_t>(llintPCRangeStart); + uintptr_t llintEnd = bitwise_cast<uintptr_t>(llintPCRangeEnd); + RELEASE_ASSERT(llintStart < llintEnd); + return llintStart <= pcAsInt && pcAsInt <= llintEnd; +} + +#if ENABLE(JIT) +static const GPRReg LLIntPC = GPRInfo::regT4; +#endif + +} } // namespace JSC::LLInt + +#endif // LLIntPCRanges_h diff --git a/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp index 26264171d..700af9cff 100644 --- a/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp +++ b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2011-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,32 +26,35 @@ #include "config.h" #include "LLIntSlowPaths.h" -#if ENABLE(LLINT) - -#include "Arguments.h" #include "ArrayConstructor.h" #include "CallFrame.h" #include "CommonSlowPaths.h" #include "CommonSlowPathsExceptions.h" +#include "Error.h" +#include "ErrorHandlingScope.h" +#include "Exception.h" +#include "ExceptionFuzz.h" #include "GetterSetter.h" #include "HostCallReturnValue.h" #include "Interpreter.h" #include "JIT.h" #include "JITExceptions.h" -#include "JSActivation.h" +#include "JSLexicalEnvironment.h" +#include "JSCInlines.h" #include "JSCJSValue.h" +#include "JSGeneratorFunction.h" #include "JSGlobalObjectFunctions.h" -#include "JSNameScope.h" -#include "JSPropertyNameIterator.h" #include "JSStackInlines.h" #include "JSString.h" #include "JSWithScope.h" #include "LLIntCommon.h" #include "LLIntExceptions.h" +#include "LegacyProfiler.h" #include "LowLevelInterpreter.h" #include "ObjectConstructor.h" -#include "Operations.h" +#include "ProtoCallFrame.h" #include "StructureRareDataInlines.h" +#include "VMInlines.h" #include <wtf/StringPrintStream.h> namespace JSC { namespace LLInt { @@ -82,7 +85,7 @@ namespace JSC { namespace LLInt { return encodeResult(first, second); \ } while (false) -#define LLINT_END_IMPL() LLINT_RETURN_TWO(pc, exec) +#define LLINT_END_IMPL() LLINT_RETURN_TWO(pc, 0) #define LLINT_THROW(exceptionToThrow) do { \ vm.throwException(exec, exceptionToThrow); \ @@ -91,6 +94,7 @@ namespace JSC { namespace LLInt { } while (false) #define LLINT_CHECK_EXCEPTION() do { \ + doExceptionFuzzingIfEnabled(exec, "LLIntSlowPaths", pc); \ if (UNLIKELY(vm.exception())) { \ pc = returnToThrow(exec); \ LLINT_END_IMPL(); \ @@ -119,6 +123,14 @@ namespace JSC { namespace LLInt { LLINT_END_IMPL(); \ } while (false) +#define LLINT_RETURN_WITH_PC_ADJUSTMENT(value, pcAdjustment) do { \ + JSValue __r_returnValue = (value); \ + LLINT_CHECK_EXCEPTION(); \ + LLINT_OP(1) = __r_returnValue; \ + pc += (pcAdjustment); \ + LLINT_END_IMPL(); \ + } while (false) + #define LLINT_RETURN_PROFILED(opcode, value) do { \ JSValue __rp_returnValue = (value); \ LLINT_CHECK_EXCEPTION(); \ @@ -137,22 +149,30 @@ namespace JSC { namespace LLInt { #define LLINT_CALL_THROW(exec, exceptionToThrow) do { \ ExecState* __ct_exec = (exec); \ vm.throwException(__ct_exec, exceptionToThrow); \ - LLINT_CALL_END_IMPL(__ct_exec, callToThrow(__ct_exec)); \ + LLINT_CALL_END_IMPL(0, callToThrow(__ct_exec)); \ } while (false) -#define LLINT_CALL_CHECK_EXCEPTION(exec) do { 
\ +#define LLINT_CALL_CHECK_EXCEPTION(exec, execCallee) do { \ ExecState* __cce_exec = (exec); \ + ExecState* __cce_execCallee = (execCallee); \ + doExceptionFuzzingIfEnabled(__cce_exec, "LLIntSlowPaths/call", nullptr); \ if (UNLIKELY(vm.exception())) \ - LLINT_CALL_END_IMPL(__cce_exec, callToThrow(__cce_exec)); \ + LLINT_CALL_END_IMPL(0, callToThrow(__cce_execCallee)); \ } while (false) -#define LLINT_CALL_RETURN(exec, callTarget) do { \ +#define LLINT_CALL_RETURN(exec, execCallee, callTarget) do { \ ExecState* __cr_exec = (exec); \ + ExecState* __cr_execCallee = (execCallee); \ void* __cr_callTarget = (callTarget); \ - LLINT_CALL_CHECK_EXCEPTION(__cr_exec->callerFrame()); \ - LLINT_CALL_END_IMPL(__cr_exec, __cr_callTarget); \ + LLINT_CALL_CHECK_EXCEPTION(__cr_exec, __cr_execCallee); \ + LLINT_CALL_END_IMPL(__cr_execCallee, __cr_callTarget); \ } while (false) +#define LLINT_RETURN_CALLEE_FRAME(execCallee) do { \ + ExecState* __rcf_exec = (execCallee); \ + LLINT_RETURN_TWO(pc, __rcf_exec); \ + } while (false) + extern "C" SlowPathReturnType llint_trace_operand(ExecState* exec, Instruction* pc, int fromWhere, int operand) { LLINT_BEGIN(); @@ -204,9 +224,9 @@ static void traceFunctionPrologue(ExecState* exec, const char* comment, CodeSpec JSFunction* callee = jsCast<JSFunction*>(exec->callee()); FunctionExecutable* executable = callee->jsExecutable(); CodeBlock* codeBlock = executable->codeBlockFor(kind); - dataLogF("%p / %p: in %s of function %p, executable %p; numVars = %u, numParameters = %u, numCalleeRegisters = %u, caller = %p.\n", + dataLogF("%p / %p: in %s of function %p, executable %p; numVars = %u, numParameters = %u, numCalleeLocals = %u, caller = %p.\n", codeBlock, exec, comment, callee, executable, - codeBlock->m_numVars, codeBlock->numParameters(), codeBlock->m_numCalleeRegisters, + codeBlock->m_numVars, codeBlock->numParameters(), codeBlock->m_numCalleeLocals, exec->callerFrame()); } @@ -236,12 +256,15 @@ LLINT_SLOW_PATH_DECL(trace_arityCheck_for_construct) LLINT_SLOW_PATH_DECL(trace) { - dataLogF("%p / %p: executing bc#%zu, %s, scope %p, pc = %p\n", + dataLogF("%p / %p: executing bc#%zu, %s, pc = %p\n", exec->codeBlock(), exec, static_cast<intptr_t>(pc - exec->codeBlock()->instructions().begin()), - opcodeNames[exec->vm().interpreter->getOpcodeID(pc[0].u.opcode)], - exec->scope(), pc); + opcodeNames[exec->vm().interpreter->getOpcodeID(pc[0].u.opcode)], pc); + if (exec->vm().interpreter->getOpcodeID(pc[0].u.opcode) == op_enter) { + dataLogF("Frame will eventually return to %p\n", exec->returnPC().value()); + *bitwise_cast<volatile char*>(exec->returnPC().value()); + } if (exec->vm().interpreter->getOpcodeID(pc[0].u.opcode) == op_ret) { dataLogF("Will be returning to %p\n", exec->returnPC().value()); dataLogF("The new cfr will be %p\n", exec->callerFrame()); @@ -263,7 +286,7 @@ LLINT_SLOW_PATH_DECL(special_trace) enum EntryKind { Prologue, ArityCheck }; #if ENABLE(JIT) -inline bool shouldJIT(ExecState* exec) +inline bool shouldJIT(ExecState* exec, CodeBlock*) { // You can modify this to turn off JITting without rebuilding the world. return exec->vm().canUseJIT(); @@ -276,7 +299,7 @@ inline bool jitCompileAndSetHeuristics(CodeBlock* codeBlock, ExecState* exec) DeferGCForAWhile deferGC(vm.heap); // My callers don't set top callframe, so we don't want to GC here at all. 
codeBlock->updateAllValueProfilePredictions(); - + if (!codeBlock->checkIfJITThresholdReached()) { if (Options::verboseOSR()) dataLogF(" JIT threshold should be lifted.\n"); @@ -301,7 +324,7 @@ inline bool jitCompileAndSetHeuristics(CodeBlock* codeBlock, ExecState* exec) case CompilationSuccessful: if (Options::verboseOSR()) dataLogF(" JIT compilation successful.\n"); - codeBlock->install(); + codeBlock->ownerScriptExecutable()->installCode(codeBlock); codeBlock->jitSoon(); return true; default: @@ -310,6 +333,7 @@ inline bool jitCompileAndSetHeuristics(CodeBlock* codeBlock, ExecState* exec) } } default: + dataLog("Unexpected code block in LLInt: ", *codeBlock, "\n"); RELEASE_ASSERT_NOT_REACHED(); return false; } @@ -323,17 +347,17 @@ static SlowPathReturnType entryOSR(ExecState* exec, Instruction*, CodeBlock* cod codeBlock->llintExecuteCounter(), "\n"); } - if (!shouldJIT(exec)) { + if (!shouldJIT(exec, codeBlock)) { codeBlock->dontJITAnytimeSoon(); - LLINT_RETURN_TWO(0, exec); + LLINT_RETURN_TWO(0, 0); } if (!jitCompileAndSetHeuristics(codeBlock, exec)) - LLINT_RETURN_TWO(0, exec); + LLINT_RETURN_TWO(0, 0); if (kind == Prologue) - LLINT_RETURN_TWO(codeBlock->jitCode()->executableAddress(), exec); + LLINT_RETURN_TWO(codeBlock->jitCode()->executableAddress(), 0); ASSERT(kind == ArityCheck); - LLINT_RETURN_TWO(codeBlock->jitCodeWithArityCheck().executableAddress(), exec); + LLINT_RETURN_TWO(codeBlock->jitCode()->addressForCall(MustCheckArity).executableAddress(), 0); } #else // ENABLE(JIT) static SlowPathReturnType entryOSR(ExecState* exec, Instruction*, CodeBlock* codeBlock, const char*, EntryKind) @@ -379,13 +403,13 @@ LLINT_SLOW_PATH_DECL(loop_osr) codeBlock->llintExecuteCounter(), "\n"); } - if (!shouldJIT(exec)) { + if (!shouldJIT(exec, codeBlock)) { codeBlock->dontJITAnytimeSoon(); - LLINT_RETURN_TWO(0, exec); + LLINT_RETURN_TWO(0, 0); } if (!jitCompileAndSetHeuristics(codeBlock, exec)) - LLINT_RETURN_TWO(0, exec); + LLINT_RETURN_TWO(0, 0); ASSERT(codeBlock->jitType() == JITCode::BaselineJIT); @@ -398,10 +422,11 @@ LLINT_SLOW_PATH_DECL(loop_osr) void* jumpTarget = codeBlock->jitCode()->executableAddressAtOffset(mapping->m_machineCodeOffset); ASSERT(jumpTarget); - LLINT_RETURN_TWO(jumpTarget, exec); + LLINT_RETURN_TWO(jumpTarget, exec->topOfFrame()); #else // ENABLE(JIT) + UNUSED_PARAM(pc); codeBlock->dontJITAnytimeSoon(); - LLINT_RETURN_TWO(0, exec); + LLINT_RETURN_TWO(0, 0); #endif // ENABLE(JIT) } @@ -416,7 +441,7 @@ LLINT_SLOW_PATH_DECL(replace) codeBlock->llintExecuteCounter(), "\n"); } - if (shouldJIT(exec)) + if (shouldJIT(exec, codeBlock)) jitCompileAndSetHeuristics(codeBlock, exec); else codeBlock->dontJITAnytimeSoon(); @@ -433,28 +458,34 @@ LLINT_SLOW_PATH_DECL(stack_check) #if LLINT_SLOW_PATH_TRACING dataLogF("Checking stack height with exec = %p.\n", exec); dataLogF("CodeBlock = %p.\n", exec->codeBlock()); - dataLogF("Num callee registers = %u.\n", exec->codeBlock()->m_numCalleeRegisters); + dataLogF("Num callee registers = %u.\n", exec->codeBlock()->m_numCalleeLocals); dataLogF("Num vars = %u.\n", exec->codeBlock()->m_numVars); - dataLogF("Current end is at %p.\n", exec->vm().interpreter->stack().end()); + +#if ENABLE(JIT) + dataLogF("Current end is at %p.\n", exec->vm().stackLimit()); +#else + dataLogF("Current end is at %p.\n", exec->vm().jsStackLimit()); #endif - ASSERT(!exec->vm().interpreter->stack().containsAddress(&exec->registers()[virtualRegisterForLocal(exec->codeBlock()->m_numCalleeRegisters).offset()])); - if 
(UNLIKELY(!vm.interpreter->stack().grow(&exec->registers()[virtualRegisterForLocal(exec->codeBlock()->m_numCalleeRegisters).offset()]))) { - exec = exec->callerFrame(); - CommonSlowPaths::interpreterThrowInCaller(exec, createStackOverflowError(exec)); - pc = returnToThrowForThrownException(exec); - } - LLINT_END_IMPL(); -} -LLINT_SLOW_PATH_DECL(slow_path_create_activation) -{ - LLINT_BEGIN(); -#if LLINT_SLOW_PATH_TRACING - dataLogF("Creating an activation, exec = %p!\n", exec); #endif - JSActivation* activation = JSActivation::create(vm, exec, exec->codeBlock()); - exec->setScope(activation); - LLINT_RETURN(JSValue(activation)); + // If the stack check succeeds and we don't need to throw the error, then + // we'll return 0 instead. The prologue will check for a non-zero value + // when determining whether to set the callFrame or not. + + // For JIT enabled builds which uses the C stack, the stack is not growable. + // Hence, if we get here, then we know a stack overflow is imminent. So, just + // throw the StackOverflowError unconditionally. +#if !ENABLE(JIT) + ASSERT(!vm.interpreter->stack().containsAddress(exec->topOfFrame())); + if (LIKELY(vm.interpreter->stack().ensureCapacityFor(exec->topOfFrame()))) + LLINT_RETURN_TWO(pc, 0); +#endif + + vm.topCallFrame = exec; + ErrorHandlingScope errorScope(vm); + vm.throwException(exec, createStackOverflowError(exec)); + pc = returnToThrow(exec); + LLINT_RETURN_TWO(pc, exec); } LLINT_SLOW_PATH_DECL(slow_path_new_object) @@ -490,30 +521,28 @@ LLINT_SLOW_PATH_DECL(slow_path_new_regexp) LLINT_RETURN(RegExpObject::create(vm, exec->lexicalGlobalObject()->regExpStructure(), regExp)); } -LLINT_SLOW_PATH_DECL(slow_path_check_has_instance) +LLINT_SLOW_PATH_DECL(slow_path_instanceof) { LLINT_BEGIN(); - JSValue value = LLINT_OP_C(2).jsValue(); - JSValue baseVal = LLINT_OP_C(3).jsValue(); - if (baseVal.isObject()) { - JSObject* baseObject = asObject(baseVal); - ASSERT(!baseObject->structure()->typeInfo().implementsDefaultHasInstance()); - if (baseObject->structure()->typeInfo().implementsHasInstance()) { - pc += pc[4].u.operand; - LLINT_RETURN(jsBoolean(baseObject->methodTable()->customHasInstance(baseObject, exec, value))); - } - } - LLINT_THROW(createInvalidParameterError(exec, "instanceof", baseVal)); + JSValue proto = LLINT_OP_C(3).jsValue(); + ASSERT(!value.isObject() || !proto.isObject()); + LLINT_RETURN(jsBoolean(JSObject::defaultHasInstance(exec, value, proto))); } -LLINT_SLOW_PATH_DECL(slow_path_instanceof) +LLINT_SLOW_PATH_DECL(slow_path_instanceof_custom) { LLINT_BEGIN(); + JSValue value = LLINT_OP_C(2).jsValue(); - JSValue proto = LLINT_OP_C(3).jsValue(); - ASSERT(!value.isObject() || !proto.isObject()); - LLINT_RETURN(jsBoolean(JSObject::defaultHasInstance(exec, value, proto))); + JSValue constructor = LLINT_OP_C(3).jsValue(); + JSValue hasInstanceValue = LLINT_OP_C(4).jsValue(); + + ASSERT(constructor.isObject()); + ASSERT(hasInstanceValue != exec->lexicalGlobalObject()->functionProtoHasInstanceSymbolFunction() || !constructor.getObject()->structure()->typeInfo().implementsDefaultHasInstance()); + + JSValue result = jsBoolean(constructor.getObject()->hasInstance(exec, value, hasInstanceValue)); + LLINT_RETURN(result); } LLINT_SLOW_PATH_DECL(slow_path_get_by_id) @@ -522,7 +551,7 @@ LLINT_SLOW_PATH_DECL(slow_path_get_by_id) CodeBlock* codeBlock = exec->codeBlock(); const Identifier& ident = codeBlock->identifier(pc[3].u.operand); JSValue baseValue = LLINT_OP_C(2).jsValue(); - PropertySlot slot(baseValue); + PropertySlot slot(baseValue, 
PropertySlot::PropertySlot::InternalMethodType::Get); JSValue result = baseValue.get(exec, ident, slot); LLINT_CHECK_EXCEPTION(); @@ -537,26 +566,27 @@ LLINT_SLOW_PATH_DECL(slow_path_get_by_id) JSCell* baseCell = baseValue.asCell(); Structure* structure = baseCell->structure(); + // Start out by clearing out the old cache. + pc[0].u.opcode = LLInt::getOpcode(op_get_by_id); + pc[4].u.pointer = nullptr; // old structure + pc[5].u.pointer = nullptr; // offset + if (!structure->isUncacheableDictionary() - && !structure->typeInfo().prohibitsPropertyCaching()) { - ConcurrentJITLocker locker(codeBlock->m_lock); + && !structure->typeInfo().prohibitsPropertyCaching() + && !structure->typeInfo().newImpurePropertyFiresWatchpoints()) { + vm.heap.writeBarrier(codeBlock); - pc[4].u.structure.set( - vm, codeBlock->ownerExecutable(), structure); - if (isInlineOffset(slot.cachedOffset())) { - pc[0].u.opcode = LLInt::getOpcode(llint_op_get_by_id); - pc[5].u.operand = offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + JSObject::offsetOfInlineStorage(); - } else { - pc[0].u.opcode = LLInt::getOpcode(llint_op_get_by_id_out_of_line); - pc[5].u.operand = offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue); - } + ConcurrentJITLocker locker(codeBlock->m_lock); + + pc[4].u.structureID = structure->id(); + pc[5].u.operand = slot.cachedOffset(); } } if (!LLINT_ALWAYS_ACCESS_SLOW && isJSArray(baseValue) && ident == exec->propertyNames().length) { - pc[0].u.opcode = LLInt::getOpcode(llint_op_get_array_length); + pc[0].u.opcode = LLInt::getOpcode(op_get_array_length); ArrayProfile* arrayProfile = codeBlock->getOrAddArrayProfile(pc - codeBlock->instructions().begin()); arrayProfile->observeStructure(baseValue.asCell()->structure()); pc[4].u.arrayProfile = arrayProfile; @@ -572,7 +602,7 @@ LLINT_SLOW_PATH_DECL(slow_path_get_arguments_length) CodeBlock* codeBlock = exec->codeBlock(); const Identifier& ident = codeBlock->identifier(pc[3].u.operand); JSValue baseValue = LLINT_OP(2).jsValue(); - PropertySlot slot(baseValue); + PropertySlot slot(baseValue, PropertySlot::InternalMethodType::Get); LLINT_RETURN(baseValue.get(exec, ident, slot)); } @@ -584,15 +614,23 @@ LLINT_SLOW_PATH_DECL(slow_path_put_by_id) JSValue baseValue = LLINT_OP_C(1).jsValue(); PutPropertySlot slot(baseValue, codeBlock->isStrictMode(), codeBlock->putByIdContext()); - if (pc[8].u.operand) + if (pc[8].u.putByIdFlags & PutByIdIsDirect) asObject(baseValue)->putDirect(vm, ident, LLINT_OP_C(3).jsValue(), slot); else - baseValue.put(exec, ident, LLINT_OP_C(3).jsValue(), slot); + baseValue.putInline(exec, ident, LLINT_OP_C(3).jsValue(), slot); LLINT_CHECK_EXCEPTION(); if (!LLINT_ALWAYS_ACCESS_SLOW && baseValue.isCell() - && slot.isCacheable()) { + && slot.isCacheablePut()) { + + // Start out by clearing out the old cache. 
+ pc[4].u.pointer = nullptr; // old structure + pc[5].u.pointer = nullptr; // offset + pc[6].u.pointer = nullptr; // new structure + pc[7].u.pointer = nullptr; // structure chain + pc[8].u.putByIdFlags = + static_cast<PutByIdFlags>(pc[8].u.putByIdFlags & PutByIdPersistentFlagsMask); JSCell* baseCell = baseValue.asCell(); Structure* structure = baseCell->structure(); @@ -600,55 +638,38 @@ LLINT_SLOW_PATH_DECL(slow_path_put_by_id) if (!structure->isUncacheableDictionary() && !structure->typeInfo().prohibitsPropertyCaching() && baseCell == slot.base()) { + + vm.heap.writeBarrier(codeBlock); if (slot.type() == PutPropertySlot::NewProperty) { GCSafeConcurrentJITLocker locker(codeBlock->m_lock, vm.heap); if (!structure->isDictionary() && structure->previousID()->outOfLineCapacity() == structure->outOfLineCapacity()) { ASSERT(structure->previousID()->transitionWatchpointSetHasBeenInvalidated()); - - // This is needed because some of the methods we call - // below may GC. - pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id); - if (normalizePrototypeChain(exec, baseCell) != InvalidPrototypeChain) { + if (normalizePrototypeChain(exec, structure) != InvalidPrototypeChain) { ASSERT(structure->previousID()->isObject()); - pc[4].u.structure.set( - vm, codeBlock->ownerExecutable(), structure->previousID()); - if (isInlineOffset(slot.cachedOffset())) - pc[5].u.operand = offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + JSObject::offsetOfInlineStorage(); - else - pc[5].u.operand = offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue); - pc[6].u.structure.set( - vm, codeBlock->ownerExecutable(), structure); - StructureChain* chain = structure->prototypeChain(exec); - ASSERT(chain); - pc[7].u.structureChain.set( - vm, codeBlock->ownerExecutable(), chain); - - if (pc[8].u.operand) { - if (isInlineOffset(slot.cachedOffset())) - pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id_transition_direct); - else - pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id_transition_direct_out_of_line); - } else { - if (isInlineOffset(slot.cachedOffset())) - pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id_transition_normal); - else - pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id_transition_normal_out_of_line); + pc[4].u.structureID = structure->previousID()->id(); + pc[5].u.operand = slot.cachedOffset(); + pc[6].u.structureID = structure->id(); + if (!(pc[8].u.putByIdFlags & PutByIdIsDirect)) { + StructureChain* chain = structure->prototypeChain(exec); + ASSERT(chain); + pc[7].u.structureChain.set( + vm, codeBlock, chain); } + pc[8].u.putByIdFlags = static_cast<PutByIdFlags>( + pc[8].u.putByIdFlags | + structure->inferredTypeDescriptorFor(ident.impl()).putByIdFlags()); } } } else { - pc[4].u.structure.set( - vm, codeBlock->ownerExecutable(), structure); - if (isInlineOffset(slot.cachedOffset())) { - pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id); - pc[5].u.operand = offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + JSObject::offsetOfInlineStorage(); - } else { - pc[0].u.opcode = LLInt::getOpcode(llint_op_put_by_id_out_of_line); - pc[5].u.operand = offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue); - } + structure->didCachePropertyReplacement(vm, slot.cachedOffset()); + pc[4].u.structureID = structure->id(); + pc[5].u.operand = slot.cachedOffset(); + pc[8].u.putByIdFlags = static_cast<PutByIdFlags>( + pc[8].u.putByIdFlags | + structure->inferredTypeDescriptorFor(ident.impl()).putByIdFlags()); } } } @@ -671,8 +692,14 @@ LLINT_SLOW_PATH_DECL(slow_path_del_by_id) 
inline JSValue getByVal(ExecState* exec, JSValue baseValue, JSValue subscript) { if (LIKELY(baseValue.isCell() && subscript.isString())) { - if (JSValue result = baseValue.asCell()->fastGetOwnProperty(exec, asString(subscript)->value(exec))) - return result; + VM& vm = exec->vm(); + Structure& structure = *baseValue.asCell()->structure(vm); + if (JSCell::canUseFastGetOwnProperty(structure)) { + if (RefPtr<AtomicStringImpl> existingAtomicString = asString(subscript)->toExistingAtomicString(exec)) { + if (JSValue result = baseValue.asCell()->fastGetOwnProperty(vm, structure, existingAtomicString.get())) + return result; + } + } } if (subscript.isUInt32()) { @@ -683,10 +710,12 @@ inline JSValue getByVal(ExecState* exec, JSValue baseValue, JSValue subscript) return baseValue.get(exec, i); } - if (isName(subscript)) - return baseValue.get(exec, jsCast<NameInstance*>(subscript.asCell())->privateName()); - - Identifier property(exec, subscript.toString(exec)->value(exec)); + baseValue.requireObjectCoercible(exec); + if (exec->hadException()) + return jsUndefined(); + auto property = subscript.toPropertyKey(exec); + if (exec->hadException()) + return jsUndefined(); return baseValue.get(exec, property); } @@ -696,26 +725,6 @@ LLINT_SLOW_PATH_DECL(slow_path_get_by_val) LLINT_RETURN_PROFILED(op_get_by_val, getByVal(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(3).jsValue())); } -LLINT_SLOW_PATH_DECL(slow_path_get_argument_by_val) -{ - LLINT_BEGIN(); - JSValue arguments = LLINT_OP(2).jsValue(); - if (!arguments) { - arguments = Arguments::create(vm, exec); - LLINT_CHECK_EXCEPTION(); - LLINT_OP(2) = arguments; - exec->uncheckedR(unmodifiedArgumentsRegister(VirtualRegister(pc[2].u.operand)).offset()) = arguments; - } - - LLINT_RETURN_PROFILED(op_get_argument_by_val, getByVal(exec, arguments, LLINT_OP_C(3).jsValue())); -} - -LLINT_SLOW_PATH_DECL(slow_path_get_by_pname) -{ - LLINT_BEGIN(); - LLINT_RETURN(getByVal(exec, LLINT_OP_C(2).jsValue(), LLINT_OP_C(3).jsValue())); -} - LLINT_SLOW_PATH_DECL(slow_path_put_by_val) { LLINT_BEGIN(); @@ -738,13 +747,7 @@ LLINT_SLOW_PATH_DECL(slow_path_put_by_val) LLINT_END(); } - if (isName(subscript)) { - PutPropertySlot slot(baseValue, exec->codeBlock()->isStrictMode()); - baseValue.put(exec, jsCast<NameInstance*>(subscript.asCell())->privateName(), value, slot); - LLINT_END(); - } - - Identifier property(exec, subscript.toString(exec)->value(exec)); + auto property = subscript.toPropertyKey(exec); LLINT_CHECK_EXCEPTION(); PutPropertySlot slot(baseValue, exec->codeBlock()->isStrictMode()); baseValue.put(exec, property, value, slot); @@ -760,19 +763,34 @@ LLINT_SLOW_PATH_DECL(slow_path_put_by_val_direct) JSValue value = LLINT_OP_C(3).jsValue(); RELEASE_ASSERT(baseValue.isObject()); JSObject* baseObject = asObject(baseValue); + bool isStrictMode = exec->codeBlock()->isStrictMode(); if (LIKELY(subscript.isUInt32())) { - uint32_t i = subscript.asUInt32(); - baseObject->putDirectIndex(exec, i, value); - } else if (isName(subscript)) { - PutPropertySlot slot(baseObject, exec->codeBlock()->isStrictMode()); - baseObject->putDirect(exec->vm(), jsCast<NameInstance*>(subscript.asCell())->privateName(), value, slot); - } else { - Identifier property(exec, subscript.toString(exec)->value(exec)); - if (!exec->vm().exception()) { // Don't put to an object if toString threw an exception. 
- PutPropertySlot slot(baseObject, exec->codeBlock()->isStrictMode()); - baseObject->putDirect(exec->vm(), property, value, slot); + // Despite its name, JSValue::isUInt32 will return true only for positive boxed int32_t; all those values are valid array indices. + ASSERT(isIndex(subscript.asUInt32())); + baseObject->putDirectIndex(exec, subscript.asUInt32(), value, 0, isStrictMode ? PutDirectIndexShouldThrow : PutDirectIndexShouldNotThrow); + LLINT_END(); + } + + if (subscript.isDouble()) { + double subscriptAsDouble = subscript.asDouble(); + uint32_t subscriptAsUInt32 = static_cast<uint32_t>(subscriptAsDouble); + if (subscriptAsDouble == subscriptAsUInt32 && isIndex(subscriptAsUInt32)) { + baseObject->putDirectIndex(exec, subscriptAsUInt32, value, 0, isStrictMode ? PutDirectIndexShouldThrow : PutDirectIndexShouldNotThrow); + LLINT_END(); } } + + // Don't put to an object if toString threw an exception. + auto property = subscript.toPropertyKey(exec); + if (exec->vm().exception()) + LLINT_END(); + + if (Optional<uint32_t> index = parseIndex(property)) + baseObject->putDirectIndex(exec, index.value(), value, 0, isStrictMode ? PutDirectIndexShouldThrow : PutDirectIndexShouldNotThrow); + else { + PutPropertySlot slot(baseObject, isStrictMode); + baseObject->putDirect(exec->vm(), property, value, slot); + } LLINT_END(); } @@ -789,11 +807,9 @@ LLINT_SLOW_PATH_DECL(slow_path_del_by_val) uint32_t i; if (subscript.getUInt32(i)) couldDelete = baseObject->methodTable()->deletePropertyByIndex(baseObject, exec, i); - else if (isName(subscript)) - couldDelete = baseObject->methodTable()->deleteProperty(baseObject, exec, jsCast<NameInstance*>(subscript.asCell())->privateName()); else { LLINT_CHECK_EXCEPTION(); - Identifier property(exec, subscript.toString(exec)->value(exec)); + auto property = subscript.toPropertyKey(exec); LLINT_CHECK_EXCEPTION(); couldDelete = baseObject->methodTable()->deleteProperty(baseObject, exec, property); } @@ -813,29 +829,97 @@ LLINT_SLOW_PATH_DECL(slow_path_put_by_index) LLINT_END(); } -LLINT_SLOW_PATH_DECL(slow_path_put_getter_setter) +LLINT_SLOW_PATH_DECL(slow_path_put_getter_by_id) +{ + LLINT_BEGIN(); + ASSERT(LLINT_OP(1).jsValue().isObject()); + JSObject* baseObj = asObject(LLINT_OP(1).jsValue()); + + unsigned options = pc[3].u.operand; + + JSValue getter = LLINT_OP(4).jsValue(); + ASSERT(getter.isObject()); + + baseObj->putGetter(exec, exec->codeBlock()->identifier(pc[2].u.operand), asObject(getter), options); + LLINT_END(); +} + +LLINT_SLOW_PATH_DECL(slow_path_put_setter_by_id) +{ + LLINT_BEGIN(); + ASSERT(LLINT_OP(1).jsValue().isObject()); + JSObject* baseObj = asObject(LLINT_OP(1).jsValue()); + + unsigned options = pc[3].u.operand; + + JSValue setter = LLINT_OP(4).jsValue(); + ASSERT(setter.isObject()); + + baseObj->putSetter(exec, exec->codeBlock()->identifier(pc[2].u.operand), asObject(setter), options); + LLINT_END(); +} + +LLINT_SLOW_PATH_DECL(slow_path_put_getter_setter_by_id) { LLINT_BEGIN(); ASSERT(LLINT_OP(1).jsValue().isObject()); JSObject* baseObj = asObject(LLINT_OP(1).jsValue()); - GetterSetter* accessor = GetterSetter::create(vm); + GetterSetter* accessor = GetterSetter::create(vm, exec->lexicalGlobalObject()); LLINT_CHECK_EXCEPTION(); - - JSValue getter = LLINT_OP(3).jsValue(); - JSValue setter = LLINT_OP(4).jsValue(); + + JSValue getter = LLINT_OP(4).jsValue(); + JSValue setter = LLINT_OP(5).jsValue(); ASSERT(getter.isObject() || getter.isUndefined()); ASSERT(setter.isObject() || setter.isUndefined()); ASSERT(getter.isObject() || setter.isObject()); 
if (!getter.isUndefined()) - accessor->setGetter(vm, asObject(getter)); + accessor->setGetter(vm, exec->lexicalGlobalObject(), asObject(getter)); if (!setter.isUndefined()) - accessor->setSetter(vm, asObject(setter)); + accessor->setSetter(vm, exec->lexicalGlobalObject(), asObject(setter)); baseObj->putDirectAccessor( exec, exec->codeBlock()->identifier(pc[2].u.operand), - accessor, Accessor); + accessor, pc[3].u.operand); + LLINT_END(); +} + +LLINT_SLOW_PATH_DECL(slow_path_put_getter_by_val) +{ + LLINT_BEGIN(); + ASSERT(LLINT_OP(1).jsValue().isObject()); + JSObject* baseObj = asObject(LLINT_OP(1).jsValue()); + JSValue subscript = LLINT_OP_C(2).jsValue(); + + unsigned options = pc[3].u.operand; + + JSValue getter = LLINT_OP(4).jsValue(); + ASSERT(getter.isObject()); + + auto property = subscript.toPropertyKey(exec); + LLINT_CHECK_EXCEPTION(); + + baseObj->putGetter(exec, property, asObject(getter), options); + LLINT_END(); +} + +LLINT_SLOW_PATH_DECL(slow_path_put_setter_by_val) +{ + LLINT_BEGIN(); + ASSERT(LLINT_OP(1).jsValue().isObject()); + JSObject* baseObj = asObject(LLINT_OP(1).jsValue()); + JSValue subscript = LLINT_OP_C(2).jsValue(); + + unsigned options = pc[3].u.operand; + + JSValue setter = LLINT_OP(4).jsValue(); + ASSERT(setter.isObject()); + + auto property = subscript.toPropertyKey(exec); + LLINT_CHECK_EXCEPTION(); + + baseObj->putSetter(exec, property, asObject(setter), options); LLINT_END(); } @@ -947,23 +1031,55 @@ LLINT_SLOW_PATH_DECL(slow_path_new_func) { LLINT_BEGIN(); CodeBlock* codeBlock = exec->codeBlock(); - ASSERT(codeBlock->codeType() != FunctionCode - || !codeBlock->needsFullScopeChain() - || exec->uncheckedR(codeBlock->activationRegister().offset()).jsValue()); + JSScope* scope = exec->uncheckedR(pc[2].u.operand).Register::scope(); #if LLINT_SLOW_PATH_TRACING dataLogF("Creating function!\n"); #endif - LLINT_RETURN(JSFunction::create(vm, codeBlock->functionDecl(pc[2].u.operand), exec->scope())); + LLINT_RETURN(JSFunction::create(vm, codeBlock->functionDecl(pc[3].u.operand), scope)); +} + +LLINT_SLOW_PATH_DECL(slow_path_new_generator_func) +{ + LLINT_BEGIN(); + CodeBlock* codeBlock = exec->codeBlock(); + JSScope* scope = exec->uncheckedR(pc[2].u.operand).Register::scope(); +#if LLINT_SLOW_PATH_TRACING + dataLogF("Creating function!\n"); +#endif + LLINT_RETURN(JSGeneratorFunction::create(vm, codeBlock->functionDecl(pc[3].u.operand), scope)); } LLINT_SLOW_PATH_DECL(slow_path_new_func_exp) { LLINT_BEGIN(); + CodeBlock* codeBlock = exec->codeBlock(); - FunctionExecutable* function = codeBlock->functionExpr(pc[2].u.operand); - JSFunction* func = JSFunction::create(vm, function, exec->scope()); + JSScope* scope = exec->uncheckedR(pc[2].u.operand).Register::scope(); + FunctionExecutable* executable = codeBlock->functionExpr(pc[3].u.operand); - LLINT_RETURN(func); + LLINT_RETURN(JSFunction::create(vm, executable, scope)); +} + +LLINT_SLOW_PATH_DECL(slow_path_new_generator_func_exp) +{ + LLINT_BEGIN(); + + CodeBlock* codeBlock = exec->codeBlock(); + JSScope* scope = exec->uncheckedR(pc[2].u.operand).Register::scope(); + FunctionExecutable* executable = codeBlock->functionExpr(pc[3].u.operand); + + LLINT_RETURN(JSGeneratorFunction::create(vm, executable, scope)); +} + +LLINT_SLOW_PATH_DECL(slow_path_new_arrow_func_exp) +{ + LLINT_BEGIN(); + + CodeBlock* codeBlock = exec->codeBlock(); + JSScope* scope = exec->uncheckedR(pc[2].u.operand).Register::scope(); + FunctionExecutable* executable = codeBlock->functionExpr(pc[3].u.operand); + + LLINT_RETURN(JSFunction::create(vm, 
executable, scope)); } static SlowPathReturnType handleHostCall(ExecState* execCallee, Instruction* pc, JSValue callee, CodeSpecializationKind kind) @@ -977,7 +1093,6 @@ static SlowPathReturnType handleHostCall(ExecState* execCallee, Instruction* pc, ExecState* exec = execCallee->callerFrame(); VM& vm = exec->vm(); - execCallee->setScope(exec->scope()); execCallee->setCodeBlock(0); execCallee->clearReturnPC(); @@ -992,7 +1107,7 @@ static SlowPathReturnType handleHostCall(ExecState* execCallee, Instruction* pc, execCallee->setCallee(asObject(callee)); vm.hostCallReturnValue = JSValue::decode(callData.native.function(execCallee)); - LLINT_CALL_RETURN(execCallee, LLInt::getCodePtr(getHostCallReturnValue)); + LLINT_CALL_RETURN(execCallee, execCallee, LLInt::getCodePtr(getHostCallReturnValue)); } #if LLINT_SLOW_PATH_TRACING @@ -1015,7 +1130,7 @@ static SlowPathReturnType handleHostCall(ExecState* execCallee, Instruction* pc, execCallee->setCallee(asObject(callee)); vm.hostCallReturnValue = JSValue::decode(constructData.native.function(execCallee)); - LLINT_CALL_RETURN(execCallee, LLInt::getCodePtr(getHostCallReturnValue)); + LLINT_CALL_RETURN(execCallee, execCallee, LLInt::getCodePtr(getHostCallReturnValue)); } #if LLINT_SLOW_PATH_TRACING @@ -1028,10 +1143,12 @@ static SlowPathReturnType handleHostCall(ExecState* execCallee, Instruction* pc, inline SlowPathReturnType setUpCall(ExecState* execCallee, Instruction* pc, CodeSpecializationKind kind, JSValue calleeAsValue, LLIntCallLinkInfo* callLinkInfo = 0) { + ExecState* exec = execCallee->callerFrame(); + #if LLINT_SLOW_PATH_TRACING - dataLogF("Performing call with recorded PC = %p\n", execCallee->callerFrame()->currentVPC()); + dataLogF("Performing call with recorded PC = %p\n", exec->currentVPC()); #endif - + JSCell* calleeAsFunctionCell = getJSFunction(calleeAsValue); if (!calleeAsFunctionCell) return handleHostCall(execCallee, pc, calleeAsValue, kind); @@ -1039,43 +1156,66 @@ inline SlowPathReturnType setUpCall(ExecState* execCallee, Instruction* pc, Code JSFunction* callee = jsCast<JSFunction*>(calleeAsFunctionCell); JSScope* scope = callee->scopeUnchecked(); VM& vm = *scope->vm(); - execCallee->setScope(scope); ExecutableBase* executable = callee->executable(); - + MacroAssemblerCodePtr codePtr; CodeBlock* codeBlock = 0; - if (executable->isHostFunction()) - codePtr = executable->hostCodeEntryFor(kind); - else { + bool isWebAssemblyExecutable = false; +#if ENABLE(WEBASSEMBLY) + isWebAssemblyExecutable = executable->isWebAssemblyExecutable(); +#endif + + if (executable->isHostFunction()) { + codePtr = executable->entrypointFor(kind, MustCheckArity); + } else if (!isWebAssemblyExecutable) { FunctionExecutable* functionExecutable = static_cast<FunctionExecutable*>(executable); - JSObject* error = functionExecutable->prepareForExecution(execCallee, callee->scope(), kind); + + if (!isCall(kind) && functionExecutable->constructAbility() == ConstructAbility::CannotConstruct) + LLINT_CALL_THROW(exec, createNotAConstructorError(exec, callee)); + + JSObject* error = functionExecutable->prepareForExecution(execCallee, callee, scope, kind); if (error) - LLINT_CALL_THROW(execCallee->callerFrame(), error); + LLINT_CALL_THROW(exec, error); codeBlock = functionExecutable->codeBlockFor(kind); ASSERT(codeBlock); + ArityCheckMode arity; if (execCallee->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters())) - codePtr = functionExecutable->jsCodeWithArityCheckEntryFor(kind); + arity = MustCheckArity; else - codePtr = 
functionExecutable->jsCodeEntryFor(kind); + arity = ArityCheckNotRequired; + codePtr = functionExecutable->entrypointFor(kind, arity); + } else { +#if ENABLE(WEBASSEMBLY) + WebAssemblyExecutable* webAssemblyExecutable = static_cast<WebAssemblyExecutable*>(executable); + webAssemblyExecutable->prepareForExecution(execCallee); + codeBlock = webAssemblyExecutable->codeBlockForCall(); + ASSERT(codeBlock); + ArityCheckMode arity; + if (execCallee->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters())) + arity = MustCheckArity; + else + arity = ArityCheckNotRequired; + codePtr = webAssemblyExecutable->entrypointFor(kind, arity); +#endif } + ASSERT(!!codePtr); + if (!LLINT_ALWAYS_ACCESS_SLOW && callLinkInfo) { - ExecState* execCaller = execCallee->callerFrame(); - - CodeBlock* callerCodeBlock = execCaller->codeBlock(); + CodeBlock* callerCodeBlock = exec->codeBlock(); ConcurrentJITLocker locker(callerCodeBlock->m_lock); if (callLinkInfo->isOnList()) callLinkInfo->remove(); - callLinkInfo->callee.set(vm, callerCodeBlock->ownerExecutable(), callee); - callLinkInfo->lastSeenCallee.set(vm, callerCodeBlock->ownerExecutable(), callee); + callLinkInfo->callee.set(vm, callerCodeBlock, callee); + callLinkInfo->lastSeenCallee.set(vm, callerCodeBlock, callee); callLinkInfo->machineCodeTarget = codePtr; if (codeBlock) - codeBlock->linkIncomingCall(execCaller, callLinkInfo); + codeBlock->linkIncomingCall(exec, callLinkInfo); } - LLINT_CALL_RETURN(execCallee, codePtr.executableAddress()); + LLINT_CALL_RETURN(exec, execCallee, codePtr.executableAddress()); } inline SlowPathReturnType genericCall(ExecState* exec, Instruction* pc, CodeSpecializationKind kind) @@ -1110,19 +1250,22 @@ LLINT_SLOW_PATH_DECL(slow_path_construct) return genericCall(exec, pc, CodeForConstruct); } -LLINT_SLOW_PATH_DECL(slow_path_size_and_alloc_frame_for_varargs) +LLINT_SLOW_PATH_DECL(slow_path_size_frame_for_varargs) { LLINT_BEGIN(); // This needs to: // - Set up a call frame while respecting the variable arguments. - ExecState* execCallee = sizeAndAllocFrameForVarargs(exec, &vm.interpreter->stack(), - LLINT_OP_C(4).jsValue(), pc[5].u.operand); - LLINT_CALL_CHECK_EXCEPTION(exec); + unsigned numUsedStackSlots = -pc[5].u.operand; + unsigned length = sizeFrameForVarargs(exec, &vm.interpreter->stack(), + LLINT_OP_C(4).jsValue(), numUsedStackSlots, pc[6].u.operand); + LLINT_CALL_CHECK_EXCEPTION(exec, exec); + ExecState* execCallee = calleeFrameForVarargs(exec, numUsedStackSlots, length + 1); + vm.varargsLength = length; vm.newCallFrameReturnValue = execCallee; - LLINT_END(); + LLINT_RETURN_CALLEE_FRAME(execCallee); } LLINT_SLOW_PATH_DECL(slow_path_call_varargs) @@ -1136,8 +1279,8 @@ LLINT_SLOW_PATH_DECL(slow_path_call_varargs) ExecState* execCallee = vm.newCallFrameReturnValue; - loadVarargs(exec, execCallee, LLINT_OP_C(3).jsValue(), LLINT_OP_C(4).jsValue()); - LLINT_CALL_CHECK_EXCEPTION(exec); + setupVarargsFrameAndSetThis(exec, execCallee, LLINT_OP_C(3).jsValue(), LLINT_OP_C(4).jsValue(), pc[6].u.operand, vm.varargsLength); + LLINT_CALL_CHECK_EXCEPTION(exec, exec); execCallee->uncheckedR(JSStack::Callee) = calleeAsValue; execCallee->setCallerFrame(exec); @@ -1145,7 +1288,28 @@ LLINT_SLOW_PATH_DECL(slow_path_call_varargs) return setUpCall(execCallee, pc, CodeForCall, calleeAsValue); } - + +LLINT_SLOW_PATH_DECL(slow_path_construct_varargs) +{ + LLINT_BEGIN_NO_SET_PC(); + // This needs to: + // - Figure out what to call and compile it if necessary. 
+ // - Return a tuple of machine code address to call and the new call frame. + + JSValue calleeAsValue = LLINT_OP_C(2).jsValue(); + + ExecState* execCallee = vm.newCallFrameReturnValue; + + setupVarargsFrameAndSetThis(exec, execCallee, LLINT_OP_C(3).jsValue(), LLINT_OP_C(4).jsValue(), pc[6].u.operand, vm.varargsLength); + LLINT_CALL_CHECK_EXCEPTION(exec, exec); + + execCallee->uncheckedR(JSStack::Callee) = calleeAsValue; + execCallee->setCallerFrame(exec); + exec->setCurrentVPC(pc); + + return setUpCall(execCallee, pc, CodeForConstruct, calleeAsValue); +} + LLINT_SLOW_PATH_DECL(slow_path_call_eval) { LLINT_BEGIN_NO_SET_PC(); @@ -1156,7 +1320,6 @@ LLINT_SLOW_PATH_DECL(slow_path_call_eval) execCallee->setArgumentCountIncludingThis(pc[3].u.operand); execCallee->setCallerFrame(exec); execCallee->uncheckedR(JSStack::Callee) = calleeAsValue; - execCallee->setScope(exec->scope()); execCallee->setReturnPC(LLInt::getCodePtr(llint_generic_return_point)); execCallee->setCodeBlock(0); exec->setCurrentVPC(pc); @@ -1165,27 +1328,7 @@ LLINT_SLOW_PATH_DECL(slow_path_call_eval) return setUpCall(execCallee, pc, CodeForCall, calleeAsValue); vm.hostCallReturnValue = eval(execCallee); - LLINT_CALL_RETURN(execCallee, LLInt::getCodePtr(getHostCallReturnValue)); -} - -LLINT_SLOW_PATH_DECL(slow_path_tear_off_activation) -{ - LLINT_BEGIN(); - ASSERT(exec->codeBlock()->needsFullScopeChain()); - jsCast<JSActivation*>(LLINT_OP(1).jsValue())->tearOff(vm); - LLINT_END(); -} - -LLINT_SLOW_PATH_DECL(slow_path_tear_off_arguments) -{ - LLINT_BEGIN(); - ASSERT(exec->codeBlock()->usesArguments()); - Arguments* arguments = jsCast<Arguments*>(exec->uncheckedR(unmodifiedArgumentsRegister(VirtualRegister(pc[1].u.operand)).offset()).jsValue()); - if (JSValue activationValue = LLINT_OP_C(2).jsValue()) - arguments->didTearOffActivation(exec, jsCast<JSActivation*>(activationValue)); - else - arguments->tearOff(exec); - LLINT_END(); + LLINT_CALL_RETURN(exec, execCallee, LLInt::getCodePtr(getHostCallReturnValue)); } LLINT_SLOW_PATH_DECL(slow_path_strcat) @@ -1200,70 +1343,6 @@ LLINT_SLOW_PATH_DECL(slow_path_to_primitive) LLINT_RETURN(LLINT_OP_C(2).jsValue().toPrimitive(exec)); } -LLINT_SLOW_PATH_DECL(slow_path_get_pnames) -{ - LLINT_BEGIN(); - JSValue v = LLINT_OP(2).jsValue(); - if (v.isUndefinedOrNull()) { - pc += pc[5].u.operand; - LLINT_END(); - } - - JSObject* o = v.toObject(exec); - Structure* structure = o->structure(); - JSPropertyNameIterator* jsPropertyNameIterator = structure->enumerationCache(); - if (!jsPropertyNameIterator || jsPropertyNameIterator->cachedPrototypeChain() != structure->prototypeChain(exec)) - jsPropertyNameIterator = JSPropertyNameIterator::create(exec, o); - - LLINT_OP(1) = JSValue(jsPropertyNameIterator); - LLINT_OP(2) = JSValue(o); - LLINT_OP(3) = Register::withInt(0); - LLINT_OP(4) = Register::withInt(jsPropertyNameIterator->size()); - - pc += OPCODE_LENGTH(op_get_pnames); - LLINT_END(); -} - -LLINT_SLOW_PATH_DECL(slow_path_next_pname) -{ - LLINT_BEGIN(); - JSObject* base = asObject(LLINT_OP(2).jsValue()); - JSString* property = asString(LLINT_OP(1).jsValue()); - if (base->hasProperty(exec, Identifier(exec, property->value(exec)))) { - // Go to target. - pc += pc[6].u.operand; - } // Else, don't change the PC, so the interpreter will reloop. 
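        // (Worth noting about the branch protocol visible here: a slow path reports
        //  where execution should resume through the value of pc when LLINT_END()
        //  runs. Adding the jump offset stored in the instruction takes the branch;
        //  leaving pc untouched, as in the fall-through case above, makes the
        //  interpreter dispatch this same opcode again; that is what "reloop" means.)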
- LLINT_END(); -} - -LLINT_SLOW_PATH_DECL(slow_path_push_with_scope) -{ - LLINT_BEGIN(); - JSValue v = LLINT_OP_C(1).jsValue(); - JSObject* o = v.toObject(exec); - LLINT_CHECK_EXCEPTION(); - - exec->setScope(JSWithScope::create(exec, o)); - - LLINT_END(); -} - -LLINT_SLOW_PATH_DECL(slow_path_pop_scope) -{ - LLINT_BEGIN(); - exec->setScope(exec->scope()->next()); - LLINT_END(); -} - -LLINT_SLOW_PATH_DECL(slow_path_push_name_scope) -{ - LLINT_BEGIN(); - CodeBlock* codeBlock = exec->codeBlock(); - JSNameScope* scope = JSNameScope::create(exec, codeBlock->identifier(pc[1].u.operand), LLINT_OP(2).jsValue(), pc[3].u.operand); - exec->setScope(scope); - LLINT_END(); -} - LLINT_SLOW_PATH_DECL(slow_path_throw) { LLINT_BEGIN(); @@ -1273,16 +1352,20 @@ LLINT_SLOW_PATH_DECL(slow_path_throw) LLINT_SLOW_PATH_DECL(slow_path_throw_static_error) { LLINT_BEGIN(); + JSValue errorMessageValue = LLINT_OP_C(1).jsValue(); + RELEASE_ASSERT(errorMessageValue.isString()); + String errorMessage = asString(errorMessageValue)->value(exec); if (pc[2].u.operand) - LLINT_THROW(createReferenceError(exec, errorDescriptionForValue(exec, LLINT_OP_C(1).jsValue())->value(exec))); + LLINT_THROW(createReferenceError(exec, errorMessage)); else - LLINT_THROW(createTypeError(exec, errorDescriptionForValue(exec, LLINT_OP_C(1).jsValue())->value(exec))); + LLINT_THROW(createTypeError(exec, errorMessage)); } LLINT_SLOW_PATH_DECL(slow_path_handle_watchdog_timer) { LLINT_BEGIN_NO_SET_PC(); - if (UNLIKELY(vm.watchdog.didFire(exec))) + ASSERT(vm.watchdog()); + if (UNLIKELY(vm.shouldTriggerTermination(exec))) LLINT_THROW(createTerminatedExecutionException(&vm)); LLINT_RETURN_TWO(0, exec); } @@ -1312,87 +1395,128 @@ LLINT_SLOW_PATH_DECL(slow_path_profile_did_call) LLINT_END(); } -LLINT_SLOW_PATH_DECL(throw_from_native_call) -{ - LLINT_BEGIN(); - ASSERT(vm.exception()); - LLINT_END(); -} - LLINT_SLOW_PATH_DECL(slow_path_handle_exception) { LLINT_BEGIN_NO_SET_PC(); - ASSERT(vm.exception()); - genericUnwind(&vm, exec, vm.exception()); + genericUnwind(&vm, exec); LLINT_END_IMPL(); } -LLINT_SLOW_PATH_DECL(slow_path_resolve_scope) -{ - LLINT_BEGIN(); - const Identifier& ident = exec->codeBlock()->identifier(pc[2].u.operand); - LLINT_RETURN(JSScope::resolve(exec, exec->scope(), ident)); -} - LLINT_SLOW_PATH_DECL(slow_path_get_from_scope) { LLINT_BEGIN(); + const Identifier& ident = exec->codeBlock()->identifier(pc[3].u.operand); JSObject* scope = jsCast<JSObject*>(LLINT_OP(2).jsValue()); - ResolveModeAndType modeAndType(pc[4].u.operand); + GetPutInfo getPutInfo(pc[4].u.operand); - PropertySlot slot(scope); + // ModuleVar is always converted to ClosureVar for get_from_scope. + ASSERT(getPutInfo.resolveType() != ModuleVar); + + PropertySlot slot(scope, PropertySlot::InternalMethodType::Get); if (!scope->getPropertySlot(exec, ident, slot)) { - if (modeAndType.mode() == ThrowIfNotFound) + if (getPutInfo.resolveMode() == ThrowIfNotFound) LLINT_RETURN(exec->vm().throwException(exec, createUndefinedVariableError(exec, ident))); LLINT_RETURN(jsUndefined()); } - // Covers implicit globals. Since they don't exist until they first execute, we didn't know how to cache them at compile time. 
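// Illustrative sketch of the operand that GetPutInfo wraps above (read from
// pc[4]); the layout of two of its fields follows the constants declared for
// the LLInt asm later in this change (ResolveTypeMask = 0x3ff,
// InitializationModeShift = 10). This is a simplified stand-in, not the JSC
// definition, and the resolve-mode bits are omitted.

struct PackedGetPutInfo {
    unsigned bits;
    unsigned resolveType() const        { return bits & 0x3ff; }           // GlobalProperty, GlobalVar, GlobalLexicalVar, ...
    unsigned initializationMode() const { return (bits & 0xffc00) >> 10; } // Initialization == 0
};

// Usage against the raw bytecode operand, e.g.:
//     PackedGetPutInfo info { static_cast<unsigned>(pc[4].u.operand) };
//     bool isGlobalLexical = info.resolveType() == 2; // GlobalLexicalVar in the ResolveType table below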
- if (slot.isCacheableValue() && slot.slotBase() == scope && scope->structure()->propertyAccessesAreCacheable()) { - if (modeAndType.type() == GlobalProperty || modeAndType.type() == GlobalPropertyWithVarInjectionChecks) { - CodeBlock* codeBlock = exec->codeBlock(); - ConcurrentJITLocker locker(codeBlock->m_lock); - pc[5].u.structure.set(exec->vm(), codeBlock->ownerExecutable(), scope->structure()); - pc[6].u.operand = slot.cachedOffset(); - } + JSValue result = JSValue(); + if (jsDynamicCast<JSGlobalLexicalEnvironment*>(scope)) { + // When we can't statically prove we need a TDZ check, we must perform the check on the slow path. + result = slot.getValue(exec, ident); + if (result == jsTDZValue()) + LLINT_THROW(createTDZError(exec)); } - LLINT_RETURN(slot.getValue(exec, ident)); + CommonSlowPaths::tryCacheGetFromScopeGlobal(exec, vm, pc, scope, slot, ident); + + if (!result) + result = slot.getValue(exec, ident); + LLINT_RETURN(result); } LLINT_SLOW_PATH_DECL(slow_path_put_to_scope) { LLINT_BEGIN(); + CodeBlock* codeBlock = exec->codeBlock(); const Identifier& ident = codeBlock->identifier(pc[2].u.operand); JSObject* scope = jsCast<JSObject*>(LLINT_OP(1).jsValue()); JSValue value = LLINT_OP_C(3).jsValue(); - ResolveModeAndType modeAndType = ResolveModeAndType(pc[4].u.operand); + GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand); + if (getPutInfo.resolveType() == LocalClosureVar) { + JSLexicalEnvironment* environment = jsCast<JSLexicalEnvironment*>(scope); + environment->variableAt(ScopeOffset(pc[6].u.operand)).set(vm, environment, value); + + // Have to do this *after* the write, because if this puts the set into IsWatched, then we need + // to have already changed the value of the variable. Otherwise we might watch and constant-fold + // to the Undefined value from before the assignment. + if (WatchpointSet* set = pc[5].u.watchpointSet) + set->touch("Executed op_put_scope<LocalClosureVar>"); + LLINT_END(); + } - if (modeAndType.mode() == ThrowIfNotFound && !scope->hasProperty(exec, ident)) + bool hasProperty = scope->hasProperty(exec, ident); + if (hasProperty + && jsDynamicCast<JSGlobalLexicalEnvironment*>(scope) + && getPutInfo.initializationMode() != Initialization) { + // When we can't statically prove we need a TDZ check, we must perform the check on the slow path. + PropertySlot slot(scope, PropertySlot::InternalMethodType::Get); + JSGlobalLexicalEnvironment::getOwnPropertySlot(scope, exec, ident, slot); + if (slot.getValue(exec, ident) == jsTDZValue()) + LLINT_THROW(createTDZError(exec)); + } + + if (getPutInfo.resolveMode() == ThrowIfNotFound && !hasProperty) LLINT_THROW(createUndefinedVariableError(exec, ident)); - PutPropertySlot slot(scope, codeBlock->isStrictMode()); + PutPropertySlot slot(scope, codeBlock->isStrictMode(), PutPropertySlot::UnknownContext, getPutInfo.initializationMode() == Initialization); scope->methodTable()->put(scope, exec, ident, value, slot); - - // Covers implicit globals. Since they don't exist until they first execute, we didn't know how to cache them at compile time. 
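// The temporal-dead-zone checks added above protect global-scope let/const
// bindings: until the declaration has executed, the binding in the global
// lexical environment holds a TDZ sentinel, and both reads and non-initializing
// writes must throw a ReferenceError. In source terms, the case these slow
// paths catch is:
//
//     console.log(y);   // ReferenceError: y is read while still in its TDZ
//     let y = 1;
//
// The check itself has the shape below (reusing the names from the code above;
// this is a restatement, not additional behaviour):
//
//     JSValue current = slot.getValue(exec, ident);
//     if (current == jsTDZValue())
//         LLINT_THROW(createTDZError(exec));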
- if (modeAndType.type() == GlobalProperty || modeAndType.type() == GlobalPropertyWithVarInjectionChecks) { - if (slot.isCacheable() && slot.base() == scope && scope->structure()->propertyAccessesAreCacheable()) { - ConcurrentJITLocker locker(codeBlock->m_lock); - pc[5].u.structure.set(exec->vm(), codeBlock->ownerExecutable(), scope->structure()); - pc[6].u.operand = slot.cachedOffset(); - } - } + + CommonSlowPaths::tryCachePutToScopeGlobal(exec, codeBlock, pc, scope, getPutInfo, slot, ident); LLINT_END(); } -extern "C" void llint_write_barrier_slow(ExecState*, JSCell* cell) +LLINT_SLOW_PATH_DECL(slow_path_check_if_exception_is_uncatchable_and_notify_profiler) +{ + LLINT_BEGIN(); + RELEASE_ASSERT(!!vm.exception()); + + if (LegacyProfiler* profiler = vm.enabledProfiler()) + profiler->exceptionUnwind(exec); + + if (isTerminatedExecutionException(vm.exception())) + LLINT_RETURN_TWO(pc, bitwise_cast<void*>(static_cast<uintptr_t>(1))); + LLINT_RETURN_TWO(pc, 0); +} + +extern "C" SlowPathReturnType llint_throw_stack_overflow_error(VM* vm, ProtoCallFrame* protoFrame) { - Heap::writeBarrier(cell); + ExecState* exec = vm->topCallFrame; + if (!exec) + exec = protoFrame->callee()->globalObject()->globalExec(); + throwStackOverflowError(exec); + return encodeResult(0, 0); } -} } // namespace JSC::LLInt +#if !ENABLE(JIT) +extern "C" SlowPathReturnType llint_stack_check_at_vm_entry(VM* vm, Register* newTopOfStack) +{ + bool success = vm->interpreter->stack().ensureCapacityFor(newTopOfStack); + return encodeResult(reinterpret_cast<void*>(success), 0); +} +#endif -#endif // ENABLE(LLINT) +extern "C" void llint_write_barrier_slow(ExecState* exec, JSCell* cell) +{ + VM& vm = exec->vm(); + vm.heap.writeBarrier(cell); +} + +extern "C" NO_RETURN_DUE_TO_CRASH void llint_crash() +{ + CRASH(); +} + +} } // namespace JSC::LLInt diff --git a/Source/JavaScriptCore/llint/LLIntSlowPaths.h b/Source/JavaScriptCore/llint/LLIntSlowPaths.h index 8d60afa24..0d5c8da43 100644 --- a/Source/JavaScriptCore/llint/LLIntSlowPaths.h +++ b/Source/JavaScriptCore/llint/LLIntSlowPaths.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2014 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -27,21 +27,19 @@ #define LLIntSlowPaths_h #include "CommonSlowPaths.h" -#include <wtf/Platform.h> #include <wtf/StdLibExtras.h> -#if ENABLE(LLINT) - namespace JSC { class ExecState; struct Instruction; +struct ProtoCallFrame; namespace LLInt { extern "C" SlowPathReturnType llint_trace_operand(ExecState*, Instruction*, int fromWhere, int operand); extern "C" SlowPathReturnType llint_trace_value(ExecState*, Instruction*, int fromWhere, int operand); -extern "C" void llint_write_barrier_slow(ExecState*, JSCell*); +extern "C" void llint_write_barrier_slow(ExecState*, JSCell*) WTF_INTERNAL; #define LLINT_SLOW_PATH_DECL(name) \ extern "C" SlowPathReturnType llint_##name(ExecState* exec, Instruction* pc) @@ -64,26 +62,28 @@ LLINT_SLOW_PATH_HIDDEN_DECL(entry_osr_function_for_construct_arityCheck); LLINT_SLOW_PATH_HIDDEN_DECL(loop_osr); LLINT_SLOW_PATH_HIDDEN_DECL(replace); LLINT_SLOW_PATH_HIDDEN_DECL(stack_check); -LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_create_activation); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_object); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_array); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_array_with_size); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_array_buffer); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_regexp); -LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_check_has_instance); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_instanceof); +LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_instanceof_custom); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_by_id); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_arguments_length); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_by_id); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_del_by_id); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_by_val); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_argument_by_val); -LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_by_pname); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_by_val); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_by_val_direct); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_del_by_val); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_by_index); -LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_getter_setter); +LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_getter_by_id); +LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_setter_by_id); +LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_getter_setter_by_id); +LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_getter_by_val); +LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_setter_by_val); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_jtrue); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_jfalse); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_jless); @@ -99,35 +99,35 @@ LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_switch_char); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_switch_string); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_func); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_func_exp); +LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_generator_func); +LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_generator_func_exp); +LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_new_arrow_func_exp); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_call); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_construct); -LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_size_and_alloc_frame_for_varargs); +LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_size_frame_for_varargs); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_call_varargs); +LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_construct_varargs); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_call_eval); -LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_tear_off_activation); 
LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_tear_off_arguments); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_strcat); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_to_primitive); -LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_pnames); -LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_next_pname); -LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_push_with_scope); -LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_pop_scope); -LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_push_name_scope); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_throw); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_throw_static_error); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_handle_watchdog_timer); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_debug); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_profile_will_call); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_profile_did_call); -LLINT_SLOW_PATH_HIDDEN_DECL(throw_from_native_call); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_handle_exception); -LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_resolve_scope); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_get_from_scope); LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_put_to_scope); +LLINT_SLOW_PATH_HIDDEN_DECL(slow_path_check_if_exception_is_uncatchable_and_notify_profiler); +extern "C" SlowPathReturnType llint_throw_stack_overflow_error(VM*, ProtoCallFrame*) WTF_INTERNAL; +#if !ENABLE(JIT) +extern "C" SlowPathReturnType llint_stack_check_at_vm_entry(VM*, Register*) WTF_INTERNAL; +#endif +extern "C" NO_RETURN_DUE_TO_CRASH void llint_crash() WTF_INTERNAL; } } // namespace JSC::LLInt -#endif // ENABLE(LLINT) - #endif // LLIntSlowPaths_h diff --git a/Source/JavaScriptCore/llint/LLIntThunks.cpp b/Source/JavaScriptCore/llint/LLIntThunks.cpp index 9429e6cb5..af6884e5e 100644 --- a/Source/JavaScriptCore/llint/LLIntThunks.cpp +++ b/Source/JavaScriptCore/llint/LLIntThunks.cpp @@ -29,6 +29,7 @@ #include "CallData.h" #include "ExceptionHelpers.h" #include "Interpreter.h" +#include "JSCJSValueInlines.h" #include "JSInterfaceJIT.h" #include "JSObject.h" #include "JSStackInlines.h" @@ -36,12 +37,12 @@ #include "LinkBuffer.h" #include "LowLevelInterpreter.h" #include "ProtoCallFrame.h" +#include "StackAlignment.h" #include "VM.h" namespace JSC { #if ENABLE(JIT) -#if ENABLE(LLINT) namespace LLInt { @@ -53,100 +54,71 @@ static MacroAssemblerCodeRef generateThunkWithJumpTo(VM* vm, void (*target)(), c jit.move(JSInterfaceJIT::TrustedImmPtr(bitwise_cast<void*>(target)), JSInterfaceJIT::regT0); jit.jump(JSInterfaceJIT::regT0); - LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID); + LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID); return FINALIZE_CODE(patchBuffer, ("LLInt %s prologue thunk", thunkKind)); } MacroAssemblerCodeRef functionForCallEntryThunkGenerator(VM* vm) { - return generateThunkWithJumpTo(vm, llint_function_for_call_prologue, "function for call"); + return generateThunkWithJumpTo(vm, LLInt::getCodeFunctionPtr(llint_function_for_call_prologue), "function for call"); } MacroAssemblerCodeRef functionForConstructEntryThunkGenerator(VM* vm) { - return generateThunkWithJumpTo(vm, llint_function_for_construct_prologue, "function for construct"); + return generateThunkWithJumpTo(vm, LLInt::getCodeFunctionPtr(llint_function_for_construct_prologue), "function for construct"); } MacroAssemblerCodeRef functionForCallArityCheckThunkGenerator(VM* vm) { - return generateThunkWithJumpTo(vm, llint_function_for_call_arity_check, "function for call with arity check"); + return generateThunkWithJumpTo(vm, LLInt::getCodeFunctionPtr(llint_function_for_call_arity_check), "function for call with arity check"); } MacroAssemblerCodeRef functionForConstructArityCheckThunkGenerator(VM* 
vm) { - return generateThunkWithJumpTo(vm, llint_function_for_construct_arity_check, "function for construct with arity check"); + return generateThunkWithJumpTo(vm, LLInt::getCodeFunctionPtr(llint_function_for_construct_arity_check), "function for construct with arity check"); } MacroAssemblerCodeRef evalEntryThunkGenerator(VM* vm) { - return generateThunkWithJumpTo(vm, llint_eval_prologue, "eval"); + return generateThunkWithJumpTo(vm, LLInt::getCodeFunctionPtr(llint_eval_prologue), "eval"); } MacroAssemblerCodeRef programEntryThunkGenerator(VM* vm) { - return generateThunkWithJumpTo(vm, llint_program_prologue, "program"); + return generateThunkWithJumpTo(vm, LLInt::getCodeFunctionPtr(llint_program_prologue), "program"); +} + +MacroAssemblerCodeRef moduleProgramEntryThunkGenerator(VM* vm) +{ + return generateThunkWithJumpTo(vm, LLInt::getCodeFunctionPtr(llint_module_program_prologue), "module_program"); } } // namespace LLInt -#endif // ENABLE(LLINT) #else // ENABLE(JIT) // Non-JIT (i.e. C Loop LLINT) case: -typedef JSValue (*ExecuteCode) (CallFrame*, void* executableAddress); - -template<ExecuteCode execute> -EncodedJSValue doCallToJavaScript(void* executableAddress, ProtoCallFrame* protoCallFrame) +EncodedJSValue vmEntryToJavaScript(void* executableAddress, VM* vm, ProtoCallFrame* protoCallFrame) { - CodeBlock* codeBlock = protoCallFrame->codeBlock(); - JSScope* scope = protoCallFrame->scope(); - JSObject* callee = protoCallFrame->callee(); - int argCountIncludingThis = protoCallFrame->argumentCountIncludingThis(); - int argCount = protoCallFrame->argumentCount(); - JSValue thisValue = protoCallFrame->thisValue(); - JSStack& stack = scope->vm()->interpreter->stack(); - - CallFrame* newCallFrame = stack.pushFrame(codeBlock, scope, argCountIncludingThis, callee); - if (UNLIKELY(!newCallFrame)) { - JSGlobalObject* globalObject = scope->globalObject(); - ExecState* exec = globalObject->globalExec(); - return JSValue::encode(throwStackOverflowError(exec)); - } - - // Set the arguments for the callee: - newCallFrame->setThisValue(thisValue); - for (int i = 0; i < argCount; ++i) - newCallFrame->setArgument(i, protoCallFrame->argument(i)); - - JSValue result = execute(newCallFrame, executableAddress); - - stack.popFrame(newCallFrame); - + JSValue result = CLoop::execute(llint_vm_entry_to_javascript, executableAddress, vm, protoCallFrame); return JSValue::encode(result); } -static inline JSValue executeJS(CallFrame* newCallFrame, void* executableAddress) +EncodedJSValue vmEntryToNative(void* executableAddress, VM* vm, ProtoCallFrame* protoCallFrame) { - Opcode entryOpcode = *reinterpret_cast<Opcode*>(&executableAddress); - return CLoop::execute(newCallFrame, entryOpcode); -} - -EncodedJSValue callToJavaScript(void* executableAddress, ExecState**, ProtoCallFrame* protoCallFrame, Register*) -{ - return doCallToJavaScript<executeJS>(executableAddress, protoCallFrame); + JSValue result = CLoop::execute(llint_vm_entry_to_native, executableAddress, vm, protoCallFrame); + return JSValue::encode(result); } -static inline JSValue executeNative(CallFrame* newCallFrame, void* executableAddress) +extern "C" VMEntryRecord* vmEntryRecord(VMEntryFrame* entryFrame) { - NativeFunction function = reinterpret_cast<NativeFunction>(executableAddress); - return JSValue::decode(function(newCallFrame)); + // The C Loop doesn't have any callee save registers, so the VMEntryRecord is allocated at the base of the frame. 
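    // (The rounding below is the usual align-up idiom, size rounded up to a
    //  multiple of the alignment: (size + alignment - 1) & ~(alignment - 1).
    //  For example, with a 16-byte stack alignment a 40-byte VMEntryRecord
    //  would reserve (40 + 15) & ~15 = 48 bytes below the entry frame pointer;
    //  the 40-byte figure is only an illustration.)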
+ intptr_t stackAlignment = stackAlignmentBytes(); + intptr_t VMEntryTotalFrameSize = (sizeof(VMEntryRecord) + (stackAlignment - 1)) & ~(stackAlignment - 1); + return reinterpret_cast<VMEntryRecord*>(static_cast<char*>(entryFrame) - VMEntryTotalFrameSize); } -EncodedJSValue callToNativeFunction(void* executableAddress, ExecState**, ProtoCallFrame* protoCallFrame, Register*) -{ - return doCallToJavaScript<executeNative>(executableAddress, protoCallFrame); -} #endif // ENABLE(JIT) diff --git a/Source/JavaScriptCore/llint/LLIntThunks.h b/Source/JavaScriptCore/llint/LLIntThunks.h index 8a894aa41..95b0f4484 100644 --- a/Source/JavaScriptCore/llint/LLIntThunks.h +++ b/Source/JavaScriptCore/llint/LLIntThunks.h @@ -26,25 +26,16 @@ #ifndef LLIntThunks_h #define LLIntThunks_h -#include <wtf/Platform.h> - -#if ENABLE(LLINT) - #include "MacroAssemblerCodeRef.h" namespace JSC { -class ExecState; -class Register; class VM; struct ProtoCallFrame; extern "C" { - EncodedJSValue callToJavaScript(void*, ExecState**, ProtoCallFrame*, Register*); - EncodedJSValue callToNativeFunction(void*, ExecState**, ProtoCallFrame*, Register*); -#if ENABLE(JIT) - void returnFromJavaScript(); -#endif + EncodedJSValue vmEntryToJavaScript(void*, VM*, ProtoCallFrame*); + EncodedJSValue vmEntryToNative(void*, VM*, ProtoCallFrame*); } namespace LLInt { @@ -55,9 +46,8 @@ MacroAssemblerCodeRef functionForCallArityCheckThunkGenerator(VM*); MacroAssemblerCodeRef functionForConstructArityCheckThunkGenerator(VM*); MacroAssemblerCodeRef evalEntryThunkGenerator(VM*); MacroAssemblerCodeRef programEntryThunkGenerator(VM*); +MacroAssemblerCodeRef moduleProgramEntryThunkGenerator(VM*); } } // namespace JSC::LLInt -#endif // ENABLE(LLINT) - #endif // LLIntThunks_h diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm index be84dee36..8e77c0e22 100644 --- a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm +++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm @@ -1,4 +1,4 @@ -# Copyright (C) 2011, 2012, 2013, 2014 Apple Inc. All rights reserved. +# Copyright (C) 2011-2015 Apple Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions @@ -21,56 +21,231 @@ # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF # THE POSSIBILITY OF SUCH DAMAGE. +# Crash course on the language that this is written in (which I just call +# "assembly" even though it's more than that): +# +# - Mostly gas-style operand ordering. The last operand tends to be the +# destination. So "a := b" is written as "mov b, a". But unlike gas, +# comparisons are in-order, so "if (a < b)" is written as +# "bilt a, b, ...". +# +# - "b" = byte, "h" = 16-bit word, "i" = 32-bit word, "p" = pointer. +# For 32-bit, "i" and "p" are interchangeable except when an op supports one +# but not the other. +# +# - In general, valid operands for macro invocations and instructions are +# registers (eg "t0"), addresses (eg "4[t0]"), base-index addresses +# (eg "7[t0, t1, 2]"), absolute addresses (eg "0xa0000000[]"), or labels +# (eg "_foo" or ".foo"). Macro invocations can also take anonymous +# macros as operands. Instructions cannot take anonymous macros. +# +# - Labels must have names that begin with either "_" or ".". A "." label +# is local and gets renamed before code gen to minimize namespace +# pollution. A "_" label is an extern symbol (i.e. ".globl"). 
The "_" +# may or may not be removed during code gen depending on whether the asm +# conventions for C name mangling on the target platform mandate a "_" +# prefix. +# +# - A "macro" is a lambda expression, which may be either anonymous or +# named. But this has caveats. "macro" can take zero or more arguments, +# which may be macros or any valid operands, but it can only return +# code. But you can do Turing-complete things via continuation passing +# style: "macro foo (a, b) b(a, a) end foo(foo, foo)". Actually, don't do +# that, since you'll just crash the assembler. +# +# - An "if" is a conditional on settings. Any identifier supplied in the +# predicate of an "if" is assumed to be a #define that is available +# during code gen. So you can't use "if" for computation in a macro, but +# you can use it to select different pieces of code for different +# platforms. +# +# - Arguments to macros follow lexical scoping rather than dynamic scoping. +# Const's also follow lexical scoping and may override (hide) arguments +# or other consts. All variables (arguments and constants) can be bound +# to operands. Additionally, arguments (but not constants) can be bound +# to macros. + +# The following general-purpose registers are available: +# +# - cfr and sp hold the call frame and (native) stack pointer respectively. +# They are callee-save registers, and guaranteed to be distinct from all other +# registers on all architectures. +# +# - lr is defined on non-X86 architectures (ARM64, ARMv7, ARM, +# ARMv7_TRADITIONAL, MIPS, SH4 and CLOOP) and holds the return PC +# +# - pc holds the (native) program counter on 32-bits ARM architectures (ARM, +# ARMv7, ARMv7_TRADITIONAL) +# +# - t0, t1, t2, t3, t4 and optionally t5 are temporary registers that can get trashed on +# calls, and are pairwise distinct registers. t4 holds the JS program counter, so use +# with caution in opcodes (actually, don't use it in opcodes at all, except as PC). +# +# - r0 and r1 are the platform's customary return registers, and thus are +# two distinct registers +# +# - a0, a1, a2 and a3 are the platform's customary argument registers, and +# thus are pairwise distinct registers. Be mindful that: +# + On X86, there are no argument registers. a0 and a1 are edx and +# ecx following the fastcall convention, but you should still use the stack +# to pass your arguments. The cCall2 and cCall4 macros do this for you. +# + On X86_64_WIN, you should allocate space on the stack for the arguments, +# and the return convention is weird for > 8 bytes types. The only place we +# use > 8 bytes return values is on a cCall, and cCall2 and cCall4 handle +# this for you. +# +# - The only registers guaranteed to be caller-saved are r0, r1, a0, a1 and a2, and +# you should be mindful of that in functions that are called directly from C. +# If you need more registers, you should push and pop them like a good +# assembly citizen, because any other register will be callee-saved on X86. +# +# You can additionally assume: +# +# - a3, t2, t3, t4 and t5 are never return registers; t0, t1, a0, a1 and a2 +# can be return registers. +# +# - t4 and t5 are never argument registers, t3 can only be a3, t1 can only be +# a1; but t0 and t2 can be either a0 or a2. +# +# - On 64 bits, there are callee-save registers named csr0, csr1, ... csrN. +# The last three csr registers are used used to store the PC base and +# two special tag values. Don't use them for anything else. 
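# As a purely illustrative fragment in this dialect (not part of the
# interpreter), tying the rules above together:
#
#     loadp 4[t1], t0         # t0 := *(t1 + 4)
#     addp 8, t0              # t0 := t0 + 8
#     bilt t0, 100, .small    # if (t0 < 100) goto .small
#     move 0, t0              # t0 := 0
# .small:
#
# Destinations come last, while comparisons read in source order.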
+# +# Additional platform-specific details (you shouldn't rely on this remaining +# true): +# +# - For consistency with the baseline JIT, t0 is always r0 (and t1 is always +# r1 on 32 bits platforms). You should use the r version when you need return +# registers, and the t version otherwise: code using t0 (or t1) should still +# work if swapped with e.g. t3, while code using r0 (or r1) should not. There +# *may* be legacy code relying on this. +# +# - On all platforms other than X86, t0 can only be a0 and t2 can only be a2. +# +# - On all platforms other than X86 and X86_64, a2 is not a return register. +# a2 is r0 on X86 (because we have so few registers) and r1 on X86_64 (because +# the ABI enforces it). +# +# The following floating-point registers are available: +# +# - ft0-ft5 are temporary floating-point registers that get trashed on calls, +# and are pairwise distinct. +# +# - fa0 and fa1 are the platform's customary floating-point argument +# registers, and are both distinct. On 64-bits platforms, fa2 and fa3 are +# additional floating-point argument registers. +# +# - fr is the platform's customary floating-point return register +# +# You can assume that ft1-ft5 or fa1-fa3 are never fr, and that ftX is never +# faY if X != Y. + # First come the common protocols that both interpreters use. Note that each # of these must have an ASSERT() in LLIntData.cpp -# Work-around for the fact that the toolchain's awareness of armv7s results in -# a separate slab in the fat binary, yet the offlineasm doesn't know to expect -# it. +# Work-around for the fact that the toolchain's awareness of armv7k / armv7s +# results in a separate slab in the fat binary, yet the offlineasm doesn't know +# to expect it. +if ARMv7k +end if ARMv7s end # These declarations must match interpreter/JSStack.h. if JSVALUE64 -const PtrSize = 8 -const CallFrameHeaderSlots = 6 + const PtrSize = 8 + const CallFrameHeaderSlots = 5 else -const PtrSize = 4 -const CallFrameHeaderSlots = 5 + const PtrSize = 4 + const CallFrameHeaderSlots = 4 + const CallFrameAlignSlots = 1 end const SlotSize = 8 +const JSEnvironmentRecord_variables = (sizeof JSEnvironmentRecord + SlotSize - 1) & ~(SlotSize - 1) +const DirectArguments_storage = (sizeof DirectArguments + SlotSize - 1) & ~(SlotSize - 1) + +const StackAlignment = 16 +const StackAlignmentSlots = 2 +const StackAlignmentMask = StackAlignment - 1 + +const CallerFrameAndPCSize = 2 * PtrSize + const CallerFrame = 0 const ReturnPC = CallerFrame + PtrSize const CodeBlock = ReturnPC + PtrSize -const ScopeChain = CodeBlock + SlotSize -const Callee = ScopeChain + SlotSize +const Callee = CodeBlock + SlotSize const ArgumentCount = Callee + SlotSize const ThisArgumentOffset = ArgumentCount + SlotSize +const FirstArgumentOffset = ThisArgumentOffset + SlotSize const CallFrameHeaderSize = ThisArgumentOffset # Some value representation constants. 
if JSVALUE64 -const TagBitTypeOther = 0x2 -const TagBitBool = 0x4 -const TagBitUndefined = 0x8 -const ValueEmpty = 0x0 -const ValueFalse = TagBitTypeOther | TagBitBool -const ValueTrue = TagBitTypeOther | TagBitBool | 1 -const ValueUndefined = TagBitTypeOther | TagBitUndefined -const ValueNull = TagBitTypeOther + const TagBitTypeOther = 0x2 + const TagBitBool = 0x4 + const TagBitUndefined = 0x8 + const ValueEmpty = 0x0 + const ValueFalse = TagBitTypeOther | TagBitBool + const ValueTrue = TagBitTypeOther | TagBitBool | 1 + const ValueUndefined = TagBitTypeOther | TagBitUndefined + const ValueNull = TagBitTypeOther + const TagTypeNumber = 0xffff000000000000 + const TagMask = TagTypeNumber | TagBitTypeOther +else + const Int32Tag = -1 + const BooleanTag = -2 + const NullTag = -3 + const UndefinedTag = -4 + const CellTag = -5 + const EmptyValueTag = -6 + const DeletedValueTag = -7 + const LowestTag = DeletedValueTag +end + +# NOTE: The values below must be in sync with what is in PutByIdFlags.h. +const PutByIdPrimaryTypeMask = 0x6 +const PutByIdPrimaryTypeSecondary = 0x0 +const PutByIdPrimaryTypeObjectWithStructure = 0x2 +const PutByIdPrimaryTypeObjectWithStructureOrOther = 0x4 +const PutByIdSecondaryTypeMask = -0x8 +const PutByIdSecondaryTypeBottom = 0x0 +const PutByIdSecondaryTypeBoolean = 0x8 +const PutByIdSecondaryTypeOther = 0x10 +const PutByIdSecondaryTypeInt32 = 0x18 +const PutByIdSecondaryTypeNumber = 0x20 +const PutByIdSecondaryTypeString = 0x28 +const PutByIdSecondaryTypeSymbol = 0x30 +const PutByIdSecondaryTypeObject = 0x38 +const PutByIdSecondaryTypeObjectOrOther = 0x40 +const PutByIdSecondaryTypeTop = 0x48 + +const CopyBarrierSpaceBits = 3 + +const CallOpCodeSize = 9 + +if X86_64 or ARM64 or C_LOOP + const maxFrameExtentForSlowPathCall = 0 +elsif ARM or ARMv7_TRADITIONAL or ARMv7 or SH4 + const maxFrameExtentForSlowPathCall = 24 +elsif X86 or X86_WIN + const maxFrameExtentForSlowPathCall = 40 +elsif MIPS + const maxFrameExtentForSlowPathCall = 40 +elsif X86_64_WIN + const maxFrameExtentForSlowPathCall = 64 +end + +if X86_64 or X86_64_WIN or ARM64 + const CalleeSaveSpaceAsVirtualRegisters = 3 else -const Int32Tag = -1 -const BooleanTag = -2 -const NullTag = -3 -const UndefinedTag = -4 -const CellTag = -5 -const EmptyValueTag = -6 -const DeletedValueTag = -7 -const LowestTag = DeletedValueTag + const CalleeSaveSpaceAsVirtualRegisters = 0 end +const CalleeSaveSpaceStackAligned = (CalleeSaveSpaceAsVirtualRegisters * SlotSize + StackAlignment - 1) & ~StackAlignmentMask + + # Watchpoint states const ClearWatchpoint = 0 const IsWatched = 1 @@ -80,16 +255,28 @@ const IsInvalidated = 2 if JSVALUE64 # - Use a pair of registers to represent the PC: one register for the # base of the bytecodes, and one register for the index. - # - The PC base (or PB for short) should be stored in the csr. It will - # get clobbered on calls to other JS code, but will get saved on calls - # to C functions. + # - The PC base (or PB for short) must be stored in a callee-save register. # - C calls are still given the Instruction* rather than the PC index. # This requires an add before the call, and a sub after. 
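# (For reference, the JSVALUE64 constants above give these encodings: false is
#  0x6, true is 0x7, undefined is 0xa, null is 0x2, and the empty value is 0.
#  An int32 such as 5 is boxed as TagTypeNumber | 5 = 0xffff000000000005, cell
#  pointers keep their top 16 bits and the 0x2 bit clear, and doubles are stored
#  with a 2^48 offset added so an encoded double never has its top 16 bits all
#  set or all clear. This mirrors JSValue's NaN-boxing scheme in JSCJSValue.h.)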
- const PC = t4 - const PB = t6 - const tagTypeNumber = csr1 - const tagMask = csr2 - + const PC = t4 # When changing this, make sure LLIntPC is up to date in LLIntPCRanges.h + if ARM64 + const PB = csr7 + const tagTypeNumber = csr8 + const tagMask = csr9 + elsif X86_64 + const PB = csr2 + const tagTypeNumber = csr3 + const tagMask = csr4 + elsif X86_64_WIN + const PB = csr4 + const tagTypeNumber = csr5 + const tagMask = csr6 + elsif C_LOOP + const PB = csr0 + const tagTypeNumber = csr1 + const tagMask = csr2 + end + macro loadisFromInstruction(offset, dest) loadis offset * 8[PB, PC, 8], dest end @@ -103,7 +290,7 @@ if JSVALUE64 end else - const PC = t4 + const PC = t4 # When changing this, make sure LLIntPC is up to date in LLIntPCRanges.h macro loadisFromInstruction(offset, dest) loadis offset * 4[PC], dest end @@ -113,6 +300,12 @@ else end end +if X86_64_WIN + const extraTempReg = t0 +else + const extraTempReg = t5 +end + # Constants for reasoning about value representation. if BIG_ENDIAN const TagOffset = 0 @@ -133,14 +326,14 @@ const ArrayStorageShape = 28 const SlowPutArrayStorageShape = 30 # Type constants. -const StringType = 5 -const ObjectType = 17 -const FinalObjectType = 18 +const StringType = 6 +const SymbolType = 7 +const ObjectType = 21 +const FinalObjectType = 22 # Type flags constants. const MasqueradesAsUndefined = 1 -const ImplementsHasInstance = 2 -const ImplementsDefaultHasInstance = 8 +const ImplementsDefaultHasInstance = 2 # Bytecode operand constants. const FirstConstantRegisterIndex = 0x40000000 @@ -149,12 +342,13 @@ const FirstConstantRegisterIndex = 0x40000000 const GlobalCode = 0 const EvalCode = 1 const FunctionCode = 2 +const ModuleCode = 3 # The interpreter steals the tag word of the argument count. const LLIntReturnPC = ArgumentCount + TagOffset # String flags. -const HashFlags8BitBuffer = 32 +const HashFlags8BitBuffer = 8 # Copied from PropertyOffset.h const firstOutOfLineOffset = 100 @@ -162,19 +356,22 @@ const firstOutOfLineOffset = 100 # ResolveType const GlobalProperty = 0 const GlobalVar = 1 -const ClosureVar = 2 -const GlobalPropertyWithVarInjectionChecks = 3 -const GlobalVarWithVarInjectionChecks = 4 -const ClosureVarWithVarInjectionChecks = 5 -const Dynamic = 6 - -const ResolveModeMask = 0xffff - -const MarkedBlockSize = 64 * 1024 +const GlobalLexicalVar = 2 +const ClosureVar = 3 +const LocalClosureVar = 4 +const ModuleVar = 5 +const GlobalPropertyWithVarInjectionChecks = 6 +const GlobalVarWithVarInjectionChecks = 7 +const GlobalLexicalVarWithVarInjectionChecks = 8 +const ClosureVarWithVarInjectionChecks = 9 + +const ResolveTypeMask = 0x3ff +const InitializationModeMask = 0xffc00 +const InitializationModeShift = 10 +const Initialization = 0 + +const MarkedBlockSize = 16 * 1024 const MarkedBlockMask = ~(MarkedBlockSize - 1) -# Constants for checking mark bits. -const AtomNumberShift = 3 -const BitMapWordShift = 4 # Allocation constants if JSVALUE64 @@ -196,9 +393,7 @@ macro crash() if C_LOOP cloopCrash else - storei t0, 0xbbadbeef[] - move 0, t0 - call t0 + call _llint_crash end end @@ -210,25 +405,306 @@ macro assert(assertion) end end +macro checkStackPointerAlignment(tempReg, location) + if ARM64 or C_LOOP or SH4 + # ARM64 will check for us! + # C_LOOP does not need the alignment, and can use a little perf + # improvement from avoiding useless work. + # SH4 does not need specific alignment (4 bytes). 
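        # (For the targets handled below, StackAlignmentMask is 15, so e.g.
        #  sp = 0x7ffc1230 leaves 0 in tempReg and execution falls through,
        #  while sp = 0x7ffc1238 leaves 8, records the location, and breaks.)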
+ else + if ARM or ARMv7 or ARMv7_TRADITIONAL + # ARM can't do logical ops with the sp as a source + move sp, tempReg + andp StackAlignmentMask, tempReg + else + andp sp, StackAlignmentMask, tempReg + end + btpz tempReg, .stackPointerOkay + move location, tempReg + break + .stackPointerOkay: + end +end + +if C_LOOP or ARM64 or X86_64 or X86_64_WIN + const CalleeSaveRegisterCount = 0 +elsif ARM or ARMv7_TRADITIONAL or ARMv7 + const CalleeSaveRegisterCount = 7 +elsif SH4 + const CalleeSaveRegisterCount = 5 +elsif MIPS + const CalleeSaveRegisterCount = 1 +elsif X86 or X86_WIN + const CalleeSaveRegisterCount = 3 +end + +const CalleeRegisterSaveSize = CalleeSaveRegisterCount * PtrSize + +# VMEntryTotalFrameSize includes the space for struct VMEntryRecord and the +# callee save registers rounded up to keep the stack aligned +const VMEntryTotalFrameSize = (CalleeRegisterSaveSize + sizeof VMEntryRecord + StackAlignment - 1) & ~StackAlignmentMask + +macro pushCalleeSaves() + if C_LOOP or ARM64 or X86_64 or X86_64_WIN + elsif ARM or ARMv7_TRADITIONAL + emit "push {r4-r10}" + elsif ARMv7 + emit "push {r4-r6, r8-r11}" + elsif MIPS + emit "addiu $sp, $sp, -4" + emit "sw $s4, 0($sp)" + # save $gp to $s4 so that we can restore it after a function call + emit "move $s4, $gp" + elsif SH4 + emit "mov.l r13, @-r15" + emit "mov.l r11, @-r15" + emit "mov.l r10, @-r15" + emit "mov.l r9, @-r15" + emit "mov.l r8, @-r15" + elsif X86 + emit "push %esi" + emit "push %edi" + emit "push %ebx" + elsif X86_WIN + emit "push esi" + emit "push edi" + emit "push ebx" + end +end + +macro popCalleeSaves() + if C_LOOP or ARM64 or X86_64 or X86_64_WIN + elsif ARM or ARMv7_TRADITIONAL + emit "pop {r4-r10}" + elsif ARMv7 + emit "pop {r4-r6, r8-r11}" + elsif MIPS + emit "lw $s4, 0($sp)" + emit "addiu $sp, $sp, 4" + elsif SH4 + emit "mov.l @r15+, r8" + emit "mov.l @r15+, r9" + emit "mov.l @r15+, r10" + emit "mov.l @r15+, r11" + emit "mov.l @r15+, r13" + elsif X86 + emit "pop %ebx" + emit "pop %edi" + emit "pop %esi" + elsif X86_WIN + emit "pop ebx" + emit "pop edi" + emit "pop esi" + end +end + +macro preserveCallerPCAndCFR() + if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4 + push lr + push cfr + elsif X86 or X86_WIN or X86_64 or X86_64_WIN + push cfr + elsif ARM64 + push cfr, lr + else + error + end + move sp, cfr +end + +macro restoreCallerPCAndCFR() + move cfr, sp + if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4 + pop cfr + pop lr + elsif X86 or X86_WIN or X86_64 or X86_64_WIN + pop cfr + elsif ARM64 + pop lr, cfr + end +end + +macro preserveCalleeSavesUsedByLLInt() + subp CalleeSaveSpaceStackAligned, sp + if C_LOOP + elsif ARM or ARMv7_TRADITIONAL + elsif ARMv7 + elsif ARM64 + emit "stp x27, x28, [x29, #-16]" + emit "stp xzr, x26, [x29, #-32]" + elsif MIPS + elsif SH4 + elsif X86 + elsif X86_WIN + elsif X86_64 + storep csr4, -8[cfr] + storep csr3, -16[cfr] + storep csr2, -24[cfr] + elsif X86_64_WIN + storep csr6, -8[cfr] + storep csr5, -16[cfr] + storep csr4, -24[cfr] + end +end + +macro restoreCalleeSavesUsedByLLInt() + if C_LOOP + elsif ARM or ARMv7_TRADITIONAL + elsif ARMv7 + elsif ARM64 + emit "ldp xzr, x26, [x29, #-32]" + emit "ldp x27, x28, [x29, #-16]" + elsif MIPS + elsif SH4 + elsif X86 + elsif X86_WIN + elsif X86_64 + loadp -24[cfr], csr2 + loadp -16[cfr], csr3 + loadp -8[cfr], csr4 + elsif X86_64_WIN + loadp -24[cfr], csr4 + loadp -16[cfr], csr5 + loadp -8[cfr], csr6 + end +end + +macro copyCalleeSavesToVMCalleeSavesBuffer(vm, temp) + if ARM64 or X86_64 or X86_64_WIN + leap 
VM::calleeSaveRegistersBuffer[vm], temp + if ARM64 + storep csr0, [temp] + storep csr1, 8[temp] + storep csr2, 16[temp] + storep csr3, 24[temp] + storep csr4, 32[temp] + storep csr5, 40[temp] + storep csr6, 48[temp] + storep csr7, 56[temp] + storep csr8, 64[temp] + storep csr9, 72[temp] + stored csfr0, 80[temp] + stored csfr1, 88[temp] + stored csfr2, 96[temp] + stored csfr3, 104[temp] + stored csfr4, 112[temp] + stored csfr5, 120[temp] + stored csfr6, 128[temp] + stored csfr7, 136[temp] + elsif X86_64 + storep csr0, [temp] + storep csr1, 8[temp] + storep csr2, 16[temp] + storep csr3, 24[temp] + storep csr4, 32[temp] + elsif X86_64_WIN + storep csr0, [temp] + storep csr1, 8[temp] + storep csr2, 16[temp] + storep csr3, 24[temp] + storep csr4, 32[temp] + storep csr5, 40[temp] + storep csr6, 48[temp] + end + end +end + +macro restoreCalleeSavesFromVMCalleeSavesBuffer(vm, temp) + if ARM64 or X86_64 or X86_64_WIN + leap VM::calleeSaveRegistersBuffer[vm], temp + if ARM64 + loadp [temp], csr0 + loadp 8[temp], csr1 + loadp 16[temp], csr2 + loadp 24[temp], csr3 + loadp 32[temp], csr4 + loadp 40[temp], csr5 + loadp 48[temp], csr6 + loadp 56[temp], csr7 + loadp 64[temp], csr8 + loadp 72[temp], csr9 + loadd 80[temp], csfr0 + loadd 88[temp], csfr1 + loadd 96[temp], csfr2 + loadd 104[temp], csfr3 + loadd 112[temp], csfr4 + loadd 120[temp], csfr5 + loadd 128[temp], csfr6 + loadd 136[temp], csfr7 + elsif X86_64 + loadp [temp], csr0 + loadp 8[temp], csr1 + loadp 16[temp], csr2 + loadp 24[temp], csr3 + loadp 32[temp], csr4 + elsif X86_64_WIN + loadp [temp], csr0 + loadp 8[temp], csr1 + loadp 16[temp], csr2 + loadp 24[temp], csr3 + loadp 32[temp], csr4 + loadp 40[temp], csr5 + loadp 48[temp], csr6 + end + end +end + macro preserveReturnAddressAfterCall(destinationRegister) if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or ARM64 or MIPS or SH4 # In C_LOOP case, we're only preserving the bytecode vPC. move lr, destinationRegister - elsif X86 or X86_64 + elsif X86 or X86_WIN or X86_64 or X86_64_WIN pop destinationRegister else error end end -macro restoreReturnAddressBeforeReturn(sourceRegister) - if C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or ARM64 or MIPS or SH4 - # In C_LOOP case, we're only restoring the bytecode vPC. 
- move sourceRegister, lr - elsif X86 or X86_64 - push sourceRegister +macro copyBarrier(value, slow) + btpnz value, CopyBarrierSpaceBits, slow +end + +macro functionPrologue() + if X86 or X86_WIN or X86_64 or X86_64_WIN + push cfr + elsif ARM64 + push cfr, lr + elsif C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4 + push lr + push cfr + end + move sp, cfr +end + +macro functionEpilogue() + if X86 or X86_WIN or X86_64 or X86_64_WIN + pop cfr + elsif ARM64 + pop lr, cfr + elsif C_LOOP or ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4 + pop cfr + pop lr + end +end + +macro vmEntryRecord(entryFramePointer, resultReg) + subp entryFramePointer, VMEntryTotalFrameSize, resultReg +end + +macro getFrameRegisterSizeForCodeBlock(codeBlock, size) + loadi CodeBlock::m_numCalleeLocals[codeBlock], size + lshiftp 3, size + addp maxFrameExtentForSlowPathCall, size +end + +macro restoreStackPointerAfterCall() + loadp CodeBlock[cfr], t2 + getFrameRegisterSizeForCodeBlock(t2, t2) + if ARMv7 + subp cfr, t2, t2 + move t2, sp else - error + subp cfr, t2, sp end end @@ -238,50 +714,102 @@ macro traceExecution() end end -macro callTargetFunction(callLinkInfo) +macro callTargetFunction(callee) if C_LOOP - cloopCallJSFunction LLIntCallLinkInfo::machineCodeTarget[callLinkInfo] + cloopCallJSFunction callee else - call LLIntCallLinkInfo::machineCodeTarget[callLinkInfo] - dispatchAfterCall() + call callee end + restoreStackPointerAfterCall() + dispatchAfterCall() end -macro slowPathForCall(slowPath) +macro prepareForRegularCall(callee, temp1, temp2, temp3) + addp CallerFrameAndPCSize, sp +end + +# sp points to the new frame +macro prepareForTailCall(callee, temp1, temp2, temp3) + restoreCalleeSavesUsedByLLInt() + + loadi PayloadOffset + ArgumentCount[cfr], temp2 + loadp CodeBlock[cfr], temp1 + loadp CodeBlock::m_numParameters[temp1], temp1 + bilteq temp1, temp2, .noArityFixup + move temp1, temp2 + +.noArityFixup: + # We assume < 2^28 arguments + muli SlotSize, temp2 + addi StackAlignment - 1 + CallFrameHeaderSize, temp2 + andi ~StackAlignmentMask, temp2 + + move cfr, temp1 + addp temp2, temp1 + + loadi PayloadOffset + ArgumentCount[sp], temp2 + # We assume < 2^28 arguments + muli SlotSize, temp2 + addi StackAlignment - 1 + CallFrameHeaderSize, temp2 + andi ~StackAlignmentMask, temp2 + + if ARM or ARMv7_TRADITIONAL or ARMv7 or SH4 or ARM64 or C_LOOP or MIPS + addp 2 * PtrSize, sp + subi 2 * PtrSize, temp2 + loadp PtrSize[cfr], lr + else + addp PtrSize, sp + subi PtrSize, temp2 + loadp PtrSize[cfr], temp3 + storep temp3, [sp] + end + + subp temp2, temp1 + loadp [cfr], cfr + +.copyLoop: + subi PtrSize, temp2 + loadp [sp, temp2, 1], temp3 + storep temp3, [temp1, temp2, 1] + btinz temp2, .copyLoop + + move temp1, sp + jmp callee +end + +macro slowPathForCall(slowPath, prepareCall) callCallSlowPath( slowPath, - macro (callee) - if C_LOOP - cloopCallJSFunction callee - else - call callee - dispatchAfterCall() - end + # Those are r0 and r1 + macro (callee, calleeFramePtr) + btpz calleeFramePtr, .dontUpdateSP + move calleeFramePtr, sp + prepareCall(callee, t2, t3, t4) + .dontUpdateSP: + callTargetFunction(callee) end) end -macro arrayProfile(structureAndIndexingType, profile, scratch) - const structure = structureAndIndexingType - const indexingType = structureAndIndexingType - storep structure, ArrayProfile::m_lastSeenStructure[profile] - loadb Structure::m_indexingType[structure], indexingType +macro arrayProfile(cellAndIndexingType, profile, scratch) + const cell = cellAndIndexingType + const indexingType = 
cellAndIndexingType + loadi JSCell::m_structureID[cell], scratch + storei scratch, ArrayProfile::m_lastSeenStructureID[profile] + loadb JSCell::m_indexingType[cell], indexingType end -macro checkMarkByte(cell, scratch1, scratch2, continuation) - move cell, scratch1 - move cell, scratch2 - - andp MarkedBlockMask, scratch1 - andp ~MarkedBlockMask, scratch2 - - rshiftp AtomNumberShift + BitMapWordShift, scratch2 - loadb MarkedBlock::m_marks[scratch1, scratch2, 1], scratch1 +macro skipIfIsRememberedOrInEden(cell, scratch1, scratch2, continuation) + loadb JSCell::m_cellState[cell], scratch1 continuation(scratch1) end +macro notifyWrite(set, slow) + bbneq WatchpointSet::m_state[set], IsInvalidated, slow +end + macro checkSwitchToJIT(increment, action) loadp CodeBlock[cfr], t0 - baddis increment, CodeBlock::m_llintExecuteCounter + ExecutionCounter::m_counter[t0], .continue + baddis increment, CodeBlock::m_llintExecuteCounter + BaselineExecutionCounter::m_counter[t0], .continue action() .continue: end @@ -299,13 +827,21 @@ macro assertNotConstant(index) end macro functionForCallCodeBlockGetter(targetRegister) - loadp Callee[cfr], targetRegister + if JSVALUE64 + loadp Callee[cfr], targetRegister + else + loadp Callee + PayloadOffset[cfr], targetRegister + end loadp JSFunction::m_executable[targetRegister], targetRegister loadp FunctionExecutable::m_codeBlockForCall[targetRegister], targetRegister end macro functionForConstructCodeBlockGetter(targetRegister) - loadp Callee[cfr], targetRegister + if JSVALUE64 + loadp Callee[cfr], targetRegister + else + loadp Callee + PayloadOffset[cfr], targetRegister + end loadp JSFunction::m_executable[targetRegister], targetRegister loadp FunctionExecutable::m_codeBlockForConstruct[targetRegister], targetRegister end @@ -325,26 +861,51 @@ end # Do the bare minimum required to execute code. Sets up the PC, leave the CodeBlock* # in t1. May also trigger prologue entry OSR. macro prologue(codeBlockGetter, codeBlockSetter, osrSlowPath, traceSlowPath) - preserveReturnAddressAfterCall(t2) - # Set up the call frame and check if we should OSR. - storep t2, ReturnPC[cfr] + preserveCallerPCAndCFR() + if EXECUTION_TRACING + subp maxFrameExtentForSlowPathCall, sp callSlowPath(traceSlowPath) + addp maxFrameExtentForSlowPathCall, sp end codeBlockGetter(t1) - baddis 5, CodeBlock::m_llintExecuteCounter + ExecutionCounter::m_counter[t1], .continue - cCall2(osrSlowPath, cfr, PC) - move t1, cfr - btpz t0, .recover - loadp ReturnPC[cfr], t2 - restoreReturnAddressBeforeReturn(t2) - jmp t0 -.recover: - codeBlockGetter(t1) -.continue: + if not C_LOOP + baddis 5, CodeBlock::m_llintExecuteCounter + BaselineExecutionCounter::m_counter[t1], .continue + if JSVALUE64 + move cfr, a0 + move PC, a1 + cCall2(osrSlowPath) + else + # We are after the function prologue, but before we have set up sp from the CodeBlock. + # Temporarily align stack pointer for this call. + subp 8, sp + move cfr, a0 + move PC, a1 + cCall2(osrSlowPath) + addp 8, sp + end + btpz r0, .recover + move cfr, sp # restore the previous sp + # pop the callerFrame since we will jump to a function that wants to save it + if ARM64 + pop lr, cfr + elsif ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4 + pop cfr + pop lr + else + pop cfr + end + jmp r0 + .recover: + codeBlockGetter(t1) + .continue: + end + codeBlockSetter(t1) - + + preserveCalleeSavesUsedByLLInt() + # Set up the PC. 
if JSVALUE64 loadp CodeBlock::m_instructions[t1], PB @@ -352,6 +913,35 @@ macro prologue(codeBlockGetter, codeBlockSetter, osrSlowPath, traceSlowPath) else loadp CodeBlock::m_instructions[t1], PC end + + # Get new sp in t0 and check stack height. + getFrameRegisterSizeForCodeBlock(t1, t0) + subp cfr, t0, t0 + loadp CodeBlock::m_vm[t1], t2 + bpbeq VM::m_jsStackLimit[t2], t0, .stackHeightOK + + # Stack height check failed - need to call a slow_path. + # Set up temporary stack pointer for call including callee saves + subp maxFrameExtentForSlowPathCall, sp + callSlowPath(_llint_stack_check) + bpeq r1, 0, .stackHeightOKGetCodeBlock + move r1, cfr + dispatch(0) # Go to exception handler in PC + +.stackHeightOKGetCodeBlock: + # Stack check slow path returned that the stack was ok. + # Since they were clobbered, need to get CodeBlock and new sp + codeBlockGetter(t1) + getFrameRegisterSizeForCodeBlock(t1, t0) + subp cfr, t0, t0 + +.stackHeightOK: + move t0, sp + + if JSVALUE64 + move TagTypeNumber, tagTypeNumber + addp TagBitTypeOther, tagTypeNumber, tagMask + end end # Expects that CodeBlock is in t1, which is what prologue() leaves behind. @@ -386,73 +976,187 @@ macro functionInitialization(profileArgSkip) end baddpnz -8, t0, .argumentProfileLoop .argumentProfileDone: - - # Check stack height. - loadi CodeBlock::m_numCalleeRegisters[t1], t0 - addi 1, t0 # Account that local0 goes at slot -1 - loadp CodeBlock::m_vm[t1], t2 - lshiftp 3, t0 - subp cfr, t0, t0 - bpbeq VM::m_jsStackLimit[t2], t0, .stackHeightOK - - # Stack height check failed - need to call a slow_path. - callSlowPath(_llint_stack_check) -.stackHeightOK: end macro allocateJSObject(allocator, structure, result, scratch1, slowCase) - if ALWAYS_ALLOCATE_SLOW - jmp slowCase - else - const offsetOfFirstFreeCell = - MarkedAllocator::m_freeList + - MarkedBlock::FreeList::head - - # Get the object from the free list. - loadp offsetOfFirstFreeCell[allocator], result - btpz result, slowCase - - # Remove the object from the free list. - loadp [result], scratch1 - storep scratch1, offsetOfFirstFreeCell[allocator] + const offsetOfFirstFreeCell = + MarkedAllocator::m_freeList + + MarkedBlock::FreeList::head + + # Get the object from the free list. + loadp offsetOfFirstFreeCell[allocator], result + btpz result, slowCase - # Initialize the object. - storep structure, JSCell::m_structure[result] - storep 0, JSObject::m_butterfly[result] - end + # Remove the object from the free list. + loadp [result], scratch1 + storep scratch1, offsetOfFirstFreeCell[allocator] + + # Initialize the object. 
+ storep 0, JSObject::m_butterfly[result] + storeStructureWithTypeInfo(result, structure, scratch1) end macro doReturn() - loadp ReturnPC[cfr], t2 - loadp CallerFrame[cfr], cfr - restoreReturnAddressBeforeReturn(t2) + restoreCalleeSavesUsedByLLInt() + restoreCallerPCAndCFR() ret end +# stub to call into JavaScript or Native functions +# EncodedJSValue vmEntryToJavaScript(void* code, VM* vm, ProtoCallFrame* protoFrame) +# EncodedJSValue vmEntryToNativeFunction(void* code, VM* vm, ProtoCallFrame* protoFrame) + if C_LOOP + _llint_vm_entry_to_javascript: else -# stub to call into JavaScript or Native functions -# EncodedJSValue callToJavaScript(void* code, ExecState** vm, ProtoCallFrame* protoFrame, Register* topOfStack) -# EncodedJSValue callToNativeFunction(void* code, ExecState** vm, ProtoCallFrame* protoFrame, Register* topOfStack) -# Note, if these stubs or one of their related macros are changed, make the -# equivalent changes in jit/JITStubsX86.h and/or jit/JITStubsMSVC64.asm -_callToJavaScript: - doCallToJavaScript(makeJavaScriptCall, doReturnFromJavaScript) + global _vmEntryToJavaScript + _vmEntryToJavaScript: +end + doVMEntry(makeJavaScriptCall) -_callToNativeFunction: - doCallToJavaScript(makeHostFunctionCall, doReturnFromHostFunction) + +if C_LOOP + _llint_vm_entry_to_native: +else + global _vmEntryToNative + _vmEntryToNative: +end + doVMEntry(makeHostFunctionCall) + + +if not C_LOOP + # void sanitizeStackForVMImpl(VM* vm) + global _sanitizeStackForVMImpl + _sanitizeStackForVMImpl: + # We need three non-aliased caller-save registers. We are guaranteed + # this for a0, a1 and a2 on all architectures. + if X86 or X86_WIN + loadp 4[sp], a0 + end + const vm = a0 + const address = a1 + const zeroValue = a2 + + loadp VM::m_lastStackTop[vm], address + bpbeq sp, address, .zeroFillDone + + move 0, zeroValue + .zeroFillLoop: + storep zeroValue, [address] + addp PtrSize, address + bpa sp, address, .zeroFillLoop + + .zeroFillDone: + move sp, address + storep address, VM::m_lastStackTop[vm] + ret + + # VMEntryRecord* vmEntryRecord(const VMEntryFrame* entryFrame) + global _vmEntryRecord + _vmEntryRecord: + if X86 or X86_WIN + loadp 4[sp], a0 + end + + vmEntryRecord(a0, r0) + ret +end + +if C_LOOP + # Dummy entry point the C Loop uses to initialize. + _llint_entry: + crash() +else + macro initPCRelative(pcBase) + if X86_64 or X86_64_WIN or X86 or X86_WIN + call _relativePCBase + _relativePCBase: + pop pcBase + elsif ARM64 + elsif ARMv7 + _relativePCBase: + move pc, pcBase + subp 3, pcBase # Need to back up the PC and set the Thumb2 bit + elsif ARM or ARMv7_TRADITIONAL + _relativePCBase: + move pc, pcBase + subp 8, pcBase + elsif MIPS + la _relativePCBase, pcBase + setcallreg pcBase # needed to set $t9 to the right value for the .cpload created by the label. 
+ _relativePCBase: + elsif SH4 + mova _relativePCBase, t0 + move t0, pcBase + alignformova + _relativePCBase: + end +end + +# The PC base is in t1, as this is what _llint_entry leaves behind through +# initPCRelative(t1) +macro setEntryAddress(index, label) + if X86_64 or X86_64_WIN + leap (label - _relativePCBase)[t1], t3 + move index, t4 + storep t3, [a0, t4, 8] + elsif X86 or X86_WIN + leap (label - _relativePCBase)[t1], t3 + move index, t4 + storep t3, [a0, t4, 4] + elsif ARM64 + pcrtoaddr label, t1 + move index, t4 + storep t1, [a0, t4, 8] + elsif ARM or ARMv7 or ARMv7_TRADITIONAL + mvlbl (label - _relativePCBase), t4 + addp t4, t1, t4 + move index, t3 + storep t4, [a0, t3, 4] + elsif SH4 + move (label - _relativePCBase), t4 + addp t4, t1, t4 + move index, t3 + storep t4, [a0, t3, 4] + flushcp # Force constant pool flush to avoid "pcrel too far" link error. + elsif MIPS + la label, t4 + la _relativePCBase, t3 + subp t3, t4 + addp t4, t1, t4 + move index, t3 + storep t4, [a0, t3, 4] + end end -# Indicate the beginning of LLInt. -_llint_begin: - crash() +global _llint_entry +# Entry point for the llint to initialize. +_llint_entry: + functionPrologue() + pushCalleeSaves() + if X86 or X86_WIN + loadp 20[sp], a0 + end + initPCRelative(t1) + + # Include generated bytecode initialization file. + include InitBytecodes + popCalleeSaves() + functionEpilogue() + ret +end _llint_program_prologue: prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue) dispatch(0) +_llint_module_program_prologue: + prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue) + dispatch(0) + + _llint_eval_prologue: prologue(notFunctionCodeBlockGetter, notFunctionCodeBlockSetter, _llint_entry_osr, _llint_trace_prologue) dispatch(0) @@ -460,14 +1164,12 @@ _llint_eval_prologue: _llint_function_for_call_prologue: prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call, _llint_trace_prologue_function_for_call) -.functionForCallBegin: functionInitialization(0) dispatch(0) _llint_function_for_construct_prologue: prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct, _llint_trace_prologue_function_for_construct) -.functionForConstructBegin: functionInitialization(1) dispatch(0) @@ -475,11 +1177,17 @@ _llint_function_for_construct_prologue: _llint_function_for_call_arity_check: prologue(functionForCallCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_call_arityCheck, _llint_trace_arityCheck_for_call) functionArityCheck(.functionForCallBegin, _slow_path_call_arityCheck) +.functionForCallBegin: + functionInitialization(0) + dispatch(0) _llint_function_for_construct_arity_check: prologue(functionForConstructCodeBlockGetter, functionCodeBlockSetter, _llint_entry_osr_function_for_construct_arityCheck, _llint_trace_arityCheck_for_construct) functionArityCheck(.functionForConstructBegin, _slow_path_construct_arityCheck) +.functionForConstructBegin: + functionInitialization(1) + dispatch(0) # Value-representation-specific code. @@ -491,10 +1199,34 @@ end # Value-representation-agnostic code. 
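
Editorial note: the opcode stubs that follow all share one shape — traceExecution(), a callSlowPath(...) into C++, then dispatch(N), where N is the instruction's length in slots. As a rough, hedged illustration of that dispatch style only (none of these names come from WebKit, and this is not how the real interpreter is structured), the standalone C++ sketch below shows each handler advancing a virtual PC by its own operand count before re-dispatching through a handler table, in the same spirit as dispatch(N) and the opcode map.

#include <cstdint>
#include <cstdio>
#include <vector>

// Hypothetical illustration of dispatch(N): every handler knows its own
// instruction length and advances the PC by that amount before re-dispatching.
enum Op : uint32_t { OpNop, OpPrint, OpHalt };

using Handler = bool (*)(const uint32_t*& pc);

static bool handleNop(const uint32_t*& pc)   { pc += 1; return true; }                              // dispatch(1)
static bool handlePrint(const uint32_t*& pc) { std::printf("%u\n", pc[1]); pc += 2; return true; }  // dispatch(2)
static bool handleHalt(const uint32_t*& pc)  { pc += 1; return false; }                             // stop the loop

int main()
{
    const Handler table[] = { handleNop, handlePrint, handleHalt };
    const std::vector<uint32_t> bytecode = { OpNop, OpPrint, 42, OpHalt };

    const uint32_t* pc = bytecode.data();
    while (table[*pc](pc)) { }   // fetch opcode, run handler, handler moves pc
    return 0;
}
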
-_llint_op_touch_entry: +_llint_op_create_direct_arguments: traceExecution() - callSlowPath(_slow_path_touch_entry) - dispatch(1) + callSlowPath(_slow_path_create_direct_arguments) + dispatch(2) + + +_llint_op_create_scoped_arguments: + traceExecution() + callSlowPath(_slow_path_create_scoped_arguments) + dispatch(3) + + +_llint_op_create_out_of_band_arguments: + traceExecution() + callSlowPath(_slow_path_create_out_of_band_arguments) + dispatch(2) + + +_llint_op_new_func: + traceExecution() + callSlowPath(_llint_slow_path_new_func) + dispatch(4) + + +_llint_op_new_generator_func: + traceExecution() + callSlowPath(_llint_slow_path_new_generator_func) + dispatch(4) _llint_op_new_array: @@ -557,12 +1289,11 @@ _llint_op_typeof: dispatch(3) -_llint_op_is_object: +_llint_op_is_object_or_null: traceExecution() - callSlowPath(_slow_path_is_object) + callSlowPath(_slow_path_is_object_or_null) dispatch(3) - _llint_op_is_function: traceExecution() callSlowPath(_slow_path_is_function) @@ -574,20 +1305,6 @@ _llint_op_in: callSlowPath(_slow_path_in) dispatch(4) -macro withInlineStorage(object, propertyStorage, continuation) - # Indicate that the object is the property storage, and that the - # property storage register is unused. - continuation(object, propertyStorage) -end - -macro withOutOfLineStorage(object, propertyStorage, continuation) - loadp JSObject::m_butterfly[object], propertyStorage - # Indicate that the propertyStorage register now points to the - # property storage, and that the object register may be reused - # if the object pointer is not needed anymore. - continuation(propertyStorage, object) -end - _llint_op_del_by_id: traceExecution() @@ -607,9 +1324,33 @@ _llint_op_put_by_index: dispatch(4) -_llint_op_put_getter_setter: +_llint_op_put_getter_by_id: + traceExecution() + callSlowPath(_llint_slow_path_put_getter_by_id) + dispatch(5) + + +_llint_op_put_setter_by_id: traceExecution() - callSlowPath(_llint_slow_path_put_getter_setter) + callSlowPath(_llint_slow_path_put_setter_by_id) + dispatch(5) + + +_llint_op_put_getter_setter_by_id: + traceExecution() + callSlowPath(_llint_slow_path_put_getter_setter_by_id) + dispatch(6) + + +_llint_op_put_getter_by_val: + traceExecution() + callSlowPath(_llint_slow_path_put_getter_by_val) + dispatch(5) + + +_llint_op_put_setter_by_val: + traceExecution() + callSlowPath(_llint_slow_path_put_setter_by_val) dispatch(5) @@ -693,19 +1434,27 @@ _llint_op_jngreatereq: _llint_op_loop_hint: traceExecution() + checkSwitchToJITForLoop() + dispatch(1) + + +_llint_op_watchdog: + traceExecution() loadp CodeBlock[cfr], t1 loadp CodeBlock::m_vm[t1], t1 - loadb VM::watchdog+Watchdog::m_timerDidFire[t1], t0 - btbnz t0, .handleWatchdogTimer + loadp VM::m_watchdog[t1], t0 + btpnz t0, .handleWatchdogTimer .afterWatchdogTimerCheck: - checkSwitchToJITForLoop() dispatch(1) .handleWatchdogTimer: + loadb Watchdog::m_timerDidFire[t0], t0 + btbz t0, .afterWatchdogTimerCheck callWatchdogTimerHandler(.throwHandler) jmp .afterWatchdogTimerCheck .throwHandler: jmp _llint_throw_from_slow_path_trampoline + _llint_op_switch_string: traceExecution() callSlowPath(_llint_slow_path_switch_string) @@ -715,25 +1464,65 @@ _llint_op_switch_string: _llint_op_new_func_exp: traceExecution() callSlowPath(_llint_slow_path_new_func_exp) - dispatch(3) + dispatch(4) +_llint_op_new_generator_func_exp: + traceExecution() + callSlowPath(_llint_slow_path_new_generator_func_exp) + dispatch(4) + +_llint_op_new_arrow_func_exp: + traceExecution() + callSlowPath(_llint_slow_path_new_arrow_func_exp) + 
dispatch(4) _llint_op_call: traceExecution() arrayProfileForCall() - doCall(_llint_slow_path_call) + doCall(_llint_slow_path_call, prepareForRegularCall) +_llint_op_tail_call: + traceExecution() + arrayProfileForCall() + checkSwitchToJITForEpilogue() + doCall(_llint_slow_path_call, prepareForTailCall) _llint_op_construct: traceExecution() - doCall(_llint_slow_path_construct) + doCall(_llint_slow_path_construct, prepareForRegularCall) +macro doCallVarargs(slowPath, prepareCall) + callSlowPath(_llint_slow_path_size_frame_for_varargs) + branchIfException(_llint_throw_from_slow_path_trampoline) + # calleeFrame in r1 + if JSVALUE64 + move r1, sp + else + # The calleeFrame is not stack aligned, move down by CallerFrameAndPCSize to align + if ARMv7 + subp r1, CallerFrameAndPCSize, t2 + move t2, sp + else + subp r1, CallerFrameAndPCSize, sp + end + end + slowPathForCall(slowPath, prepareCall) +end _llint_op_call_varargs: traceExecution() - callSlowPath(_llint_slow_path_size_and_alloc_frame_for_varargs) - branchIfException(_llint_throw_from_slow_path_trampoline) - slowPathForCall(_llint_slow_path_call_varargs) + doCallVarargs(_llint_slow_path_call_varargs, prepareForRegularCall) + +_llint_op_tail_call_varargs: + traceExecution() + checkSwitchToJITForEpilogue() + # We lie and perform the tail call instead of preparing it since we can't + # prepare the frame for a call opcode + doCallVarargs(_llint_slow_path_call_varargs, prepareForTailCall) + +_llint_op_construct_varargs: + traceExecution() + doCallVarargs(_llint_slow_path_construct_varargs, prepareForRegularCall) _llint_op_call_eval: @@ -772,7 +1561,7 @@ _llint_op_call_eval: # and a PC to call, and that PC may be a dummy thunk that just # returns the JS value that the eval returned. - slowPathForCall(_llint_slow_path_call_eval) + slowPathForCall(_llint_slow_path_call_eval, prepareForRegularCall) _llint_generic_return_point: @@ -785,28 +1574,34 @@ _llint_op_strcat: dispatch(4) -_llint_op_get_pnames: +_llint_op_push_with_scope: traceExecution() - callSlowPath(_llint_slow_path_get_pnames) - dispatch(0) # The slow_path either advances the PC or jumps us to somewhere else. 
+ callSlowPath(_slow_path_push_with_scope) + dispatch(4) -_llint_op_push_with_scope: +_llint_op_assert: traceExecution() - callSlowPath(_llint_slow_path_push_with_scope) - dispatch(2) + callSlowPath(_slow_path_assert) + dispatch(3) -_llint_op_pop_scope: +_llint_op_save: traceExecution() - callSlowPath(_llint_slow_path_pop_scope) - dispatch(1) + callSlowPath(_slow_path_save) + dispatch(4) -_llint_op_push_name_scope: +_llint_op_resume: traceExecution() - callSlowPath(_llint_slow_path_push_name_scope) - dispatch(4) + callSlowPath(_slow_path_resume) + dispatch(3) + + +_llint_op_create_lexical_environment: + traceExecution() + callSlowPath(_slow_path_create_lexical_environment) + dispatch(5) _llint_op_throw: @@ -860,6 +1655,56 @@ _llint_native_call_trampoline: _llint_native_construct_trampoline: nativeCallTrampoline(NativeExecutable::m_constructor) +_llint_op_get_enumerable_length: + traceExecution() + callSlowPath(_slow_path_get_enumerable_length) + dispatch(3) + +_llint_op_has_indexed_property: + traceExecution() + callSlowPath(_slow_path_has_indexed_property) + dispatch(5) + +_llint_op_has_structure_property: + traceExecution() + callSlowPath(_slow_path_has_structure_property) + dispatch(5) + +_llint_op_has_generic_property: + traceExecution() + callSlowPath(_slow_path_has_generic_property) + dispatch(4) + +_llint_op_get_direct_pname: + traceExecution() + callSlowPath(_slow_path_get_direct_pname) + dispatch(7) + +_llint_op_get_property_enumerator: + traceExecution() + callSlowPath(_slow_path_get_property_enumerator) + dispatch(3) + +_llint_op_enumerator_structure_pname: + traceExecution() + callSlowPath(_slow_path_next_structure_enumerator_pname) + dispatch(4) + +_llint_op_enumerator_generic_pname: + traceExecution() + callSlowPath(_slow_path_next_generic_enumerator_pname) + dispatch(4) + +_llint_op_to_index_string: + traceExecution() + callSlowPath(_slow_path_to_index_string) + dispatch(3) + +_llint_op_copy_rest: + traceExecution() + callSlowPath(_slow_path_copy_rest) + dispatch(4) + # Lastly, make sure that we can link even though we don't support all opcodes. # These opcodes should never arise when using LLInt or either JIT. We assert @@ -878,53 +1723,3 @@ macro notSupported() break end end - -_llint_op_get_by_id_chain: - notSupported() - -_llint_op_get_by_id_custom_chain: - notSupported() - -_llint_op_get_by_id_custom_proto: - notSupported() - -_llint_op_get_by_id_custom_self: - notSupported() - -_llint_op_get_by_id_generic: - notSupported() - -_llint_op_get_by_id_getter_chain: - notSupported() - -_llint_op_get_by_id_getter_proto: - notSupported() - -_llint_op_get_by_id_getter_self: - notSupported() - -_llint_op_get_by_id_proto: - notSupported() - -_llint_op_get_by_id_self: - notSupported() - -_llint_op_get_string_length: - notSupported() - -_llint_op_put_by_id_generic: - notSupported() - -_llint_op_put_by_id_replace: - notSupported() - -_llint_op_put_by_id_transition: - notSupported() - -_llint_op_init_global_const_nop: - dispatch(5) - -# Indicate the end of LLInt. -_llint_end: - crash() - diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp b/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp index 48148c6f4..72bcddf57 100644 --- a/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp +++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2014 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -25,19 +25,15 @@ #include "config.h" #include "LowLevelInterpreter.h" - -#if ENABLE(LLINT) - #include "LLIntOfflineAsmConfig.h" #include <wtf/InlineASM.h> -#if ENABLE(LLINT_C_LOOP) +#if !ENABLE(JIT) #include "CodeBlock.h" #include "CommonSlowPaths.h" #include "LLIntCLoop.h" #include "LLIntSlowPaths.h" -#include "Operations.h" -#include "VMInspector.h" +#include "JSCInlines.h" #include <wtf/Assertions.h> #include <wtf/MathExtras.h> @@ -90,6 +86,12 @@ using namespace JSC::LLInt; #define OFFLINE_ASM_BEGIN #define OFFLINE_ASM_END +#if ENABLE(OPCODE_TRACING) +#define TRACE_OPCODE(opcode) dataLogF(" op %s\n", #opcode) +#else +#define TRACE_OPCODE(opcode) +#endif + // To keep compilers happy in case of unused labels, force usage of the label: #define USE_LABEL(label) \ do { \ @@ -97,7 +99,9 @@ using namespace JSC::LLInt; goto label; \ } while (false) -#define OFFLINE_ASM_OPCODE_LABEL(opcode) DEFINE_OPCODE(opcode) USE_LABEL(opcode); +#define OFFLINE_ASM_OPCODE_LABEL(opcode) DEFINE_OPCODE(opcode) USE_LABEL(opcode); TRACE_OPCODE(opcode); + +#define OFFLINE_ASM_GLOBAL_LABEL(label) OFFLINE_ASM_GLUE_LABEL(label) #if ENABLE(COMPUTED_GOTO_OPCODES) #define OFFLINE_ASM_GLUE_LABEL(label) label: USE_LABEL(label); @@ -147,6 +151,7 @@ static void Double2Ints(double val, uint32_t& lo, uint32_t& hi) // pseudo register, as well as hides endianness differences. struct CLoopRegister { + CLoopRegister() { i = static_cast<intptr_t>(0xbadbeef0baddbeef); } union { intptr_t i; uintptr_t u; @@ -212,10 +217,15 @@ struct CLoopRegister { #endif // !CPU(BIG_ENDIAN) #endif // !USE(JSVALUE64) + intptr_t* ip; int8_t* i8p; void* vp; + CallFrame* callFrame; ExecState* execState; void* instruction; + VM* vm; + JSCell* cell; + ProtoCallFrame* protoCallFrame; NativeFunction nativeFunc; #if USE(JSVALUE64) int64_t i64; @@ -226,6 +236,13 @@ struct CLoopRegister { Opcode opcode; }; + operator ExecState*() { return execState; } + operator Instruction*() { return reinterpret_cast<Instruction*>(instruction); } + operator VM*() { return vm; } + operator ProtoCallFrame*() { return protoCallFrame; } + operator Register*() { return reinterpret_cast<Register*>(vp); } + operator JSCell*() { return cell; } + #if USE(JSVALUE64) inline void clearHighWord() { i32padding = 0; } #else @@ -237,7 +254,7 @@ struct CLoopRegister { // The llint C++ interpreter loop: // -JSValue CLoop::execute(CallFrame* callFrame, Opcode entryOpcode, bool isInitializationPass) +JSValue CLoop::execute(OpcodeID entryOpcodeID, void* executableAddress, VM* vm, ProtoCallFrame* protoCallFrame, bool isInitializationPass) { #define CAST reinterpret_cast #define SIGN_BIT32(x) ((x) & 0x80000000) @@ -272,8 +289,6 @@ JSValue CLoop::execute(CallFrame* callFrame, Opcode entryOpcode, bool isInitiali return JSValue(); } - ASSERT(callFrame->vm().topCallFrame == callFrame); - // Define the pseudo registers used by the LLINT C Loop backend: ASSERT(sizeof(CLoopRegister) == sizeof(intptr_t)); @@ -308,69 +323,66 @@ JSValue CLoop::execute(CallFrame* callFrame, Opcode entryOpcode, bool isInitiali // 2. 32 bit result values will be in the low 32-bit of t0. // 3. 64 bit result values will be in t0. 
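
Editorial note: the CLoopRegister struct defined earlier in this file is essentially one machine word viewed through several types, which is how the C loop mimics the real registers used by the assembly backends. The standalone C++ sketch below (hypothetical names, not WebKit code) shows the same idea in miniature: a single union acts as a pseudo-register that can be read back as an integer or as a pointer, depending on what the interpreter needs at that point.

#include <cassert>
#include <cstdint>

// Hypothetical pseudo-register: one storage slot, several typed views,
// in the spirit of CLoopRegister above.
struct PseudoRegister {
    union {
        intptr_t  i;    // signed integer view
        uintptr_t u;    // unsigned integer view
        void*     vp;   // untyped pointer view
    };
};

int main()
{
    int payload = 7;
    PseudoRegister r;

    r.vp = &payload;                          // write through the pointer view
    assert(*static_cast<int*>(r.vp) == 7);    // read back through the same view

    r.i = 42;                                 // reuse the same storage as an integer
    assert(r.i == 42);
    return 0;
}
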
- CLoopRegister t0, t1, t2, t3; + CLoopRegister t0, t1, t2, t3, t5, t7, sp, cfr, lr, pc; #if USE(JSVALUE64) - CLoopRegister rBasePC, tagTypeNumber, tagMask; + CLoopRegister pcBase, tagTypeNumber, tagMask; #endif - CLoopRegister rRetVPC; CLoopDoubleRegister d0, d1; - // Keep the compiler happy. We don't really need this, but the compiler - // will complain. This makes the warning go away. - t0.i = 0; - t1.i = 0; - - VM* vm = &callFrame->vm(); - - CodeBlock* codeBlock = callFrame->codeBlock(); - Instruction* vPC; - - // rPC is an alias for vPC. Set up the alias: - CLoopRegister& rPC = *CAST<CLoopRegister*>(&vPC); + lr.opcode = getOpcode(llint_return_to_host); + sp.vp = vm->interpreter->stack().topOfStack() + 1; + cfr.callFrame = vm->topCallFrame; +#ifndef NDEBUG + void* startSP = sp.vp; + CallFrame* startCFR = cfr.callFrame; +#endif -#if USE(JSVALUE32_64) - vPC = codeBlock->instructions().begin(); -#else // USE(JSVALUE64) - vPC = 0; - rBasePC.vp = codeBlock->instructions().begin(); + // Initialize the incoming args for doVMEntryToJavaScript: + t0.vp = executableAddress; + t1.vm = vm; + t2.protoCallFrame = protoCallFrame; +#if USE(JSVALUE64) // For the ASM llint, JITStubs takes care of this initialization. We do // it explicitly here for the C loop: tagTypeNumber.i = 0xFFFF000000000000; tagMask.i = 0xFFFF000000000002; #endif // USE(JSVALUE64) - // cfr is an alias for callFrame. Set up this alias: - CLoopRegister& cfr = *CAST<CLoopRegister*>(&callFrame); - - // Simulate a native return PC which should never be used: - rRetVPC.i = 0xbbadbeef; - // Interpreter variables for value passing between opcodes and/or helpers: NativeFunction nativeFunc = 0; JSValue functionReturnValue; - Opcode opcode; + Opcode opcode = getOpcode(entryOpcodeID); - opcode = entryOpcode; +#define PUSH(cloopReg) \ + do { \ + sp.ip--; \ + *sp.ip = cloopReg.i; \ + } while (false) - #if ENABLE(OPCODE_STATS) - #define RECORD_OPCODE_STATS(__opcode) \ - OpcodeStats::recordInstruction(__opcode) - #else - #define RECORD_OPCODE_STATS(__opcode) - #endif +#define POP(cloopReg) \ + do { \ + cloopReg.i = *sp.ip; \ + sp.ip++; \ + } while (false) - #if USE(JSVALUE32_64) - #define FETCH_OPCODE() vPC->u.opcode - #else // USE(JSVALUE64) - #define FETCH_OPCODE() *bitwise_cast<Opcode*>(rBasePC.i8p + rPC.i * 8) - #endif // USE(JSVALUE64) +#if ENABLE(OPCODE_STATS) +#define RECORD_OPCODE_STATS(__opcode) OpcodeStats::recordInstruction(__opcode) +#else +#define RECORD_OPCODE_STATS(__opcode) +#endif - #define NEXT_INSTRUCTION() \ - do { \ - opcode = FETCH_OPCODE(); \ - DISPATCH_OPCODE(); \ - } while (false) +#if USE(JSVALUE32_64) +#define FETCH_OPCODE() pc.opcode +#else // USE(JSVALUE64) +#define FETCH_OPCODE() *bitwise_cast<Opcode*>(pcBase.i8p + pc.i * 8) +#endif // USE(JSVALUE64) + +#define NEXT_INSTRUCTION() \ + do { \ + opcode = FETCH_OPCODE(); \ + DISPATCH_OPCODE(); \ + } while (false) #if ENABLE(COMPUTED_GOTO_OPCODES) @@ -412,14 +424,22 @@ JSValue CLoop::execute(CallFrame* callFrame, Opcode entryOpcode, bool isInitiali #include "LLIntAssembly.h" + OFFLINE_ASM_GLUE_LABEL(llint_return_to_host) + { + ASSERT(startSP == sp.vp); + ASSERT(startCFR == cfr.callFrame); +#if USE(JSVALUE32_64) + return JSValue(t1.i, t0.i); // returning JSValue(tag, payload); +#else + return JSValue::decode(t0.encodedJSValue); +#endif + } + // In the ASM llint, getHostCallReturnValue() is a piece of glue - // function provided by the JIT (see dfg/DFGOperations.cpp). + // function provided by the JIT (see jit/JITOperations.cpp). 
// We simulate it here with a pseduo-opcode handler. OFFLINE_ASM_GLUE_LABEL(getHostCallReturnValue) { - // The ASM part pops the frame: - callFrame = callFrame->callerFrame(); - // The part in getHostCallReturnValueWithExecState(): JSValue result = vm->hostCallReturnValue; #if USE(JSVALUE32_64) @@ -428,12 +448,8 @@ JSValue CLoop::execute(CallFrame* callFrame, Opcode entryOpcode, bool isInitiali #else t0.encodedJSValue = JSValue::encode(result); #endif - goto doReturnHelper; - } - - OFFLINE_ASM_GLUE_LABEL(returnFromJavaScript) - { - return vm->exception(); + opcode = lr.opcode; + DISPATCH_OPCODE(); } #if !ENABLE(COMPUTED_GOTO_OPCODES) @@ -443,55 +459,6 @@ JSValue CLoop::execute(CallFrame* callFrame, Opcode entryOpcode, bool isInitiali } // END bytecode handler cases. - //======================================================================== - // Bytecode helpers: - - doReturnHelper: { - ASSERT(!!callFrame); - if (callFrame->isVMEntrySentinel()) { -#if USE(JSVALUE32_64) - return JSValue(t1.i, t0.i); // returning JSValue(tag, payload); -#else - return JSValue::decode(t0.encodedJSValue); -#endif - } - - // The normal ASM llint call implementation returns to the caller as - // recorded in rRetVPC, and the caller would fetch the return address - // from ArgumentCount.tag() (see the dispatchAfterCall() macro used in - // the callTargetFunction() macro in the llint asm files). - // - // For the C loop, we don't have the JIT stub to do this work for us. So, - // we jump to llint_generic_return_point. - - vPC = callFrame->currentVPC(); - -#if USE(JSVALUE64) - // Based on LowLevelInterpreter64.asm's dispatchAfterCall(): - - // When returning from a native trampoline call, unlike the assembly - // LLInt, we can't simply return to the caller. In our case, we grab - // the caller's VPC and resume execution there. However, the caller's - // VPC returned by callFrame->currentVPC() is in the form of the real - // address of the target bytecode, but the 64-bit llint expects the - // VPC to be a bytecode offset. Hence, we need to map it back to a - // bytecode offset before we dispatch via the usual dispatch mechanism - // i.e. NEXT_INSTRUCTION(): - - codeBlock = callFrame->codeBlock(); - ASSERT(codeBlock); - rPC.vp = callFrame->currentVPC(); - rPC.i = rPC.i8p - reinterpret_cast<int8_t*>(codeBlock->instructions().begin()); - rPC.i >>= 3; - - rBasePC.vp = codeBlock->instructions().begin(); -#endif // USE(JSVALUE64) - - goto llint_generic_return_point; - - } // END doReturnHelper. - - #if ENABLE(COMPUTED_GOTO_OPCODES) // Keep the compiler happy so that it doesn't complain about unused // labels for the LLInt trampoline glue. The labels are automatically @@ -511,49 +478,40 @@ JSValue CLoop::execute(CallFrame* callFrame, Opcode entryOpcode, bool isInitiali #undef CAST #undef SIGN_BIT32 + return JSValue(); // to suppress a compiler warning. 
} // Interpreter::llintCLoopExecute() } // namespace JSC -#else // !ENABLE(LLINT_C_LOOP) +#elif !OS(WINDOWS) //============================================================================ // Define the opcode dispatch mechanism when using an ASM loop: // // These are for building an interpreter from generated assembly code: -#if CPU(X86_64) && COMPILER(CLANG) -#define OFFLINE_ASM_BEGIN asm ( \ - ".cfi_startproc\n" - -#define OFFLINE_ASM_END \ - ".cfi_endproc\n" \ -); -#else #define OFFLINE_ASM_BEGIN asm ( #define OFFLINE_ASM_END ); -#endif -#define OFFLINE_ASM_OPCODE_LABEL(__opcode) OFFLINE_ASM_GLOBAL_LABEL(llint_##__opcode) -#define OFFLINE_ASM_GLUE_LABEL(__opcode) OFFLINE_ASM_GLOBAL_LABEL(__opcode) +#define OFFLINE_ASM_OPCODE_LABEL(__opcode) OFFLINE_ASM_LOCAL_LABEL(llint_##__opcode) +#define OFFLINE_ASM_GLUE_LABEL(__opcode) OFFLINE_ASM_LOCAL_LABEL(__opcode) #if CPU(ARM_THUMB2) #define OFFLINE_ASM_GLOBAL_LABEL(label) \ ".text\n" \ + ".align 4\n" \ ".globl " SYMBOL_STRING(label) "\n" \ HIDE_SYMBOL(label) "\n" \ ".thumb\n" \ ".thumb_func " THUMB_FUNC_PARAM(label) "\n" \ SYMBOL_STRING(label) ":\n" -#elif CPU(X86_64) && COMPILER(CLANG) +#elif CPU(ARM64) #define OFFLINE_ASM_GLOBAL_LABEL(label) \ ".text\n" \ + ".align 4\n" \ ".globl " SYMBOL_STRING(label) "\n" \ HIDE_SYMBOL(label) "\n" \ - SYMBOL_STRING(label) ":\n" \ - ".cfi_def_cfa rbp, 0\n" \ - ".cfi_offset 16, 8\n" \ - ".cfi_offset 6, 0\n" + SYMBOL_STRING(label) ":\n" #else #define OFFLINE_ASM_GLOBAL_LABEL(label) \ ".text\n" \ @@ -568,6 +526,4 @@ JSValue CLoop::execute(CallFrame* callFrame, Opcode entryOpcode, bool isInitiali // for the interpreter, as compiled from LowLevelInterpreter.asm. #include "LLIntAssembly.h" -#endif // !ENABLE(LLINT_C_LOOP) - -#endif // ENABLE(LLINT) +#endif // ENABLE(JIT) diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.h b/Source/JavaScriptCore/llint/LowLevelInterpreter.h index f45a07303..8621dbd5a 100644 --- a/Source/JavaScriptCore/llint/LowLevelInterpreter.h +++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.h @@ -26,13 +26,9 @@ #ifndef LowLevelInterpreter_h #define LowLevelInterpreter_h -#include <wtf/Platform.h> - -#if ENABLE(LLINT) - #include "Opcode.h" -#if ENABLE(LLINT_C_LOOP) +#if !ENABLE(JIT) namespace JSC { @@ -49,18 +45,6 @@ FOR_EACH_CORE_OPCODE_ID(LLINT_OPCODE_ALIAS) } // namespace JSC -#else // !ENABLE(LLINT_C_LOOP) - -#define LLINT_INSTRUCTION_DECL(opcode, length) extern "C" void llint_##opcode(); - FOR_EACH_OPCODE_ID(LLINT_INSTRUCTION_DECL); -#undef LLINT_INSTRUCTION_DECL - -#define DECLARE_LLINT_NATIVE_HELPER(name, length) extern "C" void name(); - FOR_EACH_LLINT_NATIVE_HELPER(DECLARE_LLINT_NATIVE_HELPER) -#undef DECLARE_LLINT_NATIVE_HELPER - -#endif // !ENABLE(LLINT_C_LOOP) - -#endif // ENABLE(LLINT) +#endif // !ENABLE(JIT) #endif // LowLevelInterpreter_h diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm index f1a470eae..a92d55aa9 100644 --- a/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm +++ b/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm @@ -1,4 +1,4 @@ -# Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved. +# Copyright (C) 2011-2016 Apple Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions @@ -22,54 +22,6 @@ # THE POSSIBILITY OF SUCH DAMAGE. 
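
Editorial note: in the JIT-enabled build shown just above in LowLevelInterpreter.cpp, the OFFLINE_ASM_* macros wrap the generated LLIntAssembly.h in a single file-scope asm() block and emit a label for each opcode. The snippet below is a minimal, hypothetical sketch of that technique for Linux x86-64 with GCC/Clang syntax (nothing here is WebKit code, and other platforms differ — for example Darwin prefixes symbols with an underscore, which is the kind of difference SYMBOL_STRING papers over): a file-scope asm block defines a global label, and C++ declares and calls it like an ordinary function.

#include <cstdio>

extern "C" int asm_answer();   // implemented by the asm() block below

// File-scope assembly, in the spirit of OFFLINE_ASM_BEGIN / OFFLINE_ASM_GLOBAL_LABEL.
asm(
    ".text\n"
    ".globl asm_answer\n"
    "asm_answer:\n"
    "    movl $42, %eax\n"      // System V x86-64: 32-bit return value goes in eax
    "    ret\n"
);

int main()
{
    std::printf("%d\n", asm_answer());   // prints 42
    return 0;
}
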
-# Crash course on the language that this is written in (which I just call -# "assembly" even though it's more than that): -# -# - Mostly gas-style operand ordering. The last operand tends to be the -# destination. So "a := b" is written as "mov b, a". But unlike gas, -# comparisons are in-order, so "if (a < b)" is written as -# "bilt a, b, ...". -# -# - "b" = byte, "h" = 16-bit word, "i" = 32-bit word, "p" = pointer. -# Currently this is just 32-bit so "i" and "p" are interchangeable -# except when an op supports one but not the other. -# -# - In general, valid operands for macro invocations and instructions are -# registers (eg "t0"), addresses (eg "4[t0]"), base-index addresses -# (eg "7[t0, t1, 2]"), absolute addresses (eg "0xa0000000[]"), or labels -# (eg "_foo" or ".foo"). Macro invocations can also take anonymous -# macros as operands. Instructions cannot take anonymous macros. -# -# - Labels must have names that begin with either "_" or ".". A "." label -# is local and gets renamed before code gen to minimize namespace -# pollution. A "_" label is an extern symbol (i.e. ".globl"). The "_" -# may or may not be removed during code gen depending on whether the asm -# conventions for C name mangling on the target platform mandate a "_" -# prefix. -# -# - A "macro" is a lambda expression, which may be either anonymous or -# named. But this has caveats. "macro" can take zero or more arguments, -# which may be macros or any valid operands, but it can only return -# code. But you can do Turing-complete things via continuation passing -# style: "macro foo (a, b) b(a) end foo(foo, foo)". Actually, don't do -# that, since you'll just crash the assembler. -# -# - An "if" is a conditional on settings. Any identifier supplied in the -# predicate of an "if" is assumed to be a #define that is available -# during code gen. So you can't use "if" for computation in a macro, but -# you can use it to select different pieces of code for different -# platforms. -# -# - Arguments to macros follow lexical scoping rather than dynamic scoping. -# Const's also follow lexical scoping and may override (hide) arguments -# or other consts. All variables (arguments and constants) can be bound -# to operands. Additionally, arguments (but not constants) can be bound -# to macros. - - -# Below we have a bunch of constant declarations. Each constant must have -# a corresponding ASSERT() in LLIntData.cpp. - # Utilities macro dispatch(advance) addp advance * 4, PC @@ -89,49 +41,47 @@ end macro dispatchAfterCall() loadi ArgumentCount + TagOffset[cfr], PC - loadi 4[PC], t2 - storei t1, TagOffset[cfr, t2, 8] - storei t0, PayloadOffset[cfr, t2, 8] - valueProfile(t1, t0, 28, t3) - dispatch(8) + loadi 4[PC], t3 + storei r1, TagOffset[cfr, t3, 8] + storei r0, PayloadOffset[cfr, t3, 8] + valueProfile(r1, r0, 4 * (CallOpCodeSize - 1), t3) + dispatch(CallOpCodeSize) end -macro cCall2(function, arg1, arg2) - if ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS - move arg1, a0 - move arg2, a1 +macro cCall2(function) + if ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4 call function - elsif X86 - poke arg1, 0 - poke arg2, 1 - call function - elsif SH4 - setargs arg1, arg2 + elsif X86 or X86_WIN + subp 8, sp + push a1 + push a0 call function + addp 16, sp elsif C_LOOP - cloopCallSlowPath function, arg1, arg2 + cloopCallSlowPath function, a0, a1 else error end end -# This barely works. arg3 and arg4 should probably be immediates. 
-macro cCall4(function, arg1, arg2, arg3, arg4) - if ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS - move arg1, a0 - move arg2, a1 - move arg3, a2 - move arg4, a3 - call function - elsif X86 - poke arg1, 0 - poke arg2, 1 - poke arg3, 2 - poke arg4, 3 +macro cCall2Void(function) + if C_LOOP + cloopCallSlowPathVoid function, a0, a1 + else + cCall2(function) + end +end + +macro cCall4(function) + if ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4 call function - elsif SH4 - setargs arg1, arg2, arg3, arg4 + elsif X86 or X86_WIN + push a3 + push a2 + push a1 + push a0 call function + addp 16, sp elsif C_LOOP error else @@ -140,214 +90,247 @@ macro cCall4(function, arg1, arg2, arg3, arg4) end macro callSlowPath(slowPath) - cCall2(slowPath, cfr, PC) - move t0, PC - move t1, cfr + move cfr, a0 + move PC, a1 + cCall2(slowPath) + move r0, PC end -macro functionPrologue(extraStackSpace) - if X86 - push cfr - move sp, cfr - end - pushCalleeSaves - if ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS - push cfr - push lr +macro doVMEntry(makeCall) + functionPrologue() + pushCalleeSaves() + + # x86 needs to load arguments from the stack + if X86 or X86_WIN + loadp 16[cfr], a2 + loadp 12[cfr], a1 + loadp 8[cfr], a0 end - subp extraStackSpace, sp -end -macro functionEpilogue(extraStackSpace) - addp extraStackSpace, sp - if ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS - pop lr - pop cfr + const entry = a0 + const vm = a1 + const protoCallFrame = a2 + + # We are using t3, t4 and t5 as temporaries through the function. + # Since we have the guarantee that tX != aY when X != Y, we are safe from + # aliasing problems with our arguments. + + if ARMv7 + vmEntryRecord(cfr, t3) + move t3, sp + else + vmEntryRecord(cfr, sp) end - popCalleeSaves - if X86 - pop cfr + + storep vm, VMEntryRecord::m_vm[sp] + loadp VM::topCallFrame[vm], t4 + storep t4, VMEntryRecord::m_prevTopCallFrame[sp] + loadp VM::topVMEntryFrame[vm], t4 + storep t4, VMEntryRecord::m_prevTopVMEntryFrame[sp] + + # Align stack pointer + if X86_WIN or MIPS + addp CallFrameAlignSlots * SlotSize, sp, t3 + andp ~StackAlignmentMask, t3 + subp t3, CallFrameAlignSlots * SlotSize, sp + elsif ARM or ARMv7 or ARMv7_TRADITIONAL + addp CallFrameAlignSlots * SlotSize, sp, t3 + clrbp t3, StackAlignmentMask, t3 + if ARMv7 + subp t3, CallFrameAlignSlots * SlotSize, t3 + move t3, sp + else + subp t3, CallFrameAlignSlots * SlotSize, sp + end end -end -macro doCallToJavaScript(makeCall, doReturn) - if X86 - const entry = t5 - const vmTopCallFrame = t2 - const protoCallFrame = t4 - - const extraStackSpace = 28 - const previousCFR = t0 - const previousPC = t1 - const temp1 = t0 # Same as previousCFR - const temp2 = t1 # Same as previousPC - const temp3 = t2 # same as vmTopCallFrame - const temp4 = t3 - elsif ARM or ARMv7_TRADITIONAL - const entry = a0 - const vmTopCallFrame = a1 - const protoCallFrame = a2 - const topOfStack = a3 - - const extraStackSpace = 16 - const previousCFR = t3 - const previousPC = lr - const temp1 = t3 # Same as previousCFR - const temp2 = a3 # Same as topOfStack - const temp3 = t4 - const temp4 = t5 - elsif ARMv7 - const entry = a0 - const vmTopCallFrame = a1 - const protoCallFrame = a2 - const topOfStack = a3 - - const extraStackSpace = 28 - const previousCFR = t3 - const previousPC = lr - const temp1 = t3 # Same as previousCFR - const temp2 = a3 # Same as topOfStack - const temp3 = t4 - const temp4 = t5 - elsif MIPS - const entry = a0 - const vmTopCallFrame = a1 - const protoCallFrame = a2 - const topOfStack = a3 - - const extraStackSpace = 36 - const 
previousCFR = t2 - const previousPC = lr - const temp1 = t3 - const temp2 = t4 - const temp3 = t5 - const temp4 = t6 - elsif SH4 - const entry = a0 - const vmTopCallFrame = a1 - const protoCallFrame = a2 - const topOfStack = a3 - - const extraStackSpace = 20 - const previousCFR = t3 - const previousPC = lr - const temp1 = t3 # Same as previousCFR - const temp2 = a3 # Same as topOfStack - const temp3 = t8 - const temp4 = t9 + loadi ProtoCallFrame::paddedArgCount[protoCallFrame], t4 + addp CallFrameHeaderSlots, t4, t4 + lshiftp 3, t4 + subp sp, t4, t3 + + # Ensure that we have enough additional stack capacity for the incoming args, + # and the frame for the JS code we're executing. We need to do this check + # before we start copying the args from the protoCallFrame below. + bpaeq t3, VM::m_jsStackLimit[vm], .stackHeightOK + + if C_LOOP + move entry, t4 + move vm, t5 + cloopCallSlowPath _llint_stack_check_at_vm_entry, vm, t3 + bpeq t0, 0, .stackCheckFailed + move t4, entry + move t5, vm + jmp .stackHeightOK + +.stackCheckFailed: + move t4, entry + move t5, vm end - if X86 - loadp [sp], previousPC - move cfr, previousCFR + subp 8, sp # Align stack for cCall2() to make a call. + move vm, a0 + move protoCallFrame, a1 + cCall2(_llint_throw_stack_overflow_error) + + if ARMv7 + vmEntryRecord(cfr, t3) + move t3, sp + else + vmEntryRecord(cfr, sp) end - functionPrologue(extraStackSpace) - if X86 - loadp extraStackSpace+20[sp], entry - loadp extraStackSpace+24[sp], vmTopCallFrame - loadp extraStackSpace+28[sp], protoCallFrame - loadp extraStackSpace+32[sp], cfr + + loadp VMEntryRecord::m_vm[sp], t5 + loadp VMEntryRecord::m_prevTopCallFrame[sp], t4 + storep t4, VM::topCallFrame[t5] + loadp VMEntryRecord::m_prevTopVMEntryFrame[sp], t4 + storep t4, VM::topVMEntryFrame[t5] + + if ARMv7 + subp cfr, CalleeRegisterSaveSize, t5 + move t5, sp else - move cfr, previousCFR - move topOfStack, cfr + subp cfr, CalleeRegisterSaveSize, sp end - subp (CallFrameHeaderSlots-1)*8, cfr - storep 0, ArgumentCount+4[cfr] - storep 0, ArgumentCount[cfr] - storep 0, Callee+4[cfr] - storep vmTopCallFrame, Callee[cfr] - loadp [vmTopCallFrame], temp4 - storep 0, ScopeChain+4[cfr] - storep temp4, ScopeChain[cfr] - storep 0, CodeBlock+4[cfr] - storep 1, CodeBlock[cfr] - storep previousPC, ReturnPC[cfr] - storep previousCFR, CallerFrame[cfr] - move cfr, temp1 - - loadi ProtoCallFrame::paddedArgCount[protoCallFrame], temp2 - addp CallFrameHeaderSlots, temp2, temp2 - lshiftp 3, temp2 - subp temp2, cfr - storep temp1, CallerFrame[cfr] - - move 5, temp1 + popCalleeSaves() + functionEpilogue() + ret + +.stackHeightOK: + move t3, sp + move 4, t3 .copyHeaderLoop: - subi 1, temp1 - loadp [protoCallFrame, temp1, 8], temp3 - storep temp3, CodeBlock[cfr, temp1, 8] - loadp 4[protoCallFrame, temp1, 8], temp3 - storep temp3, CodeBlock+4[cfr, temp1, 8] - btinz temp1, .copyHeaderLoop - - loadi ProtoCallFrame::argCountAndCodeOriginValue[protoCallFrame], temp2 - subi 1, temp2 - loadi ProtoCallFrame::paddedArgCount[protoCallFrame], temp3 - subi 1, temp3 - - bieq temp2, temp3, .copyArgs - move 0, temp1 - move UndefinedTag, temp4 + subi 1, t3 + loadi TagOffset[protoCallFrame, t3, 8], t5 + storei t5, TagOffset + CodeBlock[sp, t3, 8] + loadi PayloadOffset[protoCallFrame, t3, 8], t5 + storei t5, PayloadOffset + CodeBlock[sp, t3, 8] + btinz t3, .copyHeaderLoop + + loadi PayloadOffset + ProtoCallFrame::argCountAndCodeOriginValue[protoCallFrame], t4 + subi 1, t4 + loadi ProtoCallFrame::paddedArgCount[protoCallFrame], t5 + subi 1, t5 + + bieq t4, t5, .copyArgs 
.fillExtraArgsLoop: - subi 1, temp3 - storep temp1, ThisArgumentOffset+8+PayloadOffset[cfr, temp3, 8] - storep temp4, ThisArgumentOffset+8+TagOffset[cfr, temp3, 8] - bineq temp2, temp3, .fillExtraArgsLoop + subi 1, t5 + storei UndefinedTag, ThisArgumentOffset + 8 + TagOffset[sp, t5, 8] + storei 0, ThisArgumentOffset + 8 + PayloadOffset[sp, t5, 8] + bineq t4, t5, .fillExtraArgsLoop .copyArgs: - loadp ProtoCallFrame::args[protoCallFrame], temp1 + loadp ProtoCallFrame::args[protoCallFrame], t3 .copyArgsLoop: - btiz temp2, .copyArgsDone - subi 1, temp2 - loadp PayloadOffset[temp1, temp2, 8], temp3 - loadp TagOffset[temp1, temp2, 8], temp4 - storep temp3, ThisArgumentOffset+8+PayloadOffset[cfr, temp2, 8] - storep temp4, ThisArgumentOffset+8+TagOffset[cfr, temp2, 8] + btiz t4, .copyArgsDone + subi 1, t4 + loadi TagOffset[t3, t4, 8], t5 + storei t5, ThisArgumentOffset + 8 + TagOffset[sp, t4, 8] + loadi PayloadOffset[t3, t4, 8], t5 + storei t5, ThisArgumentOffset + 8 + PayloadOffset[sp, t4, 8] jmp .copyArgsLoop .copyArgsDone: - if X86 - loadp extraStackSpace+24[sp], vmTopCallFrame - end - storep cfr, [vmTopCallFrame] + storep sp, VM::topCallFrame[vm] + storep cfr, VM::topVMEntryFrame[vm] - makeCall(entry, temp1) + makeCall(entry, t3, t4) - bpeq CodeBlock[cfr], 1, .calleeFramePopped - loadp CallerFrame[cfr], cfr + if ARMv7 + vmEntryRecord(cfr, t3) + move t3, sp + else + vmEntryRecord(cfr, sp) + end -.calleeFramePopped: - loadp Callee[cfr], temp3 # VM.topCallFrame - loadp ScopeChain[cfr], temp4 - storep temp4, [temp3] + loadp VMEntryRecord::m_vm[sp], t5 + loadp VMEntryRecord::m_prevTopCallFrame[sp], t4 + storep t4, VM::topCallFrame[t5] + loadp VMEntryRecord::m_prevTopVMEntryFrame[sp], t4 + storep t4, VM::topVMEntryFrame[t5] - doReturn(extraStackSpace) -end + if ARMv7 + subp cfr, CalleeRegisterSaveSize, t5 + move t5, sp + else + subp cfr, CalleeRegisterSaveSize, sp + end -macro makeJavaScriptCall(entry, temp) - call entry + popCalleeSaves() + functionEpilogue() + ret end -macro makeHostFunctionCall(entry, temp) - move entry, temp - if X86 - # Put cfr on stack as arg0, also put it in ecx for "fastcall" targets - poke cfr, 0 - move cfr, t2 +macro makeJavaScriptCall(entry, temp, unused) + addp CallerFrameAndPCSize, sp + checkStackPointerAlignment(temp, 0xbad0dc02) + if C_LOOP + cloopCallJSFunction entry else - move cfr, a0 + call entry + end + checkStackPointerAlignment(temp, 0xbad0dc03) + subp CallerFrameAndPCSize, sp +end + +macro makeHostFunctionCall(entry, temp1, temp2) + move entry, temp1 + storep cfr, [sp] + if C_LOOP + move sp, a0 + storep lr, PtrSize[sp] + cloopCallNative temp1 + elsif X86 or X86_WIN + # Put callee frame pointer on stack as arg0, also put it in ecx for "fastcall" targets + move 0, temp2 + move temp2, 4[sp] # put 0 in ReturnPC + move sp, a0 # a0 is ecx + push temp2 # Push dummy arg1 + push a0 + call temp1 + addp 8, sp + else + move sp, a0 + call temp1 end - call temp end -macro doReturnFromJavaScript(extraStackSpace) -_returnFromJavaScript: - functionEpilogue(extraStackSpace) +_handleUncaughtException: + loadp Callee + PayloadOffset[cfr], t3 + andp MarkedBlockMask, t3 + loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3 + restoreCalleeSavesFromVMCalleeSavesBuffer(t3, t0) + loadp VM::callFrameForCatch[t3], cfr + storep 0, VM::callFrameForCatch[t3] + + loadp CallerFrame[cfr], cfr + + if ARMv7 + vmEntryRecord(cfr, t3) + move t3, sp + else + vmEntryRecord(cfr, sp) + end + + loadp VMEntryRecord::m_vm[sp], t3 + loadp VMEntryRecord::m_prevTopCallFrame[sp], t5 + storep t5, 
VM::topCallFrame[t3] + loadp VMEntryRecord::m_prevTopVMEntryFrame[sp], t5 + storep t5, VM::topVMEntryFrame[t3] + + if ARMv7 + subp cfr, CalleeRegisterSaveSize, t3 + move t3, sp + else + subp cfr, CalleeRegisterSaveSize, sp + end + + popCalleeSaves() + functionEpilogue() ret -end macro doReturnFromHostFunction(extraStackSpace) functionEpilogue(extraStackSpace) @@ -359,33 +342,43 @@ end # debugging from. operand should likewise be an immediate, and should identify the operand # in the instruction stream you'd like to print out. macro traceOperand(fromWhere, operand) - cCall4(_llint_trace_operand, cfr, PC, fromWhere, operand) - move t0, PC - move t1, cfr + move fromWhere, a2 + move operand, a3 + move cfr, a0 + move PC, a1 + cCall4(_llint_trace_operand) + move r0, PC + move r1, cfr end # Debugging operation if you'd like to print the value of an operand in the instruction # stream. Same as traceOperand(), but assumes that the operand is a register, and prints its # value. macro traceValue(fromWhere, operand) - cCall4(_llint_trace_value, cfr, PC, fromWhere, operand) - move t0, PC - move t1, cfr + move fromWhere, a2 + move operand, a3 + move cfr, a0 + move PC, a1 + cCall4(_llint_trace_value) + move r0, PC + move r1, cfr end # Call a slowPath for call opcodes. macro callCallSlowPath(slowPath, action) storep PC, ArgumentCount + TagOffset[cfr] - cCall2(slowPath, cfr, PC) - move t1, cfr - action(t0) + move cfr, a0 + move PC, a1 + cCall2(slowPath) + action(r0, r1) end macro callWatchdogTimerHandler(throwHandler) storei PC, ArgumentCount + TagOffset[cfr] - cCall2(_llint_slow_path_handle_watchdog_timer, cfr, PC) - move t1, cfr - btpnz t0, throwHandler + move cfr, a0 + move PC, a1 + cCall2(_llint_slow_path_handle_watchdog_timer) + btpnz r0, throwHandler loadi ArgumentCount + TagOffset[cfr], PC end @@ -394,10 +387,12 @@ macro checkSwitchToJITForLoop() 1, macro () storei PC, ArgumentCount + TagOffset[cfr] - cCall2(_llint_loop_osr, cfr, PC) - move t1, cfr - btpz t0, .recover - jmp t0 + move cfr, a0 + move PC, a1 + cCall2(_llint_loop_osr) + btpz r0, .recover + move r1, sp + jmp r0 .recover: loadi ArgumentCount + TagOffset[cfr], PC end) @@ -491,56 +486,79 @@ macro loadConstantOrVariablePayloadUnchecked(index, payload) payload) end +macro storeStructureWithTypeInfo(cell, structure, scratch) + storep structure, JSCell::m_structureID[cell] + + loadi Structure::m_blob + StructureIDBlob::u.words.word2[structure], scratch + storei scratch, JSCell::m_indexingType[cell] +end + macro writeBarrierOnOperand(cellOperand) - if GGC - loadisFromInstruction(cellOperand, t1) - loadConstantOrVariablePayload(t1, CellTag, t2, .writeBarrierDone) - checkMarkByte(t2, t1, t3, - macro(marked) - btbz marked, .writeBarrierDone - push cfr, PC - # We make two extra slots because cCall2 will poke. - subp 8, sp - cCall2(_llint_write_barrier_slow, cfr, t2) - addp 8, sp - pop PC, cfr - end - ) - .writeBarrierDone: - end + loadisFromInstruction(cellOperand, t1) + loadConstantOrVariablePayload(t1, CellTag, t2, .writeBarrierDone) + skipIfIsRememberedOrInEden(t2, t1, t3, + macro(cellState) + btbnz cellState, .writeBarrierDone + push cfr, PC + # We make two extra slots because cCall2 will poke. 
+ subp 8, sp + move t2, a1 # t2 can be a0 on x86 + move cfr, a0 + cCall2Void(_llint_write_barrier_slow) + addp 8, sp + pop PC, cfr + end + ) +.writeBarrierDone: end macro writeBarrierOnOperands(cellOperand, valueOperand) - if GGC - loadisFromInstruction(valueOperand, t1) - loadConstantOrVariableTag(t1, t0) - bineq t0, CellTag, .writeBarrierDone - - writeBarrierOnOperand(cellOperand) - .writeBarrierDone: - end + loadisFromInstruction(valueOperand, t1) + loadConstantOrVariableTag(t1, t0) + bineq t0, CellTag, .writeBarrierDone + + writeBarrierOnOperand(cellOperand) +.writeBarrierDone: +end + +macro writeBarrierOnGlobal(valueOperand, loadHelper) + loadisFromInstruction(valueOperand, t1) + loadConstantOrVariableTag(t1, t0) + bineq t0, CellTag, .writeBarrierDone + + loadHelper(t3) + + skipIfIsRememberedOrInEden(t3, t1, t2, + macro(gcData) + btbnz gcData, .writeBarrierDone + push cfr, PC + # We make two extra slots because cCall2 will poke. + subp 8, sp + move cfr, a0 + move t3, a1 + cCall2Void(_llint_write_barrier_slow) + addp 8, sp + pop PC, cfr + end + ) +.writeBarrierDone: end macro writeBarrierOnGlobalObject(valueOperand) - if GGC - loadisFromInstruction(valueOperand, t1) - bineq t0, CellTag, .writeBarrierDone - - loadp CodeBlock[cfr], t3 - loadp CodeBlock::m_globalObject[t3], t3 - checkMarkByte(t3, t1, t2, - macro(marked) - btbz marked, .writeBarrierDone - push cfr, PC - # We make two extra slots because cCall2 will poke. - subp 8, sp - cCall2(_llint_write_barrier_slow, cfr, t3) - addp 8, sp - pop PC, cfr - end - ) - .writeBarrierDone: - end + writeBarrierOnGlobal(valueOperand, + macro(registerToStoreGlobal) + loadp CodeBlock[cfr], registerToStoreGlobal + loadp CodeBlock::m_globalObject[registerToStoreGlobal], registerToStoreGlobal + end) +end + +macro writeBarrierOnGlobalLexicalEnvironment(valueOperand) + writeBarrierOnGlobal(valueOperand, + macro(registerToStoreGlobal) + loadp CodeBlock[cfr], registerToStoreGlobal + loadp CodeBlock::m_globalObject[registerToStoreGlobal], registerToStoreGlobal + loadp JSGlobalObject::m_globalLexicalEnvironment[registerToStoreGlobal], registerToStoreGlobal + end) end macro valueProfile(tag, payload, operand, scratch) @@ -553,22 +571,53 @@ end # Entrypoints into the interpreter # Expects that CodeBlock is in t1, which is what prologue() leaves behind. -macro functionArityCheck(doneLabel, slow_path) +macro functionArityCheck(doneLabel, slowPath) loadi PayloadOffset + ArgumentCount[cfr], t0 biaeq t0, CodeBlock::m_numParameters[t1], doneLabel - cCall2(slow_path, cfr, PC) # This slow_path has a simple protocol: t0 = 0 => no error, t0 != 0 => error - btiz t0, .isArityFixupNeeded - move t1, cfr # t1 contains caller frame + move cfr, a0 + move PC, a1 + cCall2(slowPath) # This slowPath has a simple protocol: t0 = 0 => no error, t0 != 0 => error + btiz r0, .noError + move r1, cfr # r1 contains caller frame jmp _llint_throw_from_slow_path_trampoline -.isArityFixupNeeded: +.noError: + # r1 points to ArityCheckData. 
+ loadp CommonSlowPaths::ArityCheckData::thunkToCall[r1], t3 + btpz t3, .proceedInline + + loadp CommonSlowPaths::ArityCheckData::paddedStackSpace[r1], a0 + call t3 + if ASSERT_ENABLED + loadp ReturnPC[cfr], t0 + loadp [t0], t0 + end + jmp .continue + +.proceedInline: + loadi CommonSlowPaths::ArityCheckData::paddedStackSpace[r1], t1 + btiz t1, .continue + loadi PayloadOffset + ArgumentCount[cfr], t2 + addi CallFrameHeaderSlots, t2 + + // Check if there are some unaligned slots we can use + move t1, t3 + andi StackAlignmentSlots - 1, t3 + btiz t3, .noExtraSlot +.fillExtraSlots: + move 0, t0 + storei t0, PayloadOffset[cfr, t2, 8] + move UndefinedTag, t0 + storei t0, TagOffset[cfr, t2, 8] + addi 1, t2 + bsubinz 1, t3, .fillExtraSlots + andi ~(StackAlignmentSlots - 1), t1 btiz t1, .continue - // Move frame up "t1" slots +.noExtraSlot: + // Move frame up t1 slots negi t1 move cfr, t3 - loadi PayloadOffset + ArgumentCount[cfr], t2 - addi CallFrameHeaderSlots, t2 .copyLoop: loadi PayloadOffset[t3], t0 storei t0, PayloadOffset[t3, t1, 8] @@ -589,6 +638,7 @@ macro functionArityCheck(doneLabel, slow_path) lshiftp 3, t1 addp t1, cfr + addp t1, sp .continue: # Reload CodeBlock and PC, since the slow_path clobbered it. loadp CodeBlock[cfr], t1 @@ -596,12 +646,11 @@ macro functionArityCheck(doneLabel, slow_path) jmp doneLabel end - macro branchIfException(label) - loadp ScopeChain[cfr], t3 + loadp Callee + PayloadOffset[cfr], t3 andp MarkedBlockMask, t3 loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3 - bieq VM::m_exception + TagOffset[t3], EmptyValueTag, .noException + btiz VM::m_exception[t3], .noException jmp label .noException: end @@ -611,6 +660,7 @@ end _llint_op_enter: traceExecution() + checkStackPointerAlignment(t2, 0xdead00e1) loadp CodeBlock[cfr], t2 // t2<CodeBlock> = cfr.CodeBlock loadi CodeBlock::m_numVars[t2], t2 // t2<size_t> = t2<CodeBlock>.m_numVars btiz t2, .opEnterDone @@ -627,29 +677,13 @@ _llint_op_enter: dispatch(1) -_llint_op_create_activation: - traceExecution() - loadi 4[PC], t0 - bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opCreateActivationDone - callSlowPath(_llint_slow_path_create_activation) -.opCreateActivationDone: - dispatch(2) - - -_llint_op_init_lazy_reg: - traceExecution() - loadi 4[PC], t0 - storei EmptyValueTag, TagOffset[cfr, t0, 8] - storei 0, PayloadOffset[cfr, t0, 8] - dispatch(2) - - -_llint_op_create_arguments: +_llint_op_get_scope: traceExecution() - loadi 4[PC], t0 - bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opCreateArgumentsDone - callSlowPath(_slow_path_create_arguments) -.opCreateArgumentsDone: + loadi Callee + PayloadOffset[cfr], t0 + loadi JSCallee::m_scope[t0], t0 + loadisFromInstruction(1, t1) + storei CellTag, TagOffset[cfr, t1, 8] + storei t0, PayloadOffset[cfr, t1, 8] dispatch(2) @@ -657,48 +691,39 @@ _llint_op_create_this: traceExecution() loadi 8[PC], t0 loadp PayloadOffset[cfr, t0, 8], t0 - loadp JSFunction::m_allocationProfile + ObjectAllocationProfile::m_allocator[t0], t1 - loadp JSFunction::m_allocationProfile + ObjectAllocationProfile::m_structure[t0], t2 + loadp JSFunction::m_rareData[t0], t5 + btpz t5, .opCreateThisSlow + loadp FunctionRareData::m_objectAllocationProfile + ObjectAllocationProfile::m_allocator[t5], t1 + loadp FunctionRareData::m_objectAllocationProfile + ObjectAllocationProfile::m_structure[t5], t2 btpz t1, .opCreateThisSlow + loadpFromInstruction(4, t5) + bpeq t5, 1, .hasSeenMultipleCallee + bpneq t5, t0, .opCreateThisSlow +.hasSeenMultipleCallee: allocateJSObject(t1, t2, t0, t3, .opCreateThisSlow) loadi 4[PC], t1 
storei CellTag, TagOffset[cfr, t1, 8] storei t0, PayloadOffset[cfr, t1, 8] - dispatch(4) + dispatch(5) .opCreateThisSlow: callSlowPath(_slow_path_create_this) - dispatch(4) - - -_llint_op_get_callee: - traceExecution() - loadi 4[PC], t0 - loadp PayloadOffset + Callee[cfr], t1 - loadpFromInstruction(2, t2) - bpneq t1, t2, .opGetCalleeSlow - storei CellTag, TagOffset[cfr, t0, 8] - storei t1, PayloadOffset[cfr, t0, 8] - dispatch(3) + dispatch(5) -.opGetCalleeSlow: - callSlowPath(_slow_path_get_callee) - dispatch(3) _llint_op_to_this: traceExecution() loadi 4[PC], t0 bineq TagOffset[cfr, t0, 8], CellTag, .opToThisSlow loadi PayloadOffset[cfr, t0, 8], t0 - loadp JSCell::m_structure[t0], t0 - bbneq Structure::m_typeInfo + TypeInfo::m_type[t0], FinalObjectType, .opToThisSlow + bbneq JSCell::m_type[t0], FinalObjectType, .opToThisSlow loadpFromInstruction(2, t2) - bpneq t0, t2, .opToThisSlow - dispatch(3) + bpneq JSCell::m_structureID[t0], t2, .opToThisSlow + dispatch(4) .opToThisSlow: callSlowPath(_slow_path_to_this) - dispatch(3) + dispatch(4) _llint_op_new_object: @@ -717,53 +742,25 @@ _llint_op_new_object: dispatch(4) -_llint_op_mov: +_llint_op_check_tdz: traceExecution() - loadi 8[PC], t1 - loadi 4[PC], t0 - loadConstantOrVariable(t1, t2, t3) - storei t2, TagOffset[cfr, t0, 8] - storei t3, PayloadOffset[cfr, t0, 8] - dispatch(3) - - -macro notifyWrite(set, valueTag, valuePayload, scratch, slow) - loadb VariableWatchpointSet::m_state[set], scratch - bieq scratch, IsInvalidated, .done - bineq scratch, ClearWatchpoint, .overwrite - storei valueTag, VariableWatchpointSet::m_inferredValue + TagOffset[set] - storei valuePayload, VariableWatchpointSet::m_inferredValue + PayloadOffset[set] - storeb IsWatched, VariableWatchpointSet::m_state[set] - jmp .done + loadisFromInstruction(1, t0) + loadConstantOrVariableTag(t0, t1) + bineq t1, EmptyValueTag, .opNotTDZ + callSlowPath(_slow_path_throw_tdz_error) -.overwrite: - bineq valuePayload, VariableWatchpointSet::m_inferredValue + PayloadOffset[set], .definitelyDifferent - bieq valueTag, VariableWatchpointSet::m_inferredValue + TagOffset[set], .done -.definitelyDifferent: - btbnz VariableWatchpointSet::m_setIsNotEmpty[set], slow - storei EmptyValueTag, VariableWatchpointSet::m_inferredValue + TagOffset[set] - storei 0, VariableWatchpointSet::m_inferredValue + PayloadOffset[set] - storeb IsInvalidated, VariableWatchpointSet::m_state[set] +.opNotTDZ: + dispatch(2) -.done: -end -_llint_op_captured_mov: +_llint_op_mov: traceExecution() loadi 8[PC], t1 - loadConstantOrVariable(t1, t2, t3) - loadpFromInstruction(3, t0) - btpz t0, .opCapturedMovReady - notifyWrite(t0, t2, t3, t1, .opCapturedMovSlow) -.opCapturedMovReady: loadi 4[PC], t0 + loadConstantOrVariable(t1, t2, t3) storei t2, TagOffset[cfr, t0, 8] storei t3, PayloadOffset[cfr, t0, 8] - dispatch(4) - -.opCapturedMovSlow: - callSlowPath(_slow_path_captured_mov) - dispatch(4) + dispatch(3) _llint_op_not: @@ -810,11 +807,11 @@ _llint_op_eq_null: loadi TagOffset[cfr, t0, 8], t1 loadi PayloadOffset[cfr, t0, 8], t0 bineq t1, CellTag, .opEqNullImmediate - loadp JSCell::m_structure[t0], t1 - btbnz Structure::m_typeInfo + TypeInfo::m_flags[t1], MasqueradesAsUndefined, .opEqNullMasqueradesAsUndefined + btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .opEqNullMasqueradesAsUndefined move 0, t1 jmp .opEqNullNotImmediate .opEqNullMasqueradesAsUndefined: + loadp JSCell::m_structureID[t0], t1 loadp CodeBlock[cfr], t0 loadp CodeBlock::m_globalObject[t0], t0 cpeq Structure::m_globalObject[t1], t0, t1 @@ -857,11 +854,11 @@ 
_llint_op_neq_null: loadi TagOffset[cfr, t0, 8], t1 loadi PayloadOffset[cfr, t0, 8], t0 bineq t1, CellTag, .opNeqNullImmediate - loadp JSCell::m_structure[t0], t1 - btbnz Structure::m_typeInfo + TypeInfo::m_flags[t1], MasqueradesAsUndefined, .opNeqNullMasqueradesAsUndefined + btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .opNeqNullMasqueradesAsUndefined move 1, t1 jmp .opNeqNullNotImmediate .opNeqNullMasqueradesAsUndefined: + loadp JSCell::m_structureID[t0], t1 loadp CodeBlock[cfr], t0 loadp CodeBlock::m_globalObject[t0], t0 cpneq Structure::m_globalObject[t1], t0, t1 @@ -883,12 +880,10 @@ macro strictEq(equalityOperation, slowPath) loadConstantOrVariable2Reg(t0, t2, t0) bineq t2, t3, .slow bib t2, LowestTag, .slow - bineq t2, CellTag, .notString - loadp JSCell::m_structure[t0], t2 - loadp JSCell::m_structure[t1], t3 - bbneq Structure::m_typeInfo + TypeInfo::m_type[t2], StringType, .notString - bbeq Structure::m_typeInfo + TypeInfo::m_type[t3], StringType, .slow -.notString: + bineq t2, CellTag, .notStringOrSymbol + bbaeq JSCell::m_type[t0], ObjectType, .notStringOrSymbol + bbb JSCell::m_type[t1], ObjectType, .slow +.notStringOrSymbol: loadi 4[PC], t2 equalityOperation(t0, t1, t0) storei BooleanTag, TagOffset[cfr, t2, 8] @@ -955,6 +950,23 @@ _llint_op_to_number: dispatch(3) +_llint_op_to_string: + traceExecution() + loadi 8[PC], t0 + loadi 4[PC], t1 + loadConstantOrVariable(t0, t2, t3) + bineq t2, CellTag, .opToStringSlow + bbneq JSCell::m_type[t3], StringType, .opToStringSlow +.opToStringIsString: + storei t2, TagOffset[cfr, t1, 8] + storei t3, PayloadOffset[cfr, t1, 8] + dispatch(3) + +.opToStringSlow: + callSlowPath(_slow_path_to_string) + dispatch(3) + + _llint_op_negate: traceExecution() loadi 8[PC], t0 @@ -1163,18 +1175,35 @@ _llint_op_bitor: 5) -_llint_op_check_has_instance: +_llint_op_overrides_has_instance: traceExecution() - loadi 12[PC], t1 - loadConstantOrVariablePayload(t1, CellTag, t0, .opCheckHasInstanceSlow) - loadp JSCell::m_structure[t0], t0 - btbz Structure::m_typeInfo + TypeInfo::m_flags[t0], ImplementsDefaultHasInstance, .opCheckHasInstanceSlow - dispatch(5) -.opCheckHasInstanceSlow: - callSlowPath(_llint_slow_path_check_has_instance) - dispatch(0) + loadisFromInstruction(1, t3) + storei BooleanTag, TagOffset[cfr, t3, 8] + + # First check if hasInstanceValue is the one on Function.prototype[Symbol.hasInstance] + loadisFromInstruction(3, t0) + loadConstantOrVariablePayload(t0, CellTag, t2, .opOverrideshasInstanceValueNotCell) + loadConstantOrVariable(t0, t1, t2) + bineq t1, CellTag, .opOverrideshasInstanceValueNotCell + # We don't need hasInstanceValue's tag register anymore. + loadp CodeBlock[cfr], t1 + loadp CodeBlock::m_globalObject[t1], t1 + loadp JSGlobalObject::m_functionProtoHasInstanceSymbolFunction[t1], t1 + bineq t1, t2, .opOverrideshasInstanceValueNotDefault + + # We know the constructor is a cell. 
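# For reference, a rough C++ sketch of the decision op_overrides_has_instance makes on
# its fast path. The struct names and the flag value are stand-ins invented for the
# sketch, not the real JSC declarations:

    #include <cstdint>

    struct JSCellModel { uint8_t typeInfoFlags; };
    struct JSValueModel { bool isCell; JSCellModel* cell; };
    constexpr uint8_t ImplementsDefaultHasInstance = 1 << 1;  // placeholder bit

    // overrides_has_instance: must `instanceof` take the generic path?
    bool overridesHasInstance(JSValueModel hasInstanceValue, JSCellModel* constructor,
                              JSCellModel* defaultHasInstanceFunction)
    {
        // Only the original Function.prototype[Symbol.hasInstance] is recognised here.
        if (!hasInstanceValue.isCell || hasInstanceValue.cell != defaultHasInstanceFunction)
            return true;
        // With the default hasInstance, the answer depends only on whether the
        // constructor cell carries the ImplementsDefaultHasInstance type-info flag.
        return !(constructor->typeInfoFlags & ImplementsDefaultHasInstance);
    }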
+ loadisFromInstruction(2, t0) + loadConstantOrVariablePayloadUnchecked(t0, t1) + tbz JSCell::m_flags[t1], ImplementsDefaultHasInstance, t0 + storei t0, PayloadOffset[cfr, t3, 8] + dispatch(4) + +.opOverrideshasInstanceValueNotCell: +.opOverrideshasInstanceValueNotDefault: + storei 1, PayloadOffset[cfr, t3, 8] + dispatch(4) _llint_op_instanceof: traceExecution() @@ -1182,15 +1211,14 @@ _llint_op_instanceof: loadi 12[PC], t0 loadi 4[PC], t3 loadConstantOrVariablePayload(t0, CellTag, t1, .opInstanceofSlow) - loadp JSCell::m_structure[t1], t2 - bbb Structure::m_typeInfo + TypeInfo::m_type[t2], ObjectType, .opInstanceofSlow + bbb JSCell::m_type[t1], ObjectType, .opInstanceofSlow loadi 8[PC], t0 loadConstantOrVariablePayload(t0, CellTag, t2, .opInstanceofSlow) # Register state: t1 = prototype, t2 = value move 1, t0 .opInstanceofLoop: - loadp JSCell::m_structure[t2], t2 + loadp JSCell::m_structureID[t2], t2 loadi Structure::m_prototype + PayloadOffset[t2], t2 bpeq t2, t1, .opInstanceofDone btinz t2, .opInstanceofLoop @@ -1205,6 +1233,11 @@ _llint_op_instanceof: callSlowPath(_llint_slow_path_instanceof) dispatch(4) +_llint_op_instanceof_custom: + traceExecution() + callSlowPath(_llint_slow_path_instanceof_custom) + dispatch(5) + _llint_op_is_undefined: traceExecution() @@ -1217,12 +1250,12 @@ _llint_op_is_undefined: storei t3, PayloadOffset[cfr, t0, 8] dispatch(3) .opIsUndefinedCell: - loadp JSCell::m_structure[t3], t1 - btbnz Structure::m_typeInfo + TypeInfo::m_flags[t1], MasqueradesAsUndefined, .opIsUndefinedMasqueradesAsUndefined + btbnz JSCell::m_flags[t3], MasqueradesAsUndefined, .opIsUndefinedMasqueradesAsUndefined move 0, t1 storei t1, PayloadOffset[cfr, t0, 8] dispatch(3) .opIsUndefinedMasqueradesAsUndefined: + loadp JSCell::m_structureID[t3], t1 loadp CodeBlock[cfr], t3 loadp CodeBlock::m_globalObject[t3], t3 cpeq Structure::m_globalObject[t1], t3, t1 @@ -1260,8 +1293,7 @@ _llint_op_is_string: loadConstantOrVariable(t1, t0, t3) storei BooleanTag, TagOffset[cfr, t2, 8] bineq t0, CellTag, .opIsStringNotCell - loadp JSCell::m_structure[t3], t0 - cbeq Structure::m_typeInfo + TypeInfo::m_type[t0], StringType, t1 + cbeq JSCell::m_type[t3], StringType, t1 storei t1, PayloadOffset[cfr, t2, 8] dispatch(3) .opIsStringNotCell: @@ -1269,6 +1301,21 @@ _llint_op_is_string: dispatch(3) +_llint_op_is_object: + traceExecution() + loadi 8[PC], t1 + loadi 4[PC], t2 + loadConstantOrVariable(t1, t0, t3) + storei BooleanTag, TagOffset[cfr, t2, 8] + bineq t0, CellTag, .opIsObjectNotCell + cbaeq JSCell::m_type[t3], ObjectType, t1 + storei t1, PayloadOffset[cfr, t2, 8] + dispatch(3) +.opIsObjectNotCell: + storep 0, PayloadOffset[cfr, t2, 8] + dispatch(3) + + macro loadPropertyAtVariableOffsetKnownNotInline(propertyOffset, objectAndStorage, tag, payload) assert(macro (ok) bigteq propertyOffset, firstOutOfLineOffset, ok end) negi propertyOffset @@ -1302,54 +1349,29 @@ macro storePropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, tag, end -_llint_op_init_global_const: - traceExecution() - writeBarrierOnGlobalObject(2) - loadi 8[PC], t1 - loadi 4[PC], t0 - loadConstantOrVariable(t1, t2, t3) - storei t2, TagOffset[t0] - storei t3, PayloadOffset[t0] - dispatch(5) - - # We only do monomorphic get_by_id caching for now, and we do not modify the # opcode. We do, however, allow for the cache to change anytime if fails, since # ping-ponging is free. At best we get lucky and the get_by_id will continue # to take fast path on the new cache. 
At worst we take slow path, which is what # we would have been doing anyway. -macro getById(getPropertyStorage) +_llint_op_get_by_id: traceExecution() loadi 8[PC], t0 loadi 16[PC], t1 loadConstantOrVariablePayload(t0, CellTag, t3, .opGetByIdSlow) loadi 20[PC], t2 - getPropertyStorage( - t3, - t0, - macro (propertyStorage, scratch) - bpneq JSCell::m_structure[t3], t1, .opGetByIdSlow - loadi 4[PC], t1 - loadi TagOffset[propertyStorage, t2], scratch - loadi PayloadOffset[propertyStorage, t2], t2 - storei scratch, TagOffset[cfr, t1, 8] - storei t2, PayloadOffset[cfr, t1, 8] - valueProfile(scratch, t2, 32, t1) - dispatch(9) - end) - - .opGetByIdSlow: - callSlowPath(_llint_slow_path_get_by_id) - dispatch(9) -end - -_llint_op_get_by_id: - getById(withInlineStorage) - + bineq JSCell::m_structureID[t3], t1, .opGetByIdSlow + loadPropertyAtVariableOffset(t2, t3, t0, t1) + loadi 4[PC], t2 + storei t0, TagOffset[cfr, t2, 8] + storei t1, PayloadOffset[cfr, t2, 8] + valueProfile(t0, t1, 32, t2) + dispatch(9) -_llint_op_get_by_id_out_of_line: - getById(withOutOfLineStorage) +.opGetByIdSlow: + callSlowPath(_llint_slow_path_get_by_id) + dispatch(9) _llint_op_get_array_length: @@ -1357,7 +1379,7 @@ _llint_op_get_array_length: loadi 8[PC], t0 loadp 16[PC], t1 loadConstantOrVariablePayload(t0, CellTag, t3, .opGetArrayLengthSlow) - loadp JSCell::m_structure[t3], t2 + move t3, t2 arrayProfile(t2, t1, t0) btiz t2, IsArray, .opGetArrayLengthSlow btiz t2, IndexingShapeMask, .opGetArrayLengthSlow @@ -1375,119 +1397,142 @@ _llint_op_get_array_length: dispatch(9) -_llint_op_get_arguments_length: - traceExecution() - loadi 8[PC], t0 - loadi 4[PC], t1 - bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opGetArgumentsLengthSlow - loadi ArgumentCount + PayloadOffset[cfr], t2 - subi 1, t2 - storei Int32Tag, TagOffset[cfr, t1, 8] - storei t2, PayloadOffset[cfr, t1, 8] - dispatch(4) - -.opGetArgumentsLengthSlow: - callSlowPath(_llint_slow_path_get_arguments_length) - dispatch(4) - - -macro putById(getPropertyStorage) +_llint_op_put_by_id: traceExecution() writeBarrierOnOperands(1, 3) loadi 4[PC], t3 - loadi 16[PC], t1 loadConstantOrVariablePayload(t3, CellTag, t0, .opPutByIdSlow) - loadi 12[PC], t2 - getPropertyStorage( - t0, - t3, - macro (propertyStorage, scratch) - bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow - loadi 20[PC], t1 - loadConstantOrVariable2Reg(t2, scratch, t2) - storei scratch, TagOffset[propertyStorage, t1] - storei t2, PayloadOffset[propertyStorage, t1] - dispatch(9) - end) -end + loadi JSCell::m_structureID[t0], t2 + bineq t2, 16[PC], .opPutByIdSlow -_llint_op_put_by_id: - putById(withInlineStorage) + # At this point, we have: + # t2 -> currentStructureID + # t0 -> object base + # We will lose currentStructureID in the shenanigans below. + + loadi 12[PC], t1 + loadConstantOrVariable(t1, t2, t3) + loadi 32[PC], t1 + + # At this point, we have: + # t0 -> object base + # t1 -> put by id flags + # t2 -> value tag + # t3 -> value payload + + btinz t1, PutByIdPrimaryTypeMask, .opPutByIdTypeCheckObjectWithStructureOrOther + + # We have one of the non-structure type checks. Find out which one. + andi PutByIdSecondaryTypeMask, t1 + bilt t1, PutByIdSecondaryTypeString, .opPutByIdTypeCheckLessThanString + + # We are one of the following: String, Symbol, Object, ObjectOrOther, Top + bilt t1, PutByIdSecondaryTypeObjectOrOther, .opPutByIdTypeCheckLessThanObjectOrOther + + # We are either ObjectOrOther or Top. + bieq t1, PutByIdSecondaryTypeTop, .opPutByIdDoneCheckingTypes + + # Check if we are ObjectOrOther. 
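# In C++ terms, the check carried out by the next few instructions is roughly the
# following (tag names and the ObjectType value are simplified stand-ins for the
# 32-bit value representation, not the real JSC definitions):

    #include <cstdint>

    enum Tag : uint32_t { CellTag, NullTag, UndefinedTag, BooleanTag, Int32Tag };
    struct JSCellModel { uint8_t type; };
    constexpr uint8_t ObjectType = 20;  // placeholder; real object types compare >= ObjectType

    // The stored value must be an object (.opPutByIdTypeCheckObject) or
    // null/undefined (.opPutByIdTypeCheckOther); anything else goes to .opPutByIdSlow.
    bool passesObjectOrOtherCheck(Tag tag, JSCellModel* payloadAsCell)
    {
        if (tag == CellTag)
            return payloadAsCell->type >= ObjectType;
        return tag == NullTag || tag == UndefinedTag;
    }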
+ bieq t2, CellTag, .opPutByIdTypeCheckObject +.opPutByIdTypeCheckOther: + bieq t2, NullTag, .opPutByIdDoneCheckingTypes + bieq t2, UndefinedTag, .opPutByIdDoneCheckingTypes + jmp .opPutByIdSlow + +.opPutByIdTypeCheckLessThanObjectOrOther: + # We are either String, Symbol or Object. + bineq t2, CellTag, .opPutByIdSlow + bieq t1, PutByIdSecondaryTypeObject, .opPutByIdTypeCheckObject + bieq t1, PutByIdSecondaryTypeSymbol, .opPutByIdTypeCheckSymbol + bbeq JSCell::m_type[t3], StringType, .opPutByIdDoneCheckingTypes + jmp .opPutByIdSlow +.opPutByIdTypeCheckObject: + bbaeq JSCell::m_type[t3], ObjectType, .opPutByIdDoneCheckingTypes + jmp .opPutByIdSlow +.opPutByIdTypeCheckSymbol: + bbeq JSCell::m_type[t3], SymbolType, .opPutByIdDoneCheckingTypes + jmp .opPutByIdSlow + +.opPutByIdTypeCheckLessThanString: + # We are one of the following: Bottom, Boolean, Other, Int32, Number. + bilt t1, PutByIdSecondaryTypeInt32, .opPutByIdTypeCheckLessThanInt32 + + # We are either Int32 or Number. + bieq t1, PutByIdSecondaryTypeNumber, .opPutByIdTypeCheckNumber + + bieq t2, Int32Tag, .opPutByIdDoneCheckingTypes + jmp .opPutByIdSlow + +.opPutByIdTypeCheckNumber: + bib t2, LowestTag + 1, .opPutByIdDoneCheckingTypes + jmp .opPutByIdSlow + +.opPutByIdTypeCheckLessThanInt32: + # We are one of the following: Bottom, Boolean, Other + bineq t1, PutByIdSecondaryTypeBoolean, .opPutByIdTypeCheckBottomOrOther + bieq t2, BooleanTag, .opPutByIdDoneCheckingTypes + jmp .opPutByIdSlow + +.opPutByIdTypeCheckBottomOrOther: + bieq t1, PutByIdSecondaryTypeOther, .opPutByIdTypeCheckOther + jmp .opPutByIdSlow + +.opPutByIdTypeCheckObjectWithStructureOrOther: + bieq t2, CellTag, .opPutByIdTypeCheckObjectWithStructure + btinz t1, PutByIdPrimaryTypeObjectWithStructureOrOther, .opPutByIdTypeCheckOther + jmp .opPutByIdSlow + +.opPutByIdTypeCheckObjectWithStructure: + andi PutByIdSecondaryTypeMask, t1 + bineq t1, JSCell::m_structureID[t3], .opPutByIdSlow + +.opPutByIdDoneCheckingTypes: + loadi 24[PC], t1 + + btiz t1, .opPutByIdNotTransition + + # This is the transition case. t1 holds the new Structure*. If we have a chain, we need to + # check it. t0 is the base. We may clobber t1 to use it as scratch. + loadp 28[PC], t3 + btpz t3, .opPutByIdTransitionDirect + + loadi 16[PC], t2 # Need old structure again. + loadp StructureChain::m_vector[t3], t3 + assert(macro (ok) btpnz t3, ok end) + + loadp Structure::m_prototype[t2], t2 + btpz t2, .opPutByIdTransitionChainDone +.opPutByIdTransitionChainLoop: + loadp [t3], t1 + bpneq t1, JSCell::m_structureID[t2], .opPutByIdSlow + addp 4, t3 + loadp Structure::m_prototype[t1], t2 + btpnz t2, .opPutByIdTransitionChainLoop + +.opPutByIdTransitionChainDone: + loadi 24[PC], t1 + +.opPutByIdTransitionDirect: + storei t1, JSCell::m_structureID[t0] + +.opPutByIdNotTransition: + # The only thing live right now is t0, which holds the base. 
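# Pulling the preceding checks together, the put_by_id fast path has roughly this
# shape. This is a C++ sketch with invented names; the bracketed comments refer to
# the PC offsets used in the assembly above:

    #include <cstdint>

    struct JSObjectModel { uint32_t structureID; };
    struct PutByIdCacheModel {
        uint32_t oldStructureID;   // 16[PC]
        uint32_t newStructureID;   // 24[PC]; non-zero only for a transition
        void* structureChain;      // 28[PC]; prototype chain to re-validate, may be null
        int32_t cachedOffset;      // 20[PC]; property slot to write
    };

    // Returns false whenever a cached assumption no longer holds, i.e. the cases
    // the assembly labels .opPutByIdSlow.
    template<typename Value, typename StoreAtOffset, typename ChainStillMatches>
    bool tryPutByIdFastPath(JSObjectModel* base, Value value, const PutByIdCacheModel& cache,
                            StoreAtOffset storeAtOffset, ChainStillMatches chainStillMatches)
    {
        if (base->structureID != cache.oldStructureID)
            return false;
        // ...the inferred-type checks on `value` sketched above run here...
        if (cache.newStructureID) {
            if (cache.structureChain && !chainStillMatches(cache.structureChain))
                return false;                              // prototype chain changed
            base->structureID = cache.newStructureID;      // .opPutByIdTransitionDirect
        }
        storeAtOffset(base, cache.cachedOffset, value);    // .opPutByIdNotTransition
        return true;
    }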
+ loadi 12[PC], t1 + loadConstantOrVariable(t1, t2, t3) + loadi 20[PC], t1 + storePropertyAtVariableOffset(t1, t0, t2, t3) + dispatch(9) .opPutByIdSlow: callSlowPath(_llint_slow_path_put_by_id) dispatch(9) -_llint_op_put_by_id_out_of_line: - putById(withOutOfLineStorage) - - -macro putByIdTransition(additionalChecks, getPropertyStorage) - traceExecution() - writeBarrierOnOperand(1) - loadi 4[PC], t3 - loadi 16[PC], t1 - loadConstantOrVariablePayload(t3, CellTag, t0, .opPutByIdSlow) - loadi 12[PC], t2 - bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow - additionalChecks(t1, t3) - loadi 20[PC], t1 - getPropertyStorage( - t0, - t3, - macro (propertyStorage, scratch) - addp t1, propertyStorage, t3 - loadConstantOrVariable2Reg(t2, t1, t2) - storei t1, TagOffset[t3] - loadi 24[PC], t1 - storei t2, PayloadOffset[t3] - storep t1, JSCell::m_structure[t0] - dispatch(9) - end) -end - -macro noAdditionalChecks(oldStructure, scratch) -end - -macro structureChainChecks(oldStructure, scratch) - const protoCell = oldStructure # Reusing the oldStructure register for the proto - - loadp 28[PC], scratch - assert(macro (ok) btpnz scratch, ok end) - loadp StructureChain::m_vector[scratch], scratch - assert(macro (ok) btpnz scratch, ok end) - bieq Structure::m_prototype + TagOffset[oldStructure], NullTag, .done -.loop: - loadi Structure::m_prototype + PayloadOffset[oldStructure], protoCell - loadp JSCell::m_structure[protoCell], oldStructure - bpneq oldStructure, [scratch], .opPutByIdSlow - addp 4, scratch - bineq Structure::m_prototype + TagOffset[oldStructure], NullTag, .loop -.done: -end - -_llint_op_put_by_id_transition_direct: - putByIdTransition(noAdditionalChecks, withInlineStorage) - - -_llint_op_put_by_id_transition_direct_out_of_line: - putByIdTransition(noAdditionalChecks, withOutOfLineStorage) - - -_llint_op_put_by_id_transition_normal: - putByIdTransition(structureChainChecks, withInlineStorage) - - -_llint_op_put_by_id_transition_normal_out_of_line: - putByIdTransition(structureChainChecks, withOutOfLineStorage) - - _llint_op_get_by_val: traceExecution() loadi 8[PC], t2 loadConstantOrVariablePayload(t2, CellTag, t0, .opGetByValSlow) - loadp JSCell::m_structure[t0], t2 + move t0, t2 loadp 16[PC], t3 arrayProfile(t2, t3, t1) loadi 12[PC], t3 @@ -1537,61 +1582,6 @@ _llint_op_get_by_val: dispatch(6) -_llint_op_get_argument_by_val: - # FIXME: At some point we should array profile this. Right now it isn't necessary - # since the DFG will never turn a get_argument_by_val into a GetByVal. 
- traceExecution() - loadi 8[PC], t0 - loadi 12[PC], t1 - bineq TagOffset[cfr, t0, 8], EmptyValueTag, .opGetArgumentByValSlow - loadConstantOrVariablePayload(t1, Int32Tag, t2, .opGetArgumentByValSlow) - addi 1, t2 - loadi ArgumentCount + PayloadOffset[cfr], t1 - biaeq t2, t1, .opGetArgumentByValSlow - loadi 4[PC], t3 - loadi ThisArgumentOffset + TagOffset[cfr, t2, 8], t0 - loadi ThisArgumentOffset + PayloadOffset[cfr, t2, 8], t1 - storei t0, TagOffset[cfr, t3, 8] - storei t1, PayloadOffset[cfr, t3, 8] - valueProfile(t0, t1, 20, t2) - dispatch(6) - -.opGetArgumentByValSlow: - callSlowPath(_llint_slow_path_get_argument_by_val) - dispatch(6) - - -_llint_op_get_by_pname: - traceExecution() - loadi 12[PC], t0 - loadConstantOrVariablePayload(t0, CellTag, t1, .opGetByPnameSlow) - loadi 16[PC], t0 - bpneq t1, PayloadOffset[cfr, t0, 8], .opGetByPnameSlow - loadi 8[PC], t0 - loadConstantOrVariablePayload(t0, CellTag, t2, .opGetByPnameSlow) - loadi 20[PC], t0 - loadi PayloadOffset[cfr, t0, 8], t3 - loadp JSCell::m_structure[t2], t0 - bpneq t0, JSPropertyNameIterator::m_cachedStructure[t3], .opGetByPnameSlow - loadi 24[PC], t0 - loadi [cfr, t0, 8], t0 - subi 1, t0 - biaeq t0, JSPropertyNameIterator::m_numCacheableSlots[t3], .opGetByPnameSlow - bilt t0, JSPropertyNameIterator::m_cachedStructureInlineCapacity[t3], .opGetByPnameInlineProperty - addi firstOutOfLineOffset, t0 - subi JSPropertyNameIterator::m_cachedStructureInlineCapacity[t3], t0 -.opGetByPnameInlineProperty: - loadPropertyAtVariableOffset(t0, t2, t1, t3) - loadi 4[PC], t0 - storei t1, TagOffset[cfr, t0, 8] - storei t3, PayloadOffset[cfr, t0, 8] - dispatch(7) - -.opGetByPnameSlow: - callSlowPath(_llint_slow_path_get_by_pname) - dispatch(7) - - macro contiguousPutByVal(storeCallback) biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .outOfBounds .storeResult: @@ -1608,12 +1598,12 @@ macro contiguousPutByVal(storeCallback) jmp .storeResult end -macro putByVal(holeCheck, slowPath) +macro putByVal(slowPath) traceExecution() writeBarrierOnOperands(1, 3) loadi 4[PC], t0 loadConstantOrVariablePayload(t0, CellTag, t1, .opPutByValSlow) - loadp JSCell::m_structure[t1], t2 + move t1, t2 loadp 16[PC], t3 arrayProfile(t2, t3, t0) loadi 8[PC], t0 @@ -1659,7 +1649,7 @@ macro putByVal(holeCheck, slowPath) .opPutByValNotContiguous: bineq t2, ArrayStorageShape, .opPutByValSlow biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds - holeCheck(ArrayStorage::m_vector + TagOffset[t0, t3, 8], .opPutByValArrayStorageEmpty) + bieq ArrayStorage::m_vector + TagOffset[t0, t3, 8], EmptyValueTag, .opPutByValArrayStorageEmpty .opPutByValArrayStorageStoreResult: loadi 12[PC], t2 loadConstantOrVariable2Reg(t2, t1, t2) @@ -1685,13 +1675,10 @@ macro putByVal(holeCheck, slowPath) end _llint_op_put_by_val: - putByVal(macro(addr, slowPath) - bieq addr, EmptyValueTag, slowPath - end, _llint_slow_path_put_by_val) + putByVal(_llint_slow_path_put_by_val) _llint_op_put_by_val_direct: - putByVal(macro(addr, slowPath) - end, _llint_slow_path_put_by_val_direct) + putByVal(_llint_slow_path_put_by_val_direct) _llint_op_jmp: traceExecution() @@ -1719,8 +1706,8 @@ macro equalNull(cellHandler, immediateHandler) loadi TagOffset[cfr, t0, 8], t1 loadi PayloadOffset[cfr, t0, 8], t0 bineq t1, CellTag, .immediate - loadp JSCell::m_structure[t0], t2 - cellHandler(t2, Structure::m_typeInfo + TypeInfo::m_flags[t2], .target) + loadp JSCell::m_structureID[t0], t2 + cellHandler(t2, JSCell::m_flags[t0], .target) dispatch(3) 
.target: @@ -1850,8 +1837,7 @@ _llint_op_switch_char: loadp CodeBlock::RareData::m_switchJumpTables + VectorBufferOffset[t2], t2 addp t3, t2 bineq t1, CellTag, .opSwitchCharFallThrough - loadp JSCell::m_structure[t0], t1 - bbneq Structure::m_typeInfo + TypeInfo::m_type[t1], StringType, .opSwitchCharFallThrough + bbneq JSCell::m_type[t0], StringType, .opSwitchCharFallThrough bineq JSString::m_length[t0], 1, .opSwitchCharFallThrough loadp JSString::m_value[t0], t0 btpz t0, .opSwitchOnRope @@ -1877,35 +1863,18 @@ _llint_op_switch_char: dispatch(0) -_llint_op_new_func: - traceExecution() - btiz 12[PC], .opNewFuncUnchecked - loadi 4[PC], t1 - bineq TagOffset[cfr, t1, 8], EmptyValueTag, .opNewFuncDone -.opNewFuncUnchecked: - callSlowPath(_llint_slow_path_new_func) -.opNewFuncDone: - dispatch(4) - - -_llint_op_new_captured_func: - traceExecution() - callSlowPath(_slow_path_new_captured_func) - dispatch(4) - - macro arrayProfileForCall() loadi 16[PC], t3 negi t3 bineq ThisArgumentOffset + TagOffset[cfr, t3, 8], CellTag, .done loadi ThisArgumentOffset + PayloadOffset[cfr, t3, 8], t0 - loadp JSCell::m_structure[t0], t0 - loadp 24[PC], t1 - storep t0, ArrayProfile::m_lastSeenStructure[t1] + loadp JSCell::m_structureID[t0], t0 + loadpFromInstruction(CallOpCodeSize - 2, t1) + storep t0, ArrayProfile::m_lastSeenStructureID[t1] .done: end -macro doCall(slowPath) +macro doCall(slowPath, prepareCall) loadi 8[PC], t0 loadi 20[PC], t1 loadp LLIntCallLinkInfo::callee[t1], t2 @@ -1915,42 +1884,19 @@ macro doCall(slowPath) lshifti 3, t3 negi t3 addp cfr, t3 # t3 contains the new value of cfr - loadp JSFunction::m_scope[t2], t0 storei t2, Callee + PayloadOffset[t3] - storei t0, ScopeChain + PayloadOffset[t3] loadi 12[PC], t2 storei PC, ArgumentCount + TagOffset[cfr] - storep cfr, CallerFrame[t3] storei t2, ArgumentCount + PayloadOffset[t3] storei CellTag, Callee + TagOffset[t3] - storei CellTag, ScopeChain + TagOffset[t3] - move t3, cfr - callTargetFunction(t1) + move t3, sp + prepareCall(LLIntCallLinkInfo::machineCodeTarget[t1], t2, t3, t4) + callTargetFunction(LLIntCallLinkInfo::machineCodeTarget[t1]) .opCallSlow: - slowPathForCall(slowPath) + slowPathForCall(slowPath, prepareCall) end - -_llint_op_tear_off_activation: - traceExecution() - loadi 4[PC], t0 - bieq TagOffset[cfr, t0, 8], EmptyValueTag, .opTearOffActivationNotCreated - callSlowPath(_llint_slow_path_tear_off_activation) -.opTearOffActivationNotCreated: - dispatch(2) - - -_llint_op_tear_off_arguments: - traceExecution() - loadi 4[PC], t0 - addi 1, t0 # Get the unmodifiedArgumentsRegister - bieq TagOffset[cfr, t0, 8], EmptyValueTag, .opTearOffArgumentsNotCreated - callSlowPath(_llint_slow_path_tear_off_arguments) -.opTearOffArgumentsNotCreated: - dispatch(3) - - _llint_op_ret: traceExecution() checkSwitchToJITForEpilogue() @@ -1959,30 +1905,13 @@ _llint_op_ret: doReturn() -_llint_op_ret_object_or_this: - traceExecution() - checkSwitchToJITForEpilogue() - loadi 4[PC], t2 - loadConstantOrVariable(t2, t1, t0) - bineq t1, CellTag, .opRetObjectOrThisNotObject - loadp JSCell::m_structure[t0], t2 - bbb Structure::m_typeInfo + TypeInfo::m_type[t2], ObjectType, .opRetObjectOrThisNotObject - doReturn() - -.opRetObjectOrThisNotObject: - loadi 8[PC], t2 - loadConstantOrVariable(t2, t1, t0) - doReturn() - - _llint_op_to_primitive: traceExecution() loadi 8[PC], t2 loadi 4[PC], t3 loadConstantOrVariable(t2, t1, t0) bineq t1, CellTag, .opToPrimitiveIsImm - loadp JSCell::m_structure[t0], t2 - bbneq Structure::m_typeInfo + TypeInfo::m_type[t2], StringType, 
.opToPrimitiveSlowCase + bbaeq JSCell::m_type[t0], ObjectType, .opToPrimitiveSlowCase .opToPrimitiveIsImm: storei t1, TagOffset[cfr, t3, 8] storei t0, PayloadOffset[cfr, t3, 8] @@ -1993,101 +1922,45 @@ _llint_op_to_primitive: dispatch(3) -_llint_op_next_pname: - traceExecution() - loadi 12[PC], t1 - loadi 16[PC], t2 - loadi PayloadOffset[cfr, t1, 8], t0 - bieq t0, PayloadOffset[cfr, t2, 8], .opNextPnameEnd - loadi 20[PC], t2 - loadi PayloadOffset[cfr, t2, 8], t2 - loadp JSPropertyNameIterator::m_jsStrings[t2], t3 - loadi [t3, t0, 8], t3 - addi 1, t0 - storei t0, PayloadOffset[cfr, t1, 8] - loadi 4[PC], t1 - storei CellTag, TagOffset[cfr, t1, 8] - storei t3, PayloadOffset[cfr, t1, 8] - loadi 8[PC], t3 - loadi PayloadOffset[cfr, t3, 8], t3 - loadp JSCell::m_structure[t3], t1 - bpneq t1, JSPropertyNameIterator::m_cachedStructure[t2], .opNextPnameSlow - loadp JSPropertyNameIterator::m_cachedPrototypeChain[t2], t0 - loadp StructureChain::m_vector[t0], t0 - btpz [t0], .opNextPnameTarget -.opNextPnameCheckPrototypeLoop: - bieq Structure::m_prototype + TagOffset[t1], NullTag, .opNextPnameSlow - loadp Structure::m_prototype + PayloadOffset[t1], t2 - loadp JSCell::m_structure[t2], t1 - bpneq t1, [t0], .opNextPnameSlow - addp 4, t0 - btpnz [t0], .opNextPnameCheckPrototypeLoop -.opNextPnameTarget: - dispatchBranch(24[PC]) - -.opNextPnameEnd: - dispatch(7) - -.opNextPnameSlow: - callSlowPath(_llint_slow_path_next_pname) # This either keeps the PC where it was (causing us to loop) or sets it to target. - dispatch(0) - - _llint_op_catch: # This is where we end up from the JIT's throw trampoline (because the # machine code return address will be set to _llint_op_catch), and from # the interpreter's throw trampoline (see _llint_throw_trampoline). # The throwing code must have known that we were throwing to the interpreter, # and have set VM::targetInterpreterPCForThrow. - loadp ScopeChain[cfr], t3 + loadp Callee + PayloadOffset[cfr], t3 andp MarkedBlockMask, t3 loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3 - loadp VM::callFrameForThrow[t3], cfr - loadi VM::targetInterpreterPCForThrow[t3], PC - loadi VM::m_exception + PayloadOffset[t3], t0 - loadi VM::m_exception + TagOffset[t3], t1 - storei 0, VM::m_exception + PayloadOffset[t3] - storei EmptyValueTag, VM::m_exception + TagOffset[t3] - loadi 4[PC], t2 - storei t0, PayloadOffset[cfr, t2, 8] - storei t1, TagOffset[cfr, t2, 8] - traceExecution() # This needs to be here because we don't want to clobber t0, t1, t2, t3 above. - dispatch(2) - - -# Gives you the scope in t0, while allowing you to optionally perform additional checks on the -# scopes as they are traversed. scopeCheck() is called with two arguments: the register -# holding the scope, and a register that can be used for scratch. Note that this does not -# use t3, so you can hold stuff in t3 if need be. 
-macro getDeBruijnScope(deBruijinIndexOperand, scopeCheck) - loadp ScopeChain + PayloadOffset[cfr], t0 - loadi deBruijinIndexOperand, t2 - - btiz t2, .done + restoreCalleeSavesFromVMCalleeSavesBuffer(t3, t0) + loadp VM::callFrameForCatch[t3], cfr + storep 0, VM::callFrameForCatch[t3] + restoreStackPointerAfterCall() - loadp CodeBlock[cfr], t1 - bineq CodeBlock::m_codeType[t1], FunctionCode, .loop - btbz CodeBlock::m_needsActivation[t1], .loop + loadi VM::targetInterpreterPCForThrow[t3], PC - loadi CodeBlock::m_activationRegister[t1], t1 + callSlowPath(_llint_slow_path_check_if_exception_is_uncatchable_and_notify_profiler) + bpeq r1, 0, .isCatchableException + jmp _llint_throw_from_slow_path_trampoline - # Need to conditionally skip over one scope. - bieq TagOffset[cfr, t1, 8], EmptyValueTag, .noActivation - scopeCheck(t0, t1) - loadp JSScope::m_next[t0], t0 -.noActivation: - subi 1, t2 +.isCatchableException: + loadp Callee + PayloadOffset[cfr], t3 + andp MarkedBlockMask, t3 + loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3 - btiz t2, .done -.loop: - scopeCheck(t0, t1) - loadp JSScope::m_next[t0], t0 - subi 1, t2 - btinz t2, .loop + loadi VM::m_exception[t3], t0 + storei 0, VM::m_exception[t3] + loadi 4[PC], t2 + storei t0, PayloadOffset[cfr, t2, 8] + storei CellTag, TagOffset[cfr, t2, 8] -.done: + loadi Exception::m_value + TagOffset[t0], t1 + loadi Exception::m_value + PayloadOffset[t0], t0 + loadi 8[PC], t2 + storei t0, PayloadOffset[cfr, t2, 8] + storei t1, TagOffset[cfr, t2, 8] -end + traceExecution() # This needs to be here because we don't want to clobber t0, t1, t2, t3 above. + dispatch(3) _llint_op_end: traceExecution() @@ -2105,8 +1978,10 @@ _llint_throw_from_slow_path_trampoline: # When throwing from the interpreter (i.e. throwing from LLIntSlowPaths), so # the throw target is not necessarily interpreted code, we come to here. # This essentially emulates the JIT's throwing protocol. 
- loadp CodeBlock[cfr], t1 - loadp CodeBlock::m_vm[t1], t1 + loadp Callee[cfr], t1 + andp MarkedBlockMask, t1 + loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t1], t1 + copyCalleeSavesToVMCalleeSavesBuffer(t1, t2) jmp VM::targetMachinePCForThrow[t1] @@ -2116,81 +1991,62 @@ _llint_throw_during_call_trampoline: macro nativeCallTrampoline(executableOffsetToFunction) + + functionPrologue() storep 0, CodeBlock[cfr] - loadp CallerFrame[cfr], t0 - loadi ScopeChain + PayloadOffset[t0], t1 - storei CellTag, ScopeChain + TagOffset[cfr] - storei t1, ScopeChain + PayloadOffset[cfr] - if X86 - loadp ScopeChain[cfr], t3 - andp MarkedBlockMask, t3 - loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3 + loadi Callee + PayloadOffset[cfr], t1 + // Callee is still in t1 for code below + if X86 or X86_WIN + subp 8, sp # align stack pointer + andp MarkedBlockMask, t1 + loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t1], t3 storep cfr, VM::topCallFrame[t3] - peek 0, t1 - storep t1, ReturnPC[cfr] - move cfr, t2 # t2 = ecx - subp 16 - 4, sp + move cfr, a0 # a0 = ecx + storep a0, [sp] loadi Callee + PayloadOffset[cfr], t1 loadp JSFunction::m_executable[t1], t1 - move t0, cfr + checkStackPointerAlignment(t3, 0xdead0001) call executableOffsetToFunction[t1] - addp 16 - 4, sp - loadp ScopeChain[cfr], t3 + loadp Callee + PayloadOffset[cfr], t3 andp MarkedBlockMask, t3 loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3 - elsif ARM or ARMv7 or ARMv7_TRADITIONAL or MIPS or SH4 - loadp ScopeChain[cfr], t3 - andp MarkedBlockMask, t3 - loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3 - storep cfr, VM::topCallFrame[t3] - move t0, t2 - preserveReturnAddressAfterCall(t3) - storep t3, ReturnPC[cfr] - move cfr, t0 + addp 8, sp + elsif ARM or ARMv7 or ARMv7_TRADITIONAL or C_LOOP or MIPS or SH4 + subp 8, sp # align stack pointer + # t1 already contains the Callee. 
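# The Callee-based VM lookup performed next (and in branchIfException, op_catch and
# the throw trampoline above) amounts to the following C++ sketch; the block size is
# an assumption and the struct layout is a stand-in, not the real MarkedBlock:

    #include <cstdint>

    struct VM;
    struct WeakSet { VM* vm; };
    struct MarkedBlockModel { WeakSet weakSet; };
    constexpr uintptr_t MarkedBlockMask = ~uintptr_t(16 * 1024 - 1);  // assumed 16KB blocks

    // Any cell pointer, masked down to its aligned MarkedBlock, reaches the owning
    // VM through the block's weak set -- no CodeBlock or scope chain required.
    inline VM* vmForCell(const void* anyCell)
    {
        auto* block = reinterpret_cast<const MarkedBlockModel*>(
            reinterpret_cast<uintptr_t>(anyCell) & MarkedBlockMask);
        return block->weakSet.vm;
    }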
+ andp MarkedBlockMask, t1 + loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t1], t1 + storep cfr, VM::topCallFrame[t1] + move cfr, a0 loadi Callee + PayloadOffset[cfr], t1 loadp JSFunction::m_executable[t1], t1 - move t2, cfr - if MIPS or SH4 - move t0, a0 + checkStackPointerAlignment(t3, 0xdead0001) + if C_LOOP + cloopCallNative executableOffsetToFunction[t1] + else + call executableOffsetToFunction[t1] end - call executableOffsetToFunction[t1] - restoreReturnAddressBeforeReturn(t3) - loadp ScopeChain[cfr], t3 - andp MarkedBlockMask, t3 - loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3 - elsif C_LOOP - loadp ScopeChain[cfr], t3 - andp MarkedBlockMask, t3 - loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3 - storep cfr, VM::topCallFrame[t3] - move t0, t2 - preserveReturnAddressAfterCall(t3) - storep t3, ReturnPC[cfr] - move cfr, t0 - loadi Callee + PayloadOffset[cfr], t1 - loadp JSFunction::m_executable[t1], t1 - move t2, cfr - cloopCallNative executableOffsetToFunction[t1] - restoreReturnAddressBeforeReturn(t3) - loadp ScopeChain[cfr], t3 + loadp Callee + PayloadOffset[cfr], t3 andp MarkedBlockMask, t3 loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3 + addp 8, sp else error end - bineq VM::m_exception + TagOffset[t3], EmptyValueTag, .exception + + functionEpilogue() + btinz VM::m_exception[t3], .handleException ret -.exception: - preserveReturnAddressAfterCall(t1) # This is really only needed on X86 - loadi ArgumentCount + TagOffset[cfr], PC - callSlowPath(_llint_throw_from_native_call) + +.handleException: + storep cfr, VM::topCallFrame[t3] + restoreStackPointerAfterCall() jmp _llint_throw_from_slow_path_trampoline end -macro getGlobalObject(dst) - loadp CodeBlock[cfr], t0 - loadp CodeBlock::m_globalObject[t0], t0 +macro getConstantScope(dst) + loadpFromInstruction(6, t0) loadisFromInstruction(dst, t1) storei CellTag, TagOffset[cfr, t1, 8] storei t0, PayloadOffset[cfr, t1, 8] @@ -2205,14 +2061,10 @@ end macro resolveScope() loadp CodeBlock[cfr], t0 - loadisFromInstruction(4, t2) - btbz CodeBlock::m_needsActivation[t0], .resolveScopeAfterActivationCheck - loadis CodeBlock::m_activationRegister[t0], t1 - btpz PayloadOffset[cfr, t1, 8], .resolveScopeAfterActivationCheck - addi 1, t2 + loadisFromInstruction(5, t2) -.resolveScopeAfterActivationCheck: - loadp ScopeChain[cfr], t0 + loadisFromInstruction(2, t0) + loadp PayloadOffset[cfr, t0, 8], t0 btiz t2, .resolveScopeLoopEnd .resolveScopeLoop: @@ -2229,51 +2081,67 @@ end _llint_op_resolve_scope: traceExecution() - loadisFromInstruction(3, t0) + loadisFromInstruction(4, t0) #rGlobalProperty: bineq t0, GlobalProperty, .rGlobalVar - getGlobalObject(1) - dispatch(6) + getConstantScope(1) + dispatch(7) .rGlobalVar: - bineq t0, GlobalVar, .rClosureVar - getGlobalObject(1) - dispatch(6) + bineq t0, GlobalVar, .rGlobalLexicalVar + getConstantScope(1) + dispatch(7) + +.rGlobalLexicalVar: + bineq t0, GlobalLexicalVar, .rClosureVar + getConstantScope(1) + dispatch(7) .rClosureVar: - bineq t0, ClosureVar, .rGlobalPropertyWithVarInjectionChecks + bineq t0, ClosureVar, .rModuleVar resolveScope() - dispatch(6) + dispatch(7) + +.rModuleVar: + bineq t0, ModuleVar, .rGlobalPropertyWithVarInjectionChecks + getConstantScope(1) + dispatch(7) .rGlobalPropertyWithVarInjectionChecks: bineq t0, GlobalPropertyWithVarInjectionChecks, .rGlobalVarWithVarInjectionChecks varInjectionCheck(.rDynamic) - getGlobalObject(1) - dispatch(6) + getConstantScope(1) + dispatch(7) .rGlobalVarWithVarInjectionChecks: - bineq t0, GlobalVarWithVarInjectionChecks, 
.rClosureVarWithVarInjectionChecks + bineq t0, GlobalVarWithVarInjectionChecks, .rGlobalLexicalVarWithVarInjectionChecks varInjectionCheck(.rDynamic) - getGlobalObject(1) - dispatch(6) + getConstantScope(1) + dispatch(7) + +.rGlobalLexicalVarWithVarInjectionChecks: + bineq t0, GlobalLexicalVarWithVarInjectionChecks, .rClosureVarWithVarInjectionChecks + varInjectionCheck(.rDynamic) + getConstantScope(1) + dispatch(7) .rClosureVarWithVarInjectionChecks: bineq t0, ClosureVarWithVarInjectionChecks, .rDynamic varInjectionCheck(.rDynamic) resolveScope() - dispatch(6) + dispatch(7) .rDynamic: - callSlowPath(_llint_slow_path_resolve_scope) - dispatch(6) + callSlowPath(_slow_path_resolve_scope) + dispatch(7) macro loadWithStructureCheck(operand, slowPath) loadisFromInstruction(operand, t0) - loadp [cfr, t0, 8], t0 + loadp PayloadOffset[cfr, t0, 8], t0 loadpFromInstruction(5, t1) - bpneq JSCell::m_structure[t0], t1, slowPath + bpneq JSCell::m_structureID[t0], t1, slowPath end macro getProperty() @@ -2285,10 +2153,11 @@ macro getProperty() storei t2, PayloadOffset[cfr, t0, 8] end -macro getGlobalVar() +macro getGlobalVar(tdzCheckIfNecessary) loadpFromInstruction(6, t0) loadp TagOffset[t0], t1 loadp PayloadOffset[t0], t2 + tdzCheckIfNecessary(t1) valueProfile(t1, t2, 28, t0) loadisFromInstruction(1, t0) storei t1, TagOffset[cfr, t0, 8] @@ -2296,10 +2165,9 @@ macro getGlobalVar() end macro getClosureVar() - loadp JSVariableObject::m_registers[t0], t0 loadisFromInstruction(6, t3) - loadp TagOffset[t0, t3, 8], t1 - loadp PayloadOffset[t0, t3, 8], t2 + loadp JSEnvironmentRecord_variables + TagOffset[t0, t3, 8], t1 + loadp JSEnvironmentRecord_variables + PayloadOffset[t0, t3, 8], t2 valueProfile(t1, t2, 28, t0) loadisFromInstruction(1, t0) storei t1, TagOffset[cfr, t0, 8] @@ -2309,7 +2177,7 @@ end _llint_op_get_from_scope: traceExecution() loadisFromInstruction(4, t0) - andi ResolveModeMask, t0 + andi ResolveTypeMask, t0 #gGlobalProperty: bineq t0, GlobalProperty, .gGlobalVar @@ -2318,8 +2186,16 @@ _llint_op_get_from_scope: dispatch(8) .gGlobalVar: - bineq t0, GlobalVar, .gClosureVar - getGlobalVar() + bineq t0, GlobalVar, .gGlobalLexicalVar + getGlobalVar(macro(t) end) + dispatch(8) + +.gGlobalLexicalVar: + bineq t0, GlobalLexicalVar, .gClosureVar + getGlobalVar( + macro(tag) + bieq tag, EmptyValueTag, .gDynamic + end) dispatch(8) .gClosureVar: @@ -2335,10 +2211,18 @@ _llint_op_get_from_scope: dispatch(8) .gGlobalVarWithVarInjectionChecks: - bineq t0, GlobalVarWithVarInjectionChecks, .gClosureVarWithVarInjectionChecks + bineq t0, GlobalVarWithVarInjectionChecks, .gGlobalLexicalVarWithVarInjectionChecks varInjectionCheck(.gDynamic) - loadVariable(2, t2, t1, t0) - getGlobalVar() + getGlobalVar(macro(t) end) + dispatch(8) + +.gGlobalLexicalVarWithVarInjectionChecks: + bineq t0, GlobalLexicalVarWithVarInjectionChecks, .gClosureVarWithVarInjectionChecks + varInjectionCheck(.gDynamic) + getGlobalVar( + macro(tag) + bieq tag, EmptyValueTag, .gDynamic + end) dispatch(8) .gClosureVarWithVarInjectionChecks: @@ -2360,11 +2244,11 @@ macro putProperty() storePropertyAtVariableOffset(t1, t0, t2, t3) end -macro putGlobalVar() +macro putGlobalVariable() loadisFromInstruction(3, t0) loadConstantOrVariable(t0, t1, t2) loadpFromInstruction(5, t3) - notifyWrite(t3, t1, t2, t0, .pDynamic) + notifyWrite(t3, .pDynamic) loadpFromInstruction(6, t0) storei t1, TagOffset[t0] storei t2, PayloadOffset[t0] @@ -2373,19 +2257,37 @@ end macro putClosureVar() loadisFromInstruction(3, t1) loadConstantOrVariable(t1, t2, t3) - loadp 
JSVariableObject::m_registers[t0], t0 loadisFromInstruction(6, t1) - storei t2, TagOffset[t0, t1, 8] - storei t3, PayloadOffset[t0, t1, 8] + storei t2, JSEnvironmentRecord_variables + TagOffset[t0, t1, 8] + storei t3, JSEnvironmentRecord_variables + PayloadOffset[t0, t1, 8] +end + +macro putLocalClosureVar() + loadisFromInstruction(3, t1) + loadConstantOrVariable(t1, t2, t3) + loadpFromInstruction(5, t5) + btpz t5, .noVariableWatchpointSet + notifyWrite(t5, .pDynamic) +.noVariableWatchpointSet: + loadisFromInstruction(6, t1) + storei t2, JSEnvironmentRecord_variables + TagOffset[t0, t1, 8] + storei t3, JSEnvironmentRecord_variables + PayloadOffset[t0, t1, 8] end _llint_op_put_to_scope: traceExecution() loadisFromInstruction(4, t0) - andi ResolveModeMask, t0 + andi ResolveTypeMask, t0 -#pGlobalProperty: +#pLocalClosureVar: + bineq t0, LocalClosureVar, .pGlobalProperty + writeBarrierOnOperands(1, 3) + loadVariable(1, t2, t1, t0) + putLocalClosureVar() + dispatch(7) + +.pGlobalProperty: bineq t0, GlobalProperty, .pGlobalVar writeBarrierOnOperands(1, 3) loadWithStructureCheck(1, .pDynamic) @@ -2393,9 +2295,15 @@ _llint_op_put_to_scope: dispatch(7) .pGlobalVar: - bineq t0, GlobalVar, .pClosureVar + bineq t0, GlobalVar, .pGlobalLexicalVar writeBarrierOnGlobalObject(3) - putGlobalVar() + putGlobalVariable() + dispatch(7) + +.pGlobalLexicalVar: + bineq t0, GlobalLexicalVar, .pClosureVar + writeBarrierOnGlobalLexicalEnvironment(3) + putGlobalVariable() dispatch(7) .pClosureVar: @@ -2413,20 +2321,142 @@ _llint_op_put_to_scope: dispatch(7) .pGlobalVarWithVarInjectionChecks: - bineq t0, GlobalVarWithVarInjectionChecks, .pClosureVarWithVarInjectionChecks + bineq t0, GlobalVarWithVarInjectionChecks, .pGlobalLexicalVarWithVarInjectionChecks writeBarrierOnGlobalObject(3) varInjectionCheck(.pDynamic) - putGlobalVar() + putGlobalVariable() + dispatch(7) + +.pGlobalLexicalVarWithVarInjectionChecks: + bineq t0, GlobalLexicalVarWithVarInjectionChecks, .pClosureVarWithVarInjectionChecks + writeBarrierOnGlobalLexicalEnvironment(3) + varInjectionCheck(.pDynamic) + putGlobalVariable() dispatch(7) .pClosureVarWithVarInjectionChecks: - bineq t0, ClosureVarWithVarInjectionChecks, .pDynamic + bineq t0, ClosureVarWithVarInjectionChecks, .pModuleVar writeBarrierOnOperands(1, 3) varInjectionCheck(.pDynamic) loadVariable(1, t2, t1, t0) putClosureVar() dispatch(7) +.pModuleVar: + bineq t0, ModuleVar, .pDynamic + callSlowPath(_slow_path_throw_strict_mode_readonly_property_write_error) + dispatch(7) + .pDynamic: callSlowPath(_llint_slow_path_put_to_scope) dispatch(7) + + +_llint_op_get_from_arguments: + traceExecution() + loadisFromInstruction(2, t0) + loadi PayloadOffset[cfr, t0, 8], t0 + loadi 12[PC], t1 + loadi DirectArguments_storage + TagOffset[t0, t1, 8], t2 + loadi DirectArguments_storage + PayloadOffset[t0, t1, 8], t3 + loadisFromInstruction(1, t1) + valueProfile(t2, t3, 16, t0) + storei t2, TagOffset[cfr, t1, 8] + storei t3, PayloadOffset[cfr, t1, 8] + dispatch(5) + + +_llint_op_put_to_arguments: + traceExecution() + writeBarrierOnOperands(1, 3) + loadisFromInstruction(1, t0) + loadi PayloadOffset[cfr, t0, 8], t0 + loadisFromInstruction(3, t1) + loadConstantOrVariable(t1, t2, t3) + loadi 8[PC], t1 + storei t2, DirectArguments_storage + TagOffset[t0, t1, 8] + storei t3, DirectArguments_storage + PayloadOffset[t0, t1, 8] + dispatch(4) + + +_llint_op_get_parent_scope: + traceExecution() + loadisFromInstruction(2, t0) + loadp PayloadOffset[cfr, t0, 8], t0 + loadp JSScope::m_next[t0], t0 + loadisFromInstruction(1, t1) + 
storei CellTag, TagOffset[cfr, t1, 8] + storei t0, PayloadOffset[cfr, t1, 8] + dispatch(3) + + +_llint_op_profile_type: + traceExecution() + loadp CodeBlock[cfr], t1 + loadp CodeBlock::m_vm[t1], t1 + # t1 is holding the pointer to the typeProfilerLog. + loadp VM::m_typeProfilerLog[t1], t1 + + # t0 is holding the payload, t5 is holding the tag. + loadisFromInstruction(1, t2) + loadConstantOrVariable(t2, t5, t0) + + bieq t5, EmptyValueTag, .opProfileTypeDone + + # t2 is holding the pointer to the current log entry. + loadp TypeProfilerLog::m_currentLogEntryPtr[t1], t2 + + # Store the JSValue onto the log entry. + storei t5, TypeProfilerLog::LogEntry::value + TagOffset[t2] + storei t0, TypeProfilerLog::LogEntry::value + PayloadOffset[t2] + + # Store the TypeLocation onto the log entry. + loadpFromInstruction(2, t3) + storep t3, TypeProfilerLog::LogEntry::location[t2] + + bieq t5, CellTag, .opProfileTypeIsCell + storei 0, TypeProfilerLog::LogEntry::structureID[t2] + jmp .opProfileTypeSkipIsCell +.opProfileTypeIsCell: + loadi JSCell::m_structureID[t0], t3 + storei t3, TypeProfilerLog::LogEntry::structureID[t2] +.opProfileTypeSkipIsCell: + + # Increment the current log entry. + addp sizeof TypeProfilerLog::LogEntry, t2 + storep t2, TypeProfilerLog::m_currentLogEntryPtr[t1] + + loadp TypeProfilerLog::m_logEndPtr[t1], t1 + bpneq t2, t1, .opProfileTypeDone + callSlowPath(_slow_path_profile_type_clear_log) + +.opProfileTypeDone: + dispatch(6) + + +_llint_op_profile_control_flow: + traceExecution() + loadpFromInstruction(1, t0) + loadi BasicBlockLocation::m_executionCount[t0], t1 + addi 1, t1 + bieq t1, 0, .done # We overflowed. + storei t1, BasicBlockLocation::m_executionCount[t0] +.done: + dispatch(2) + + +_llint_op_get_rest_length: + traceExecution() + loadi PayloadOffset + ArgumentCount[cfr], t0 + subi 1, t0 + loadisFromInstruction(2, t1) + bilteq t0, t1, .storeZero + subi t1, t0 + jmp .finish +.storeZero: + move 0, t0 +.finish: + loadisFromInstruction(1, t1) + storei t0, PayloadOffset[cfr, t1, 8] + storei Int32Tag, TagOffset[cfr, t1, 8] + dispatch(3) diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm index 1bc5ed68d..85173bc82 100644 --- a/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm +++ b/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm @@ -1,4 +1,4 @@ -# Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved. +# Copyright (C) 2011-2016 Apple Inc. All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions @@ -46,225 +46,289 @@ macro dispatchAfterCall() loadp CodeBlock[cfr], PB loadp CodeBlock::m_instructions[PB], PB loadisFromInstruction(1, t1) - storeq t0, [cfr, t1, 8] - valueProfile(t0, 7, t2) - dispatch(8) + storeq r0, [cfr, t1, 8] + valueProfile(r0, (CallOpCodeSize - 1), t3) + dispatch(CallOpCodeSize) end -macro cCall2(function, arg1, arg2) - if X86_64 - move arg1, t5 - move arg2, t4 +macro cCall2(function) + checkStackPointerAlignment(t4, 0xbad0c002) + if X86_64 or ARM64 call function - elsif ARM64 - move arg1, t0 - move arg2, t1 + elsif X86_64_WIN + # Note: this implementation is only correct if the return type size is > 8 bytes. + # See macro cCall2Void for an implementation when the return type <= 8 bytes. + # On Win64, when the return type is larger than 8 bytes, we need to allocate space on the stack for the return value. + # On entry rcx (a0), should contain a pointer to this stack space. 
The other parameters are shifted to the right, + # rdx (a1) should contain the first argument, and r8 (a2) should contain the second argument. + # On return, rax contains a pointer to this stack value, and we then need to copy the 16 byte return value into rax (r0) and rdx (r1) + # since the return value is expected to be split between the two. + # See http://msdn.microsoft.com/en-us/library/7572ztz4.aspx + move a1, a2 + move a0, a1 + subp 48, sp + move sp, a0 + addp 32, a0 call function + addp 48, sp + move 8[r0], r1 + move [r0], r0 elsif C_LOOP - cloopCallSlowPath function, arg1, arg2 + cloopCallSlowPath function, a0, a1 else error end end +macro cCall2Void(function) + if C_LOOP + cloopCallSlowPathVoid function, a0, a1 + elsif X86_64_WIN + # Note: we cannot use the cCall2 macro for Win64 in this case, + # as the Win64 cCall2 implemenation is only correct when the return type size is > 8 bytes. + # On Win64, rcx and rdx are used for passing the first two parameters. + # We also need to make room on the stack for all four parameter registers. + # See http://msdn.microsoft.com/en-us/library/ms235286.aspx + subp 32, sp + call function + addp 32, sp + else + cCall2(function) + end +end + # This barely works. arg3 and arg4 should probably be immediates. -macro cCall4(function, arg1, arg2, arg3, arg4) - if X86_64 - move arg1, t5 - move arg2, t4 - move arg3, t1 - move arg4, t2 +macro cCall4(function) + checkStackPointerAlignment(t4, 0xbad0c004) + if X86_64 or ARM64 call function - elsif ARM64 - move arg1, t0 - move arg2, t1 - move arg3, t2 - move arg4, t3 + elsif X86_64_WIN + # On Win64, rcx, rdx, r8, and r9 are used for passing the first four parameters. + # We also need to make room on the stack for all four parameter registers. + # See http://msdn.microsoft.com/en-us/library/ms235286.aspx + subp 64, sp call function - elsif C_LOOP - error + addp 64, sp else error end end -macro functionPrologue(extraStackSpace) - if X86_64 - push cfr - move sp, cfr - elsif ARM64 - pushLRAndFP - end - pushCalleeSaves - if X86_64 - subp extraStackSpace, sp - end -end +macro doVMEntry(makeCall) + functionPrologue() + pushCalleeSaves() -macro functionEpilogue(extraStackSpace) - if X86_64 - addp extraStackSpace, sp - end - popCalleeSaves - if X86_64 - pop cfr - elsif ARM64 - popLRAndFP - end -end + const entry = a0 + const vm = a1 + const protoCallFrame = a2 -macro doCallToJavaScript(makeCall, doReturn) - if X86_64 - const entry = t5 - const vmTopCallFrame = t4 - const protoCallFrame = t1 - const topOfStack = t2 - - const extraStackSpace = 8 - const previousCFR = t0 - const previousPC = t6 - const temp1 = t0 - const temp2 = t3 - const temp3 = t6 - elsif ARM64 - const entry = a0 - const vmTopCallFrame = a1 - const protoCallFrame = a2 - const topOfStack = a3 - - const extraStackSpace = 0 - const previousCFR = t4 - const previousPC = lr - const temp1 = t3 - const temp2 = t5 - const temp3 = t6 - end + vmEntryRecord(cfr, sp) + + checkStackPointerAlignment(t4, 0xbad0dc01) + + storep vm, VMEntryRecord::m_vm[sp] + loadp VM::topCallFrame[vm], t4 + storep t4, VMEntryRecord::m_prevTopCallFrame[sp] + loadp VM::topVMEntryFrame[vm], t4 + storep t4, VMEntryRecord::m_prevTopVMEntryFrame[sp] + + loadi ProtoCallFrame::paddedArgCount[protoCallFrame], t4 + addp CallFrameHeaderSlots, t4, t4 + lshiftp 3, t4 + subp sp, t4, t3 - functionPrologue(extraStackSpace) - - move topOfStack, cfr - subp (CallFrameHeaderSlots-1)*8, cfr - storep 0, ArgumentCount[cfr] - storep vmTopCallFrame, Callee[cfr] - loadp [vmTopCallFrame], temp1 - storep 
temp1, ScopeChain[cfr] - storep 1, CodeBlock[cfr] - if X86_64 - loadp 7*8[sp], previousPC - loadp 6*8[sp], previousCFR + # Ensure that we have enough additional stack capacity for the incoming args, + # and the frame for the JS code we're executing. We need to do this check + # before we start copying the args from the protoCallFrame below. + bpaeq t3, VM::m_jsStackLimit[vm], .stackHeightOK + + if C_LOOP + move entry, t4 + move vm, t5 + cloopCallSlowPath _llint_stack_check_at_vm_entry, vm, t3 + bpeq t0, 0, .stackCheckFailed + move t4, entry + move t5, vm + jmp .stackHeightOK + +.stackCheckFailed: + move t4, entry + move t5, vm end - storep previousPC, ReturnPC[cfr] - storep previousCFR, CallerFrame[cfr] - move cfr, temp1 - loadi ProtoCallFrame::paddedArgCount[protoCallFrame], temp2 - addp CallFrameHeaderSlots, temp2, temp2 - lshiftp 3, temp2 - subp temp2, cfr - storep temp1, CallerFrame[cfr] + move vm, a0 + move protoCallFrame, a1 + cCall2(_llint_throw_stack_overflow_error) + + vmEntryRecord(cfr, t4) + + loadp VMEntryRecord::m_vm[t4], vm + loadp VMEntryRecord::m_prevTopCallFrame[t4], extraTempReg + storep extraTempReg, VM::topCallFrame[vm] + loadp VMEntryRecord::m_prevTopVMEntryFrame[t4], extraTempReg + storep extraTempReg, VM::topVMEntryFrame[vm] - move 5, temp1 + subp cfr, CalleeRegisterSaveSize, sp + + popCalleeSaves() + functionEpilogue() + ret + +.stackHeightOK: + move t3, sp + move 4, t3 .copyHeaderLoop: - subi 1, temp1 - loadp [protoCallFrame, temp1, 8], temp3 - storep temp3, CodeBlock[cfr, temp1, 8] - btinz temp1, .copyHeaderLoop - - loadi ProtoCallFrame::argCountAndCodeOriginValue[protoCallFrame], temp2 - subi 1, temp2 - loadi ProtoCallFrame::paddedArgCount[protoCallFrame], temp3 - subi 1, temp3 - - bieq temp2, temp3, .copyArgs - move ValueUndefined, temp1 + subi 1, t3 + loadq [protoCallFrame, t3, 8], extraTempReg + storeq extraTempReg, CodeBlock[sp, t3, 8] + btinz t3, .copyHeaderLoop + + loadi PayloadOffset + ProtoCallFrame::argCountAndCodeOriginValue[protoCallFrame], t4 + subi 1, t4 + loadi ProtoCallFrame::paddedArgCount[protoCallFrame], extraTempReg + subi 1, extraTempReg + + bieq t4, extraTempReg, .copyArgs + move ValueUndefined, t3 .fillExtraArgsLoop: - subi 1, temp3 - storep temp1, ThisArgumentOffset+8[cfr, temp3, 8] - bineq temp2, temp3, .fillExtraArgsLoop + subi 1, extraTempReg + storeq t3, ThisArgumentOffset + 8[sp, extraTempReg, 8] + bineq t4, extraTempReg, .fillExtraArgsLoop .copyArgs: - loadp ProtoCallFrame::args[protoCallFrame], temp1 + loadp ProtoCallFrame::args[protoCallFrame], t3 .copyArgsLoop: - btiz temp2, .copyArgsDone - subi 1, temp2 - loadp [temp1, temp2, 8], temp3 - storep temp3, ThisArgumentOffset+8[cfr, temp2, 8] + btiz t4, .copyArgsDone + subi 1, t4 + loadq [t3, t4, 8], extraTempReg + storeq extraTempReg, ThisArgumentOffset + 8[sp, t4, 8] jmp .copyArgsLoop .copyArgsDone: - storep cfr, [vmTopCallFrame] + if ARM64 + move sp, t4 + storep t4, VM::topCallFrame[vm] + else + storep sp, VM::topCallFrame[vm] + end + storep cfr, VM::topVMEntryFrame[vm] - move 0xffff000000000000, csr1 - addp 2, csr1, csr2 + checkStackPointerAlignment(extraTempReg, 0xbad0dc02) - makeCall(entry, temp1) + makeCall(entry, t3) - bpeq CodeBlock[cfr], 1, .calleeFramePopped - loadp CallerFrame[cfr], cfr + # We may have just made a call into a JS function, so we can't rely on sp + # for anything but the fact that our own locals (ie the VMEntryRecord) are + # not below it. It also still has to be aligned, though. 
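# The bookkeeping around this entry/exit path boils down to the following C++ sketch;
# the field names mirror the VMEntryRecord accesses above, the types are stand-ins:

    struct VM { void* topCallFrame; void* topVMEntryFrame; };
    struct VMEntryRecord { VM* vm; void* prevTopCallFrame; void* prevTopVMEntryFrame; };

    // On the way in: remember what the VM currently considers the top frames.
    inline void saveEntryRecord(VMEntryRecord& record, VM& vm)
    {
        record.vm = &vm;
        record.prevTopCallFrame = vm.topCallFrame;
        record.prevTopVMEntryFrame = vm.topVMEntryFrame;
    }

    // On every way out (normal return, stack-overflow throw, uncaught exception):
    // restore them, so the caller's view of the stack is unchanged.
    inline void restoreFromEntryRecord(const VMEntryRecord& record)
    {
        record.vm->topCallFrame = record.prevTopCallFrame;
        record.vm->topVMEntryFrame = record.prevTopVMEntryFrame;
    }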
+ checkStackPointerAlignment(t2, 0xbad0dc03) + + vmEntryRecord(cfr, t4) + + loadp VMEntryRecord::m_vm[t4], vm + loadp VMEntryRecord::m_prevTopCallFrame[t4], t2 + storep t2, VM::topCallFrame[vm] + loadp VMEntryRecord::m_prevTopVMEntryFrame[t4], t2 + storep t2, VM::topVMEntryFrame[vm] -.calleeFramePopped: - loadp Callee[cfr], temp2 # VM.topCallFrame - loadp ScopeChain[cfr], temp3 - storep temp3, [temp2] + subp cfr, CalleeRegisterSaveSize, sp - doReturn(extraStackSpace) + popCalleeSaves() + functionEpilogue() + + ret end + macro makeJavaScriptCall(entry, temp) - call entry + addp 16, sp + if C_LOOP + cloopCallJSFunction entry + else + call entry + end + subp 16, sp end + macro makeHostFunctionCall(entry, temp) move entry, temp - if X86_64 - move cfr, t5 - elsif ARM64 or C_LOOP - move cfr, a0 + storep cfr, [sp] + move sp, a0 + if C_LOOP + storep lr, 8[sp] + cloopCallNative temp + elsif X86_64_WIN + # We need to allocate 32 bytes on the stack for the shadow space. + subp 32, sp + call temp + addp 32, sp + else + call temp end - call temp end -macro doReturnFromJavaScript(extraStackSpace) -_returnFromJavaScript: - functionEpilogue(extraStackSpace) - ret -end -macro doReturnFromHostFunction(extraStackSpace) - functionEpilogue(extraStackSpace) +_handleUncaughtException: + loadp Callee[cfr], t3 + andp MarkedBlockMask, t3 + loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3 + restoreCalleeSavesFromVMCalleeSavesBuffer(t3, t0) + loadp VM::callFrameForCatch[t3], cfr + storep 0, VM::callFrameForCatch[t3] + + loadp CallerFrame[cfr], cfr + vmEntryRecord(cfr, t2) + + loadp VMEntryRecord::m_vm[t2], t3 + loadp VMEntryRecord::m_prevTopCallFrame[t2], extraTempReg + storep extraTempReg, VM::topCallFrame[t3] + loadp VMEntryRecord::m_prevTopVMEntryFrame[t2], extraTempReg + storep extraTempReg, VM::topVMEntryFrame[t3] + + subp cfr, CalleeRegisterSaveSize, sp + + popCalleeSaves() + functionEpilogue() ret -end + macro prepareStateForCCall() leap [PB, PC, 8], PC - move PB, t3 end macro restoreStateAfterCCall() - move t0, PC - move t1, cfr - move t3, PB + move r0, PC subp PB, PC rshiftp 3, PC end macro callSlowPath(slowPath) prepareStateForCCall() - cCall2(slowPath, cfr, PC) + move cfr, a0 + move PC, a1 + cCall2(slowPath) restoreStateAfterCCall() end macro traceOperand(fromWhere, operand) prepareStateForCCall() - cCall4(_llint_trace_operand, cfr, PC, fromWhere, operand) + move fromWhere, a2 + move operand, a3 + move cfr, a0 + move PC, a1 + cCall4(_llint_trace_operand) restoreStateAfterCCall() end macro traceValue(fromWhere, operand) prepareStateForCCall() - cCall4(_llint_trace_value, cfr, PC, fromWhere, operand) + move fromWhere, a2 + move operand, a3 + move cfr, a0 + move PC, a1 + cCall4(_llint_trace_value) restoreStateAfterCCall() end @@ -272,18 +336,19 @@ end macro callCallSlowPath(slowPath, action) storei PC, ArgumentCount + TagOffset[cfr] prepareStateForCCall() - cCall2(slowPath, cfr, PC) - move t1, cfr - action(t0) + move cfr, a0 + move PC, a1 + cCall2(slowPath) + action(r0, r1) end macro callWatchdogTimerHandler(throwHandler) storei PC, ArgumentCount + TagOffset[cfr] prepareStateForCCall() - cCall2(_llint_slow_path_handle_watchdog_timer, cfr, PC) - move t1, cfr - btpnz t0, throwHandler - move t3, PB + move cfr, a0 + move PC, a1 + cCall2(_llint_slow_path_handle_watchdog_timer) + btpnz r0, throwHandler loadi ArgumentCount + TagOffset[cfr], PC end @@ -293,12 +358,13 @@ macro checkSwitchToJITForLoop() macro() storei PC, ArgumentCount + TagOffset[cfr] prepareStateForCCall() - cCall2(_llint_loop_osr, cfr, PC) - 
move t1, cfr - btpz t0, .recover - jmp t0 + move cfr, a0 + move PC, a1 + cCall2(_llint_loop_osr) + btpz r0, .recover + move r1, sp + jmp r0 .recover: - move t3, PB loadi ArgumentCount + TagOffset[cfr], PC end) end @@ -332,50 +398,64 @@ macro loadConstantOrVariableCell(index, value, slow) end macro writeBarrierOnOperand(cellOperand) - if GGC - loadisFromInstruction(cellOperand, t1) - loadConstantOrVariableCell(t1, t2, .writeBarrierDone) - checkMarkByte(t2, t1, t3, - macro(marked) - btbz marked, .writeBarrierDone - push PB, PC - cCall2(_llint_write_barrier_slow, cfr, t2) - pop PC, PB - end - ) - .writeBarrierDone: - end + loadisFromInstruction(cellOperand, t1) + loadConstantOrVariableCell(t1, t2, .writeBarrierDone) + skipIfIsRememberedOrInEden(t2, t1, t3, + macro(cellState) + btbnz cellState, .writeBarrierDone + push PB, PC + move t2, a1 # t2 can be a0 (not on 64 bits, but better safe than sorry) + move cfr, a0 + cCall2Void(_llint_write_barrier_slow) + pop PC, PB + end + ) +.writeBarrierDone: end macro writeBarrierOnOperands(cellOperand, valueOperand) - if GGC - loadisFromInstruction(valueOperand, t1) - loadConstantOrVariable(t1, t0) - btpz t0, .writeBarrierDone - - writeBarrierOnOperand(cellOperand) - .writeBarrierDone: - end + loadisFromInstruction(valueOperand, t1) + loadConstantOrVariableCell(t1, t0, .writeBarrierDone) + btpz t0, .writeBarrierDone + + writeBarrierOnOperand(cellOperand) +.writeBarrierDone: +end + +macro writeBarrierOnGlobal(valueOperand, loadHelper) + loadisFromInstruction(valueOperand, t1) + loadConstantOrVariableCell(t1, t0, .writeBarrierDone) + btpz t0, .writeBarrierDone + + loadHelper(t3) + skipIfIsRememberedOrInEden(t3, t1, t2, + macro(gcData) + btbnz gcData, .writeBarrierDone + push PB, PC + move cfr, a0 + move t3, a1 + cCall2Void(_llint_write_barrier_slow) + pop PC, PB + end + ) +.writeBarrierDone: end macro writeBarrierOnGlobalObject(valueOperand) - if GGC - loadisFromInstruction(valueOperand, t1) - loadConstantOrVariable(t1, t0) - btpz t0, .writeBarrierDone - - loadp CodeBlock[cfr], t3 - loadp CodeBlock::m_globalObject[t3], t3 - checkMarkByte(t3, t1, t2, - macro(marked) - btbz marked, .writeBarrierDone - push PB, PC - cCall2(_llint_write_barrier_slow, cfr, t3) - pop PC, PB - end - ) - .writeBarrierDone: - end + writeBarrierOnGlobal(valueOperand, + macro(registerToStoreGlobal) + loadp CodeBlock[cfr], registerToStoreGlobal + loadp CodeBlock::m_globalObject[registerToStoreGlobal], registerToStoreGlobal + end) +end + +macro writeBarrierOnGlobalLexicalEnvironment(valueOperand) + writeBarrierOnGlobal(valueOperand, + macro(registerToStoreGlobal) + loadp CodeBlock[cfr], registerToStoreGlobal + loadp CodeBlock::m_globalObject[registerToStoreGlobal], registerToStoreGlobal + loadp JSGlobalObject::m_globalLexicalEnvironment[registerToStoreGlobal], registerToStoreGlobal + end) end macro valueProfile(value, operand, scratch) @@ -383,27 +463,69 @@ macro valueProfile(value, operand, scratch) storeq value, ValueProfile::m_buckets[scratch] end +macro structureIDToStructureWithScratch(structureIDThenStructure, scratch) + loadp CodeBlock[cfr], scratch + loadp CodeBlock::m_vm[scratch], scratch + loadp VM::heap + Heap::m_structureIDTable + StructureIDTable::m_table[scratch], scratch + loadp [scratch, structureIDThenStructure, 8], structureIDThenStructure +end + +macro loadStructureWithScratch(cell, structure, scratch) + loadi JSCell::m_structureID[cell], structure + structureIDToStructureWithScratch(structure, scratch) +end + +macro loadStructureAndClobberFirstArg(cell, structure) + 
loadi JSCell::m_structureID[cell], structure + loadp CodeBlock[cfr], cell + loadp CodeBlock::m_vm[cell], cell + loadp VM::heap + Heap::m_structureIDTable + StructureIDTable::m_table[cell], cell + loadp [cell, structure, 8], structure +end + +macro storeStructureWithTypeInfo(cell, structure, scratch) + loadq Structure::m_blob + StructureIDBlob::u.doubleWord[structure], scratch + storeq scratch, JSCell::m_structureID[cell] +end # Entrypoints into the interpreter. # Expects that CodeBlock is in t1, which is what prologue() leaves behind. -macro functionArityCheck(doneLabel, slow_path) +macro functionArityCheck(doneLabel, slowPath) loadi PayloadOffset + ArgumentCount[cfr], t0 biaeq t0, CodeBlock::m_numParameters[t1], doneLabel prepareStateForCCall() - cCall2(slow_path, cfr, PC) # This slow_path has a simple protocol: t0 = 0 => no error, t0 != 0 => error - btiz t0, .isArityFixupNeeded - move t1, cfr # t1 contains caller frame + move cfr, a0 + move PC, a1 + cCall2(slowPath) # This slowPath has the protocol: r0 = 0 => no error, r0 != 0 => error + btiz r0, .noError + move r1, cfr # r1 contains caller frame jmp _llint_throw_from_slow_path_trampoline -.isArityFixupNeeded: +.noError: + loadi CommonSlowPaths::ArityCheckData::paddedStackSpace[r1], t1 btiz t1, .continue + loadi PayloadOffset + ArgumentCount[cfr], t2 + addi CallFrameHeaderSlots, t2 - // Move frame up "t1" slots + // Check if there are some unaligned slots we can use + move t1, t3 + andi StackAlignmentSlots - 1, t3 + btiz t3, .noExtraSlot + move ValueUndefined, t0 +.fillExtraSlots: + storeq t0, [cfr, t2, 8] + addi 1, t2 + bsubinz 1, t3, .fillExtraSlots + andi ~(StackAlignmentSlots - 1), t1 + btiz t1, .continue + +.noExtraSlot: + // Move frame up t1 slots negq t1 move cfr, t3 - loadi PayloadOffset + ArgumentCount[cfr], t2 - addi CallFrameHeaderSlots, t2 + subp CalleeSaveSpaceAsVirtualRegisters * 8, t3 + addi CalleeSaveSpaceAsVirtualRegisters, t2 .copyLoop: loadq [t3], t0 storeq t0, [t3, t1, 8] @@ -420,6 +542,7 @@ macro functionArityCheck(doneLabel, slow_path) lshiftp 3, t1 addp t1, cfr + addp t1, sp .continue: # Reload CodeBlock and reset PC, since the slow_path clobbered them. 
@@ -429,9 +552,8 @@ macro functionArityCheck(doneLabel, slow_path) jmp doneLabel end - macro branchIfException(label) - loadp ScopeChain[cfr], t3 + loadp Callee[cfr], t3 andp MarkedBlockMask, t3 loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3 btqz VM::m_exception[t3], .noException @@ -441,17 +563,20 @@ end # Instruction implementations - _llint_op_enter: traceExecution() + checkStackPointerAlignment(t2, 0xdead00e1) loadp CodeBlock[cfr], t2 // t2<CodeBlock> = cfr.CodeBlock loadi CodeBlock::m_numVars[t2], t2 // t2<size_t> = t2<CodeBlock>.m_numVars + subq CalleeSaveSpaceAsVirtualRegisters, t2 + move cfr, t1 + subq CalleeSaveSpaceAsVirtualRegisters * 8, t1 btiz t2, .opEnterDone move ValueUndefined, t0 negi t2 sxi2q t2, t2 .opEnterLoop: - storeq t0, [cfr, t2, 8] + storeq t0, [t1, t2, 8] addq 1, t2 btqnz t2, .opEnterLoop .opEnterDone: @@ -459,28 +584,12 @@ _llint_op_enter: dispatch(1) -_llint_op_create_activation: - traceExecution() - loadisFromInstruction(1, t0) - bqneq [cfr, t0, 8], ValueEmpty, .opCreateActivationDone - callSlowPath(_llint_slow_path_create_activation) -.opCreateActivationDone: - dispatch(2) - - -_llint_op_init_lazy_reg: - traceExecution() - loadisFromInstruction(1, t0) - storeq ValueEmpty, [cfr, t0, 8] - dispatch(2) - - -_llint_op_create_arguments: +_llint_op_get_scope: traceExecution() - loadisFromInstruction(1, t0) - bqneq [cfr, t0, 8], ValueEmpty, .opCreateArgumentsDone - callSlowPath(_slow_path_create_arguments) -.opCreateArgumentsDone: + loadp Callee[cfr], t0 + loadp JSCallee::m_scope[t0], t0 + loadisFromInstruction(1, t1) + storeq t0, [cfr, t1, 8] dispatch(2) @@ -488,46 +597,39 @@ _llint_op_create_this: traceExecution() loadisFromInstruction(2, t0) loadp [cfr, t0, 8], t0 - loadp JSFunction::m_allocationProfile + ObjectAllocationProfile::m_allocator[t0], t1 - loadp JSFunction::m_allocationProfile + ObjectAllocationProfile::m_structure[t0], t2 + loadp JSFunction::m_rareData[t0], t3 + btpz t3, .opCreateThisSlow + loadp FunctionRareData::m_objectAllocationProfile + ObjectAllocationProfile::m_allocator[t3], t1 + loadp FunctionRareData::m_objectAllocationProfile + ObjectAllocationProfile::m_structure[t3], t2 btpz t1, .opCreateThisSlow + loadpFromInstruction(4, t3) + bpeq t3, 1, .hasSeenMultipleCallee + bpneq t3, t0, .opCreateThisSlow +.hasSeenMultipleCallee: allocateJSObject(t1, t2, t0, t3, .opCreateThisSlow) loadisFromInstruction(1, t1) storeq t0, [cfr, t1, 8] - dispatch(4) + dispatch(5) .opCreateThisSlow: callSlowPath(_slow_path_create_this) - dispatch(4) - - -_llint_op_get_callee: - traceExecution() - loadisFromInstruction(1, t0) - loadp Callee[cfr], t1 - loadpFromInstruction(2, t2) - bpneq t1, t2, .opGetCalleeSlow - storep t1, [cfr, t0, 8] - dispatch(3) + dispatch(5) -.opGetCalleeSlow: - callSlowPath(_slow_path_get_callee) - dispatch(3) _llint_op_to_this: traceExecution() loadisFromInstruction(1, t0) loadq [cfr, t0, 8], t0 btqnz t0, tagMask, .opToThisSlow - loadp JSCell::m_structure[t0], t0 - bbneq Structure::m_typeInfo + TypeInfo::m_type[t0], FinalObjectType, .opToThisSlow + bbneq JSCell::m_type[t0], FinalObjectType, .opToThisSlow + loadStructureWithScratch(t0, t1, t2) loadpFromInstruction(2, t2) - bpneq t0, t2, .opToThisSlow - dispatch(3) + bpneq t1, t2, .opToThisSlow + dispatch(4) .opToThisSlow: callSlowPath(_slow_path_to_this) - dispatch(3) + dispatch(4) _llint_op_new_object: @@ -545,47 +647,24 @@ _llint_op_new_object: dispatch(4) -_llint_op_mov: +_llint_op_check_tdz: traceExecution() - loadisFromInstruction(2, t1) loadisFromInstruction(1, t0) - 
loadConstantOrVariable(t1, t2) - storeq t2, [cfr, t0, 8] - dispatch(3) - - -macro notifyWrite(set, value, scratch, slow) - loadb VariableWatchpointSet::m_state[set], scratch - bieq scratch, IsInvalidated, .done - bineq scratch, ClearWatchpoint, .overwrite - storeq value, VariableWatchpointSet::m_inferredValue[set] - storeb IsWatched, VariableWatchpointSet::m_state[set] - jmp .done + loadConstantOrVariable(t0, t1) + bqneq t1, ValueEmpty, .opNotTDZ + callSlowPath(_slow_path_throw_tdz_error) -.overwrite: - bqeq value, VariableWatchpointSet::m_inferredValue[set], .done - btbnz VariableWatchpointSet::m_setIsNotEmpty[set], slow - storeq 0, VariableWatchpointSet::m_inferredValue[set] - storeb IsInvalidated, VariableWatchpointSet::m_state[set] +.opNotTDZ: + dispatch(2) -.done: -end -_llint_op_captured_mov: +_llint_op_mov: traceExecution() loadisFromInstruction(2, t1) - loadConstantOrVariable(t1, t2) - loadpFromInstruction(3, t0) - btpz t0, .opCapturedMovReady - notifyWrite(t0, t2, t1, .opCapturedMovSlow) -.opCapturedMovReady: loadisFromInstruction(1, t0) + loadConstantOrVariable(t1, t2) storeq t2, [cfr, t0, 8] - dispatch(4) - -.opCapturedMovSlow: - callSlowPath(_slow_path_captured_mov) - dispatch(4) + dispatch(3) _llint_op_not: @@ -637,11 +716,11 @@ macro equalNullComparison() loadisFromInstruction(2, t0) loadq [cfr, t0, 8], t0 btqnz t0, tagMask, .immediate - loadp JSCell::m_structure[t0], t2 - btbnz Structure::m_typeInfo + TypeInfo::m_flags[t2], MasqueradesAsUndefined, .masqueradesAsUndefined + btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .masqueradesAsUndefined move 0, t0 jmp .done .masqueradesAsUndefined: + loadStructureWithScratch(t0, t2, t1) loadp CodeBlock[cfr], t0 loadp CodeBlock::m_globalObject[t0], t0 cpeq Structure::m_globalObject[t2], t0, t0 @@ -751,6 +830,22 @@ _llint_op_to_number: dispatch(3) +_llint_op_to_string: + traceExecution() + loadisFromInstruction(2, t1) + loadisFromInstruction(1, t2) + loadConstantOrVariable(t1, t0) + btqnz t0, tagMask, .opToStringSlow + bbneq JSCell::m_type[t0], StringType, .opToStringSlow +.opToStringIsString: + storeq t0, [cfr, t2, 8] + dispatch(3) + +.opToStringSlow: + callSlowPath(_slow_path_to_string) + dispatch(3) + + _llint_op_negate: traceExecution() loadisFromInstruction(2, t0) @@ -868,7 +963,7 @@ _llint_op_sub: _llint_op_div: traceExecution() - if X86_64 + if X86_64 or X86_64_WIN binaryOpCustomStore( macro (left, right, slow, index) # Assume t3 is scratchable. @@ -974,41 +1069,50 @@ _llint_op_bitor: 5) -_llint_op_check_has_instance: +_llint_op_overrides_has_instance: traceExecution() + loadisFromInstruction(1, t3) + loadisFromInstruction(3, t1) - loadConstantOrVariableCell(t1, t0, .opCheckHasInstanceSlow) - loadp JSCell::m_structure[t0], t0 - btbz Structure::m_typeInfo + TypeInfo::m_flags[t0], ImplementsDefaultHasInstance, .opCheckHasInstanceSlow - dispatch(5) + loadConstantOrVariable(t1, t0) + loadp CodeBlock[cfr], t2 + loadp CodeBlock::m_globalObject[t2], t2 + loadp JSGlobalObject::m_functionProtoHasInstanceSymbolFunction[t2], t2 + bqneq t0, t2, .opOverridesHasInstanceNotDefaultSymbol -.opCheckHasInstanceSlow: - callSlowPath(_llint_slow_path_check_has_instance) - dispatch(0) + loadisFromInstruction(2, t1) + loadConstantOrVariable(t1, t0) + tbz JSCell::m_flags[t0], ImplementsDefaultHasInstance, t1 + orq ValueFalse, t1 + storeq t1, [cfr, t3, 8] + dispatch(4) + +.opOverridesHasInstanceNotDefaultSymbol: + storeq ValueTrue, [cfr, t3, 8] + dispatch(4) _llint_op_instanceof: traceExecution() # Actually do the work. 
loadisFromInstruction(3, t0) - loadisFromInstruction(1, t3) loadConstantOrVariableCell(t0, t1, .opInstanceofSlow) - loadp JSCell::m_structure[t1], t2 - bbb Structure::m_typeInfo + TypeInfo::m_type[t2], ObjectType, .opInstanceofSlow + bbb JSCell::m_type[t1], ObjectType, .opInstanceofSlow loadisFromInstruction(2, t0) loadConstantOrVariableCell(t0, t2, .opInstanceofSlow) # Register state: t1 = prototype, t2 = value move 1, t0 .opInstanceofLoop: - loadp JSCell::m_structure[t2], t2 - loadq Structure::m_prototype[t2], t2 + loadStructureAndClobberFirstArg(t2, t3) + loadq Structure::m_prototype[t3], t2 bqeq t2, t1, .opInstanceofDone btqz t2, tagMask, .opInstanceofLoop move 0, t0 .opInstanceofDone: orq ValueFalse, t0 + loadisFromInstruction(1, t3) storeq t0, [cfr, t3, 8] dispatch(4) @@ -1016,6 +1120,10 @@ _llint_op_instanceof: callSlowPath(_llint_slow_path_instanceof) dispatch(4) +_llint_op_instanceof_custom: + traceExecution() + callSlowPath(_llint_slow_path_instanceof_custom) + dispatch(5) _llint_op_is_undefined: traceExecution() @@ -1028,17 +1136,17 @@ _llint_op_is_undefined: storeq t3, [cfr, t2, 8] dispatch(3) .opIsUndefinedCell: - loadp JSCell::m_structure[t0], t0 - btbnz Structure::m_typeInfo + TypeInfo::m_flags[t0], MasqueradesAsUndefined, .masqueradesAsUndefined + btbnz JSCell::m_flags[t0], MasqueradesAsUndefined, .masqueradesAsUndefined move ValueFalse, t1 storeq t1, [cfr, t2, 8] dispatch(3) .masqueradesAsUndefined: + loadStructureWithScratch(t0, t3, t1) loadp CodeBlock[cfr], t1 loadp CodeBlock::m_globalObject[t1], t1 - cpeq Structure::m_globalObject[t0], t1, t3 - orq ValueFalse, t3 - storeq t3, [cfr, t2, 8] + cpeq Structure::m_globalObject[t3], t1, t0 + orq ValueFalse, t0 + storeq t0, [cfr, t2, 8] dispatch(3) @@ -1071,8 +1179,7 @@ _llint_op_is_string: loadisFromInstruction(1, t2) loadConstantOrVariable(t1, t0) btqnz t0, tagMask, .opIsStringNotCell - loadp JSCell::m_structure[t0], t0 - cbeq Structure::m_typeInfo + TypeInfo::m_type[t0], StringType, t1 + cbeq JSCell::m_type[t0], StringType, t1 orq ValueFalse, t1 storeq t1, [cfr, t2, 8] dispatch(3) @@ -1081,9 +1188,25 @@ _llint_op_is_string: dispatch(3) -macro loadPropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, value) +_llint_op_is_object: + traceExecution() + loadisFromInstruction(2, t1) + loadisFromInstruction(1, t2) + loadConstantOrVariable(t1, t0) + btqnz t0, tagMask, .opIsObjectNotCell + cbaeq JSCell::m_type[t0], ObjectType, t1 + orq ValueFalse, t1 + storeq t1, [cfr, t2, 8] + dispatch(3) +.opIsObjectNotCell: + storeq ValueFalse, [cfr, t2, 8] + dispatch(3) + + +macro loadPropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, value, slow) bilt propertyOffsetAsInt, firstOutOfLineOffset, .isInline loadp JSObject::m_butterfly[objectAndStorage], objectAndStorage + copyBarrier(objectAndStorage, slow) negi propertyOffsetAsInt sxi2q propertyOffsetAsInt, propertyOffsetAsInt jmp .ready @@ -1094,9 +1217,10 @@ macro loadPropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, value) end -macro storePropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, value) +macro storePropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, value, slow) bilt propertyOffsetAsInt, firstOutOfLineOffset, .isInline loadp JSObject::m_butterfly[objectAndStorage], objectAndStorage + copyBarrier(objectAndStorage, slow) negi propertyOffsetAsInt sxi2q propertyOffsetAsInt, propertyOffsetAsInt jmp .ready @@ -1106,50 +1230,23 @@ macro storePropertyAtVariableOffset(propertyOffsetAsInt, objectAndStorage, value storeq value, 
(firstOutOfLineOffset - 2) * 8[objectAndStorage, propertyOffsetAsInt, 8] end -_llint_op_init_global_const: - traceExecution() - writeBarrierOnGlobalObject(2) - loadisFromInstruction(2, t1) - loadpFromInstruction(1, t0) - loadConstantOrVariable(t1, t2) - storeq t2, [t0] - dispatch(5) - - -macro getById(getPropertyStorage) +_llint_op_get_by_id: traceExecution() - # We only do monomorphic get_by_id caching for now, and we do not modify the - # opcode. We do, however, allow for the cache to change anytime if fails, since - # ping-ponging is free. At best we get lucky and the get_by_id will continue - # to take fast path on the new cache. At worst we take slow path, which is what - # we would have been doing anyway. loadisFromInstruction(2, t0) - loadpFromInstruction(4, t1) loadConstantOrVariableCell(t0, t3, .opGetByIdSlow) - loadisFromInstruction(5, t2) - getPropertyStorage( - t3, - t0, - macro (propertyStorage, scratch) - bpneq JSCell::m_structure[t3], t1, .opGetByIdSlow - loadisFromInstruction(1, t1) - loadq [propertyStorage, t2], scratch - storeq scratch, [cfr, t1, 8] - valueProfile(scratch, 8, t1) - dispatch(9) - end) - - .opGetByIdSlow: - callSlowPath(_llint_slow_path_get_by_id) - dispatch(9) -end - -_llint_op_get_by_id: - getById(withInlineStorage) - + loadi JSCell::m_structureID[t3], t1 + loadisFromInstruction(4, t2) + bineq t2, t1, .opGetByIdSlow + loadisFromInstruction(5, t1) + loadisFromInstruction(1, t2) + loadPropertyAtVariableOffset(t1, t3, t0, .opGetByIdSlow) + storeq t0, [cfr, t2, 8] + valueProfile(t0, 8, t1) + dispatch(9) -_llint_op_get_by_id_out_of_line: - getById(withOutOfLineStorage) +.opGetByIdSlow: + callSlowPath(_llint_slow_path_get_by_id) + dispatch(9) _llint_op_get_array_length: @@ -1157,12 +1254,13 @@ _llint_op_get_array_length: loadisFromInstruction(2, t0) loadpFromInstruction(4, t1) loadConstantOrVariableCell(t0, t3, .opGetArrayLengthSlow) - loadp JSCell::m_structure[t3], t2 + move t3, t2 arrayProfile(t2, t1, t0) btiz t2, IsArray, .opGetArrayLengthSlow btiz t2, IndexingShapeMask, .opGetArrayLengthSlow loadisFromInstruction(1, t1) loadp JSObject::m_butterfly[t3], t0 + copyBarrier(t0, .opGetArrayLengthSlow) loadi -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], t0 bilt t0, 0, .opGetArrayLengthSlow orq tagTypeNumber, t0 @@ -1175,122 +1273,157 @@ _llint_op_get_array_length: dispatch(9) -_llint_op_get_arguments_length: - traceExecution() - loadisFromInstruction(2, t0) - loadisFromInstruction(1, t1) - btqnz [cfr, t0, 8], .opGetArgumentsLengthSlow - loadi ArgumentCount + PayloadOffset[cfr], t2 - subi 1, t2 - orq tagTypeNumber, t2 - storeq t2, [cfr, t1, 8] - dispatch(4) - -.opGetArgumentsLengthSlow: - callSlowPath(_llint_slow_path_get_arguments_length) - dispatch(4) - - -macro putById(getPropertyStorage) - traceExecution() - writeBarrierOnOperands(1, 3) - loadisFromInstruction(1, t3) - loadpFromInstruction(4, t1) - loadConstantOrVariableCell(t3, t0, .opPutByIdSlow) - loadisFromInstruction(3, t2) - getPropertyStorage( - t0, - t3, - macro (propertyStorage, scratch) - bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow - loadisFromInstruction(5, t1) - loadConstantOrVariable(t2, scratch) - storeq scratch, [propertyStorage, t1] - dispatch(9) - end) -end - _llint_op_put_by_id: - putById(withInlineStorage) - -.opPutByIdSlow: - callSlowPath(_llint_slow_path_put_by_id) - dispatch(9) - - -_llint_op_put_by_id_out_of_line: - putById(withOutOfLineStorage) - - -macro putByIdTransition(additionalChecks, getPropertyStorage) traceExecution() - writeBarrierOnOperand(1) + 
writeBarrierOnOperands(1, 3) loadisFromInstruction(1, t3) - loadpFromInstruction(4, t1) loadConstantOrVariableCell(t3, t0, .opPutByIdSlow) - loadisFromInstruction(3, t2) - bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow - additionalChecks(t1, t3) - loadisFromInstruction(5, t1) - getPropertyStorage( - t0, - t3, - macro (propertyStorage, scratch) - addp t1, propertyStorage, t3 - loadConstantOrVariable(t2, t1) - storeq t1, [t3] - loadpFromInstruction(6, t1) - storep t1, JSCell::m_structure[t0] - dispatch(9) - end) -end + loadisFromInstruction(4, t2) + bineq t2, JSCell::m_structureID[t0], .opPutByIdSlow -macro noAdditionalChecks(oldStructure, scratch) -end + # At this point, we have: + # t2 -> current structure ID + # t0 -> object base -macro structureChainChecks(oldStructure, scratch) - const protoCell = oldStructure # Reusing the oldStructure register for the proto - loadpFromInstruction(7, scratch) - assert(macro (ok) btpnz scratch, ok end) - loadp StructureChain::m_vector[scratch], scratch - assert(macro (ok) btpnz scratch, ok end) - bqeq Structure::m_prototype[oldStructure], ValueNull, .done -.loop: - loadq Structure::m_prototype[oldStructure], protoCell - loadp JSCell::m_structure[protoCell], oldStructure - bpneq oldStructure, [scratch], .opPutByIdSlow - addp 8, scratch - bqneq Structure::m_prototype[oldStructure], ValueNull, .loop -.done: -end + loadisFromInstruction(3, t1) + loadConstantOrVariable(t1, t3) + + loadpFromInstruction(8, t1) + + # At this point, we have: + # t0 -> object base + # t1 -> put by id flags + # t2 -> current structure ID + # t3 -> value to put + + btpnz t1, PutByIdPrimaryTypeMask, .opPutByIdTypeCheckObjectWithStructureOrOther + + # We have one of the non-structure type checks. Find out which one. + andp PutByIdSecondaryTypeMask, t1 + bplt t1, PutByIdSecondaryTypeString, .opPutByIdTypeCheckLessThanString + + # We are one of the following: String, Symbol, Object, ObjectOrOther, Top + bplt t1, PutByIdSecondaryTypeObjectOrOther, .opPutByIdTypeCheckLessThanObjectOrOther + + # We are either ObjectOrOther or Top. + bpeq t1, PutByIdSecondaryTypeTop, .opPutByIdDoneCheckingTypes + + # Check if we are ObjectOrOther. + btqz t3, tagMask, .opPutByIdTypeCheckObject +.opPutByIdTypeCheckOther: + andq ~TagBitUndefined, t3 + bqeq t3, ValueNull, .opPutByIdDoneCheckingTypes + jmp .opPutByIdSlow + +.opPutByIdTypeCheckLessThanObjectOrOther: + # We are either String, Symbol or Object. + btqnz t3, tagMask, .opPutByIdSlow + bpeq t1, PutByIdSecondaryTypeObject, .opPutByIdTypeCheckObject + bpeq t1, PutByIdSecondaryTypeSymbol, .opPutByIdTypeCheckSymbol + bbeq JSCell::m_type[t3], StringType, .opPutByIdDoneCheckingTypes + jmp .opPutByIdSlow +.opPutByIdTypeCheckObject: + bbaeq JSCell::m_type[t3], ObjectType, .opPutByIdDoneCheckingTypes + jmp .opPutByIdSlow +.opPutByIdTypeCheckSymbol: + bbeq JSCell::m_type[t3], SymbolType, .opPutByIdDoneCheckingTypes + jmp .opPutByIdSlow + +.opPutByIdTypeCheckLessThanString: + # We are one of the following: Bottom, Boolean, Other, Int32, Number + bplt t1, PutByIdSecondaryTypeInt32, .opPutByIdTypeCheckLessThanInt32 + + # We are either Int32 or Number. + bpeq t1, PutByIdSecondaryTypeNumber, .opPutByIdTypeCheckNumber + + bqaeq t3, tagTypeNumber, .opPutByIdDoneCheckingTypes + jmp .opPutByIdSlow + +.opPutByIdTypeCheckNumber: + btqnz t3, tagTypeNumber, .opPutByIdDoneCheckingTypes + jmp .opPutByIdSlow + +.opPutByIdTypeCheckLessThanInt32: + # We are one of the following: Bottom, Boolean, Other. 
+ bpneq t1, PutByIdSecondaryTypeBoolean, .opPutByIdTypeCheckBottomOrOther + xorq ValueFalse, t3 + btqz t3, ~1, .opPutByIdDoneCheckingTypes + jmp .opPutByIdSlow + +.opPutByIdTypeCheckBottomOrOther: + bpeq t1, PutByIdSecondaryTypeOther, .opPutByIdTypeCheckOther + jmp .opPutByIdSlow + +.opPutByIdTypeCheckObjectWithStructureOrOther: + btqz t3, tagMask, .opPutByIdTypeCheckObjectWithStructure + btpnz t1, PutByIdPrimaryTypeObjectWithStructureOrOther, .opPutByIdTypeCheckOther + jmp .opPutByIdSlow + +.opPutByIdTypeCheckObjectWithStructure: + urshiftp 3, t1 + bineq t1, JSCell::m_structureID[t3], .opPutByIdSlow + +.opPutByIdDoneCheckingTypes: + loadisFromInstruction(6, t1) + + btiz t1, .opPutByIdNotTransition -_llint_op_put_by_id_transition_direct: - putByIdTransition(noAdditionalChecks, withInlineStorage) + # This is the transition case. t1 holds the new structureID. t2 holds the old structure ID. + # If we have a chain, we need to check it. t0 is the base. We may clobber t1 to use it as + # scratch. + loadpFromInstruction(7, t3) + btpz t3, .opPutByIdTransitionDirect + loadp StructureChain::m_vector[t3], t3 + assert(macro (ok) btpnz t3, ok end) -_llint_op_put_by_id_transition_direct_out_of_line: - putByIdTransition(noAdditionalChecks, withOutOfLineStorage) + structureIDToStructureWithScratch(t2, t1) + loadq Structure::m_prototype[t2], t2 + bqeq t2, ValueNull, .opPutByIdTransitionChainDone +.opPutByIdTransitionChainLoop: + # At this point, t2 contains a prototye, and [t3] contains the Structure* that we want that + # prototype to have. We don't want to have to load the Structure* for t2. Instead, we load + # the Structure* from [t3], and then we compare its id to the id in the header of t2. + loadp [t3], t1 + loadi JSCell::m_structureID[t2], t2 + # Now, t1 has the Structure* and t2 has the StructureID that we want that Structure* to have. + bineq t2, Structure::m_blob + StructureIDBlob::u.fields.structureID[t1], .opPutByIdSlow + addp 8, t3 + loadq Structure::m_prototype[t1], t2 + bqneq t2, ValueNull, .opPutByIdTransitionChainLoop +.opPutByIdTransitionChainDone: + # Reload the new structure, since we clobbered it above. + loadisFromInstruction(6, t1) -_llint_op_put_by_id_transition_normal: - putByIdTransition(structureChainChecks, withInlineStorage) +.opPutByIdTransitionDirect: + storei t1, JSCell::m_structureID[t0] +.opPutByIdNotTransition: + # The only thing live right now is t0, which holds the base. + loadisFromInstruction(3, t1) + loadConstantOrVariable(t1, t2) + loadisFromInstruction(5, t1) + storePropertyAtVariableOffset(t1, t0, t2, .opPutByIdSlow) + dispatch(9) -_llint_op_put_by_id_transition_normal_out_of_line: - putByIdTransition(structureChainChecks, withOutOfLineStorage) +.opPutByIdSlow: + callSlowPath(_llint_slow_path_put_by_id) + dispatch(9) _llint_op_get_by_val: traceExecution() loadisFromInstruction(2, t2) loadConstantOrVariableCell(t2, t0, .opGetByValSlow) - loadp JSCell::m_structure[t0], t2 loadpFromInstruction(4, t3) + move t0, t2 arrayProfile(t2, t3, t1) loadisFromInstruction(3, t3) loadConstantOrVariableInt32(t3, t1, .opGetByValSlow) sxi2q t1, t1 loadp JSObject::m_butterfly[t0], t3 + copyBarrier(t3, .opGetByValSlow) andi IndexingShapeMask, t2 bieq t2, Int32Shape, .opGetByValIsContiguous bineq t2, ContiguousShape, .opGetByValNotContiguous @@ -1333,61 +1466,6 @@ _llint_op_get_by_val: dispatch(6) -_llint_op_get_argument_by_val: - # FIXME: At some point we should array profile this. Right now it isn't necessary - # since the DFG will never turn a get_argument_by_val into a GetByVal. 
- traceExecution() - loadisFromInstruction(2, t0) - loadisFromInstruction(3, t1) - btqnz [cfr, t0, 8], .opGetArgumentByValSlow - loadConstantOrVariableInt32(t1, t2, .opGetArgumentByValSlow) - addi 1, t2 - loadi ArgumentCount + PayloadOffset[cfr], t1 - biaeq t2, t1, .opGetArgumentByValSlow - loadisFromInstruction(1, t3) - loadpFromInstruction(5, t1) - loadq ThisArgumentOffset[cfr, t2, 8], t0 - storeq t0, [cfr, t3, 8] - valueProfile(t0, 5, t1) - dispatch(6) - -.opGetArgumentByValSlow: - callSlowPath(_llint_slow_path_get_argument_by_val) - dispatch(6) - - -_llint_op_get_by_pname: - traceExecution() - loadisFromInstruction(3, t1) - loadConstantOrVariable(t1, t0) - loadisFromInstruction(4, t1) - assertNotConstant(t1) - bqneq t0, [cfr, t1, 8], .opGetByPnameSlow - loadisFromInstruction(2, t2) - loadisFromInstruction(5, t3) - loadConstantOrVariableCell(t2, t0, .opGetByPnameSlow) - assertNotConstant(t3) - loadq [cfr, t3, 8], t1 - loadp JSCell::m_structure[t0], t2 - bpneq t2, JSPropertyNameIterator::m_cachedStructure[t1], .opGetByPnameSlow - loadisFromInstruction(6, t3) - loadi PayloadOffset[cfr, t3, 8], t3 - subi 1, t3 - biaeq t3, JSPropertyNameIterator::m_numCacheableSlots[t1], .opGetByPnameSlow - bilt t3, JSPropertyNameIterator::m_cachedStructureInlineCapacity[t1], .opGetByPnameInlineProperty - addi firstOutOfLineOffset, t3 - subi JSPropertyNameIterator::m_cachedStructureInlineCapacity[t1], t3 -.opGetByPnameInlineProperty: - loadPropertyAtVariableOffset(t3, t0, t0) - loadisFromInstruction(1, t1) - storeq t0, [cfr, t1, 8] - dispatch(7) - -.opGetByPnameSlow: - callSlowPath(_llint_slow_path_get_by_pname) - dispatch(7) - - macro contiguousPutByVal(storeCallback) biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.publicLength[t0], .outOfBounds .storeResult: @@ -1404,18 +1482,19 @@ macro contiguousPutByVal(storeCallback) jmp .storeResult end -macro putByVal(holeCheck, slowPath) +macro putByVal(slowPath) traceExecution() writeBarrierOnOperands(1, 3) loadisFromInstruction(1, t0) loadConstantOrVariableCell(t0, t1, .opPutByValSlow) - loadp JSCell::m_structure[t1], t2 loadpFromInstruction(4, t3) + move t1, t2 arrayProfile(t2, t3, t0) loadisFromInstruction(2, t0) loadConstantOrVariableInt32(t0, t3, .opPutByValSlow) sxi2q t3, t3 loadp JSObject::m_butterfly[t1], t0 + copyBarrier(t0, .opPutByValSlow) andi IndexingShapeMask, t2 bineq t2, Int32Shape, .opPutByValNotInt32 contiguousPutByVal( @@ -1452,7 +1531,7 @@ macro putByVal(holeCheck, slowPath) .opPutByValNotContiguous: bineq t2, ArrayStorageShape, .opPutByValSlow biaeq t3, -sizeof IndexingHeader + IndexingHeader::u.lengths.vectorLength[t0], .opPutByValOutOfBounds - holeCheck(ArrayStorage::m_vector[t0, t3, 8], .opPutByValArrayStorageEmpty) + btqz ArrayStorage::m_vector[t0, t3, 8], .opPutByValArrayStorageEmpty .opPutByValArrayStorageStoreResult: loadisFromInstruction(3, t2) loadConstantOrVariable(t2, t1) @@ -1477,13 +1556,10 @@ macro putByVal(holeCheck, slowPath) end _llint_op_put_by_val: - putByVal(macro(slot, slowPath) - btqz slot, slowPath - end, _llint_slow_path_put_by_val) + putByVal(_llint_slow_path_put_by_val) _llint_op_put_by_val_direct: - putByVal(macro(slot, slowPath) - end, _llint_slow_path_put_by_val_direct) + putByVal(_llint_slow_path_put_by_val_direct) _llint_op_jmp: @@ -1513,8 +1589,8 @@ macro equalNull(cellHandler, immediateHandler) assertNotConstant(t0) loadq [cfr, t0, 8], t0 btqnz t0, tagMask, .immediate - loadp JSCell::m_structure[t0], t2 - cellHandler(t2, Structure::m_typeInfo + TypeInfo::m_flags[t2], .target) + 
loadStructureWithScratch(t0, t2, t1) + cellHandler(t2, JSCell::m_flags[t0], .target) dispatch(3) .target: @@ -1646,8 +1722,7 @@ _llint_op_switch_char: loadp CodeBlock::RareData::m_switchJumpTables + VectorBufferOffset[t2], t2 addp t3, t2 btqnz t1, tagMask, .opSwitchCharFallThrough - loadp JSCell::m_structure[t1], t0 - bbneq Structure::m_typeInfo + TypeInfo::m_type[t0], StringType, .opSwitchCharFallThrough + bbneq JSCell::m_type[t1], StringType, .opSwitchCharFallThrough bineq JSString::m_length[t1], 1, .opSwitchCharFallThrough loadp JSString::m_value[t1], t0 btpz t0, .opSwitchOnRope @@ -1673,36 +1748,18 @@ _llint_op_switch_char: dispatch(0) -_llint_op_new_func: - traceExecution() - loadisFromInstruction(3, t2) - btiz t2, .opNewFuncUnchecked - loadisFromInstruction(1, t1) - btqnz [cfr, t1, 8], .opNewFuncDone -.opNewFuncUnchecked: - callSlowPath(_llint_slow_path_new_func) -.opNewFuncDone: - dispatch(4) - - -_llint_op_new_captured_func: - traceExecution() - callSlowPath(_slow_path_new_captured_func) - dispatch(4) - - macro arrayProfileForCall() loadisFromInstruction(4, t3) negp t3 loadq ThisArgumentOffset[cfr, t3, 8], t0 btqnz t0, tagMask, .done - loadp JSCell::m_structure[t0], t0 - loadpFromInstruction(6, t1) - storep t0, ArrayProfile::m_lastSeenStructure[t1] + loadpFromInstruction((CallOpCodeSize - 2), t1) + loadi JSCell::m_structureID[t0], t3 + storei t3, ArrayProfile::m_lastSeenStructureID[t1] .done: end -macro doCall(slowPath) +macro doCall(slowPath, prepareCall) loadisFromInstruction(2, t0) loadpFromInstruction(5, t1) loadp LLIntCallLinkInfo::callee[t1], t2 @@ -1712,61 +1769,23 @@ macro doCall(slowPath) lshifti 3, t3 negp t3 addp cfr, t3 - loadp JSFunction::m_scope[t2], t0 storeq t2, Callee[t3] - storeq t0, ScopeChain[t3] loadisFromInstruction(3, t2) storei PC, ArgumentCount + TagOffset[cfr] - storeq cfr, CallerFrame[t3] storei t2, ArgumentCount + PayloadOffset[t3] - move t3, cfr - callTargetFunction(t1) + move t3, sp + prepareCall(LLIntCallLinkInfo::machineCodeTarget[t1], t2, t3, t4) + callTargetFunction(LLIntCallLinkInfo::machineCodeTarget[t1]) .opCallSlow: - slowPathForCall(slowPath) + slowPathForCall(slowPath, prepareCall) end - -_llint_op_tear_off_activation: - traceExecution() - loadisFromInstruction(1, t0) - btqz [cfr, t0, 8], .opTearOffActivationNotCreated - callSlowPath(_llint_slow_path_tear_off_activation) -.opTearOffActivationNotCreated: - dispatch(2) - - -_llint_op_tear_off_arguments: - traceExecution() - loadisFromInstruction(1, t0) - addq 1, t0 # Get the unmodifiedArgumentsRegister - btqz [cfr, t0, 8], .opTearOffArgumentsNotCreated - callSlowPath(_llint_slow_path_tear_off_arguments) -.opTearOffArgumentsNotCreated: - dispatch(3) - - _llint_op_ret: traceExecution() checkSwitchToJITForEpilogue() loadisFromInstruction(1, t2) - loadConstantOrVariable(t2, t0) - doReturn() - - -_llint_op_ret_object_or_this: - traceExecution() - checkSwitchToJITForEpilogue() - loadisFromInstruction(1, t2) - loadConstantOrVariable(t2, t0) - btqnz t0, tagMask, .opRetObjectOrThisNotObject - loadp JSCell::m_structure[t0], t2 - bbb Structure::m_typeInfo + TypeInfo::m_type[t2], ObjectType, .opRetObjectOrThisNotObject - doReturn() - -.opRetObjectOrThisNotObject: - loadisFromInstruction(2, t2) - loadConstantOrVariable(t2, t0) + loadConstantOrVariable(t2, r0) doReturn() @@ -1776,8 +1795,7 @@ _llint_op_to_primitive: loadisFromInstruction(1, t3) loadConstantOrVariable(t2, t0) btqnz t0, tagMask, .opToPrimitiveIsImm - loadp JSCell::m_structure[t0], t2 - bbneq Structure::m_typeInfo + TypeInfo::m_type[t2], 
StringType, .opToPrimitiveSlowCase + bbaeq JSCell::m_type[t0], ObjectType, .opToPrimitiveSlowCase .opToPrimitiveIsImm: storeq t0, [cfr, t3, 8] dispatch(3) @@ -1787,70 +1805,46 @@ _llint_op_to_primitive: dispatch(3) -_llint_op_next_pname: - traceExecution() - loadisFromInstruction(3, t1) - loadisFromInstruction(4, t2) - assertNotConstant(t1) - assertNotConstant(t2) - loadi PayloadOffset[cfr, t1, 8], t0 - bieq t0, PayloadOffset[cfr, t2, 8], .opNextPnameEnd - loadisFromInstruction(5, t2) - assertNotConstant(t2) - loadp [cfr, t2, 8], t2 - loadp JSPropertyNameIterator::m_jsStrings[t2], t3 - loadq [t3, t0, 8], t3 - addi 1, t0 - storei t0, PayloadOffset[cfr, t1, 8] - loadisFromInstruction(1, t1) - storeq t3, [cfr, t1, 8] - loadisFromInstruction(2, t3) - assertNotConstant(t3) - loadq [cfr, t3, 8], t3 - loadp JSCell::m_structure[t3], t1 - bpneq t1, JSPropertyNameIterator::m_cachedStructure[t2], .opNextPnameSlow - loadp JSPropertyNameIterator::m_cachedPrototypeChain[t2], t0 - loadp StructureChain::m_vector[t0], t0 - btpz [t0], .opNextPnameTarget -.opNextPnameCheckPrototypeLoop: - bqeq Structure::m_prototype[t1], ValueNull, .opNextPnameSlow - loadq Structure::m_prototype[t1], t2 - loadp JSCell::m_structure[t2], t1 - bpneq t1, [t0], .opNextPnameSlow - addp 8, t0 - btpnz [t0], .opNextPnameCheckPrototypeLoop -.opNextPnameTarget: - dispatchIntIndirect(6) - -.opNextPnameEnd: - dispatch(7) - -.opNextPnameSlow: - callSlowPath(_llint_slow_path_next_pname) # This either keeps the PC where it was (causing us to loop) or sets it to target. - dispatch(0) - - _llint_op_catch: # This is where we end up from the JIT's throw trampoline (because the # machine code return address will be set to _llint_op_catch), and from # the interpreter's throw trampoline (see _llint_throw_trampoline). # The throwing code must have known that we were throwing to the interpreter, # and have set VM::targetInterpreterPCForThrow. - loadp ScopeChain[cfr], t3 + loadp Callee[cfr], t3 andp MarkedBlockMask, t3 loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3 - loadp VM::callFrameForThrow[t3], cfr + restoreCalleeSavesFromVMCalleeSavesBuffer(t3, t0) + loadp VM::callFrameForCatch[t3], cfr + storep 0, VM::callFrameForCatch[t3] + restoreStackPointerAfterCall() + loadp CodeBlock[cfr], PB loadp CodeBlock::m_instructions[PB], PB loadp VM::targetInterpreterPCForThrow[t3], PC subp PB, PC rshiftp 3, PC + + callSlowPath(_llint_slow_path_check_if_exception_is_uncatchable_and_notify_profiler) + bpeq r1, 0, .isCatchableException + jmp _llint_throw_from_slow_path_trampoline + +.isCatchableException: + loadp Callee[cfr], t3 + andp MarkedBlockMask, t3 + loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3 + loadq VM::m_exception[t3], t0 storeq 0, VM::m_exception[t3] loadisFromInstruction(1, t2) storeq t0, [cfr, t2, 8] + + loadq Exception::m_value[t0], t3 + loadisFromInstruction(2, t2) + storeq t3, [cfr, t2, 8] + traceExecution() - dispatch(2) + dispatch(3) _llint_op_end: @@ -1858,18 +1852,24 @@ _llint_op_end: checkSwitchToJITForEpilogue() loadisFromInstruction(1, t0) assertNotConstant(t0) - loadq [cfr, t0, 8], t0 + loadq [cfr, t0, 8], r0 doReturn() _llint_throw_from_slow_path_trampoline: + loadp Callee[cfr], t1 + andp MarkedBlockMask, t1 + loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t1], t1 + copyCalleeSavesToVMCalleeSavesBuffer(t1, t2) + callSlowPath(_llint_slow_path_handle_exception) # When throwing from the interpreter (i.e. throwing from LLIntSlowPaths), so # the throw target is not necessarily interpreted code, we come to here. 
# This essentially emulates the JIT's throwing protocol. - loadp CodeBlock[cfr], t1 - loadp CodeBlock::m_vm[t1], t1 + loadp Callee[cfr], t1 + andp MarkedBlockMask, t1 + loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t1], t1 jmp VM::targetMachinePCForThrow[t1] @@ -1879,90 +1879,48 @@ _llint_throw_during_call_trampoline: macro nativeCallTrampoline(executableOffsetToFunction) + + functionPrologue() storep 0, CodeBlock[cfr] - if X86_64 - loadp ScopeChain[cfr], t0 - andp MarkedBlockMask, t0 - loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t0], t0 - storep cfr, VM::topCallFrame[t0] - loadp CallerFrame[cfr], t0 - loadq ScopeChain[t0], t1 - storeq t1, ScopeChain[cfr] - peek 0, t1 - storep t1, ReturnPC[cfr] - move cfr, t5 # t5 = rdi - subp 16 - 8, sp - loadp Callee[cfr], t4 # t4 = rsi - loadp JSFunction::m_executable[t4], t1 - move t0, cfr # Restore cfr to avoid loading from stack - call executableOffsetToFunction[t1] - addp 16 - 8, sp - loadp ScopeChain[cfr], t3 - andp MarkedBlockMask, t3 - loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3 - elsif ARM64 - loadp ScopeChain[cfr], t0 - andp MarkedBlockMask, t0 - loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t0], t0 - storep cfr, VM::topCallFrame[t0] - loadp CallerFrame[cfr], t2 - loadp ScopeChain[t2], t1 - storep t1, ScopeChain[cfr] - preserveReturnAddressAfterCall(t3) - storep t3, ReturnPC[cfr] - move cfr, t0 - loadp Callee[cfr], t1 - loadp JSFunction::m_executable[t1], t1 - move t2, cfr # Restore cfr to avoid loading from stack - call executableOffsetToFunction[t1] - restoreReturnAddressBeforeReturn(t3) - loadp ScopeChain[cfr], t3 - andp MarkedBlockMask, t3 - loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3 - elsif C_LOOP - loadp CallerFrame[cfr], t0 - loadp ScopeChain[t0], t1 - storep t1, ScopeChain[cfr] - - loadp ScopeChain[cfr], t3 - andp MarkedBlockMask, t3 - loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3 - storep cfr, VM::topCallFrame[t3] - - move t0, t2 - preserveReturnAddressAfterCall(t3) - storep t3, ReturnPC[cfr] - move cfr, t0 - loadp Callee[cfr], t1 - loadp JSFunction::m_executable[t1], t1 - move t2, cfr + loadp Callee[cfr], t0 + andp MarkedBlockMask, t0, t1 + loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t1], t1 + storep cfr, VM::topCallFrame[t1] + if ARM64 or C_LOOP + storep lr, ReturnPC[cfr] + end + move cfr, a0 + loadp Callee[cfr], t1 + loadp JSFunction::m_executable[t1], t1 + checkStackPointerAlignment(t3, 0xdead0001) + if C_LOOP cloopCallNative executableOffsetToFunction[t1] - - restoreReturnAddressBeforeReturn(t3) - loadp ScopeChain[cfr], t3 - andp MarkedBlockMask, t3 - loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3 else - error + if X86_64_WIN + subp 32, sp + end + call executableOffsetToFunction[t1] + if X86_64_WIN + addp 32, sp + end end + loadp Callee[cfr], t3 + andp MarkedBlockMask, t3 + loadp MarkedBlock::m_weakSet + WeakSet::m_vm[t3], t3 - btqnz VM::m_exception[t3], .exception + functionEpilogue() + + btqnz VM::m_exception[t3], .handleException ret -.exception: - preserveReturnAddressAfterCall(t1) # This is really only needed on X86_64 - loadi ArgumentCount + TagOffset[cfr], PC - loadp CodeBlock[cfr], PB - loadp CodeBlock::m_vm[PB], t0 - loadp CodeBlock::m_instructions[PB], PB - storep cfr, VM::topCallFrame[t0] - callSlowPath(_llint_throw_from_native_call) + +.handleException: + storep cfr, VM::topCallFrame[t3] + restoreStackPointerAfterCall() jmp _llint_throw_from_slow_path_trampoline end - -macro getGlobalObject(dst) - loadp CodeBlock[cfr], t0 - loadp CodeBlock::m_globalObject[t0], t0 +macro 
getConstantScope(dst) + loadpFromInstruction(6, t0) loadisFromInstruction(dst, t1) storeq t0, [cfr, t1, 8] end @@ -1975,15 +1933,9 @@ macro varInjectionCheck(slowPath) end macro resolveScope() - loadp CodeBlock[cfr], t0 - loadisFromInstruction(4, t2) - btbz CodeBlock::m_needsActivation[t0], .resolveScopeAfterActivationCheck - loadis CodeBlock::m_activationRegister[t0], t1 - btpz [cfr, t1, 8], .resolveScopeAfterActivationCheck - addi 1, t2 - -.resolveScopeAfterActivationCheck: - loadp ScopeChain[cfr], t0 + loadisFromInstruction(5, t2) + loadisFromInstruction(2, t0) + loadp [cfr, t0, 8], t0 btiz t2, .resolveScopeLoopEnd .resolveScopeLoop: @@ -1999,73 +1951,90 @@ end _llint_op_resolve_scope: traceExecution() - loadisFromInstruction(3, t0) + loadisFromInstruction(4, t0) #rGlobalProperty: bineq t0, GlobalProperty, .rGlobalVar - getGlobalObject(1) - dispatch(6) + getConstantScope(1) + dispatch(7) .rGlobalVar: - bineq t0, GlobalVar, .rClosureVar - getGlobalObject(1) - dispatch(6) + bineq t0, GlobalVar, .rGlobalLexicalVar + getConstantScope(1) + dispatch(7) + +.rGlobalLexicalVar: + bineq t0, GlobalLexicalVar, .rClosureVar + getConstantScope(1) + dispatch(7) .rClosureVar: - bineq t0, ClosureVar, .rGlobalPropertyWithVarInjectionChecks + bineq t0, ClosureVar, .rModuleVar resolveScope() - dispatch(6) + dispatch(7) + +.rModuleVar: + bineq t0, ModuleVar, .rGlobalPropertyWithVarInjectionChecks + getConstantScope(1) + dispatch(7) .rGlobalPropertyWithVarInjectionChecks: bineq t0, GlobalPropertyWithVarInjectionChecks, .rGlobalVarWithVarInjectionChecks varInjectionCheck(.rDynamic) - getGlobalObject(1) - dispatch(6) + getConstantScope(1) + dispatch(7) .rGlobalVarWithVarInjectionChecks: - bineq t0, GlobalVarWithVarInjectionChecks, .rClosureVarWithVarInjectionChecks + bineq t0, GlobalVarWithVarInjectionChecks, .rGlobalLexicalVarWithVarInjectionChecks varInjectionCheck(.rDynamic) - getGlobalObject(1) - dispatch(6) + getConstantScope(1) + dispatch(7) + +.rGlobalLexicalVarWithVarInjectionChecks: + bineq t0, GlobalLexicalVarWithVarInjectionChecks, .rClosureVarWithVarInjectionChecks + varInjectionCheck(.rDynamic) + getConstantScope(1) + dispatch(7) .rClosureVarWithVarInjectionChecks: bineq t0, ClosureVarWithVarInjectionChecks, .rDynamic varInjectionCheck(.rDynamic) resolveScope() - dispatch(6) + dispatch(7) .rDynamic: - callSlowPath(_llint_slow_path_resolve_scope) - dispatch(6) + callSlowPath(_slow_path_resolve_scope) + dispatch(7) macro loadWithStructureCheck(operand, slowPath) loadisFromInstruction(operand, t0) loadq [cfr, t0, 8], t0 + loadStructureWithScratch(t0, t2, t1) loadpFromInstruction(5, t1) - bpneq JSCell::m_structure[t0], t1, slowPath + bpneq t2, t1, slowPath end -macro getProperty() +macro getProperty(slow) loadisFromInstruction(6, t1) - loadPropertyAtVariableOffset(t1, t0, t2) + loadPropertyAtVariableOffset(t1, t0, t2, slow) valueProfile(t2, 7, t0) loadisFromInstruction(1, t0) storeq t2, [cfr, t0, 8] end -macro getGlobalVar() +macro getGlobalVar(tdzCheckIfNecessary) loadpFromInstruction(6, t0) loadq [t0], t0 + tdzCheckIfNecessary(t0) valueProfile(t0, 7, t1) loadisFromInstruction(1, t1) storeq t0, [cfr, t1, 8] end macro getClosureVar() - loadp JSVariableObject::m_registers[t0], t0 loadisFromInstruction(6, t1) - loadq [t0, t1, 8], t0 + loadq JSEnvironmentRecord_variables[t0, t1, 8], t0 valueProfile(t0, 7, t1) loadisFromInstruction(1, t1) storeq t0, [cfr, t1, 8] @@ -2074,17 +2043,25 @@ end _llint_op_get_from_scope: traceExecution() loadisFromInstruction(4, t0) - andi ResolveModeMask, t0 + andi 
ResolveTypeMask, t0 #gGlobalProperty: bineq t0, GlobalProperty, .gGlobalVar loadWithStructureCheck(2, .gDynamic) - getProperty() + getProperty(.gDynamic) dispatch(8) .gGlobalVar: - bineq t0, GlobalVar, .gClosureVar - getGlobalVar() + bineq t0, GlobalVar, .gGlobalLexicalVar + getGlobalVar(macro(v) end) + dispatch(8) + +.gGlobalLexicalVar: + bineq t0, GlobalLexicalVar, .gClosureVar + getGlobalVar( + macro (value) + bqeq value, ValueEmpty, .gDynamic + end) dispatch(8) .gClosureVar: @@ -2096,14 +2073,22 @@ _llint_op_get_from_scope: .gGlobalPropertyWithVarInjectionChecks: bineq t0, GlobalPropertyWithVarInjectionChecks, .gGlobalVarWithVarInjectionChecks loadWithStructureCheck(2, .gDynamic) - getProperty() + getProperty(.gDynamic) dispatch(8) .gGlobalVarWithVarInjectionChecks: - bineq t0, GlobalVarWithVarInjectionChecks, .gClosureVarWithVarInjectionChecks + bineq t0, GlobalVarWithVarInjectionChecks, .gGlobalLexicalVarWithVarInjectionChecks varInjectionCheck(.gDynamic) - loadVariable(2, t0) - getGlobalVar() + getGlobalVar(macro(v) end) + dispatch(8) + +.gGlobalLexicalVarWithVarInjectionChecks: + bineq t0, GlobalLexicalVarWithVarInjectionChecks, .gClosureVarWithVarInjectionChecks + varInjectionCheck(.gDynamic) + getGlobalVar( + macro (value) + bqeq value, ValueEmpty, .gDynamic + end) dispatch(8) .gClosureVarWithVarInjectionChecks: @@ -2118,47 +2103,82 @@ _llint_op_get_from_scope: dispatch(8) -macro putProperty() +macro putProperty(slow) loadisFromInstruction(3, t1) loadConstantOrVariable(t1, t2) loadisFromInstruction(6, t1) - storePropertyAtVariableOffset(t1, t0, t2) + storePropertyAtVariableOffset(t1, t0, t2, slow) end -macro putGlobalVar() +macro putGlobalVariable() loadisFromInstruction(3, t0) loadConstantOrVariable(t0, t1) loadpFromInstruction(5, t2) - notifyWrite(t2, t1, t0, .pDynamic) loadpFromInstruction(6, t0) + notifyWrite(t2, .pDynamic) storeq t1, [t0] end macro putClosureVar() loadisFromInstruction(3, t1) loadConstantOrVariable(t1, t2) - loadp JSVariableObject::m_registers[t0], t0 loadisFromInstruction(6, t1) - storeq t2, [t0, t1, 8] + storeq t2, JSEnvironmentRecord_variables[t0, t1, 8] +end + +macro putLocalClosureVar() + loadisFromInstruction(3, t1) + loadConstantOrVariable(t1, t2) + loadpFromInstruction(5, t3) + btpz t3, .noVariableWatchpointSet + notifyWrite(t3, .pDynamic) +.noVariableWatchpointSet: + loadisFromInstruction(6, t1) + storeq t2, JSEnvironmentRecord_variables[t0, t1, 8] +end + +macro checkTDZInGlobalPutToScopeIfNecessary() + loadisFromInstruction(4, t0) + andi InitializationModeMask, t0 + rshifti InitializationModeShift, t0 + bieq t0, Initialization, .noNeedForTDZCheck + loadpFromInstruction(6, t0) + loadq [t0], t0 + bqeq t0, ValueEmpty, .pDynamic +.noNeedForTDZCheck: end _llint_op_put_to_scope: traceExecution() loadisFromInstruction(4, t0) - andi ResolveModeMask, t0 + andi ResolveTypeMask, t0 -#pGlobalProperty: +#pLocalClosureVar: + bineq t0, LocalClosureVar, .pGlobalProperty + writeBarrierOnOperands(1, 3) + loadVariable(1, t0) + putLocalClosureVar() + dispatch(7) + +.pGlobalProperty: bineq t0, GlobalProperty, .pGlobalVar writeBarrierOnOperands(1, 3) loadWithStructureCheck(1, .pDynamic) - putProperty() + putProperty(.pDynamic) dispatch(7) .pGlobalVar: - bineq t0, GlobalVar, .pClosureVar + bineq t0, GlobalVar, .pGlobalLexicalVar writeBarrierOnGlobalObject(3) - putGlobalVar() + putGlobalVariable() + dispatch(7) + +.pGlobalLexicalVar: + bineq t0, GlobalLexicalVar, .pClosureVar + writeBarrierOnGlobalLexicalEnvironment(3) + checkTDZInGlobalPutToScopeIfNecessary() + 
putGlobalVariable() dispatch(7) .pClosureVar: @@ -2172,24 +2192,132 @@ _llint_op_put_to_scope: bineq t0, GlobalPropertyWithVarInjectionChecks, .pGlobalVarWithVarInjectionChecks writeBarrierOnOperands(1, 3) loadWithStructureCheck(1, .pDynamic) - putProperty() + putProperty(.pDynamic) dispatch(7) .pGlobalVarWithVarInjectionChecks: - bineq t0, GlobalVarWithVarInjectionChecks, .pClosureVarWithVarInjectionChecks + bineq t0, GlobalVarWithVarInjectionChecks, .pGlobalLexicalVarWithVarInjectionChecks writeBarrierOnGlobalObject(3) varInjectionCheck(.pDynamic) - putGlobalVar() + putGlobalVariable() + dispatch(7) + +.pGlobalLexicalVarWithVarInjectionChecks: + bineq t0, GlobalLexicalVarWithVarInjectionChecks, .pClosureVarWithVarInjectionChecks + writeBarrierOnGlobalLexicalEnvironment(3) + varInjectionCheck(.pDynamic) + checkTDZInGlobalPutToScopeIfNecessary() + putGlobalVariable() dispatch(7) .pClosureVarWithVarInjectionChecks: - bineq t0, ClosureVarWithVarInjectionChecks, .pDynamic + bineq t0, ClosureVarWithVarInjectionChecks, .pModuleVar writeBarrierOnOperands(1, 3) varInjectionCheck(.pDynamic) loadVariable(1, t0) putClosureVar() dispatch(7) +.pModuleVar: + bineq t0, ModuleVar, .pDynamic + callSlowPath(_slow_path_throw_strict_mode_readonly_property_write_error) + dispatch(7) + .pDynamic: callSlowPath(_llint_slow_path_put_to_scope) dispatch(7) + + +_llint_op_get_from_arguments: + traceExecution() + loadVariable(2, t0) + loadi 24[PB, PC, 8], t1 + loadq DirectArguments_storage[t0, t1, 8], t0 + valueProfile(t0, 4, t1) + loadisFromInstruction(1, t1) + storeq t0, [cfr, t1, 8] + dispatch(5) + + +_llint_op_put_to_arguments: + traceExecution() + writeBarrierOnOperands(1, 3) + loadVariable(1, t0) + loadi 16[PB, PC, 8], t1 + loadisFromInstruction(3, t3) + loadConstantOrVariable(t3, t2) + storeq t2, DirectArguments_storage[t0, t1, 8] + dispatch(4) + + +_llint_op_get_parent_scope: + traceExecution() + loadVariable(2, t0) + loadp JSScope::m_next[t0], t0 + loadisFromInstruction(1, t1) + storeq t0, [cfr, t1, 8] + dispatch(3) + + +_llint_op_profile_type: + traceExecution() + loadp CodeBlock[cfr], t1 + loadp CodeBlock::m_vm[t1], t1 + # t1 is holding the pointer to the typeProfilerLog. + loadp VM::m_typeProfilerLog[t1], t1 + # t2 is holding the pointer to the current log entry. + loadp TypeProfilerLog::m_currentLogEntryPtr[t1], t2 + + # t0 is holding the JSValue argument. + loadisFromInstruction(1, t3) + loadConstantOrVariable(t3, t0) + + bqeq t0, ValueEmpty, .opProfileTypeDone + # Store the JSValue onto the log entry. + storeq t0, TypeProfilerLog::LogEntry::value[t2] + + # Store the TypeLocation onto the log entry. + loadpFromInstruction(2, t3) + storep t3, TypeProfilerLog::LogEntry::location[t2] + + btqz t0, tagMask, .opProfileTypeIsCell + storei 0, TypeProfilerLog::LogEntry::structureID[t2] + jmp .opProfileTypeSkipIsCell +.opProfileTypeIsCell: + loadi JSCell::m_structureID[t0], t3 + storei t3, TypeProfilerLog::LogEntry::structureID[t2] +.opProfileTypeSkipIsCell: + + # Increment the current log entry. 
+ addp sizeof TypeProfilerLog::LogEntry, t2 + storep t2, TypeProfilerLog::m_currentLogEntryPtr[t1] + + loadp TypeProfilerLog::m_logEndPtr[t1], t1 + bpneq t2, t1, .opProfileTypeDone + callSlowPath(_slow_path_profile_type_clear_log) + +.opProfileTypeDone: + dispatch(6) + +_llint_op_profile_control_flow: + traceExecution() + loadpFromInstruction(1, t0) + addq 1, BasicBlockLocation::m_executionCount[t0] + dispatch(2) + + +_llint_op_get_rest_length: + traceExecution() + loadi PayloadOffset + ArgumentCount[cfr], t0 + subi 1, t0 + loadisFromInstruction(2, t1) + bilteq t0, t1, .storeZero + subi t1, t0 + jmp .boxUp +.storeZero: + move 0, t0 +.boxUp: + orq tagTypeNumber, t0 + loadisFromInstruction(1, t1) + storeq t0, [cfr, t1, 8] + dispatch(3) |
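For readers skimming the new writeBarrierOnOperand / skipIfIsRememberedOrInEden path in the hunk above: the following is a minimal, self-contained C++ sketch of the branch shape that fast path implements, under stated assumptions. Cell, cellState, and writeBarrierSlow here are illustrative stand-ins, not JavaScriptCore's real types or API; the real asm tests the cell's state byte inline (btbnz) and only calls _llint_write_barrier_slow when that byte is still zero.

    // Hypothetical, simplified model of the LLInt write-barrier fast path.
    // All names are illustrative stand-ins for JSC internals.
    #include <cstdint>
    #include <cstdio>

    struct Cell {
        uint8_t cellState { 0 };   // stand-in for the state byte tested by btbnz
    };

    // Models the slow path (_llint_write_barrier_slow): record the owner so the
    // GC knows about the new reference, then mark it as already remembered.
    static void writeBarrierSlow(Cell& owner)
    {
        std::printf("remembering cell %p\n", static_cast<void*>(&owner));
        owner.cellState = 1;
    }

    // Fast path shape: only cell values need a barrier, and a non-zero state
    // byte (already remembered, or still in Eden) skips the slow call entirely.
    static void writeBarrier(Cell& owner, bool valueIsCell)
    {
        if (!valueIsCell)
            return;
        if (owner.cellState)
            return;
        writeBarrierSlow(owner);
    }

    int main()
    {
        Cell owner;
        writeBarrier(owner, true);  // slow path runs once
        writeBarrier(owner, true);  // fast path: state byte is now non-zero
    }

The design point the patch reflects is that the check became unconditional (the old `if GGC` guard is gone), so the cheap inline byte test runs on every barriered store while the expensive remembered-set work stays out of line.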
