Diffstat (limited to 'Source/JavaScriptCore/jit')
83 files changed, 4997 insertions, 8518 deletions
diff --git a/Source/JavaScriptCore/jit/ArityCheckFailReturnThunks.cpp b/Source/JavaScriptCore/jit/ArityCheckFailReturnThunks.cpp deleted file mode 100644 index d522b8125..000000000 --- a/Source/JavaScriptCore/jit/ArityCheckFailReturnThunks.cpp +++ /dev/null @@ -1,135 +0,0 @@ -/* - * Copyright (C) 2013 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "config.h" -#include "ArityCheckFailReturnThunks.h" - -#if ENABLE(JIT) - -#include "AssemblyHelpers.h" -#include "LinkBuffer.h" -#include "JSCInlines.h" -#include "StackAlignment.h" - -namespace JSC { - -ArityCheckFailReturnThunks::ArityCheckFailReturnThunks() - : m_nextSize(0) -{ -} - -ArityCheckFailReturnThunks::~ArityCheckFailReturnThunks() { } - -CodeLocationLabel* ArityCheckFailReturnThunks::returnPCsFor( - VM& vm, unsigned numExpectedArgumentsIncludingThis) -{ - ASSERT(numExpectedArgumentsIncludingThis >= 1); - - numExpectedArgumentsIncludingThis = WTF::roundUpToMultipleOf( - stackAlignmentRegisters(), numExpectedArgumentsIncludingThis); - - { - ConcurrentJITLocker locker(m_lock); - if (numExpectedArgumentsIncludingThis < m_nextSize) - return m_returnPCArrays.last().get(); - } - - ASSERT(!isCompilationThread()); - - numExpectedArgumentsIncludingThis = std::max(numExpectedArgumentsIncludingThis, m_nextSize * 2); - - AssemblyHelpers jit(&vm, 0); - - Vector<AssemblyHelpers::Label> labels; - - for (unsigned size = m_nextSize; size <= numExpectedArgumentsIncludingThis; size += stackAlignmentRegisters()) { - labels.append(jit.label()); - - jit.load32( - AssemblyHelpers::Address( - AssemblyHelpers::stackPointerRegister, - (JSStack::ArgumentCount - JSStack::CallerFrameAndPCSize) * sizeof(Register) + - PayloadOffset), - GPRInfo::regT4); - jit.add32( - AssemblyHelpers::TrustedImm32( - JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize + size - 1), - GPRInfo::regT4, GPRInfo::regT2); - jit.lshift32(AssemblyHelpers::TrustedImm32(3), GPRInfo::regT2); - jit.addPtr(AssemblyHelpers::stackPointerRegister, GPRInfo::regT2); - jit.loadPtr(GPRInfo::regT2, GPRInfo::regT2); - - jit.addPtr( - AssemblyHelpers::TrustedImm32(size * sizeof(Register)), - AssemblyHelpers::stackPointerRegister); - - // Thunks like ours want to use the return PC to figure out where things - // were saved. 
So, we pay it forward. - jit.store32( - GPRInfo::regT4, - AssemblyHelpers::Address( - AssemblyHelpers::stackPointerRegister, - (JSStack::ArgumentCount - JSStack::CallerFrameAndPCSize) * sizeof(Register) + - PayloadOffset)); - - jit.jump(GPRInfo::regT2); - } - - // Sadly, we cannot fail here because the LLInt may need us. - LinkBuffer linkBuffer(vm, jit, GLOBAL_THUNK_ID, JITCompilationMustSucceed); - - unsigned returnPCsSize = numExpectedArgumentsIncludingThis / stackAlignmentRegisters() + 1; - std::unique_ptr<CodeLocationLabel[]> returnPCs = - std::make_unique<CodeLocationLabel[]>(returnPCsSize); - for (unsigned size = 0; size <= numExpectedArgumentsIncludingThis; size += stackAlignmentRegisters()) { - unsigned index = size / stackAlignmentRegisters(); - RELEASE_ASSERT(index < returnPCsSize); - if (size < m_nextSize) - returnPCs[index] = m_returnPCArrays.last()[index]; - else - returnPCs[index] = linkBuffer.locationOf(labels[(size - m_nextSize) / stackAlignmentRegisters()]); - } - - CodeLocationLabel* result = returnPCs.get(); - - { - ConcurrentJITLocker locker(m_lock); - m_returnPCArrays.append(WTF::move(returnPCs)); - m_refs.append(FINALIZE_CODE(linkBuffer, ("Arity check fail return thunks for up to numArgs = %u", numExpectedArgumentsIncludingThis))); - m_nextSize = numExpectedArgumentsIncludingThis + stackAlignmentRegisters(); - } - - return result; -} - -CodeLocationLabel ArityCheckFailReturnThunks::returnPCFor(VM& vm, unsigned slotsToAdd) -{ - return returnPCsFor(vm, slotsToAdd)[slotsToAdd / stackAlignmentRegisters()]; -} - -} // namespace JSC - -#endif // ENABLE(JIT) - diff --git a/Source/JavaScriptCore/jit/ArityCheckFailReturnThunks.h b/Source/JavaScriptCore/jit/ArityCheckFailReturnThunks.h deleted file mode 100644 index b2d034139..000000000 --- a/Source/JavaScriptCore/jit/ArityCheckFailReturnThunks.h +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright (C) 2013 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
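The deleted returnPCsFor() above implements a grow-only, lock-guarded cache: compilation threads take a fast path under the lock, and a miss regenerates the whole label array at roughly double the size while old arrays are kept alive because callers may still hold pointers into them. A minimal sketch of that pattern, with stand-in names (GrowOnlyThunkTable, makeThunk) rather than JSC's classes:

```cpp
#include <algorithm>
#include <cstdint>
#include <cstdio>
#include <memory>
#include <mutex>
#include <vector>

// Sketch only: mirrors the control flow of returnPCsFor(), not its API.
class GrowOnlyThunkTable {
public:
    using Label = const void*; // stands in for CodeLocationLabel

    Label* labelsFor(unsigned count)
    {
        {
            std::lock_guard<std::mutex> locker(m_lock);
            if (count < m_nextSize)
                return m_arrays.back().get(); // fast path, safe for concurrent readers
        }
        unsigned newSize = std::max(count, m_nextSize * 2) + 1;
        auto fresh = std::make_unique<Label[]>(newSize);
        for (unsigned i = 0; i < newSize; ++i)
            fresh[i] = makeThunk(i); // stands in for FINALIZE_CODE + linkBuffer.locationOf(label)
        Label* result = fresh.get();
        {
            std::lock_guard<std::mutex> locker(m_lock);
            m_arrays.push_back(std::move(fresh)); // keep old arrays: readers may hold pointers
            m_nextSize = newSize;
        }
        return result;
    }

private:
    static Label makeThunk(unsigned i) { return reinterpret_cast<Label>(uintptr_t(0x1000 + i)); }

    std::vector<std::unique_ptr<Label[]>> m_arrays;
    unsigned m_nextSize { 0 };
    std::mutex m_lock;
};

int main()
{
    GrowOnlyThunkTable table;
    std::printf("%p\n", table.labelsFor(4)[4]); // miss: generates the table
    std::printf("%p\n", table.labelsFor(3)[3]); // hit: no regeneration
}
```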
- */ - -#ifndef ArityCheckFailReturnThunks_h -#define ArityCheckFailReturnThunks_h - -#if ENABLE(JIT) - -#include "CodeLocation.h" -#include "ConcurrentJITLock.h" -#include <wtf/HashMap.h> - -namespace JSC { - -class ArityCheckFailReturnThunks { -public: - ArityCheckFailReturnThunks(); - ~ArityCheckFailReturnThunks(); - - // Returns a pointer to an array of return labels indexed by missingArgs. - CodeLocationLabel* returnPCsFor(VM&, unsigned numExpectedArgumentsIncludingThis); - - CodeLocationLabel returnPCFor(VM&, unsigned slotsToAdd); - -private: - Vector<std::unique_ptr<CodeLocationLabel[]>> m_returnPCArrays; - unsigned m_nextSize; - Vector<MacroAssemblerCodeRef> m_refs; - ConcurrentJITLock m_lock; -}; - -} // namespace JSC - -#endif // ENABLE(JIT) - -#endif // ArityCheckFailReturnThunks_h - diff --git a/Source/JavaScriptCore/jit/AssemblyHelpers.cpp b/Source/JavaScriptCore/jit/AssemblyHelpers.cpp index 09133a8e6..ddf1d6359 100644 --- a/Source/JavaScriptCore/jit/AssemblyHelpers.cpp +++ b/Source/JavaScriptCore/jit/AssemblyHelpers.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2013 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,9 +28,6 @@ #if ENABLE(JIT) -#include "JITOperations.h" -#include "JSCInlines.h" - namespace JSC { ExecutableBase* AssemblyHelpers::executableFor(const CodeOrigin& codeOrigin) @@ -55,14 +52,6 @@ Vector<BytecodeAndMachineOffset>& AssemblyHelpers::decodedCodeMapFor(CodeBlock* return result.iterator->value; } -void AssemblyHelpers::purifyNaN(FPRReg fpr) -{ - MacroAssembler::Jump notNaN = branchDouble(DoubleEqual, fpr, fpr); - static const double NaN = PNaN; - loadDouble(TrustedImmPtr(&NaN), fpr); - notNaN.link(this); -} - #if ENABLE(SAMPLING_FLAGS) void AssemblyHelpers::setSamplingFlag(int32_t flag) { @@ -85,7 +74,7 @@ void AssemblyHelpers::jitAssertIsInt32(GPRReg gpr) { #if CPU(X86_64) Jump checkInt32 = branch64(BelowOrEqual, gpr, TrustedImm64(static_cast<uintptr_t>(0xFFFFFFFFu))); - abortWithReason(AHIsNotInt32); + breakpoint(); checkInt32.link(this); #else UNUSED_PARAM(gpr); @@ -95,14 +84,14 @@ void AssemblyHelpers::jitAssertIsInt32(GPRReg gpr) void AssemblyHelpers::jitAssertIsJSInt32(GPRReg gpr) { Jump checkJSInt32 = branch64(AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister); - abortWithReason(AHIsNotJSInt32); + breakpoint(); checkJSInt32.link(this); } void AssemblyHelpers::jitAssertIsJSNumber(GPRReg gpr) { Jump checkJSNumber = branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister); - abortWithReason(AHIsNotJSNumber); + breakpoint(); checkJSNumber.link(this); } @@ -111,27 +100,15 @@ void AssemblyHelpers::jitAssertIsJSDouble(GPRReg gpr) Jump checkJSInt32 = branch64(AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister); Jump checkJSNumber = branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister); checkJSInt32.link(this); - abortWithReason(AHIsNotJSDouble); + breakpoint(); checkJSNumber.link(this); } void AssemblyHelpers::jitAssertIsCell(GPRReg gpr) { Jump checkCell = branchTest64(MacroAssembler::Zero, gpr, GPRInfo::tagMaskRegister); - abortWithReason(AHIsNotCell); - checkCell.link(this); -} - -void AssemblyHelpers::jitAssertTagsInPlace() -{ - Jump ok = branch64(Equal, GPRInfo::tagTypeNumberRegister, TrustedImm64(TagTypeNumber)); - abortWithReason(AHTagTypeNumberNotInPlace); breakpoint(); - ok.link(this); - - ok = branch64(Equal, 
GPRInfo::tagMaskRegister, TrustedImm64(TagMask)); - abortWithReason(AHTagMaskNotInPlace); - ok.link(this); + checkCell.link(this); } #elif USE(JSVALUE32_64) void AssemblyHelpers::jitAssertIsInt32(GPRReg gpr) @@ -142,7 +119,7 @@ void AssemblyHelpers::jitAssertIsInt32(GPRReg gpr) void AssemblyHelpers::jitAssertIsJSInt32(GPRReg gpr) { Jump checkJSInt32 = branch32(Equal, gpr, TrustedImm32(JSValue::Int32Tag)); - abortWithReason(AHIsNotJSInt32); + breakpoint(); checkJSInt32.link(this); } @@ -150,7 +127,7 @@ void AssemblyHelpers::jitAssertIsJSNumber(GPRReg gpr) { Jump checkJSInt32 = branch32(Equal, gpr, TrustedImm32(JSValue::Int32Tag)); Jump checkJSDouble = branch32(Below, gpr, TrustedImm32(JSValue::LowestTag)); - abortWithReason(AHIsNotJSNumber); + breakpoint(); checkJSInt32.link(this); checkJSDouble.link(this); } @@ -158,112 +135,33 @@ void AssemblyHelpers::jitAssertIsJSNumber(GPRReg gpr) void AssemblyHelpers::jitAssertIsJSDouble(GPRReg gpr) { Jump checkJSDouble = branch32(Below, gpr, TrustedImm32(JSValue::LowestTag)); - abortWithReason(AHIsNotJSDouble); + breakpoint(); checkJSDouble.link(this); } void AssemblyHelpers::jitAssertIsCell(GPRReg gpr) { Jump checkCell = branch32(Equal, gpr, TrustedImm32(JSValue::CellTag)); - abortWithReason(AHIsNotCell); + breakpoint(); checkCell.link(this); } - -void AssemblyHelpers::jitAssertTagsInPlace() -{ -} #endif // USE(JSVALUE32_64) void AssemblyHelpers::jitAssertHasValidCallFrame() { Jump checkCFR = branchTestPtr(Zero, GPRInfo::callFrameRegister, TrustedImm32(7)); - abortWithReason(AHCallFrameMisaligned); + breakpoint(); checkCFR.link(this); } void AssemblyHelpers::jitAssertIsNull(GPRReg gpr) { Jump checkNull = branchTestPtr(Zero, gpr); - abortWithReason(AHIsNotNull); + breakpoint(); checkNull.link(this); } - -void AssemblyHelpers::jitAssertArgumentCountSane() -{ - Jump ok = branch32(Below, payloadFor(JSStack::ArgumentCount), TrustedImm32(10000000)); - abortWithReason(AHInsaneArgumentCount); - ok.link(this); -} #endif // !ASSERT_DISABLED -void AssemblyHelpers::callExceptionFuzz() -{ - if (!Options::enableExceptionFuzz()) - return; - - ASSERT(stackAlignmentBytes() >= sizeof(void*) * 2); - subPtr(TrustedImm32(stackAlignmentBytes()), stackPointerRegister); - poke(GPRInfo::returnValueGPR, 0); - poke(GPRInfo::returnValueGPR2, 1); - move(TrustedImmPtr(bitwise_cast<void*>(operationExceptionFuzz)), GPRInfo::nonPreservedNonReturnGPR); - call(GPRInfo::nonPreservedNonReturnGPR); - peek(GPRInfo::returnValueGPR, 0); - peek(GPRInfo::returnValueGPR2, 1); - addPtr(TrustedImm32(stackAlignmentBytes()), stackPointerRegister); -} - -AssemblyHelpers::Jump AssemblyHelpers::emitExceptionCheck(ExceptionCheckKind kind, ExceptionJumpWidth width) -{ - callExceptionFuzz(); - - if (width == FarJumpWidth) - kind = (kind == NormalExceptionCheck ? InvertedExceptionCheck : NormalExceptionCheck); - - Jump result; -#if USE(JSVALUE64) - result = branchTest64(kind == NormalExceptionCheck ? NonZero : Zero, AbsoluteAddress(vm()->addressOfException())); -#elif USE(JSVALUE32_64) - result = branch32(kind == NormalExceptionCheck ? 
NotEqual : Equal, AbsoluteAddress(vm()->addressOfException()), TrustedImm32(0)); -#endif - - if (width == NormalJumpWidth) - return result; - - PatchableJump realJump = patchableJump(); - result.link(this); - - return realJump.m_jump; -} - -void AssemblyHelpers::emitStoreStructureWithTypeInfo(AssemblyHelpers& jit, TrustedImmPtr structure, RegisterID dest) -{ - const Structure* structurePtr = static_cast<const Structure*>(structure.m_value); -#if USE(JSVALUE64) - jit.store64(TrustedImm64(structurePtr->idBlob()), MacroAssembler::Address(dest, JSCell::structureIDOffset())); - if (!ASSERT_DISABLED) { - Jump correctStructure = jit.branch32(Equal, MacroAssembler::Address(dest, JSCell::structureIDOffset()), TrustedImm32(structurePtr->id())); - jit.abortWithReason(AHStructureIDIsValid); - correctStructure.link(&jit); - - Jump correctIndexingType = jit.branch8(Equal, MacroAssembler::Address(dest, JSCell::indexingTypeOffset()), TrustedImm32(structurePtr->indexingType())); - jit.abortWithReason(AHIndexingTypeIsValid); - correctIndexingType.link(&jit); - - Jump correctType = jit.branch8(Equal, MacroAssembler::Address(dest, JSCell::typeInfoTypeOffset()), TrustedImm32(structurePtr->typeInfo().type())); - jit.abortWithReason(AHTypeInfoIsValid); - correctType.link(&jit); - - Jump correctFlags = jit.branch8(Equal, MacroAssembler::Address(dest, JSCell::typeInfoFlagsOffset()), TrustedImm32(structurePtr->typeInfo().inlineTypeFlags())); - jit.abortWithReason(AHTypeInfoInlineTypeFlagsAreValid); - correctFlags.link(&jit); - } -#else - // Do a 32-bit wide store to initialize the cell's fields. - jit.store32(TrustedImm32(structurePtr->objectInitializationBlob()), MacroAssembler::Address(dest, JSCell::indexingTypeOffset())); - jit.storePtr(structure, MacroAssembler::Address(dest, JSCell::structureIDOffset())); -#endif -} - } // namespace JSC #endif // ENABLE(JIT) diff --git a/Source/JavaScriptCore/jit/AssemblyHelpers.h b/Source/JavaScriptCore/jit/AssemblyHelpers.h index 0b7cc456a..36d583139 100644 --- a/Source/JavaScriptCore/jit/AssemblyHelpers.h +++ b/Source/JavaScriptCore/jit/AssemblyHelpers.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2013 Apple Inc. All rights reserved. 
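Every jitAssert* helper in the AssemblyHelpers.cpp hunks above uses the same branch-over-trap idiom: emit a branch that is taken when the invariant holds, so only bad values fall into the trap (breakpoint() on one side of this diff, abortWithReason() on the other). A plain-C++ rendering of the x86-64 jitAssertIsInt32 check, with a uint64_t standing in for the GPR and abort() for the emitted trap:

```cpp
#include <cstdint>
#include <cstdlib>

static void assertIsInt32(uint64_t gpr)
{
    if (gpr <= 0xFFFFFFFFull) // Jump checkInt32 = branch64(BelowOrEqual, gpr, 0xFFFFFFFF)
        return;               // checkInt32.link(this): invariant holds, skip the trap
    std::abort();             // breakpoint() / abortWithReason(AHIsNotInt32)
}

int main() { assertIsInt32(42); }
```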
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,6 +26,8 @@ #ifndef AssemblyHelpers_h #define AssemblyHelpers_h +#include <wtf/Platform.h> + #if ENABLE(JIT) #include "CodeBlock.h" @@ -33,7 +35,6 @@ #include "GPRInfo.h" #include "JITCode.h" #include "MacroAssembler.h" -#include "TypeofType.h" #include "VM.h" namespace JSC { @@ -57,142 +58,8 @@ public: CodeBlock* codeBlock() { return m_codeBlock; } VM* vm() { return m_vm; } AssemblerType_T& assembler() { return m_assembler; } - - void checkStackPointerAlignment() - { - // This check is both unneeded and harder to write correctly for ARM64 -#if !defined(NDEBUG) && !CPU(ARM64) - Jump stackPointerAligned = branchTestPtr(Zero, stackPointerRegister, TrustedImm32(0xf)); - abortWithReason(AHStackPointerMisaligned); - stackPointerAligned.link(this); -#endif - } - - template<typename T> - void storeCell(T cell, Address address) - { -#if USE(JSVALUE64) - store64(cell, address); -#else - store32(cell, address.withOffset(PayloadOffset)); - store32(TrustedImm32(JSValue::CellTag), address.withOffset(TagOffset)); -#endif - } - - void storeValue(JSValueRegs regs, Address address) - { -#if USE(JSVALUE64) - store64(regs.gpr(), address); -#else - store32(regs.payloadGPR(), address.withOffset(PayloadOffset)); - store32(regs.tagGPR(), address.withOffset(TagOffset)); -#endif - } - - void storeValue(JSValueRegs regs, BaseIndex address) - { -#if USE(JSVALUE64) - store64(regs.gpr(), address); -#else - store32(regs.payloadGPR(), address.withOffset(PayloadOffset)); - store32(regs.tagGPR(), address.withOffset(TagOffset)); -#endif - } - - void storeValue(JSValueRegs regs, void* address) - { -#if USE(JSVALUE64) - store64(regs.gpr(), address); -#else - store32(regs.payloadGPR(), bitwise_cast<void*>(bitwise_cast<uintptr_t>(address) + PayloadOffset)); - store32(regs.tagGPR(), bitwise_cast<void*>(bitwise_cast<uintptr_t>(address) + TagOffset)); -#endif - } - - void loadValue(Address address, JSValueRegs regs) - { -#if USE(JSVALUE64) - load64(address, regs.gpr()); -#else - if (address.base == regs.payloadGPR()) { - load32(address.withOffset(TagOffset), regs.tagGPR()); - load32(address.withOffset(PayloadOffset), regs.payloadGPR()); - } else { - load32(address.withOffset(PayloadOffset), regs.payloadGPR()); - load32(address.withOffset(TagOffset), regs.tagGPR()); - } -#endif - } - void loadValue(BaseIndex address, JSValueRegs regs) - { -#if USE(JSVALUE64) - load64(address, regs.gpr()); -#else - if (address.base == regs.payloadGPR() || address.index == regs.payloadGPR()) { - // We actually could handle the case where the registers are aliased to both - // tag and payload, but we don't for now. 
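The aliasing guard in the 32-bit loadValue() above comes down to a load-ordering rule: if the destination payload register is also the base (or index) of the address, loading the payload first would clobber the address before the tag load. This sketch shows just that ordering under the little-endian JSVALUE32_64 layout (payload at offset 0, tag at offset 4); C++ references cannot truly alias the base here, so treat it as an illustration of the order, not the hazard:

```cpp
#include <cstdint>
#include <cstdio>

struct EncodedValue { uint32_t payload; uint32_t tag; }; // assumed JSVALUE32_64 layout

static void loadValue(const EncodedValue* base, uint32_t& tagGPR, uint32_t& payloadGPR,
                      bool baseAliasesPayload)
{
    if (baseAliasesPayload) {
        tagGPR = base->tag;         // read through the base while it is still intact
        payloadGPR = base->payload; // now the "base register" may be overwritten
    } else {
        payloadGPR = base->payload;
        tagGPR = base->tag;
    }
}

int main()
{
    EncodedValue v { 7, 0xfffffffbu };
    uint32_t tag = 0, payload = 0;
    loadValue(&v, tag, payload, true);
    std::printf("%u 0x%x\n", payload, tag);
}
```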
- RELEASE_ASSERT(address.base != regs.tagGPR()); - RELEASE_ASSERT(address.index != regs.tagGPR()); - - load32(address.withOffset(TagOffset), regs.tagGPR()); - load32(address.withOffset(PayloadOffset), regs.payloadGPR()); - } else { - load32(address.withOffset(PayloadOffset), regs.payloadGPR()); - load32(address.withOffset(TagOffset), regs.tagGPR()); - } -#endif - } - - void moveTrustedValue(JSValue value, JSValueRegs regs) - { -#if USE(JSVALUE64) - move(TrustedImm64(JSValue::encode(value)), regs.gpr()); -#else - move(TrustedImm32(value.tag()), regs.tagGPR()); - move(TrustedImm32(value.payload()), regs.payloadGPR()); -#endif - } - - void storeTrustedValue(JSValue value, Address address) - { -#if USE(JSVALUE64) - store64(TrustedImm64(JSValue::encode(value)), address); -#else - store32(TrustedImm32(value.tag()), address.withOffset(TagOffset)); - store32(TrustedImm32(value.payload()), address.withOffset(PayloadOffset)); -#endif - } - - void storeTrustedValue(JSValue value, BaseIndex address) - { -#if USE(JSVALUE64) - store64(TrustedImm64(JSValue::encode(value)), address); -#else - store32(TrustedImm32(value.tag()), address.withOffset(TagOffset)); - store32(TrustedImm32(value.payload()), address.withOffset(PayloadOffset)); -#endif - } - #if CPU(X86_64) || CPU(X86) - static size_t prologueStackPointerDelta() - { - // Prologue only saves the framePointerRegister - return sizeof(void*); - } - - void emitFunctionPrologue() - { - push(framePointerRegister); - move(stackPointerRegister, framePointerRegister); - } - - void emitFunctionEpilogue() - { - move(framePointerRegister, stackPointerRegister); - pop(framePointerRegister); - } - void preserveReturnAddressAfterCall(GPRReg reg) { pop(reg); @@ -210,24 +77,6 @@ public: #endif // CPU(X86_64) || CPU(X86) #if CPU(ARM) || CPU(ARM64) - static size_t prologueStackPointerDelta() - { - // Prologue saves the framePointerRegister and linkRegister - return 2 * sizeof(void*); - } - - void emitFunctionPrologue() - { - pushPair(framePointerRegister, linkRegister); - move(stackPointerRegister, framePointerRegister); - } - - void emitFunctionEpilogue() - { - move(framePointerRegister, stackPointerRegister); - popPair(framePointerRegister, linkRegister); - } - ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg) { move(linkRegister, reg); @@ -245,12 +94,6 @@ public: #endif #if CPU(MIPS) - static size_t prologueStackPointerDelta() - { - // Prologue saves the framePointerRegister and returnAddressRegister - return 2 * sizeof(void*); - } - ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg) { move(returnAddressRegister, reg); @@ -268,26 +111,6 @@ public: #endif #if CPU(SH4) - static size_t prologueStackPointerDelta() - { - // Prologue saves the framePointerRegister and link register - return 2 * sizeof(void*); - } - - void emitFunctionPrologue() - { - push(linkRegister); - push(framePointerRegister); - move(stackPointerRegister, framePointerRegister); - } - - void emitFunctionEpilogue() - { - move(framePointerRegister, stackPointerRegister); - pop(framePointerRegister); - pop(linkRegister); - } - ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg) { m_assembler.stspr(reg); @@ -304,20 +127,10 @@ public: } #endif - void emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry entry, GPRReg to, GPRReg from = GPRInfo::callFrameRegister) - { - loadPtr(Address(from, entry * sizeof(Register)), to); - } - void emitGetFromCallFrameHeader32(JSStack::CallFrameHeaderEntry entry, GPRReg to, GPRReg from = GPRInfo::callFrameRegister) + void 
emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry entry, GPRReg to) { - load32(Address(from, entry * sizeof(Register)), to); + loadPtr(Address(GPRInfo::callFrameRegister, entry * sizeof(Register)), to); } -#if USE(JSVALUE64) - void emitGetFromCallFrameHeader64(JSStack::CallFrameHeaderEntry entry, GPRReg to, GPRReg from = GPRInfo::callFrameRegister) - { - load64(Address(from, entry * sizeof(Register)), to); - } -#endif // USE(JSVALUE64) void emitPutToCallFrameHeader(GPRReg from, JSStack::CallFrameHeaderEntry entry) { storePtr(from, Address(GPRInfo::callFrameRegister, entry * sizeof(Register))); @@ -337,6 +150,10 @@ public: storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::callerFrameOffset())); } + void emitGetReturnPCFromCallFrameHeaderPtr(RegisterID to) + { + loadPtr(Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()), to); + } void emitPutReturnPCToCallFrameHeader(RegisterID from) { storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset())); @@ -346,59 +163,6 @@ public: storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset())); } - // emitPutToCallFrameHeaderBeforePrologue() and related are used to access callee frame header - // fields before the code from emitFunctionPrologue() has executed. - // First, the access is via the stack pointer. Second, the address calculation must also take - // into account that the stack pointer may not have been adjusted down for the return PC and/or - // caller's frame pointer. On some platforms, the callee is responsible for pushing the - // "link register" containing the return address in the function prologue. -#if USE(JSVALUE64) - void emitPutToCallFrameHeaderBeforePrologue(GPRReg from, JSStack::CallFrameHeaderEntry entry) - { - storePtr(from, Address(stackPointerRegister, entry * static_cast<ptrdiff_t>(sizeof(Register)) - prologueStackPointerDelta())); - } -#else - void emitPutPayloadToCallFrameHeaderBeforePrologue(GPRReg from, JSStack::CallFrameHeaderEntry entry) - { - storePtr(from, Address(stackPointerRegister, entry * static_cast<ptrdiff_t>(sizeof(Register)) - prologueStackPointerDelta() + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload))); - } - - void emitPutTagToCallFrameHeaderBeforePrologue(TrustedImm32 tag, JSStack::CallFrameHeaderEntry entry) - { - storePtr(tag, Address(stackPointerRegister, entry * static_cast<ptrdiff_t>(sizeof(Register)) - prologueStackPointerDelta() + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag))); - } -#endif - - JumpList branchIfNotEqual(JSValueRegs regs, JSValue value) - { -#if USE(JSVALUE64) - return branch64(NotEqual, regs.gpr(), TrustedImm64(JSValue::encode(value))); -#else - JumpList result; - result.append(branch32(NotEqual, regs.tagGPR(), TrustedImm32(value.tag()))); - if (value.isEmpty() || value.isUndefinedOrNull()) - return result; // These don't have anything interesting in the payload. - result.append(branch32(NotEqual, regs.payloadGPR(), TrustedImm32(value.payload()))); - return result; -#endif - } - - Jump branchIfEqual(JSValueRegs regs, JSValue value) - { -#if USE(JSVALUE64) - return branch64(Equal, regs.gpr(), TrustedImm64(JSValue::encode(value))); -#else - Jump notEqual; - // These don't have anything interesting in the payload. 
- if (!value.isEmpty() && !value.isUndefinedOrNull()) - notEqual = branch32(NotEqual, regs.payloadGPR(), TrustedImm32(value.payload())); - Jump result = branch32(Equal, regs.tagGPR(), TrustedImm32(value.tag())); - if (notEqual.isSet()) - notEqual.link(this); - return result; -#endif - } - Jump branchIfNotCell(GPRReg reg) { #if USE(JSVALUE64) @@ -407,161 +171,6 @@ public: return branch32(MacroAssembler::NotEqual, reg, TrustedImm32(JSValue::CellTag)); #endif } - Jump branchIfNotCell(JSValueRegs regs) - { -#if USE(JSVALUE64) - return branchIfNotCell(regs.gpr()); -#else - return branchIfNotCell(regs.tagGPR()); -#endif - } - - Jump branchIfCell(GPRReg reg) - { -#if USE(JSVALUE64) - return branchTest64(MacroAssembler::Zero, reg, GPRInfo::tagMaskRegister); -#else - return branch32(MacroAssembler::Equal, reg, TrustedImm32(JSValue::CellTag)); -#endif - } - Jump branchIfCell(JSValueRegs regs) - { -#if USE(JSVALUE64) - return branchIfCell(regs.gpr()); -#else - return branchIfCell(regs.tagGPR()); -#endif - } - - Jump branchIfOther(JSValueRegs regs, GPRReg tempGPR) - { -#if USE(JSVALUE64) - move(regs.gpr(), tempGPR); - and64(TrustedImm32(~TagBitUndefined), tempGPR); - return branch64(Equal, tempGPR, TrustedImm64(ValueNull)); -#else - or32(TrustedImm32(1), regs.tagGPR(), tempGPR); - return branch32(Equal, tempGPR, TrustedImm32(JSValue::NullTag)); -#endif - } - - Jump branchIfNotOther(JSValueRegs regs, GPRReg tempGPR) - { -#if USE(JSVALUE64) - move(regs.gpr(), tempGPR); - and64(TrustedImm32(~TagBitUndefined), tempGPR); - return branch64(NotEqual, tempGPR, TrustedImm64(ValueNull)); -#else - or32(TrustedImm32(1), regs.tagGPR(), tempGPR); - return branch32(NotEqual, tempGPR, TrustedImm32(JSValue::NullTag)); -#endif - } - - Jump branchIfInt32(JSValueRegs regs) - { -#if USE(JSVALUE64) - return branch64(AboveOrEqual, regs.gpr(), GPRInfo::tagTypeNumberRegister); -#else - return branch32(Equal, regs.tagGPR(), TrustedImm32(JSValue::Int32Tag)); -#endif - } - - Jump branchIfNotInt32(JSValueRegs regs) - { -#if USE(JSVALUE64) - return branch64(Below, regs.gpr(), GPRInfo::tagTypeNumberRegister); -#else - return branch32(NotEqual, regs.tagGPR(), TrustedImm32(JSValue::Int32Tag)); -#endif - } - - // Note that the tempGPR is not used in 64-bit mode. - Jump branchIfNumber(JSValueRegs regs, GPRReg tempGPR) - { -#if USE(JSVALUE64) - UNUSED_PARAM(tempGPR); - return branchTest64(NonZero, regs.gpr(), GPRInfo::tagTypeNumberRegister); -#else - add32(TrustedImm32(1), regs.tagGPR(), tempGPR); - return branch32(Below, tempGPR, TrustedImm32(JSValue::LowestTag + 1)); -#endif - } - - // Note that the tempGPR is not used in 64-bit mode. - Jump branchIfNotNumber(JSValueRegs regs, GPRReg tempGPR) - { -#if USE(JSVALUE64) - UNUSED_PARAM(tempGPR); - return branchTest64(Zero, regs.gpr(), GPRInfo::tagTypeNumberRegister); -#else - add32(TrustedImm32(1), regs.tagGPR(), tempGPR); - return branch32(AboveOrEqual, tempGPR, TrustedImm32(JSValue::LowestTag + 1)); -#endif - } - - // Note that the tempGPR is not used in 32-bit mode. - Jump branchIfBoolean(JSValueRegs regs, GPRReg tempGPR) - { -#if USE(JSVALUE64) - move(regs.gpr(), tempGPR); - xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), tempGPR); - return branchTest64(Zero, tempGPR, TrustedImm32(static_cast<int32_t>(~1))); -#else - UNUSED_PARAM(tempGPR); - return branch32(Equal, regs.tagGPR(), TrustedImm32(JSValue::BooleanTag)); -#endif - } - - // Note that the tempGPR is not used in 32-bit mode. 
- Jump branchIfNotBoolean(JSValueRegs regs, GPRReg tempGPR) - { -#if USE(JSVALUE64) - move(regs.gpr(), tempGPR); - xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), tempGPR); - return branchTest64(NonZero, tempGPR, TrustedImm32(static_cast<int32_t>(~1))); -#else - UNUSED_PARAM(tempGPR); - return branch32(NotEqual, regs.tagGPR(), TrustedImm32(JSValue::BooleanTag)); -#endif - } - - Jump branchIfObject(GPRReg cellGPR) - { - return branch8( - AboveOrEqual, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType)); - } - - Jump branchIfNotObject(GPRReg cellGPR) - { - return branch8( - Below, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType)); - } - - Jump branchIfType(GPRReg cellGPR, JSType type) - { - return branch8(Equal, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(type)); - } - - Jump branchIfNotType(GPRReg cellGPR, JSType type) - { - return branch8(NotEqual, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(type)); - } - - Jump branchIfString(GPRReg cellGPR) { return branchIfType(cellGPR, StringType); } - Jump branchIfNotString(GPRReg cellGPR) { return branchIfNotType(cellGPR, StringType); } - Jump branchIfSymbol(GPRReg cellGPR) { return branchIfType(cellGPR, SymbolType); } - Jump branchIfNotSymbol(GPRReg cellGPR) { return branchIfNotType(cellGPR, SymbolType); } - Jump branchIfFunction(GPRReg cellGPR) { return branchIfType(cellGPR, JSFunctionType); } - Jump branchIfNotFunction(GPRReg cellGPR) { return branchIfNotType(cellGPR, JSFunctionType); } - - Jump branchIfEmpty(JSValueRegs regs) - { -#if USE(JSVALUE64) - return branchTest64(Zero, regs.gpr()); -#else - return branch32(Equal, regs.tagGPR(), TrustedImm32(JSValue::EmptyValueTag)); -#endif - } static Address addressForByteOffset(ptrdiff_t byteOffset) { @@ -574,10 +183,6 @@ public: } static Address addressFor(VirtualRegister virtualRegister) { - // NB. It's tempting on some architectures to sometimes use an offset from the stack - // register because for some offsets that will encode to a smaller instruction. But we - // cannot do this. We use this in places where the stack pointer has been moved to some - // unpredictable location. ASSERT(virtualRegister.isValid()); return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register)); } @@ -589,7 +194,7 @@ public: static Address tagFor(VirtualRegister virtualRegister) { ASSERT(virtualRegister.isValid()); - return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register) + TagOffset); + return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)); } static Address tagFor(int operand) { @@ -599,69 +204,33 @@ public: static Address payloadFor(VirtualRegister virtualRegister) { ASSERT(virtualRegister.isValid()); - return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register) + PayloadOffset); + return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)); } static Address payloadFor(int operand) { return payloadFor(static_cast<VirtualRegister>(operand)); } - // Access to our fixed callee CallFrame. - static Address calleeFrameSlot(int slot) - { - ASSERT(slot >= JSStack::CallerFrameAndPCSize); - return Address(stackPointerRegister, sizeof(Register) * (slot - JSStack::CallerFrameAndPCSize)); - } - - // Access to our fixed callee CallFrame. 
- static Address calleeArgumentSlot(int argument) - { - return calleeFrameSlot(virtualRegisterForArgument(argument).offset()); - } - - static Address calleeFrameTagSlot(int slot) - { - return calleeFrameSlot(slot).withOffset(TagOffset); - } - - static Address calleeFramePayloadSlot(int slot) - { - return calleeFrameSlot(slot).withOffset(PayloadOffset); - } - - static Address calleeArgumentTagSlot(int argument) - { - return calleeArgumentSlot(argument).withOffset(TagOffset); - } - - static Address calleeArgumentPayloadSlot(int argument) + Jump branchIfNotObject(GPRReg structureReg) { - return calleeArgumentSlot(argument).withOffset(PayloadOffset); + return branch8(Below, Address(structureReg, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType)); } - static Address calleeFrameCallerFrame() + static GPRReg selectScratchGPR(GPRReg preserve1 = InvalidGPRReg, GPRReg preserve2 = InvalidGPRReg, GPRReg preserve3 = InvalidGPRReg, GPRReg preserve4 = InvalidGPRReg) { - return calleeFrameSlot(0).withOffset(CallFrame::callerFrameOffset()); - } - - static GPRReg selectScratchGPR(GPRReg preserve1 = InvalidGPRReg, GPRReg preserve2 = InvalidGPRReg, GPRReg preserve3 = InvalidGPRReg, GPRReg preserve4 = InvalidGPRReg, GPRReg preserve5 = InvalidGPRReg) - { - if (preserve1 != GPRInfo::regT0 && preserve2 != GPRInfo::regT0 && preserve3 != GPRInfo::regT0 && preserve4 != GPRInfo::regT0 && preserve5 != GPRInfo::regT0) + if (preserve1 != GPRInfo::regT0 && preserve2 != GPRInfo::regT0 && preserve3 != GPRInfo::regT0 && preserve4 != GPRInfo::regT0) return GPRInfo::regT0; - if (preserve1 != GPRInfo::regT1 && preserve2 != GPRInfo::regT1 && preserve3 != GPRInfo::regT1 && preserve4 != GPRInfo::regT1 && preserve5 != GPRInfo::regT1) + if (preserve1 != GPRInfo::regT1 && preserve2 != GPRInfo::regT1 && preserve3 != GPRInfo::regT1 && preserve4 != GPRInfo::regT1) return GPRInfo::regT1; - if (preserve1 != GPRInfo::regT2 && preserve2 != GPRInfo::regT2 && preserve3 != GPRInfo::regT2 && preserve4 != GPRInfo::regT2 && preserve5 != GPRInfo::regT2) + if (preserve1 != GPRInfo::regT2 && preserve2 != GPRInfo::regT2 && preserve3 != GPRInfo::regT2 && preserve4 != GPRInfo::regT2) return GPRInfo::regT2; - if (preserve1 != GPRInfo::regT3 && preserve2 != GPRInfo::regT3 && preserve3 != GPRInfo::regT3 && preserve4 != GPRInfo::regT3 && preserve5 != GPRInfo::regT3) + if (preserve1 != GPRInfo::regT3 && preserve2 != GPRInfo::regT3 && preserve3 != GPRInfo::regT3 && preserve4 != GPRInfo::regT3) return GPRInfo::regT3; - if (preserve1 != GPRInfo::regT4 && preserve2 != GPRInfo::regT4 && preserve3 != GPRInfo::regT4 && preserve4 != GPRInfo::regT4 && preserve5 != GPRInfo::regT4) - return GPRInfo::regT4; - - return GPRInfo::regT5; + return GPRInfo::regT4; } // Add a debug call. This call has no effect on JIT code execution state. @@ -729,8 +298,6 @@ public: void jitAssertIsCell(GPRReg); void jitAssertHasValidCallFrame(); void jitAssertIsNull(GPRReg); - void jitAssertTagsInPlace(); - void jitAssertArgumentCountSane(); #else void jitAssertIsInt32(GPRReg) { } void jitAssertIsJSInt32(GPRReg) { } @@ -739,11 +306,7 @@ public: void jitAssertIsCell(GPRReg) { } void jitAssertHasValidCallFrame() { } void jitAssertIsNull(GPRReg) { } - void jitAssertTagsInPlace() { } - void jitAssertArgumentCountSane() { } #endif - - void purifyNaN(FPRReg); // These methods convert between doubles, and doubles boxed and JSValues. 
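The boxing helpers that follow rely on JSC's JSVALUE64 NaN-boxing arithmetic: unboxDouble adds tagTypeNumberRegister (0xffff000000000000, which equals -2^48 mod 2^64) to undo the 2^48 offset applied when boxing, branchIfInt32 is an unsigned compare against that same constant, and branchIfNumber is a bit test against it. A runnable sketch of the encoding; the constants are taken from JSC's value representation, so treat the exact numbers as assumptions:

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

static constexpr uint64_t TagTypeNumber = 0xffff000000000000ull;
static constexpr uint64_t DoubleEncodeOffset = 1ull << 48;

static uint64_t boxDouble(double d)
{
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof bits);
    return bits + DoubleEncodeOffset; // shifts doubles above the cell/int32 ranges
}

static double unboxDouble(uint64_t boxed)
{
    uint64_t bits = boxed + TagTypeNumber; // == boxed - 2^48, as add64(tagTypeNumberRegister, gpr) does
    double d;
    std::memcpy(&d, &bits, sizeof d);
    return d;
}

static uint64_t boxInt32(int32_t i) { return TagTypeNumber | static_cast<uint32_t>(i); }

static bool isInt32(uint64_t v)  { return v >= TagTypeNumber; }       // branch64(AboveOrEqual, ...)
static bool isNumber(uint64_t v) { return (v & TagTypeNumber) != 0; } // branchTest64(NonZero, ...)

int main()
{
    uint64_t d = boxDouble(3.25), i = boxInt32(-7);
    std::printf("double: num=%d int32=%d value=%g\n", isNumber(d), isInt32(d), unboxDouble(d));
    std::printf("int32:  num=%d int32=%d\n", isNumber(i), isInt32(i));
}
```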
#if USE(JSVALUE64) @@ -754,22 +317,13 @@ public: jitAssertIsJSDouble(gpr); return gpr; } - FPRReg unboxDoubleWithoutAssertions(GPRReg gpr, FPRReg fpr) + FPRReg unboxDouble(GPRReg gpr, FPRReg fpr) { + jitAssertIsJSDouble(gpr); add64(GPRInfo::tagTypeNumberRegister, gpr); move64ToDouble(gpr, fpr); return fpr; } - FPRReg unboxDouble(GPRReg gpr, FPRReg fpr) - { - jitAssertIsJSDouble(gpr); - return unboxDoubleWithoutAssertions(gpr, fpr); - } - - void boxDouble(FPRReg fpr, JSValueRegs regs) - { - boxDouble(fpr, regs.gpr()); - } // Here are possible arrangements of source, target, scratch: // - source, target, scratch can all be separate registers. @@ -803,36 +357,17 @@ public: { moveIntsToDouble(payloadGPR, tagGPR, fpr, scratchFPR); } - - void boxDouble(FPRReg fpr, JSValueRegs regs) - { - boxDouble(fpr, regs.tagGPR(), regs.payloadGPR()); - } #endif - void boxBooleanPayload(GPRReg boolGPR, GPRReg payloadGPR) + enum ExceptionCheckKind { NormalExceptionCheck, InvertedExceptionCheck }; + Jump emitExceptionCheck(ExceptionCheckKind kind = NormalExceptionCheck) { #if USE(JSVALUE64) - add32(TrustedImm32(ValueFalse), boolGPR, payloadGPR); -#else - move(boolGPR, payloadGPR); -#endif - } - - void boxBoolean(GPRReg boolGPR, JSValueRegs boxedRegs) - { - boxBooleanPayload(boolGPR, boxedRegs.payloadGPR()); -#if USE(JSVALUE32_64) - move(TrustedImm32(JSValue::BooleanTag), boxedRegs.tagGPR()); + return branchTest64(kind == NormalExceptionCheck ? NonZero : Zero, AbsoluteAddress(vm()->addressOfException())); +#elif USE(JSVALUE32_64) + return branch32(kind == NormalExceptionCheck ? NotEqual : Equal, AbsoluteAddress(reinterpret_cast<char*>(vm()->addressOfException()) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag)); #endif } - - void callExceptionFuzz(); - - enum ExceptionCheckKind { NormalExceptionCheck, InvertedExceptionCheck }; - enum ExceptionJumpWidth { NormalJumpWidth, FarJumpWidth }; - Jump emitExceptionCheck( - ExceptionCheckKind = NormalExceptionCheck, ExceptionJumpWidth = NormalJumpWidth); #if ENABLE(SAMPLING_COUNTERS) static void emitCount(MacroAssembler& jit, AbstractSamplingCounter& counter, int32_t increment = 1) @@ -886,153 +421,61 @@ public: return m_baselineCodeBlock; } - static VirtualRegister argumentsStart(InlineCallFrame* inlineCallFrame) + VirtualRegister baselineArgumentsRegisterFor(InlineCallFrame* inlineCallFrame) { if (!inlineCallFrame) - return VirtualRegister(CallFrame::argumentOffset(0)); - if (inlineCallFrame->arguments.size() <= 1) - return virtualRegisterForLocal(0); - ValueRecovery recovery = inlineCallFrame->arguments[1]; - RELEASE_ASSERT(recovery.technique() == DisplacedInJSStack); - return recovery.virtualRegister(); + return baselineCodeBlock()->argumentsRegister(); + + return VirtualRegister(baselineCodeBlockForInlineCallFrame( + inlineCallFrame)->argumentsRegister().offset() + inlineCallFrame->stackOffset); } - static VirtualRegister argumentsStart(const CodeOrigin& codeOrigin) + VirtualRegister baselineArgumentsRegisterFor(const CodeOrigin& codeOrigin) { - return argumentsStart(codeOrigin.inlineCallFrame); + return baselineArgumentsRegisterFor(codeOrigin.inlineCallFrame); } - void emitLoadStructure(RegisterID source, RegisterID dest, RegisterID scratch) + SymbolTable* symbolTableFor(const CodeOrigin& codeOrigin) { -#if USE(JSVALUE64) - load32(MacroAssembler::Address(source, JSCell::structureIDOffset()), dest); - loadPtr(vm()->heap.structureIDTable().base(), scratch); - loadPtr(MacroAssembler::BaseIndex(scratch, dest, MacroAssembler::TimesEight), 
dest); -#else - UNUSED_PARAM(scratch); - loadPtr(MacroAssembler::Address(source, JSCell::structureIDOffset()), dest); -#endif - } - - static void emitLoadStructure(AssemblyHelpers& jit, RegisterID base, RegisterID dest, RegisterID scratch) - { -#if USE(JSVALUE64) - jit.load32(MacroAssembler::Address(base, JSCell::structureIDOffset()), dest); - jit.loadPtr(jit.vm()->heap.structureIDTable().base(), scratch); - jit.loadPtr(MacroAssembler::BaseIndex(scratch, dest, MacroAssembler::TimesEight), dest); -#else - UNUSED_PARAM(scratch); - jit.loadPtr(MacroAssembler::Address(base, JSCell::structureIDOffset()), dest); -#endif + return baselineCodeBlockFor(codeOrigin)->symbolTable(); } - void emitStoreStructureWithTypeInfo(TrustedImmPtr structure, RegisterID dest, RegisterID) + int offsetOfLocals(const CodeOrigin& codeOrigin) { - emitStoreStructureWithTypeInfo(*this, structure, dest); + if (!codeOrigin.inlineCallFrame) + return 0; + return codeOrigin.inlineCallFrame->stackOffset * sizeof(Register); } - void emitStoreStructureWithTypeInfo(RegisterID structure, RegisterID dest, RegisterID scratch) + int offsetOfArgumentsIncludingThis(InlineCallFrame* inlineCallFrame) { -#if USE(JSVALUE64) - load64(MacroAssembler::Address(structure, Structure::structureIDOffset()), scratch); - store64(scratch, MacroAssembler::Address(dest, JSCell::structureIDOffset())); -#else - // Store all the info flags using a single 32-bit wide load and store. - load32(MacroAssembler::Address(structure, Structure::indexingTypeOffset()), scratch); - store32(scratch, MacroAssembler::Address(dest, JSCell::indexingTypeOffset())); - - // Store the StructureID - storePtr(structure, MacroAssembler::Address(dest, JSCell::structureIDOffset())); -#endif + if (!inlineCallFrame) + return CallFrame::argumentOffsetIncludingThis(0) * sizeof(Register); + if (inlineCallFrame->arguments.size() <= 1) + return 0; + ValueRecovery recovery = inlineCallFrame->arguments[1]; + RELEASE_ASSERT(recovery.technique() == DisplacedInJSStack); + return (recovery.virtualRegister().offset() - 1) * sizeof(Register); } - - static void emitStoreStructureWithTypeInfo(AssemblyHelpers& jit, TrustedImmPtr structure, RegisterID dest); - - Jump jumpIfIsRememberedOrInEden(GPRReg cell) + + int offsetOfArgumentsIncludingThis(const CodeOrigin& codeOrigin) { - return branchTest8(MacroAssembler::NonZero, MacroAssembler::Address(cell, JSCell::gcDataOffset())); + return offsetOfArgumentsIncludingThis(codeOrigin.inlineCallFrame); } - Jump jumpIfIsRememberedOrInEden(JSCell* cell) + void writeBarrier(GPRReg owner, GPRReg scratch1, GPRReg scratch2, WriteBarrierUseKind useKind) { - uint8_t* address = reinterpret_cast<uint8_t*>(cell) + JSCell::gcDataOffset(); - return branchTest8(MacroAssembler::NonZero, MacroAssembler::AbsoluteAddress(address)); - } - - // Emits the branch structure for typeof. The code emitted by this doesn't fall through. The - // functor is called at those points where we have pinpointed a type. One way to use this is to - // have the functor emit the code to put the type string into an appropriate register and then - // jump out. A secondary functor is used for the call trap and masquerades-as-undefined slow - // case. It is passed the unlinked jump to the slow case. 
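A plain-C++ rendering of the branching structure that comment describes may help; the Value fields below are stand-ins for the branchIf* tests the deleted emitTypeOf that follows composes, and the slow-path case is collapsed into a placeholder string:

```cpp
#include <cstdio>

struct Value {
    bool isCell, isObject, isFunction, masqueradesOrHasCallTrap,
         isString, isNumber, isNull, isBoolean;
};

static const char* typeOf(const Value& v)
{
    if (v.isCell) {
        if (v.isObject) {
            if (v.isFunction) return "function";
            if (!v.masqueradesOrHasCallTrap) return "object";
            return "object-or-undefined"; // the slowPathFunctor case in the real code
        }
        return v.isString ? "string" : "symbol";
    }
    if (v.isNumber) return "number";
    if (v.isNull) return "object";
    if (v.isBoolean) return "boolean";
    return "undefined";
}

int main()
{
    Value fn {};
    fn.isCell = fn.isObject = fn.isFunction = true;
    std::printf("%s\n", typeOf(fn));
}
```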
- template<typename Functor, typename SlowPathFunctor> - void emitTypeOf( - JSValueRegs regs, GPRReg tempGPR, const Functor& functor, - const SlowPathFunctor& slowPathFunctor) - { - // Implements the following branching structure: - // - // if (is cell) { - // if (is object) { - // if (is function) { - // return function; - // } else if (doesn't have call trap and doesn't masquerade as undefined) { - // return object - // } else { - // return slowPath(); - // } - // } else if (is string) { - // return string - // } else { - // return symbol - // } - // } else if (is number) { - // return number - // } else if (is null) { - // return object - // } else if (is boolean) { - // return boolean - // } else { - // return undefined - // } - - Jump notCell = branchIfNotCell(regs); - - GPRReg cellGPR = regs.payloadGPR(); - Jump notObject = branchIfNotObject(cellGPR); - - Jump notFunction = branchIfNotFunction(cellGPR); - functor(TypeofType::Function, false); + UNUSED_PARAM(owner); + UNUSED_PARAM(scratch1); + UNUSED_PARAM(scratch2); + UNUSED_PARAM(useKind); + ASSERT(owner != scratch1); + ASSERT(owner != scratch2); + ASSERT(scratch1 != scratch2); - notFunction.link(this); - slowPathFunctor( - branchTest8( - NonZero, - Address(cellGPR, JSCell::typeInfoFlagsOffset()), - TrustedImm32(MasqueradesAsUndefined | TypeOfShouldCallGetCallData))); - functor(TypeofType::Object, false); - - notObject.link(this); - - Jump notString = branchIfNotString(cellGPR); - functor(TypeofType::String, false); - notString.link(this); - functor(TypeofType::Symbol, false); - - notCell.link(this); - - Jump notNumber = branchIfNotNumber(regs, tempGPR); - functor(TypeofType::Number, false); - notNumber.link(this); - - JumpList notNull = branchIfNotEqual(regs, jsNull()); - functor(TypeofType::Object, false); - notNull.link(this); - - Jump notBoolean = branchIfNotBoolean(regs, tempGPR); - functor(TypeofType::Boolean, false); - notBoolean.link(this); - - functor(TypeofType::Undefined, true); +#if ENABLE(WRITE_BARRIER_PROFILING) + emitCount(WriteBarrierCounters::jitCounterFor(useKind)); +#endif } Vector<BytecodeAndMachineOffset>& decodedCodeMapFor(CodeBlock*); diff --git a/Source/JavaScriptCore/jit/BinarySwitch.cpp b/Source/JavaScriptCore/jit/BinarySwitch.cpp deleted file mode 100644 index 866b2788f..000000000 --- a/Source/JavaScriptCore/jit/BinarySwitch.cpp +++ /dev/null @@ -1,329 +0,0 @@ -/* - * Copyright (C) 2013, 2015 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "config.h" -#include "BinarySwitch.h" - -#if ENABLE(JIT) - -#include "JSCInlines.h" - -namespace JSC { - -static unsigned globalCounter; // We use a different seed every time we are invoked. - -BinarySwitch::BinarySwitch(GPRReg value, const Vector<int64_t>& cases, Type type) - : m_value(value) - , m_weakRandom(globalCounter++) - , m_index(0) - , m_caseIndex(UINT_MAX) - , m_type(type) -{ - if (cases.isEmpty()) - return; - - for (unsigned i = 0; i < cases.size(); ++i) - m_cases.append(Case(cases[i], i)); - - std::sort(m_cases.begin(), m_cases.end()); - - for (unsigned i = 1; i < m_cases.size(); ++i) - RELEASE_ASSERT(m_cases[i - 1] < m_cases[i]); - - build(0, false, m_cases.size()); -} - -BinarySwitch::~BinarySwitch() -{ -} - -bool BinarySwitch::advance(MacroAssembler& jit) -{ - if (m_cases.isEmpty()) { - m_fallThrough.append(jit.jump()); - return false; - } - - if (m_index == m_branches.size()) { - RELEASE_ASSERT(m_jumpStack.isEmpty()); - return false; - } - - for (;;) { - const BranchCode& code = m_branches[m_index++]; - switch (code.kind) { - case NotEqualToFallThrough: - switch (m_type) { - case Int32: - m_fallThrough.append(jit.branch32( - MacroAssembler::NotEqual, m_value, - MacroAssembler::Imm32(static_cast<int32_t>(m_cases[code.index].value)))); - break; - case IntPtr: - m_fallThrough.append(jit.branchPtr( - MacroAssembler::NotEqual, m_value, - MacroAssembler::ImmPtr(bitwise_cast<const void*>(static_cast<intptr_t>(m_cases[code.index].value))))); - break; - } - break; - case NotEqualToPush: - switch (m_type) { - case Int32: - m_jumpStack.append(jit.branch32( - MacroAssembler::NotEqual, m_value, - MacroAssembler::Imm32(static_cast<int32_t>(m_cases[code.index].value)))); - break; - case IntPtr: - m_jumpStack.append(jit.branchPtr( - MacroAssembler::NotEqual, m_value, - MacroAssembler::ImmPtr(bitwise_cast<const void*>(static_cast<intptr_t>(m_cases[code.index].value))))); - break; - } - break; - case LessThanToPush: - switch (m_type) { - case Int32: - m_jumpStack.append(jit.branch32( - MacroAssembler::LessThan, m_value, - MacroAssembler::Imm32(static_cast<int32_t>(m_cases[code.index].value)))); - break; - case IntPtr: - m_jumpStack.append(jit.branchPtr( - MacroAssembler::LessThan, m_value, - MacroAssembler::ImmPtr(bitwise_cast<const void*>(static_cast<intptr_t>(m_cases[code.index].value))))); - break; - } - break; - case Pop: - m_jumpStack.takeLast().link(&jit); - break; - case ExecuteCase: - m_caseIndex = code.index; - return true; - } - } -} - -void BinarySwitch::build(unsigned start, bool hardStart, unsigned end) -{ - unsigned size = end - start; - - RELEASE_ASSERT(size); - - // This code uses some random numbers to keep things balanced. It's important to keep in mind - // that this does not improve average-case throughput under the assumption that all cases fire - // with equal probability. It just ensures that there will not be some switch structure that - // when combined with some input will always produce pathologically good or pathologically bad - // performance. 
- - const unsigned leafThreshold = 3; - - if (size <= leafThreshold) { - // It turns out that for exactly three cases or less, it's better to just compare each - // case individually. This saves 1/6 of a branch on average, and up to 1/3 of a branch in - // extreme cases where the divide-and-conquer bottoms out in a lot of 3-case subswitches. - // - // This assumes that we care about the cost of hitting some case more than we care about - // bottoming out in a default case. I believe that in most places where we use switch - // statements, we are more likely to hit one of the cases than we are to fall through to - // default. Intuitively, if we wanted to improve the performance of default, we would - // reduce the value of leafThreshold to 2 or even to 1. See below for a deeper discussion. - - bool allConsecutive = false; - - if ((hardStart || (start && m_cases[start - 1].value == m_cases[start].value - 1)) - && start + size < m_cases.size() - && m_cases[start + size - 1].value == m_cases[start + size].value - 1) { - allConsecutive = true; - for (unsigned i = 0; i < size - 1; ++i) { - if (m_cases[i].value + 1 != m_cases[i + 1].value) { - allConsecutive = false; - break; - } - } - } - - Vector<unsigned, 3> localCaseIndices; - for (unsigned i = 0; i < size; ++i) - localCaseIndices.append(start + i); - - std::random_shuffle( - localCaseIndices.begin(), localCaseIndices.end(), - [this] (unsigned n) { - // We use modulo to get a random number in the range we want fully knowing that - // this introduces a tiny amount of bias, but we're fine with such tiny bias. - return m_weakRandom.getUint32() % n; - }); - - for (unsigned i = 0; i < size - 1; ++i) { - m_branches.append(BranchCode(NotEqualToPush, localCaseIndices[i])); - m_branches.append(BranchCode(ExecuteCase, localCaseIndices[i])); - m_branches.append(BranchCode(Pop)); - } - - if (!allConsecutive) - m_branches.append(BranchCode(NotEqualToFallThrough, localCaseIndices.last())); - - m_branches.append(BranchCode(ExecuteCase, localCaseIndices.last())); - return; - } - - // There are two different strategies we could consider here: - // - // Isolate median and split: pick a median and check if the comparison value is equal to it; - // if so, execute the median case. Otherwise check if the value is less than the median, and - // recurse left or right based on this. This has two subvariants: we could either first test - // equality for the median and then do the less-than, or we could first do the less-than and - // then check equality on the not-less-than path. - // - // Ignore median and split: do a less-than comparison on a value that splits the cases in two - // equal-sized halves. Recurse left or right based on the comparison. Do not test for equality - // against the median (or anything else); let the recursion handle those equality comparisons - // once we bottom out in a list that has 3 cases or less (see above). - // - // I'll refer to these strategies as Isolate and Ignore. I initially believed that Isolate - // would be faster since it leads to less branching for some lucky cases. It turns out that - // Isolate is almost a total fail in the average, assuming all cases are equally likely. How - // bad Isolate is depends on whether you believe that doing two consecutive branches based on - // the same comparison is cheaper than doing the compare/branches separately. This is - // difficult to evaluate. For small immediates that aren't blinded, we just care about
For large immediates or when blinding is in play, we - // also care about the instructions used to materialize the immediate a second time. Isolate - // can help with both costs since it involves first doing a < compare+branch on some value, - // followed by a == compare+branch on the same exact value (or vice-versa). Ignore will do a < - // compare+branch on some value, and then the == compare+branch on that same value will happen - // much later. - // - // To evaluate these costs, I wrote the recurrence relation for Isolate and Ignore, assuming - // that ComparisonCost is the cost of a compare+branch and ChainedComparisonCost is the cost - // of a compare+branch on some value that you've just done another compare+branch for. These - // recurrence relations compute the total cost incurred if you executed the switch statement - // on each matching value. So the average cost of hitting some case can be computed as - // Isolate[n]/n or Ignore[n]/n, respectively for the two relations. - // - // Isolate[1] = ComparisonCost - // Isolate[2] = (2 + 1) * ComparisonCost - // Isolate[3] = (3 + 2 + 1) * ComparisonCost - // Isolate[n_] := With[ - // {medianIndex = Floor[n/2] + If[EvenQ[n], RandomInteger[], 1]}, - // ComparisonCost + ChainedComparisonCost + - // (ComparisonCost * (medianIndex - 1) + Isolate[medianIndex - 1]) + - // (2 * ComparisonCost * (n - medianIndex) + Isolate[n - medianIndex])] - // - // Ignore[1] = ComparisonCost - // Ignore[2] = (2 + 1) * ComparisonCost - // Ignore[3] = (3 + 2 + 1) * ComparisonCost - // Ignore[n_] := With[ - // {medianIndex = If[EvenQ[n], n/2, Floor[n/2] + RandomInteger[]]}, - // (medianIndex * ComparisonCost + Ignore[medianIndex]) + - // ((n - medianIndex) * ComparisonCost + Ignore[n - medianIndex])] - // - // This does not account for the average cost of hitting the default case. See further below - // for a discussion of that. - // - // It turns out that for ComparisonCost = 1 and ChainedComparisonCost = 1, Ignore is always - // better than Isolate. If we assume that ChainedComparisonCost = 0, then Isolate wins for - // switch statements that have 20 cases or fewer, though the margin of victory is never large - // - it might sometimes save an average of 0.3 ComparisonCost. For larger switch statements, - // we see divergence between the two with Ignore winning. This is of course rather - // unrealistic since the chained comparison is never free. For ChainedComparisonCost = 0.5, we - // see Isolate winning for 10 cases or fewer, by maybe 0.2 ComparisonCost. Again we see - // divergence for large switches with Ignore winning, for example if a switch statement has - // 100 cases then Ignore saves one branch on average. - // - // Our current JIT backends don't provide for optimization for chained comparisons, except for - // reducing the code for materializing the immediate if the immediates are large or blinding - // comes into play. Probably our JIT backends live somewhere north of - // ChainedComparisonCost = 0.5. - // - // This implies that using the Ignore strategy is likely better. If we wanted to incorporate - // the Isolate strategy, we'd want to determine the switch size threshold at which the two - // cross over and then use Isolate for switches that are smaller than that size. - // - // The average cost of hitting the default case is similar, but involves a different cost for - // the base cases: you have to assume that you will always fail each branch. 
For the Ignore - // strategy we would get this recurrence relation; the same kind of thing happens to the - // Isolate strategy: - // - // Ignore[1] = ComparisonCost - // Ignore[2] = (2 + 2) * ComparisonCost - // Ignore[3] = (3 + 3 + 3) * ComparisonCost - // Ignore[n_] := With[ - // {medianIndex = If[EvenQ[n], n/2, Floor[n/2] + RandomInteger[]]}, - // (medianIndex * ComparisonCost + Ignore[medianIndex]) + - // ((n - medianIndex) * ComparisonCost + Ignore[n - medianIndex])] - // - // This means that if we cared about the default case more, we would likely reduce - // leafThreshold. Reducing it to 2 would reduce the average cost of the default case by 1/3 - // in the most extreme cases (num switch cases = 3, 6, 12, 24, ...). But it would also - // increase the average cost of taking one of the non-default cases by 1/3. Typically the - // difference is 1/6 in either direction. This makes it a very simple trade-off: if we believe - // that the default case is more important then we would want leafThreshold to be 2, and the - // default case would become 1/6 faster on average. But we believe that most switch statements - // are more likely to take one of the cases than the default, so we use leafThreshold = 3 - // and get a 1/6 speed-up on average for taking an explicit case. - - unsigned medianIndex = (start + end) / 2; - - // We want medianIndex to point to the thing we will do a less-than compare against. We want - // this less-than compare to split the current sublist into equal-sized sublists, or - // nearly-equal-sized with some randomness if we're in the odd case. With the above - // calculation, in the odd case we will have medianIndex pointing at either the element we - // want or the element to the left of the one we want. Consider the case of five elements: - // - // 0 1 2 3 4 - // - // start will be 0, end will be 5. The average is 2.5, which rounds down to 2. If we do - // value < 2, then we will split the list into 2 elements on the left and three on the right. - // That's pretty good, but in this odd case we'd like to at random choose 3 instead to ensure - // that we don't become unbalanced on the right. This does not improve throughput since one - // side will always get shafted, and that side might still be odd, in which case it will also - // have two sides and one of them will get shafted - and so on. We just want to avoid - // deterministic pathologies. - // - // In the even case, we will always end up pointing at the element we want: - // - // 0 1 2 3 - // - // start will be 0, end will be 4. So, the average is 2, which is what we'd like. - if (size & 1) { - RELEASE_ASSERT(medianIndex - start + 1 == end - medianIndex); - medianIndex += m_weakRandom.getUint32() & 1; - } else - RELEASE_ASSERT(medianIndex - start == end - medianIndex); - - RELEASE_ASSERT(medianIndex > start); - RELEASE_ASSERT(medianIndex + 1 < end); - - m_branches.append(BranchCode(LessThanToPush, medianIndex)); - build(medianIndex, true, end); - m_branches.append(BranchCode(Pop)); - build(start, hardStart, medianIndex); -} - -} // namespace JSC - -#endif // ENABLE(JIT) - diff --git a/Source/JavaScriptCore/jit/BinarySwitch.h b/Source/JavaScriptCore/jit/BinarySwitch.h deleted file mode 100644 index 56927939c..000000000 --- a/Source/JavaScriptCore/jit/BinarySwitch.h +++ /dev/null @@ -1,143 +0,0 @@ -/* - * Copyright (C) 2013, 2015 Apple Inc. All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef BinarySwitch_h -#define BinarySwitch_h - -#if ENABLE(JIT) - -#include "GPRInfo.h" -#include "MacroAssembler.h" -#include "WeakRandom.h" - -namespace JSC { - -// The BinarySwitch class makes it easy to emit a switch statement over either -// 32-bit integers or pointers, where the switch uses a tree of branches -// rather than a jump table. This makes it particularly useful if the case -// values are too far apart to make a jump table practical, or if there are -// sufficiently few cases that the total cost of log(numCases) branches is -// less than the cost of an indirected jump. -// -// In an effort to simplify the logic of emitting code for each case, this -// uses an iterator style, rather than a functor callback style. This makes -// sense because even the iterator implementation found herein is relatively -// simple, whereas the code it's used from is usually quite complex - one -// example being the trie-of-trees string switch implementation, where the -// code emitted for each case involves recursing to emit code for a sub-trie. -// -// Use this like so: -// -// BinarySwitch switch(valueReg, casesVector, BinarySwitch::Int32); -// while (switch.advance(jit)) { -// int value = switch.caseValue(); -// unsigned index = switch.caseIndex(); // index into casesVector, above -// ... // generate code for this case -// ... 
= jit.jump(); // you have to jump out yourself; falling through causes undefined behavior -// } -// switch.fallThrough().link(&jit); - -class BinarySwitch { -public: - enum Type { - Int32, - IntPtr - }; - - BinarySwitch(GPRReg value, const Vector<int64_t>& cases, Type); - ~BinarySwitch(); - - unsigned caseIndex() const { return m_cases[m_caseIndex].index; } - int64_t caseValue() const { return m_cases[m_caseIndex].value; } - - bool advance(MacroAssembler&); - - MacroAssembler::JumpList& fallThrough() { return m_fallThrough; } - -private: - void build(unsigned start, bool hardStart, unsigned end); - - GPRReg m_value; - - struct Case { - Case() { } - - Case(int64_t value, unsigned index) - : value(value) - , index(index) - { - } - - bool operator<(const Case& other) const - { - return value < other.value; - } - - int64_t value; - unsigned index; - }; - - Vector<Case> m_cases; - - enum BranchKind { - NotEqualToFallThrough, - NotEqualToPush, - LessThanToPush, - Pop, - ExecuteCase - }; - - struct BranchCode { - BranchCode() { } - - BranchCode(BranchKind kind, unsigned index = UINT_MAX) - : kind(kind) - , index(index) - { - } - - BranchKind kind; - unsigned index; - }; - - WeakRandom m_weakRandom; - - Vector<BranchCode> m_branches; - - unsigned m_index; - unsigned m_caseIndex; - Vector<MacroAssembler::Jump> m_jumpStack; - - MacroAssembler::JumpList m_fallThrough; - - Type m_type; -}; - -} // namespace JSC - -#endif // ENABLE(JIT) - -#endif // BinarySwitch_h - diff --git a/Source/JavaScriptCore/jit/CCallHelpers.h b/Source/JavaScriptCore/jit/CCallHelpers.h index 5fa6b5362..afcccd1ca 100644 --- a/Source/JavaScriptCore/jit/CCallHelpers.h +++ b/Source/JavaScriptCore/jit/CCallHelpers.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2015 Apple Inc. All rights reserved. + * Copyright (C) 2011 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,6 +26,8 @@ #ifndef CCallHelpers_h #define CCallHelpers_h +#include <wtf/Platform.h> + #if ENABLE(JIT) #include "AssemblyHelpers.h" @@ -33,34 +35,12 @@ namespace JSC { -#if CPU(MIPS) || (OS(WINDOWS) && CPU(X86_64)) -#define POKE_ARGUMENT_OFFSET 4 -#else -#define POKE_ARGUMENT_OFFSET 0 -#endif - class CCallHelpers : public AssemblyHelpers { public: CCallHelpers(VM* vm, CodeBlock* codeBlock = 0) : AssemblyHelpers(vm, codeBlock) { } - - // The most general helper for setting arguments that fit in a GPR, if you can compute each - // argument without using any argument registers. You usually want one of the setupArguments*() - // methods below instead of this. This thing is most useful if you have *a lot* of arguments. - template<typename Functor> - void setupArgument(unsigned argumentIndex, const Functor& functor) - { - unsigned numberOfRegs = GPRInfo::numberOfArgumentRegisters; // Disguise the constant from clang's tautological compare warning. - if (argumentIndex < numberOfRegs) { - functor(GPRInfo::toArgumentRegister(argumentIndex)); - return; - } - - functor(GPRInfo::nonArgGPR0); - poke(GPRInfo::nonArgGPR0, POKE_ARGUMENT_OFFSET + argumentIndex - GPRInfo::numberOfArgumentRegisters); - } // These methods used to sort arguments into the correct registers. 
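A caveat on the usage comment in the BinarySwitch.h deletion above: it names its local variable `switch`, which is a reserved word in C++, so the snippet cannot be pasted verbatim. A compilable sketch of the same iterator pattern - the surrounding function, `jit`, `valueReg`, `casesVector`, and `done` are assumed context, not part of this patch:

    #include "BinarySwitch.h"

    namespace JSC {

    void emitExampleSwitch(MacroAssembler& jit, GPRReg valueReg,
        const Vector<int64_t>& casesVector, MacroAssembler::JumpList& done)
    {
        BinarySwitch binarySwitch(valueReg, casesVector, BinarySwitch::Int32);
        while (binarySwitch.advance(jit)) {
            unsigned index = binarySwitch.caseIndex(); // index into casesVector
            UNUSED_PARAM(index);
            // ... generate the code for casesVector[index] here; in real call
            // sites (e.g. the trie-of-trees string switch) this recurses ...
            done.append(jit.jump()); // must jump out; falling through is undefined
        }
        binarySwitch.fallThrough().link(&jit); // reached when no case matches
    }

    } // namespace JSC

(The CCallHelpers.h hunks resume below.)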
// On X86 we use cdecl calling conventions, which pass all arguments on the @@ -115,13 +95,6 @@ public: addCallArgument(arg2); } - ALWAYS_INLINE void setupArguments(TrustedImmPtr arg1, GPRReg arg2) - { - resetCallArguments(); - addCallArgument(arg1); - addCallArgument(arg2); - } - ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2, GPRReg arg3) { resetCallArguments(); @@ -302,15 +275,6 @@ public: addCallArgument(arg3); } - ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3) - { - resetCallArguments(); - addCallArgument(GPRInfo::callFrameRegister); - addCallArgument(arg1); - addCallArgument(arg2); - addCallArgument(arg3); - } - ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, TrustedImmPtr arg3) { resetCallArguments(); @@ -320,58 +284,6 @@ public: addCallArgument(arg3); } - ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, TrustedImm32 arg3) - { - resetCallArguments(); - addCallArgument(GPRInfo::callFrameRegister); - addCallArgument(arg1); - addCallArgument(arg2); - addCallArgument(arg3); - } - - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3) - { - resetCallArguments(); - addCallArgument(GPRInfo::callFrameRegister); - addCallArgument(arg1); - addCallArgument(arg2); - addCallArgument(arg3); - } - - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, TrustedImm32 arg5) - { - resetCallArguments(); - addCallArgument(GPRInfo::callFrameRegister); - addCallArgument(arg1); - addCallArgument(arg2); - addCallArgument(arg3); - addCallArgument(arg4); - addCallArgument(arg5); - } - - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4, TrustedImm32 arg5) - { - resetCallArguments(); - addCallArgument(GPRInfo::callFrameRegister); - addCallArgument(arg1); - addCallArgument(arg2); - addCallArgument(arg3); - addCallArgument(arg4); - addCallArgument(arg5); - } - - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, GPRReg arg5, TrustedImm32 arg6) - { - resetCallArguments(); - addCallArgument(GPRInfo::callFrameRegister); - addCallArgument(arg1); - addCallArgument(arg2); - addCallArgument(arg3); - addCallArgument(arg4); - addCallArgument(arg5); - addCallArgument(arg6); - } - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3) { resetCallArguments(); @@ -435,37 +347,6 @@ public: addCallArgument(arg3); addCallArgument(arg4); } - - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImm32 arg2, GPRReg arg3, TrustedImmPtr arg4) - { - resetCallArguments(); - addCallArgument(GPRInfo::callFrameRegister); - addCallArgument(arg1); - addCallArgument(arg2); - addCallArgument(arg3); - addCallArgument(arg4); - } - - ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImmPtr arg4) - { - resetCallArguments(); - addCallArgument(GPRInfo::callFrameRegister); - addCallArgument(arg1); - addCallArgument(arg2); - addCallArgument(arg3); - addCallArgument(arg4); - } - - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5) - { - resetCallArguments(); - addCallArgument(GPRInfo::callFrameRegister); - addCallArgument(arg1); - addCallArgument(arg2); - addCallArgument(arg3); - addCallArgument(arg4); - addCallArgument(arg5); - } 
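All of the deleted overloads in this file follow one layout rule, which the comment above states for X86: arguments are assigned left to right, the WithExecState variants prepend the call frame as implicit argument 0, and on register-based conventions anything past the last argument register is poke()d to a stack slot starting at POKE_ARGUMENT_OFFSET. A toy standalone model of that dispatch - not JSC code, register count assumed - mirroring the deleted setupArgument() helper near the top of this file:

    #include <cstdio>

    constexpr unsigned numberOfArgumentRegisters = 4; // assumed, e.g. 32-bit ARM
    constexpr unsigned pokeArgumentOffset = 0;        // 4 on MIPS and Win64, per the macro above

    void placeArgument(unsigned argumentIndex)
    {
        // Registers first, then stack slots, as in the deleted setupArgument().
        if (argumentIndex < numberOfArgumentRegisters) {
            std::printf("arg %u -> argument register %u\n", argumentIndex, argumentIndex);
            return;
        }
        std::printf("arg %u -> stack slot %u\n", argumentIndex,
            pokeArgumentOffset + argumentIndex - numberOfArgumentRegisters);
    }

    int main()
    {
        for (unsigned argumentIndex = 0; argumentIndex < 6; ++argumentIndex)
            placeArgument(argumentIndex);
        return 0;
    }

The nonzero offset on MIPS and 64-bit Windows accounts for the home/shadow space those ABIs reserve on the stack for the register arguments.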
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5) { @@ -478,27 +359,6 @@ public: addCallArgument(arg5); } - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4) - { - resetCallArguments(); - addCallArgument(GPRInfo::callFrameRegister); - addCallArgument(arg1); - addCallArgument(arg2); - addCallArgument(arg3); - addCallArgument(arg4); - } - - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4, TrustedImm32 arg5) - { - resetCallArguments(); - addCallArgument(GPRInfo::callFrameRegister); - addCallArgument(arg1); - addCallArgument(arg2); - addCallArgument(arg3); - addCallArgument(arg4); - addCallArgument(arg5); - } - ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4) { resetCallArguments(); @@ -519,17 +379,6 @@ public: addCallArgument(arg4); } - ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, GPRReg arg5) - { - resetCallArguments(); - addCallArgument(GPRInfo::callFrameRegister); - addCallArgument(arg1); - addCallArgument(arg2); - addCallArgument(arg3); - addCallArgument(arg4); - addCallArgument(arg5); - } - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3) { resetCallArguments(); @@ -589,17 +438,6 @@ public: addCallArgument(arg4); } - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5) - { - resetCallArguments(); - addCallArgument(GPRInfo::callFrameRegister); - addCallArgument(arg1); - addCallArgument(arg2); - addCallArgument(arg3); - addCallArgument(arg4); - addCallArgument(arg5); - } - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImmPtr arg5) { resetCallArguments(); @@ -719,33 +557,8 @@ public: addCallArgument(arg4); addCallArgument(arg5); } - - ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6) - { - resetCallArguments(); - addCallArgument(GPRInfo::callFrameRegister); - addCallArgument(arg1); - addCallArgument(arg2); - addCallArgument(arg3); - addCallArgument(arg4); - addCallArgument(arg5); - addCallArgument(arg6); - } - - - ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, TrustedImm32 arg6) - { - resetCallArguments(); - addCallArgument(GPRInfo::callFrameRegister); - addCallArgument(arg1); - addCallArgument(arg2); - addCallArgument(arg3); - addCallArgument(arg4); - addCallArgument(arg5); - addCallArgument(arg6); - } - ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6, TrustedImmPtr arg7) + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6) { resetCallArguments(); addCallArgument(GPRInfo::callFrameRegister); @@ -755,7 +568,6 @@ public: addCallArgument(arg4); addCallArgument(arg5); addCallArgument(arg6); - addCallArgument(arg7); } ALWAYS_INLINE void setupArgumentsWithExecState(FPRReg arg1, GPRReg arg2) @@ -918,6 +730,12 @@ public: setupThreeStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2, GPRInfo::argumentGPR3>(arg1, arg2, arg3); } +#if CPU(MIPS) || (OS(WINDOWS) && CPU(X86_64)) +#define POKE_ARGUMENT_OFFSET 4 
+#else +#define POKE_ARGUMENT_OFFSET 0 +#endif + #if CPU(X86_64) || CPU(ARM64) ALWAYS_INLINE void setupArguments(FPRReg arg1) { @@ -931,27 +749,14 @@ public: ALWAYS_INLINE void setupArgumentsWithExecState(FPRReg arg1, GPRReg arg2) { -#if OS(WINDOWS) && CPU(X86_64) - // On Windows, arguments map to designated registers based on the argument positions, even when there are interlaced scalar and floating point arguments. - // See http://msdn.microsoft.com/en-us/library/zthk2dkh.aspx - moveDouble(arg1, FPRInfo::argumentFPR1); - move(arg2, GPRInfo::argumentGPR2); -#else moveDouble(arg1, FPRInfo::argumentFPR0); move(arg2, GPRInfo::argumentGPR1); -#endif move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); } ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, FPRReg arg3) { -#if OS(WINDOWS) && CPU(X86_64) - // On Windows, arguments map to designated registers based on the argument positions, even when there are interlaced scalar and floating point arguments. - // See http://msdn.microsoft.com/en-us/library/zthk2dkh.aspx - moveDouble(arg3, FPRInfo::argumentFPR3); -#else moveDouble(arg3, FPRInfo::argumentFPR0); -#endif setupStubArguments(arg1, arg2); move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); } @@ -1134,12 +939,6 @@ public: move(arg1, GPRInfo::argumentGPR0); } - ALWAYS_INLINE void setupArguments(TrustedImmPtr arg1, GPRReg arg2) - { - move(arg2, GPRInfo::argumentGPR1); - move(arg1, GPRInfo::argumentGPR0); - } - ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2) { setupTwoStubArgsGPR<GPRInfo::argumentGPR0, GPRInfo::argumentGPR1>(arg1, arg2); @@ -1157,12 +956,6 @@ public: move(arg4, GPRInfo::argumentGPR3); } - ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4) - { - setupThreeStubArgsGPR<GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, GPRInfo::argumentGPR2>(arg1, arg2, arg3); - move(arg4, GPRInfo::argumentGPR3); - } - ALWAYS_INLINE void setupArguments(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, TrustedImmPtr arg4) { setupTwoStubArgsGPR<GPRInfo::argumentGPR0, GPRInfo::argumentGPR2>(arg1, arg3); @@ -1207,14 +1000,6 @@ public: move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); } -#if OS(WINDOWS) && CPU(X86_64) - ALWAYS_INLINE void setupArgumentsWithExecStateForCallWithSlowPathReturnType(TrustedImm32 arg1) - { - move(arg1, GPRInfo::argumentGPR2); - move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1); - } -#endif - ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2) { setupStubArguments(arg1, arg2); @@ -1348,14 +1133,6 @@ public: move(arg3, GPRInfo::argumentGPR3); move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); } - - ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, TrustedImm32 arg3) - { - move(arg1, GPRInfo::argumentGPR1); - move(arg2, GPRInfo::argumentGPR2); - move(arg3, GPRInfo::argumentGPR3); - move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); - } ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImmPtr arg3) { @@ -1380,14 +1157,6 @@ public: move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); } - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImmPtr arg2, GPRReg arg3) - { - move(arg3, GPRInfo::argumentGPR3); - move(arg1, GPRInfo::argumentGPR1); - move(arg2, GPRInfo::argumentGPR2); - move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); - } - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3) { move(arg3, 
GPRInfo::argumentGPR3); @@ -1396,14 +1165,6 @@ public: move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); } - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, TrustedImm32 arg3) - { - move(arg3, GPRInfo::argumentGPR3); - move(arg1, GPRInfo::argumentGPR1); - move(arg2, GPRInfo::argumentGPR2); - move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); - } - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, TrustedImm32 arg3) { move(arg2, GPRInfo::argumentGPR2); @@ -1483,20 +1244,6 @@ public: setupArgumentsWithExecState(arg1, arg2, arg3); } - ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, GPRReg arg5) - { - poke(arg5, POKE_ARGUMENT_OFFSET + 1); - poke(arg4, POKE_ARGUMENT_OFFSET); - setupArgumentsWithExecState(arg1, arg2, arg3); - } - - ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, TrustedImmPtr arg5) - { - poke(arg5, POKE_ARGUMENT_OFFSET + 1); - poke(arg4, POKE_ARGUMENT_OFFSET); - setupArgumentsWithExecState(arg1, arg2, arg3); - } - ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4) { poke(arg4, POKE_ARGUMENT_OFFSET); @@ -1521,55 +1268,12 @@ public: setupArgumentsWithExecState(arg1, arg2, arg3); } - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImm32 arg4) - { - poke(arg4, POKE_ARGUMENT_OFFSET); - setupArgumentsWithExecState(arg1, arg2, arg3); - } - - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4, TrustedImm32 arg5) - { - poke(arg5, POKE_ARGUMENT_OFFSET + 1); - poke(arg4, POKE_ARGUMENT_OFFSET); - setupArgumentsWithExecState(arg1, arg2, arg3); - } - -#if CPU(X86_64) - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm64 arg4) - { - poke(arg4, POKE_ARGUMENT_OFFSET); - setupArgumentsWithExecState(arg1, arg2, arg3); - } -#endif - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImmPtr arg4) { poke(arg4, POKE_ARGUMENT_OFFSET); setupArgumentsWithExecState(arg1, arg2, arg3); } - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5) - { - poke(arg5, POKE_ARGUMENT_OFFSET + 1); - poke(arg4, POKE_ARGUMENT_OFFSET); - setupArgumentsWithExecState(arg1, arg2, arg3); - } - - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5, TrustedImm32 arg6) - { - poke(arg6, POKE_ARGUMENT_OFFSET + 2); - poke(arg5, POKE_ARGUMENT_OFFSET + 1); - poke(arg4, POKE_ARGUMENT_OFFSET); - setupArgumentsWithExecState(arg1, arg2, arg3); - } - - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5) - { - poke(arg5, POKE_ARGUMENT_OFFSET + 1); - poke(arg4, POKE_ARGUMENT_OFFSET); - setupArgumentsWithExecState(arg1, arg2, arg3); - } - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImmPtr arg5) { poke(arg5, POKE_ARGUMENT_OFFSET + 1); @@ -1652,18 +1356,6 @@ public: poke(arg4, POKE_ARGUMENT_OFFSET); setupArgumentsWithExecState(arg1, arg2, arg3); } - - ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImmPtr 
arg4) - { - poke(arg4, POKE_ARGUMENT_OFFSET); - setupArgumentsWithExecState(arg1, arg2, arg3); - } - - ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4) - { - poke(arg4, POKE_ARGUMENT_OFFSET); - setupArgumentsWithExecState(arg1, arg2, arg3); - } ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5) { @@ -1693,33 +1385,6 @@ public: setupArgumentsWithExecState(arg1, arg2, arg3); } - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5) - { - poke(arg5, POKE_ARGUMENT_OFFSET + 1); - poke(arg4, POKE_ARGUMENT_OFFSET); - setupArgumentsWithExecState(arg1, arg2, arg3); - } - - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4) - { - poke(arg4, POKE_ARGUMENT_OFFSET); - setupArgumentsWithExecState(arg1, arg2, arg3); - } - - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4, TrustedImm32 arg5) - { - poke(arg5, POKE_ARGUMENT_OFFSET + 1); - poke(arg4, POKE_ARGUMENT_OFFSET); - setupArgumentsWithExecState(arg1, arg2, arg3); - } - - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5) - { - poke(arg5, POKE_ARGUMENT_OFFSET + 1); - poke(arg4, POKE_ARGUMENT_OFFSET); - setupArgumentsWithExecState(arg1, arg2, arg3); - } - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5) { poke(arg5, POKE_ARGUMENT_OFFSET + 1); @@ -1727,13 +1392,6 @@ public: setupArgumentsWithExecState(arg1, arg2, arg3); } - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImmPtr arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5) - { - poke(arg5, POKE_ARGUMENT_OFFSET + 1); - poke(arg4, POKE_ARGUMENT_OFFSET); - setupArgumentsWithExecState(arg1, arg2, arg3); - } - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5) { poke(arg5, POKE_ARGUMENT_OFFSET + 1); @@ -1756,14 +1414,6 @@ public: setupArgumentsWithExecState(arg1, arg2, arg3); } - ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, TrustedImm32 arg6) - { - poke(arg6, POKE_ARGUMENT_OFFSET + 2); - poke(arg5, POKE_ARGUMENT_OFFSET + 1); - poke(arg4, POKE_ARGUMENT_OFFSET); - setupArgumentsWithExecState(arg1, arg2, arg3); - } - ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5, TrustedImm32 arg6) { poke(arg6, POKE_ARGUMENT_OFFSET + 2); @@ -1772,15 +1422,6 @@ public: setupArgumentsWithExecState(arg1, arg2, arg3); } - ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6, TrustedImmPtr arg7) - { - poke(arg7, POKE_ARGUMENT_OFFSET + 3); - poke(arg6, POKE_ARGUMENT_OFFSET + 2); - poke(arg5, POKE_ARGUMENT_OFFSET + 1); - poke(arg4, POKE_ARGUMENT_OFFSET); - setupArgumentsWithExecState(arg1, arg2, arg3); - } - ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, GPRReg arg4, GPRReg arg5) { poke(arg5, POKE_ARGUMENT_OFFSET + 1); @@ -1828,22 +1469,6 @@ public: setupArgumentsWithExecState(arg1, arg2, arg3); } - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, 
GPRReg arg5, TrustedImm32 arg6) - { - poke(arg6, POKE_ARGUMENT_OFFSET + 2); - poke(arg5, POKE_ARGUMENT_OFFSET + 1); - poke(arg4, POKE_ARGUMENT_OFFSET); - setupArgumentsWithExecState(arg1, arg2, arg3); - } - - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, TrustedImmPtr arg6) - { - poke(arg6, POKE_ARGUMENT_OFFSET + 2); - poke(arg5, POKE_ARGUMENT_OFFSET + 1); - poke(arg4, POKE_ARGUMENT_OFFSET); - setupArgumentsWithExecState(arg1, arg2, arg3); - } - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6, GPRReg arg7) { poke(arg7, POKE_ARGUMENT_OFFSET + 3); @@ -1862,16 +1487,6 @@ public: setupArgumentsWithExecState(arg1, arg2, arg3); } - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6, GPRReg arg7, TrustedImmPtr arg8) - { - poke(arg8, POKE_ARGUMENT_OFFSET + 4); - poke(arg7, POKE_ARGUMENT_OFFSET + 3); - poke(arg6, POKE_ARGUMENT_OFFSET + 2); - poke(arg5, POKE_ARGUMENT_OFFSET + 1); - poke(arg4, POKE_ARGUMENT_OFFSET); - setupArgumentsWithExecState(arg1, arg2, arg3); - } - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5, GPRReg arg6, GPRReg arg7) { poke(arg7, POKE_ARGUMENT_OFFSET + 3); @@ -1888,13 +1503,6 @@ public: move(arg3, GPRInfo::argumentGPR2); move(arg4, GPRInfo::argumentGPR3); } - - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, TrustedImm32 arg5) - { - poke(arg5, POKE_ARGUMENT_OFFSET + 1); - poke(arg4, POKE_ARGUMENT_OFFSET); - setupArgumentsWithExecState(arg1, arg2, arg3); - } #endif // NUMBER_OF_ARGUMENT_REGISTERS == 4 #if NUMBER_OF_ARGUMENT_REGISTERS >= 5 @@ -1903,13 +1511,6 @@ public: setupThreeStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR3, GPRInfo::argumentGPR4>(arg1, arg3, arg4); } - ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4) - { - setupThreeStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2, GPRInfo::argumentGPR3>(arg1, arg2, arg3); - move(arg4, GPRInfo::argumentGPR4); - move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); - } - ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4) { setupTwoStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR4>(arg1, arg4); @@ -1932,76 +1533,6 @@ public: move(arg4, GPRInfo::argumentGPR4); move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); } - - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImm32 arg4) - { - move(arg2, GPRInfo::argumentGPR2); // In case arg2 is argumentGPR1. - move(arg1, GPRInfo::argumentGPR1); - move(arg3, GPRInfo::argumentGPR3); - move(arg4, GPRInfo::argumentGPR4); - move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); - } - - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm64 arg4) - { - move(arg2, GPRInfo::argumentGPR2); // In case arg2 is argumentGPR1. 
- move(arg1, GPRInfo::argumentGPR1); - move(arg3, GPRInfo::argumentGPR3); - move(arg4, GPRInfo::argumentGPR4); - move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); - } - - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4, TrustedImm32 arg5) - { - move(arg2, GPRInfo::argumentGPR2); // In case arg2 is argumentGPR1. - move(arg1, GPRInfo::argumentGPR1); - move(arg3, GPRInfo::argumentGPR3); - move(arg4, GPRInfo::argumentGPR4); - move(arg5, GPRInfo::argumentGPR5); - move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); - } - - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5) - { - move(arg3, GPRInfo::argumentGPR3); - move(arg1, GPRInfo::argumentGPR1); - move(arg2, GPRInfo::argumentGPR2); - move(arg4, GPRInfo::argumentGPR4); - move(arg5, GPRInfo::argumentGPR5); - move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); - } - - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, TrustedImm32 arg5) - { - setupTwoStubArgsGPR<GPRInfo::argumentGPR2, GPRInfo::argumentGPR4>(arg2, arg4); - move(arg1, GPRInfo::argumentGPR1); - move(arg3, GPRInfo::argumentGPR3); - move(arg5, GPRInfo::argumentGPR5); - move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); - } - - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5) - { - setupTwoStubArgsGPR<GPRInfo::argumentGPR2, GPRInfo::argumentGPR3>(arg2, arg3); - move(arg1, GPRInfo::argumentGPR1); - move(arg4, GPRInfo::argumentGPR4); - move(arg5, GPRInfo::argumentGPR5); - move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); - } - - ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4) - { - setupThreeStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2, GPRInfo::argumentGPR3>(arg1, arg2, arg3); - move(arg4, GPRInfo::argumentGPR4); - move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); - } - - ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4) - { - setupThreeStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2, GPRInfo::argumentGPR4>(arg1, arg2, arg4); - move(arg3, GPRInfo::argumentGPR3); - move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); - } ALWAYS_INLINE void setupArguments(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4, TrustedImmPtr arg5) { @@ -2017,15 +1548,6 @@ public: move(arg4, GPRInfo::argumentGPR3); } #endif - - void setupArguments(JSValueRegs arg1) - { -#if USE(JSVALUE64) - setupArguments(arg1.gpr()); -#else - setupArguments(arg1.payloadGPR(), arg1.tagGPR()); -#endif - } void setupResults(GPRReg destA, GPRReg destB) { @@ -2048,19 +1570,9 @@ public: swap(destA, destB); } - void setupResults(JSValueRegs regs) - { -#if USE(JSVALUE64) - move(GPRInfo::returnValueGPR, regs.gpr()); -#else - setupResults(regs.payloadGPR(), regs.tagGPR()); -#endif - } - void jumpToExceptionHandler() { // genericUnwind() leaves the handler CallFrame* in vm->callFrameForThrow, - // the topVMEntryFrame for the handler in vm->vmEntryFrameForThrow, // and the address of the handler in vm->targetMachinePCForThrow. 
loadPtr(&vm()->targetMachinePCForThrow, GPRInfo::regT1); jump(GPRInfo::regT1); diff --git a/Source/JavaScriptCore/jit/AccessorCallJITStubRoutine.cpp b/Source/JavaScriptCore/jit/ClosureCallStubRoutine.cpp index f57306113..1588f7fea 100644 --- a/Source/JavaScriptCore/jit/AccessorCallJITStubRoutine.cpp +++ b/Source/JavaScriptCore/jit/ClosureCallStubRoutine.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2014 Apple Inc. All rights reserved. + * Copyright (C) 2012 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -24,29 +24,37 @@ */ #include "config.h" -#include "AccessorCallJITStubRoutine.h" +#include "ClosureCallStubRoutine.h" #if ENABLE(JIT) -#include "CallLinkInfo.h" +#include "Executable.h" +#include "Heap.h" +#include "VM.h" +#include "Operations.h" +#include "SlotVisitor.h" +#include "Structure.h" namespace JSC { -AccessorCallJITStubRoutine::AccessorCallJITStubRoutine( - const MacroAssemblerCodeRef& code, VM& vm, std::unique_ptr<CallLinkInfo> info) - : GCAwareJITStubRoutine(code, vm) - , m_callLinkInfo(WTF::move(info)) +ClosureCallStubRoutine::ClosureCallStubRoutine( + const MacroAssemblerCodeRef& code, VM& vm, const JSCell* owner, + Structure* structure, ExecutableBase* executable, const CodeOrigin& codeOrigin) + : GCAwareJITStubRoutine(code, vm, true) + , m_structure(vm, owner, structure) + , m_executable(vm, owner, executable) + , m_codeOrigin(codeOrigin) { } -AccessorCallJITStubRoutine::~AccessorCallJITStubRoutine() +ClosureCallStubRoutine::~ClosureCallStubRoutine() { } -bool AccessorCallJITStubRoutine::visitWeak(RepatchBuffer& repatchBuffer) +void ClosureCallStubRoutine::markRequiredObjectsInternal(SlotVisitor& visitor) { - m_callLinkInfo->visitWeak(repatchBuffer); - return true; + visitor.append(&m_structure); + visitor.append(&m_executable); } } // namespace JSC diff --git a/Source/JavaScriptCore/jit/AccessorCallJITStubRoutine.h b/Source/JavaScriptCore/jit/ClosureCallStubRoutine.h index 0d1074773..ad61ed514 100644 --- a/Source/JavaScriptCore/jit/AccessorCallJITStubRoutine.h +++ b/Source/JavaScriptCore/jit/ClosureCallStubRoutine.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2014 Apple Inc. All rights reserved. + * Copyright (C) 2012 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -23,35 +23,44 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef AccessorCallJITStubRoutine_h -#define AccessorCallJITStubRoutine_h +#ifndef ClosureCallStubRoutine_h +#define ClosureCallStubRoutine_h + +#include <wtf/Platform.h> #if ENABLE(JIT) +#include "CodeOrigin.h" #include "GCAwareJITStubRoutine.h" namespace JSC { -class CallLinkInfo; - -// JIT stub routine for use by JavaScript accessors. These will be making a JS -// call that requires inline caching. 
- -class AccessorCallJITStubRoutine : public GCAwareJITStubRoutine { +class ClosureCallStubRoutine : public GCAwareJITStubRoutine { public: - AccessorCallJITStubRoutine( - const MacroAssemblerCodeRef&, VM&, std::unique_ptr<CallLinkInfo>); + ClosureCallStubRoutine( + const MacroAssemblerCodeRef&, VM&, const JSCell* owner, + Structure*, ExecutableBase*, const CodeOrigin&); - virtual ~AccessorCallJITStubRoutine(); + virtual ~ClosureCallStubRoutine(); - virtual bool visitWeak(RepatchBuffer&) override; - - std::unique_ptr<CallLinkInfo> m_callLinkInfo; + Structure* structure() const { return m_structure.get(); } + ExecutableBase* executable() const { return m_executable.get(); } + const CodeOrigin& codeOrigin() const { return m_codeOrigin; } + +protected: + virtual void markRequiredObjectsInternal(SlotVisitor&) override; + +private: + WriteBarrier<Structure> m_structure; + WriteBarrier<ExecutableBase> m_executable; + // This allows us to figure out who a call is linked to by searching through + // stub routines. + CodeOrigin m_codeOrigin; }; } // namespace JSC #endif // ENABLE(JIT) -#endif // AccessorCallJITStubRoutine_h +#endif // ClosureCallStubRoutine_h diff --git a/Source/JavaScriptCore/jit/CompactJITCodeMap.h b/Source/JavaScriptCore/jit/CompactJITCodeMap.h index d5eaa4072..b09f2f6cd 100644 --- a/Source/JavaScriptCore/jit/CompactJITCodeMap.h +++ b/Source/JavaScriptCore/jit/CompactJITCodeMap.h @@ -10,7 +10,7 @@ * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 3. Neither the name of Apple Inc. ("Apple") nor the names of + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* @@ -32,6 +32,8 @@ #include <wtf/Assertions.h> #include <wtf/FastMalloc.h> +#include <wtf/OwnPtr.h> +#include <wtf/PassOwnPtr.h> #include <wtf/Vector.h> namespace JSC { @@ -45,7 +47,7 @@ namespace JSC { // CompactJITCodeMap::Encoder encoder; // encoder.append(a, b); // encoder.append(c, d); // preconditions: c >= a, d >= b -// auto map = encoder.finish(); +// OwnPtr<CompactJITCodeMap> map = encoder.finish(); // // At some later time: // @@ -78,16 +80,6 @@ struct BytecodeAndMachineOffset { class CompactJITCodeMap { WTF_MAKE_FAST_ALLOCATED; public: - CompactJITCodeMap(uint8_t* buffer, unsigned size, unsigned numberOfEntries) - : m_buffer(buffer) -#if !ASSERT_DISABLED - , m_size(size) -#endif - , m_numberOfEntries(numberOfEntries) - { - UNUSED_PARAM(size); - } - ~CompactJITCodeMap() { if (m_buffer) @@ -102,6 +94,16 @@ public: void decode(Vector<BytecodeAndMachineOffset>& result) const; private: + CompactJITCodeMap(uint8_t* buffer, unsigned size, unsigned numberOfEntries) + : m_buffer(buffer) +#if !ASSERT_DISABLED + , m_size(size) +#endif + , m_numberOfEntries(numberOfEntries) + { + UNUSED_PARAM(size); + } + uint8_t at(unsigned index) const { ASSERT(index < m_size); @@ -136,8 +138,8 @@ public: void ensureCapacityFor(unsigned numberOfEntriesToAdd); void append(unsigned bytecodeIndex, unsigned machineCodeOffset); - std::unique_ptr<CompactJITCodeMap> finish(); - + PassOwnPtr<CompactJITCodeMap> finish(); + private: void appendByte(uint8_t value); void encodeNumber(uint32_t value); @@ -210,18 +212,18 @@ inline void CompactJITCodeMap::Encoder::append(unsigned bytecodeIndex, unsigned m_numberOfEntries++; } -inline std::unique_ptr<CompactJITCodeMap> CompactJITCodeMap::Encoder::finish() +inline PassOwnPtr<CompactJITCodeMap> CompactJITCodeMap::Encoder::finish() { m_capacity = m_size; m_buffer = static_cast<uint8_t*>(fastRealloc(m_buffer, m_capacity)); - auto result = std::make_unique<CompactJITCodeMap>(m_buffer, m_size, m_numberOfEntries); + OwnPtr<CompactJITCodeMap> result = adoptPtr(new CompactJITCodeMap(m_buffer, m_size, m_numberOfEntries)); m_buffer = 0; m_size = 0; m_capacity = 0; m_numberOfEntries = 0; m_previousBytecodeIndex = 0; m_previousMachineCodeOffset = 0; - return result; + return result.release(); } inline void CompactJITCodeMap::Encoder::appendByte(uint8_t value) diff --git a/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.cpp b/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.cpp deleted file mode 100644 index 9cee5d9b1..000000000 --- a/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.cpp +++ /dev/null @@ -1,73 +0,0 @@ -/* - * Copyright (C) 2015 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC.
OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "config.h" -#include "ExecutableAllocationFuzz.h" - -#include "TestRunnerUtils.h" -#include <wtf/Atomics.h> -#include <wtf/DataLog.h> - -namespace JSC { - -static Atomic<unsigned> s_numberOfExecutableAllocationFuzzChecks; -unsigned numberOfExecutableAllocationFuzzChecks() -{ - return s_numberOfExecutableAllocationFuzzChecks.load(); -} - -ExecutableAllocationFuzzResult doExecutableAllocationFuzzing() -{ - ASSERT(Options::enableExecutableAllocationFuzz()); - - unsigned oldValue; - unsigned newValue; - do { - oldValue = s_numberOfExecutableAllocationFuzzChecks.load(); - newValue = oldValue + 1; - } while (!s_numberOfExecutableAllocationFuzzChecks.compareExchangeWeak(oldValue, newValue)); - - if (newValue == Options::fireExecutableAllocationFuzzAt()) { - if (Options::verboseExecutableAllocationFuzz()) { - dataLog("Will pretend to fail executable allocation.\n"); - WTFReportBacktrace(); - } - return PretendToFailExecutableAllocation; - } - - if (Options::fireExecutableAllocationFuzzAtOrAfter() - && newValue >= Options::fireExecutableAllocationFuzzAtOrAfter()) { - if (Options::verboseExecutableAllocationFuzz()) { - dataLog("Will pretend to fail executable allocation.\n"); - WTFReportBacktrace(); - } - return PretendToFailExecutableAllocation; - } - - return AllowNormalExecutableAllocation; -} - -} // namespace JSC - diff --git a/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.h b/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.h deleted file mode 100644 index 4997d210c..000000000 --- a/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright (C) 2015 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef ExecutableAllocationFuzz_h -#define ExecutableAllocationFuzz_h - -#include "Options.h" - -namespace JSC { - -enum ExecutableAllocationFuzzResult { - AllowNormalExecutableAllocation, - PretendToFailExecutableAllocation -}; - -ExecutableAllocationFuzzResult doExecutableAllocationFuzzing(); - -inline ExecutableAllocationFuzzResult doExecutableAllocationFuzzingIfEnabled() -{ - if (LIKELY(!Options::enableExecutableAllocationFuzz())) - return AllowNormalExecutableAllocation; - - return doExecutableAllocationFuzzing(); -} - -} // namespace JSC - -#endif // ExecutableAllocationFuzz_h - diff --git a/Source/JavaScriptCore/jit/ExecutableAllocator.cpp b/Source/JavaScriptCore/jit/ExecutableAllocator.cpp index d2911a319..5ac6cc412 100644 --- a/Source/JavaScriptCore/jit/ExecutableAllocator.cpp +++ b/Source/JavaScriptCore/jit/ExecutableAllocator.cpp @@ -24,17 +24,18 @@ */ #include "config.h" -#include "ExecutableAllocator.h" -#include "JSCInlines.h" +#include "ExecutableAllocator.h" #if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND) #include "CodeProfiling.h" #include <wtf/HashSet.h> -#include <wtf/Lock.h> #include <wtf/MetaAllocator.h> -#include <wtf/NeverDestroyed.h> #include <wtf/PageReservation.h> +#if ENABLE(ASSEMBLER_WX_EXCLUSIVE) +#include <wtf/PassOwnPtr.h> +#endif +#include <wtf/ThreadingPrimitives.h> #include <wtf/VMTags.h> #endif @@ -56,7 +57,7 @@ public: DemandExecutableAllocator() : MetaAllocator(jitAllocationGranule) { - std::lock_guard<StaticLock> lock(allocatorsMutex()); + MutexLocker lock(allocatorsMutex()); allocators().add(this); // Don't preallocate any memory here. } @@ -64,7 +65,7 @@ public: virtual ~DemandExecutableAllocator() { { - std::lock_guard<StaticLock> lock(allocatorsMutex()); + MutexLocker lock(allocatorsMutex()); allocators().remove(this); } for (unsigned i = 0; i < reservations.size(); ++i) @@ -74,7 +75,7 @@ public: static size_t bytesAllocatedByAllAllocators() { size_t total = 0; - std::lock_guard<StaticLock> lock(allocatorsMutex()); + MutexLocker lock(allocatorsMutex()); for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator) total += (*allocator)->bytesAllocated(); return total; @@ -83,7 +84,7 @@ public: static size_t bytesCommittedByAllocactors() { size_t total = 0; - std::lock_guard<StaticLock> lock(allocatorsMutex()); + MutexLocker lock(allocatorsMutex()); for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator) total += (*allocator)->bytesCommitted(); return total; @@ -92,7 +93,7 @@ public: #if ENABLE(META_ALLOCATOR_PROFILE) static void dumpProfileFromAllAllocators() { - std::lock_guard<StaticLock> lock(allocatorsMutex()); + MutexLocker lock(allocatorsMutex()); for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator) (*allocator)->dumpProfile(); } @@ -134,14 +135,12 @@ private: Vector<PageReservation, 16> reservations; static HashSet<DemandExecutableAllocator*>& allocators() { - DEPRECATED_DEFINE_STATIC_LOCAL(HashSet<DemandExecutableAllocator*>, sAllocators, ()); + DEFINE_STATIC_LOCAL(HashSet<DemandExecutableAllocator*>, sAllocators, ()); return sAllocators; } - - static StaticLock& allocatorsMutex() + static Mutex& allocatorsMutex() { - static StaticLock mutex; - + DEFINE_STATIC_LOCAL(Mutex, mutex, ()); return mutex; } }; @@ -170,7 +169,7 @@ void ExecutableAllocator::initializeAllocator() 
ExecutableAllocator::ExecutableAllocator(VM&) #if ENABLE(ASSEMBLER_WX_EXCLUSIVE) - : m_allocator(std::make_unique<DemandExecutableAllocator>()) + : m_allocator(adoptPtr(new DemandExecutableAllocator())) #endif { ASSERT(allocator()); @@ -213,11 +212,11 @@ double ExecutableAllocator::memoryPressureMultiplier(size_t addedMemoryUsage) } -RefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort) +PassRefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort) { RefPtr<ExecutableMemoryHandle> result = allocator()->allocate(sizeInBytes, ownerUID); RELEASE_ASSERT(result || effort != JITCompilationMustSucceed); - return result; + return result.release(); } size_t ExecutableAllocator::committedByteCount() diff --git a/Source/JavaScriptCore/jit/ExecutableAllocator.h b/Source/JavaScriptCore/jit/ExecutableAllocator.h index 9294f5e44..01be7c1aa 100644 --- a/Source/JavaScriptCore/jit/ExecutableAllocator.h +++ b/Source/JavaScriptCore/jit/ExecutableAllocator.h @@ -32,6 +32,7 @@ #include <wtf/MetaAllocatorHandle.h> #include <wtf/MetaAllocator.h> #include <wtf/PageAllocation.h> +#include <wtf/PassRefPtr.h> #include <wtf/RefCounted.h> #include <wtf/Vector.h> @@ -54,6 +55,12 @@ #include <unistd.h> #endif +#if OS(WINCE) +// From pkfuncs.h (private header file from the Platform Builder) +#define CACHE_SYNC_ALL 0x07F +extern "C" __declspec(dllimport) void CacheRangeFlush(LPVOID pAddr, DWORD dwLength, DWORD dwFlags); +#endif + #define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (pageSize() * 4) #if ENABLE(ASSEMBLER_WX_EXCLUSIVE) @@ -67,9 +74,25 @@ namespace JSC { class VM; +void releaseExecutableMemory(VM&); static const unsigned jitAllocationGranule = 32; +inline size_t roundUpAllocationSize(size_t request, size_t granularity) +{ + RELEASE_ASSERT((std::numeric_limits<size_t>::max() - granularity) > request); + + // Round up to next page boundary + size_t size = request + (granularity - 1); + size = size & ~(granularity - 1); + ASSERT(size >= request); + return size; +} + +} + +namespace JSC { + typedef WTF::MetaAllocatorHandle ExecutableMemoryHandle; #if ENABLE(ASSEMBLER) @@ -79,16 +102,13 @@ class DemandExecutableAllocator; #endif #if ENABLE(EXECUTABLE_ALLOCATOR_FIXED) -#if CPU(ARM) +#if CPU(ARM) || CPU(ARM64) static const size_t fixedExecutableMemoryPoolSize = 16 * 1024 * 1024; -#elif CPU(ARM64) -static const size_t fixedExecutableMemoryPoolSize = 32 * 1024 * 1024; #elif CPU(X86_64) static const size_t fixedExecutableMemoryPoolSize = 1024 * 1024 * 1024; #else static const size_t fixedExecutableMemoryPoolSize = 32 * 1024 * 1024; #endif -static const double executablePoolReservationFraction = 0.25; extern uintptr_t startOfFixedExecutableMemoryPool; #endif @@ -114,7 +134,7 @@ public: static void dumpProfile() { } #endif - RefPtr<ExecutableMemoryHandle> allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort); + PassRefPtr<ExecutableMemoryHandle> allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort); #if ENABLE(ASSEMBLER_WX_EXCLUSIVE) static void makeWritable(void* start, size_t size) @@ -139,7 +159,7 @@ private: static void reprotectRegion(void*, size_t, ProtectionSetting); #if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND) // We create a MetaAllocator for each JS global object. 
- std::unique_ptr<DemandExecutableAllocator> m_allocator; + OwnPtr<DemandExecutableAllocator> m_allocator; DemandExecutableAllocator* allocator() { return m_allocator.get(); } #endif #endif diff --git a/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp b/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp index 287668137..8e0b77cfc 100644 --- a/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp +++ b/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2009, 2015 Apple Inc. All rights reserved. + * Copyright (C) 2009 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -24,18 +24,14 @@ */ #include "config.h" -#include "ExecutableAllocator.h" -#include "JSCInlines.h" +#include "ExecutableAllocator.h" #if ENABLE(EXECUTABLE_ALLOCATOR_FIXED) #include "CodeProfiling.h" -#include "ExecutableAllocationFuzz.h" #include <errno.h> -#if !PLATFORM(WIN) #include <unistd.h> -#endif #include <wtf/MetaAllocator.h> #include <wtf/PageReservation.h> #include <wtf/VMTags.h> @@ -48,6 +44,11 @@ #include <stdio.h> #endif +#if !PLATFORM(IOS) && PLATFORM(MAC) && __MAC_OS_X_VERSION_MIN_REQUIRED < 1090 +// MADV_FREE_REUSABLE does not work for JIT memory on older OSes so use MADV_FREE in that case. +#define WTF_USE_MADV_FREE_FOR_JIT_MEMORY 1 +#endif + using namespace WTF; namespace JSC { @@ -60,15 +61,12 @@ public: FixedVMPoolExecutableAllocator() : MetaAllocator(jitAllocationGranule) // round up all allocations to 32 bytes { - size_t reservationSize; - if (Options::jitMemoryReservationSize()) - reservationSize = Options::jitMemoryReservationSize(); - else - reservationSize = fixedExecutableMemoryPoolSize; - reservationSize = roundUpToMultipleOf(pageSize(), reservationSize); - m_reservation = PageReservation::reserveWithGuardPages(reservationSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true); + m_reservation = PageReservation::reserveWithGuardPages(fixedExecutableMemoryPoolSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true); +#if !ENABLE(LLINT) + RELEASE_ASSERT(m_reservation); +#endif if (m_reservation) { - ASSERT(m_reservation.size() == reservationSize); + ASSERT(m_reservation.size() == fixedExecutableMemoryPoolSize); addFreshFreeSpace(m_reservation.base(), m_reservation.size()); startOfFixedExecutableMemoryPool = reinterpret_cast<uintptr_t>(m_reservation.base()); @@ -154,49 +152,28 @@ double ExecutableAllocator::memoryPressureMultiplier(size_t addedMemoryUsage) MetaAllocator::Statistics statistics = allocator->currentStatistics(); ASSERT(statistics.bytesAllocated <= statistics.bytesReserved); size_t bytesAllocated = statistics.bytesAllocated + addedMemoryUsage; - size_t bytesAvailable = static_cast<size_t>( - statistics.bytesReserved * (1 - executablePoolReservationFraction)); - if (bytesAllocated >= bytesAvailable) - bytesAllocated = bytesAvailable; + if (bytesAllocated >= statistics.bytesReserved) + bytesAllocated = statistics.bytesReserved; double result = 1.0; - size_t divisor = bytesAvailable - bytesAllocated; + size_t divisor = statistics.bytesReserved - bytesAllocated; if (divisor) - result = static_cast<double>(bytesAvailable) / divisor; + result = static_cast<double>(statistics.bytesReserved) / divisor; if (result < 1.0) result = 1.0; return result; } -RefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort) 
+PassRefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(VM& vm, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort) { - if (effort != JITCompilationCanFail && Options::reportMustSucceedExecutableAllocations()) { - dataLog("Allocating ", sizeInBytes, " bytes of executable memory with JITCompilationMustSucceed.\n"); - WTFReportBacktrace(); - } - - if (effort == JITCompilationCanFail - && doExecutableAllocationFuzzingIfEnabled() == PretendToFailExecutableAllocation) - return nullptr; - - if (effort == JITCompilationCanFail) { - // Don't allow allocations if we are down to reserve. - MetaAllocator::Statistics statistics = allocator->currentStatistics(); - size_t bytesAllocated = statistics.bytesAllocated + sizeInBytes; - size_t bytesAvailable = static_cast<size_t>( - statistics.bytesReserved * (1 - executablePoolReservationFraction)); - if (bytesAllocated > bytesAvailable) - return nullptr; - } - RefPtr<ExecutableMemoryHandle> result = allocator->allocate(sizeInBytes, ownerUID); if (!result) { - if (effort != JITCompilationCanFail) { - dataLog("Ran out of executable memory while allocating ", sizeInBytes, " bytes.\n"); - CRASH(); - } - return nullptr; + if (effort == JITCompilationCanFail) + return result; + releaseExecutableMemory(vm); + result = allocator->allocate(sizeInBytes, ownerUID); + RELEASE_ASSERT(result); } - return result; + return result.release(); } size_t ExecutableAllocator::committedByteCount() diff --git a/Source/JavaScriptCore/jit/FPRInfo.h b/Source/JavaScriptCore/jit/FPRInfo.h index f06b17c0d..5bb0e16cc 100644 --- a/Source/JavaScriptCore/jit/FPRInfo.h +++ b/Source/JavaScriptCore/jit/FPRInfo.h @@ -242,6 +242,7 @@ public: 16, 17, 18, 19, 20, 21, 22, InvalidIndex }; unsigned result = indexForRegister[reg]; + ASSERT(result != InvalidIndex); return result; } @@ -304,6 +305,7 @@ public: InvalidIndex, InvalidIndex, 5, InvalidIndex, }; unsigned result = indexForRegister[reg]; + ASSERT(result != InvalidIndex); return result; } @@ -369,6 +371,7 @@ public: InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex }; unsigned result = indexForRegister[reg]; + ASSERT(result != InvalidIndex); return result; } diff --git a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp index bfb07a043..f681dd847 100644 --- a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp +++ b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp @@ -30,17 +30,18 @@ #include "Heap.h" #include "VM.h" -#include "JSCInlines.h" +#include "Operations.h" #include "SlotVisitor.h" #include "Structure.h" namespace JSC { GCAwareJITStubRoutine::GCAwareJITStubRoutine( - const MacroAssemblerCodeRef& code, VM& vm) + const MacroAssemblerCodeRef& code, VM& vm, bool isClosureCall) : JITStubRoutine(code) , m_mayBeExecuting(false) , m_isJettisoned(false) + , m_isClosureCall(isClosureCall) { vm.heap.m_jitStubRoutines.add(this); } @@ -97,6 +98,19 @@ void MarkingGCAwareJITStubRoutineWithOneObject::markRequiredObjectsInternal(Slot PassRefPtr<JITStubRoutine> createJITStubRoutine( const MacroAssemblerCodeRef& code, VM& vm, + const JSCell*, + bool makesCalls) +{ + if (!makesCalls) + return adoptRef(new JITStubRoutine(code)); + + return static_pointer_cast<JITStubRoutine>( + adoptRef(new GCAwareJITStubRoutine(code, vm))); +} + +PassRefPtr<JITStubRoutine> createJITStubRoutine( + const MacroAssemblerCodeRef& code, + VM& vm, const JSCell* owner, bool makesCalls, JSCell* object) @@ -104,11 +118,6 @@ PassRefPtr<JITStubRoutine> createJITStubRoutine( if (!makesCalls) return 
adoptRef(new JITStubRoutine(code)); - if (!object) { - return static_pointer_cast<JITStubRoutine>( - adoptRef(new GCAwareJITStubRoutine(code, vm))); - } - return static_pointer_cast<JITStubRoutine>( adoptRef(new MarkingGCAwareJITStubRoutineWithOneObject(code, vm, owner, object))); } diff --git a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h index 0adc63b50..03045c5d1 100644 --- a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h +++ b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012, 2014 Apple Inc. All rights reserved. + * Copyright (C) 2012 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,6 +26,8 @@ #ifndef GCAwareJITStubRoutine_h #define GCAwareJITStubRoutine_h +#include <wtf/Platform.h> + #if ENABLE(JIT) #include "JITStubRoutine.h" @@ -52,7 +54,7 @@ class JITStubRoutineSet; // list which does not get reclaimed all at once). class GCAwareJITStubRoutine : public JITStubRoutine { public: - GCAwareJITStubRoutine(const MacroAssemblerCodeRef&, VM&); + GCAwareJITStubRoutine(const MacroAssemblerCodeRef&, VM&, bool isClosureCall = false); virtual ~GCAwareJITStubRoutine(); void markRequiredObjects(SlotVisitor& visitor) @@ -62,6 +64,8 @@ public: void deleteFromGC(); + bool isClosureCall() const { return m_isClosureCall; } + protected: virtual void observeZeroRefCount() override; @@ -72,6 +76,7 @@ private: bool m_mayBeExecuting; bool m_isJettisoned; + bool m_isClosureCall; }; // Use this if you want to mark one additional object during GC if your stub @@ -109,13 +114,10 @@ private: // way. PassRefPtr<JITStubRoutine> createJITStubRoutine( + const MacroAssemblerCodeRef&, VM&, const JSCell* owner, bool makesCalls); +PassRefPtr<JITStubRoutine> createJITStubRoutine( const MacroAssemblerCodeRef&, VM&, const JSCell* owner, bool makesCalls, - JSCell* = nullptr); - -// Helper for the creation of simple stub routines that need no help from the GC. Note -// that codeBlock gets "executed" more than once. -#define FINALIZE_CODE_FOR_GC_AWARE_STUB(codeBlock, patchBuffer, makesCalls, cell, dataLogFArguments) \ - (createJITStubRoutine(FINALIZE_CODE_FOR((codeBlock), (patchBuffer), dataLogFArguments), *(codeBlock)->vm(), (codeBlock)->ownerExecutable(), (makesCalls), (cell))) + JSCell*); } // namespace JSC diff --git a/Source/JavaScriptCore/jit/GPRInfo.h b/Source/JavaScriptCore/jit/GPRInfo.h index a5e301bd4..393a56b50 100644 --- a/Source/JavaScriptCore/jit/GPRInfo.h +++ b/Source/JavaScriptCore/jit/GPRInfo.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2013 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -54,19 +54,12 @@ public: return JSValueRegs(gpr); } - static JSValueRegs withTwoAvailableRegs(GPRReg gpr, GPRReg) - { - return JSValueRegs(gpr); - } - bool operator!() const { return m_gpr == InvalidGPRReg; } GPRReg gpr() const { return m_gpr; } GPRReg tagGPR() const { return InvalidGPRReg; } GPRReg payloadGPR() const { return m_gpr; } - bool uses(GPRReg gpr) const { return m_gpr == gpr; } - private: GPRReg m_gpr; }; @@ -151,11 +144,6 @@ public: { } - static JSValueRegs withTwoAvailableRegs(GPRReg gpr1, GPRReg gpr2) - { - return JSValueRegs(gpr1, gpr2); - } - static JSValueRegs payloadOnly(GPRReg gpr) { return JSValueRegs(InvalidGPRReg, gpr); @@ -181,8 +169,6 @@ public: return tagGPR(); } - bool uses(GPRReg gpr) const { return m_tagGPR == gpr || m_payloadGPR == gpr; } - private: int8_t m_tagGPR; int8_t m_payloadGPR; @@ -335,18 +321,13 @@ public: return registerForIndex[index]; } - static GPRReg toArgumentRegister(unsigned) - { - UNREACHABLE_FOR_PLATFORM(); - return InvalidGPRReg; - } - static unsigned toIndex(GPRReg reg) { ASSERT(reg != InvalidGPRReg); ASSERT(static_cast<int>(reg) < 8); static const unsigned indexForRegister[8] = { 0, 2, 1, 3, InvalidIndex, InvalidIndex, 5, 4 }; unsigned result = indexForRegister[reg]; + ASSERT(result != InvalidIndex); return result; } @@ -418,8 +399,6 @@ public: static const GPRReg returnValueGPR = X86Registers::eax; // regT0 static const GPRReg returnValueGPR2 = X86Registers::edx; // regT1 static const GPRReg nonPreservedNonReturnGPR = X86Registers::esi; - static const GPRReg nonPreservedNonArgumentGPR = X86Registers::r10; - static const GPRReg patchpointScratchRegister = MacroAssembler::scratchRegister; static GPRReg toRegister(unsigned index) { @@ -513,13 +492,6 @@ public: return registerForIndex[index]; } - static GPRReg toArgumentRegister(unsigned index) - { - ASSERT(index < numberOfArgumentRegisters); - static const GPRReg registerForIndex[numberOfArgumentRegisters] = { argumentGPR0, argumentGPR1, argumentGPR2, argumentGPR3 }; - return registerForIndex[index]; - } - static unsigned toIndex(GPRReg reg) { ASSERT(reg != InvalidGPRReg); @@ -531,6 +503,7 @@ public: { 0, 1, 2, 8, 3, 9, InvalidIndex, 7, 4, 5, 6, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex }; #endif unsigned result = indexForRegister[reg]; + ASSERT(result != InvalidIndex); return result; } @@ -559,7 +532,6 @@ class GPRInfo { public: typedef GPRReg RegisterType; static const unsigned numberOfRegisters = 16; - static const unsigned numberOfArgumentRegisters = 8; // Note: regT3 is required to be callee-preserved. 
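[Note on the JSValueRegs hunks above: the two class definitions differ by value representation. Under JSVALUE64 a boxed JSValue fits in a single GPR, so tagGPR() reports InvalidGPRReg; under JSVALUE32_64 the tag and payload travel in separate GPRs. A condensed sketch of the two shapes, with the register type reduced to a stand-in:

enum GPRReg { InvalidGPRReg = -1, gpr0, gpr1 }; // stand-in register type

class JSValueRegs64 { // USE(JSVALUE64): the whole value lives in one register
public:
    explicit JSValueRegs64(GPRReg gpr) : m_gpr(gpr) { }
    GPRReg payloadGPR() const { return m_gpr; }
    GPRReg tagGPR() const { return InvalidGPRReg; } // no separate tag register
private:
    GPRReg m_gpr;
};

class JSValueRegs32 { // USE(JSVALUE32_64): tag and payload are split
public:
    JSValueRegs32(GPRReg tag, GPRReg payload) : m_tagGPR(tag), m_payloadGPR(payload) { }
    GPRReg tagGPR() const { return m_tagGPR; }
    GPRReg payloadGPR() const { return m_payloadGPR; }
private:
    GPRReg m_tagGPR;
    GPRReg m_payloadGPR;
};

The diff continues below.]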
@@ -574,8 +546,8 @@ public: static const GPRReg regT1 = ARM64Registers::x1; static const GPRReg regT2 = ARM64Registers::x2; static const GPRReg regT3 = ARM64Registers::x23; - static const GPRReg regT4 = ARM64Registers::x5; - static const GPRReg regT5 = ARM64Registers::x24; + static const GPRReg regT4 = ARM64Registers::x24; + static const GPRReg regT5 = ARM64Registers::x5; static const GPRReg regT6 = ARM64Registers::x6; static const GPRReg regT7 = ARM64Registers::x7; static const GPRReg regT8 = ARM64Registers::x8; @@ -590,9 +562,9 @@ public: static const GPRReg argumentGPR0 = ARM64Registers::x0; // regT0 static const GPRReg argumentGPR1 = ARM64Registers::x1; // regT1 static const GPRReg argumentGPR2 = ARM64Registers::x2; // regT2 - static const GPRReg argumentGPR3 = ARM64Registers::x3; - static const GPRReg argumentGPR4 = ARM64Registers::x4; - static const GPRReg argumentGPR5 = ARM64Registers::x5; // regT4 + static const GPRReg argumentGPR3 = ARM64Registers::x3; // regT3 + static const GPRReg argumentGPR4 = ARM64Registers::x4; // regT4 + static const GPRReg argumentGPR5 = ARM64Registers::x5; // regT5 static const GPRReg argumentGPR6 = ARM64Registers::x6; // regT6 static const GPRReg argumentGPR7 = ARM64Registers::x7; // regT7 static const GPRReg nonArgGPR0 = ARM64Registers::x8; // regT8 @@ -601,8 +573,6 @@ public: static const GPRReg returnValueGPR = ARM64Registers::x0; // regT0 static const GPRReg returnValueGPR2 = ARM64Registers::x1; // regT1 static const GPRReg nonPreservedNonReturnGPR = ARM64Registers::x2; - static const GPRReg nonPreservedNonArgumentGPR = ARM64Registers::x8; - static const GPRReg patchpointScratchRegister = ARM64Registers::ip0; // GPRReg mapping is direct, the machine register numbers can // be used directly as indices into the GPR RegisterBank.
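[Note: as the comment above says, ARM64's register-to-bank-index mapping is the identity, which is why the toIndex() in the following hunk can drop its range check and reduce, together with toRegister(), to plain casts. A sketch of that property, with GPRReg reduced to an int stand-in:

typedef int GPRReg; // stand-in for the real register enum

inline GPRReg toRegister(unsigned index) { return static_cast<GPRReg>(index); }
inline unsigned toIndex(GPRReg reg) { return static_cast<unsigned>(reg); }

The diff continues below.]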
@@ -628,20 +598,12 @@ public: } static unsigned toIndex(GPRReg reg) { - if (reg > regT15) - return InvalidIndex; return (unsigned)reg; } - static GPRReg toArgumentRegister(unsigned index) - { - ASSERT(index < numberOfArgumentRegisters); - return toRegister(index); - } - static const char* debugName(GPRReg reg) { - ASSERT(reg != InvalidGPRReg); + ASSERT(static_cast<unsigned>(reg) != InvalidGPRReg); ASSERT(static_cast<unsigned>(reg) < 32); static const char* nameForRegister[32] = { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", @@ -711,6 +673,7 @@ public: 6, InvalidIndex, 3, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex }; unsigned result = indexForRegister[reg]; + ASSERT(result != InvalidIndex); return result; } @@ -782,6 +745,7 @@ public: ASSERT(reg < 14); static const unsigned indexForRegister[14] = { 0, 1, 2, InvalidIndex, 4, 5, 6, 7, 8, 9, 3, InvalidIndex, InvalidIndex, InvalidIndex }; unsigned result = indexForRegister[reg]; + ASSERT(result != InvalidIndex); return result; } diff --git a/Source/JavaScriptCore/jit/HostCallReturnValue.cpp b/Source/JavaScriptCore/jit/HostCallReturnValue.cpp index e8d01916b..528fb2bc4 100644 --- a/Source/JavaScriptCore/jit/HostCallReturnValue.cpp +++ b/Source/JavaScriptCore/jit/HostCallReturnValue.cpp @@ -29,7 +29,6 @@ #include "CallFrame.h" #include "JSCJSValueInlines.h" #include "JSObject.h" -#include "JSCInlines.h" #include <wtf/InlineASM.h> diff --git a/Source/JavaScriptCore/jit/HostCallReturnValue.h b/Source/JavaScriptCore/jit/HostCallReturnValue.h index 71ff4e5bd..f4c8bc703 100644 --- a/Source/JavaScriptCore/jit/HostCallReturnValue.h +++ b/Source/JavaScriptCore/jit/HostCallReturnValue.h @@ -28,6 +28,7 @@ #include "JSCJSValue.h" #include "MacroAssemblerCodeRef.h" +#include <wtf/Platform.h> #if ENABLE(JIT) @@ -41,7 +42,7 @@ namespace JSC { extern "C" EncodedJSValue HOST_CALL_RETURN_VALUE_OPTION getHostCallReturnValue() REFERENCED_FROM_ASM WTF_INTERNAL; -#if COMPILER(GCC_OR_CLANG) +#if COMPILER(GCC) // This is a public declaration only to convince CLANG not to elide it. extern "C" EncodedJSValue HOST_CALL_RETURN_VALUE_OPTION getHostCallReturnValueWithExecState(ExecState*) REFERENCED_FROM_ASM WTF_INTERNAL; @@ -51,11 +52,11 @@ inline void initializeHostCallReturnValue() getHostCallReturnValueWithExecState(0); } -#else // COMPILER(GCC_OR_CLANG) +#else // COMPILER(GCC) inline void initializeHostCallReturnValue() { } -#endif // COMPILER(GCC_OR_CLANG) +#endif // COMPILER(GCC) } // namespace JSC diff --git a/Source/JavaScriptCore/jit/JIT.cpp b/Source/JavaScriptCore/jit/JIT.cpp index 038f5d7db..c3508b01d 100644 --- a/Source/JavaScriptCore/jit/JIT.cpp +++ b/Source/JavaScriptCore/jit/JIT.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008, 2009, 2012-2015 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2009, 2012, 2013 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,12 +26,15 @@ #include "config.h" #if ENABLE(JIT) - #include "JIT.h" -#include "ArityCheckFailReturnThunks.h" +// This probably does not belong here; adding here for now as a quick Windows build fix. 
+#if ENABLE(ASSEMBLER) && CPU(X86) && !OS(MAC_OS_X) +#include "MacroAssembler.h" +JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2; +#endif + #include "CodeBlock.h" -#include "CodeBlockWithJITType.h" #include "DFGCapabilities.h" #include "Interpreter.h" #include "JITInlines.h" @@ -39,15 +42,11 @@ #include "JSArray.h" #include "JSFunction.h" #include "LinkBuffer.h" -#include "MaxFrameExtentForSlowPathCall.h" -#include "JSCInlines.h" -#include "ProfilerDatabase.h" +#include "Operations.h" #include "RepatchBuffer.h" #include "ResultType.h" #include "SamplingTool.h" #include "SlowPathCall.h" -#include "StackAlignment.h" -#include "TypeProfilerLog.h" #include <wtf/CryptographicallyRandomNumber.h> using namespace std; @@ -76,7 +75,7 @@ JIT::JIT(VM* vm, CodeBlock* codeBlock) : JSInterfaceJIT(vm, codeBlock) , m_interpreter(vm->interpreter) , m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0) - , m_bytecodeOffset(std::numeric_limits<unsigned>::max()) + , m_bytecodeOffset((unsigned)-1) , m_getByIdIndex(UINT_MAX) , m_putByIdIndex(UINT_MAX) , m_byValInstructionIndex(UINT_MAX) @@ -99,31 +98,11 @@ void JIT::emitEnterOptimizationCheck() ASSERT(!m_bytecodeOffset); callOperation(operationOptimize, m_bytecodeOffset); skipOptimize.append(branchTestPtr(Zero, returnValueGPR)); - move(returnValueGPR2, stackPointerRegister); jump(returnValueGPR); skipOptimize.link(this); } #endif -void JIT::emitNotifyWrite(WatchpointSet* set) -{ - if (!set || set->state() == IsInvalidated) - return; - - addSlowCase(branch8(NotEqual, AbsoluteAddress(set->addressOfState()), TrustedImm32(IsInvalidated))); -} - -void JIT::assertStackPointerOffset() -{ - if (ASSERT_DISABLED) - return; - - addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT0); - Jump ok = branchPtr(Equal, regT0, stackPointerRegister); - breakpoint(); - ok.link(this); -} - #define NEXT_OPCODE(name) \ m_bytecodeOffset += OPCODE_LENGTH(name); \ break; @@ -149,9 +128,6 @@ void JIT::assertStackPointerOffset() void JIT::privateCompileMainPass() { - jitAssertTagsInPlace(); - jitAssertArgumentCountSane(); - Instruction* instructionsBegin = m_codeBlock->instructions().begin(); unsigned instructionCount = m_codeBlock->instructions().size(); @@ -182,10 +158,7 @@ void JIT::privateCompileMainPass() AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin( m_compilation->bytecodes(), m_bytecodeOffset)))->address())); } - - if (Options::eagerlyUpdateTopCallFrame()) - updateTopCallFrame(); - + switch (opcodeID) { DEFINE_SLOW_OP(del_by_val) DEFINE_SLOW_OP(in) @@ -194,9 +167,10 @@ void JIT::privateCompileMainPass() DEFINE_SLOW_OP(greater) DEFINE_SLOW_OP(greatereq) DEFINE_SLOW_OP(is_function) - DEFINE_SLOW_OP(is_object_or_null) + DEFINE_SLOW_OP(is_object) DEFINE_SLOW_OP(typeof) + DEFINE_OP(op_touch_entry) DEFINE_OP(op_add) DEFINE_OP(op_bitand) DEFINE_OP(op_bitor) @@ -204,34 +178,35 @@ void JIT::privateCompileMainPass() DEFINE_OP(op_call) DEFINE_OP(op_call_eval) DEFINE_OP(op_call_varargs) - DEFINE_OP(op_construct_varargs) DEFINE_OP(op_catch) DEFINE_OP(op_construct) + DEFINE_OP(op_get_callee) DEFINE_OP(op_create_this) DEFINE_OP(op_to_this) - DEFINE_OP(op_create_direct_arguments) - DEFINE_OP(op_create_scoped_arguments) - DEFINE_OP(op_create_out_of_band_arguments) - DEFINE_OP(op_check_tdz) + DEFINE_OP(op_init_lazy_reg) + DEFINE_OP(op_create_arguments) DEFINE_OP(op_debug) DEFINE_OP(op_del_by_id) DEFINE_OP(op_div) DEFINE_OP(op_end) 
DEFINE_OP(op_enter) - DEFINE_OP(op_get_scope) + DEFINE_OP(op_create_activation) DEFINE_OP(op_eq) DEFINE_OP(op_eq_null) case op_get_by_id_out_of_line: case op_get_array_length: DEFINE_OP(op_get_by_id) + DEFINE_OP(op_get_arguments_length) DEFINE_OP(op_get_by_val) + DEFINE_OP(op_get_argument_by_val) + DEFINE_OP(op_get_by_pname) + DEFINE_OP(op_get_pnames) DEFINE_OP(op_check_has_instance) DEFINE_OP(op_instanceof) DEFINE_OP(op_is_undefined) DEFINE_OP(op_is_boolean) DEFINE_OP(op_is_number) DEFINE_OP(op_is_string) - DEFINE_OP(op_is_object) DEFINE_OP(op_jeq_null) DEFINE_OP(op_jfalse) DEFINE_OP(op_jmp) @@ -249,6 +224,7 @@ void JIT::privateCompileMainPass() DEFINE_OP(op_loop_hint) DEFINE_OP(op_lshift) DEFINE_OP(op_mod) + DEFINE_OP(op_captured_mov) DEFINE_OP(op_mov) DEFINE_OP(op_mul) DEFINE_OP(op_negate) @@ -258,20 +234,20 @@ void JIT::privateCompileMainPass() DEFINE_OP(op_new_array_with_size) DEFINE_OP(op_new_array_buffer) DEFINE_OP(op_new_func) + DEFINE_OP(op_new_captured_func) DEFINE_OP(op_new_func_exp) DEFINE_OP(op_new_object) DEFINE_OP(op_new_regexp) + DEFINE_OP(op_next_pname) DEFINE_OP(op_not) DEFINE_OP(op_nstricteq) + DEFINE_OP(op_pop_scope) DEFINE_OP(op_dec) DEFINE_OP(op_inc) DEFINE_OP(op_profile_did_call) DEFINE_OP(op_profile_will_call) - DEFINE_OP(op_profile_type) - DEFINE_OP(op_profile_control_flow) + DEFINE_OP(op_push_name_scope) DEFINE_OP(op_push_with_scope) - DEFINE_OP(op_create_lexical_environment) - DEFINE_OP(op_get_parent_scope) case op_put_by_id_out_of_line: case op_put_by_id_transition_direct: case op_put_by_id_transition_normal: @@ -281,11 +257,13 @@ void JIT::privateCompileMainPass() DEFINE_OP(op_put_by_index) case op_put_by_val_direct: DEFINE_OP(op_put_by_val) - DEFINE_OP(op_put_getter_by_id) - DEFINE_OP(op_put_setter_by_id) DEFINE_OP(op_put_getter_setter) + case op_init_global_const_nop: + NEXT_OPCODE(op_init_global_const_nop); + DEFINE_OP(op_init_global_const) DEFINE_OP(op_ret) + DEFINE_OP(op_ret_object_or_this) DEFINE_OP(op_rshift) DEFINE_OP(op_unsigned) DEFINE_OP(op_urshift) @@ -295,37 +273,40 @@ void JIT::privateCompileMainPass() DEFINE_OP(op_switch_char) DEFINE_OP(op_switch_imm) DEFINE_OP(op_switch_string) + DEFINE_OP(op_tear_off_activation) + DEFINE_OP(op_tear_off_arguments) DEFINE_OP(op_throw) DEFINE_OP(op_throw_static_error) DEFINE_OP(op_to_number) - DEFINE_OP(op_to_string) DEFINE_OP(op_to_primitive) DEFINE_OP(op_resolve_scope) DEFINE_OP(op_get_from_scope) DEFINE_OP(op_put_to_scope) - DEFINE_OP(op_get_from_arguments) - DEFINE_OP(op_put_to_arguments) - - DEFINE_OP(op_get_enumerable_length) - DEFINE_OP(op_has_generic_property) - DEFINE_OP(op_has_structure_property) - DEFINE_OP(op_has_indexed_property) - DEFINE_OP(op_get_direct_pname) - DEFINE_OP(op_get_property_enumerator) - DEFINE_OP(op_enumerator_structure_pname) - DEFINE_OP(op_enumerator_generic_pname) - DEFINE_OP(op_to_index_string) - default: + + case op_get_by_id_chain: + case op_get_by_id_generic: + case op_get_by_id_proto: + case op_get_by_id_self: + case op_get_by_id_getter_chain: + case op_get_by_id_getter_proto: + case op_get_by_id_getter_self: + case op_get_by_id_custom_chain: + case op_get_by_id_custom_proto: + case op_get_by_id_custom_self: + case op_get_string_length: + case op_put_by_id_generic: + case op_put_by_id_replace: + case op_put_by_id_transition: RELEASE_ASSERT_NOT_REACHED(); } } - RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size()); + RELEASE_ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size()); #ifndef NDEBUG // Reset this, in order to guard its use with 
ASSERTs. - m_bytecodeOffset = std::numeric_limits<unsigned>::max(); + m_bytecodeOffset = (unsigned)-1; #endif } @@ -380,17 +361,20 @@ void JIT::privateCompileSlowCases() DEFINE_SLOWCASE_OP(op_call) DEFINE_SLOWCASE_OP(op_call_eval) DEFINE_SLOWCASE_OP(op_call_varargs) - DEFINE_SLOWCASE_OP(op_construct_varargs) DEFINE_SLOWCASE_OP(op_construct) DEFINE_SLOWCASE_OP(op_to_this) - DEFINE_SLOWCASE_OP(op_check_tdz) DEFINE_SLOWCASE_OP(op_create_this) + DEFINE_SLOWCASE_OP(op_captured_mov) DEFINE_SLOWCASE_OP(op_div) DEFINE_SLOWCASE_OP(op_eq) + DEFINE_SLOWCASE_OP(op_get_callee) case op_get_by_id_out_of_line: case op_get_array_length: DEFINE_SLOWCASE_OP(op_get_by_id) + DEFINE_SLOWCASE_OP(op_get_arguments_length) DEFINE_SLOWCASE_OP(op_get_by_val) + DEFINE_SLOWCASE_OP(op_get_argument_by_val) + DEFINE_SLOWCASE_OP(op_get_by_pname) DEFINE_SLOWCASE_OP(op_check_has_instance) DEFINE_SLOWCASE_OP(op_instanceof) DEFINE_SLOWCASE_OP(op_jfalse) @@ -428,11 +412,7 @@ void JIT::privateCompileSlowCases() DEFINE_SLOWCASE_OP(op_stricteq) DEFINE_SLOWCASE_OP(op_sub) DEFINE_SLOWCASE_OP(op_to_number) - DEFINE_SLOWCASE_OP(op_to_string) DEFINE_SLOWCASE_OP(op_to_primitive) - DEFINE_SLOWCASE_OP(op_has_indexed_property) - DEFINE_SLOWCASE_OP(op_has_structure_property) - DEFINE_SLOWCASE_OP(op_get_direct_pname) DEFINE_SLOWCASE_OP(op_resolve_scope) DEFINE_SLOWCASE_OP(op_get_from_scope) @@ -453,12 +433,12 @@ void JIT::privateCompileSlowCases() RELEASE_ASSERT(m_getByIdIndex == m_getByIds.size()); RELEASE_ASSERT(m_putByIdIndex == m_putByIds.size()); - RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size()); + RELEASE_ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size()); RELEASE_ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles()); #ifndef NDEBUG // Reset this, in order to guard its use with ASSERTs. - m_bytecodeOffset = std::numeric_limits<unsigned>::max(); + m_bytecodeOffset = (unsigned)-1; #endif } @@ -471,6 +451,11 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort) m_canBeOptimizedOrInlined = false; m_shouldEmitProfiling = false; break; + case DFG::CanInline: + m_canBeOptimized = false; + m_canBeOptimizedOrInlined = true; + m_shouldEmitProfiling = true; + break; case DFG::CanCompile: case DFG::CanCompileAndInline: m_canBeOptimized = true; @@ -493,13 +478,9 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort) m_codeBlock->m_shouldAlwaysBeInlined &= canInline(level) && DFG::mightInlineFunction(m_codeBlock); break; } - - // This ensures that we have the most up to date type information when performing typecheck optimizations for op_profile_type. 
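[Note on the capability switch in the privateCompile hunk above: it restores a three-way classification. The re-added DFG::CanInline case keeps the block out of the DFG itself (m_canBeOptimized stays false) while still allowing DFG callers to inline it, so value profiling must stay on. A sketch of the mapping, with flag names taken from the hunk; the label of the first, unchanged case is outside the hunk and assumed to be CannotCompile:

enum CapabilityLevel { CannotCompile, CanInline, CanCompile, CanCompileAndInline };

struct CompileFlags {
    bool canBeOptimized;          // may be DFG-compiled standalone
    bool canBeOptimizedOrInlined; // may at least be inlined into DFG code
    bool shouldEmitProfiling;     // emit value profiles for the optimizer
};

CompileFlags flagsFor(CapabilityLevel level)
{
    switch (level) {
    case CannotCompile:
        return { false, false, false };
    case CanInline:
        return { false, true, true };
    case CanCompile:
    case CanCompileAndInline:
        return { true, true, true };
    }
    return { false, false, false }; // unreachable
}

The diff continues below.]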
- if (m_vm->typeProfiler()) - m_vm->typeProfilerLog()->processLogEntries(ASCIILiteral("Preparing for JIT compilation.")); if (Options::showDisassembly() || m_vm->m_perBytecodeProfiler) - m_disassembler = std::make_unique<JITDisassembler>(m_codeBlock); + m_disassembler = adoptPtr(new JITDisassembler(m_codeBlock)); if (m_vm->m_perBytecodeProfiler) { m_compilation = adoptRef( new Profiler::Compilation( @@ -515,7 +496,8 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort) if (m_randomGenerator.getUint32() & 1) nop(); - emitFunctionPrologue(); + preserveReturnAddressAfterCall(regT2); + emitPutReturnPCToCallFrameHeader(regT2); emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock); Label beginLabel(this); @@ -525,8 +507,9 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort) sampleInstruction(m_codeBlock->instructions().begin()); #endif + Jump stackCheck; if (m_codeBlock->codeType() == FunctionCode) { - ASSERT(m_bytecodeOffset == std::numeric_limits<unsigned>::max()); + ASSERT(m_bytecodeOffset == (unsigned)-1); if (shouldEmitProfiling()) { for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) { // If this is a constructor, then we want to put in a dummy profiling site (to @@ -543,14 +526,13 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort) emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument)); } } - } - - addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT1); - Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), regT1); - move(regT1, stackPointerRegister); - checkStackPointerAlignment(); + addPtr(TrustedImm32(virtualRegisterForLocal(frameRegisterCountFor(m_codeBlock)).offset() * sizeof(Register)), callFrameRegister, regT1); + stackCheck = branchPtr(Above, AbsoluteAddress(m_vm->addressOfJSStackLimit()), regT1); + } + Label functionBody = label(); + privateCompileMainPass(); privateCompileLinkPass(); privateCompileSlowCases(); @@ -558,17 +540,20 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort) if (m_disassembler) m_disassembler->setEndOfSlowPath(label()); - stackOverflow.link(this); - m_bytecodeOffset = 0; - if (maxFrameExtentForSlowPathCall) - addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister); - callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock); - Label arityCheck; if (m_codeBlock->codeType() == FunctionCode) { + stackCheck.link(this); + m_bytecodeOffset = 0; + callOperationWithCallFrameRollbackOnException(operationStackCheck, m_codeBlock); +#ifndef NDEBUG + m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs. +#endif + jump(functionBody); + arityCheck = label(); store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined); - emitFunctionPrologue(); + preserveReturnAddressAfterCall(regT2); + emitPutReturnPCToCallFrameHeader(regT2); emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock); load32(payloadFor(JSStack::ArgumentCount), regT1); @@ -576,28 +561,14 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort) m_bytecodeOffset = 0; - if (maxFrameExtentForSlowPathCall) - addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister); callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? 
operationConstructArityCheck : operationCallArityCheck); - if (maxFrameExtentForSlowPathCall) - addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister); if (returnValueGPR != regT0) move(returnValueGPR, regT0); branchTest32(Zero, regT0).linkTo(beginLabel, this); - GPRReg thunkReg; -#if USE(JSVALUE64) - thunkReg = GPRInfo::regT7; -#else - thunkReg = GPRInfo::regT5; -#endif - CodeLocationLabel* failThunkLabels = - m_vm->arityCheckFailReturnThunks->returnPCsFor(*m_vm, m_codeBlock->numParameters()); - move(TrustedImmPtr(failThunkLabels), thunkReg); - loadPtr(BaseIndex(thunkReg, regT0, timesPtr()), thunkReg); - emitNakedCall(m_vm->getCTIStub(arityFixupGenerator).code()); + emitNakedCall(m_vm->getCTIStub(arityFixup).code()); #if !ASSERT_DISABLED - m_bytecodeOffset = std::numeric_limits<unsigned>::max(); // Reset this, in order to guard its use with ASSERTs. + m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs. #endif jump(beginLabel); @@ -610,7 +581,7 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort) if (m_disassembler) m_disassembler->setEndOfCode(label()); - LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock, effort); + LinkBuffer patchBuffer(*m_vm, this, m_codeBlock, effort); if (patchBuffer.didFailToAllocate()) return CompilationFailed; @@ -657,58 +628,58 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort) for (unsigned i = m_putByIds.size(); i--;) m_putByIds[i].finalize(patchBuffer); - for (const auto& byValCompilationInfo : m_byValCompilationInfo) { - PatchableJump patchableNotIndexJump = byValCompilationInfo.notIndexJump; - CodeLocationJump notIndexJump = CodeLocationJump(); - if (Jump(patchableNotIndexJump).isSet()) - notIndexJump = CodeLocationJump(patchBuffer.locationOf(patchableNotIndexJump)); - CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(byValCompilationInfo.badTypeJump)); - CodeLocationLabel doneTarget = patchBuffer.locationOf(byValCompilationInfo.doneTarget); - CodeLocationLabel nextHotPathTarget = patchBuffer.locationOf(byValCompilationInfo.nextHotPathTarget); - CodeLocationLabel slowPathTarget = patchBuffer.locationOf(byValCompilationInfo.slowPathTarget); - CodeLocationCall returnAddress = patchBuffer.locationOf(byValCompilationInfo.returnAddress); - - *byValCompilationInfo.byValInfo = ByValInfo( - byValCompilationInfo.bytecodeIndex, - notIndexJump, + m_codeBlock->setNumberOfByValInfos(m_byValCompilationInfo.size()); + for (unsigned i = 0; i < m_byValCompilationInfo.size(); ++i) { + CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(m_byValCompilationInfo[i].badTypeJump)); + CodeLocationLabel doneTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].doneTarget); + CodeLocationLabel slowPathTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].slowPathTarget); + CodeLocationCall returnAddress = patchBuffer.locationOf(m_byValCompilationInfo[i].returnAddress); + + m_codeBlock->byValInfo(i) = ByValInfo( + m_byValCompilationInfo[i].bytecodeIndex, badTypeJump, - byValCompilationInfo.arrayMode, - byValCompilationInfo.arrayProfile, + m_byValCompilationInfo[i].arrayMode, differenceBetweenCodePtr(badTypeJump, doneTarget), - differenceBetweenCodePtr(badTypeJump, nextHotPathTarget), differenceBetweenCodePtr(returnAddress, slowPathTarget)); } - for (unsigned i = 0; i < m_callCompilationInfo.size(); ++i) { - CallCompilationInfo& compilationInfo = m_callCompilationInfo[i]; - CallLinkInfo& info = *compilationInfo.callLinkInfo; - 
info.setCallLocations(patchBuffer.locationOfNearCall(compilationInfo.callReturnLocation), - patchBuffer.locationOf(compilationInfo.hotPathBegin), - patchBuffer.locationOfNearCall(compilationInfo.hotPathOther)); + m_codeBlock->setNumberOfCallLinkInfos(m_callStructureStubCompilationInfo.size()); + for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) { + CallLinkInfo& info = m_codeBlock->callLinkInfo(i); + info.callType = m_callStructureStubCompilationInfo[i].callType; + info.codeOrigin = CodeOrigin(m_callStructureStubCompilationInfo[i].bytecodeIndex); + info.callReturnLocation = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation); + info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin); + info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther); + info.calleeGPR = regT0; } - CompactJITCodeMap::Encoder jitCodeMapEncoder; - for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) { - if (m_labels[bytecodeOffset].isSet()) - jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset])); +#if ENABLE(DFG_JIT) || ENABLE(LLINT) + if (canBeOptimizedOrInlined() +#if ENABLE(LLINT) + || true +#endif + ) { + CompactJITCodeMap::Encoder jitCodeMapEncoder; + for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) { + if (m_labels[bytecodeOffset].isSet()) + jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset])); + } + m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish()); } - m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish()); +#endif MacroAssemblerCodePtr withArityCheck; if (m_codeBlock->codeType() == FunctionCode) withArityCheck = patchBuffer.locationOf(arityCheck); - if (Options::showDisassembly()) { + if (Options::showDisassembly()) m_disassembler->dump(patchBuffer); - patchBuffer.didAlreadyDisassemble(); - } if (m_compilation) { m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer); m_vm->m_perBytecodeProfiler->addCompilation(m_compilation); } - CodeRef result = FINALIZE_CODE( - patchBuffer, - ("Baseline JIT code for %s", toCString(CodeBlockWithJITType(m_codeBlock, JITCode::BaselineJIT)).data())); + CodeRef result = patchBuffer.finalizeCodeWithoutDisassembly(); m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.add( static_cast<double>(result.size()) / @@ -716,7 +687,8 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort) m_codeBlock->shrinkToFit(CodeBlock::LateShrink); m_codeBlock->setJITCode( - adoptRef(new DirectJITCode(result, withArityCheck, JITCode::BaselineJIT))); + adoptRef(new DirectJITCode(result, JITCode::BaselineJIT)), + withArityCheck); #if ENABLE(JIT_VERBOSE) dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end()); @@ -725,53 +697,72 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort) return CompilationSuccessful; } -void JIT::privateCompileExceptionHandlers() +void JIT::linkFor(ExecState* exec, JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, VM* vm, CodeSpecializationKind kind) { - if (!m_exceptionChecksWithCallFrameRollback.empty()) { - m_exceptionChecksWithCallFrameRollback.link(this); + RepatchBuffer repatchBuffer(callerCodeBlock); + + ASSERT(!callLinkInfo->isLinked()); + callLinkInfo->callee.set(*vm, 
callLinkInfo->hotPathBegin, callerCodeBlock->ownerExecutable(), callee); + callLinkInfo->lastSeenCallee.set(*vm, callerCodeBlock->ownerExecutable(), callee); + repatchBuffer.relink(callLinkInfo->hotPathOther, code); + + if (calleeCodeBlock) + calleeCodeBlock->linkIncomingCall(exec, callLinkInfo); + + // Patch the slow path so we do not continue to try to link. + if (kind == CodeForCall) { + ASSERT(callLinkInfo->callType == CallLinkInfo::Call + || callLinkInfo->callType == CallLinkInfo::CallVarargs); + if (callLinkInfo->callType == CallLinkInfo::Call) { + repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(linkClosureCallThunkGenerator).code()); + return; + } + + repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(virtualCallThunkGenerator).code()); + return; + } - // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*). + ASSERT(kind == CodeForConstruct); + repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(virtualConstructThunkGenerator).code()); +} - move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0); - move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1); +void JIT::linkSlowCall(CodeBlock* callerCodeBlock, CallLinkInfo* callLinkInfo) +{ + RepatchBuffer repatchBuffer(callerCodeBlock); -#if CPU(X86) - // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer! - poke(GPRInfo::argumentGPR0); - poke(GPRInfo::argumentGPR1, 1); -#endif - m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandlerFromCallerFrame).value())); - jumpToExceptionHandler(); + repatchBuffer.relink(callLinkInfo->callReturnLocation, callerCodeBlock->vm()->getCTIStub(virtualCallThunkGenerator).code()); +}
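[Note on linkFor() and linkSlowCall() above: together they implement one-shot monomorphic call linking. linkFor() records the callee in the CallLinkInfo, repatches the hot-path near call to jump straight to the callee's code, and then repatches the slow-path call to a thunk so the linker is not re-entered; linkSlowCall() resets a site to the fully virtual thunk. A sketch of the thunk choice at the end of linkFor(), with thunk and call-type names taken from the hunk and the types simplified:

enum CallType { Call, CallVarargs };
enum Thunk { LinkClosureCallThunk, VirtualCallThunk, VirtualConstructThunk };

// Which stub the slow path gets relinked to once a call site is linked.
Thunk slowPathThunkFor(bool isConstruct, CallType type)
{
    if (isConstruct)
        return VirtualConstructThunk; // CodeForConstruct sites
    if (type == Call)
        return LinkClosureCallThunk;  // may later be upgraded to a closure call
    return VirtualCallThunk;          // varargs sites go fully virtual
}

The diff continues below.]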
+ +void JIT::privateCompileExceptionHandlers() +{ + if (m_exceptionChecks.empty() && m_exceptionChecksWithCallFrameRollback.empty()) + return; + + Jump doLookup; + + if (!m_exceptionChecksWithCallFrameRollback.empty()) { + m_exceptionChecksWithCallFrameRollback.link(this); + emitGetCallerFrameFromCallFrameHeaderPtr(GPRInfo::argumentGPR0); + doLookup = jump(); } - if (!m_exceptionChecks.empty()) { + if (!m_exceptionChecks.empty()) m_exceptionChecks.link(this); + + // lookupExceptionHandler is passed one argument, the exec (the CallFrame*). + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); - // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*). - move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0); - move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1); + if (doLookup.isSet()) + doLookup.link(this); #if CPU(X86) - // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer! + // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer! + poke(GPRInfo::argumentGPR0); #endif - m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandler).value())); - jumpToExceptionHandler(); - } -} - -unsigned JIT::frameRegisterCountFor(CodeBlock* codeBlock) -{ - ASSERT(static_cast<unsigned>(codeBlock->m_numCalleeRegisters) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast<unsigned>(codeBlock->m_numCalleeRegisters))); - - return roundLocalRegisterCountForFramePointerOffset(codeBlock->m_numCalleeRegisters + maxFrameExtentForSlowPathCallInRegisters); + m_calls.append(CallRecord(call(), (unsigned)-1, FunctionPtr(lookupExceptionHandler).value())); + jumpToExceptionHandler(); } -int JIT::stackPointerOffsetFor(CodeBlock* codeBlock) -{ - return virtualRegisterForLocal(frameRegisterCountFor(codeBlock) - 1).offset(); -} } // namespace JSC diff --git a/Source/JavaScriptCore/jit/JIT.h b/Source/JavaScriptCore/jit/JIT.h index 8411a7ef6..298075706 100644 --- a/Source/JavaScriptCore/jit/JIT.h +++ b/Source/JavaScriptCore/jit/JIT.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008, 2012-2015 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2012, 2013 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -30,7 +30,7 @@ // We've run into some problems where changing the size of the class JIT leads to // performance fluctuations. Try forcing alignment in an attempt to stabilize this. -#if COMPILER(GCC_OR_CLANG) +#if COMPILER(GCC) #define JIT_CLASS_ALIGNMENT __attribute__ ((aligned (32))) #else #define JIT_CLASS_ALIGNMENT @@ -44,6 +44,7 @@ #include "JITDisassembler.h" #include "JITInlineCacheGenerator.h" #include "JSInterfaceJIT.h" +#include "LegacyProfiler.h" #include "Opcode.h" #include "ResultType.h" #include "SamplingTool.h" @@ -52,10 +53,10 @@ namespace JSC { class ArrayAllocationProfile; - class CallLinkInfo; class CodeBlock; class FunctionExecutable; class JIT; + class JSPropertyNameIterator; class Identifier; class Interpreter; class JSScope; @@ -64,6 +65,7 @@ namespace JSC { class Register; class StructureChain; + struct CallLinkInfo; struct Instruction; struct OperandTypes; struct PolymorphicAccessStructureList; @@ -149,35 +151,28 @@ namespace JSC { struct ByValCompilationInfo { ByValCompilationInfo() { } - ByValCompilationInfo(ByValInfo* byValInfo, unsigned bytecodeIndex, MacroAssembler::PatchableJump notIndexJump, MacroAssembler::PatchableJump badTypeJump, JITArrayMode arrayMode, ArrayProfile* arrayProfile, MacroAssembler::Label doneTarget, MacroAssembler::Label nextHotPathTarget) - : byValInfo(byValInfo) - , bytecodeIndex(bytecodeIndex) - , notIndexJump(notIndexJump) + ByValCompilationInfo(unsigned bytecodeIndex, MacroAssembler::PatchableJump badTypeJump, JITArrayMode arrayMode, MacroAssembler::Label doneTarget) : bytecodeIndex(bytecodeIndex) , badTypeJump(badTypeJump) , arrayMode(arrayMode) - , arrayProfile(arrayProfile) , doneTarget(doneTarget) - , nextHotPathTarget(nextHotPathTarget) { } - - ByValInfo* byValInfo; + unsigned bytecodeIndex; - MacroAssembler::PatchableJump notIndexJump; MacroAssembler::PatchableJump badTypeJump; JITArrayMode arrayMode; - ArrayProfile* arrayProfile; MacroAssembler::Label doneTarget; - MacroAssembler::Label nextHotPathTarget; MacroAssembler::Label slowPathTarget; MacroAssembler::Call returnAddress; }; - struct CallCompilationInfo { + struct StructureStubCompilationInfo { MacroAssembler::DataLabelPtr hotPathBegin; MacroAssembler::Call
hotPathOther; MacroAssembler::Call callReturnLocation; - CallLinkInfo* callLinkInfo; + CallLinkInfo::CallType callType; + unsigned bytecodeIndex; }; // Near calls can only be patched to other JIT code, regular calls can be patched to JIT code or relinked to stub functions. @@ -205,18 +200,18 @@ namespace JSC { return JIT(vm, codeBlock).privateCompile(effort); } - static void compileGetByVal(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode) + static void compileClosureCall(VM* vm, CallLinkInfo* callLinkInfo, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, Structure* expectedStructure, ExecutableBase* expectedExecutable, MacroAssemblerCodePtr codePtr) { - JIT jit(vm, codeBlock); - jit.m_bytecodeOffset = byValInfo->bytecodeIndex; - jit.privateCompileGetByVal(byValInfo, returnAddress, arrayMode); + JIT jit(vm, callerCodeBlock); + jit.m_bytecodeOffset = callLinkInfo->codeOrigin.bytecodeIndex; + jit.privateCompileClosureCall(callLinkInfo, calleeCodeBlock, expectedStructure, expectedExecutable, codePtr); } - static void compileGetByValWithCachedId(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, const Identifier& propertyName) + static void compileGetByVal(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode) { JIT jit(vm, codeBlock); jit.m_bytecodeOffset = byValInfo->bytecodeIndex; - jit.privateCompileGetByValWithCachedId(byValInfo, returnAddress, propertyName); + jit.privateCompileGetByVal(byValInfo, returnAddress, arrayMode); } static void compilePutByVal(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode) @@ -233,24 +228,26 @@ namespace JSC { jit.privateCompilePutByVal(byValInfo, returnAddress, arrayMode); } - static void compileHasIndexedProperty(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode) - { - JIT jit(vm, codeBlock); - jit.m_bytecodeOffset = byValInfo->bytecodeIndex; - jit.privateCompileHasIndexedProperty(byValInfo, returnAddress, arrayMode); - } - static CodeRef compileCTINativeCall(VM* vm, NativeFunction func) { if (!vm->canUseJIT()) { +#if ENABLE(LLINT) return CodeRef::createLLIntCodeRef(llint_native_call_trampoline); +#else + return CodeRef(); +#endif } JIT jit(vm, 0); return jit.privateCompileCTINativeCall(vm, func); } - static unsigned frameRegisterCountFor(CodeBlock*); - static int stackPointerOffsetFor(CodeBlock*); + static void linkFor(ExecState*, JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, CodePtr, CallLinkInfo*, VM*, CodeSpecializationKind); + static void linkSlowCall(CodeBlock* callerCodeBlock, CallLinkInfo*); + + static unsigned frameRegisterCountFor(CodeBlock* codeBlock) + { + return codeBlock->m_numCalleeRegisters; + } private: JIT(VM*, CodeBlock* = 0); @@ -260,12 +257,11 @@ namespace JSC { void privateCompileSlowCases(); CompilationResult privateCompile(JITCompilationEffort); + void privateCompileClosureCall(CallLinkInfo*, CodeBlock* calleeCodeBlock, Structure*, ExecutableBase*, MacroAssemblerCodePtr); + void privateCompileGetByVal(ByValInfo*, ReturnAddressPtr, JITArrayMode); - void privateCompileGetByValWithCachedId(ByValInfo*, ReturnAddressPtr, const Identifier&); void privateCompilePutByVal(ByValInfo*, ReturnAddressPtr, JITArrayMode); - void privateCompileHasIndexedProperty(ByValInfo*, ReturnAddressPtr, JITArrayMode); - Label privateCompileCTINativeCall(VM*, bool 
isConstruct = false); CodeRef privateCompileCTINativeCall(VM*, NativeFunction); void privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress); @@ -278,15 +274,6 @@ namespace JSC { return functionCall; } -#if OS(WINDOWS) && CPU(X86_64) - Call appendCallWithSlowPathReturnType(const FunctionPtr& function) - { - Call functionCall = callWithSlowPathReturnType(); - m_calls.append(CallRecord(functionCall, m_bytecodeOffset, function.value())); - return functionCall; - } -#endif - void exceptionCheck(Jump jumpToHandler) { m_exceptionChecks.append(jumpToHandler); @@ -312,7 +299,7 @@ namespace JSC { void compileOpCall(OpcodeID, Instruction*, unsigned callLinkInfoIndex); void compileOpCallSlowCase(OpcodeID, Instruction*, Vector<SlowCaseEntry>::iterator&, unsigned callLinkInfoIndex); - void compileSetupVarargsFrame(Instruction*, CallLinkInfo*); + void compileLoadVarargs(Instruction*); void compileCallEval(Instruction*); void compileCallEvalSlowCase(Instruction*, Vector<SlowCaseEntry>::iterator&); void emitPutCallResult(Instruction*); @@ -323,15 +310,23 @@ namespace JSC { void emitLoadDouble(int index, FPRegisterID value); void emitLoadInt32ToDouble(int index, FPRegisterID value); - Jump emitJumpIfCellObject(RegisterID cellReg); - Jump emitJumpIfCellNotObject(RegisterID cellReg); + Jump emitJumpIfNotObject(RegisterID structureReg); + + Jump addStructureTransitionCheck(JSCell*, Structure*, StructureStubInfo*, RegisterID scratch); + void addStructureTransitionCheck(JSCell*, Structure*, StructureStubInfo*, JumpList& failureCases, RegisterID scratch); + void testPrototype(JSValue, JumpList& failureCases, StructureStubInfo*); - enum WriteBarrierMode { UnconditionalWriteBarrier, ShouldFilterBase, ShouldFilterValue, ShouldFilterBaseAndValue }; + enum WriteBarrierMode { UnconditionalWriteBarrier, ShouldFilterValue, ShouldFilterBaseAndValue }; // value register in write barrier is used before any scratch registers // so may safely be the same as either of the scratch registers. + Jump checkMarkWord(RegisterID owner, RegisterID scratch1, RegisterID scratch2); + Jump checkMarkWord(JSCell* owner); void emitWriteBarrier(unsigned owner, unsigned value, WriteBarrierMode); void emitWriteBarrier(JSCell* owner, unsigned value, WriteBarrierMode); - void emitWriteBarrier(JSCell* owner); +/* + void emitWriteBarrier(RegisterID owner, RegisterID valueTag, RegisterID scratch1, RegisterID scratch2, WriteBarrierMode); + void emitWriteBarrier(JSCell* owner, RegisterID value, WriteBarrierMode); +*/ template<typename StructureType> // StructureType can be RegisterID or ImmPtr. void emitAllocateJSObject(RegisterID allocator, StructureType, RegisterID result, RegisterID scratch); @@ -341,8 +336,8 @@ namespace JSC { void emitValueProfilingSite(ValueProfile*); void emitValueProfilingSite(unsigned bytecodeOffset); void emitValueProfilingSite(); - void emitArrayProfilingSiteWithCell(RegisterID cell, RegisterID indexingType, ArrayProfile*); - void emitArrayProfilingSiteForBytecodeIndexWithCell(RegisterID cell, RegisterID indexingType, unsigned bytecodeIndex); + void emitArrayProfilingSite(RegisterID structureAndIndexingType, RegisterID scratch, ArrayProfile*); + void emitArrayProfilingSiteForBytecodeIndex(RegisterID structureAndIndexingType, RegisterID scratch, unsigned bytecodeIndex); void emitArrayProfileStoreToHoleSpecialCase(ArrayProfile*); void emitArrayProfileOutOfBoundsSpecialCase(ArrayProfile*); @@ -352,18 +347,10 @@ namespace JSC { // Property is int-checked and zero extended. Base is cell checked. 
// Structure is already profiled. Returns the slow cases. Fall-through // case contains result in regT0, and it is not yet profiled. - JumpList emitInt32Load(Instruction* instruction, PatchableJump& badType) { return emitContiguousLoad(instruction, badType, Int32Shape); } - JumpList emitDoubleLoad(Instruction*, PatchableJump& badType); - JumpList emitContiguousLoad(Instruction*, PatchableJump& badType, IndexingType expectedShape = ContiguousShape); - JumpList emitArrayStorageLoad(Instruction*, PatchableJump& badType); - JumpList emitLoadForArrayMode(Instruction*, JITArrayMode, PatchableJump& badType); - JumpList emitInt32GetByVal(Instruction* instruction, PatchableJump& badType) { return emitContiguousGetByVal(instruction, badType, Int32Shape); } JumpList emitDoubleGetByVal(Instruction*, PatchableJump& badType); JumpList emitContiguousGetByVal(Instruction*, PatchableJump& badType, IndexingType expectedShape = ContiguousShape); JumpList emitArrayStorageGetByVal(Instruction*, PatchableJump& badType); - JumpList emitDirectArgumentsGetByVal(Instruction*, PatchableJump& badType); - JumpList emitScopedArgumentsGetByVal(Instruction*, PatchableJump& badType); JumpList emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badType, TypedArrayType); JumpList emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badType, TypedArrayType); @@ -387,13 +374,9 @@ namespace JSC { JumpList emitArrayStoragePutByVal(Instruction*, PatchableJump& badType); JumpList emitIntTypedArrayPutByVal(Instruction*, PatchableJump& badType, TypedArrayType); JumpList emitFloatTypedArrayPutByVal(Instruction*, PatchableJump& badType, TypedArrayType); - - JITGetByIdGenerator emitGetByValWithCachedId(Instruction*, const Identifier&, Jump& fastDoneCase, Jump& slowDoneCase, JumpList& slowCases); - + enum FinalObjectMode { MayBeFinal, KnownNotFinal }; - template <typename T> Jump branchStructure(RelationalCondition, T leftHandSide, Structure*); - #if USE(JSVALUE32_64) bool getOperandConstantImmediateInt(int op1, int op2, int& op, int32_t& constant); @@ -452,7 +435,6 @@ namespace JSC { Jump emitJumpIfImmediateInteger(RegisterID); Jump emitJumpIfNotImmediateInteger(RegisterID); Jump emitJumpIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID); - PatchableJump emitPatchableJumpIfNotImmediateInteger(RegisterID); void emitJumpSlowCaseIfNotImmediateInteger(RegisterID); void emitJumpSlowCaseIfNotImmediateNumber(RegisterID); void emitJumpSlowCaseIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID); @@ -473,9 +455,8 @@ namespace JSC { void emit_compareAndJump(OpcodeID, int op1, int op2, unsigned target, RelationalCondition); void emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondition, size_t (JIT_OPERATION *operation)(ExecState*, EncodedJSValue, EncodedJSValue), bool invert, Vector<SlowCaseEntry>::iterator&); - - void assertStackPointerOffset(); + void emit_op_touch_entry(Instruction*); void emit_op_add(Instruction*); void emit_op_bitand(Instruction*); void emit_op_bitor(Instruction*); @@ -483,27 +464,26 @@ namespace JSC { void emit_op_call(Instruction*); void emit_op_call_eval(Instruction*); void emit_op_call_varargs(Instruction*); - void emit_op_construct_varargs(Instruction*); + void emit_op_captured_mov(Instruction*); void emit_op_catch(Instruction*); void emit_op_construct(Instruction*); + void emit_op_get_callee(Instruction*); void emit_op_create_this(Instruction*); void emit_op_to_this(Instruction*); - void emit_op_create_direct_arguments(Instruction*); - void 
emit_op_create_scoped_arguments(Instruction*); - void emit_op_create_out_of_band_arguments(Instruction*); - void emit_op_check_tdz(Instruction*); + void emit_op_create_arguments(Instruction*); void emit_op_debug(Instruction*); void emit_op_del_by_id(Instruction*); void emit_op_div(Instruction*); void emit_op_end(Instruction*); void emit_op_enter(Instruction*); - void emit_op_get_scope(Instruction*); + void emit_op_create_activation(Instruction*); void emit_op_eq(Instruction*); void emit_op_eq_null(Instruction*); void emit_op_get_by_id(Instruction*); void emit_op_get_arguments_length(Instruction*); void emit_op_get_by_val(Instruction*); void emit_op_get_argument_by_val(Instruction*); + void emit_op_get_by_pname(Instruction*); void emit_op_init_lazy_reg(Instruction*); void emit_op_check_has_instance(Instruction*); void emit_op_instanceof(Instruction*); @@ -511,7 +491,6 @@ namespace JSC { void emit_op_is_boolean(Instruction*); void emit_op_is_number(Instruction*); void emit_op_is_string(Instruction*); - void emit_op_is_object(Instruction*); void emit_op_jeq_null(Instruction*); void emit_op_jfalse(Instruction*); void emit_op_jmp(Instruction*); @@ -538,27 +517,28 @@ namespace JSC { void emit_op_new_array_with_size(Instruction*); void emit_op_new_array_buffer(Instruction*); void emit_op_new_func(Instruction*); + void emit_op_new_captured_func(Instruction*); void emit_op_new_func_exp(Instruction*); void emit_op_new_object(Instruction*); void emit_op_new_regexp(Instruction*); + void emit_op_get_pnames(Instruction*); + void emit_op_next_pname(Instruction*); void emit_op_not(Instruction*); void emit_op_nstricteq(Instruction*); + void emit_op_pop_scope(Instruction*); void emit_op_dec(Instruction*); void emit_op_inc(Instruction*); void emit_op_profile_did_call(Instruction*); void emit_op_profile_will_call(Instruction*); - void emit_op_profile_type(Instruction*); - void emit_op_profile_control_flow(Instruction*); + void emit_op_push_name_scope(Instruction*); void emit_op_push_with_scope(Instruction*); - void emit_op_create_lexical_environment(Instruction*); - void emit_op_get_parent_scope(Instruction*); void emit_op_put_by_id(Instruction*); void emit_op_put_by_index(Instruction*); void emit_op_put_by_val(Instruction*); - void emit_op_put_getter_by_id(Instruction*); - void emit_op_put_setter_by_id(Instruction*); void emit_op_put_getter_setter(Instruction*); + void emit_op_init_global_const(Instruction*); void emit_op_ret(Instruction*); + void emit_op_ret_object_or_this(Instruction*); void emit_op_rshift(Instruction*); void emit_op_strcat(Instruction*); void emit_op_stricteq(Instruction*); @@ -566,24 +546,15 @@ namespace JSC { void emit_op_switch_char(Instruction*); void emit_op_switch_imm(Instruction*); void emit_op_switch_string(Instruction*); + void emit_op_tear_off_activation(Instruction*); void emit_op_tear_off_arguments(Instruction*); void emit_op_throw(Instruction*); void emit_op_throw_static_error(Instruction*); void emit_op_to_number(Instruction*); - void emit_op_to_string(Instruction*); void emit_op_to_primitive(Instruction*); void emit_op_unexpected_load(Instruction*); void emit_op_unsigned(Instruction*); void emit_op_urshift(Instruction*); - void emit_op_get_enumerable_length(Instruction*); - void emit_op_has_generic_property(Instruction*); - void emit_op_has_structure_property(Instruction*); - void emit_op_has_indexed_property(Instruction*); - void emit_op_get_direct_pname(Instruction*); - void emit_op_get_property_enumerator(Instruction*); - void 
emit_op_enumerator_structure_pname(Instruction*); - void emit_op_enumerator_generic_pname(Instruction*); - void emit_op_to_index_string(Instruction*); void emitSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&); void emitSlow_op_bitand(Instruction*, Vector<SlowCaseEntry>::iterator&); @@ -592,11 +563,10 @@ namespace JSC { void emitSlow_op_call(Instruction*, Vector<SlowCaseEntry>::iterator&); void emitSlow_op_call_eval(Instruction*, Vector<SlowCaseEntry>::iterator&); void emitSlow_op_call_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&); - void emitSlow_op_construct_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&); + void emitSlow_op_captured_mov(Instruction*, Vector<SlowCaseEntry>::iterator&); void emitSlow_op_construct(Instruction*, Vector<SlowCaseEntry>::iterator&); void emitSlow_op_to_this(Instruction*, Vector<SlowCaseEntry>::iterator&); void emitSlow_op_create_this(Instruction*, Vector<SlowCaseEntry>::iterator&); - void emitSlow_op_check_tdz(Instruction*, Vector<SlowCaseEntry>::iterator&); void emitSlow_op_div(Instruction*, Vector<SlowCaseEntry>::iterator&); void emitSlow_op_eq(Instruction*, Vector<SlowCaseEntry>::iterator&); void emitSlow_op_get_callee(Instruction*, Vector<SlowCaseEntry>::iterator&); @@ -604,6 +574,7 @@ namespace JSC { void emitSlow_op_get_arguments_length(Instruction*, Vector<SlowCaseEntry>::iterator&); void emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&); void emitSlow_op_get_argument_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&); + void emitSlow_op_get_by_pname(Instruction*, Vector<SlowCaseEntry>::iterator&); void emitSlow_op_check_has_instance(Instruction*, Vector<SlowCaseEntry>::iterator&); void emitSlow_op_instanceof(Instruction*, Vector<SlowCaseEntry>::iterator&); void emitSlow_op_jfalse(Instruction*, Vector<SlowCaseEntry>::iterator&); @@ -633,19 +604,13 @@ namespace JSC { void emitSlow_op_stricteq(Instruction*, Vector<SlowCaseEntry>::iterator&); void emitSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&); void emitSlow_op_to_number(Instruction*, Vector<SlowCaseEntry>::iterator&); - void emitSlow_op_to_string(Instruction*, Vector<SlowCaseEntry>::iterator&); void emitSlow_op_to_primitive(Instruction*, Vector<SlowCaseEntry>::iterator&); void emitSlow_op_unsigned(Instruction*, Vector<SlowCaseEntry>::iterator&); void emitSlow_op_urshift(Instruction*, Vector<SlowCaseEntry>::iterator&); - void emitSlow_op_has_indexed_property(Instruction*, Vector<SlowCaseEntry>::iterator&); - void emitSlow_op_has_structure_property(Instruction*, Vector<SlowCaseEntry>::iterator&); - void emitSlow_op_get_direct_pname(Instruction*, Vector<SlowCaseEntry>::iterator&); void emit_op_resolve_scope(Instruction*); void emit_op_get_from_scope(Instruction*); void emit_op_put_to_scope(Instruction*); - void emit_op_get_from_arguments(Instruction*); - void emit_op_put_to_arguments(Instruction*); void emitSlow_op_resolve_scope(Instruction*, Vector<SlowCaseEntry>::iterator&); void emitSlow_op_get_from_scope(Instruction*, Vector<SlowCaseEntry>::iterator&); void emitSlow_op_put_to_scope(Instruction*, Vector<SlowCaseEntry>::iterator&); @@ -654,19 +619,28 @@ namespace JSC { void emitRightShiftSlowCase(Instruction*, Vector<SlowCaseEntry>::iterator&, bool isUnsigned); void emitVarInjectionCheck(bool needsVarInjectionChecks); - void emitResolveClosure(int dst, int scope, bool needsVarInjectionChecks, unsigned depth); + void emitResolveClosure(int dst, bool needsVarInjectionChecks, unsigned depth); void emitLoadWithStructureCheck(int scope, 
Structure** structureSlot); void emitGetGlobalProperty(uintptr_t* operandSlot); void emitGetGlobalVar(uintptr_t operand); void emitGetClosureVar(int scope, uintptr_t operand); void emitPutGlobalProperty(uintptr_t* operandSlot, int value); - void emitNotifyWrite(WatchpointSet*); - void emitPutGlobalVar(uintptr_t operand, int value, WatchpointSet*); - void emitPutClosureVar(int scope, uintptr_t operand, int value, WatchpointSet*); +#if USE(JSVALUE64) + void emitNotifyWrite(RegisterID value, RegisterID scratch, VariableWatchpointSet*); +#else + void emitNotifyWrite(RegisterID tag, RegisterID payload, RegisterID scratch, VariableWatchpointSet*); +#endif + void emitPutGlobalVar(uintptr_t operand, int value, VariableWatchpointSet*); + void emitPutClosureVar(int scope, uintptr_t operand, int value); void emitInitRegister(int dst); void emitPutIntToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry); + void emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister); + void emitGetFromCallFrameHeader32(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister); +#if USE(JSVALUE64) + void emitGetFromCallFrameHeader64(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister); +#endif JSValue getConstantOperand(int src); bool isOperandConstantImmediateInt(int src); @@ -689,9 +663,6 @@ namespace JSC { void linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&, int virtualRegisterIndex); MacroAssembler::Call appendCallWithExceptionCheck(const FunctionPtr&); -#if OS(WINDOWS) && CPU(X86_64) - MacroAssembler::Call appendCallWithExceptionCheckAndSlowPathReturnType(const FunctionPtr&); -#endif MacroAssembler::Call appendCallWithCallFrameRollbackOnException(const FunctionPtr&); MacroAssembler::Call appendCallWithExceptionCheckSetJSValueResult(const FunctionPtr&, int); MacroAssembler::Call appendCallWithExceptionCheckSetJSValueResultWithProfile(const FunctionPtr&, int); @@ -700,11 +671,9 @@ namespace JSC { MacroAssembler::Call callOperation(C_JITOperation_E); MacroAssembler::Call callOperation(C_JITOperation_EO, GPRReg); - MacroAssembler::Call callOperation(C_JITOperation_EL, GPRReg); - MacroAssembler::Call callOperation(C_JITOperation_EL, TrustedImmPtr); MacroAssembler::Call callOperation(C_JITOperation_ESt, Structure*); MacroAssembler::Call callOperation(C_JITOperation_EZ, int32_t); - MacroAssembler::Call callOperation(Z_JITOperation_EJZZ, GPRReg, int32_t, int32_t); + MacroAssembler::Call callOperation(F_JITOperation_EJZ, GPRReg, int32_t); MacroAssembler::Call callOperation(J_JITOperation_E, int); MacroAssembler::Call callOperation(J_JITOperation_EAapJ, int, ArrayAllocationProfile*, GPRReg); MacroAssembler::Call callOperation(J_JITOperation_EAapJcpZ, int, ArrayAllocationProfile*, GPRReg, int32_t); @@ -713,18 +682,12 @@ namespace JSC { MacroAssembler::Call callOperation(V_JITOperation_EC, JSCell*); MacroAssembler::Call callOperation(J_JITOperation_EJ, int, GPRReg); #if USE(JSVALUE64) - MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_ESsiJI, int, StructureStubInfo*, GPRReg, UniquedStringImpl*); + MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_ESsiJI, int, StructureStubInfo*, GPRReg, StringImpl*); #else - MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_ESsiJI, int, StructureStubInfo*, GPRReg, GPRReg, UniquedStringImpl*); + MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_ESsiJI, int, StructureStubInfo*, GPRReg, GPRReg, 
StringImpl*); #endif MacroAssembler::Call callOperation(J_JITOperation_EJIdc, int, GPRReg, const Identifier*); MacroAssembler::Call callOperation(J_JITOperation_EJJ, int, GPRReg, GPRReg); - MacroAssembler::Call callOperation(J_JITOperation_EJJAp, int, GPRReg, GPRReg, ArrayProfile*); - MacroAssembler::Call callOperation(J_JITOperation_EJJBy, int, GPRReg, GPRReg, ByValInfo*); - MacroAssembler::Call callOperation(C_JITOperation_EJsc, GPRReg); - MacroAssembler::Call callOperation(J_JITOperation_EJscC, int, GPRReg, JSCell*); - MacroAssembler::Call callOperation(C_JITOperation_EJscZ, GPRReg, int32_t); - MacroAssembler::Call callOperation(C_JITOperation_EJscZ, int, GPRReg, int32_t); #if USE(JSVALUE64) MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_EJJ, int, GPRReg, GPRReg); #else @@ -732,68 +695,54 @@ namespace JSC { #endif MacroAssembler::Call callOperation(J_JITOperation_EP, int, void*); MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_EPc, int, Instruction*); - MacroAssembler::Call callOperation(J_JITOperation_EPc, int, Instruction*); MacroAssembler::Call callOperation(J_JITOperation_EZ, int, int32_t); - MacroAssembler::Call callOperation(J_JITOperation_EZZ, int, int32_t, int32_t); MacroAssembler::Call callOperation(P_JITOperation_EJS, GPRReg, size_t); + MacroAssembler::Call callOperation(P_JITOperation_EZ, int32_t); MacroAssembler::Call callOperation(S_JITOperation_ECC, RegisterID, RegisterID); MacroAssembler::Call callOperation(S_JITOperation_EJ, RegisterID); MacroAssembler::Call callOperation(S_JITOperation_EJJ, RegisterID, RegisterID); MacroAssembler::Call callOperation(S_JITOperation_EOJss, RegisterID, RegisterID); - MacroAssembler::Call callOperation(Sprt_JITOperation_EZ, int32_t); MacroAssembler::Call callOperation(V_JITOperation_E); MacroAssembler::Call callOperation(V_JITOperation_EC, RegisterID); MacroAssembler::Call callOperation(V_JITOperation_ECC, RegisterID, RegisterID); - MacroAssembler::Call callOperation(V_JITOperation_ECIC, RegisterID, const Identifier*, RegisterID); MacroAssembler::Call callOperation(V_JITOperation_ECICC, RegisterID, const Identifier*, RegisterID, RegisterID); - MacroAssembler::Call callOperation(J_JITOperation_EE, RegisterID); - MacroAssembler::Call callOperation(V_JITOperation_EZSymtabJ, int, SymbolTable*, RegisterID); - MacroAssembler::Call callOperation(J_JITOperation_EZSymtabJ, int, SymbolTable*, RegisterID); + MacroAssembler::Call callOperation(V_JITOperation_EIdJZ, const Identifier*, RegisterID, int32_t); MacroAssembler::Call callOperation(V_JITOperation_EJ, RegisterID); #if USE(JSVALUE64) MacroAssembler::Call callOperationNoExceptionCheck(V_JITOperation_EJ, RegisterID); #else MacroAssembler::Call callOperationNoExceptionCheck(V_JITOperation_EJ, RegisterID, RegisterID); #endif - MacroAssembler::Call callOperation(V_JITOperation_EJIdJ, RegisterID, const Identifier*, RegisterID); MacroAssembler::Call callOperation(V_JITOperation_EJIdJJ, RegisterID, const Identifier*, RegisterID, RegisterID); #if USE(JSVALUE64) - MacroAssembler::Call callOperation(F_JITOperation_EFJZZ, RegisterID, RegisterID, int32_t, RegisterID); - MacroAssembler::Call callOperation(V_JITOperation_ESsiJJI, StructureStubInfo*, RegisterID, RegisterID, UniquedStringImpl*); + MacroAssembler::Call callOperation(F_JITOperation_EFJJ, RegisterID, RegisterID, RegisterID); + MacroAssembler::Call callOperation(V_JITOperation_ESsiJJI, StructureStubInfo*, RegisterID, RegisterID, StringImpl*); #else - MacroAssembler::Call callOperation(V_JITOperation_ESsiJJI, 
StructureStubInfo*, RegisterID, RegisterID, RegisterID, RegisterID, UniquedStringImpl*); + MacroAssembler::Call callOperation(V_JITOperation_ESsiJJI, StructureStubInfo*, RegisterID, RegisterID, RegisterID, RegisterID, StringImpl*); #endif MacroAssembler::Call callOperation(V_JITOperation_EJJJ, RegisterID, RegisterID, RegisterID); - MacroAssembler::Call callOperation(V_JITOperation_EJJJAp, RegisterID, RegisterID, RegisterID, ArrayProfile*); - MacroAssembler::Call callOperation(V_JITOperation_EJJJBy, RegisterID, RegisterID, RegisterID, ByValInfo*); MacroAssembler::Call callOperation(V_JITOperation_EJZJ, RegisterID, int32_t, RegisterID); MacroAssembler::Call callOperation(V_JITOperation_EJZ, RegisterID, int32_t); MacroAssembler::Call callOperation(V_JITOperation_EPc, Instruction*); MacroAssembler::Call callOperation(V_JITOperation_EZ, int32_t); - MacroAssembler::Call callOperation(V_JITOperation_EZJ, int, GPRReg); MacroAssembler::Call callOperationWithCallFrameRollbackOnException(J_JITOperation_E); MacroAssembler::Call callOperationWithCallFrameRollbackOnException(V_JITOperation_ECb, CodeBlock*); MacroAssembler::Call callOperationWithCallFrameRollbackOnException(Z_JITOperation_E); #if USE(JSVALUE32_64) - MacroAssembler::Call callOperation(F_JITOperation_EFJZZ, RegisterID, RegisterID, RegisterID, int32_t, RegisterID); - MacroAssembler::Call callOperation(Z_JITOperation_EJZZ, GPRReg, GPRReg, int32_t, int32_t); + MacroAssembler::Call callOperation(F_JITOperation_EFJJ, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID); + MacroAssembler::Call callOperation(F_JITOperation_EJZ, GPRReg, GPRReg, int32_t); MacroAssembler::Call callOperation(J_JITOperation_EAapJ, int, ArrayAllocationProfile*, GPRReg, GPRReg); MacroAssembler::Call callOperation(J_JITOperation_EJ, int, GPRReg, GPRReg); MacroAssembler::Call callOperation(J_JITOperation_EJIdc, int, GPRReg, GPRReg, const Identifier*); MacroAssembler::Call callOperation(J_JITOperation_EJJ, int, GPRReg, GPRReg, GPRReg, GPRReg); - MacroAssembler::Call callOperation(J_JITOperation_EJJAp, int, GPRReg, GPRReg, GPRReg, GPRReg, ArrayProfile*); - MacroAssembler::Call callOperation(J_JITOperation_EJJBy, int, GPRReg, GPRReg, GPRReg, GPRReg, ByValInfo*); MacroAssembler::Call callOperation(P_JITOperation_EJS, GPRReg, GPRReg, size_t); MacroAssembler::Call callOperation(S_JITOperation_EJ, RegisterID, RegisterID); MacroAssembler::Call callOperation(S_JITOperation_EJJ, RegisterID, RegisterID, RegisterID, RegisterID); - MacroAssembler::Call callOperation(V_JITOperation_EZSymtabJ, int, SymbolTable*, RegisterID, RegisterID); + MacroAssembler::Call callOperation(V_JITOperation_EIdJZ, const Identifier*, RegisterID, RegisterID, int32_t); MacroAssembler::Call callOperation(V_JITOperation_EJ, RegisterID, RegisterID); MacroAssembler::Call callOperation(V_JITOperation_EJJJ, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID); - MacroAssembler::Call callOperation(V_JITOperation_EJJJAp, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID, ArrayProfile*); - MacroAssembler::Call callOperation(V_JITOperation_EJJJBy, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID, ByValInfo*); MacroAssembler::Call callOperation(V_JITOperation_EJZ, RegisterID, RegisterID, int32_t); MacroAssembler::Call callOperation(V_JITOperation_EJZJ, RegisterID, RegisterID, int32_t, RegisterID, RegisterID); - MacroAssembler::Call callOperation(V_JITOperation_EZJ, int32_t, RegisterID, RegisterID); #endif Jump checkStructure(RegisterID reg, Structure* 
structure); @@ -853,7 +802,7 @@ namespace JSC { Vector<JITGetByIdGenerator> m_getByIds; Vector<JITPutByIdGenerator> m_putByIds; Vector<ByValCompilationInfo> m_byValCompilationInfo; - Vector<CallCompilationInfo> m_callCompilationInfo; + Vector<StructureStubCompilationInfo> m_callStructureStubCompilationInfo; Vector<JumpTable> m_jmpTable; unsigned m_bytecodeOffset; @@ -868,7 +817,7 @@ namespace JSC { unsigned m_byValInstructionIndex; unsigned m_callLinkInfoIndex; - std::unique_ptr<JITDisassembler> m_disassembler; + OwnPtr<JITDisassembler> m_disassembler; RefPtr<Profiler::Compilation> m_compilation; WeakRandom m_randomGenerator; static CodeRef stringGetByValStubGenerator(VM*); diff --git a/Source/JavaScriptCore/jit/JITArithmetic.cpp b/Source/JavaScriptCore/jit/JITArithmetic.cpp index 167e41301..b9c70570c 100644 --- a/Source/JavaScriptCore/jit/JITArithmetic.cpp +++ b/Source/JavaScriptCore/jit/JITArithmetic.cpp @@ -35,7 +35,7 @@ #include "JSArray.h" #include "JSFunction.h" #include "Interpreter.h" -#include "JSCInlines.h" +#include "Operations.h" #include "ResultType.h" #include "SamplingTool.h" #include "SlowPathCall.h" @@ -955,8 +955,9 @@ void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry> int op2 = currentInstruction[3].u.operand; OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand); if (types.first().definitelyIsNumber() && types.second().definitelyIsNumber()) { - if (!ASSERT_DISABLED) - abortWithReason(JITDivOperandsAreNotNumbers); +#ifndef NDEBUG + breakpoint(); +#endif return; } if (!isOperandConstantImmediateDouble(op1) && !isOperandConstantImmediateInt(op1)) { diff --git a/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp b/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp index 30b42d196..53ac73894 100644 --- a/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp +++ b/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp @@ -35,7 +35,7 @@ #include "JSArray.h" #include "JSFunction.h" #include "Interpreter.h" -#include "JSCInlines.h" +#include "Operations.h" #include "ResultType.h" #include "SamplingTool.h" #include "SlowPathCall.h" diff --git a/Source/JavaScriptCore/jit/JITCall.cpp b/Source/JavaScriptCore/jit/JITCall.cpp index 099ade394..90c2e4fb9 100644 --- a/Source/JavaScriptCore/jit/JITCall.cpp +++ b/Source/JavaScriptCore/jit/JITCall.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008, 2013-2015 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2013 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -29,18 +29,16 @@ #if USE(JSVALUE64) #include "JIT.h" +#include "Arguments.h" #include "CodeBlock.h" #include "JITInlines.h" #include "JSArray.h" #include "JSFunction.h" #include "Interpreter.h" -#include "JSCInlines.h" -#include "LinkBuffer.h" +#include "Operations.h" #include "RepatchBuffer.h" #include "ResultType.h" #include "SamplingTool.h" -#include "SetupVarargsFrame.h" -#include "StackAlignment.h" #include "ThunkGenerators.h" #include <wtf/StringPrintStream.h> @@ -54,50 +52,74 @@ void JIT::emitPutCallResult(Instruction* instruction) emitPutVirtualRegister(dst); } -void JIT::compileSetupVarargsFrame(Instruction* instruction, CallLinkInfo* info) +void JIT::compileLoadVarargs(Instruction* instruction) { int thisValue = instruction[3].u.operand; int arguments = instruction[4].u.operand; int firstFreeRegister = instruction[5].u.operand; - int firstVarArgOffset = instruction[6].u.operand; + + JumpList slowCase; + JumpList end; + bool canOptimize = m_codeBlock->usesArguments() + && arguments == m_codeBlock->argumentsRegister().offset() + && !m_codeBlock->symbolTable()->slowArguments(); + + if (canOptimize) { + emitGetVirtualRegister(arguments, regT0); + slowCase.append(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(JSValue())))); + + emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT0); + slowCase.append(branch32(Above, regT0, TrustedImm32(Arguments::MaxArguments + 1))); + // regT0: argumentCountIncludingThis + + move(regT0, regT1); + neg64(regT1); + add64(TrustedImm32(firstFreeRegister - JSStack::CallFrameHeaderSize), regT1); + lshift64(TrustedImm32(3), regT1); + addPtr(callFrameRegister, regT1); + // regT1: newCallFrame + + slowCase.append(branchPtr(Above, AbsoluteAddress(m_vm->addressOfJSStackLimit()), regT1)); + + // Initialize ArgumentCount. + store32(regT0, Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload))); + + // Initialize 'this'. + emitGetVirtualRegister(thisValue, regT2); + store64(regT2, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))); + + // Copy arguments. 
+ signExtend32ToPtr(regT0, regT0); + end.append(branchSub64(Zero, TrustedImm32(1), regT0)); + // regT0: argumentCount + + Label copyLoop = label(); + load64(BaseIndex(callFrameRegister, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT2); + store64(regT2, BaseIndex(regT1, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))); + branchSub64(NonZero, TrustedImm32(1), regT0).linkTo(copyLoop, this); + + end.append(jump()); + } + + if (canOptimize) + slowCase.link(this); emitGetVirtualRegister(arguments, regT1); - callOperation(operationSizeFrameForVarargs, regT1, -firstFreeRegister, firstVarArgOffset); - move(TrustedImm32(-firstFreeRegister), regT1); - emitSetVarargsFrame(*this, returnValueGPR, false, regT1, regT1); - addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(void*)))), regT1, stackPointerRegister); + callOperation(operationSizeAndAllocFrameForVarargs, regT1, firstFreeRegister); + emitGetVirtualRegister(thisValue, regT1); emitGetVirtualRegister(arguments, regT2); - callOperation(operationSetupVarargsFrame, regT1, regT2, firstVarArgOffset, regT0); + callOperation(operationLoadVarargs, returnValueGPR, regT1, regT2); move(returnValueGPR, regT1); - // Profile the argument count. - load32(Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset), regT2); - load8(info->addressOfMaxNumArguments(), regT0); - Jump notBiggest = branch32(Above, regT0, regT2); - Jump notSaturated = branch32(BelowOrEqual, regT2, TrustedImm32(255)); - move(TrustedImm32(255), regT2); - notSaturated.link(this); - store8(regT2, info->addressOfMaxNumArguments()); - notBiggest.link(this); - - // Initialize 'this'. 
- emitGetVirtualRegister(thisValue, regT0); - store64(regT0, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))); - - addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), regT1, stackPointerRegister); + if (canOptimize) + end.link(this); } void JIT::compileCallEval(Instruction* instruction) { - addPtr(TrustedImm32(-static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))), stackPointerRegister, regT1); - storePtr(callFrameRegister, Address(regT1, CallFrame::callerFrameOffset())); - - addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister); - checkStackPointerAlignment(); - - callOperation(operationCallEval, regT1); - + callOperationWithCallFrameRollbackOnException(operationCallEval); addSlowCase(branch64(Equal, regT0, TrustedImm64(JSValue::encode(JSValue())))); + emitGetCallerFrameFromCallFrameHeaderPtr(callFrameRegister); sampleCodeBlock(m_codeBlock); @@ -106,21 +128,10 @@ void JIT::compileCallEval(Instruction* instruction) void JIT::compileCallEvalSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter) { - CallLinkInfo* info = m_codeBlock->addCallLinkInfo(); - info->setUpCall(CallLinkInfo::Call, CodeOrigin(m_bytecodeOffset), regT0); - linkSlowCase(iter); - int registerOffset = -instruction[4].u.operand; - addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister); - - load64(Address(stackPointerRegister, sizeof(Register) * JSStack::Callee - sizeof(CallerFrameAndPC)), regT0); - move(TrustedImmPtr(info), regT2); - MacroAssemblerCodeRef virtualThunk = virtualThunkFor(m_vm, *info); - info->setSlowStub(createJITStubRoutine(virtualThunk, *m_vm, nullptr, true)); - emitNakedCall(virtualThunk.code()); - addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister); - checkStackPointerAlignment(); + emitGetFromCallFrameHeader64(JSStack::Callee, regT0); + emitNakedCall(m_vm->getCTIStub(virtualCallThunkGenerator).code()); sampleCodeBlock(m_codeBlock); @@ -136,21 +147,17 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca - Initializes ArgumentCount; CallerFrame; Callee. For a JS call: + - Caller initializes ScopeChain. - Callee initializes ReturnPC; CodeBlock. - Callee restores callFrameRegister before return. For a non-JS call: - - Caller initializes ReturnPC; CodeBlock. + - Caller initializes ScopeChain; ReturnPC; CodeBlock. - Caller restores callFrameRegister after return. 
*/ - COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct), call_and_construct_opcodes_must_be_same_length); - COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_varargs), call_and_call_varargs_opcodes_must_be_same_length); - COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct_varargs), call_and_construct_varargs_opcodes_must_be_same_length); - CallLinkInfo* info; - if (opcodeID != op_call_eval) - info = m_codeBlock->addCallLinkInfo(); - if (opcodeID == op_call_varargs || opcodeID == op_construct_varargs) - compileSetupVarargsFrame(instruction, info); + + if (opcodeID == op_call_varargs) + compileLoadVarargs(instruction); else { int argCount = instruction[3].u.operand; int registerOffset = -instruction[4].u.operand; @@ -158,22 +165,24 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca if (opcodeID == op_call && shouldEmitProfiling()) { emitGetVirtualRegister(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0); Jump done = emitJumpIfNotJSCell(regT0); - load32(Address(regT0, JSCell::structureIDOffset()), regT0); - store32(regT0, instruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile->addressOfLastSeenStructureID()); + loadPtr(Address(regT0, JSCell::structureOffset()), regT0); + storePtr(regT0, instruction[6].u.arrayProfile->addressOfLastSeenStructure()); done.link(this); } - addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister); - store32(TrustedImm32(argCount), Address(stackPointerRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC))); - } // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized. + addPtr(TrustedImm32(registerOffset * sizeof(Register)), callFrameRegister, regT1); + store32(TrustedImm32(argCount), Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload))); + } // regT1 holds newCallFrame with ArgumentCount initialized. uint32_t bytecodeOffset = instruction - m_codeBlock->instructions().begin(); uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(bytecodeOffset); - store32(TrustedImm32(locationBits), Address(callFrameRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + TagOffset)); + store32(TrustedImm32(locationBits), Address(callFrameRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag))); emitGetVirtualRegister(callee, regT0); // regT0 holds callee. 
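For orientation, the stores around this point implement the handoff described in the responsibility comment above. A rough C++-style sketch of the caller's side on JSVALUE64, in hunk order (newCallFrame stands for the frame held in regT1; the slot accessors are illustrative shorthand, not JSC API):

    // Caller-side frame setup before the naked call (sketch):
    newCallFrame[JSStack::ArgumentCount].payload = argCount;  // store32 earlier in compileOpCall
    newCallFrame[JSStack::CallerFrame] = callFrameRegister;   // store64 below
    newCallFrame[JSStack::Callee] = callee;                   // store64 below; callee is regT0
    callFrameRegister = newCallFrame;                         // move(regT1, callFrameRegister)
    newCallFrame[JSStack::ScopeChain] = callee->scope();      // loadPtr of m_scope + emitPutToCallFrameHeader
    // A JS callee then writes ReturnPC and CodeBlock on entry.
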
- store64(regT0, Address(stackPointerRegister, JSStack::Callee * static_cast<int>(sizeof(Register)) - sizeof(CallerFrameAndPC))); - + store64(callFrameRegister, Address(regT1, CallFrame::callerFrameOffset())); + store64(regT0, Address(regT1, JSStack::Callee * static_cast<int>(sizeof(Register)))); + move(regT1, callFrameRegister); + if (opcodeID == op_call_eval) { compileCallEval(instruction); return; @@ -183,16 +192,15 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0)); addSlowCase(slowCase); - ASSERT(m_callCompilationInfo.size() == callLinkInfoIndex); - info->setUpCall(CallLinkInfo::callTypeFor(opcodeID), CodeOrigin(m_bytecodeOffset), regT0); - m_callCompilationInfo.append(CallCompilationInfo()); - m_callCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck; - m_callCompilationInfo[callLinkInfoIndex].callLinkInfo = info; - - m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall(); + ASSERT(m_callStructureStubCompilationInfo.size() == callLinkInfoIndex); + m_callStructureStubCompilationInfo.append(StructureStubCompilationInfo()); + m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck; + m_callStructureStubCompilationInfo[callLinkInfoIndex].callType = CallLinkInfo::callTypeFor(opcodeID); + m_callStructureStubCompilationInfo[callLinkInfoIndex].bytecodeIndex = m_bytecodeOffset; - addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister); - checkStackPointerAlignment(); + loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT1); + emitPutToCallFrameHeader(regT1, JSStack::ScopeChain); + m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall(); sampleCodeBlock(m_codeBlock); @@ -208,17 +216,59 @@ void JIT::compileOpCallSlowCase(OpcodeID opcodeID, Instruction* instruction, Vec linkSlowCase(iter); - move(TrustedImmPtr(m_callCompilationInfo[callLinkInfoIndex].callLinkInfo), regT2); - m_callCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_vm->getCTIStub(linkCallThunkGenerator).code()); - - addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister); - checkStackPointerAlignment(); + m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? 
m_vm->getCTIStub(linkConstructThunkGenerator).code() : m_vm->getCTIStub(linkCallThunkGenerator).code()); sampleCodeBlock(m_codeBlock); emitPutCallResult(instruction); } +void JIT::privateCompileClosureCall(CallLinkInfo* callLinkInfo, CodeBlock* calleeCodeBlock, Structure* expectedStructure, ExecutableBase* expectedExecutable, MacroAssemblerCodePtr codePtr) +{ + JumpList slowCases; + + slowCases.append(branchTestPtr(NonZero, regT0, tagMaskRegister)); + slowCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(expectedStructure))); + slowCases.append(branchPtr(NotEqual, Address(regT0, JSFunction::offsetOfExecutable()), TrustedImmPtr(expectedExecutable))); + + loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT1); + emitPutToCallFrameHeader(regT1, JSStack::ScopeChain); + + Call call = nearCall(); + Jump done = jump(); + + slowCases.link(this); + move(TrustedImmPtr(callLinkInfo->callReturnLocation.executableAddress()), regT2); + restoreReturnAddressBeforeReturn(regT2); + Jump slow = jump(); + + LinkBuffer patchBuffer(*m_vm, this, m_codeBlock); + + patchBuffer.link(call, FunctionPtr(codePtr.executableAddress())); + patchBuffer.link(done, callLinkInfo->hotPathOther.labelAtOffset(0)); + patchBuffer.link(slow, CodeLocationLabel(m_vm->getCTIStub(virtualCallThunkGenerator).code())); + + RefPtr<ClosureCallStubRoutine> stubRoutine = adoptRef(new ClosureCallStubRoutine( + FINALIZE_CODE( + patchBuffer, + ("Baseline closure call stub for %s, return point %p, target %p (%s)", + toCString(*m_codeBlock).data(), + callLinkInfo->hotPathOther.labelAtOffset(0).executableAddress(), + codePtr.executableAddress(), + toCString(pointerDump(calleeCodeBlock)).data())), + *m_vm, m_codeBlock->ownerExecutable(), expectedStructure, expectedExecutable, + callLinkInfo->codeOrigin)); + + RepatchBuffer repatchBuffer(m_codeBlock); + + repatchBuffer.replaceWithJump( + RepatchBuffer::startOfBranchPtrWithPatchOnRegister(callLinkInfo->hotPathBegin), + CodeLocationLabel(stubRoutine->code().code())); + repatchBuffer.relink(callLinkInfo->callReturnLocation, m_vm->getCTIStub(virtualCallThunkGenerator).code()); + + callLinkInfo->stub = stubRoutine.release(); +} + void JIT::emit_op_call(Instruction* currentInstruction) { compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++); @@ -233,11 +283,6 @@ void JIT::emit_op_call_varargs(Instruction* currentInstruction) { compileOpCall(op_call_varargs, currentInstruction, m_callLinkInfoIndex++); } - -void JIT::emit_op_construct_varargs(Instruction* currentInstruction) -{ - compileOpCall(op_construct_varargs, currentInstruction, m_callLinkInfoIndex++); -} void JIT::emit_op_construct(Instruction* currentInstruction) { @@ -258,12 +303,7 @@ void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowC { compileOpCallSlowCase(op_call_varargs, currentInstruction, iter, m_callLinkInfoIndex++); } - -void JIT::emitSlow_op_construct_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) -{ - compileOpCallSlowCase(op_construct_varargs, currentInstruction, iter, m_callLinkInfoIndex++); -} - + void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) { compileOpCallSlowCase(op_construct, currentInstruction, iter, m_callLinkInfoIndex++); diff --git a/Source/JavaScriptCore/jit/JITCall32_64.cpp b/Source/JavaScriptCore/jit/JITCall32_64.cpp index 2935ca6ac..6086038a2 100644 --- a/Source/JavaScriptCore/jit/JITCall32_64.cpp +++ b/Source/JavaScriptCore/jit/JITCall32_64.cpp 
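Before the 32-bit counterpart below, a compact model of the fast path that compileLoadVarargs emits in both ports may be useful. This is a hedged, self-contained C++ sketch of the runtime effect, not JSC source: the slot constants are plain parameters standing in for JSStack::CallFrameHeaderSize, JSStack::ArgumentCount, and CallFrame::thisArgumentOffset(), and the emptiness and Arguments::MaxArguments checks that guard entry to the fast path are assumed to have passed.

    #include <cstdint>

    // Model of the emitted fast path: place the new frame below the current one,
    // bounds-check it against the JS stack limit, store ArgumentCount and 'this',
    // then copy the arguments down with a decrementing loop (the copyLoop label).
    static int64_t* fillVarargsFrame(int64_t* callFrame, int64_t* stackLimit,
        int firstFreeRegister,   // (negative) bytecode operand
        int64_t argCount,        // ArgumentCount including 'this'
        int headerSize, int argCountSlot, int thisOffset,
        int64_t thisValue)
    {
        int64_t* newCallFrame = callFrame + firstFreeRegister - headerSize - argCount;
        if (newCallFrame < stackLimit)
            return nullptr; // caller takes the operationSizeAndAllocFrameForVarargs slow path
        newCallFrame[argCountSlot] = argCount;      // the JIT stores only the 32-bit payload half
        newCallFrame[thisOffset] = thisValue;
        for (int64_t i = argCount - 1; i >= 1; --i) // counts down, exactly like the emitted loop
            newCallFrame[thisOffset + i] = callFrame[thisOffset + i];
        return newCallFrame;
    }

The 32-bit port below does the same thing with 32-bit halves: each copy moves the payload and tag words separately, and the new frame lands in regT3 rather than regT1.
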
@@ -1,5 +1,5 @@ /* - * Copyright (C) 2008, 2013-2015 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2013 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -29,18 +29,16 @@ #if USE(JSVALUE32_64) #include "JIT.h" +#include "Arguments.h" #include "CodeBlock.h" #include "Interpreter.h" #include "JITInlines.h" #include "JSArray.h" #include "JSFunction.h" -#include "JSCInlines.h" -#include "LinkBuffer.h" +#include "Operations.h" #include "RepatchBuffer.h" #include "ResultType.h" #include "SamplingTool.h" -#include "SetupVarargsFrame.h" -#include "StackAlignment.h" #include <wtf/StringPrintStream.h> @@ -58,9 +56,37 @@ void JIT::emit_op_ret(Instruction* currentInstruction) unsigned dst = currentInstruction[1].u.operand; emitLoad(dst, regT1, regT0); + emitGetReturnPCFromCallFrameHeaderPtr(regT2); + emitGetCallerFrameFromCallFrameHeaderPtr(callFrameRegister); - checkStackPointerAlignment(); - emitFunctionEpilogue(); + restoreReturnAddressBeforeReturn(regT2); + ret(); +} + +void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction) +{ + unsigned result = currentInstruction[1].u.operand; + unsigned thisReg = currentInstruction[2].u.operand; + + emitLoad(result, regT1, regT0); + Jump notJSCell = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)); + loadPtr(Address(regT0, JSCell::structureOffset()), regT2); + Jump notObject = emitJumpIfNotObject(regT2); + + emitGetReturnPCFromCallFrameHeaderPtr(regT2); + emitGetCallerFrameFromCallFrameHeaderPtr(callFrameRegister); + + restoreReturnAddressBeforeReturn(regT2); + ret(); + + notJSCell.link(this); + notObject.link(this); + emitLoad(thisReg, regT1, regT0); + + emitGetReturnPCFromCallFrameHeaderPtr(regT2); + emitGetCallerFrameFromCallFrameHeaderPtr(callFrameRegister); + + restoreReturnAddressBeforeReturn(regT2); ret(); } @@ -78,12 +104,7 @@ void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowC { compileOpCallSlowCase(op_call_varargs, currentInstruction, iter, m_callLinkInfoIndex++); } - -void JIT::emitSlow_op_construct_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) -{ - compileOpCallSlowCase(op_construct_varargs, currentInstruction, iter, m_callLinkInfoIndex++); -} - + void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) { compileOpCallSlowCase(op_construct, currentInstruction, iter, m_callLinkInfoIndex++); @@ -103,61 +124,83 @@ void JIT::emit_op_call_varargs(Instruction* currentInstruction) { compileOpCall(op_call_varargs, currentInstruction, m_callLinkInfoIndex++); } - -void JIT::emit_op_construct_varargs(Instruction* currentInstruction) -{ - compileOpCall(op_construct_varargs, currentInstruction, m_callLinkInfoIndex++); -} - + void JIT::emit_op_construct(Instruction* currentInstruction) { compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++); } -void JIT::compileSetupVarargsFrame(Instruction* instruction, CallLinkInfo* info) +void JIT::compileLoadVarargs(Instruction* instruction) { int thisValue = instruction[3].u.operand; int arguments = instruction[4].u.operand; int firstFreeRegister = instruction[5].u.operand; - int firstVarArgOffset = instruction[6].u.operand; + + JumpList slowCase; + JumpList end; + bool canOptimize = m_codeBlock->usesArguments() + && VirtualRegister(arguments) == m_codeBlock->argumentsRegister() + && !m_codeBlock->symbolTable()->slowArguments(); + + if 
(canOptimize) { + emitLoadTag(arguments, regT1); + slowCase.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag))); + + load32(payloadFor(JSStack::ArgumentCount), regT2); + slowCase.append(branch32(Above, regT2, TrustedImm32(Arguments::MaxArguments + 1))); + // regT2: argumentCountIncludingThis + + move(regT2, regT3); + neg32(regT3); + add32(TrustedImm32(firstFreeRegister - JSStack::CallFrameHeaderSize), regT3); + lshift32(TrustedImm32(3), regT3); + addPtr(callFrameRegister, regT3); + // regT3: newCallFrame + + slowCase.append(branchPtr(Above, AbsoluteAddress(m_vm->addressOfJSStackLimit()), regT3)); + + // Initialize ArgumentCount. + store32(regT2, payloadFor(JSStack::ArgumentCount, regT3)); + + // Initialize 'this'. + emitLoad(thisValue, regT1, regT0); + store32(regT0, Address(regT3, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))))); + store32(regT1, Address(regT3, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))))); + + // Copy arguments. + end.append(branchSub32(Zero, TrustedImm32(1), regT2)); + // regT2: argumentCount; + + Label copyLoop = label(); + load32(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))), regT0); + load32(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))), regT1); + store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))))); + store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))))); + branchSub32(NonZero, TrustedImm32(1), regT2).linkTo(copyLoop, this); + + end.append(jump()); + } + + if (canOptimize) + slowCase.link(this); emitLoad(arguments, regT1, regT0); - callOperation(operationSizeFrameForVarargs, regT1, regT0, -firstFreeRegister, firstVarArgOffset); - move(TrustedImm32(-firstFreeRegister), regT1); - emitSetVarargsFrame(*this, returnValueGPR, false, regT1, regT1); - addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 6 * sizeof(void*)))), regT1, stackPointerRegister); - emitLoad(arguments, regT2, regT4); - callOperation(operationSetupVarargsFrame, regT1, regT2, regT4, firstVarArgOffset, regT0); - move(returnValueGPR, regT1); - - // Profile the argument count. - load32(Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset), regT2); - load8(info->addressOfMaxNumArguments(), regT0); - Jump notBiggest = branch32(Above, regT0, regT2); - Jump notSaturated = branch32(BelowOrEqual, regT2, TrustedImm32(255)); - move(TrustedImm32(255), regT2); - notSaturated.link(this); - store8(regT2, info->addressOfMaxNumArguments()); - notBiggest.link(this); - - // Initialize 'this'. 
- emitLoad(thisValue, regT2, regT0); - store32(regT0, Address(regT1, PayloadOffset + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))))); - store32(regT2, Address(regT1, TagOffset + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))))); - - addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), regT1, stackPointerRegister); + callOperation(operationSizeAndAllocFrameForVarargs, regT1, regT0, firstFreeRegister); + move(returnValueGPR, regT5); + emitLoad(thisValue, regT1, regT0); + emitLoad(arguments, regT3, regT2); + callOperation(operationLoadVarargs, regT5, regT1, regT0, regT3, regT2); + move(returnValueGPR, regT3); + + if (canOptimize) + end.link(this); } void JIT::compileCallEval(Instruction* instruction) { - addPtr(TrustedImm32(-static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))), stackPointerRegister, regT1); - storePtr(callFrameRegister, Address(regT1, CallFrame::callerFrameOffset())); - - addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister); - - callOperation(operationCallEval, regT1); - + callOperationWithCallFrameRollbackOnException(operationCallEval); addSlowCase(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag))); + emitGetCallerFrameFromCallFrameHeaderPtr(callFrameRegister); sampleCodeBlock(m_codeBlock); @@ -166,25 +209,10 @@ void JIT::compileCallEval(Instruction* instruction) void JIT::compileCallEvalSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter) { - CallLinkInfo* info = m_codeBlock->addCallLinkInfo(); - info->setUpCall(CallLinkInfo::Call, CodeOrigin(m_bytecodeOffset), regT0); - linkSlowCase(iter); - int registerOffset = -instruction[4].u.operand; - - addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister); - - loadPtr(Address(stackPointerRegister, sizeof(Register) * JSStack::Callee - sizeof(CallerFrameAndPC)), regT0); - loadPtr(Address(stackPointerRegister, sizeof(Register) * JSStack::Callee - sizeof(CallerFrameAndPC)), regT1); - move(TrustedImmPtr(info), regT2); - emitLoad(JSStack::Callee, regT1, regT0); - MacroAssemblerCodeRef virtualThunk = virtualThunkFor(m_vm, *info); - info->setSlowStub(createJITStubRoutine(virtualThunk, *m_vm, nullptr, true)); - emitNakedCall(virtualThunk.code()); - addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister); - checkStackPointerAlignment(); + emitNakedCall(m_vm->getCTIStub(virtualCallThunkGenerator).code()); sampleCodeBlock(m_codeBlock); @@ -200,18 +228,17 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca - Initializes ArgumentCount; CallerFrame; Callee. For a JS call: + - Caller initializes ScopeChain. - Callee initializes ReturnPC; CodeBlock. - Callee restores callFrameRegister before return. For a non-JS call: - - Caller initializes ReturnPC; CodeBlock. + - Caller initializes ScopeChain; ReturnPC; CodeBlock. - Caller restores callFrameRegister after return. 
*/ - CallLinkInfo* info; - if (opcodeID != op_call_eval) - info = m_codeBlock->addCallLinkInfo(); - if (opcodeID == op_call_varargs || opcodeID == op_construct_varargs) - compileSetupVarargsFrame(instruction, info); + + if (opcodeID == op_call_varargs) + compileLoadVarargs(instruction); else { int argCount = instruction[3].u.operand; int registerOffset = -instruction[4].u.operand; @@ -219,46 +246,44 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca if (opcodeID == op_call && shouldEmitProfiling()) { emitLoad(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0, regT1); Jump done = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag)); - loadPtr(Address(regT1, JSCell::structureIDOffset()), regT1); - storePtr(regT1, instruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile->addressOfLastSeenStructureID()); + loadPtr(Address(regT1, JSCell::structureOffset()), regT1); + storePtr(regT1, instruction[6].u.arrayProfile->addressOfLastSeenStructure()); done.link(this); } - addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister); + addPtr(TrustedImm32(registerOffset * sizeof(Register)), callFrameRegister, regT3); - store32(TrustedImm32(argCount), Address(stackPointerRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC))); - } // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized. + store32(TrustedImm32(argCount), payloadFor(JSStack::ArgumentCount, regT3)); + } // regT3 holds newCallFrame with ArgumentCount initialized. uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction); store32(TrustedImm32(locationBits), tagFor(JSStack::ArgumentCount, callFrameRegister)); emitLoad(callee, regT1, regT0); // regT1, regT0 holds callee. 
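The same handoff as in the 64-bit port, except that on JSVALUE32_64 each JSValue slot is written as two 32-bit stores. A rough sketch of the stores that follow (newCallFrame stands for regT3, and asBits mirrors the EncodedValueDescriptor layout used elsewhere in this file; shorthand, not JSC API):

    // Caller-side frame setup, 32-bit flavor (sketch):
    newCallFrame[JSStack::CallerFrame] = callFrameRegister;    // storePtr below
    newCallFrame[JSStack::Callee].asBits.payload = regT0;      // cell pointer
    newCallFrame[JSStack::Callee].asBits.tag     = regT1;      // CellTag for a JSFunction
    callFrameRegister = newCallFrame;                          // move(regT3, callFrameRegister)
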
- store32(regT0, Address(stackPointerRegister, JSStack::Callee * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC))); - store32(regT1, Address(stackPointerRegister, JSStack::Callee * static_cast<int>(sizeof(Register)) + TagOffset - sizeof(CallerFrameAndPC))); + storePtr(callFrameRegister, Address(regT3, CallFrame::callerFrameOffset())); + emitStore(JSStack::Callee, regT1, regT0, regT3); + move(regT3, callFrameRegister); if (opcodeID == op_call_eval) { compileCallEval(instruction); return; } - addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag))); - DataLabelPtr addressOfLinkedFunctionCheck; Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0)); addSlowCase(slowCase); + addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag))); - ASSERT(m_callCompilationInfo.size() == callLinkInfoIndex); - info->setUpCall(CallLinkInfo::callTypeFor(opcodeID), CodeOrigin(m_bytecodeOffset), regT0); - m_callCompilationInfo.append(CallCompilationInfo()); - m_callCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck; - m_callCompilationInfo[callLinkInfoIndex].callLinkInfo = info; - - checkStackPointerAlignment(); - m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall(); + ASSERT(m_callStructureStubCompilationInfo.size() == callLinkInfoIndex); + m_callStructureStubCompilationInfo.append(StructureStubCompilationInfo()); + m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck; + m_callStructureStubCompilationInfo[callLinkInfoIndex].callType = CallLinkInfo::callTypeFor(opcodeID); + m_callStructureStubCompilationInfo[callLinkInfoIndex].bytecodeIndex = m_bytecodeOffset; - addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister); - checkStackPointerAlignment(); + loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT1); + emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain); + m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall(); sampleCodeBlock(m_codeBlock); emitPutCallResult(instruction); @@ -274,16 +299,58 @@ void JIT::compileOpCallSlowCase(OpcodeID opcodeID, Instruction* instruction, Vec linkSlowCase(iter); linkSlowCase(iter); - move(TrustedImmPtr(m_callCompilationInfo[callLinkInfoIndex].callLinkInfo), regT2); - m_callCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_vm->getCTIStub(linkCallThunkGenerator).code()); - - addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister); - checkStackPointerAlignment(); + m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? 
m_vm->getCTIStub(linkConstructThunkGenerator).code() : m_vm->getCTIStub(linkCallThunkGenerator).code()); sampleCodeBlock(m_codeBlock); emitPutCallResult(instruction); } +void JIT::privateCompileClosureCall(CallLinkInfo* callLinkInfo, CodeBlock* calleeCodeBlock, Structure* expectedStructure, ExecutableBase* expectedExecutable, MacroAssemblerCodePtr codePtr) +{ + JumpList slowCases; + + slowCases.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag))); + slowCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(expectedStructure))); + slowCases.append(branchPtr(NotEqual, Address(regT0, JSFunction::offsetOfExecutable()), TrustedImmPtr(expectedExecutable))); + + loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT1); + emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain); + + Call call = nearCall(); + Jump done = jump(); + + slowCases.link(this); + move(TrustedImmPtr(callLinkInfo->callReturnLocation.executableAddress()), regT2); + restoreReturnAddressBeforeReturn(regT2); + Jump slow = jump(); + + LinkBuffer patchBuffer(*m_vm, this, m_codeBlock); + + patchBuffer.link(call, FunctionPtr(codePtr.executableAddress())); + patchBuffer.link(done, callLinkInfo->hotPathOther.labelAtOffset(0)); + patchBuffer.link(slow, CodeLocationLabel(m_vm->getCTIStub(virtualCallThunkGenerator).code())); + + RefPtr<ClosureCallStubRoutine> stubRoutine = adoptRef(new ClosureCallStubRoutine( + FINALIZE_CODE( + patchBuffer, + ("Baseline closure call stub for %s, return point %p, target %p (%s)", + toCString(*m_codeBlock).data(), + callLinkInfo->hotPathOther.labelAtOffset(0).executableAddress(), + codePtr.executableAddress(), + toCString(pointerDump(calleeCodeBlock)).data())), + *m_vm, m_codeBlock->ownerExecutable(), expectedStructure, expectedExecutable, + callLinkInfo->codeOrigin)); + + RepatchBuffer repatchBuffer(m_codeBlock); + + repatchBuffer.replaceWithJump( + RepatchBuffer::startOfBranchPtrWithPatchOnRegister(callLinkInfo->hotPathBegin), + CodeLocationLabel(stubRoutine->code().code())); + repatchBuffer.relink(callLinkInfo->callReturnLocation, m_vm->getCTIStub(virtualCallThunkGenerator).code()); + + callLinkInfo->stub = stubRoutine.release(); +} + } // namespace JSC #endif // USE(JSVALUE32_64) diff --git a/Source/JavaScriptCore/jit/JITCode.cpp b/Source/JavaScriptCore/jit/JITCode.cpp index 5310b5da6..213b7000c 100644 --- a/Source/JavaScriptCore/jit/JITCode.cpp +++ b/Source/JavaScriptCore/jit/JITCode.cpp @@ -27,9 +27,7 @@ #include "JITCode.h" #include "LLIntThunks.h" -#include "JSCInlines.h" -#include "ProtoCallFrame.h" -#include "RegisterPreservationWrapperGenerator.h" +#include "Operations.h" #include <wtf/PrintStream.h> namespace JSC { @@ -43,42 +41,11 @@ JITCode::~JITCode() { } -const char* JITCode::typeName(JITType jitType) +JSValue JITCode::execute(VM* vm, ProtoCallFrame* protoCallFrame, Register* topOfStack) { - switch (jitType) { - case None: - return "None"; - case HostCallThunk: - return "Host"; - case InterpreterThunk: - return "LLInt"; - case BaselineJIT: - return "Baseline"; - case DFGJIT: - return "DFG"; - case FTLJIT: - return "FTL"; - default: - CRASH(); - return ""; - } -} - -void JITCode::validateReferences(const TrackedReferences&) -{ -} - -JSValue JITCode::execute(VM* vm, ProtoCallFrame* protoCallFrame) -{ - void* entryAddress; - JSFunction* function = jsDynamicCast<JSFunction*>(protoCallFrame->callee()); + ASSERT(!vm->topCallFrame || ((Register*)(vm->topCallFrame) >= topOfStack)); - if (!function || !protoCallFrame->needArityCheck()) { - 
ASSERT(!protoCallFrame->needArityCheck()); - entryAddress = executableAddress(); - } else - entryAddress = addressForCall(*vm, function->executable(), MustCheckArity, RegisterPreservationNotRequired).executableAddress(); - JSValue result = JSValue::decode(vmEntryToJavaScript(entryAddress, vm, protoCallFrame)); + JSValue result = JSValue::decode(callToJavaScript(executableAddress(), &vm->topCallFrame, protoCallFrame, topOfStack)); return vm->exception() ? jsNull() : result; } @@ -106,38 +73,52 @@ FTL::ForOSREntryJITCode* JITCode::ftlForOSREntry() return 0; } -JITCodeWithCodeRef::JITCodeWithCodeRef(JITType jitType) +PassRefPtr<JITCode> JITCode::hostFunction(JITCode::CodeRef code) +{ + return adoptRef(new DirectJITCode(code, HostCallThunk)); +} + +DirectJITCode::DirectJITCode(JITType jitType) : JITCode(jitType) { } -JITCodeWithCodeRef::JITCodeWithCodeRef(CodeRef ref, JITType jitType) +DirectJITCode::DirectJITCode(const JITCode::CodeRef ref, JITType jitType) : JITCode(jitType) , m_ref(ref) { } -JITCodeWithCodeRef::~JITCodeWithCodeRef() +DirectJITCode::~DirectJITCode() +{ +} + +void DirectJITCode::initializeCodeRef(const JITCode::CodeRef ref) { - if ((Options::showDisassembly() || (isOptimizingJIT(jitType()) && Options::showDFGDisassembly())) - && m_ref.executableMemory()) - dataLog("Destroying JIT code at ", pointerDump(m_ref.executableMemory()), "\n"); + RELEASE_ASSERT(!m_ref); + m_ref = ref; } -void* JITCodeWithCodeRef::executableAddressAtOffset(size_t offset) +JITCode::CodePtr DirectJITCode::addressForCall() +{ + RELEASE_ASSERT(m_ref); + return m_ref.code(); +} + +void* DirectJITCode::executableAddressAtOffset(size_t offset) { RELEASE_ASSERT(m_ref); return reinterpret_cast<char*>(m_ref.code().executableAddress()) + offset; } -void* JITCodeWithCodeRef::dataAddressAtOffset(size_t offset) +void* DirectJITCode::dataAddressAtOffset(size_t offset) { RELEASE_ASSERT(m_ref); ASSERT(offset <= size()); // use <= instead of < because it is valid to ask for an address at the exclusive end of the code. 
return reinterpret_cast<char*>(m_ref.code().dataLocation()) + offset; } -unsigned JITCodeWithCodeRef::offsetOf(void* pointerIntoCode) +unsigned DirectJITCode::offsetOf(void* pointerIntoCode) { RELEASE_ASSERT(m_ref); intptr_t result = reinterpret_cast<intptr_t>(pointerIntoCode) - reinterpret_cast<intptr_t>(m_ref.code().executableAddress()); @@ -145,123 +126,47 @@ unsigned JITCodeWithCodeRef::offsetOf(void* pointerIntoCode) return static_cast<unsigned>(result); } -size_t JITCodeWithCodeRef::size() +size_t DirectJITCode::size() { RELEASE_ASSERT(m_ref); return m_ref.size(); } -bool JITCodeWithCodeRef::contains(void* address) +bool DirectJITCode::contains(void* address) { RELEASE_ASSERT(m_ref); return m_ref.executableMemory()->contains(address); } -DirectJITCode::DirectJITCode(JITType jitType) - : JITCodeWithCodeRef(jitType) -{ -} - -DirectJITCode::DirectJITCode(JITCode::CodeRef ref, JITCode::CodePtr withArityCheck, JITType jitType) - : JITCodeWithCodeRef(ref, jitType) - , m_withArityCheck(withArityCheck) -{ -} - -DirectJITCode::~DirectJITCode() -{ -} - -void DirectJITCode::initializeCodeRef(JITCode::CodeRef ref, JITCode::CodePtr withArityCheck) -{ - RELEASE_ASSERT(!m_ref); - m_ref = ref; - m_withArityCheck = withArityCheck; -} - -DirectJITCode::RegisterPreservationWrappers* DirectJITCode::ensureWrappers() -{ - if (!m_wrappers) - m_wrappers = std::make_unique<RegisterPreservationWrappers>(); - return m_wrappers.get(); -} - -JITCode::CodePtr DirectJITCode::addressForCall( - VM& vm, ExecutableBase* executable, ArityCheckMode arity, - RegisterPreservationMode registers) -{ - switch (arity) { - case ArityCheckNotRequired: - switch (registers) { - case RegisterPreservationNotRequired: - RELEASE_ASSERT(m_ref); - return m_ref.code(); - case MustPreserveRegisters: { -#if ENABLE(JIT) - RegisterPreservationWrappers* wrappers = ensureWrappers(); - if (!wrappers->withoutArityCheck) - wrappers->withoutArityCheck = generateRegisterPreservationWrapper(vm, executable, m_ref.code()); - return wrappers->withoutArityCheck.code(); -#else - UNUSED_PARAM(vm); - UNUSED_PARAM(executable); - RELEASE_ASSERT_NOT_REACHED(); -#endif - } } - case MustCheckArity: - switch (registers) { - case RegisterPreservationNotRequired: - RELEASE_ASSERT(m_withArityCheck); - return m_withArityCheck; - case MustPreserveRegisters: { -#if ENABLE(JIT) - RegisterPreservationWrappers* wrappers = ensureWrappers(); - if (!wrappers->withArityCheck) - wrappers->withArityCheck = generateRegisterPreservationWrapper(vm, executable, m_withArityCheck); - return wrappers->withArityCheck.code(); -#else - RELEASE_ASSERT_NOT_REACHED(); -#endif - } } - } - RELEASE_ASSERT_NOT_REACHED(); - return CodePtr(); -} - -NativeJITCode::NativeJITCode(JITType jitType) - : JITCodeWithCodeRef(jitType) -{ -} - -NativeJITCode::NativeJITCode(CodeRef ref, JITType jitType) - : JITCodeWithCodeRef(ref, jitType) -{ -} - -NativeJITCode::~NativeJITCode() -{ -} - -void NativeJITCode::initializeCodeRef(CodeRef ref) -{ - ASSERT(!m_ref); - m_ref = ref; -} - -JITCode::CodePtr NativeJITCode::addressForCall( - VM&, ExecutableBase*, ArityCheckMode, RegisterPreservationMode) -{ - RELEASE_ASSERT(!!m_ref); - return m_ref.code(); -} - } // namespace JSC namespace WTF { void printInternal(PrintStream& out, JSC::JITCode::JITType type) { - out.print(JSC::JITCode::typeName(type)); + switch (type) { + case JSC::JITCode::None: + out.print("None"); + return; + case JSC::JITCode::HostCallThunk: + out.print("Host"); + return; + case JSC::JITCode::InterpreterThunk: + out.print("LLInt"); + return; + 
case JSC::JITCode::BaselineJIT: + out.print("Baseline"); + return; + case JSC::JITCode::DFGJIT: + out.print("DFG"); + return; + case JSC::JITCode::FTLJIT: + out.print("FTL"); + return; + default: + CRASH(); + return; + } } } // namespace WTF diff --git a/Source/JavaScriptCore/jit/JITCode.h b/Source/JavaScriptCore/jit/JITCode.h index 1d83cb59d..52c78111a 100644 --- a/Source/JavaScriptCore/jit/JITCode.h +++ b/Source/JavaScriptCore/jit/JITCode.h @@ -26,13 +26,14 @@ #ifndef JITCode_h #define JITCode_h -#include "ArityCheckMode.h" +#if ENABLE(JIT) || ENABLE(LLINT) #include "CallFrame.h" #include "Disassembler.h" #include "JITStubs.h" #include "JSCJSValue.h" +#include "LegacyProfiler.h" #include "MacroAssemblerCodeRef.h" -#include "RegisterPreservationMode.h" +#endif namespace JSC { @@ -46,7 +47,6 @@ class JITCode; } struct ProtoCallFrame; -class TrackedReferences; class VM; class JITCode : public ThreadSafeRefCounted<JITCode> { @@ -54,17 +54,8 @@ public: typedef MacroAssemblerCodeRef CodeRef; typedef MacroAssemblerCodePtr CodePtr; - enum JITType : uint8_t { - None, - HostCallThunk, - InterpreterThunk, - BaselineJIT, - DFGJIT, - FTLJIT - }; + enum JITType { None, HostCallThunk, InterpreterThunk, BaselineJIT, DFGJIT, FTLJIT }; - static const char* typeName(JITType); - static JITType bottomTierJIT() { return BaselineJIT; @@ -173,7 +164,7 @@ public: return jitCode->jitType(); } - virtual CodePtr addressForCall(VM&, ExecutableBase*, ArityCheckMode, RegisterPreservationMode) = 0; + virtual CodePtr addressForCall() = 0; virtual void* executableAddressAtOffset(size_t offset) = 0; void* executableAddress() { return executableAddressAtOffset(0); } virtual void* dataAddressAtOffset(size_t offset) = 0; @@ -184,9 +175,7 @@ public: virtual FTL::JITCode* ftl(); virtual FTL::ForOSREntryJITCode* ftlForOSREntry(); - virtual void validateReferences(const TrackedReferences&); - - JSValue execute(VM*, ProtoCallFrame*); + JSValue execute(VM*, ProtoCallFrame*, Register*); void* start() { return dataAddressAtOffset(0); } virtual size_t size() = 0; @@ -194,60 +183,29 @@ public: virtual bool contains(void*) = 0; + static PassRefPtr<JITCode> hostFunction(CodeRef); + private: JITType m_jitType; }; -class JITCodeWithCodeRef : public JITCode { -protected: - JITCodeWithCodeRef(JITType); - JITCodeWithCodeRef(CodeRef, JITType); - +class DirectJITCode : public JITCode { public: - virtual ~JITCodeWithCodeRef(); + DirectJITCode(JITType); + DirectJITCode(const CodeRef, JITType); + virtual ~DirectJITCode(); + + void initializeCodeRef(CodeRef ref); + virtual CodePtr addressForCall() override; virtual void* executableAddressAtOffset(size_t offset) override; virtual void* dataAddressAtOffset(size_t offset) override; virtual unsigned offsetOf(void* pointerIntoCode) override; virtual size_t size() override; virtual bool contains(void*) override; -protected: - CodeRef m_ref; -}; - -class DirectJITCode : public JITCodeWithCodeRef { -public: - DirectJITCode(JITType); - DirectJITCode(CodeRef, CodePtr withArityCheck, JITType); - virtual ~DirectJITCode(); - - void initializeCodeRef(CodeRef, CodePtr withArityCheck); - - virtual CodePtr addressForCall(VM&, ExecutableBase*, ArityCheckMode, RegisterPreservationMode) override; - private: - struct RegisterPreservationWrappers { - CodeRef withoutArityCheck; - CodeRef withArityCheck; - }; - - RegisterPreservationWrappers* ensureWrappers(); - - CodePtr m_withArityCheck; - - std::unique_ptr<RegisterPreservationWrappers> m_wrappers; -}; - -class NativeJITCode : public JITCodeWithCodeRef { -public: - 
NativeJITCode(JITType); - NativeJITCode(CodeRef, JITType); - virtual ~NativeJITCode(); - - void initializeCodeRef(CodeRef); - - virtual CodePtr addressForCall(VM&, ExecutableBase*, ArityCheckMode, RegisterPreservationMode) override; + CodeRef m_ref; }; } // namespace JSC diff --git a/Source/JavaScriptCore/jit/JITCompilationEffort.h b/Source/JavaScriptCore/jit/JITCompilationEffort.h index 29e95426a..5eb680178 100644 --- a/Source/JavaScriptCore/jit/JITCompilationEffort.h +++ b/Source/JavaScriptCore/jit/JITCompilationEffort.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012, 2015 Apple Inc. All rights reserved. + * Copyright (C) 2012 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions diff --git a/Source/JavaScriptCore/jit/JITDisassembler.cpp b/Source/JavaScriptCore/jit/JITDisassembler.cpp index 04e1b4d49..2d91a6466 100644 --- a/Source/JavaScriptCore/jit/JITDisassembler.cpp +++ b/Source/JavaScriptCore/jit/JITDisassembler.cpp @@ -26,13 +26,11 @@ #include "config.h" #include "JITDisassembler.h" -#if ENABLE(JIT) +#if ENABLE(JIT) && ENABLE(DISASSEMBLER) #include "CodeBlock.h" #include "CodeBlockWithJITType.h" #include "JIT.h" -#include "JSCInlines.h" -#include "LinkBuffer.h" #include <wtf/StringPrintStream.h> namespace JSC { @@ -166,5 +164,5 @@ void JITDisassembler::dumpDisassembly(PrintStream& out, LinkBuffer& linkBuffer, } // namespace JSC -#endif // ENABLE(JIT) +#endif // ENABLE(JIT) && ENABLE(DISASSEMBLER) diff --git a/Source/JavaScriptCore/jit/JITDisassembler.h b/Source/JavaScriptCore/jit/JITDisassembler.h index 6655de893..7ea13f47d 100644 --- a/Source/JavaScriptCore/jit/JITDisassembler.h +++ b/Source/JavaScriptCore/jit/JITDisassembler.h @@ -26,20 +26,20 @@ #ifndef JITDisassembler_h #define JITDisassembler_h +#include <wtf/Platform.h> + #if ENABLE(JIT) +#include "LinkBuffer.h" #include "MacroAssembler.h" +#include "ProfilerDatabase.h" #include <wtf/Vector.h> -#include <wtf/text/CString.h> namespace JSC { class CodeBlock; -class LinkBuffer; -namespace Profiler { -class Compilation; -} +#if ENABLE(DISASSEMBLER) class JITDisassembler { WTF_MAKE_FAST_ALLOCATED; @@ -86,6 +86,25 @@ private: MacroAssembler::Label m_endOfCode; }; +#else // ENABLE(DISASSEMBLER) + +class JITDisassembler { + WTF_MAKE_FAST_ALLOCATED; +public: + JITDisassembler(CodeBlock*) { } + + void setStartOfCode(MacroAssembler::Label) { } + void setForBytecodeMainPath(unsigned, MacroAssembler::Label) { } + void setForBytecodeSlowPath(unsigned, MacroAssembler::Label) { } + void setEndOfSlowPath(MacroAssembler::Label) { } + void setEndOfCode(MacroAssembler::Label) { } + + void dump(LinkBuffer&) { } + void reportToProfiler(Profiler::Compilation*, LinkBuffer&) { } +}; + +#endif // ENABLE(DISASSEMBLER) + } // namespace JSC #endif // ENABLE(JIT) diff --git a/Source/JavaScriptCore/jit/JITExceptions.cpp b/Source/JavaScriptCore/jit/JITExceptions.cpp index 4f6d9e8f0..8084f773b 100644 --- a/Source/JavaScriptCore/jit/JITExceptions.cpp +++ b/Source/JavaScriptCore/jit/JITExceptions.cpp @@ -27,6 +27,7 @@ #include "JITExceptions.h" #include "CallFrame.h" +#include "CallFrameInlines.h" #include "CodeBlock.h" #include "Interpreter.h" #include "JITStubs.h" @@ -35,22 +36,15 @@ #include "LLIntOpcode.h" #include "LLIntThunks.h" #include "Opcode.h" -#include "JSCInlines.h" +#include "Operations.h" #include "VM.h" namespace JSC { -void genericUnwind(VM* vm, ExecState* callFrame) +void genericUnwind(VM* vm, ExecState* callFrame, JSValue 
exceptionValue) { - if (Options::breakOnThrow()) { - dataLog("In call frame ", RawPointer(callFrame), " for code block ", *callFrame->codeBlock(), "\n"); - CRASH(); - } - - Exception* exception = vm->exception(); - RELEASE_ASSERT(exception); - VMEntryFrame* vmEntryFrame = vm->topVMEntryFrame; - HandlerInfo* handler = vm->interpreter->unwind(vmEntryFrame, callFrame, exception); // This may update vmEntryFrame and callFrame. + RELEASE_ASSERT(exceptionValue); + HandlerInfo* handler = vm->interpreter->unwind(callFrame, exceptionValue); // This may update callFrame. void* catchRoutine; Instruction* catchPCForInterpreter = 0; @@ -62,9 +56,8 @@ void genericUnwind(VM* vm, ExecState* callFrame) catchRoutine = catchPCForInterpreter->u.pointer; #endif } else - catchRoutine = LLInt::getCodePtr(handleUncaughtException); + catchRoutine = LLInt::getCodePtr(returnFromJavaScript); - vm->vmEntryFrameForThrow = vmEntryFrame; vm->callFrameForThrow = callFrame; vm->targetMachinePCForThrow = catchRoutine; vm->targetInterpreterPCForThrow = catchPCForInterpreter; diff --git a/Source/JavaScriptCore/jit/JITExceptions.h b/Source/JavaScriptCore/jit/JITExceptions.h index 43b92e7fb..376e269f1 100644 --- a/Source/JavaScriptCore/jit/JITExceptions.h +++ b/Source/JavaScriptCore/jit/JITExceptions.h @@ -28,14 +28,18 @@ #include "JSCJSValue.h" +#if ENABLE(JIT) || ENABLE(LLINT) + namespace JSC { class ExecState; class VM; -void genericUnwind(VM*, ExecState*); +void genericUnwind(VM*, ExecState*, JSValue exceptionValue); } // namespace JSC +#endif // ENABLE(JIT) || ENABLE(LLINT) + #endif // JITExceptions_h diff --git a/Source/JavaScriptCore/jit/JITInlineCacheGenerator.cpp b/Source/JavaScriptCore/jit/JITInlineCacheGenerator.cpp index b5c68086d..74b086a7c 100644 --- a/Source/JavaScriptCore/jit/JITInlineCacheGenerator.cpp +++ b/Source/JavaScriptCore/jit/JITInlineCacheGenerator.cpp @@ -30,7 +30,7 @@ #include "CodeBlock.h" #include "LinkBuffer.h" -#include "JSCInlines.h" +#include "Operations.h" namespace JSC { @@ -49,12 +49,12 @@ JITInlineCacheGenerator::JITInlineCacheGenerator(CodeBlock* codeBlock, CodeOrigi JITByIdGenerator::JITByIdGenerator( CodeBlock* codeBlock, CodeOrigin codeOrigin, const RegisterSet& usedRegisters, - JSValueRegs base, JSValueRegs value, SpillRegistersMode spillMode) + GPRReg callFrameRegister, JSValueRegs base, JSValueRegs value, bool registersFlushed) : JITInlineCacheGenerator(codeBlock, codeOrigin) , m_base(base) , m_value(value) { - m_stubInfo->patch.spillMode = spillMode; + m_stubInfo->patch.registersFlushed = registersFlushed; m_stubInfo->patch.usedRegisters = usedRegisters; // This is a convenience - in cases where the only registers you're using are base/value, @@ -62,6 +62,7 @@ JITByIdGenerator::JITByIdGenerator( m_stubInfo->patch.usedRegisters.set(base); m_stubInfo->patch.usedRegisters.set(value); + m_stubInfo->patch.callFrameRegister = static_cast<int8_t>(callFrameRegister); m_stubInfo->patch.baseGPR = static_cast<int8_t>(base.payloadGPR()); m_stubInfo->patch.valueGPR = static_cast<int8_t>(value.payloadGPR()); #if USE(JSVALUE32_64) @@ -101,23 +102,15 @@ void JITByIdGenerator::finalize(LinkBuffer& linkBuffer) void JITByIdGenerator::generateFastPathChecks(MacroAssembler& jit, GPRReg butterfly) { - m_structureCheck = jit.patchableBranch32WithPatch( + m_structureCheck = jit.patchableBranchPtrWithPatch( MacroAssembler::NotEqual, - MacroAssembler::Address(m_base.payloadGPR(), JSCell::structureIDOffset()), - m_structureImm, MacroAssembler::TrustedImm32(0)); + MacroAssembler::Address(m_base.payloadGPR(), 
JSCell::structureOffset()), + m_structureImm, MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(unusedPointer))); m_propertyStorageLoad = jit.convertibleLoadPtr( MacroAssembler::Address(m_base.payloadGPR(), JSObject::butterflyOffset()), butterfly); } -JITGetByIdGenerator::JITGetByIdGenerator( - CodeBlock* codeBlock, CodeOrigin codeOrigin, const RegisterSet& usedRegisters, - JSValueRegs base, JSValueRegs value, SpillRegistersMode spillMode) - : JITByIdGenerator(codeBlock, codeOrigin, usedRegisters, base, value, spillMode) -{ - RELEASE_ASSERT(base.payloadGPR() != value.tagGPR()); -} - void JITGetByIdGenerator::generateFastPath(MacroAssembler& jit) { generateFastPathChecks(jit, m_value.payloadGPR()); @@ -137,9 +130,11 @@ void JITGetByIdGenerator::generateFastPath(MacroAssembler& jit) JITPutByIdGenerator::JITPutByIdGenerator( CodeBlock* codeBlock, CodeOrigin codeOrigin, const RegisterSet& usedRegisters, - JSValueRegs base, JSValueRegs value, GPRReg scratch, SpillRegistersMode spillMode, - ECMAMode ecmaMode, PutKind putKind) - : JITByIdGenerator(codeBlock, codeOrigin, usedRegisters, base, value, spillMode) + GPRReg callFrameRegister, JSValueRegs base, JSValueRegs value, GPRReg scratch, + bool registersFlushed, ECMAMode ecmaMode, PutKind putKind) + : JITByIdGenerator( + codeBlock, codeOrigin, usedRegisters, callFrameRegister, base, value, + registersFlushed) , m_scratch(scratch) , m_ecmaMode(ecmaMode) , m_putKind(putKind) diff --git a/Source/JavaScriptCore/jit/JITInlineCacheGenerator.h b/Source/JavaScriptCore/jit/JITInlineCacheGenerator.h index 12f7534f6..6ff0c09b5 100644 --- a/Source/JavaScriptCore/jit/JITInlineCacheGenerator.h +++ b/Source/JavaScriptCore/jit/JITInlineCacheGenerator.h @@ -56,8 +56,8 @@ protected: JITByIdGenerator() { } JITByIdGenerator( - CodeBlock*, CodeOrigin, const RegisterSet&, JSValueRegs base, JSValueRegs value, - SpillRegistersMode spillMode); + CodeBlock*, CodeOrigin, const RegisterSet&, GPRReg callFrameRegister, + JSValueRegs base, JSValueRegs value, bool registersFlushed); public: void reportSlowPathCall(MacroAssembler::Label slowPathBegin, MacroAssembler::Call call) @@ -78,7 +78,7 @@ protected: JSValueRegs m_base; JSValueRegs m_value; - MacroAssembler::DataLabel32 m_structureImm; + MacroAssembler::DataLabelPtr m_structureImm; MacroAssembler::PatchableJump m_structureCheck; MacroAssembler::ConvertibleLoadLabel m_propertyStorageLoad; AssemblerLabel m_loadOrStore; @@ -95,8 +95,14 @@ public: JITGetByIdGenerator() { } JITGetByIdGenerator( - CodeBlock*, CodeOrigin, const RegisterSet& usedRegisters, JSValueRegs base, - JSValueRegs value, SpillRegistersMode spillMode); + CodeBlock* codeBlock, CodeOrigin codeOrigin, const RegisterSet& usedRegisters, + GPRReg callFrameRegister, JSValueRegs base, JSValueRegs value, + bool registersFlushed) + : JITByIdGenerator( + codeBlock, codeOrigin, usedRegisters, callFrameRegister, base, value, + registersFlushed) + { + } void generateFastPath(MacroAssembler&); }; @@ -106,8 +112,9 @@ public: JITPutByIdGenerator() { } JITPutByIdGenerator( - CodeBlock*, CodeOrigin, const RegisterSet& usedRegisters, JSValueRegs base, - JSValueRegs, GPRReg scratch, SpillRegistersMode spillMode, ECMAMode, PutKind); + CodeBlock*, CodeOrigin, const RegisterSet& usedRegisters, GPRReg callFrameRegister, + JSValueRegs base, JSValueRegs value, GPRReg scratch, bool registersFlushed, + ECMAMode, PutKind); void generateFastPath(MacroAssembler&); diff --git a/Source/JavaScriptCore/jit/JITInlines.h b/Source/JavaScriptCore/jit/JITInlines.h index 5d6869008..9330e773e 
100644 --- a/Source/JavaScriptCore/jit/JITInlines.h +++ b/Source/JavaScriptCore/jit/JITInlines.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008, 2012, 2013, 2015 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2012, 2013 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,57 +26,13 @@ #ifndef JITInlines_h #define JITInlines_h + #if ENABLE(JIT) -#include "JSCInlines.h" +#include "CallFrameInlines.h" namespace JSC { -#if USE(JSVALUE64) -inline MacroAssembler::JumpList JIT::emitDoubleGetByVal(Instruction* instruction, PatchableJump& badType) -{ - JumpList slowCases = emitDoubleLoad(instruction, badType); - moveDoubleTo64(fpRegT0, regT0); - sub64(tagTypeNumberRegister, regT0); - return slowCases; -} -#else -inline MacroAssembler::JumpList JIT::emitDoubleGetByVal(Instruction* instruction, PatchableJump& badType) -{ - JumpList slowCases = emitDoubleLoad(instruction, badType); - moveDoubleToInts(fpRegT0, regT0, regT1); - return slowCases; -} -#endif // USE(JSVALUE64) - -ALWAYS_INLINE MacroAssembler::JumpList JIT::emitLoadForArrayMode(Instruction* currentInstruction, JITArrayMode arrayMode, PatchableJump& badType) -{ - switch (arrayMode) { - case JITInt32: - return emitInt32Load(currentInstruction, badType); - case JITDouble: - return emitDoubleLoad(currentInstruction, badType); - case JITContiguous: - return emitContiguousLoad(currentInstruction, badType); - case JITArrayStorage: - return emitArrayStorageLoad(currentInstruction, badType); - default: - break; - } - RELEASE_ASSERT_NOT_REACHED(); - return MacroAssembler::JumpList(); -} - -inline MacroAssembler::JumpList JIT::emitContiguousGetByVal(Instruction* instruction, PatchableJump& badType, IndexingType expectedShape) -{ - return emitContiguousLoad(instruction, badType, expectedShape); -} - -inline MacroAssembler::JumpList JIT::emitArrayStorageGetByVal(Instruction* instruction, PatchableJump& badType) -{ - return emitArrayStorageLoad(instruction, badType); -} - ALWAYS_INLINE bool JIT::isOperandConstantImmediateDouble(int src) { return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble(); @@ -98,9 +54,26 @@ ALWAYS_INLINE void JIT::emitPutIntToCallFrameHeader(RegisterID from, JSStack::Ca #endif } +ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from) +{ + loadPtr(Address(from, entry * sizeof(Register)), to); +} + +ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from) +{ + load32(Address(from, entry * sizeof(Register)), to); +} + +#if USE(JSVALUE64) +ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader64(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from) +{ + load64(Address(from, entry * sizeof(Register)), to); +} +#endif + ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures) { - failures.append(branchStructure(NotEqual, Address(src, JSCell::structureIDOffset()), m_vm->stringStructure.get())); + failures.append(branchPtr(NotEqual, Address(src, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get()))); failures.append(branch32(NotEqual, MacroAssembler::Address(src, ThunkHelpers::jsStringLengthOffset()), TrustedImm32(1))); loadPtr(MacroAssembler::Address(src, ThunkHelpers::jsStringValueOffset()), dst); failures.append(branchTest32(Zero, dst)); @@ -119,7 +92,8 @@ ALWAYS_INLINE void 
JIT::emitLoadCharacterString(RegisterID src, RegisterID dst, ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function) { - ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set. + ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set. + Call nakedCall = nearCall(); m_calls.append(CallRecord(nakedCall, m_bytecodeOffset, function.executableAddress())); return nakedCall; @@ -146,16 +120,6 @@ ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithExceptionCheck(const Funct return call; } -#if OS(WINDOWS) && CPU(X86_64) -ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithExceptionCheckAndSlowPathReturnType(const FunctionPtr& function) -{ - updateTopCallFrame(); - MacroAssembler::Call call = appendCallWithSlowPathReturnType(function); - exceptionCheck(); - return call; -} -#endif - ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithCallFrameRollbackOnException(const FunctionPtr& function) { updateTopCallFrame(); // The callee is responsible for setting topCallFrame to their caller @@ -193,30 +157,6 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(C_JITOperation_E operation return appendCallWithExceptionCheck(operation); } -ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(C_JITOperation_EJsc operation, GPRReg arg1) -{ - setupArgumentsWithExecState(arg1); - return appendCallWithExceptionCheck(operation); -} - -ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(C_JITOperation_EJscZ operation, GPRReg arg1, int32_t arg2) -{ - setupArgumentsWithExecState(arg1, TrustedImm32(arg2)); - return appendCallWithExceptionCheck(operation); -} - -ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(C_JITOperation_EL operation, GPRReg arg1) -{ - setupArgumentsWithExecState(arg1); - return appendCallWithExceptionCheck(operation); -} - -ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(C_JITOperation_EL operation, TrustedImmPtr arg1) -{ - setupArgumentsWithExecState(arg1); - return appendCallWithExceptionCheck(operation); -} - ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(C_JITOperation_EO operation, GPRReg arg) { setupArgumentsWithExecState(arg); @@ -265,12 +205,6 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EC operatio return appendCallWithExceptionCheck(operation); } -ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJscC operation, int dst, GPRReg arg1, JSCell* cell) -{ - setupArgumentsWithExecState(arg1, TrustedImmPtr(cell)); - return appendCallWithExceptionCheckSetJSValueResult(operation, dst); -} - ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EP operation, int dst, void* pointer) { setupArgumentsWithExecState(TrustedImmPtr(pointer)); @@ -283,22 +217,16 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(WithProfileTag, J_JITOpera return appendCallWithExceptionCheckSetJSValueResultWithProfile(operation, dst); } -ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EPc operation, int dst, Instruction* bytecodePC) -{ - setupArgumentsWithExecState(TrustedImmPtr(bytecodePC)); - return appendCallWithExceptionCheckSetJSValueResult(operation, dst); -} - ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EZ operation, int dst, int32_t arg) { setupArgumentsWithExecState(TrustedImm32(arg)); return appendCallWithExceptionCheckSetJSValueResult(operation, dst); } -ALWAYS_INLINE 
MacroAssembler::Call JIT::callOperation(J_JITOperation_EZZ operation, int dst, int32_t arg1, int32_t arg2) +ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(P_JITOperation_EZ operation, int32_t op) { - setupArgumentsWithExecState(TrustedImm32(arg1), TrustedImm32(arg2)); - return appendCallWithExceptionCheckSetJSValueResult(operation, dst); + setupArgumentsWithExecState(TrustedImm32(op)); + return appendCallWithExceptionCheck(operation); } ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(S_JITOperation_ECC operation, RegisterID regOp1, RegisterID regOp2) @@ -313,17 +241,6 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(S_JITOperation_EOJss opera return appendCallWithExceptionCheck(operation); } -ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(Sprt_JITOperation_EZ operation, int32_t op) -{ -#if OS(WINDOWS) && CPU(X86_64) - setupArgumentsWithExecStateForCallWithSlowPathReturnType(TrustedImm32(op)); - return appendCallWithExceptionCheckAndSlowPathReturnType(operation); -#else - setupArgumentsWithExecState(TrustedImm32(op)); - return appendCallWithExceptionCheck(operation); -#endif -} - ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_E operation) { setupArgumentsExecState(); @@ -342,13 +259,6 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECC operati return appendCallWithExceptionCheck(operation); } -ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EE operation, RegisterID regOp) -{ - setupArgumentsWithExecState(regOp); - updateTopCallFrame(); - return appendCallWithExceptionCheck(operation); -} - ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EPc operation, Instruction* bytecodePC) { setupArgumentsWithExecState(TrustedImmPtr(bytecodePC)); @@ -381,19 +291,19 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperationWithCallFrameRollbackOnExce #if USE(JSVALUE64) -ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(Z_JITOperation_EJZZ operation, GPRReg arg1, int32_t arg2, int32_t arg3) +ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(F_JITOperation_EJZ operation, GPRReg arg1, int32_t arg3) { - setupArgumentsWithExecState(arg1, TrustedImm32(arg2), TrustedImm32(arg3)); + setupArgumentsWithExecState(arg1, TrustedImm32(arg3)); return appendCallWithExceptionCheck(operation); } -ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(F_JITOperation_EFJZZ operation, GPRReg arg1, GPRReg arg2, int32_t arg3, GPRReg arg4) +ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(F_JITOperation_EFJJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3) { - setupArgumentsWithExecState(arg1, arg2, TrustedImm32(arg3), arg4); + setupArgumentsWithExecState(arg1, arg2, arg3); return appendCallWithExceptionCheck(operation); } -ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ESsiJJI operation, StructureStubInfo* stubInfo, RegisterID regOp1, RegisterID regOp2, UniquedStringImpl* uid) +ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ESsiJJI operation, StructureStubInfo* stubInfo, RegisterID regOp1, RegisterID regOp2, StringImpl* uid) { setupArgumentsWithExecState(TrustedImmPtr(stubInfo), regOp1, regOp2, TrustedImmPtr(uid)); return appendCallWithExceptionCheck(operation); @@ -405,25 +315,7 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJJJ operat return appendCallWithExceptionCheck(operation); } -ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJJJAp operation, RegisterID regOp1, RegisterID regOp2, RegisterID regOp3, 
ArrayProfile* arrayProfile) -{ - setupArgumentsWithExecState(regOp1, regOp2, regOp3, TrustedImmPtr(arrayProfile)); - return appendCallWithExceptionCheck(operation); -} - -ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJJJBy operation, RegisterID regOp1, RegisterID regOp2, RegisterID regOp3, ByValInfo* byValInfo) -{ - setupArgumentsWithExecState(regOp1, regOp2, regOp3, TrustedImmPtr(byValInfo)); - return appendCallWithExceptionCheck(operation); -} - -ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EZJ operation, int dst, GPRReg arg) -{ - setupArgumentsWithExecState(TrustedImm32(dst), arg); - return appendCallWithExceptionCheck(operation); -} - -ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(JIT::WithProfileTag, J_JITOperation_ESsiJI operation, int dst, StructureStubInfo* stubInfo, GPRReg arg1, UniquedStringImpl* uid) +ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(JIT::WithProfileTag, J_JITOperation_ESsiJI operation, int dst, StructureStubInfo* stubInfo, GPRReg arg1, StringImpl* uid) { setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1, TrustedImmPtr(uid)); return appendCallWithExceptionCheckSetJSValueResultWithProfile(operation, dst); @@ -459,18 +351,6 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJ operati return appendCallWithExceptionCheckSetJSValueResult(operation, dst); } -ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJAp operation, int dst, GPRReg arg1, GPRReg arg2, ArrayProfile* arrayProfile) -{ - setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(arrayProfile)); - return appendCallWithExceptionCheckSetJSValueResult(operation, dst); -} - -ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJBy operation, int dst, GPRReg arg1, GPRReg arg2, ByValInfo* byValInfo) -{ - setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(byValInfo)); - return appendCallWithExceptionCheckSetJSValueResult(operation, dst); -} - ALWAYS_INLINE MacroAssembler::Call JIT::callOperationNoExceptionCheck(V_JITOperation_EJ operation, GPRReg arg1) { setupArgumentsWithExecState(arg1); @@ -496,15 +376,9 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(S_JITOperation_EJJ operati return appendCallWithExceptionCheck(operation); } -ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EZSymtabJ operation, int op1, SymbolTable* symbolTable, RegisterID regOp3) +ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EIdJZ operation, const Identifier* identOp1, RegisterID regOp2, int32_t op3) { - setupArgumentsWithExecState(TrustedImm32(op1), TrustedImmPtr(symbolTable), regOp3); - return appendCallWithExceptionCheck(operation); -} - -ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EZSymtabJ operation, int op1, SymbolTable* symbolTable, RegisterID regOp3) -{ - setupArgumentsWithExecState(TrustedImm32(op1), TrustedImmPtr(symbolTable), regOp3); + setupArgumentsWithExecState(TrustedImmPtr(identOp1), regOp2, TrustedImm32(op3)); return appendCallWithExceptionCheck(operation); } @@ -514,12 +388,6 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJ operatio return appendCallWithExceptionCheck(operation); } -ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJIdJ operation, RegisterID regOp1, const Identifier* identOp2, RegisterID regOp3) -{ - setupArgumentsWithExecState(regOp1, TrustedImmPtr(identOp2), regOp3); - return appendCallWithExceptionCheck(operation); -} - ALWAYS_INLINE 
MacroAssembler::Call JIT::callOperation(V_JITOperation_EJIdJJ operation, RegisterID regOp1, const Identifier* identOp2, RegisterID regOp3, RegisterID regOp4) { setupArgumentsWithExecState(regOp1, TrustedImmPtr(identOp2), regOp3, regOp4); @@ -564,15 +432,20 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperationNoExceptionCheck(V_JITOpera return appendCall(operation); } -ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(Z_JITOperation_EJZZ operation, GPRReg arg1Tag, GPRReg arg1Payload, int32_t arg2, int32_t arg3) +ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(F_JITOperation_EJZ operation, GPRReg arg1Tag, GPRReg arg1Payload, int32_t arg2) { - setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, TrustedImm32(arg2), TrustedImm32(arg3)); +#if CPU(SH4) + // We have to put arg3 in the 4th argument register (r7), as the 64-bit value arg2 will be put on the stack for the SH4 architecture. + setupArgumentsWithExecState(arg1Payload, arg1Tag, TrustedImm32(arg2)); +#else + setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, TrustedImm32(arg2)); +#endif return appendCallWithExceptionCheck(operation); } -ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(F_JITOperation_EFJZZ operation, GPRReg arg1, GPRReg arg2Tag, GPRReg arg2Payload, int32_t arg3, GPRReg arg4) +ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(F_JITOperation_EFJJ operation, GPRReg arg1, GPRReg arg2Tag, GPRReg arg2Payload, GPRReg arg3Tag, GPRReg arg3Payload) { - setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag, TrustedImm32(arg3), arg4); + setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag, arg3Payload, arg3Tag); return appendCallWithExceptionCheck(operation); } @@ -588,7 +461,7 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJ operatio return appendCallWithExceptionCheckSetJSValueResult(operation, dst); } -ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(JIT::WithProfileTag, J_JITOperation_ESsiJI operation, int dst, StructureStubInfo* stubInfo, GPRReg arg1Tag, GPRReg arg1Payload, UniquedStringImpl* uid) +ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(JIT::WithProfileTag, J_JITOperation_ESsiJI operation, int dst, StructureStubInfo* stubInfo, GPRReg arg1Tag, GPRReg arg1Payload, StringImpl* uid) { setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1Payload, arg1Tag, TrustedImmPtr(uid)); return appendCallWithExceptionCheckSetJSValueResultWithProfile(operation, dst); @@ -606,18 +479,6 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJ operati return appendCallWithExceptionCheckSetJSValueResult(operation, dst); } -ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJAp operation, int dst, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload, ArrayProfile* arrayProfile) -{ - setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, SH4_32BIT_DUMMY_ARG arg2Payload, arg2Tag, TrustedImmPtr(arrayProfile)); - return appendCallWithExceptionCheckSetJSValueResult(operation, dst); -} - -ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJBy operation, int dst, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload, ByValInfo* byValInfo) -{ - setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, SH4_32BIT_DUMMY_ARG arg2Payload, arg2Tag, TrustedImmPtr(byValInfo)); - return appendCallWithExceptionCheckSetJSValueResult(operation, dst); -} - ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(JIT::WithProfileTag, J_JITOperation_EJJ
operation, int dst, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload) { setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, SH4_32BIT_DUMMY_ARG arg2Payload, arg2Tag); @@ -642,12 +503,6 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(S_JITOperation_EJJ operati return appendCallWithExceptionCheck(operation); } -ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECIC operation, RegisterID regOp1, const Identifier* identOp2, RegisterID regOp3) -{ - setupArgumentsWithExecState(regOp1, TrustedImmPtr(identOp2), regOp3); - return appendCallWithExceptionCheck(operation); -} - ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECICC operation, RegisterID regOp1, const Identifier* identOp2, RegisterID regOp3, RegisterID regOp4) { setupArgumentsWithExecState(regOp1, TrustedImmPtr(identOp2), regOp3, regOp4); @@ -660,13 +515,13 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJ operatio return appendCallWithExceptionCheck(operation); } -ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EZSymtabJ operation, int32_t op1, SymbolTable* symbolTable, RegisterID regOp3Tag, RegisterID regOp3Payload) +ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EIdJZ operation, const Identifier* identOp1, RegisterID regOp2Tag, RegisterID regOp2Payload, int32_t op3) { - setupArgumentsWithExecState(TrustedImm32(op1), TrustedImmPtr(symbolTable), EABI_32BIT_DUMMY_ARG regOp3Payload, regOp3Tag); + setupArgumentsWithExecState(TrustedImmPtr(identOp1), regOp2Payload, regOp2Tag, TrustedImm32(op3)); return appendCallWithExceptionCheck(operation); } -ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ESsiJJI operation, StructureStubInfo* stubInfo, RegisterID regOp1Tag, RegisterID regOp1Payload, RegisterID regOp2Tag, RegisterID regOp2Payload, UniquedStringImpl* uid) +ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ESsiJJI operation, StructureStubInfo* stubInfo, RegisterID regOp1Tag, RegisterID regOp1Payload, RegisterID regOp2Tag, RegisterID regOp2Payload, StringImpl* uid) { setupArgumentsWithExecState(TrustedImmPtr(stubInfo), regOp1Payload, regOp1Tag, regOp2Payload, regOp2Tag, TrustedImmPtr(uid)); return appendCallWithExceptionCheck(operation); @@ -678,24 +533,6 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJJJ operat return appendCallWithExceptionCheck(operation); } -ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJJJAp operation, RegisterID regOp1Tag, RegisterID regOp1Payload, RegisterID regOp2Tag, RegisterID regOp2Payload, RegisterID regOp3Tag, RegisterID regOp3Payload, ArrayProfile* arrayProfile) -{ - setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG regOp1Payload, regOp1Tag, SH4_32BIT_DUMMY_ARG regOp2Payload, regOp2Tag, regOp3Payload, regOp3Tag, TrustedImmPtr(arrayProfile)); - return appendCallWithExceptionCheck(operation); -} - -ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJJJBy operation, RegisterID regOp1Tag, RegisterID regOp1Payload, RegisterID regOp2Tag, RegisterID regOp2Payload, RegisterID regOp3Tag, RegisterID regOp3Payload, ByValInfo* byValInfo) -{ - setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG regOp1Payload, regOp1Tag, SH4_32BIT_DUMMY_ARG regOp2Payload, regOp2Tag, regOp3Payload, regOp3Tag, TrustedImmPtr(byValInfo)); - return appendCallWithExceptionCheck(operation); -} - -ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EZJ operation, int dst, 
RegisterID regOp1Tag, RegisterID regOp1Payload) -{ - setupArgumentsWithExecState(TrustedImm32(dst), regOp1Payload, regOp1Tag); - return appendCallWithExceptionCheck(operation); -} - ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJZ operation, RegisterID regOp1Tag, RegisterID regOp1Payload, int32_t op2) { setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG regOp1Payload, regOp1Tag, TrustedImm32(op2)); @@ -715,7 +552,7 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJZJ operat ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure) { - return branchStructure(NotEqual, Address(reg, JSCell::structureIDOffset()), structure); + return branchPtr(NotEqual, Address(reg, JSCell::structureOffset()), TrustedImmPtr(structure)); } ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg) @@ -726,14 +563,14 @@ ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& ALWAYS_INLINE void JIT::addSlowCase(Jump jump) { - ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set. + ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set. m_slowCases.append(SlowCaseEntry(jump, m_bytecodeOffset)); } ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList) { - ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set. + ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set. const JumpList::JumpVector& jumpVector = jumpList.jumps(); size_t size = jumpVector.size(); @@ -743,7 +580,7 @@ ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList) ALWAYS_INLINE void JIT::addSlowCase() { - ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set. + ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set. Jump emptyJump; // Doing it this way to make Windows happy. m_slowCases.append(SlowCaseEntry(emptyJump, m_bytecodeOffset)); @@ -751,26 +588,21 @@ ALWAYS_INLINE void JIT::addSlowCase() ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset) { - ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set. + ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set. m_jmpTable.append(JumpTable(jump, m_bytecodeOffset + relativeOffset)); } ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset) { - ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set. + ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set. 
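// Aside on the assertions above: this diff toggles between two spellings of the same
// "bytecode offset not set" sentinel. A minimal standalone check (a sketch, not JSC
// code) of why the two spellings are interchangeable:
#include <limits>
static_assert(static_cast<unsigned>(-1) == std::numeric_limits<unsigned>::max(),
    "conversion of -1 to unsigned wraps modulo 2^N, so both forms name the same sentinel");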
jump.linkTo(m_labels[m_bytecodeOffset + relativeOffset], this); } -ALWAYS_INLINE JIT::Jump JIT::emitJumpIfCellObject(RegisterID cellReg) -{ - return branch8(AboveOrEqual, Address(cellReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType)); -} - -ALWAYS_INLINE JIT::Jump JIT::emitJumpIfCellNotObject(RegisterID cellReg) +ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotObject(RegisterID structureReg) { - return branch8(Below, Address(cellReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType)); + return branch8(Below, Address(structureReg, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType)); } #if ENABLE(SAMPLING_FLAGS) @@ -841,11 +673,11 @@ inline void JIT::emitAllocateJSObject(RegisterID allocator, StructureType struct loadPtr(Address(result), scratch); storePtr(scratch, Address(allocator, MarkedAllocator::offsetOfFreeListHead())); + // initialize the object's structure + storePtr(structure, Address(result, JSCell::structureOffset())); + // initialize the object's property storage pointer storePtr(TrustedImmPtr(0), Address(result, JSObject::butterflyOffset())); - - // initialize the object's structure - emitStoreStructureWithTypeInfo(structure, result, scratch); } inline void JIT::emitValueProfilingSite(ValueProfile* valueProfile) @@ -881,19 +713,22 @@ inline void JIT::emitValueProfilingSite() emitValueProfilingSite(m_bytecodeOffset); } -inline void JIT::emitArrayProfilingSiteWithCell(RegisterID cell, RegisterID indexingType, ArrayProfile* arrayProfile) +inline void JIT::emitArrayProfilingSite(RegisterID structureAndIndexingType, RegisterID scratch, ArrayProfile* arrayProfile) { - if (shouldEmitProfiling()) { - load32(MacroAssembler::Address(cell, JSCell::structureIDOffset()), indexingType); - store32(indexingType, arrayProfile->addressOfLastSeenStructureID()); - } + UNUSED_PARAM(scratch); // We had found this scratch register useful here before, so I will keep it for now. 
+ + RegisterID structure = structureAndIndexingType; + RegisterID indexingType = structureAndIndexingType; + + if (shouldEmitProfiling()) + storePtr(structure, arrayProfile->addressOfLastSeenStructure()); - load8(Address(cell, JSCell::indexingTypeOffset()), indexingType); + load8(Address(structure, Structure::indexingTypeOffset()), indexingType); } -inline void JIT::emitArrayProfilingSiteForBytecodeIndexWithCell(RegisterID cell, RegisterID indexingType, unsigned bytecodeIndex) +inline void JIT::emitArrayProfilingSiteForBytecodeIndex(RegisterID structureAndIndexingType, RegisterID scratch, unsigned bytecodeIndex) { - emitArrayProfilingSiteWithCell(cell, indexingType, m_codeBlock->getOrAddArrayProfile(bytecodeIndex)); + emitArrayProfilingSite(structureAndIndexingType, scratch, m_codeBlock->getOrAddArrayProfile(bytecodeIndex)); } inline void JIT::emitArrayProfileStoreToHoleSpecialCase(ArrayProfile* arrayProfile) @@ -984,7 +819,7 @@ inline void JIT::emitLoadDouble(int index, FPRegisterID value) { if (m_codeBlock->isConstantRegisterIndex(index)) { WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index); - loadDouble(TrustedImmPtr(&inConstantPool), value); + loadDouble(&inConstantPool, value); } else loadDouble(addressFor(index), value); } @@ -1096,7 +931,7 @@ ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(int op1, int op2, int& op // get arg puts an arg from the SF register array into a h/w register ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst) { - ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set. + ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set. // TODO: we want to reuse values that are already in registers if we can - add a register allocator! 
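// Aside on the emitAllocateJSObject hunk above: the inline fast path is a free-list
// pop from the MarkedAllocator. The same idea with standalone, illustrative types
// (a sketch under assumed names, not JSC's API):
struct FreeCell { FreeCell* next; };
struct FreeList { FreeCell* head; };

inline void* tryAllocateFast(FreeList& list)
{
    FreeCell* cell = list.head;
    if (!cell)
        return nullptr; // empty list: the emitted code branches to the slow path instead
    list.head = cell->next; // mirrors the loadPtr/storePtr pair on offsetOfFreeListHead()
    return cell;
}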
if (m_codeBlock->isConstantRegisterIndex(src)) { @@ -1184,7 +1019,7 @@ inline void JIT::emitLoadDouble(int index, FPRegisterID value) { if (m_codeBlock->isConstantRegisterIndex(index)) { WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index); - loadDouble(TrustedImmPtr(&inConstantPool), value); + loadDouble(&inConstantPool, value); } else loadDouble(addressFor(index), value); } @@ -1208,11 +1043,6 @@ ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg) return branch64(Below, reg, tagTypeNumberRegister); } -ALWAYS_INLINE JIT::PatchableJump JIT::emitPatchableJumpIfNotImmediateInteger(RegisterID reg) -{ - return patchableBranch64(Below, reg, tagTypeNumberRegister); -} - ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch) { move(reg1, scratch); @@ -1247,26 +1077,6 @@ ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg) #endif // USE(JSVALUE32_64) -template <typename T> -JIT::Jump JIT::branchStructure(RelationalCondition condition, T leftHandSide, Structure* structure) -{ -#if USE(JSVALUE64) - return branch32(condition, leftHandSide, TrustedImm32(structure->id())); -#else - return branchPtr(condition, leftHandSide, TrustedImmPtr(structure)); -#endif -} - -template <typename T> -MacroAssembler::Jump branchStructure(MacroAssembler& jit, MacroAssembler::RelationalCondition condition, T leftHandSide, Structure* structure) -{ -#if USE(JSVALUE64) - return jit.branch32(condition, leftHandSide, MacroAssembler::TrustedImm32(structure->id())); -#else - return jit.branchPtr(condition, leftHandSide, MacroAssembler::TrustedImmPtr(structure)); -#endif -} - } // namespace JSC #endif // ENABLE(JIT) diff --git a/Source/JavaScriptCore/jit/JITOpcodes.cpp b/Source/JavaScriptCore/jit/JITOpcodes.cpp index 84cbed519..2bdae1914 100644 --- a/Source/JavaScriptCore/jit/JITOpcodes.cpp +++ b/Source/JavaScriptCore/jit/JITOpcodes.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2009, 2012-2015 Apple Inc. All rights reserved. + * Copyright (C) 2009, 2012, 2013 Apple Inc. All rights reserved. 
* Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com> * * Redistribution and use in source and binary forms, with or without @@ -28,24 +28,18 @@ #if ENABLE(JIT) #include "JIT.h" -#include "BasicBlockLocation.h" +#include "Arguments.h" #include "CopiedSpaceInlines.h" #include "Debugger.h" -#include "Exception.h" #include "Heap.h" #include "JITInlines.h" #include "JSArray.h" #include "JSCell.h" #include "JSFunction.h" -#include "JSPropertyNameEnumerator.h" +#include "JSPropertyNameIterator.h" #include "LinkBuffer.h" -#include "MaxFrameExtentForSlowPathCall.h" -#include "RepatchBuffer.h" #include "SlowPathCall.h" -#include "TypeLocation.h" -#include "TypeProfilerLog.h" #include "VirtualRegister.h" -#include "Watchdog.h" namespace JSC { @@ -65,12 +59,21 @@ void JIT::emit_op_mov(Instruction* currentInstruction) emitPutVirtualRegister(dst); } +void JIT::emit_op_captured_mov(Instruction* currentInstruction) +{ + int dst = currentInstruction[1].u.operand; + int src = currentInstruction[2].u.operand; + + emitGetVirtualRegister(src, regT0); + emitNotifyWrite(regT0, regT1, currentInstruction[3].u.watchpointSet); + emitPutVirtualRegister(dst); +} void JIT::emit_op_end(Instruction* currentInstruction) { RELEASE_ASSERT(returnValueGPR != callFrameRegister); emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueGPR); - emitFunctionEpilogue(); + restoreReturnAddressBeforeReturn(Address(callFrameRegister, CallFrame::returnPCOffset())); ret(); } @@ -114,7 +117,8 @@ void JIT::emit_op_check_has_instance(Instruction* currentInstruction) emitJumpSlowCaseIfNotJSCell(regT0, baseVal); // Check that baseVal 'ImplementsHasInstance'. - addSlowCase(branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance))); + loadPtr(Address(regT0, JSCell::structureOffset()), regT0); + addSlowCase(branchTest8(Zero, Address(regT0, Structure::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance))); } void JIT::emit_op_instanceof(Instruction* currentInstruction) @@ -133,7 +137,8 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction) emitJumpSlowCaseIfNotJSCell(regT1, proto); // Check that prototype is an object - addSlowCase(emitJumpIfCellNotObject(regT1)); + loadPtr(Address(regT1, JSCell::structureOffset()), regT3); + addSlowCase(emitJumpIfNotObject(regT3)); // Optimistically load the result true, and start looping. // Initially, regT1 still contains proto and regT2 still contains value. @@ -143,7 +148,7 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction) // Load the prototype of the object in regT2. If this is equal to regT1 - WIN! // Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again. 
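// The instanceof loop being rewritten below walks the prototype chain. In plain C++
// terms (a sketch; JSCell and prototypeOf are stand-ins for the structure load and
// the Structure::prototypeOffset() load emitted around this point):
struct JSCell;
JSCell* prototypeOf(JSCell*); // assumed helper: next object on the chain, null at the end

bool walkPrototypeChain(JSCell* value, JSCell* proto)
{
    for (JSCell* current = prototypeOf(value); current; current = prototypeOf(current)) {
        if (current == proto)
            return true; // the "WIN" branch (isInstance)
    }
    return false; // hit null without meeting proto
}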
- emitLoadStructure(regT2, regT2, regT3); + loadPtr(Address(regT2, JSCell::structureOffset()), regT2); load64(Address(regT2, Structure::prototypeOffset()), regT2); Jump isInstance = branchPtr(Equal, regT2, regT1); emitJumpIfJSCell(regT2).linkTo(loop, this); @@ -168,12 +173,12 @@ void JIT::emit_op_is_undefined(Instruction* currentInstruction) Jump done = jump(); isCell.link(this); - Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); + loadPtr(Address(regT0, JSCell::structureOffset()), regT1); + Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT1, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); move(TrustedImm32(0), regT0); Jump notMasqueradesAsUndefined = jump(); isMasqueradesAsUndefined.link(this); - emitLoadStructure(regT0, regT1, regT2); move(TrustedImmPtr(m_codeBlock->globalObject()), regT0); loadPtr(Address(regT1, Structure::globalObjectOffset()), regT1); comparePtr(Equal, regT0, regT1, regT0); @@ -215,7 +220,8 @@ void JIT::emit_op_is_string(Instruction* currentInstruction) emitGetVirtualRegister(value, regT0); Jump isNotCell = emitJumpIfNotJSCell(regT0); - compare8(Equal, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType), regT0); + loadPtr(Address(regT0, JSCell::structureOffset()), regT1); + compare8(Equal, Address(regT1, Structure::typeInfoTypeOffset()), TrustedImm32(StringType), regT0); emitTagAsBoolImmediate(regT0); Jump done = jump(); @@ -226,26 +232,48 @@ void JIT::emit_op_is_string(Instruction* currentInstruction) emitPutVirtualRegister(dst); } -void JIT::emit_op_is_object(Instruction* currentInstruction) +void JIT::emit_op_tear_off_activation(Instruction* currentInstruction) { - int dst = currentInstruction[1].u.operand; - int value = currentInstruction[2].u.operand; + int activation = currentInstruction[1].u.operand; + Jump activationNotCreated = branchTest64(Zero, addressFor(activation)); + emitGetVirtualRegister(activation, regT0); + callOperation(operationTearOffActivation, regT0); + activationNotCreated.link(this); +} - emitGetVirtualRegister(value, regT0); - Jump isNotCell = emitJumpIfNotJSCell(regT0); +void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction) +{ + int arguments = currentInstruction[1].u.operand; + int activation = currentInstruction[2].u.operand; - compare8(AboveOrEqual, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType), regT0); - emitTagAsBoolImmediate(regT0); - Jump done = jump(); + Jump argsNotCreated = branchTest64(Zero, Address(callFrameRegister, sizeof(Register) * (unmodifiedArgumentsRegister(VirtualRegister(arguments)).offset()))); + emitGetVirtualRegister(unmodifiedArgumentsRegister(VirtualRegister(arguments)).offset(), regT0); + emitGetVirtualRegister(activation, regT1); + callOperation(operationTearOffArguments, regT0, regT1); + argsNotCreated.link(this); +} - isNotCell.link(this); - move(TrustedImm32(ValueFalse), regT0); +void JIT::emit_op_ret(Instruction* currentInstruction) +{ + ASSERT(callFrameRegister != regT1); + ASSERT(regT1 != returnValueGPR); + ASSERT(returnValueGPR != callFrameRegister); - done.link(this); - emitPutVirtualRegister(dst); + // Return the result in %eax. + emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueGPR); + + // Grab the return address. + emitGetReturnPCFromCallFrameHeaderPtr(regT1); + + // Restore our caller's "r". + emitGetCallerFrameFromCallFrameHeaderPtr(callFrameRegister); + + // Return. 
+ restoreReturnAddressBeforeReturn(regT1); + ret(); } -void JIT::emit_op_ret(Instruction* currentInstruction) +void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction) { ASSERT(callFrameRegister != regT1); ASSERT(regT1 != returnValueGPR); @@ -253,9 +281,33 @@ void JIT::emit_op_ret(Instruction* currentInstruction) // Return the result in %eax. emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueGPR); + Jump notJSCell = emitJumpIfNotJSCell(returnValueGPR); + loadPtr(Address(returnValueGPR, JSCell::structureOffset()), regT2); + Jump notObject = emitJumpIfNotObject(regT2); + + // Grab the return address. + emitGetReturnPCFromCallFrameHeaderPtr(regT1); - checkStackPointerAlignment(); - emitFunctionEpilogue(); + // Restore our caller's "r". + emitGetCallerFrameFromCallFrameHeaderPtr(callFrameRegister); + + // Return. + restoreReturnAddressBeforeReturn(regT1); + ret(); + + // Return 'this' in %eax. + notJSCell.link(this); + notObject.link(this); + emitGetVirtualRegister(currentInstruction[2].u.operand, returnValueGPR); + + // Grab the return address. + emitGetReturnPCFromCallFrameHeaderPtr(regT1); + + // Restore our caller's "r". + emitGetCallerFrameFromCallFrameHeaderPtr(callFrameRegister); + + // Return. + restoreReturnAddressBeforeReturn(regT1); ret(); } @@ -267,7 +319,7 @@ void JIT::emit_op_to_primitive(Instruction* currentInstruction) emitGetVirtualRegister(src, regT0); Jump isImm = emitJumpIfNotJSCell(regT0); - addSlowCase(emitJumpIfCellObject(regT0)); + addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get()))); isImm.link(this); if (dst != src) @@ -318,8 +370,8 @@ void JIT::emit_op_jeq_null(Instruction* currentInstruction) Jump isImmediate = emitJumpIfNotJSCell(regT0); // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure. - Jump isNotMasqueradesAsUndefined = branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); - emitLoadStructure(regT0, regT2, regT1); + loadPtr(Address(regT0, JSCell::structureOffset()), regT2); + Jump isNotMasqueradesAsUndefined = branchTest8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); move(TrustedImmPtr(m_codeBlock->globalObject()), regT0); addJump(branchPtr(Equal, Address(regT2, Structure::globalObjectOffset()), regT0), target); Jump masqueradesGlobalObjectIsForeign = jump(); @@ -341,8 +393,8 @@ void JIT::emit_op_jneq_null(Instruction* currentInstruction) Jump isImmediate = emitJumpIfNotJSCell(regT0); // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure. 
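// For orientation, the decision this MasqueradesAsUndefined fast path encodes (a
// sketch with illustrative names and an illustrative flag bit, not JSC's TypeInfo
// layout): a cell compares equal to null/undefined only when its structure carries
// the MasqueradesAsUndefined flag and the cell's global object is the lexical one,
// which is why both arms of this hunk compare Structure::globalObjectOffset()
// against m_codeBlock->globalObject().
constexpr unsigned char kMasqueradesAsUndefined = 1; // illustrative bit position
inline bool equalsUndefined(unsigned char typeInfoFlags, const void* structureGlobalObject, const void* lexicalGlobalObject)
{
    return (typeInfoFlags & kMasqueradesAsUndefined) && structureGlobalObject == lexicalGlobalObject;
}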
- addJump(branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target); - emitLoadStructure(regT0, regT2, regT1); + loadPtr(Address(regT0, JSCell::structureOffset()), regT2); + addJump(branchTest8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target); move(TrustedImmPtr(m_codeBlock->globalObject()), regT0); addJump(branchPtr(NotEqual, Address(regT2, Structure::globalObjectOffset()), regT0), target); Jump wasNotImmediate = jump(); @@ -424,10 +476,117 @@ void JIT::emit_op_throw(Instruction* currentInstruction) jumpToExceptionHandler(); } +void JIT::emit_op_get_pnames(Instruction* currentInstruction) +{ + int dst = currentInstruction[1].u.operand; + int base = currentInstruction[2].u.operand; + int i = currentInstruction[3].u.operand; + int size = currentInstruction[4].u.operand; + int breakTarget = currentInstruction[5].u.operand; + + JumpList isNotObject; + + emitGetVirtualRegister(base, regT0); + if (!m_codeBlock->isKnownNotImmediate(base)) + isNotObject.append(emitJumpIfNotJSCell(regT0)); + if (base != m_codeBlock->thisRegister().offset() || m_codeBlock->isStrictMode()) { + loadPtr(Address(regT0, JSCell::structureOffset()), regT2); + isNotObject.append(emitJumpIfNotObject(regT2)); + } + + // We could inline the case where you have a valid cache, but + // this call doesn't seem to be hot. + Label isObject(this); + callOperation(operationGetPNames, regT0); + emitStoreCell(dst, returnValueGPR); + load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3); + store64(tagTypeNumberRegister, addressFor(i)); + store32(TrustedImm32(Int32Tag), intTagFor(size)); + store32(regT3, intPayloadFor(size)); + Jump end = jump(); + + isNotObject.link(this); + move(regT0, regT1); + and32(TrustedImm32(~TagBitUndefined), regT1); + addJump(branch32(Equal, regT1, TrustedImm32(ValueNull)), breakTarget); + callOperation(operationToObject, base, regT0); + jump().linkTo(isObject, this); + + end.link(this); +} + +void JIT::emit_op_next_pname(Instruction* currentInstruction) +{ + int dst = currentInstruction[1].u.operand; + int base = currentInstruction[2].u.operand; + int i = currentInstruction[3].u.operand; + int size = currentInstruction[4].u.operand; + int it = currentInstruction[5].u.operand; + int target = currentInstruction[6].u.operand; + + JumpList callHasProperty; + + Label begin(this); + load32(intPayloadFor(i), regT0); + Jump end = branch32(Equal, regT0, intPayloadFor(size)); + + // Grab key @ i + loadPtr(addressFor(it), regT1); + loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2); + + load64(BaseIndex(regT2, regT0, TimesEight), regT2); + + emitPutVirtualRegister(dst, regT2); + + // Increment i + add32(TrustedImm32(1), regT0); + store32(regT0, intPayloadFor(i)); + + // Verify that i is valid: + emitGetVirtualRegister(base, regT0); + + // Test base's structure + loadPtr(Address(regT0, JSCell::structureOffset()), regT2); + callHasProperty.append(branchPtr(NotEqual, regT2, Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))))); + + // Test base's prototype chain + loadPtr(Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedPrototypeChain))), regT3); + loadPtr(Address(regT3, OBJECT_OFFSETOF(StructureChain, m_vector)), regT3); + addJump(branchTestPtr(Zero, Address(regT3)), target); + + Label checkPrototype(this); + load64(Address(regT2, Structure::prototypeOffset()), regT2); + 
callHasProperty.append(emitJumpIfNotJSCell(regT2)); + loadPtr(Address(regT2, JSCell::structureOffset()), regT2); + callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3))); + addPtr(TrustedImm32(sizeof(Structure*)), regT3); + branchTestPtr(NonZero, Address(regT3)).linkTo(checkPrototype, this); + + // Continue loop. + addJump(jump(), target); + + // Slow case: Ask the object if i is valid. + callHasProperty.link(this); + emitGetVirtualRegister(dst, regT1); + callOperation(operationHasProperty, regT0, regT1); + + // Test for valid key. + addJump(branchTest32(NonZero, regT0), target); + jump().linkTo(begin, this); + + // End of loop. + end.link(this); +} + void JIT::emit_op_push_with_scope(Instruction* currentInstruction) { - JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_push_with_scope); - slowPathCall.call(); + emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); + callOperation(operationPushWithScope, regT0); +} + +void JIT::emit_op_pop_scope(Instruction*) +{ + callOperation(operationPopScope); } void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type) @@ -481,51 +640,19 @@ void JIT::emit_op_to_number(Instruction* currentInstruction) emitPutVirtualRegister(currentInstruction[1].u.operand); } -void JIT::emit_op_to_string(Instruction* currentInstruction) +void JIT::emit_op_push_name_scope(Instruction* currentInstruction) { - int srcVReg = currentInstruction[2].u.operand; - emitGetVirtualRegister(srcVReg, regT0); - - addSlowCase(emitJumpIfNotJSCell(regT0)); - addSlowCase(branch8(NotEqual, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType))); - - emitPutVirtualRegister(currentInstruction[1].u.operand); + emitGetVirtualRegister(currentInstruction[2].u.operand, regT0); + callOperation(operationPushNameScope, &m_codeBlock->identifier(currentInstruction[1].u.operand), regT0, currentInstruction[3].u.operand); } void JIT::emit_op_catch(Instruction* currentInstruction) { - // Gotta restore the tag registers. We could be throwing from FTL, which may - // clobber them. 
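// Context for the tag-register lines removed below: JSVALUE64 builds keep two constant
// masks pinned in registers and rematerialize them wherever non-JIT code can jump in,
// such as this catch handler. A sketch of the encoding those masks test (constants as
// used by JSVALUE64; the helper names are illustrative):
constexpr unsigned long long TagTypeNumber = 0xffff000000000000ull;
constexpr unsigned long long TagBitTypeOther = 0x2ull;
constexpr unsigned long long TagMask = TagTypeNumber | TagBitTypeOther;

inline bool isInt32(unsigned long long bits) { return (bits & TagTypeNumber) == TagTypeNumber; }
inline bool isNumber(unsigned long long bits) { return bits & TagTypeNumber; }
inline bool isCell(unsigned long long bits) { return !(bits & TagMask); }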
- move(TrustedImm64(TagTypeNumber), tagTypeNumberRegister); - move(TrustedImm64(TagMask), tagMaskRegister); - move(TrustedImmPtr(m_vm), regT3); load64(Address(regT3, VM::callFrameForThrowOffset()), callFrameRegister); - load64(Address(regT3, VM::vmEntryFrameForThrowOffset()), regT0); - store64(regT0, Address(regT3, VM::topVMEntryFrameOffset())); - - addPtr(TrustedImm32(stackPointerOffsetFor(codeBlock()) * sizeof(Register)), callFrameRegister, stackPointerRegister); - load64(Address(regT3, VM::exceptionOffset()), regT0); store64(TrustedImm64(JSValue::encode(JSValue())), Address(regT3, VM::exceptionOffset())); emitPutVirtualRegister(currentInstruction[1].u.operand); - - load64(Address(regT0, Exception::valueOffset()), regT0); - emitPutVirtualRegister(currentInstruction[2].u.operand); -} - -void JIT::emit_op_create_lexical_environment(Instruction* currentInstruction) -{ - JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_lexical_environment); - slowPathCall.call(); -} - -void JIT::emit_op_get_parent_scope(Instruction* currentInstruction) -{ - int currentScope = currentInstruction[2].u.operand; - emitGetVirtualRegister(currentScope, regT0); - loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0); - emitStoreCell(currentInstruction[1].u.operand, regT0); } void JIT::emit_op_switch_imm(Instruction* currentInstruction) @@ -537,7 +664,7 @@ void JIT::emit_op_switch_imm(Instruction* currentInstruction) // create jump table for switch destinations, track this switch statement. SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex); m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Immediate)); - jumpTable->ensureCTITable(); + jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size()); emitGetVirtualRegister(scrutinee, regT0); callOperation(operationSwitchImmWithUnknownKeyType, regT0, tableIndex); @@ -553,7 +680,7 @@ void JIT::emit_op_switch_char(Instruction* currentInstruction) // create jump table for switch destinations, track this switch statement. 
SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex); m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Character)); - jumpTable->ensureCTITable(); + jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size()); emitGetVirtualRegister(scrutinee, regT0); callOperation(operationSwitchCharWithUnknownKeyType, regT0, tableIndex); @@ -597,12 +724,12 @@ void JIT::emit_op_eq_null(Instruction* currentInstruction) emitGetVirtualRegister(src1, regT0); Jump isImmediate = emitJumpIfNotJSCell(regT0); - Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); + loadPtr(Address(regT0, JSCell::structureOffset()), regT2); + Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); move(TrustedImm32(0), regT0); Jump wasNotMasqueradesAsUndefined = jump(); isMasqueradesAsUndefined.link(this); - emitLoadStructure(regT0, regT2, regT1); move(TrustedImmPtr(m_codeBlock->globalObject()), regT0); loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2); comparePtr(Equal, regT0, regT2, regT0); @@ -629,12 +756,12 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction) emitGetVirtualRegister(src1, regT0); Jump isImmediate = emitJumpIfNotJSCell(regT0); - Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); + loadPtr(Address(regT0, JSCell::structureOffset()), regT2); + Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)); move(TrustedImm32(1), regT0); Jump wasNotMasqueradesAsUndefined = jump(); isMasqueradesAsUndefined.link(this); - emitLoadStructure(regT0, regT2, regT1); move(TrustedImmPtr(m_codeBlock->globalObject()), regT0); loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2); comparePtr(NotEqual, regT0, regT2, regT0); @@ -652,8 +779,10 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction) emitPutVirtualRegister(dst); } -void JIT::emit_op_enter(Instruction*) +void JIT::emit_op_enter(Instruction* currentInstruction) { + emitEnterOptimizationCheck(); + // Even though CTI doesn't use them, we initialize our constant // registers to zap stale pointers, to avoid unnecessarily prolonging // object lifetime and increasing GC pressure. 
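The zap loop referenced in the comment above writes a known value into every local slot so that stale JSValue bits left in the frame cannot be mistaken for live object pointers. A minimal sketch of the idea (standalone; the encoded constant is illustrative rather than JSC's actual undefined encoding):

    #include <cstddef>
    #include <cstdint>

    void zapLocals(uint64_t* frame, size_t numLocals, uint64_t encodedUndefined)
    {
        for (size_t i = 0; i < numLocals; ++i)
            frame[i] = encodedUndefined; // dead bits no longer resemble live cells
    }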
@@ -661,17 +790,38 @@ void JIT::emit_op_enter(Instruction*) for (size_t j = 0; j < count; ++j) emitInitRegister(virtualRegisterForLocal(j).offset()); - emitWriteBarrier(m_codeBlock->ownerExecutable()); + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_enter); + slowPathCall.call(); +} - emitEnterOptimizationCheck(); +void JIT::emit_op_create_activation(Instruction* currentInstruction) +{ + int dst = currentInstruction[1].u.operand; + + Jump activationCreated = branchTest64(NonZero, Address(callFrameRegister, sizeof(Register) * dst)); + callOperation(operationCreateActivation, 0); + emitStoreCell(dst, returnValueGPR); + activationCreated.link(this); } -void JIT::emit_op_get_scope(Instruction* currentInstruction) +void JIT::emit_op_create_arguments(Instruction* currentInstruction) { int dst = currentInstruction[1].u.operand; - emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT0); - loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT0); - emitStoreCell(dst, regT0); + + Jump argsCreated = branchTest64(NonZero, Address(callFrameRegister, sizeof(Register) * dst)); + + callOperation(operationCreateArguments); + emitStoreCell(dst, returnValueGPR); + emitStoreCell(unmodifiedArgumentsRegister(VirtualRegister(dst)), returnValueGPR); + + argsCreated.link(this); +} + +void JIT::emit_op_init_lazy_reg(Instruction* currentInstruction) +{ + int dst = currentInstruction[1].u.operand; + + store64(TrustedImm64((int64_t)0), Address(callFrameRegister, sizeof(Register) * dst)); } void JIT::emit_op_to_this(Instruction* currentInstruction) @@ -680,66 +830,60 @@ emitGetVirtualRegister(currentInstruction[1].u.operand, regT1); emitJumpSlowCaseIfNotJSCell(regT1); + loadPtr(Address(regT1, JSCell::structureOffset()), regT0); - addSlowCase(branch8(NotEqual, Address(regT1, JSCell::typeInfoTypeOffset()), TrustedImm32(FinalObjectType))); + addSlowCase(branch8(NotEqual, Address(regT0, Structure::typeInfoTypeOffset()), TrustedImm32(FinalObjectType))); loadPtr(cachedStructure, regT2); - addSlowCase(branchTestPtr(Zero, regT2)); - load32(Address(regT2, Structure::structureIDOffset()), regT2); - addSlowCase(branch32(NotEqual, Address(regT1, JSCell::structureIDOffset()), regT2)); + addSlowCase(branchPtr(NotEqual, regT0, regT2)); +} + +void JIT::emit_op_get_callee(Instruction* currentInstruction) +{ + int result = currentInstruction[1].u.operand; + WriteBarrierBase<JSCell>* cachedFunction = &currentInstruction[2].u.jsCell; + emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT0); + + loadPtr(cachedFunction, regT2); + addSlowCase(branchPtr(NotEqual, regT0, regT2)); + + emitPutVirtualRegister(result); +} + +void JIT::emitSlow_op_get_callee(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) +{ + linkSlowCase(iter); + + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_callee); + slowPathCall.call(); } void JIT::emit_op_create_this(Instruction* currentInstruction) { int callee = currentInstruction[2].u.operand; - WriteBarrierBase<JSCell>* cachedFunction = &currentInstruction[4].u.jsCell; RegisterID calleeReg = regT0; - RegisterID rareDataReg = regT4; RegisterID resultReg = regT0; RegisterID allocatorReg = regT1; RegisterID structureReg = regT2; - RegisterID cachedFunctionReg = regT4; RegisterID scratchReg = regT3; emitGetVirtualRegister(callee, calleeReg); - loadPtr(Address(calleeReg, JSFunction::offsetOfRareData()), rareDataReg); - addSlowCase(branchTestPtr(Zero, rareDataReg)); - loadPtr(Address(rareDataReg,
FunctionRareData::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg); - loadPtr(Address(rareDataReg, FunctionRareData::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureReg); + loadPtr(Address(calleeReg, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg); + loadPtr(Address(calleeReg, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureReg); addSlowCase(branchTestPtr(Zero, allocatorReg)); - loadPtr(cachedFunction, cachedFunctionReg); - Jump hasSeenMultipleCallees = branchPtr(Equal, cachedFunctionReg, TrustedImmPtr(JSCell::seenMultipleCalleeObjects())); - addSlowCase(branchPtr(NotEqual, calleeReg, cachedFunctionReg)); - hasSeenMultipleCallees.link(this); - emitAllocateJSObject(allocatorReg, structureReg, resultReg, scratchReg); emitPutVirtualRegister(currentInstruction[1].u.operand); } void JIT::emitSlow_op_create_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) { - linkSlowCase(iter); // doesn't have rare data linkSlowCase(iter); // doesn't have an allocation profile linkSlowCase(iter); // allocation failed - linkSlowCase(iter); // cached function didn't match JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_this); slowPathCall.call(); } -void JIT::emit_op_check_tdz(Instruction* currentInstruction) -{ - emitGetVirtualRegister(currentInstruction[1].u.operand, regT0); - addSlowCase(branchTest64(Zero, regT0)); -} - -void JIT::emitSlow_op_check_tdz(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) -{ - linkSlowCase(iter); - JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_throw_tdz_error); - slowPathCall.call(); -} - void JIT::emit_op_profile_will_call(Instruction* currentInstruction) { Jump profilerDone = branchTestPtr(Zero, AbsoluteAddress(m_vm->enabledProfilerAddress())); @@ -764,7 +908,6 @@ void JIT::emitSlow_op_to_this(Instruction* currentInstruction, Vector<SlowCaseEn linkSlowCase(iter); linkSlowCase(iter); linkSlowCase(iter); - linkSlowCase(iter); JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_this); slowPathCall.call(); @@ -886,44 +1029,110 @@ void JIT::emitSlow_op_to_number(Instruction* currentInstruction, Vector<SlowCase slowPathCall.call(); } -void JIT::emitSlow_op_to_string(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) +void JIT::emit_op_get_arguments_length(Instruction* currentInstruction) { - linkSlowCase(iter); // Not JSCell. - linkSlowCase(iter); // Not JSString. 
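// Stepping back to the create_this hunks above: both sides of the diff rely on the
// callee caching an ObjectAllocationProfile -- an (allocator, structure) pair -- so
// that `new F()` can allocate its result object inline. Schematically (field and
// type names illustrative, not taken from JSC's headers):
struct MarkedAllocator;
struct Structure;
struct ObjectAllocationProfileSketch {
    MarkedAllocator* allocator; // null until profiled; the emitted branchTestPtr(Zero, ...) slow-paths on it
    Structure* structure;       // the shape installed on the freshly allocated object
};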
-void JIT::emitSlow_op_to_string(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emit_op_get_arguments_length(Instruction* currentInstruction)
 {
-    linkSlowCase(iter); // Not JSCell.
-    linkSlowCase(iter); // Not JSString.
+    int dst = currentInstruction[1].u.operand;
+    int argumentsRegister = currentInstruction[2].u.operand;
+    addSlowCase(branchTest64(NonZero, addressFor(argumentsRegister)));
+    emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT0);
+    sub32(TrustedImm32(1), regT0);
+    emitFastArithReTagImmediate(regT0, regT0);
+    emitPutVirtualRegister(dst, regT0);
+}

-    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_string);
-    slowPathCall.call();
+void JIT::emitSlow_op_get_arguments_length(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    linkSlowCase(iter);
+    int dst = currentInstruction[1].u.operand;
+    int base = currentInstruction[2].u.operand;
+    callOperation(operationGetArgumentsLength, dst, base);
+}
+
+void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction)
+{
+    int dst = currentInstruction[1].u.operand;
+    int argumentsRegister = currentInstruction[2].u.operand;
+    int property = currentInstruction[3].u.operand;
+    addSlowCase(branchTest64(NonZero, addressFor(argumentsRegister)));
+    emitGetVirtualRegister(property, regT1);
+    addSlowCase(emitJumpIfNotImmediateInteger(regT1));
+    add32(TrustedImm32(1), regT1);
+    // regT1 now contains the integer index of the argument we want, including this
+    emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT2);
+    addSlowCase(branch32(AboveOrEqual, regT1, regT2));
+
+    signExtend32ToPtr(regT1, regT1);
+    load64(BaseIndex(callFrameRegister, regT1, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT0);
+    emitValueProfilingSite();
+    emitPutVirtualRegister(dst, regT0);
+}
+
+void JIT::emitSlow_op_get_argument_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    int dst = currentInstruction[1].u.operand;
+    int arguments = currentInstruction[2].u.operand;
+    int property = currentInstruction[3].u.operand;
+
+    linkSlowCase(iter);
+    Jump skipArgumentsCreation = jump();
+
+    linkSlowCase(iter);
+    linkSlowCase(iter);
+    callOperation(operationCreateArguments);
+    emitStoreCell(arguments, returnValueGPR);
+    emitStoreCell(unmodifiedArgumentsRegister(VirtualRegister(arguments)), returnValueGPR);
+
+    skipArgumentsCreation.link(this);
+    emitGetVirtualRegister(arguments, regT0);
+    emitGetVirtualRegister(property, regT1);
+    callOperation(WithProfile, operationGetByValGeneric, dst, regT0, regT1);
 }

 #endif // USE(JSVALUE64)

+void JIT::emit_op_touch_entry(Instruction* currentInstruction)
+{
+    if (m_codeBlock->symbolTable()->m_functionEnteredOnce.hasBeenInvalidated())
+        return;
+
+    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_touch_entry);
+    slowPathCall.call();
+}
+
 void JIT::emit_op_loop_hint(Instruction*)
 {
     // Emit the JIT optimization check:
     if (canBeOptimized()) {
-        addSlowCase(branchAdd32(PositiveOrZero, TrustedImm32(Options::executionCounterIncrementForLoop()),
-            AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
+        if (Options::enableOSREntryInLoops()) {
+            addSlowCase(branchAdd32(PositiveOrZero, TrustedImm32(Options::executionCounterIncrementForLoop()),
+                AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
+        } else {
+            // Add with saturation.
+            move(TrustedImmPtr(m_codeBlock->addressOfJITExecuteCounter()), regT3);
+            load32(regT3, regT2);
+            Jump dontAdd = branch32(
+                GreaterThan, regT2,
+                TrustedImm32(std::numeric_limits<int32_t>::max() - Options::executionCounterIncrementForLoop()));
+            add32(TrustedImm32(Options::executionCounterIncrementForLoop()), regT2);
+            store32(regT2, regT3);
+            dontAdd.link(this);
+        }
     }

     // Emit the watchdog timer check:
-    if (m_vm->watchdog)
-        addSlowCase(branchTest8(NonZero, AbsoluteAddress(m_vm->watchdog->timerDidFireAddress())));
+    if (m_vm->watchdog.isEnabled())
+        addSlowCase(branchTest8(NonZero, AbsoluteAddress(m_vm->watchdog.timerDidFireAddress())));
 }

 void JIT::emitSlow_op_loop_hint(Instruction*, Vector<SlowCaseEntry>::iterator& iter)
 {
 #if ENABLE(DFG_JIT)
     // Emit the slow path for the JIT optimization check:
-    if (canBeOptimized()) {
+    if (canBeOptimized() && Options::enableOSREntryInLoops()) {
         linkSlowCase(iter);

         callOperation(operationOptimize, m_bytecodeOffset);
         Jump noOptimizedEntry = branchTestPtr(Zero, returnValueGPR);
-        if (!ASSERT_DISABLED) {
-            Jump ok = branchPtr(MacroAssembler::Above, regT0, TrustedImmPtr(bitwise_cast<void*>(static_cast<intptr_t>(1000))));
-            abortWithReason(JITUnreasonableLoopHintJumpTarget);
-            ok.link(this);
-        }
         jump(returnValueGPR);
         noOptimizedEntry.link(this);
@@ -932,7 +1141,7 @@ void JIT::emitSlow_op_loop_hint(Instruction*, Vector<SlowCaseEntry>::iterator& i
 #endif

     // Emit the slow path of the watchdog timer check:
-    if (m_vm->watchdog) {
+    if (m_vm->watchdog.isEnabled()) {
         linkSlowCase(iter);

         callOperation(operationHandleWatchdogTimer);
@@ -950,36 +1159,32 @@ void JIT::emit_op_new_func(Instruction* currentInstruction)
 {
     Jump lazyJump;
     int dst = currentInstruction[1].u.operand;
-
-#if USE(JSVALUE64)
-    emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
+    if (currentInstruction[3].u.operand) {
+#if USE(JSVALUE32_64)
+        lazyJump = branch32(NotEqual, tagFor(dst), TrustedImm32(JSValue::EmptyValueTag));
 #else
-    emitLoadPayload(currentInstruction[2].u.operand, regT0);
+        lazyJump = branchTest64(NonZero, addressFor(dst));
 #endif
-    FunctionExecutable* funcExec = m_codeBlock->functionDecl(currentInstruction[3].u.operand);
-    callOperation(operationNewFunction, dst, regT0, funcExec);
+    }
+
+    FunctionExecutable* funcExec = m_codeBlock->functionDecl(currentInstruction[2].u.operand);
+    callOperation(operationNewFunction, dst, funcExec);
+
+    if (currentInstruction[3].u.operand)
+        lazyJump.link(this);
+}
+
+void JIT::emit_op_new_captured_func(Instruction* currentInstruction)
+{
+    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_new_captured_func);
+    slowPathCall.call();
 }
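Note: the "Add with saturation" branch above bumps the execute counter but skips the add whenever it would overflow INT32_MAX, so a long-running loop cannot wrap the counter. A standalone C++ sketch of the same arithmetic (not JSC code):

#include <cstdint>
#include <limits>
#include <cassert>

// Saturating counter bump mirroring the emitted branch structure:
// skip the add when it would overflow the int32 counter.
void addWithSaturation(int32_t& counter, int32_t increment)
{
    if (counter > std::numeric_limits<int32_t>::max() - increment)
        return; // "dontAdd": already saturated
    counter += increment;
}

int main()
{
    int32_t c = std::numeric_limits<int32_t>::max() - 1;
    addWithSaturation(c, 10); // would overflow, so c is left unchanged
    assert(c == std::numeric_limits<int32_t>::max() - 1);
    int32_t d = 0;
    addWithSaturation(d, 10); // normal case: plain addition
    assert(d == 10);
}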
 void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
 {
-    Jump notUndefinedScope;
     int dst = currentInstruction[1].u.operand;
-#if USE(JSVALUE64)
-    emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
-    notUndefinedScope = branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(jsUndefined())));
-    store64(TrustedImm64(JSValue::encode(jsUndefined())), Address(callFrameRegister, sizeof(Register) * dst));
-#else
-    emitLoadPayload(currentInstruction[2].u.operand, regT0);
-    notUndefinedScope = branch32(NotEqual, tagFor(currentInstruction[2].u.operand), TrustedImm32(JSValue::UndefinedTag));
-    emitStore(dst, jsUndefined());
-#endif
-
-    Jump done = jump();
-    notUndefinedScope.link(this);
-
-    FunctionExecutable* funcExpr = m_codeBlock->functionExpr(currentInstruction[3].u.operand);
-    callOperation(operationNewFunction, dst, regT0, funcExpr);
-    done.link(this);
+
+    FunctionExecutable* funcExpr = m_codeBlock->functionExpr(currentInstruction[2].u.operand);
+    callOperation(operationNewFunction, dst, funcExpr);
 }

 void JIT::emit_op_new_array(Instruction* currentInstruction)
@@ -1016,335 +1221,13 @@ void JIT::emit_op_new_array_buffer(Instruction* currentInstruction)
     callOperation(operationNewArrayBufferWithProfile, dst, currentInstruction[4].u.arrayAllocationProfile, values, size);
 }

-#if USE(JSVALUE64)
-void JIT::emit_op_has_structure_property(Instruction* currentInstruction)
-{
-    int dst = currentInstruction[1].u.operand;
-    int base = currentInstruction[2].u.operand;
-    int enumerator = currentInstruction[4].u.operand;
-
-    emitGetVirtualRegister(base, regT0);
-    emitGetVirtualRegister(enumerator, regT1);
-    emitJumpSlowCaseIfNotJSCell(regT0, base);
-
-    load32(Address(regT0, JSCell::structureIDOffset()), regT0);
-    addSlowCase(branch32(NotEqual, regT0, Address(regT1, JSPropertyNameEnumerator::cachedStructureIDOffset())));
-
-    move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0);
-    emitPutVirtualRegister(dst);
-}
-
-void JIT::privateCompileHasIndexedProperty(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
-{
-    Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;
-
-    PatchableJump badType;
-
-    // FIXME: Add support for other types like TypedArrays and Arguments.
-    // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034.
-    JumpList slowCases = emitLoadForArrayMode(currentInstruction, arrayMode, badType);
-    move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0);
-    Jump done = jump();
-
-    LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
-
-    patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
-    patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
-
-    patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
-
-    byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
-        m_codeBlock, patchBuffer,
-        ("Baseline has_indexed_property stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
-
-    RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relink(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
-    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(operationHasIndexedPropertyGeneric));
-}
-
-void JIT::emit_op_has_indexed_property(Instruction* currentInstruction)
-{
-    int dst = currentInstruction[1].u.operand;
-    int base = currentInstruction[2].u.operand;
-    int property = currentInstruction[3].u.operand;
-    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
-    ByValInfo* byValInfo = m_codeBlock->addByValInfo();
-
-    emitGetVirtualRegisters(base, regT0, property, regT1);
-
-    // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
-    // We check the value as if it was a uint32 against the m_vectorLength - which will always fail if
-    // the number was signed since m_vectorLength is always less than intmax (since the total allocation
-    // size is always less than 4Gb). As such zero extending will have been correct (and extending the value
-    // to 64-bits is necessary since it's used in the address calculation). We zero extend rather than sign
-    // extending since it makes it easier to re-tag the value in the slow case.
-    zeroExtend32ToPtr(regT1, regT1);
-
-    emitJumpSlowCaseIfNotJSCell(regT0, base);
-    emitArrayProfilingSiteWithCell(regT0, regT2, profile);
-    and32(TrustedImm32(IndexingShapeMask), regT2);
-
-    JITArrayMode mode = chooseArrayMode(profile);
-    PatchableJump badType;
-
-    // FIXME: Add support for other types like TypedArrays and Arguments.
-    // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034.
-    JumpList slowCases = emitLoadForArrayMode(currentInstruction, mode, badType);
-
-    move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0);
-
-    addSlowCase(badType);
-    addSlowCase(slowCases);
-
-    Label done = label();
-
-    emitPutVirtualRegister(dst);
-
-    Label nextHotPath = label();
-
-    m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, PatchableJump(), badType, mode, profile, done, nextHotPath));
-}
-
-void JIT::emitSlow_op_has_indexed_property(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
-    int dst = currentInstruction[1].u.operand;
-    int base = currentInstruction[2].u.operand;
-    int property = currentInstruction[3].u.operand;
-    ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;
-
-    linkSlowCaseIfNotJSCell(iter, base); // base cell check
-    linkSlowCase(iter); // base array check
-    linkSlowCase(iter); // vector length check
-    linkSlowCase(iter); // empty value
-
-    Label slowPath = label();
-
-    emitGetVirtualRegister(base, regT0);
-    emitGetVirtualRegister(property, regT1);
-    Call call = callOperation(operationHasIndexedPropertyDefault, dst, regT0, regT1, byValInfo);
-
-    m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
-    m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
-    m_byValInstructionIndex++;
-}
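Note: the zero-extension comment in the removed has_indexed_property code above describes a common bounds-check trick: comparing a possibly negative int32 index as an unsigned value against a vector length that is known to be far below INT32_MAX rejects negative indices for free. A standalone C++ sketch of that reasoning (not JSC code):

#include <cstdint>
#include <cassert>

// Treating the int32 index as uint32 makes any negative index look like a
// huge number (>= 2^31), which necessarily fails the length check because
// vectorLength is always well below INT32_MAX.
bool inBounds(int32_t index, uint32_t vectorLength)
{
    return static_cast<uint32_t>(index) < vectorLength;
}

int main()
{
    assert(inBounds(3, 10));
    assert(!inBounds(-1, 10)); // -1 becomes 0xFFFFFFFF and fails the check
}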
-void JIT::emit_op_get_direct_pname(Instruction* currentInstruction)
-{
-    int dst = currentInstruction[1].u.operand;
-    int base = currentInstruction[2].u.operand;
-    int index = currentInstruction[4].u.operand;
-    int enumerator = currentInstruction[5].u.operand;
-
-    // Check that base is a cell
-    emitGetVirtualRegister(base, regT0);
-    emitJumpSlowCaseIfNotJSCell(regT0, base);
-
-    // Check the structure
-    emitGetVirtualRegister(enumerator, regT2);
-    load32(Address(regT0, JSCell::structureIDOffset()), regT1);
-    addSlowCase(branch32(NotEqual, regT1, Address(regT2, JSPropertyNameEnumerator::cachedStructureIDOffset())));
-
-    // Compute the offset
-    emitGetVirtualRegister(index, regT1);
-    // If index is less than the enumerator's cached inline storage, then it's an inline access
-    Jump outOfLineAccess = branch32(AboveOrEqual, regT1, Address(regT2, JSPropertyNameEnumerator::cachedInlineCapacityOffset()));
-    addPtr(TrustedImm32(JSObject::offsetOfInlineStorage()), regT0);
-    signExtend32ToPtr(regT1, regT1);
-    load64(BaseIndex(regT0, regT1, TimesEight), regT0);
-
-    Jump done = jump();
-
-    // Otherwise it's out of line
-    outOfLineAccess.link(this);
-    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
-    sub32(Address(regT2, JSPropertyNameEnumerator::cachedInlineCapacityOffset()), regT1);
-    neg32(regT1);
-    signExtend32ToPtr(regT1, regT1);
-    int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue);
-    load64(BaseIndex(regT0, regT1, TimesEight, offsetOfFirstProperty), regT0);
-
-    done.link(this);
-    emitValueProfilingSite();
-    emitPutVirtualRegister(dst, regT0);
-}
-
-void JIT::emitSlow_op_get_direct_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_captured_mov(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    int base = currentInstruction[2].u.operand;
-    linkSlowCaseIfNotJSCell(iter, base);
+    VariableWatchpointSet* set = currentInstruction[3].u.watchpointSet;
+    if (!set || set->state() == IsInvalidated)
+        return;
     linkSlowCase(iter);
-
-    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_direct_pname);
-    slowPathCall.call();
-}
-
-void JIT::emit_op_enumerator_structure_pname(Instruction* currentInstruction)
-{
-    int dst = currentInstruction[1].u.operand;
-    int enumerator = currentInstruction[2].u.operand;
-    int index = currentInstruction[3].u.operand;
-
-    emitGetVirtualRegister(index, regT0);
-    emitGetVirtualRegister(enumerator, regT1);
-    Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endStructurePropertyIndexOffset()));
-
-    move(TrustedImm64(JSValue::encode(jsNull())), regT0);
-
-    Jump done = jump();
-    inBounds.link(this);
-
-    loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1);
-    signExtend32ToPtr(regT0, regT0);
-    load64(BaseIndex(regT1, regT0, TimesEight), regT0);
-
-    done.link(this);
-    emitPutVirtualRegister(dst);
-}
-
-void JIT::emit_op_enumerator_generic_pname(Instruction* currentInstruction)
-{
-    int dst = currentInstruction[1].u.operand;
-    int enumerator = currentInstruction[2].u.operand;
-    int index = currentInstruction[3].u.operand;
-
-    emitGetVirtualRegister(index, regT0);
-    emitGetVirtualRegister(enumerator, regT1);
-    Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endGenericPropertyIndexOffset()));
-
-    move(TrustedImm64(JSValue::encode(jsNull())), regT0);
-
-    Jump done = jump();
-    inBounds.link(this);
-
-    loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1);
-    signExtend32ToPtr(regT0, regT0);
-    load64(BaseIndex(regT1, regT0, TimesEight), regT0);
-
-    done.link(this);
-    emitPutVirtualRegister(dst);
-}
-
-void JIT::emit_op_profile_type(Instruction* currentInstruction)
-{
-    TypeLocation* cachedTypeLocation = currentInstruction[2].u.location;
-    int valueToProfile = currentInstruction[1].u.operand;
-
-    emitGetVirtualRegister(valueToProfile, regT0);
-
-    JumpList jumpToEnd;
-
-    jumpToEnd.append(branchTest64(Zero, regT0));
-
-    // Compile in a predictive type check, if possible, to see if we can skip writing to the log.
-    // These typechecks are inlined to match those of the 64-bit JSValue type checks.
-    if (cachedTypeLocation->m_lastSeenType == TypeUndefined)
-        jumpToEnd.append(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsUndefined()))));
-    else if (cachedTypeLocation->m_lastSeenType == TypeNull)
-        jumpToEnd.append(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsNull()))));
-    else if (cachedTypeLocation->m_lastSeenType == TypeBoolean) {
-        move(regT0, regT1);
-        and64(TrustedImm32(~1), regT1);
-        jumpToEnd.append(branch64(Equal, regT1, TrustedImm64(ValueFalse)));
-    } else if (cachedTypeLocation->m_lastSeenType == TypeMachineInt)
-        jumpToEnd.append(emitJumpIfImmediateInteger(regT0));
-    else if (cachedTypeLocation->m_lastSeenType == TypeNumber)
-        jumpToEnd.append(emitJumpIfImmediateNumber(regT0));
-    else if (cachedTypeLocation->m_lastSeenType == TypeString) {
-        Jump isNotCell = emitJumpIfNotJSCell(regT0);
-        jumpToEnd.append(branch8(Equal, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType)));
-        isNotCell.link(this);
-    }
-
-    // Load the type profiling log into T2.
-    TypeProfilerLog* cachedTypeProfilerLog = m_vm->typeProfilerLog();
-    move(TrustedImmPtr(cachedTypeProfilerLog), regT2);
-    // Load the next log entry into T1.
-    loadPtr(Address(regT2, TypeProfilerLog::currentLogEntryOffset()), regT1);
-
-    // Store the JSValue onto the log entry.
-    store64(regT0, Address(regT1, TypeProfilerLog::LogEntry::valueOffset()));
-
-    // Store the structureID of the cell if T0 is a cell, otherwise, store 0 on the log entry.
-    Jump notCell = emitJumpIfNotJSCell(regT0);
-    load32(Address(regT0, JSCell::structureIDOffset()), regT0);
-    store32(regT0, Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset()));
-    Jump skipIsCell = jump();
-    notCell.link(this);
-    store32(TrustedImm32(0), Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset()));
-    skipIsCell.link(this);
-
-    // Store the typeLocation on the log entry.
-    move(TrustedImmPtr(cachedTypeLocation), regT0);
-    store64(regT0, Address(regT1, TypeProfilerLog::LogEntry::locationOffset()));
-
-    // Increment the current log entry.
-    addPtr(TrustedImm32(sizeof(TypeProfilerLog::LogEntry)), regT1);
-    store64(regT1, Address(regT2, TypeProfilerLog::currentLogEntryOffset()));
-    Jump skipClearLog = branchPtr(NotEqual, regT1, TrustedImmPtr(cachedTypeProfilerLog->logEndPtr()));
-    // Clear the log if we're at the end of the log.
-    callOperation(operationProcessTypeProfilerLog);
-    skipClearLog.link(this);
-
-    jumpToEnd.link(this);
-}
-
-#endif // USE(JSVALUE64)
-
-void JIT::emit_op_get_enumerable_length(Instruction* currentInstruction)
-{
-    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_enumerable_length);
-    slowPathCall.call();
-}
-
-void JIT::emitSlow_op_has_structure_property(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
-    linkSlowCase(iter);
-    linkSlowCase(iter);
-
-    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_has_structure_property);
-    slowPathCall.call();
-}
-
-void JIT::emit_op_has_generic_property(Instruction* currentInstruction)
-{
-    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_has_generic_property);
-    slowPathCall.call();
-}
-
-void JIT::emit_op_get_property_enumerator(Instruction* currentInstruction)
-{
-    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_property_enumerator);
-    slowPathCall.call();
-}
-
-void JIT::emit_op_to_index_string(Instruction* currentInstruction)
-{
-    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_index_string);
-    slowPathCall.call();
-}
-
-void JIT::emit_op_profile_control_flow(Instruction* currentInstruction)
-{
-    BasicBlockLocation* basicBlockLocation = currentInstruction[1].u.basicBlockLocation;
-    if (!basicBlockLocation->hasExecuted())
-        basicBlockLocation->emitExecuteCode(*this, regT1);
-}
-
-void JIT::emit_op_create_direct_arguments(Instruction* currentInstruction)
-{
-    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_direct_arguments);
-    slowPathCall.call();
-}
-
-void JIT::emit_op_create_scoped_arguments(Instruction* currentInstruction)
-{
-    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_scoped_arguments);
-    slowPathCall.call();
-}
-
-void JIT::emit_op_create_out_of_band_arguments(Instruction* currentInstruction)
-{
-    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_out_of_band_arguments);
+    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_captured_mov);
     slowPathCall.call();
 }
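Note: the removed emit_op_profile_type path above fills a fixed-size TypeProfilerLog and calls out to process it exactly when the cursor reaches the end of the buffer. A minimal standalone C++ sketch of that log discipline; the names are hypothetical stand-ins, not the real TypeProfilerLog API:

#include <cstddef>
#include <cstdio>

// Append entries into a fixed buffer and flush ("process") the log when
// the cursor hits the end, mirroring the "clear the log if we're at the
// end of the log" branch above.
struct ProfileLogSketch {
    static const size_t capacity = 4;
    struct Entry { const void* value; unsigned structureID; };
    Entry entries[capacity];
    Entry* cursor = entries;

    void append(const void* value, unsigned structureID)
    {
        *cursor++ = Entry { value, structureID };
        if (cursor == entries + capacity) {
            // stands in for operationProcessTypeProfilerLog
            std::printf("processing %zu entries\n", capacity);
            cursor = entries;
        }
    }
};

int main()
{
    ProfileLogSketch log;
    for (int i = 0; i < 5; ++i)
        log.append(&log, 0); // flushes once, after the fourth append
}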
diff --git a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
index 91c00e3fd..29e8880aa 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2009, 2012, 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2012, 2013 Apple Inc. All rights reserved.
  * Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com>
  *
  * Redistribution and use in source and binary forms, with or without
@@ -32,18 +32,14 @@
 #include "CCallHelpers.h"
 #include "Debugger.h"
-#include "Exception.h"
 #include "JITInlines.h"
 #include "JSArray.h"
 #include "JSCell.h"
-#include "JSEnvironmentRecord.h"
 #include "JSFunction.h"
-#include "JSPropertyNameEnumerator.h"
+#include "JSPropertyNameIterator.h"
+#include "JSVariableObject.h"
 #include "LinkBuffer.h"
-#include "MaxFrameExtentForSlowPathCall.h"
-#include "RepatchBuffer.h"
 #include "SlowPathCall.h"
-#include "TypeProfilerLog.h"
 #include "VirtualRegister.h"

 namespace JSC {

@@ -52,24 +48,41 @@ JIT::CodeRef JIT::privateCompileCTINativeCall(VM* vm, NativeFunction func)
 {
     Call nativeCall;

-    emitFunctionPrologue();
     emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
     storePtr(callFrameRegister, &m_vm->topCallFrame);

 #if CPU(X86)
+    // Load caller frame's scope chain into this callframe so that whatever we call can
+    // get to its global data.
+    emitGetCallerFrameFromCallFrameHeaderPtr(regT0);
+    emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1, regT0);
+    emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
+
+    peek(regT1);
+    emitPutReturnPCToCallFrameHeader(regT1);
+
     // Calling convention:       f(ecx, edx, ...);
     // Host function signature:  f(ExecState*);
     move(callFrameRegister, X86Registers::ecx);

-    subPtr(TrustedImm32(8), stackPointerRegister); // Align stack for call.
-    storePtr(X86Registers::ecx, Address(stackPointerRegister));
+    subPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister); // Align stack after call.
+
+    move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.

     // call the function
     nativeCall = call();

-    addPtr(TrustedImm32(8), stackPointerRegister);
+    addPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister);

 #elif CPU(ARM) || CPU(SH4) || CPU(MIPS)
+    // Load caller frame's scope chain into this callframe so that whatever we call can get to its global data.
+    emitGetCallerFrameFromCallFrameHeaderPtr(regT2);
+    emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1, regT2);
+    emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
+
+    preserveReturnAddressAfterCall(regT3); // Callee preserved
+    emitPutReturnPCToCallFrameHeader(regT3);
+
 #if CPU(MIPS)
     // Allocate stack space for (unused) 16 bytes (8-byte aligned) for 4 arguments.
     subPtr(TrustedImm32(16), stackPointerRegister);
@@ -80,6 +93,7 @@ JIT::CodeRef JIT::privateCompileCTINativeCall(VM* vm, NativeFunction func)
     move(callFrameRegister, argumentGPR0);

     emitGetFromCallFrameHeaderPtr(JSStack::Callee, argumentGPR1);
+    move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
     loadPtr(Address(argumentGPR1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);

     // call the function
@@ -93,39 +107,42 @@ JIT::CodeRef JIT::privateCompileCTINativeCall(VM* vm, NativeFunction func)
     restoreReturnAddressBeforeReturn(regT3);
 #else
 #error "JIT not supported on this platform."
-    abortWithReason(JITNotSupported);
+    breakpoint();
 #endif // CPU(X86)

     // Check for an exception
-    Jump sawException = branch32(NotEqual, AbsoluteAddress(vm->addressOfException()), TrustedImm32(0));
+    Jump sawException = branch32(NotEqual, AbsoluteAddress(reinterpret_cast<char*>(vm->addressOfException()) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));

-    emitFunctionEpilogue();
     // Return.
     ret();

     // Handle an exception
     sawException.link(this);

+    // Grab the return address.
+    preserveReturnAddressAfterCall(regT1);
+
+    move(TrustedImmPtr(&vm->exceptionLocation), regT2);
+    storePtr(regT1, regT2);

     storePtr(callFrameRegister, &m_vm->topCallFrame);

 #if CPU(X86)
-    addPtr(TrustedImm32(-4), stackPointerRegister);
-    loadPtr(Address(callFrameRegister), X86Registers::ecx);
-    push(X86Registers::ecx);
+    addPtr(TrustedImm32(-12), stackPointerRegister);
+    push(callFrameRegister);
 #else
-    loadPtr(Address(callFrameRegister), argumentGPR0);
+    move(callFrameRegister, argumentGPR0);
 #endif
     move(TrustedImmPtr(FunctionPtr(operationVMHandleException).value()), regT3);
     call(regT3);

 #if CPU(X86)
-    addPtr(TrustedImm32(8), stackPointerRegister);
+    addPtr(TrustedImm32(16), stackPointerRegister);
 #endif

     jumpToExceptionHandler();

     // All trampolines constructed! Copy the code, link up calls, and set the pointers on the Machine object.
-    LinkBuffer patchBuffer(*m_vm, *this, GLOBAL_THUNK_ID);
+    LinkBuffer patchBuffer(*m_vm, this, GLOBAL_THUNK_ID);

     patchBuffer.link(nativeCall, FunctionPtr(func));
     return FINALIZE_CODE(patchBuffer, ("JIT CTI native call"));
@@ -144,11 +161,21 @@ void JIT::emit_op_mov(Instruction* currentInstruction)
     }
 }

+void JIT::emit_op_captured_mov(Instruction* currentInstruction)
+{
+    int dst = currentInstruction[1].u.operand;
+    int src = currentInstruction[2].u.operand;
+
+    emitLoad(src, regT1, regT0);
+    emitNotifyWrite(regT1, regT0, regT2, currentInstruction[3].u.watchpointSet);
+    emitStore(dst, regT1, regT0);
+}
+
 void JIT::emit_op_end(Instruction* currentInstruction)
 {
     ASSERT(returnValueGPR != callFrameRegister);
     emitLoad(currentInstruction[1].u.operand, regT1, regT0);
-    emitFunctionEpilogue();
+    restoreReturnAddressBeforeReturn(Address(callFrameRegister, CallFrame::returnPCOffset()));
     ret();
 }

@@ -192,7 +219,8 @@ void JIT::emit_op_check_has_instance(Instruction* currentInstruction)
     emitJumpSlowCaseIfNotJSCell(baseVal);

     // Check that baseVal 'ImplementsHasInstance'.
-    addSlowCase(branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance)));
+    loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
+    addSlowCase(branchTest8(Zero, Address(regT0, Structure::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance)));
 }
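Note: the op_instanceof fast path below loops over the prototype chain, succeeding when it meets proto and failing when the chain hits null. The same logic as a standalone C++ sketch (hypothetical types, not JSC API):

#include <cassert>

// Hypothetical object model: each object points at its prototype, with
// nullptr terminating the chain, mirroring the emitted loop.
struct ObjectSketch { const ObjectSketch* prototype; };

bool instanceOfSketch(const ObjectSketch* value, const ObjectSketch* proto)
{
    for (const ObjectSketch* p = value->prototype; p; p = p->prototype) {
        if (p == proto)
            return true; // isInstance
    }
    return false; // walked off the end of the chain
}

int main()
{
    ObjectSketch base { nullptr }, derived { &base }, instance { &derived };
    assert(instanceOfSketch(&instance, &base));
    assert(!instanceOfSketch(&base, &derived));
}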
 void JIT::emit_op_instanceof(Instruction* currentInstruction)
@@ -211,7 +239,8 @@
     emitJumpSlowCaseIfNotJSCell(proto);

     // Check that prototype is an object
-    addSlowCase(emitJumpIfCellNotObject(regT1));
+    loadPtr(Address(regT1, JSCell::structureOffset()), regT3);
+    addSlowCase(emitJumpIfNotObject(regT3));

     // Optimistically load the result true, and start looping.
     // Initially, regT1 still contains proto and regT2 still contains value.
@@ -221,7 +250,7 @@
     // Load the prototype of the cell in regT2. If this is equal to regT1 - WIN!
     // Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
-    loadPtr(Address(regT2, JSCell::structureIDOffset()), regT2);
+    loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
     load32(Address(regT2, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
     Jump isInstance = branchPtr(Equal, regT2, regT1);
     branchTest32(NonZero, regT2).linkTo(loop, this);
@@ -277,12 +306,12 @@ void JIT::emit_op_is_undefined(Instruction* currentInstruction)
     Jump done = jump();

     isCell.link(this);
-    Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+    loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
+    Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT1, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
     move(TrustedImm32(0), regT0);
     Jump notMasqueradesAsUndefined = jump();

     isMasqueradesAsUndefined.link(this);
-    loadPtr(Address(regT0, JSCell::structureIDOffset()), regT1);
     move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
     loadPtr(Address(regT1, Structure::globalObjectOffset()), regT1);
     compare32(Equal, regT0, regT1, regT0);
@@ -321,7 +350,8 @@ void JIT::emit_op_is_string(Instruction* currentInstruction)
     emitLoad(value, regT1, regT0);
     Jump isNotCell = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));

-    compare8(Equal, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType), regT0);
+    loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
+    compare8(Equal, Address(regT1, Structure::typeInfoTypeOffset()), TrustedImm32(StringType), regT0);
     Jump done = jump();

     isNotCell.link(this);
@@ -331,22 +361,25 @@ void JIT::emit_op_is_string(Instruction* currentInstruction)
     emitStoreBool(dst, regT0);
 }

-void JIT::emit_op_is_object(Instruction* currentInstruction)
+void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    int value = currentInstruction[2].u.operand;
-
-    emitLoad(value, regT1, regT0);
-    Jump isNotCell = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
-
-    compare8(AboveOrEqual, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType), regT0);
-    Jump done = jump();
+    int activation = currentInstruction[1].u.operand;
+    Jump activationNotCreated = branch32(Equal, tagFor(activation), TrustedImm32(JSValue::EmptyValueTag));
+    emitLoadPayload(activation, regT0);
+    callOperation(operationTearOffActivation, regT0);
+    activationNotCreated.link(this);
+}

-    isNotCell.link(this);
-    move(TrustedImm32(0), regT0);
+void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction)
+{
+    VirtualRegister arguments = VirtualRegister(currentInstruction[1].u.operand);
+    int activation = currentInstruction[2].u.operand;

-    done.link(this);
-    emitStoreBool(dst, regT0);
+    Jump argsNotCreated = branch32(Equal, tagFor(unmodifiedArgumentsRegister(arguments).offset()), TrustedImm32(JSValue::EmptyValueTag));
+    emitLoadPayload(unmodifiedArgumentsRegister(VirtualRegister(arguments)).offset(), regT0);
+    emitLoadPayload(activation, regT1);
+    callOperation(operationTearOffArguments, regT0, regT1);
+    argsNotCreated.link(this);
 }

 void JIT::emit_op_to_primitive(Instruction* currentInstruction)
@@ -357,7 +390,7 @@ void JIT::emit_op_to_primitive(Instruction* currentInstruction)
     emitLoad(src, regT1, regT0);

     Jump isImm = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
-    addSlowCase(emitJumpIfCellObject(regT0));
+    addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
     isImm.link(this);

     if (dst != src)
@@ -477,8 +510,9 @@ void JIT::emit_op_jeq_null(Instruction* currentInstruction)

     Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));

-    Jump isNotMasqueradesAsUndefined = branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
-    loadPtr(Address(regT0, JSCell::structureIDOffset()), regT2);
+    // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
+    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+    Jump isNotMasqueradesAsUndefined = branchTest8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
     move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
     addJump(branchPtr(Equal, Address(regT2, Structure::globalObjectOffset()), regT0), target);
     Jump masqueradesGlobalObjectIsForeign = jump();
@@ -502,8 +536,9 @@ void JIT::emit_op_jneq_null(Instruction* currentInstruction)

     Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));

-    addJump(branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
-    loadPtr(Address(regT0, JSCell::structureIDOffset()), regT2);
+    // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
+    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+    addJump(branchTest8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
     move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
     addJump(branchPtr(NotEqual, Address(regT2, Structure::globalObjectOffset()), regT0), target);
     Jump wasNotImmediate = jump();
@@ -557,8 +592,8 @@ void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>:
     genericCase.append(getSlowCase(iter)); // tags not equal

     linkSlowCase(iter); // tags equal and JSCell
-    genericCase.append(branchPtr(NotEqual, Address(regT0, JSCell::structureIDOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
-    genericCase.append(branchPtr(NotEqual, Address(regT2, JSCell::structureIDOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
+    genericCase.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
+    genericCase.append(branchPtr(NotEqual, Address(regT2, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));

     // String case.
     callOperation(operationCompareStringEq, regT0, regT2);
@@ -601,8 +636,8 @@ void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>
     genericCase.append(getSlowCase(iter)); // tags not equal

     linkSlowCase(iter); // tags equal and JSCell
-    genericCase.append(branchPtr(NotEqual, Address(regT0, JSCell::structureIDOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
-    genericCase.append(branchPtr(NotEqual, Address(regT2, JSCell::structureIDOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
+    genericCase.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
+    genericCase.append(branchPtr(NotEqual, Address(regT2, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));

     // String case.
     callOperation(operationCompareStringEq, regT0, regT2);
@@ -630,12 +665,12 @@ void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqTy
     addSlowCase(branch32(NotEqual, regT1, regT3));
     addSlowCase(branch32(Below, regT1, TrustedImm32(JSValue::LowestTag)));

-    // Jump to a slow case if both are strings or symbols (non object).
+    // Jump to a slow case if both are strings.
     Jump notCell = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
-    Jump firstIsObject = emitJumpIfCellObject(regT0);
-    addSlowCase(emitJumpIfCellNotObject(regT2));
+    Jump firstNotString = branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get()));
+    addSlowCase(branchPtr(Equal, Address(regT2, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
     notCell.link(this);
-    firstIsObject.link(this);
+    firstNotString.link(this);

     // Simply compare the payloads.
     if (type == OpStrictEq)
@@ -684,12 +719,12 @@ void JIT::emit_op_eq_null(Instruction* currentInstruction)
     emitLoad(src, regT1, regT0);
     Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));

-    Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+    Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
     move(TrustedImm32(0), regT1);
     Jump wasNotMasqueradesAsUndefined = jump();

     isMasqueradesAsUndefined.link(this);
-    loadPtr(Address(regT0, JSCell::structureIDOffset()), regT2);
     move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
     loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2);
     compare32(Equal, regT0, regT2, regT1);
@@ -715,12 +750,12 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction)
     emitLoad(src, regT1, regT0);
     Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));

-    Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+    Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
     move(TrustedImm32(1), regT1);
     Jump wasNotMasqueradesAsUndefined = jump();

     isMasqueradesAsUndefined.link(this);
-    loadPtr(Address(regT0, JSCell::structureIDOffset()), regT2);
     move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
     loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2);
     compare32(NotEqual, regT0, regT2, regT1);
@@ -746,96 +781,160 @@ void JIT::emit_op_throw(Instruction* currentInstruction)
     jumpToExceptionHandler();
 }
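Note: several hunks above repeat one pattern: on the 32-bit value representation, a cell compares equal to null/undefined only if its structure carries the MasqueradesAsUndefined flag and the structure's global object matches the code block's. A condensed C++ sketch of that decision (hypothetical types, not JSC API):

#include <cassert>

// Hypothetical mirror of the emitted eq_null fast path for cells.
struct GlobalObjectSketch {};
struct StructureSketch {
    bool masqueradesAsUndefined;
    const GlobalObjectSketch* globalObject;
};
struct CellSketch { const StructureSketch* structure; };

bool cellEqNull(const CellSketch& cell, const GlobalObjectSketch* lexicalGlobal)
{
    const StructureSketch* s = cell.structure;
    if (!s->masqueradesAsUndefined)
        return false; // ordinary objects never compare equal to null
    // Masquerading only applies relative to the observing global object.
    return s->globalObject == lexicalGlobal;
}

int main()
{
    GlobalObjectSketch g1, g2;
    StructureSketch masq { true, &g1 };
    CellSketch c { &masq };
    assert(cellEqNull(c, &g1));
    assert(!cellEqNull(c, &g2)); // foreign global object: not undefined-like
}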
-void JIT::emit_op_push_with_scope(Instruction* currentInstruction)
+void JIT::emit_op_get_pnames(Instruction* currentInstruction)
 {
-    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_push_with_scope);
-    slowPathCall.call();
+    int dst = currentInstruction[1].u.operand;
+    int base = currentInstruction[2].u.operand;
+    int i = currentInstruction[3].u.operand;
+    int size = currentInstruction[4].u.operand;
+    int breakTarget = currentInstruction[5].u.operand;
+
+    JumpList isNotObject;
+
+    emitLoad(base, regT1, regT0);
+    if (!m_codeBlock->isKnownNotImmediate(base))
+        isNotObject.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
+    if (VirtualRegister(base) != m_codeBlock->thisRegister() || m_codeBlock->isStrictMode()) {
+        loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+        isNotObject.append(emitJumpIfNotObject(regT2));
+    }
+
+    // We could inline the case where you have a valid cache, but
+    // this call doesn't seem to be hot.
+    Label isObject(this);
+    callOperation(operationGetPNames, regT0);
+    emitStoreCell(dst, returnValueGPR);
+    load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3);
+    store32(TrustedImm32(Int32Tag), intTagFor(i));
+    store32(TrustedImm32(0), intPayloadFor(i));
+    store32(TrustedImm32(Int32Tag), intTagFor(size));
+    store32(regT3, payloadFor(size));
+    Jump end = jump();
+
+    isNotObject.link(this);
+    addJump(branch32(Equal, regT1, TrustedImm32(JSValue::NullTag)), breakTarget);
+    addJump(branch32(Equal, regT1, TrustedImm32(JSValue::UndefinedTag)), breakTarget);
+    callOperation(operationToObject, base, regT1, regT0);
+    jump().linkTo(isObject, this);
+
+    end.link(this);
 }

-void JIT::emit_op_to_number(Instruction* currentInstruction)
+void JIT::emit_op_next_pname(Instruction* currentInstruction)
 {
     int dst = currentInstruction[1].u.operand;
-    int src = currentInstruction[2].u.operand;
+    int base = currentInstruction[2].u.operand;
+    int i = currentInstruction[3].u.operand;
+    int size = currentInstruction[4].u.operand;
+    int it = currentInstruction[5].u.operand;
+    int target = currentInstruction[6].u.operand;

-    emitLoad(src, regT1, regT0);
+    JumpList callHasProperty;

-    Jump isInt32 = branch32(Equal, regT1, TrustedImm32(JSValue::Int32Tag));
-    addSlowCase(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::LowestTag)));
-    isInt32.link(this);
+    Label begin(this);
+    load32(intPayloadFor(i), regT0);
+    Jump end = branch32(Equal, regT0, intPayloadFor(size));

-    if (src != dst)
-        emitStore(dst, regT1, regT0);
+    // Grab key @ i
+    loadPtr(payloadFor(it), regT1);
+    loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2);
+    load32(BaseIndex(regT2, regT0, TimesEight), regT2);
+    store32(TrustedImm32(JSValue::CellTag), tagFor(dst));
+    store32(regT2, payloadFor(dst));
+
+    // Increment i
+    add32(TrustedImm32(1), regT0);
+    store32(regT0, intPayloadFor(i));
+
+    // Verify that i is valid:
+    loadPtr(payloadFor(base), regT0);
+
+    // Test base's structure
+    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+    callHasProperty.append(branchPtr(NotEqual, regT2, Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure)))));
+
+    // Test base's prototype chain
+    loadPtr(Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedPrototypeChain))), regT3);
+    loadPtr(Address(regT3, OBJECT_OFFSETOF(StructureChain, m_vector)), regT3);
+    addJump(branchTestPtr(Zero, Address(regT3)), target);
+
+    Label checkPrototype(this);
+    callHasProperty.append(branch32(Equal, Address(regT2, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::NullTag)));
+    loadPtr(Address(regT2, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
+    loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
+    callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3)));
+    addPtr(TrustedImm32(sizeof(Structure*)), regT3);
+    branchTestPtr(NonZero, Address(regT3)).linkTo(checkPrototype, this);
+
+    // Continue loop.
+    addJump(jump(), target);
+
+    // Slow case: Ask the object if i is valid.
+    callHasProperty.link(this);
+    loadPtr(addressFor(dst), regT1);
+    callOperation(operationHasProperty, regT0, regT1);
+
+    // Test for valid key.
+    addJump(branchTest32(NonZero, regT0), target);
+    jump().linkTo(begin, this);
+
+    // End of loop.
+    end.link(this);
 }

-void JIT::emitSlow_op_to_number(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emit_op_push_with_scope(Instruction* currentInstruction)
 {
-    linkSlowCase(iter);
+    emitLoad(currentInstruction[1].u.operand, regT1, regT0);
+    callOperation(operationPushWithScope, regT1, regT0);
+}

-    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_number);
-    slowPathCall.call();
+void JIT::emit_op_pop_scope(Instruction*)
+{
+    callOperation(operationPopScope);
 }

-void JIT::emit_op_to_string(Instruction* currentInstruction)
+void JIT::emit_op_to_number(Instruction* currentInstruction)
 {
     int dst = currentInstruction[1].u.operand;
     int src = currentInstruction[2].u.operand;

     emitLoad(src, regT1, regT0);

-    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
-    addSlowCase(branch8(NotEqual, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType)));
+    Jump isInt32 = branch32(Equal, regT1, TrustedImm32(JSValue::Int32Tag));
+    addSlowCase(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::LowestTag)));
+    isInt32.link(this);

     if (src != dst)
         emitStore(dst, regT1, regT0);
 }

-void JIT::emitSlow_op_to_string(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_to_number(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    linkSlowCase(iter); // Not JSCell.
-    linkSlowCase(iter); // Not JSString.
+    linkSlowCase(iter);

-    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_string);
+    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_number);
     slowPathCall.call();
 }

+void JIT::emit_op_push_name_scope(Instruction* currentInstruction)
+{
+    emitLoad(currentInstruction[2].u.operand, regT1, regT0);
+    callOperation(operationPushNameScope, &m_codeBlock->identifier(currentInstruction[1].u.operand), regT1, regT0, currentInstruction[3].u.operand);
+}
+
 void JIT::emit_op_catch(Instruction* currentInstruction)
 {
     move(TrustedImmPtr(m_vm), regT3);
     // operationThrow returns the callFrame for the handler.
     load32(Address(regT3, VM::callFrameForThrowOffset()), callFrameRegister);

-    load32(Address(regT3, VM::vmEntryFrameForThrowOffset()), regT0);
-    store32(regT0, Address(regT3, VM::topVMEntryFrameOffset()));
-
-    addPtr(TrustedImm32(stackPointerOffsetFor(codeBlock()) * sizeof(Register)), callFrameRegister, stackPointerRegister);
-
     // Now store the exception returned by operationThrow.
-    load32(Address(regT3, VM::exceptionOffset()), regT2);
-    move(TrustedImm32(JSValue::CellTag), regT1);
-
-    store32(TrustedImm32(0), Address(regT3, VM::exceptionOffset()));
+    load32(Address(regT3, VM::exceptionOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
+    load32(Address(regT3, VM::exceptionOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
+    store32(TrustedImm32(JSValue().payload()), Address(regT3, VM::exceptionOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+    store32(TrustedImm32(JSValue().tag()), Address(regT3, VM::exceptionOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));

     unsigned exception = currentInstruction[1].u.operand;
-    emitStore(exception, regT1, regT2);
-
-    load32(Address(regT2, Exception::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
-    load32(Address(regT2, Exception::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
-
-    unsigned thrownValue = currentInstruction[2].u.operand;
-    emitStore(thrownValue, regT1, regT0);
-}
-
-void JIT::emit_op_create_lexical_environment(Instruction* currentInstruction)
-{
-    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_lexical_environment);
-    slowPathCall.call();
-}
-
-void JIT::emit_op_get_parent_scope(Instruction* currentInstruction)
-{
-    int currentScope = currentInstruction[2].u.operand;
-    emitLoadPayload(currentScope, regT0);
-    loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
-    emitStoreCell(currentInstruction[1].u.operand, regT0);
+    emitStore(exception, regT1, regT0);
 }
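Note: emit_op_next_pname above revalidates the for-in cache by re-checking the base's structure and every structure in the cached prototype chain before trusting a cached key. A compact C++ sketch of that validation walk (hypothetical types; the real code falls back to operationHasProperty instead of returning false):

#include <cassert>
#include <vector>

// Hypothetical mirror of the cached-structure checks: the iterator
// snapshots the base's structure and prototype-chain structures; the
// cache stays valid only while those still match.
struct StructureSketch { int id; };

bool cacheStillValid(const StructureSketch* baseStructure,
                     const StructureSketch* cachedBaseStructure,
                     const std::vector<const StructureSketch*>& cachedChain,
                     const std::vector<const StructureSketch*>& currentChain)
{
    if (baseStructure != cachedBaseStructure)
        return false; // base transitioned: cached keys may be stale
    return cachedChain == currentChain; // any prototype change invalidates
}

int main()
{
    StructureSketch s1 { 1 }, s2 { 2 };
    std::vector<const StructureSketch*> chain { &s2 };
    assert(cacheStillValid(&s1, &s1, chain, chain));
    assert(!cacheStillValid(&s2, &s1, chain, chain));
}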
 void JIT::emit_op_switch_imm(Instruction* currentInstruction)
@@ -847,7 +946,7 @@
     // create jump table for switch destinations, track this switch statement.
     SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex);
     m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Immediate));
-    jumpTable->ensureCTITable();
+    jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());

     emitLoad(scrutinee, regT1, regT0);
     callOperation(operationSwitchImmWithUnknownKeyType, regT1, regT0, tableIndex);
@@ -863,7 +962,7 @@ void JIT::emit_op_switch_char(Instruction* currentInstruction)
     // create jump table for switch destinations, track this switch statement.
     SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex);
     m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Character));
-    jumpTable->ensureCTITable();
+    jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());

     emitLoad(scrutinee, regT1, regT0);
     callOperation(operationSwitchCharWithUnknownKeyType, regT1, regT0, tableIndex);
@@ -914,48 +1013,77 @@ void JIT::emit_op_enter(Instruction* currentInstruction)
     slowPathCall.call();
 }

-void JIT::emit_op_get_scope(Instruction* currentInstruction)
+void JIT::emit_op_create_activation(Instruction* currentInstruction)
+{
+    int activation = currentInstruction[1].u.operand;
+
+    Jump activationCreated = branch32(NotEqual, tagFor(activation), TrustedImm32(JSValue::EmptyValueTag));
+    callOperation(operationCreateActivation, 0);
+    emitStoreCell(activation, returnValueGPR);
+    activationCreated.link(this);
+}
+
+void JIT::emit_op_create_arguments(Instruction* currentInstruction)
 {
     int dst = currentInstruction[1].u.operand;
+
+    Jump argsCreated = branch32(NotEqual, tagFor(dst), TrustedImm32(JSValue::EmptyValueTag));
+    callOperation(operationCreateArguments);
+    emitStoreCell(dst, returnValueGPR);
+    emitStoreCell(unmodifiedArgumentsRegister(VirtualRegister(dst)).offset(), returnValueGPR);
+    argsCreated.link(this);
+}
+
+void JIT::emit_op_init_lazy_reg(Instruction* currentInstruction)
+{
+    int dst = currentInstruction[1].u.operand;
+
+    emitStore(dst, JSValue());
+}
+
+void JIT::emit_op_get_callee(Instruction* currentInstruction)
+{
+    int result = currentInstruction[1].u.operand;
+    WriteBarrierBase<JSCell>* cachedFunction = &currentInstruction[2].u.jsCell;
     emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT0);
-    loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT0);
-    emitStoreCell(dst, regT0);
+
+    loadPtr(cachedFunction, regT2);
+    addSlowCase(branchPtr(NotEqual, regT0, regT2));
+
+    move(TrustedImm32(JSValue::CellTag), regT1);
+    emitStore(result, regT1, regT0);
+}
+
+void JIT::emitSlow_op_get_callee(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    linkSlowCase(iter);
+
+    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_callee);
+    slowPathCall.call();
 }

 void JIT::emit_op_create_this(Instruction* currentInstruction)
 {
     int callee = currentInstruction[2].u.operand;
-    WriteBarrierBase<JSCell>* cachedFunction = &currentInstruction[4].u.jsCell;
     RegisterID calleeReg = regT0;
-    RegisterID rareDataReg = regT4;
     RegisterID resultReg = regT0;
     RegisterID allocatorReg = regT1;
     RegisterID structureReg = regT2;
-    RegisterID cachedFunctionReg = regT4;
     RegisterID scratchReg = regT3;

     emitLoadPayload(callee, calleeReg);
-    loadPtr(Address(calleeReg, JSFunction::offsetOfRareData()), rareDataReg);
-    addSlowCase(branchTestPtr(Zero, rareDataReg));
-    loadPtr(Address(rareDataReg, FunctionRareData::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg);
-    loadPtr(Address(rareDataReg, FunctionRareData::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureReg);
+    loadPtr(Address(calleeReg, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg);
+    loadPtr(Address(calleeReg, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureReg);
     addSlowCase(branchTestPtr(Zero, allocatorReg));

-    loadPtr(cachedFunction, cachedFunctionReg);
-    Jump hasSeenMultipleCallees = branchPtr(Equal, cachedFunctionReg, TrustedImmPtr(JSCell::seenMultipleCalleeObjects()));
-    addSlowCase(branchPtr(NotEqual, calleeReg, cachedFunctionReg));
-    hasSeenMultipleCallees.link(this);
-
     emitAllocateJSObject(allocatorReg, structureReg, resultReg, scratchReg);
     emitStoreCell(currentInstruction[1].u.operand, resultReg);
 }

 void JIT::emitSlow_op_create_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
-    linkSlowCase(iter); // doesn't have rare data
     linkSlowCase(iter); // doesn't have an allocation profile
     linkSlowCase(iter); // allocation failed
-    linkSlowCase(iter); // cached function didn't match

     JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_this);
     slowPathCall.call();
@@ -969,8 +1097,8 @@ void JIT::emit_op_to_this(Instruction* currentInstruction)

     emitLoad(thisRegister, regT3, regT2);

     addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::CellTag)));
-    addSlowCase(branch8(NotEqual, Address(regT2, JSCell::typeInfoTypeOffset()), TrustedImm32(FinalObjectType)));
-    loadPtr(Address(regT2, JSCell::structureIDOffset()), regT0);
+    loadPtr(Address(regT2, JSCell::structureOffset()), regT0);
+    addSlowCase(branch8(NotEqual, Address(regT0, Structure::typeInfoTypeOffset()), TrustedImm32(FinalObjectType)));
     loadPtr(cachedStructure, regT2);
     addSlowCase(branchPtr(NotEqual, regT0, regT2));
 }
@@ -984,19 +1112,6 @@ void JIT::emitSlow_op_to_this(Instruction* currentInstruction, Vector<SlowCaseEn
     slowPathCall.call();
 }

-void JIT::emit_op_check_tdz(Instruction* currentInstruction)
-{
-    emitLoadTag(currentInstruction[1].u.operand, regT0);
-    addSlowCase(branch32(Equal, regT0, TrustedImm32(JSValue::EmptyValueTag)));
-}
-
-void JIT::emitSlow_op_check_tdz(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
-    linkSlowCase(iter);
-    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_throw_tdz_error);
-    slowPathCall.call();
-}
-
 void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
 {
     load32(m_vm->enabledProfilerAddress(), regT0);
@@ -1015,281 +1130,63 @@ void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
     profilerDone.link(this);
 }

-void JIT::emit_op_has_structure_property(Instruction* currentInstruction)
+void JIT::emit_op_get_arguments_length(Instruction* currentInstruction)
 {
     int dst = currentInstruction[1].u.operand;
-    int base = currentInstruction[2].u.operand;
-    int enumerator = currentInstruction[4].u.operand;
-
-    emitLoadPayload(base, regT0);
-    emitJumpSlowCaseIfNotJSCell(base);
-
-    emitLoadPayload(enumerator, regT1);
-
-    load32(Address(regT0, JSCell::structureIDOffset()), regT0);
-    addSlowCase(branch32(NotEqual, regT0, Address(regT1, JSPropertyNameEnumerator::cachedStructureIDOffset())));
-
-    move(TrustedImm32(1), regT0);
-    emitStoreBool(dst, regT0);
+    int argumentsRegister = currentInstruction[2].u.operand;
+    addSlowCase(branch32(NotEqual, tagFor(argumentsRegister), TrustedImm32(JSValue::EmptyValueTag)));
+    load32(payloadFor(JSStack::ArgumentCount), regT0);
+    sub32(TrustedImm32(1), regT0);
+    emitStoreInt32(dst, regT0);
 }
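Note: op_get_callee above caches the JSFunction pointer in the instruction stream and drops to a slow path whenever a different callee shows up. A minimal C++ sketch of that one-entry inline cache (hypothetical types, not JSC API):

#include <cassert>

// Hypothetical one-entry callee cache, mirroring emit_op_get_callee: the
// fast path only succeeds while the same function object keeps arriving.
struct FunctionSketch {};

struct CalleeCache {
    const FunctionSketch* cached = nullptr;

    // Returns true on the fast path; false is "linkSlowCase" territory,
    // where the real code calls slow_path_get_callee to update the cache.
    bool check(const FunctionSketch* callee) const
    {
        return callee == cached;
    }
};

int main()
{
    FunctionSketch f, g;
    CalleeCache cache { &f };
    assert(cache.check(&f));  // hot path: cached callee matches
    assert(!cache.check(&g)); // different callee: slow path repatches
}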
-void JIT::privateCompileHasIndexedProperty(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
-{
-    Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;
-
-    PatchableJump badType;
-
-    // FIXME: Add support for other types like TypedArrays and Arguments.
-    // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034.
-    JumpList slowCases = emitLoadForArrayMode(currentInstruction, arrayMode, badType);
-    move(TrustedImm32(1), regT0);
-    Jump done = jump();
-
-    LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
-
-    patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
-    patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
-
-    patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
-
-    byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
-        m_codeBlock, patchBuffer,
-        ("Baseline has_indexed_property stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
-
-    RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relink(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
-    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(operationHasIndexedPropertyGeneric));
-}
-
-void JIT::emit_op_has_indexed_property(Instruction* currentInstruction)
+void JIT::emitSlow_op_get_arguments_length(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
+    linkSlowCase(iter);
     int dst = currentInstruction[1].u.operand;
     int base = currentInstruction[2].u.operand;
-    int property = currentInstruction[3].u.operand;
-    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
-    ByValInfo* byValInfo = m_codeBlock->addByValInfo();
-
-    emitLoadPayload(base, regT0);
-    emitJumpSlowCaseIfNotJSCell(base);
-
-    emitLoadPayload(property, regT1);
-
-    // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
-    // We check the value as if it was a uint32 against the m_vectorLength - which will always fail if
-    // the number was signed since m_vectorLength is always less than intmax (since the total allocation
-    // size is always less than 4Gb). As such zero extending will have been correct (and extending the value
-    // to 64-bits is necessary since it's used in the address calculation). We zero extend rather than sign
-    // extending since it makes it easier to re-tag the value in the slow case.
-    zeroExtend32ToPtr(regT1, regT1);
-
-    emitArrayProfilingSiteWithCell(regT0, regT2, profile);
-    and32(TrustedImm32(IndexingShapeMask), regT2);
-
-    JITArrayMode mode = chooseArrayMode(profile);
-    PatchableJump badType;
-
-    // FIXME: Add support for other types like TypedArrays and Arguments.
-    // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034.
-    JumpList slowCases = emitLoadForArrayMode(currentInstruction, mode, badType);
-    move(TrustedImm32(1), regT0);
-
-    addSlowCase(badType);
-    addSlowCase(slowCases);
-
-    Label done = label();
-
-    emitStoreBool(dst, regT0);
-
-    Label nextHotPath = label();
-
-    m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, PatchableJump(), badType, mode, profile, done, nextHotPath));
+    callOperation(operationGetArgumentsLength, dst, base);
 }

-void JIT::emitSlow_op_has_indexed_property(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction)
 {
     int dst = currentInstruction[1].u.operand;
-    int base = currentInstruction[2].u.operand;
+    int argumentsRegister = currentInstruction[2].u.operand;
     int property = currentInstruction[3].u.operand;
-    ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;
-
-    linkSlowCaseIfNotJSCell(iter, base); // base cell check
-    linkSlowCase(iter); // base array check
-    linkSlowCase(iter); // vector length check
-    linkSlowCase(iter); // empty value
-
-    Label slowPath = label();
-
-    emitLoad(base, regT1, regT0);
-    emitLoad(property, regT3, regT2);
-    Call call = callOperation(operationHasIndexedPropertyDefault, dst, regT1, regT0, regT3, regT2, byValInfo);
-
-    m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
-    m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
-    m_byValInstructionIndex++;
-}
-
-void JIT::emit_op_get_direct_pname(Instruction* currentInstruction)
-{
-    int dst = currentInstruction[1].u.operand;
-    int base = currentInstruction[2].u.operand;
-    int index = currentInstruction[4].u.operand;
-    int enumerator = currentInstruction[5].u.operand;
-
-    // Check that base is a cell
-    emitLoadPayload(base, regT0);
-    emitJumpSlowCaseIfNotJSCell(base);
-
-    // Check the structure
-    emitLoadPayload(enumerator, regT1);
-    load32(Address(regT0, JSCell::structureIDOffset()), regT2);
-    addSlowCase(branch32(NotEqual, regT2, Address(regT1, JSPropertyNameEnumerator::cachedStructureIDOffset())));
-
-    // Compute the offset
-    emitLoadPayload(index, regT2);
-    // If index is less than the enumerator's cached inline storage, then it's an inline access
-    Jump outOfLineAccess = branch32(AboveOrEqual, regT2, Address(regT1, JSPropertyNameEnumerator::cachedInlineCapacityOffset()));
-    addPtr(TrustedImm32(JSObject::offsetOfInlineStorage()), regT0);
-    load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
-    load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
+    addSlowCase(branch32(NotEqual, tagFor(argumentsRegister), TrustedImm32(JSValue::EmptyValueTag)));
+    emitLoad(property, regT1, regT2);
+    addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+    add32(TrustedImm32(1), regT2);
+    // regT2 now contains the integer index of the argument we want, including this
+    load32(payloadFor(JSStack::ArgumentCount), regT3);
+    addSlowCase(branch32(AboveOrEqual, regT2, regT3));

-    Jump done = jump();
-
-    // Otherwise it's out of line
-    outOfLineAccess.link(this);
-    loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
-    sub32(Address(regT1, JSPropertyNameEnumerator::cachedInlineCapacityOffset()), regT2);
-    neg32(regT2);
-    int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue);
-    load32(BaseIndex(regT0, regT2, TimesEight, offsetOfFirstProperty + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
-    load32(BaseIndex(regT0, regT2, TimesEight, offsetOfFirstProperty + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
-
-    done.link(this);
+    loadPtr(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT0);
+    loadPtr(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT1);
     emitValueProfilingSite();
     emitStore(dst, regT1, regT0);
 }

-void JIT::emitSlow_op_get_direct_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
-    int base = currentInstruction[2].u.operand;
-    linkSlowCaseIfNotJSCell(iter, base);
-    linkSlowCase(iter);
-
-    JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_direct_pname);
-    slowPathCall.call();
-}
-
-void JIT::emit_op_enumerator_structure_pname(Instruction* currentInstruction)
+void JIT::emitSlow_op_get_argument_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     int dst = currentInstruction[1].u.operand;
-    int enumerator = currentInstruction[2].u.operand;
-    int index = currentInstruction[3].u.operand;
-
-    emitLoadPayload(index, regT0);
-    emitLoadPayload(enumerator, regT1);
-    Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endStructurePropertyIndexOffset()));
-
-    move(TrustedImm32(JSValue::NullTag), regT2);
-    move(TrustedImm32(0), regT0);
-
-    Jump done = jump();
-    inBounds.link(this);
-
-    loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1);
-    loadPtr(BaseIndex(regT1, regT0, timesPtr()), regT0);
-    move(TrustedImm32(JSValue::CellTag), regT2);
-
-    done.link(this);
-    emitStore(dst, regT2, regT0);
-}
-
-void JIT::emit_op_enumerator_generic_pname(Instruction* currentInstruction)
-{
-    int dst = currentInstruction[1].u.operand;
-    int enumerator = currentInstruction[2].u.operand;
-    int index = currentInstruction[3].u.operand;
-
-    emitLoadPayload(index, regT0);
-    emitLoadPayload(enumerator, regT1);
-    Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endGenericPropertyIndexOffset()));
+    int arguments = currentInstruction[2].u.operand;
+    int property = currentInstruction[3].u.operand;

-    move(TrustedImm32(JSValue::NullTag), regT2);
-    move(TrustedImm32(0), regT0);
+    linkSlowCase(iter);
+    Jump skipArgumentsCreation = jump();

-    Jump done = jump();
-    inBounds.link(this);
+    linkSlowCase(iter);
+    linkSlowCase(iter);

-    loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1);
-    loadPtr(BaseIndex(regT1, regT0, timesPtr()), regT0);
-    move(TrustedImm32(JSValue::CellTag), regT2);
+    callOperation(operationCreateArguments);
+    emitStoreCell(arguments, returnValueGPR);
+    emitStoreCell(unmodifiedArgumentsRegister(VirtualRegister(arguments)).offset(), returnValueGPR);

-    done.link(this);
-    emitStore(dst, regT2, regT0);
-}
-
-void JIT::emit_op_profile_type(Instruction* currentInstruction)
-{
-    TypeLocation* cachedTypeLocation = currentInstruction[2].u.location;
-    int valueToProfile = currentInstruction[1].u.operand;
-
-    // Load payload in T0. Load tag in T3.
-    emitLoadPayload(valueToProfile, regT0);
-    emitLoadTag(valueToProfile, regT3);
-
-    JumpList jumpToEnd;
-
-    jumpToEnd.append(branch32(Equal, regT3, TrustedImm32(JSValue::EmptyValueTag)));
-
-    // Compile in a predictive type check, if possible, to see if we can skip writing to the log.
- // These typechecks are inlined to match those of the 32-bit JSValue type checks. - if (cachedTypeLocation->m_lastSeenType == TypeUndefined) - jumpToEnd.append(branch32(Equal, regT3, TrustedImm32(JSValue::UndefinedTag))); - else if (cachedTypeLocation->m_lastSeenType == TypeNull) - jumpToEnd.append(branch32(Equal, regT3, TrustedImm32(JSValue::NullTag))); - else if (cachedTypeLocation->m_lastSeenType == TypeBoolean) - jumpToEnd.append(branch32(Equal, regT3, TrustedImm32(JSValue::BooleanTag))); - else if (cachedTypeLocation->m_lastSeenType == TypeMachineInt) - jumpToEnd.append(branch32(Equal, regT3, TrustedImm32(JSValue::Int32Tag))); - else if (cachedTypeLocation->m_lastSeenType == TypeNumber) { - jumpToEnd.append(branch32(Below, regT3, TrustedImm32(JSValue::LowestTag))); - jumpToEnd.append(branch32(Equal, regT3, TrustedImm32(JSValue::Int32Tag))); - } else if (cachedTypeLocation->m_lastSeenType == TypeString) { - Jump isNotCell = branch32(NotEqual, regT3, TrustedImm32(JSValue::CellTag)); - jumpToEnd.append(branch8(Equal, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType))); - isNotCell.link(this); - } - - // Load the type profiling log into T2. - TypeProfilerLog* cachedTypeProfilerLog = m_vm->typeProfilerLog(); - move(TrustedImmPtr(cachedTypeProfilerLog), regT2); - - // Load the next log entry into T1. - loadPtr(Address(regT2, TypeProfilerLog::currentLogEntryOffset()), regT1); - - // Store the JSValue onto the log entry. - store32(regT0, Address(regT1, TypeProfilerLog::LogEntry::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); - store32(regT3, Address(regT1, TypeProfilerLog::LogEntry::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); - - // Store the structureID of the cell if argument is a cell, otherwise, store 0 on the log entry. - Jump notCell = branch32(NotEqual, regT3, TrustedImm32(JSValue::CellTag)); - load32(Address(regT0, JSCell::structureIDOffset()), regT0); - store32(regT0, Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset())); - Jump skipNotCell = jump(); - notCell.link(this); - store32(TrustedImm32(0), Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset())); - skipNotCell.link(this); - - // Store the typeLocation on the log entry. - move(TrustedImmPtr(cachedTypeLocation), regT0); - store32(regT0, Address(regT1, TypeProfilerLog::LogEntry::locationOffset())); - - // Increment the current log entry. - addPtr(TrustedImm32(sizeof(TypeProfilerLog::LogEntry)), regT1); - store32(regT1, Address(regT2, TypeProfilerLog::currentLogEntryOffset())); - jumpToEnd.append(branchPtr(NotEqual, regT1, TrustedImmPtr(cachedTypeProfilerLog->logEndPtr()))); - // Clear the log if we're at the end of the log. - callOperation(operationProcessTypeProfilerLog); - - jumpToEnd.link(this); + skipArgumentsCreation.link(this); + emitLoad(arguments, regT1, regT0); + emitLoad(property, regT3, regT2); + callOperation(WithProfile, operationGetByValGeneric, dst, regT1, regT0, regT3, regT2); } } // namespace JSC diff --git a/Source/JavaScriptCore/jit/JITOperationWrappers.h b/Source/JavaScriptCore/jit/JITOperationWrappers.h new file mode 100644 index 000000000..f9624fdbc --- /dev/null +++ b/Source/JavaScriptCore/jit/JITOperationWrappers.h @@ -0,0 +1,413 @@ +/* + * Copyright (C) 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JITOperationWrappers_h +#define JITOperationWrappers_h + +#include "JITOperations.h" +#include <wtf/Compiler.h> +#include <wtf/InlineASM.h> + +#if COMPILER(MSVC) +#include <intrin.h> +#endif + +namespace JSC { + +#if CPU(MIPS) +#if WTF_MIPS_PIC +#define LOAD_FUNCTION_TO_T9(function) \ + ".set noreorder" "\n" \ + ".cpload $25" "\n" \ + ".set reorder" "\n" \ + "la $t9, " LOCAL_REFERENCE(function) "\n" +#else +#define LOAD_FUNCTION_TO_T9(function) "" "\n" +#endif +#endif + +#if COMPILER(GCC) && CPU(X86_64) + +#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, register) \ + asm( \ + ".globl " SYMBOL_STRING(function) "\n" \ + HIDE_SYMBOL(function) "\n" \ + SYMBOL_STRING(function) ":" "\n" \ + "mov (%rsp), %" STRINGIZE(register) "\n" \ + "jmp " LOCAL_REFERENCE(function##WithReturnAddress) "\n" \ + ); +#define _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, rsi) +#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, rsi) +#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, rcx) +#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, rcx) +#define _V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJJI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, r8) + +#elif COMPILER(GCC) && CPU(X86) + +#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, offset) \ + asm( \ + ".text" "\n" \ + ".globl " SYMBOL_STRING(function) "\n" \ + HIDE_SYMBOL(function) "\n" \ + SYMBOL_STRING(function) ":" "\n" \ + "mov (%esp), %eax\n" \ + "mov %eax, " STRINGIZE(offset) "(%esp)\n" \ + "jmp " LOCAL_REFERENCE(function##WithReturnAddress) "\n" \ + ); +#define _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, 8) +#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, 8) +#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, 16) +#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, 20) +#define _V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJJI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, 28) + +#elif CPU(ARM64) + +#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, register) \ + asm ( \ 
+ ".text" "\n" \ + ".align 2" "\n" \ + ".globl " SYMBOL_STRING(function) "\n" \ + HIDE_SYMBOL(function) "\n" \ + SYMBOL_STRING(function) ":" "\n" \ + "mov " STRINGIZE(register) ", lr" "\n" \ + "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \ + ); + +#define _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, x1) +#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, x1) +#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, x3) +#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, x3) +#define _V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJCI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, x4) + +#elif COMPILER(GCC) && CPU(ARM_THUMB2) + +#define _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \ + asm ( \ + ".text" "\n" \ + ".align 2" "\n" \ + ".globl " SYMBOL_STRING(function) "\n" \ + HIDE_SYMBOL(function) "\n" \ + ".thumb" "\n" \ + ".thumb_func " THUMB_FUNC_PARAM(function) "\n" \ + SYMBOL_STRING(function) ":" "\n" \ + "mov a2, lr" "\n" \ + "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \ + ); + +#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \ + _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) + +#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) \ + asm ( \ + ".text" "\n" \ + ".align 2" "\n" \ + ".globl " SYMBOL_STRING(function) "\n" \ + HIDE_SYMBOL(function) "\n" \ + ".thumb" "\n" \ + ".thumb_func " THUMB_FUNC_PARAM(function) "\n" \ + SYMBOL_STRING(function) ":" "\n" \ + "mov a4, lr" "\n" \ + "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \ + ); + +// EncodedJSValue in JSVALUE32_64 is a 64-bit integer. When being compiled in ARM EABI, it must be aligned even-numbered register (r0, r2 or [sp]). +// As a result, return address will be at a 4-byte further location in the following cases. 
+#if COMPILER_SUPPORTS(EABI) && CPU(ARM) +#define INSTRUCTION_STORE_RETURN_ADDRESS_EJI "str lr, [sp, #4]" +#define INSTRUCTION_STORE_RETURN_ADDRESS_EJJI "str lr, [sp, #12]" +#else +#define INSTRUCTION_STORE_RETURN_ADDRESS_EJI "str lr, [sp, #0]" +#define INSTRUCTION_STORE_RETURN_ADDRESS_EJJI "str lr, [sp, #8]" +#endif + +#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) \ + asm ( \ + ".text" "\n" \ + ".align 2" "\n" \ + ".globl " SYMBOL_STRING(function) "\n" \ + HIDE_SYMBOL(function) "\n" \ + ".thumb" "\n" \ + ".thumb_func " THUMB_FUNC_PARAM(function) "\n" \ + SYMBOL_STRING(function) ":" "\n" \ + INSTRUCTION_STORE_RETURN_ADDRESS_EJI "\n" \ + "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \ + ); + +#define _V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJJI(function) \ + asm ( \ + ".text" "\n" \ + ".align 2" "\n" \ + ".globl " SYMBOL_STRING(function) "\n" \ + HIDE_SYMBOL(function) "\n" \ + ".thumb" "\n" \ + ".thumb_func " THUMB_FUNC_PARAM(function) "\n" \ + SYMBOL_STRING(function) ":" "\n" \ + INSTRUCTION_STORE_RETURN_ADDRESS_EJJI "\n" \ + "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \ + ); + +#elif COMPILER(GCC) && CPU(ARM_TRADITIONAL) + +#define _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \ + asm ( \ + ".text" "\n" \ + ".globl " SYMBOL_STRING(function) "\n" \ + HIDE_SYMBOL(function) "\n" \ + INLINE_ARM_FUNCTION(function) \ + SYMBOL_STRING(function) ":" "\n" \ + "mov a2, lr" "\n" \ + "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \ + ); + +#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \ + _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) + +#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) \ + asm ( \ + ".text" "\n" \ + ".globl " SYMBOL_STRING(function) "\n" \ + HIDE_SYMBOL(function) "\n" \ + INLINE_ARM_FUNCTION(function) \ + SYMBOL_STRING(function) ":" "\n" \ + "mov a4, lr" "\n" \ + "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \ + ); + +// EncodedJSValue in JSVALUE32_64 is a 64-bit integer. When compiled for the ARM EABI, it must occupy an aligned, even-numbered register pair (starting at r0 or r2) or an 8-byte-aligned [sp] slot. +// As a result, the return address is stored four bytes further up the stack in the following cases.
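+// (Editorial sketch, continuing the note above, this time for the EJJI shape,
+// void op(ExecState*, EncodedJSValue, EncodedJSValue, StringImpl*): under EABI the
+// arguments occupy r0, r2:r3, [sp, #0]..[sp, #7] and [sp, #8], putting the appended
+// ReturnAddressPtr at [sp, #12]; without EABI alignment the second value splits
+// across r3 and [sp, #0], StringImpl* sits at [sp, #4], and ReturnAddressPtr lands
+// at [sp, #8]. Hence the two INSTRUCTION_STORE_RETURN_ADDRESS_EJJI definitions below.)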
+#if COMPILER_SUPPORTS(EABI) && CPU(ARM) +#define INSTRUCTION_STORE_RETURN_ADDRESS_EJI "str lr, [sp, #4]" +#define INSTRUCTION_STORE_RETURN_ADDRESS_EJJI "str lr, [sp, #12]" +#else +#define INSTRUCTION_STORE_RETURN_ADDRESS_EJI "str lr, [sp, #0]" +#define INSTRUCTION_STORE_RETURN_ADDRESS_EJJI "str lr, [sp, #8]" +#endif + +#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) \ + asm ( \ + ".text" "\n" \ + ".globl " SYMBOL_STRING(function) "\n" \ + HIDE_SYMBOL(function) "\n" \ + INLINE_ARM_FUNCTION(function) \ + SYMBOL_STRING(function) ":" "\n" \ + INSTRUCTION_STORE_RETURN_ADDRESS_EJI "\n" \ + "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \ + ); + +#define _V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJJI(function) \ + asm ( \ + ".text" "\n" \ + ".globl " SYMBOL_STRING(function) "\n" \ + HIDE_SYMBOL(function) "\n" \ + INLINE_ARM_FUNCTION(function) \ + SYMBOL_STRING(function) ":" "\n" \ + INSTRUCTION_STORE_RETURN_ADDRESS_EJJI "\n" \ + "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \ + ); + +#elif COMPILER(GCC) && CPU(MIPS) + +#define _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \ + asm( \ + ".text" "\n" \ + ".globl " SYMBOL_STRING(function) "\n" \ + HIDE_SYMBOL(function) "\n" \ + SYMBOL_STRING(function) ":" "\n" \ + LOAD_FUNCTION_TO_T9(function##WithReturnAddress) \ + "move $a1, $ra" "\n" \ + "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \ + ); + +#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \ + _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) + +#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) \ + asm( \ + ".text" "\n" \ + ".globl " SYMBOL_STRING(function) "\n" \ + HIDE_SYMBOL(function) "\n" \ + SYMBOL_STRING(function) ":" "\n" \ + LOAD_FUNCTION_TO_T9(function##WithReturnAddress) \ + "move $a3, $ra" "\n" \ + "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \ + ); + +#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) \ + asm( \ + ".text" "\n" \ + ".globl " SYMBOL_STRING(function) "\n" \ + HIDE_SYMBOL(function) "\n" \ + SYMBOL_STRING(function) ":" "\n" \ + LOAD_FUNCTION_TO_T9(function##WithReturnAddress) \ + "sw $ra, 20($sp)" "\n" \ + "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \ + ); + +#define _V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJJI(function) \ + asm( \ + ".text" "\n" \ + ".globl " SYMBOL_STRING(function) "\n" \ + HIDE_SYMBOL(function) "\n" \ + SYMBOL_STRING(function) ":" "\n" \ + LOAD_FUNCTION_TO_T9(function##WithReturnAddress) \ + "sw $ra, 28($sp)" "\n" \ + "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \ + ); + +#elif COMPILER(GCC) && CPU(SH4) + +#define SH4_SCRATCH_REGISTER "r11" + +#define _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \ + asm( \ + ".text" "\n" \ + ".globl " SYMBOL_STRING(function) "\n" \ + HIDE_SYMBOL(function) "\n" \ + SYMBOL_STRING(function) ":" "\n" \ + "sts pr, r5" "\n" \ + "bra " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \ + "nop" "\n" \ + ); + +#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \ + _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) + +#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) \ + asm( \ + ".text" "\n" \ + ".globl " SYMBOL_STRING(function) "\n" \ + HIDE_SYMBOL(function) "\n" \ + SYMBOL_STRING(function) ":" "\n" \ + "sts pr, r7" "\n" \ + "mov.l 2f, " SH4_SCRATCH_REGISTER "\n" \ + "braf " SH4_SCRATCH_REGISTER "\n" \ + "nop" "\n" \ + "1: .balign 4" "\n" \ + "2: .long " LOCAL_REFERENCE(function) "WithReturnAddress-1b" "\n" \ + ); + +#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, offset, 
scratch) \ + asm( \ + ".text" "\n" \ + ".globl " SYMBOL_STRING(function) "\n" \ + HIDE_SYMBOL(function) "\n" \ + SYMBOL_STRING(function) ":" "\n" \ + "sts pr, " scratch "\n" \ + "mov.l " scratch ", @(" STRINGIZE(offset) ", r15)" "\n" \ + "mov.l 2f, " scratch "\n" \ + "braf " scratch "\n" \ + "nop" "\n" \ + "1: .balign 4" "\n" \ + "2: .long " LOCAL_REFERENCE(function) "WithReturnAddress-1b" "\n" \ + ); + +#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, 0, SH4_SCRATCH_REGISTER) +#define _V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJJI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, 8, SH4_SCRATCH_REGISTER) + +#elif COMPILER(MSVC) && CPU(X86) + +#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) \ +__declspec(naked) EncodedJSValue JIT_OPERATION function(ExecState*, EncodedJSValue, StringImpl*) \ +{ \ + __asm { \ + __asm mov eax, [esp] \ + __asm mov [esp + 20], eax \ + __asm jmp function##WithReturnAddress \ + } \ +} + +#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) \ +__declspec(naked) EncodedJSValue JIT_OPERATION function(ExecState*, JSCell*, StringImpl*) \ +{ \ + __asm { \ + __asm mov eax, [esp] \ + __asm mov [esp + 16], eax \ + __asm jmp function##WithReturnAddress \ + } \ +} + +#define _V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJCI(function) \ +__declspec(naked) void JIT_OPERATION function(ExecState*, EncodedJSValue, JSCell*, StringImpl*) \ +{ \ + __asm { \ + __asm mov eax, [esp] \ + __asm mov [esp + 24], eax \ + __asm jmp function##WithReturnAddress \ + } \ +} + +#define _V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJJI(function) \ +__declspec(naked) void JIT_OPERATION function(ExecState*, EncodedJSValue, EncodedJSValue, StringImpl*) \ +{ \ + __asm { \ + __asm mov eax, [esp] \ + __asm mov [esp + 28], eax \ + __asm jmp function##WithReturnAddress \ + } \ +} + +#elif COMPILER(MSVC) + +#define _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \ + void* JIT_OPERATION function(ExecState* exec) { return function##WithReturnAddress(exec, ReturnAddressPtr(*(void**)_AddressOfReturnAddress())); } + +#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \ + EncodedJSValue JIT_OPERATION function(ExecState* exec) { return function##WithReturnAddress(exec, ReturnAddressPtr(*(void**)_AddressOfReturnAddress())); } + +#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) \ + EncodedJSValue JIT_OPERATION function(ExecState* exec, JSCell* cell, StringImpl* string) { return function##WithReturnAddress(exec, cell, string, ReturnAddressPtr(*(void**)_AddressOfReturnAddress())); } + +#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) \ + EncodedJSValue JIT_OPERATION function(ExecState* exec, EncodedJSValue value, StringImpl* string) { return function##WithReturnAddress(exec, value, string, ReturnAddressPtr(*(void**)_AddressOfReturnAddress())); } + +#define _V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJJI(function) \ + void JIT_OPERATION function(ExecState* exec, EncodedJSValue value, EncodedJSValue baseValue, StringImpl* string) { return function##WithReturnAddress(exec, value, baseValue, string, ReturnAddressPtr(*(void**)_AddressOfReturnAddress())); } + +#endif + +#define P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \ +void* JIT_OPERATION function##WithReturnAddress(ExecState*, ReturnAddressPtr) REFERENCED_FROM_ASM WTF_INTERNAL; \ +_P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) + +#define J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \ +EncodedJSValue JIT_OPERATION 
function##WithReturnAddress(ExecState*, ReturnAddressPtr) REFERENCED_FROM_ASM WTF_INTERNAL; \ +_J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) + +#define J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) \ +EncodedJSValue JIT_OPERATION function##WithReturnAddress(ExecState*, JSCell*, StringImpl*, ReturnAddressPtr) REFERENCED_FROM_ASM WTF_INTERNAL; \ +_J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) + +#define J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) \ +EncodedJSValue JIT_OPERATION function##WithReturnAddress(ExecState*, EncodedJSValue, StringImpl*, ReturnAddressPtr) REFERENCED_FROM_ASM WTF_INTERNAL; \ +_J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) + +#define V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJJI(function) \ +void JIT_OPERATION function##WithReturnAddress(ExecState*, EncodedJSValue, EncodedJSValue, StringImpl*, ReturnAddressPtr) REFERENCED_FROM_ASM WTF_INTERNAL; \ +_V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJJI(function) + +} // namespace JSC + +#endif // JITOperationWrappers_h + diff --git a/Source/JavaScriptCore/jit/JITOperations.cpp b/Source/JavaScriptCore/jit/JITOperations.cpp index b59a028e1..578d15dac 100644 --- a/Source/JavaScriptCore/jit/JITOperations.cpp +++ b/Source/JavaScriptCore/jit/JITOperations.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013-2015 Apple Inc. All rights reserved. + * Copyright (C) 2013 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -24,41 +24,32 @@ */ #include "config.h" -#include "JITOperations.h" - #if ENABLE(JIT) +#include "JITOperations.h" +#include "Arguments.h" #include "ArrayConstructor.h" +#include "CallFrameInlines.h" +#include "CommonSlowPaths.h" #include "DFGCompilationMode.h" #include "DFGDriver.h" #include "DFGOSREntry.h" -#include "DFGThunks.h" #include "DFGWorklist.h" -#include "Debugger.h" -#include "DirectArguments.h" #include "Error.h" -#include "ErrorHandlingScope.h" -#include "ExceptionFuzz.h" #include "GetterSetter.h" #include "HostCallReturnValue.h" #include "JIT.h" +#include "JITOperationWrappers.h" #include "JITToDFGDeferredCompilationCallback.h" -#include "JSCInlines.h" #include "JSGlobalObjectFunctions.h" -#include "JSLexicalEnvironment.h" -#include "JSPropertyNameEnumerator.h" +#include "JSNameScope.h" +#include "JSPropertyNameIterator.h" #include "JSStackInlines.h" #include "JSWithScope.h" -#include "LegacyProfiler.h" #include "ObjectConstructor.h" -#include "PropertyName.h" +#include "Operations.h" #include "Repatch.h" #include "RepatchBuffer.h" -#include "ScopedArguments.h" -#include "TestRunnerUtils.h" -#include "TypeProfilerLog.h" -#include "VMInlines.h" -#include <wtf/InlineASM.h> namespace JSC { @@ -80,34 +71,33 @@ void * _ReturnAddress(void); #endif -void JIT_OPERATION operationThrowStackOverflowError(ExecState* exec, CodeBlock* codeBlock) +void JIT_OPERATION operationStackCheck(ExecState* exec, CodeBlock* codeBlock) { // We pass in our own code block, because the callframe hasn't been populated. 
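+ // (Editorial sketch: this is called from the JIT prologue before the frame header
+ // is written, so exec->codeBlock() cannot be trusted yet; the compiling JIT
+ // presumably materializes its own CodeBlock* as an immediate argument, in effect:
+ //     if (stackPointer < stackLimit) operationStackCheck(exec, thisCodeBlock);
+ // where thisCodeBlock is a hypothetical name for that compile-time constant. For
+ // the same reason the frame is not yet walkable, which is why the overflow
+ // exception below is attributed to the caller frame, falling back to exec only
+ // when no caller exists.)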
VM* vm = codeBlock->vm(); - - VMEntryFrame* vmEntryFrame = vm->topVMEntryFrame; - CallFrame* callerFrame = exec->callerFrame(vmEntryFrame); + CallFrame* callerFrame = exec->callerFrameSkippingVMEntrySentinel(); if (!callerFrame) callerFrame = exec; - NativeCallFrameTracerWithRestore tracer(vm, vmEntryFrame, callerFrame); - ErrorHandlingScope errorScope(*vm); - vm->throwException(callerFrame, createStackOverflowError(callerFrame)); + NativeCallFrameTracer tracer(vm, callerFrame); + + JSStack& stack = vm->interpreter->stack(); + + if (UNLIKELY(!stack.grow(&exec->registers()[virtualRegisterForLocal(codeBlock->frameRegisterCount()).offset()]))) + vm->throwException(callerFrame, createStackOverflowError(callerFrame)); } int32_t JIT_OPERATION operationCallArityCheck(ExecState* exec) { VM* vm = &exec->vm(); - VMEntryFrame* vmEntryFrame = vm->topVMEntryFrame; - CallFrame* callerFrame = exec->callerFrame(vmEntryFrame); + CallFrame* callerFrame = exec->callerFrameSkippingVMEntrySentinel(); + NativeCallFrameTracer tracer(vm, callerFrame); JSStack& stack = vm->interpreter->stack(); int32_t missingArgCount = CommonSlowPaths::arityCheckFor(exec, &stack, CodeForCall); - if (missingArgCount < 0) { - NativeCallFrameTracerWithRestore tracer(vm, vmEntryFrame, callerFrame); - throwStackOverflowError(callerFrame); - } + if (missingArgCount < 0) + vm->throwException(callerFrame, createStackOverflowError(callerFrame)); return missingArgCount; } @@ -115,94 +105,81 @@ int32_t JIT_OPERATION operationCallArityCheck(ExecState* exec) int32_t JIT_OPERATION operationConstructArityCheck(ExecState* exec) { VM* vm = &exec->vm(); - VMEntryFrame* vmEntryFrame = vm->topVMEntryFrame; - CallFrame* callerFrame = exec->callerFrame(vmEntryFrame); + CallFrame* callerFrame = exec->callerFrameSkippingVMEntrySentinel(); + NativeCallFrameTracer tracer(vm, callerFrame); JSStack& stack = vm->interpreter->stack(); int32_t missingArgCount = CommonSlowPaths::arityCheckFor(exec, &stack, CodeForConstruct); - if (missingArgCount < 0) { - NativeCallFrameTracerWithRestore tracer(vm, vmEntryFrame, callerFrame); - throwStackOverflowError(callerFrame); - } + if (missingArgCount < 0) + vm->throwException(callerFrame, createStackOverflowError(callerFrame)); return missingArgCount; } -EncodedJSValue JIT_OPERATION operationGetById(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue base, UniquedStringImpl* uid) +EncodedJSValue JIT_OPERATION operationGetById(ExecState* exec, StructureStubInfo*, EncodedJSValue base, StringImpl* uid) { VM* vm = &exec->vm(); NativeCallFrameTracer tracer(vm, exec); - stubInfo->tookSlowPath = true; - JSValue baseValue = JSValue::decode(base); PropertySlot slot(baseValue); - Identifier ident = Identifier::fromUid(vm, uid); + Identifier ident(vm, uid); return JSValue::encode(baseValue.get(exec, ident, slot)); } -EncodedJSValue JIT_OPERATION operationGetByIdGeneric(ExecState* exec, EncodedJSValue base, UniquedStringImpl* uid) -{ - VM* vm = &exec->vm(); - NativeCallFrameTracer tracer(vm, exec); - - JSValue baseValue = JSValue::decode(base); - PropertySlot slot(baseValue); - Identifier ident = Identifier::fromUid(vm, uid); - return JSValue::encode(baseValue.get(exec, ident, slot)); -} - -EncodedJSValue JIT_OPERATION operationGetByIdBuildList(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue base, UniquedStringImpl* uid) +EncodedJSValue JIT_OPERATION operationGetByIdBuildList(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue base, StringImpl* uid) { VM* vm = &exec->vm(); NativeCallFrameTracer 
tracer(vm, exec); - Identifier ident = Identifier::fromUid(vm, uid); + Identifier ident(vm, uid); AccessType accessType = static_cast<AccessType>(stubInfo->accessType); JSValue baseValue = JSValue::decode(base); PropertySlot slot(baseValue); - bool hasResult = baseValue.getPropertySlot(exec, ident, slot); - + JSValue result = baseValue.get(exec, ident, slot); + if (accessType == static_cast<AccessType>(stubInfo->accessType)) buildGetByIDList(exec, baseValue, ident, slot, *stubInfo); - return JSValue::encode(hasResult? slot.getValue(exec, ident) : jsUndefined()); + return JSValue::encode(result); } -EncodedJSValue JIT_OPERATION operationGetByIdOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue base, UniquedStringImpl* uid) +EncodedJSValue JIT_OPERATION operationGetByIdOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue base, StringImpl* uid) { VM* vm = &exec->vm(); NativeCallFrameTracer tracer(vm, exec); - Identifier ident = Identifier::fromUid(vm, uid); + Identifier ident = uid->isEmptyUnique() ? Identifier::from(PrivateName(uid)) : Identifier(vm, uid); + AccessType accessType = static_cast<AccessType>(stubInfo->accessType); JSValue baseValue = JSValue::decode(base); PropertySlot slot(baseValue); + JSValue result = baseValue.get(exec, ident, slot); - bool hasResult = baseValue.getPropertySlot(exec, ident, slot); - if (stubInfo->seen) - repatchGetByID(exec, baseValue, ident, slot, *stubInfo); - else - stubInfo->seen = true; - - return JSValue::encode(hasResult? slot.getValue(exec, ident) : jsUndefined()); + if (accessType == static_cast<AccessType>(stubInfo->accessType)) { + if (stubInfo->seen) + repatchGetByID(exec, baseValue, ident, slot, *stubInfo); + else + stubInfo->seen = true; + } + return JSValue::encode(result); } -EncodedJSValue JIT_OPERATION operationInOptimize(ExecState* exec, StructureStubInfo* stubInfo, JSCell* base, UniquedStringImpl* key) +EncodedJSValue JIT_OPERATION operationInOptimize(ExecState* exec, StructureStubInfo* stubInfo, JSCell* base, StringImpl* key) { VM* vm = &exec->vm(); NativeCallFrameTracer tracer(vm, exec); if (!base->isObject()) { - vm->throwException(exec, createInvalidInParameterError(exec, base)); + vm->throwException(exec, createInvalidParameterError(exec, "in", base)); return JSValue::encode(jsUndefined()); } AccessType accessType = static_cast<AccessType>(stubInfo->accessType); - Identifier ident = Identifier::fromUid(vm, key); + Identifier ident(vm, key); PropertySlot slot(base); bool result = asObject(base)->getPropertySlot(exec, ident, slot); @@ -216,19 +193,17 @@ EncodedJSValue JIT_OPERATION operationInOptimize(ExecState* exec, StructureStubI return JSValue::encode(jsBoolean(result)); } -EncodedJSValue JIT_OPERATION operationIn(ExecState* exec, StructureStubInfo* stubInfo, JSCell* base, UniquedStringImpl* key) +EncodedJSValue JIT_OPERATION operationIn(ExecState* exec, StructureStubInfo*, JSCell* base, StringImpl* key) { VM* vm = &exec->vm(); NativeCallFrameTracer tracer(vm, exec); - - stubInfo->tookSlowPath = true; if (!base->isObject()) { - vm->throwException(exec, createInvalidInParameterError(exec, base)); + vm->throwException(exec, createInvalidParameterError(exec, "in", base)); return JSValue::encode(jsUndefined()); } - Identifier ident = Identifier::fromUid(vm, key); + Identifier ident(vm, key); return JSValue::encode(jsBoolean(asObject(base)->hasProperty(exec, ident))); } @@ -240,232 +215,234 @@ EncodedJSValue JIT_OPERATION operationGenericIn(ExecState* exec, JSCell* base, E return 
JSValue::encode(jsBoolean(CommonSlowPaths::opIn(exec, JSValue::decode(key), base))); } -void JIT_OPERATION operationPutByIdStrict(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid) +EncodedJSValue JIT_OPERATION operationCallCustomGetter(ExecState* exec, JSCell* base, PropertySlot::GetValueFunc function, StringImpl* uid) { VM* vm = &exec->vm(); NativeCallFrameTracer tracer(vm, exec); - stubInfo->tookSlowPath = true; + Identifier ident(vm, uid); + + return function(exec, JSValue::encode(base), JSValue::encode(base), ident); +} + +EncodedJSValue JIT_OPERATION operationCallGetter(ExecState* exec, JSCell* base, JSCell* getterSetter) +{ + VM* vm = &exec->vm(); + NativeCallFrameTracer tracer(vm, exec); + + return JSValue::encode(callGetter(exec, base, getterSetter)); +} + +void JIT_OPERATION operationPutByIdStrict(ExecState* exec, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid) +{ + VM* vm = &exec->vm(); + NativeCallFrameTracer tracer(vm, exec); - Identifier ident = Identifier::fromUid(vm, uid); + Identifier ident(vm, uid); PutPropertySlot slot(JSValue::decode(encodedBase), true, exec->codeBlock()->putByIdContext()); JSValue::decode(encodedBase).put(exec, ident, JSValue::decode(encodedValue), slot); } -void JIT_OPERATION operationPutByIdNonStrict(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid) +void JIT_OPERATION operationPutByIdNonStrict(ExecState* exec, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid) { VM* vm = &exec->vm(); NativeCallFrameTracer tracer(vm, exec); - stubInfo->tookSlowPath = true; - - Identifier ident = Identifier::fromUid(vm, uid); + Identifier ident(vm, uid); PutPropertySlot slot(JSValue::decode(encodedBase), false, exec->codeBlock()->putByIdContext()); JSValue::decode(encodedBase).put(exec, ident, JSValue::decode(encodedValue), slot); } -void JIT_OPERATION operationPutByIdDirectStrict(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid) +void JIT_OPERATION operationPutByIdDirectStrict(ExecState* exec, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid) { VM* vm = &exec->vm(); NativeCallFrameTracer tracer(vm, exec); - stubInfo->tookSlowPath = true; - - Identifier ident = Identifier::fromUid(vm, uid); + Identifier ident(vm, uid); PutPropertySlot slot(JSValue::decode(encodedBase), true, exec->codeBlock()->putByIdContext()); asObject(JSValue::decode(encodedBase))->putDirect(exec->vm(), ident, JSValue::decode(encodedValue), slot); } -void JIT_OPERATION operationPutByIdDirectNonStrict(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid) +void JIT_OPERATION operationPutByIdDirectNonStrict(ExecState* exec, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid) { VM* vm = &exec->vm(); NativeCallFrameTracer tracer(vm, exec); - stubInfo->tookSlowPath = true; - - Identifier ident = Identifier::fromUid(vm, uid); + Identifier ident(vm, uid); PutPropertySlot slot(JSValue::decode(encodedBase), false, exec->codeBlock()->putByIdContext()); asObject(JSValue::decode(encodedBase))->putDirect(exec->vm(), ident, JSValue::decode(encodedValue), slot); } -void JIT_OPERATION operationPutByIdStrictOptimize(ExecState* exec, 
StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid) +void JIT_OPERATION operationPutByIdStrictOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid) { VM* vm = &exec->vm(); NativeCallFrameTracer tracer(vm, exec); - Identifier ident = Identifier::fromUid(vm, uid); + Identifier ident(vm, uid); AccessType accessType = static_cast<AccessType>(stubInfo->accessType); JSValue value = JSValue::decode(encodedValue); JSValue baseValue = JSValue::decode(encodedBase); PutPropertySlot slot(baseValue, true, exec->codeBlock()->putByIdContext()); - - Structure* structure = baseValue.isCell() ? baseValue.asCell()->structure(*vm) : nullptr; + baseValue.put(exec, ident, value, slot); if (accessType != static_cast<AccessType>(stubInfo->accessType)) return; if (stubInfo->seen) - repatchPutByID(exec, baseValue, structure, ident, slot, *stubInfo, NotDirect); + repatchPutByID(exec, baseValue, ident, slot, *stubInfo, NotDirect); else stubInfo->seen = true; } -void JIT_OPERATION operationPutByIdNonStrictOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid) +void JIT_OPERATION operationPutByIdNonStrictOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid) { VM* vm = &exec->vm(); NativeCallFrameTracer tracer(vm, exec); - Identifier ident = Identifier::fromUid(vm, uid); + Identifier ident(vm, uid); AccessType accessType = static_cast<AccessType>(stubInfo->accessType); JSValue value = JSValue::decode(encodedValue); JSValue baseValue = JSValue::decode(encodedBase); PutPropertySlot slot(baseValue, false, exec->codeBlock()->putByIdContext()); - - Structure* structure = baseValue.isCell() ? 
baseValue.asCell()->structure(*vm) : nullptr; + baseValue.put(exec, ident, value, slot); if (accessType != static_cast<AccessType>(stubInfo->accessType)) return; if (stubInfo->seen) - repatchPutByID(exec, baseValue, structure, ident, slot, *stubInfo, NotDirect); + repatchPutByID(exec, baseValue, ident, slot, *stubInfo, NotDirect); else stubInfo->seen = true; } -void JIT_OPERATION operationPutByIdDirectStrictOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid) +void JIT_OPERATION operationPutByIdDirectStrictOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid) { VM* vm = &exec->vm(); NativeCallFrameTracer tracer(vm, exec); - Identifier ident = Identifier::fromUid(vm, uid); + Identifier ident(vm, uid); AccessType accessType = static_cast<AccessType>(stubInfo->accessType); JSValue value = JSValue::decode(encodedValue); JSObject* baseObject = asObject(JSValue::decode(encodedBase)); PutPropertySlot slot(baseObject, true, exec->codeBlock()->putByIdContext()); - Structure* structure = baseObject->structure(*vm); baseObject->putDirect(exec->vm(), ident, value, slot); if (accessType != static_cast<AccessType>(stubInfo->accessType)) return; if (stubInfo->seen) - repatchPutByID(exec, baseObject, structure, ident, slot, *stubInfo, Direct); + repatchPutByID(exec, baseObject, ident, slot, *stubInfo, Direct); else stubInfo->seen = true; } -void JIT_OPERATION operationPutByIdDirectNonStrictOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid) +void JIT_OPERATION operationPutByIdDirectNonStrictOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid) { VM* vm = &exec->vm(); NativeCallFrameTracer tracer(vm, exec); - Identifier ident = Identifier::fromUid(vm, uid); + Identifier ident(vm, uid); AccessType accessType = static_cast<AccessType>(stubInfo->accessType); JSValue value = JSValue::decode(encodedValue); JSObject* baseObject = asObject(JSValue::decode(encodedBase)); PutPropertySlot slot(baseObject, false, exec->codeBlock()->putByIdContext()); - Structure* structure = baseObject->structure(*vm); baseObject->putDirect(exec->vm(), ident, value, slot); if (accessType != static_cast<AccessType>(stubInfo->accessType)) return; if (stubInfo->seen) - repatchPutByID(exec, baseObject, structure, ident, slot, *stubInfo, Direct); + repatchPutByID(exec, baseObject, ident, slot, *stubInfo, Direct); else stubInfo->seen = true; } -void JIT_OPERATION operationPutByIdStrictBuildList(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid) +void JIT_OPERATION operationPutByIdStrictBuildList(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid) { VM* vm = &exec->vm(); NativeCallFrameTracer tracer(vm, exec); - Identifier ident = Identifier::fromUid(vm, uid); + Identifier ident(vm, uid); AccessType accessType = static_cast<AccessType>(stubInfo->accessType); JSValue value = JSValue::decode(encodedValue); JSValue baseValue = JSValue::decode(encodedBase); PutPropertySlot slot(baseValue, true, exec->codeBlock()->putByIdContext()); - Structure* structure = baseValue.isCell() ? 
baseValue.asCell()->structure(*vm) : nullptr; baseValue.put(exec, ident, value, slot); - + if (accessType != static_cast<AccessType>(stubInfo->accessType)) return; - - buildPutByIdList(exec, baseValue, structure, ident, slot, *stubInfo, NotDirect); + + buildPutByIdList(exec, baseValue, ident, slot, *stubInfo, NotDirect); } -void JIT_OPERATION operationPutByIdNonStrictBuildList(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid) +void JIT_OPERATION operationPutByIdNonStrictBuildList(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid) { VM* vm = &exec->vm(); NativeCallFrameTracer tracer(vm, exec); - Identifier ident = Identifier::fromUid(vm, uid); + Identifier ident(vm, uid); AccessType accessType = static_cast<AccessType>(stubInfo->accessType); JSValue value = JSValue::decode(encodedValue); JSValue baseValue = JSValue::decode(encodedBase); PutPropertySlot slot(baseValue, false, exec->codeBlock()->putByIdContext()); - - Structure* structure = baseValue.isCell() ? baseValue.asCell()->structure(*vm) : nullptr; + baseValue.put(exec, ident, value, slot); if (accessType != static_cast<AccessType>(stubInfo->accessType)) return; - buildPutByIdList(exec, baseValue, structure, ident, slot, *stubInfo, NotDirect); + buildPutByIdList(exec, baseValue, ident, slot, *stubInfo, NotDirect); } -void JIT_OPERATION operationPutByIdDirectStrictBuildList(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid) +void JIT_OPERATION operationPutByIdDirectStrictBuildList(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid) { VM* vm = &exec->vm(); NativeCallFrameTracer tracer(vm, exec); - Identifier ident = Identifier::fromUid(vm, uid); + Identifier ident(vm, uid); AccessType accessType = static_cast<AccessType>(stubInfo->accessType); JSValue value = JSValue::decode(encodedValue); JSObject* baseObject = asObject(JSValue::decode(encodedBase)); PutPropertySlot slot(baseObject, true, exec->codeBlock()->putByIdContext()); - - Structure* structure = baseObject->structure(*vm); - baseObject->putDirect(*vm, ident, value, slot); + + baseObject->putDirect(exec->vm(), ident, value, slot); if (accessType != static_cast<AccessType>(stubInfo->accessType)) return; - buildPutByIdList(exec, baseObject, structure, ident, slot, *stubInfo, Direct); + buildPutByIdList(exec, baseObject, ident, slot, *stubInfo, Direct); } -void JIT_OPERATION operationPutByIdDirectNonStrictBuildList(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid) +void JIT_OPERATION operationPutByIdDirectNonStrictBuildList(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid) { VM* vm = &exec->vm(); NativeCallFrameTracer tracer(vm, exec); - Identifier ident = Identifier::fromUid(vm, uid); + Identifier ident(vm, uid); AccessType accessType = static_cast<AccessType>(stubInfo->accessType); JSValue value = JSValue::decode(encodedValue); JSObject* baseObject = asObject(JSValue::decode(encodedBase)); PutPropertySlot slot(baseObject, false, exec->codeBlock()->putByIdContext()); - - Structure* structure = baseObject->structure(*vm); - baseObject->putDirect(*vm, ident, value, slot); - + + baseObject ->putDirect(exec->vm(), ident, value, slot); + if (accessType != 
static_cast<AccessType>(stubInfo->accessType)) return; - buildPutByIdList(exec, baseObject, structure, ident, slot, *stubInfo, Direct); + buildPutByIdList(exec, baseObject, ident, slot, *stubInfo, Direct); } void JIT_OPERATION operationReallocateStorageAndFinishPut(ExecState* exec, JSObject* base, Structure* structure, PropertyOffset offset, EncodedJSValue value) @@ -473,29 +450,29 @@ void JIT_OPERATION operationReallocateStorageAndFinishPut(ExecState* exec, JSObj VM& vm = exec->vm(); NativeCallFrameTracer tracer(&vm, exec); - ASSERT(structure->outOfLineCapacity() > base->structure(vm)->outOfLineCapacity()); + ASSERT(structure->outOfLineCapacity() > base->structure()->outOfLineCapacity()); ASSERT(!vm.heap.storageAllocator().fastPathShouldSucceed(structure->outOfLineCapacity() * sizeof(JSValue))); base->setStructureAndReallocateStorageIfNecessary(vm, structure); base->putDirect(vm, offset, JSValue::decode(value)); } -static void putByVal(CallFrame* callFrame, JSValue baseValue, JSValue subscript, JSValue value, ByValInfo* byValInfo) +static void putByVal(CallFrame* callFrame, JSValue baseValue, JSValue subscript, JSValue value) { - VM& vm = callFrame->vm(); if (LIKELY(subscript.isUInt32())) { uint32_t i = subscript.asUInt32(); if (baseValue.isObject()) { JSObject* object = asObject(baseValue); if (object->canSetIndexQuickly(i)) object->setIndexQuickly(callFrame->vm(), i, value); - else { - byValInfo->arrayProfile->setOutOfBounds(); - object->methodTable(vm)->putByIndex(object, callFrame, i, value, callFrame->codeBlock()->isStrictMode()); - } + else + object->methodTable()->putByIndex(object, callFrame, i, value, callFrame->codeBlock()->isStrictMode()); } else baseValue.putByIndex(callFrame, i, value, callFrame->codeBlock()->isStrictMode()); + } else if (isName(subscript)) { + PutPropertySlot slot(baseValue, callFrame->codeBlock()->isStrictMode()); + baseValue.put(callFrame, jsCast<NameInstance*>(subscript.asCell())->privateName(), value, slot); } else { - auto property = subscript.toPropertyKey(callFrame); + Identifier property(callFrame, subscript.toString(callFrame)->value(callFrame)); if (!callFrame->vm().exception()) { // Don't put to an object if toString threw an exception. PutPropertySlot slot(baseValue, callFrame->codeBlock()->isStrictMode()); baseValue.put(callFrame, property, value, slot); @@ -503,45 +480,23 @@ static void putByVal(CallFrame* callFrame, JSValue baseValue, JSValue subscript, } } -static void directPutByVal(CallFrame* callFrame, JSObject* baseObject, JSValue subscript, JSValue value, ByValInfo* byValInfo) +static void directPutByVal(CallFrame* callFrame, JSObject* baseObject, JSValue subscript, JSValue value) { - bool isStrictMode = callFrame->codeBlock()->isStrictMode(); if (LIKELY(subscript.isUInt32())) { - // Despite its name, JSValue::isUInt32 will return true only for positive boxed int32_t; all those values are valid array indices. - uint32_t index = subscript.asUInt32(); - ASSERT(isIndex(index)); - if (baseObject->canSetIndexQuicklyForPutDirect(index)) { - baseObject->setIndexQuickly(callFrame->vm(), index, value); - return; - } - - byValInfo->arrayProfile->setOutOfBounds(); - baseObject->putDirectIndex(callFrame, index, value, 0, isStrictMode ? 
PutDirectIndexShouldThrow : PutDirectIndexShouldNotThrow); - return; - } - - if (subscript.isDouble()) { - double subscriptAsDouble = subscript.asDouble(); - uint32_t subscriptAsUInt32 = static_cast<uint32_t>(subscriptAsDouble); - if (subscriptAsDouble == subscriptAsUInt32 && isIndex(subscriptAsUInt32)) { - baseObject->putDirectIndex(callFrame, subscriptAsUInt32, value, 0, isStrictMode ? PutDirectIndexShouldThrow : PutDirectIndexShouldNotThrow); - return; + uint32_t i = subscript.asUInt32(); + baseObject->putDirectIndex(callFrame, i, value); + } else if (isName(subscript)) { + PutPropertySlot slot(baseObject, callFrame->codeBlock()->isStrictMode()); + baseObject->putDirect(callFrame->vm(), jsCast<NameInstance*>(subscript.asCell())->privateName(), value, slot); + } else { + Identifier property(callFrame, subscript.toString(callFrame)->value(callFrame)); + if (!callFrame->vm().exception()) { // Don't put to an object if toString threw an exception. + PutPropertySlot slot(baseObject, callFrame->codeBlock()->isStrictMode()); + baseObject->putDirect(callFrame->vm(), property, value, slot); } } - - // Don't put to an object if toString threw an exception. - auto property = subscript.toPropertyKey(callFrame); - if (callFrame->vm().exception()) - return; - - if (Optional<uint32_t> index = parseIndex(property)) - baseObject->putDirectIndex(callFrame, index.value(), value, 0, isStrictMode ? PutDirectIndexShouldThrow : PutDirectIndexShouldNotThrow); - else { - PutPropertySlot slot(baseObject, isStrictMode); - baseObject->putDirect(callFrame->vm(), property, value, slot); - } } -void JIT_OPERATION operationPutByVal(ExecState* exec, EncodedJSValue encodedBaseValue, EncodedJSValue encodedSubscript, EncodedJSValue encodedValue, ByValInfo* byValInfo) +void JIT_OPERATION operationPutByVal(ExecState* exec, EncodedJSValue encodedBaseValue, EncodedJSValue encodedSubscript, EncodedJSValue encodedValue) { VM& vm = exec->vm(); NativeCallFrameTracer tracer(&vm, exec); @@ -555,19 +510,16 @@ void JIT_OPERATION operationPutByVal(ExecState* exec, EncodedJSValue encodedBase JSObject* object = asObject(baseValue); bool didOptimize = false; - ASSERT(exec->locationAsBytecodeOffset()); - ASSERT(!byValInfo->stubRoutine); + unsigned bytecodeOffset = exec->locationAsBytecodeOffset(); + ASSERT(bytecodeOffset); + ByValInfo& byValInfo = exec->codeBlock()->getByValInfo(bytecodeOffset - 1); + ASSERT(!byValInfo.stubRoutine); - Structure* structure = object->structure(vm); - if (hasOptimizableIndexing(structure)) { + if (hasOptimizableIndexing(object->structure())) { // Attempt to optimize. - JITArrayMode arrayMode = jitArrayModeForStructure(structure); - if (jitArrayModePermitsPut(arrayMode) && arrayMode != byValInfo->arrayMode) { - CodeBlock* codeBlock = exec->codeBlock(); - ConcurrentJITLocker locker(codeBlock->m_lock); - byValInfo->arrayProfile->computeUpdatedPrediction(locker, codeBlock, structure); - - JIT::compilePutByVal(&vm, exec->codeBlock(), byValInfo, ReturnAddressPtr(OUR_RETURN_ADDRESS), arrayMode); + JITArrayMode arrayMode = jitArrayModeForStructure(object->structure()); + if (arrayMode != byValInfo.arrayMode) { + JIT::compilePutByVal(&vm, exec->codeBlock(), &byValInfo, ReturnAddressPtr(OUR_RETURN_ADDRESS), arrayMode); didOptimize = true; } } @@ -578,18 +530,19 @@ void JIT_OPERATION operationPutByVal(ExecState* exec, EncodedJSValue encodedBase // that intercepts indexed get, then don't even wait until 10 times. 
For cases // where we see non-index-intercepting objects, this gives 10 iterations worth of // opportunity for us to observe that the get_by_val may be polymorphic. - if (++byValInfo->slowPathCount >= 10 - || object->structure(vm)->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero()) { + if (++byValInfo.slowPathCount >= 10 + || object->structure()->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero()) { // Don't ever try to optimize. - ctiPatchCallByReturnAddress(exec->codeBlock(), ReturnAddressPtr(OUR_RETURN_ADDRESS), FunctionPtr(operationPutByValGeneric)); + RepatchBuffer repatchBuffer(exec->codeBlock()); + repatchBuffer.relinkCallerToFunction(ReturnAddressPtr(OUR_RETURN_ADDRESS), FunctionPtr(operationPutByValGeneric)); } } } - putByVal(exec, baseValue, subscript, value, byValInfo); + putByVal(exec, baseValue, subscript, value); } -void JIT_OPERATION operationDirectPutByVal(ExecState* callFrame, EncodedJSValue encodedBaseValue, EncodedJSValue encodedSubscript, EncodedJSValue encodedValue, ByValInfo* byValInfo) +void JIT_OPERATION operationDirectPutByVal(ExecState* callFrame, EncodedJSValue encodedBaseValue, EncodedJSValue encodedSubscript, EncodedJSValue encodedValue) { VM& vm = callFrame->vm(); NativeCallFrameTracer tracer(&vm, callFrame); @@ -602,20 +555,17 @@ void JIT_OPERATION operationDirectPutByVal(ExecState* callFrame, EncodedJSValue if (subscript.isInt32()) { // See if it's worth optimizing at all. bool didOptimize = false; - - ASSERT(callFrame->locationAsBytecodeOffset()); - ASSERT(!byValInfo->stubRoutine); - - Structure* structure = object->structure(vm); - if (hasOptimizableIndexing(structure)) { + + unsigned bytecodeOffset = callFrame->locationAsBytecodeOffset(); + ASSERT(bytecodeOffset); + ByValInfo& byValInfo = callFrame->codeBlock()->getByValInfo(bytecodeOffset - 1); + ASSERT(!byValInfo.stubRoutine); + + if (hasOptimizableIndexing(object->structure())) { // Attempt to optimize. - JITArrayMode arrayMode = jitArrayModeForStructure(structure); - if (jitArrayModePermitsPut(arrayMode) && arrayMode != byValInfo->arrayMode) { - CodeBlock* codeBlock = callFrame->codeBlock(); - ConcurrentJITLocker locker(codeBlock->m_lock); - byValInfo->arrayProfile->computeUpdatedPrediction(locker, codeBlock, structure); - - JIT::compileDirectPutByVal(&vm, callFrame->codeBlock(), byValInfo, ReturnAddressPtr(OUR_RETURN_ADDRESS), arrayMode); + JITArrayMode arrayMode = jitArrayModeForStructure(object->structure()); + if (arrayMode != byValInfo.arrayMode) { + JIT::compileDirectPutByVal(&vm, callFrame->codeBlock(), &byValInfo, ReturnAddressPtr(OUR_RETURN_ADDRESS), arrayMode); didOptimize = true; } } @@ -626,17 +576,18 @@ void JIT_OPERATION operationDirectPutByVal(ExecState* callFrame, EncodedJSValue // that intercepts indexed get, then don't even wait until 10 times. For cases // where we see non-index-intercepting objects, this gives 10 iterations worth of // opportunity for us to observe that the get_by_val may be polymorphic. - if (++byValInfo->slowPathCount >= 10 - || object->structure(vm)->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero()) { + if (++byValInfo.slowPathCount >= 10 + || object->structure()->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero()) { // Don't ever try to optimize. 
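+ // (Editorial sketch of the by-val self-patching lifecycle, as visible in this
+ // file: each slow-path call either compiles a stub for the newly observed
+ // JITArrayMode or bumps slowPathCount; after ten misses, or on a structure that
+ // intercepts indexed access, the call site is relinked once and for all to the
+ // *Generic operation. A freshly compiled stub only speeds up future executions;
+ // the current invocation still completes through the C++ directPutByVal below.)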
- ctiPatchCallByReturnAddress(callFrame->codeBlock(), ReturnAddressPtr(OUR_RETURN_ADDRESS), FunctionPtr(operationDirectPutByValGeneric)); + RepatchBuffer repatchBuffer(callFrame->codeBlock()); + repatchBuffer.relinkCallerToFunction(ReturnAddressPtr(OUR_RETURN_ADDRESS), FunctionPtr(operationDirectPutByValGeneric)); } } } - directPutByVal(callFrame, object, subscript, value, byValInfo); + directPutByVal(callFrame, object, subscript, value); } -void JIT_OPERATION operationPutByValGeneric(ExecState* exec, EncodedJSValue encodedBaseValue, EncodedJSValue encodedSubscript, EncodedJSValue encodedValue, ByValInfo* byValInfo) +void JIT_OPERATION operationPutByValGeneric(ExecState* exec, EncodedJSValue encodedBaseValue, EncodedJSValue encodedSubscript, EncodedJSValue encodedValue) { VM& vm = exec->vm(); NativeCallFrameTracer tracer(&vm, exec); @@ -645,11 +596,11 @@ void JIT_OPERATION operationPutByValGeneric(ExecState* exec, EncodedJSValue enco JSValue subscript = JSValue::decode(encodedSubscript); JSValue value = JSValue::decode(encodedValue); - putByVal(exec, baseValue, subscript, value, byValInfo); + putByVal(exec, baseValue, subscript, value); } -void JIT_OPERATION operationDirectPutByValGeneric(ExecState* exec, EncodedJSValue encodedBaseValue, EncodedJSValue encodedSubscript, EncodedJSValue encodedValue, ByValInfo* byValInfo) +void JIT_OPERATION operationDirectPutByValGeneric(ExecState* exec, EncodedJSValue encodedBaseValue, EncodedJSValue encodedSubscript, EncodedJSValue encodedValue) { VM& vm = exec->vm(); NativeCallFrameTracer tracer(&vm, exec); @@ -658,13 +609,18 @@ void JIT_OPERATION operationDirectPutByValGeneric(ExecState* exec, EncodedJSValu JSValue subscript = JSValue::decode(encodedSubscript); JSValue value = JSValue::decode(encodedValue); RELEASE_ASSERT(baseValue.isObject()); - directPutByVal(exec, asObject(baseValue), subscript, value, byValInfo); + directPutByVal(exec, asObject(baseValue), subscript, value); } -EncodedJSValue JIT_OPERATION operationCallEval(ExecState* exec, ExecState* execCallee) +EncodedJSValue JIT_OPERATION operationCallEval(ExecState* execCallee) { - UNUSED_PARAM(exec); + CallFrame* callerFrame = execCallee->callerFrame(); + ASSERT(execCallee->callerFrame()->codeBlock()->codeType() != FunctionCode + || !execCallee->callerFrame()->codeBlock()->needsFullScopeChain() + || execCallee->callerFrame()->uncheckedR(execCallee->callerFrame()->codeBlock()->activationRegister().offset()).jsValue()); + execCallee->setScope(callerFrame->scope()); + execCallee->setReturnPC(static_cast<Instruction*>(OUR_RETURN_ADDRESS)); execCallee->setCodeBlock(0); if (!isHostFunction(execCallee->calleeAsValue(), globalFuncEval)) @@ -683,6 +639,7 @@ static void* handleHostCall(ExecState* execCallee, JSValue callee, CodeSpecializ ExecState* exec = execCallee->callerFrame(); VM* vm = &exec->vm(); + execCallee->setScope(exec->scope()); execCallee->setCodeBlock(0); if (kind == CodeForCall) { @@ -728,65 +685,60 @@ static void* handleHostCall(ExecState* execCallee, JSValue callee, CodeSpecializ return vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress(); } -char* JIT_OPERATION operationLinkCall(ExecState* execCallee, CallLinkInfo* callLinkInfo) +inline char* linkFor(ExecState* execCallee, CodeSpecializationKind kind) { ExecState* exec = execCallee->callerFrame(); VM* vm = &exec->vm(); - CodeSpecializationKind kind = callLinkInfo->specializationKind(); NativeCallFrameTracer tracer(vm, exec); JSValue calleeAsValue = execCallee->calleeAsValue(); JSCell* calleeAsFunctionCell 
= getJSFunction(calleeAsValue); - if (!calleeAsFunctionCell) { - // FIXME: We should cache these kinds of calls. They can be common and currently they are - // expensive. - // https://bugs.webkit.org/show_bug.cgi?id=144458 + if (!calleeAsFunctionCell) return reinterpret_cast<char*>(handleHostCall(execCallee, calleeAsValue, kind)); - } JSFunction* callee = jsCast<JSFunction*>(calleeAsFunctionCell); - JSScope* scope = callee->scopeUnchecked(); + execCallee->setScope(callee->scopeUnchecked()); ExecutableBase* executable = callee->executable(); MacroAssemblerCodePtr codePtr; CodeBlock* codeBlock = 0; + CallLinkInfo& callLinkInfo = exec->codeBlock()->getCallLinkInfo(execCallee->returnPC()); if (executable->isHostFunction()) - codePtr = executable->entrypointFor(*vm, kind, MustCheckArity, callLinkInfo->registerPreservationMode()); + codePtr = executable->generatedJITCodeFor(kind)->addressForCall(); else { FunctionExecutable* functionExecutable = static_cast<FunctionExecutable*>(executable); - - if (!isCall(kind) && functionExecutable->constructAbility() == ConstructAbility::CannotConstruct) { - exec->vm().throwException(exec, createNotAConstructorError(exec, callee)); - return reinterpret_cast<char*>(vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress()); - } - - JSObject* error = functionExecutable->prepareForExecution(execCallee, callee, scope, kind); + JSObject* error = functionExecutable->prepareForExecution(execCallee, callee->scope(), kind); if (error) { - exec->vm().throwException(exec, error); + vm->throwException(exec, createStackOverflowError(exec)); return reinterpret_cast<char*>(vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress()); } codeBlock = functionExecutable->codeBlockFor(kind); - ArityCheckMode arity; - if (execCallee->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters()) || callLinkInfo->callType() == CallLinkInfo::CallVarargs || callLinkInfo->callType() == CallLinkInfo::ConstructVarargs) - arity = MustCheckArity; + if (execCallee->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters()) || callLinkInfo.callType == CallLinkInfo::CallVarargs) + codePtr = functionExecutable->generatedJITCodeWithArityCheckFor(kind); else - arity = ArityCheckNotRequired; - codePtr = functionExecutable->entrypointFor(*vm, kind, arity, callLinkInfo->registerPreservationMode()); + codePtr = functionExecutable->generatedJITCodeFor(kind)->addressForCall(); } - if (!callLinkInfo->seenOnce()) - callLinkInfo->setSeen(); + if (!callLinkInfo.seenOnce()) + callLinkInfo.setSeen(); else - linkFor(execCallee, *callLinkInfo, codeBlock, callee, codePtr); - + linkFor(execCallee, callLinkInfo, codeBlock, callee, codePtr, kind); return reinterpret_cast<char*>(codePtr.executableAddress()); } -inline char* virtualForWithFunction( - ExecState* execCallee, CallLinkInfo* callLinkInfo, JSCell*& calleeAsFunctionCell) +char* JIT_OPERATION operationLinkCall(ExecState* execCallee) +{ + return linkFor(execCallee, CodeForCall); +} + +char* JIT_OPERATION operationLinkConstruct(ExecState* execCallee) +{ + return linkFor(execCallee, CodeForConstruct); +} + +inline char* virtualForWithFunction(ExecState* execCallee, CodeSpecializationKind kind, JSCell*& calleeAsFunctionCell) { ExecState* exec = execCallee->callerFrame(); VM* vm = &exec->vm(); - CodeSpecializationKind kind = callLinkInfo->specializationKind(); NativeCallFrameTracer tracer(vm, exec); JSValue calleeAsValue = execCallee->calleeAsValue(); @@ -795,43 +747,80 @@ 
inline char* virtualForWithFunction( return reinterpret_cast<char*>(handleHostCall(execCallee, calleeAsValue, kind)); JSFunction* function = jsCast<JSFunction*>(calleeAsFunctionCell); - JSScope* scope = function->scopeUnchecked(); + execCallee->setScope(function->scopeUnchecked()); ExecutableBase* executable = function->executable(); if (UNLIKELY(!executable->hasJITCodeFor(kind))) { FunctionExecutable* functionExecutable = static_cast<FunctionExecutable*>(executable); - - if (!isCall(kind) && functionExecutable->constructAbility() == ConstructAbility::CannotConstruct) { - exec->vm().throwException(exec, createNotAConstructorError(exec, function)); - return reinterpret_cast<char*>(vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress()); - } - - JSObject* error = functionExecutable->prepareForExecution(execCallee, function, scope, kind); + JSObject* error = functionExecutable->prepareForExecution(execCallee, function->scope(), kind); if (error) { - exec->vm().throwException(exec, error); + exec->vm().throwException(execCallee, error); return reinterpret_cast<char*>(vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress()); } } - return reinterpret_cast<char*>(executable->entrypointFor( - *vm, kind, MustCheckArity, callLinkInfo->registerPreservationMode()).executableAddress()); + return reinterpret_cast<char*>(executable->generatedJITCodeWithArityCheckFor(kind).executableAddress()); +} + +inline char* virtualFor(ExecState* execCallee, CodeSpecializationKind kind) +{ + JSCell* calleeAsFunctionCellIgnored; + return virtualForWithFunction(execCallee, kind, calleeAsFunctionCellIgnored); +} + +static bool attemptToOptimizeClosureCall(ExecState* execCallee, JSCell* calleeAsFunctionCell, CallLinkInfo& callLinkInfo) +{ + if (!calleeAsFunctionCell) + return false; + + JSFunction* callee = jsCast<JSFunction*>(calleeAsFunctionCell); + JSFunction* oldCallee = callLinkInfo.callee.get(); + + if (!oldCallee + || oldCallee->structure() != callee->structure() + || oldCallee->executable() != callee->executable()) + return false; + + ASSERT(callee->executable()->hasJITCodeForCall()); + MacroAssemblerCodePtr codePtr = callee->executable()->generatedJITCodeForCall()->addressForCall(); + + CodeBlock* codeBlock; + if (callee->executable()->isHostFunction()) + codeBlock = 0; + else { + codeBlock = jsCast<FunctionExecutable*>(callee->executable())->codeBlockForCall(); + if (execCallee->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters())) + return false; + } + + linkClosureCall( + execCallee, callLinkInfo, codeBlock, + callee->structure(), callee->executable(), codePtr); + + return true; } -char* JIT_OPERATION operationLinkPolymorphicCall(ExecState* execCallee, CallLinkInfo* callLinkInfo) +char* JIT_OPERATION operationLinkClosureCall(ExecState* execCallee) { - ASSERT(callLinkInfo->specializationKind() == CodeForCall); JSCell* calleeAsFunctionCell; - char* result = virtualForWithFunction(execCallee, callLinkInfo, calleeAsFunctionCell); + char* result = virtualForWithFunction(execCallee, CodeForCall, calleeAsFunctionCell); + CallLinkInfo& callLinkInfo = execCallee->callerFrame()->codeBlock()->getCallLinkInfo(execCallee->returnPC()); - linkPolymorphicCall(execCallee, *callLinkInfo, CallVariant(calleeAsFunctionCell)); + if (!attemptToOptimizeClosureCall(execCallee, calleeAsFunctionCell, callLinkInfo)) + linkSlowFor(execCallee, callLinkInfo, CodeForCall); return result; } -char* JIT_OPERATION operationVirtualCall(ExecState* execCallee, 
CallLinkInfo* callLinkInfo) +char* JIT_OPERATION operationVirtualCall(ExecState* execCallee) +{ + return virtualFor(execCallee, CodeForCall); +} + +char* JIT_OPERATION operationVirtualConstruct(ExecState* execCallee) { - JSCell* calleeAsFunctionCellIgnored; - return virtualForWithFunction(execCallee, callLinkInfo, calleeAsFunctionCellIgnored); + return virtualFor(execCallee, CodeForConstruct); } + size_t JIT_OPERATION operationCompareLess(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) { VM* vm = &exec->vm(); @@ -889,7 +878,7 @@ size_t JIT_OPERATION operationCompareStringEq(ExecState* exec, JSCell* left, JSC VM* vm = &exec->vm(); NativeCallFrameTracer tracer(vm, exec); - bool result = WTF::equal(*asString(left)->value(exec).impl(), *asString(right)->value(exec).impl()); + bool result = asString(left)->value(exec) == asString(right)->value(exec); #if USE(JSVALUE64) return JSValue::encode(jsBoolean(result)); #else @@ -899,7 +888,7 @@ size_t JIT_OPERATION operationCompareStringEq(ExecState* exec, JSCell* left, JSC size_t JIT_OPERATION operationHasProperty(ExecState* exec, JSObject* base, JSString* property) { - int result = base->hasProperty(exec, property->toIdentifier(exec)); + int result = base->hasProperty(exec, Identifier(exec, property->value(exec))); return result; } @@ -926,20 +915,12 @@ EncodedJSValue JIT_OPERATION operationNewArrayWithSizeAndProfile(ExecState* exec return JSValue::encode(constructArrayWithSizeQuirk(exec, profile, exec->lexicalGlobalObject(), sizeValue)); } -EncodedJSValue JIT_OPERATION operationNewFunction(ExecState* exec, JSScope* scope, JSCell* functionExecutable) -{ - ASSERT(functionExecutable->inherits(FunctionExecutable::info())); - VM& vm = exec->vm(); - NativeCallFrameTracer tracer(&vm, exec); - return JSValue::encode(JSFunction::create(vm, static_cast<FunctionExecutable*>(functionExecutable), scope)); -} - -EncodedJSValue JIT_OPERATION operationNewFunctionWithInvalidatedReallocationWatchpoint(ExecState* exec, JSScope* scope, JSCell* functionExecutable) +EncodedJSValue JIT_OPERATION operationNewFunction(ExecState* exec, JSCell* functionExecutable) { ASSERT(functionExecutable->inherits(FunctionExecutable::info())); VM& vm = exec->vm(); NativeCallFrameTracer tracer(&vm, exec); - return JSValue::encode(JSFunction::createWithInvalidatedReallocationWatchpoint(vm, static_cast<FunctionExecutable*>(functionExecutable), scope)); + return JSValue::encode(JSFunction::create(vm, static_cast<FunctionExecutable*>(functionExecutable), exec->scope())); } JSCell* JIT_OPERATION operationNewObject(ExecState* exec, Structure* structure) @@ -956,7 +937,7 @@ EncodedJSValue JIT_OPERATION operationNewRegexp(ExecState* exec, void* regexpPtr NativeCallFrameTracer tracer(&vm, exec); RegExp* regexp = static_cast<RegExp*>(regexpPtr); if (!regexp->isValid()) { - vm.throwException(exec, createSyntaxError(exec, ASCIILiteral("Invalid flags supplied to RegExp constructor."))); + vm.throwException(exec, createSyntaxError(exec, "Invalid flags supplied to RegExp constructor.")); return JSValue::encode(jsUndefined()); } @@ -968,7 +949,7 @@ void JIT_OPERATION operationHandleWatchdogTimer(ExecState* exec) VM& vm = exec->vm(); NativeCallFrameTracer tracer(&vm, exec); - if (UNLIKELY(vm.shouldTriggerTermination(exec))) + if (UNLIKELY(vm.watchdog.didFire(exec))) vm.throwException(exec, createTerminatedExecutionException(&vm)); } @@ -976,13 +957,12 @@ void JIT_OPERATION operationThrowStaticError(ExecState* exec, EncodedJSValue enc { VM& vm = exec->vm(); NativeCallFrameTracer 
tracer(&vm, exec); - JSValue errorMessageValue = JSValue::decode(encodedValue); - RELEASE_ASSERT(errorMessageValue.isString()); - String errorMessage = asString(errorMessageValue)->value(exec); + + String message = errorDescriptionForValue(exec, JSValue::decode(encodedValue))->value(exec); if (referenceErrorFlag) - vm.throwException(exec, createReferenceError(exec, errorMessage)); + vm.throwException(exec, createReferenceError(exec, message)); else - vm.throwException(exec, createTypeError(exec, errorMessage)); + vm.throwException(exec, createTypeError(exec, message)); } void JIT_OPERATION operationDebug(ExecState* exec, int32_t debugHookID) @@ -994,13 +974,7 @@ void JIT_OPERATION operationDebug(ExecState* exec, int32_t debugHookID) } #if ENABLE(DFG_JIT) -static void updateAllPredictionsAndOptimizeAfterWarmUp(CodeBlock* codeBlock) -{ - codeBlock->updateAllPredictions(); - codeBlock->optimizeAfterWarmUp(); -} - -SlowPathReturnType JIT_OPERATION operationOptimize(ExecState* exec, int32_t bytecodeIndex) +char* JIT_OPERATION operationOptimize(ExecState* exec, int32_t bytecodeIndex) { VM& vm = exec->vm(); NativeCallFrameTracer tracer(&vm, exec); @@ -1022,11 +996,7 @@ SlowPathReturnType JIT_OPERATION operationOptimize(ExecState* exec, int32_t byte DeferGCForAWhile deferGC(vm.heap); CodeBlock* codeBlock = exec->codeBlock(); - if (codeBlock->jitType() != JITCode::BaselineJIT) { - dataLog("Unexpected code block in Baseline->DFG tier-up: ", *codeBlock, "\n"); - RELEASE_ASSERT_NOT_REACHED(); - } - + if (bytecodeIndex) { // If we're attempting to OSR from a loop, assume that this should be // separately optimized. @@ -1050,37 +1020,26 @@ SlowPathReturnType JIT_OPERATION operationOptimize(ExecState* exec, int32_t byte codeBlock->updateAllPredictions(); if (Options::verboseOSR()) dataLog("Choosing not to optimize ", *codeBlock, " yet, because the threshold hasn't been reached.\n"); - return encodeResult(0, 0); + return 0; } - if (vm.enabledProfiler()) { - updateAllPredictionsAndOptimizeAfterWarmUp(codeBlock); - return encodeResult(0, 0); - } - - Debugger* debugger = codeBlock->globalObject()->debugger(); - if (debugger && (debugger->isStepping() || codeBlock->baselineAlternative()->hasDebuggerRequests())) { - updateAllPredictionsAndOptimizeAfterWarmUp(codeBlock); - return encodeResult(0, 0); - } - if (codeBlock->m_shouldAlwaysBeInlined) { - updateAllPredictionsAndOptimizeAfterWarmUp(codeBlock); + codeBlock->updateAllPredictions(); + codeBlock->optimizeAfterWarmUp(); if (Options::verboseOSR()) dataLog("Choosing not to optimize ", *codeBlock, " yet, because m_shouldAlwaysBeInlined == true.\n"); - return encodeResult(0, 0); + return 0; } // We cannot be in the process of asynchronous compilation and also have an optimized // replacement. - DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull(); ASSERT( - !worklist - || !(worklist->compilationState(DFG::CompilationKey(codeBlock, DFG::DFGMode)) != DFG::Worklist::NotKnown + !vm.worklist + || !(vm.worklist->compilationState(DFG::CompilationKey(codeBlock, DFG::DFGMode)) != DFG::Worklist::NotKnown && codeBlock->hasOptimizedReplacement())); DFG::Worklist::State worklistState; - if (worklist) { + if (vm.worklist) { // The call to DFG::Worklist::completeAllReadyPlansForVM() will complete all ready // (i.e. compiled) code blocks. 
But if it completes ours, we also need to know // what the result was so that we don't plow ahead and attempt OSR or immediate @@ -1099,7 +1058,7 @@ SlowPathReturnType JIT_OPERATION operationOptimize(ExecState* exec, int32_t byte // probably a waste of memory. Our goal here is to complete code blocks as soon as // possible in order to minimize the chances of us executing baseline code after // optimized code is already available. - worklistState = worklist->completeAllReadyPlansForVM( + worklistState = vm.worklist->completeAllReadyPlansForVM( vm, DFG::CompilationKey(codeBlock, DFG::DFGMode)); } else worklistState = DFG::Worklist::NotKnown; @@ -1109,7 +1068,7 @@ SlowPathReturnType JIT_OPERATION operationOptimize(ExecState* exec, int32_t byte // replacement. RELEASE_ASSERT(!codeBlock->hasOptimizedReplacement()); codeBlock->setOptimizationThresholdBasedOnCompilationResult(CompilationDeferred); - return encodeResult(0, 0); + return 0; } if (worklistState == DFG::Worklist::Compiled) { @@ -1122,7 +1081,7 @@ SlowPathReturnType JIT_OPERATION operationOptimize(ExecState* exec, int32_t byte codeBlock->updateAllPredictions(); if (Options::verboseOSR()) dataLog("Code block ", *codeBlock, " was compiled but it doesn't have an optimized replacement.\n"); - return encodeResult(0, 0); + return 0; } } else if (codeBlock->hasOptimizedReplacement()) { if (Options::verboseOSR()) @@ -1146,8 +1105,8 @@ SlowPathReturnType JIT_OPERATION operationOptimize(ExecState* exec, int32_t byte "Triggering reoptimization of ", *codeBlock, "(", *codeBlock->replacement(), ") (in loop).\n"); } - codeBlock->replacement()->jettison(Profiler::JettisonDueToBaselineLoopReoptimizationTrigger, CountReoptimization); - return encodeResult(0, 0); + codeBlock->replacement()->jettison(CountReoptimization); + return 0; } } else { if (!codeBlock->shouldOptimizeNow()) { @@ -1156,7 +1115,7 @@ SlowPathReturnType JIT_OPERATION operationOptimize(ExecState* exec, int32_t byte "Delaying optimization for ", *codeBlock, " because of insufficient profiling.\n"); } - return encodeResult(0, 0); + return 0; } if (Options::verboseOSR()) @@ -1170,31 +1129,40 @@ SlowPathReturnType JIT_OPERATION operationOptimize(ExecState* exec, int32_t byte Operands<JSValue> mustHandleValues(codeBlock->numParameters(), numVarsWithValues); for (size_t i = 0; i < mustHandleValues.size(); ++i) { int operand = mustHandleValues.operandForIndex(i); - mustHandleValues[i] = exec->uncheckedR(operand).jsValue(); + if (operandIsArgument(operand) + && !VirtualRegister(operand).toArgument() + && codeBlock->codeType() == FunctionCode + && codeBlock->specializationKind() == CodeForConstruct) { + // Ugh. If we're in a constructor, the 'this' argument may hold garbage. It will + // also never be used. It doesn't matter what we put into the value for this, + // but it has to be an actual value that can be grokked by subsequent DFG passes, + // so we sanitize it here by turning it into Undefined. 
+ mustHandleValues[i] = jsUndefined(); + } else + mustHandleValues[i] = exec->uncheckedR(operand).jsValue(); } - RefPtr<CodeBlock> replacementCodeBlock = codeBlock->newReplacement(); CompilationResult result = DFG::compile( - vm, replacementCodeBlock.get(), 0, DFG::DFGMode, bytecodeIndex, - mustHandleValues, JITToDFGDeferredCompilationCallback::create()); + vm, codeBlock->newReplacement().get(), DFG::DFGMode, bytecodeIndex, + mustHandleValues, JITToDFGDeferredCompilationCallback::create(), + vm.ensureWorklist()); - if (result != CompilationSuccessful) { - ASSERT(result == CompilationDeferred || replacementCodeBlock->hasOneRef()); - return encodeResult(0, 0); - } + if (result != CompilationSuccessful) + return 0; } CodeBlock* optimizedCodeBlock = codeBlock->replacement(); ASSERT(JITCode::isOptimizingJIT(optimizedCodeBlock->jitType())); - if (void* dataBuffer = DFG::prepareOSREntry(exec, optimizedCodeBlock, bytecodeIndex)) { + if (void* address = DFG::prepareOSREntry(exec, optimizedCodeBlock, bytecodeIndex)) { if (Options::verboseOSR()) { dataLog( - "Performing OSR ", *codeBlock, " -> ", *optimizedCodeBlock, ".\n"); + "Performing OSR ", *codeBlock, " -> ", *optimizedCodeBlock, ", address ", + RawPointer(OUR_RETURN_ADDRESS), " -> ", RawPointer(address), ".\n"); } codeBlock->optimizeSoon(); - return encodeResult(vm.getCTIStub(DFG::osrEntryThunkGenerator).code().executableAddress(), dataBuffer); + return static_cast<char*>(address); } if (Options::verboseOSR()) { @@ -1222,15 +1190,15 @@ SlowPathReturnType JIT_OPERATION operationOptimize(ExecState* exec, int32_t byte "Triggering reoptimization of ", *codeBlock, " -> ", *codeBlock->replacement(), " (after OSR fail).\n"); } - optimizedCodeBlock->jettison(Profiler::JettisonDueToBaselineLoopReoptimizationTriggerOnOSREntryFail, CountReoptimization); - return encodeResult(0, 0); + optimizedCodeBlock->jettison(CountReoptimization); + return 0; } // OSR failed this time, but it might succeed next time! Let the code run a bit // longer and then try again. 
codeBlock->optimizeAfterWarmUp(); - return encodeResult(0, 0); + return 0; } #endif @@ -1245,32 +1213,6 @@ void JIT_OPERATION operationPutByIndex(ExecState* exec, EncodedJSValue encodedAr } #if USE(JSVALUE64) -void JIT_OPERATION operationPutGetterById(ExecState* exec, EncodedJSValue encodedObjectValue, Identifier* identifier, EncodedJSValue encodedGetterValue) -{ - VM& vm = exec->vm(); - NativeCallFrameTracer tracer(&vm, exec); - - ASSERT(JSValue::decode(encodedObjectValue).isObject()); - JSObject* baseObj = asObject(JSValue::decode(encodedObjectValue)); - - JSValue getter = JSValue::decode(encodedGetterValue); - ASSERT(getter.isObject()); - baseObj->putGetter(exec, *identifier, asObject(getter)); -} - -void JIT_OPERATION operationPutSetterById(ExecState* exec, EncodedJSValue encodedObjectValue, Identifier* identifier, EncodedJSValue encodedSetterValue) -{ - VM& vm = exec->vm(); - NativeCallFrameTracer tracer(&vm, exec); - - ASSERT(JSValue::decode(encodedObjectValue).isObject()); - JSObject* baseObj = asObject(JSValue::decode(encodedObjectValue)); - - JSValue setter = JSValue::decode(encodedSetterValue); - ASSERT(setter.isObject()); - baseObj->putSetter(exec, *identifier, asObject(setter)); -} - void JIT_OPERATION operationPutGetterSetter(ExecState* exec, EncodedJSValue encodedObjectValue, Identifier* identifier, EncodedJSValue encodedGetterValue, EncodedJSValue encodedSetterValue) { VM& vm = exec->vm(); @@ -1279,7 +1221,7 @@ void JIT_OPERATION operationPutGetterSetter(ExecState* exec, EncodedJSValue enco ASSERT(JSValue::decode(encodedObjectValue).isObject()); JSObject* baseObj = asObject(JSValue::decode(encodedObjectValue)); - GetterSetter* accessor = GetterSetter::create(vm, exec->lexicalGlobalObject()); + GetterSetter* accessor = GetterSetter::create(vm); JSValue getter = JSValue::decode(encodedGetterValue); JSValue setter = JSValue::decode(encodedSetterValue); @@ -1288,13 +1230,13 @@ void JIT_OPERATION operationPutGetterSetter(ExecState* exec, EncodedJSValue enco ASSERT(getter.isObject() || setter.isObject()); if (!getter.isUndefined()) - accessor->setGetter(vm, exec->lexicalGlobalObject(), asObject(getter)); + accessor->setGetter(vm, asObject(getter)); if (!setter.isUndefined()) - accessor->setSetter(vm, exec->lexicalGlobalObject(), asObject(setter)); + accessor->setSetter(vm, asObject(setter)); baseObj->putDirectAccessor(exec, *identifier, accessor, Accessor); } #else -void JIT_OPERATION operationPutGetterById(ExecState* exec, JSCell* object, Identifier* identifier, JSCell* getter) +void JIT_OPERATION operationPutGetterSetter(ExecState* exec, JSCell* object, Identifier* identifier, JSCell* getter, JSCell* setter) { VM& vm = exec->vm(); NativeCallFrameTracer tracer(&vm, exec); @@ -1302,51 +1244,48 @@ void JIT_OPERATION operationPutGetterById(ExecState* exec, JSCell* object, Ident ASSERT(object && object->isObject()); JSObject* baseObj = object->getObject(); - ASSERT(getter->isObject()); - baseObj->putGetter(exec, *identifier, getter); + GetterSetter* accessor = GetterSetter::create(vm); + + ASSERT(!getter || getter->isObject()); + ASSERT(!setter || setter->isObject()); + ASSERT(getter || setter); + + if (getter) + accessor->setGetter(vm, getter->getObject()); + if (setter) + accessor->setSetter(vm, setter->getObject()); + baseObj->putDirectAccessor(exec, *identifier, accessor, Accessor); } +#endif -void JIT_OPERATION operationPutSetterById(ExecState* exec, JSCell* object, Identifier* identifier, JSCell* setter) +void JIT_OPERATION operationPushNameScope(ExecState* exec, Identifier* 
identifier, EncodedJSValue encodedValue, int32_t attributes) { VM& vm = exec->vm(); NativeCallFrameTracer tracer(&vm, exec); - ASSERT(object && object->isObject()); - JSObject* baseObj = object->getObject(); + JSNameScope* scope = JSNameScope::create(exec, *identifier, JSValue::decode(encodedValue), attributes); - ASSERT(setter->isObject()); - baseObj->putSetter(exec, *identifier, setter); + exec->setScope(scope); } -void JIT_OPERATION operationPutGetterSetter(ExecState* exec, JSCell* object, Identifier* identifier, JSCell* getter, JSCell* setter) +void JIT_OPERATION operationPushWithScope(ExecState* exec, EncodedJSValue encodedValue) { VM& vm = exec->vm(); NativeCallFrameTracer tracer(&vm, exec); - ASSERT(object && object->isObject()); - JSObject* baseObj = object->getObject(); - - GetterSetter* accessor = GetterSetter::create(vm, exec->lexicalGlobalObject()); - - ASSERT(!getter || getter->isObject()); - ASSERT(!setter || setter->isObject()); - ASSERT(getter || setter); + JSObject* o = JSValue::decode(encodedValue).toObject(exec); + if (vm.exception()) + return; - if (getter) - accessor->setGetter(vm, exec->lexicalGlobalObject(), getter->getObject()); - if (setter) - accessor->setSetter(vm, exec->lexicalGlobalObject(), setter->getObject()); - baseObj->putDirectAccessor(exec, *identifier, accessor, Accessor); + exec->setScope(JSWithScope::create(exec, o)); } -#endif -void JIT_OPERATION operationPopScope(ExecState* exec, int32_t scopeReg) +void JIT_OPERATION operationPopScope(ExecState* exec) { VM& vm = exec->vm(); NativeCallFrameTracer tracer(&vm, exec); - JSScope* scope = exec->uncheckedR(scopeReg).Register::scope(); - exec->uncheckedR(scopeReg) = scope->next(); + exec->setScope(exec->scope()->next()); } void JIT_OPERATION operationProfileDidCall(ExecState* exec, EncodedJSValue encodedValue) @@ -1369,246 +1308,141 @@ void JIT_OPERATION operationProfileWillCall(ExecState* exec, EncodedJSValue enco EncodedJSValue JIT_OPERATION operationCheckHasInstance(ExecState* exec, EncodedJSValue encodedValue, EncodedJSValue encodedBaseVal) { - VM& vm = exec->vm(); - NativeCallFrameTracer tracer(&vm, exec); + VM* vm = &exec->vm(); + NativeCallFrameTracer tracer(vm, exec); JSValue value = JSValue::decode(encodedValue); JSValue baseVal = JSValue::decode(encodedBaseVal); if (baseVal.isObject()) { JSObject* baseObject = asObject(baseVal); - ASSERT(!baseObject->structure(vm)->typeInfo().implementsDefaultHasInstance()); - if (baseObject->structure(vm)->typeInfo().implementsHasInstance()) { - bool result = baseObject->methodTable(vm)->customHasInstance(baseObject, exec, value); + ASSERT(!baseObject->structure()->typeInfo().implementsDefaultHasInstance()); + if (baseObject->structure()->typeInfo().implementsHasInstance()) { + bool result = baseObject->methodTable()->customHasInstance(baseObject, exec, value); return JSValue::encode(jsBoolean(result)); } } - vm.throwException(exec, createInvalidInstanceofParameterError(exec, baseVal)); + vm->throwException(exec, createInvalidParameterError(exec, "instanceof", baseVal)); return JSValue::encode(JSValue()); } +JSCell* JIT_OPERATION operationCreateActivation(ExecState* exec, int32_t offset) +{ + VM& vm = exec->vm(); + NativeCallFrameTracer tracer(&vm, exec); + JSActivation* activation = JSActivation::create(vm, exec, exec->registers() + offset, exec->codeBlock()); + exec->setScope(activation); + return activation; } -static bool canAccessArgumentIndexQuickly(JSObject& object, uint32_t index) +JSCell* JIT_OPERATION operationCreateArguments(ExecState* exec) { - switch
(object.structure()->typeInfo().type()) { - case DirectArgumentsType: { - DirectArguments* directArguments = jsCast<DirectArguments*>(&object); - if (directArguments->canAccessArgumentIndexQuicklyInDFG(index)) - return true; - break; - } - case ScopedArgumentsType: { - ScopedArguments* scopedArguments = jsCast<ScopedArguments*>(&object); - if (scopedArguments->canAccessArgumentIndexQuicklyInDFG(index)) - return true; - break; - } - default: - break; - } - return false; + VM& vm = exec->vm(); + NativeCallFrameTracer tracer(&vm, exec); + // NB: This needs to be exceedingly careful with top call frame tracking, since it + // may be called from OSR exit, while the state of the call stack is bizarre. + Arguments* result = Arguments::create(vm, exec); + ASSERT(!vm.exception()); + return result; } -static JSValue getByVal(ExecState* exec, JSValue baseValue, JSValue subscript, ByValInfo* byValInfo, ReturnAddressPtr returnAddress) +EncodedJSValue JIT_OPERATION operationGetArgumentsLength(ExecState* exec, int32_t argumentsRegister) +{ + VM& vm = exec->vm(); + NativeCallFrameTracer tracer(&vm, exec); + // Here we can assume that the arguments were created, because otherwise the JIT code would + // not have made this call. + Identifier ident(&vm, "length"); + JSValue baseValue = exec->uncheckedR(argumentsRegister).jsValue(); + PropertySlot slot(baseValue); + return JSValue::encode(baseValue.get(exec, ident, slot)); +} + +} + +static JSValue getByVal(ExecState* exec, JSValue baseValue, JSValue subscript, ReturnAddressPtr returnAddress) { if (LIKELY(baseValue.isCell() && subscript.isString())) { - VM& vm = exec->vm(); - Structure& structure = *baseValue.asCell()->structure(vm); - if (JSCell::canUseFastGetOwnProperty(structure)) { - if (RefPtr<AtomicStringImpl> existingAtomicString = asString(subscript)->toExistingAtomicString(exec)) { - if (JSValue result = baseValue.asCell()->fastGetOwnProperty(vm, structure, existingAtomicString.get())) { - ASSERT(exec->locationAsBytecodeOffset()); - if (byValInfo->stubInfo && byValInfo->cachedId.impl() != existingAtomicString) - byValInfo->tookSlowPath = true; - return result; - } - } - } + if (JSValue result = baseValue.asCell()->fastGetOwnProperty(exec, asString(subscript)->value(exec))) + return result; } if (subscript.isUInt32()) { - ASSERT(exec->locationAsBytecodeOffset()); - byValInfo->tookSlowPath = true; - uint32_t i = subscript.asUInt32(); - if (isJSString(baseValue)) { - if (asString(baseValue)->canGetIndex(i)) { - ctiPatchCallByReturnAddress(exec->codeBlock(), returnAddress, FunctionPtr(operationGetByValString)); - return asString(baseValue)->getIndex(exec, i); - } - byValInfo->arrayProfile->setOutOfBounds(); - } else if (baseValue.isObject()) { - JSObject* object = asObject(baseValue); - if (object->canGetIndexQuickly(i)) - return object->getIndexQuickly(i); - - if (!canAccessArgumentIndexQuickly(*object, i)) - byValInfo->arrayProfile->setOutOfBounds(); + if (isJSString(baseValue) && asString(baseValue)->canGetIndex(i)) { + ctiPatchCallByReturnAddress(exec->codeBlock(), returnAddress, FunctionPtr(operationGetByValString)); + return asString(baseValue)->getIndex(exec, i); } - return baseValue.get(exec, i); } - baseValue.requireObjectCoercible(exec); - if (exec->hadException()) - return jsUndefined(); - auto property = subscript.toPropertyKey(exec); - if (exec->hadException()) - return jsUndefined(); - - ASSERT(exec->locationAsBytecodeOffset()); - if (byValInfo->stubInfo && byValInfo->cachedId != property) - byValInfo->tookSlowPath = true; + if
(isName(subscript)) + return baseValue.get(exec, jsCast<NameInstance*>(subscript.asCell())->privateName()); + Identifier property(exec, subscript.toString(exec)->value(exec)); return baseValue.get(exec, property); } extern "C" { -EncodedJSValue JIT_OPERATION operationGetByValGeneric(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo* byValInfo) +EncodedJSValue JIT_OPERATION operationGetByValGeneric(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript) { VM& vm = exec->vm(); NativeCallFrameTracer tracer(&vm, exec); JSValue baseValue = JSValue::decode(encodedBase); JSValue subscript = JSValue::decode(encodedSubscript); - JSValue result = getByVal(exec, baseValue, subscript, byValInfo, ReturnAddressPtr(OUR_RETURN_ADDRESS)); + JSValue result = getByVal(exec, baseValue, subscript, ReturnAddressPtr(OUR_RETURN_ADDRESS)); return JSValue::encode(result); } -EncodedJSValue JIT_OPERATION operationGetByValOptimize(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo* byValInfo) +EncodedJSValue JIT_OPERATION operationGetByValDefault(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript) { VM& vm = exec->vm(); NativeCallFrameTracer tracer(&vm, exec); JSValue baseValue = JSValue::decode(encodedBase); JSValue subscript = JSValue::decode(encodedSubscript); - + if (baseValue.isObject() && subscript.isInt32()) { // See if it's worth optimizing this at all. JSObject* object = asObject(baseValue); bool didOptimize = false; - ASSERT(exec->locationAsBytecodeOffset()); - ASSERT(!byValInfo->stubRoutine); - - if (hasOptimizableIndexing(object->structure(vm))) { + unsigned bytecodeOffset = exec->locationAsBytecodeOffset(); + ASSERT(bytecodeOffset); + ByValInfo& byValInfo = exec->codeBlock()->getByValInfo(bytecodeOffset - 1); + ASSERT(!byValInfo.stubRoutine); + + if (hasOptimizableIndexing(object->structure())) { // Attempt to optimize. - Structure* structure = object->structure(vm); - JITArrayMode arrayMode = jitArrayModeForStructure(structure); - if (arrayMode != byValInfo->arrayMode) { - // If we reached this case, we got an interesting array mode we did not expect when we compiled. - // Let's update the profile to do better next time. - CodeBlock* codeBlock = exec->codeBlock(); - ConcurrentJITLocker locker(codeBlock->m_lock); - byValInfo->arrayProfile->computeUpdatedPrediction(locker, codeBlock, structure); - - JIT::compileGetByVal(&vm, exec->codeBlock(), byValInfo, ReturnAddressPtr(OUR_RETURN_ADDRESS), arrayMode); + JITArrayMode arrayMode = jitArrayModeForStructure(object->structure()); + if (arrayMode != byValInfo.arrayMode) { + JIT::compileGetByVal(&vm, exec->codeBlock(), &byValInfo, ReturnAddressPtr(OUR_RETURN_ADDRESS), arrayMode); didOptimize = true; } } - + if (!didOptimize) { // If we take slow path more than 10 times without patching then make sure we // never make that mistake again. Or, if we failed to patch and we have some object // that intercepts indexed get, then don't even wait until 10 times. For cases // where we see non-index-intercepting objects, this gives 10 iterations worth of // opportunity for us to observe that the get_by_val may be polymorphic. - if (++byValInfo->slowPathCount >= 10 - || object->structure(vm)->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero()) { + if (++byValInfo.slowPathCount >= 10 + || object->structure()->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero()) { // Don't ever try to optimize. 
- ctiPatchCallByReturnAddress(exec->codeBlock(), ReturnAddressPtr(OUR_RETURN_ADDRESS), FunctionPtr(operationGetByValGeneric)); + RepatchBuffer repatchBuffer(exec->codeBlock()); + repatchBuffer.relinkCallerToFunction(ReturnAddressPtr(OUR_RETURN_ADDRESS), FunctionPtr(operationGetByValGeneric)); } } } - - if (baseValue.isObject() && (subscript.isSymbol() || subscript.isString())) { - const Identifier propertyName = subscript.toPropertyKey(exec); - - if (!subscript.isString() || !parseIndex(propertyName)) { - ASSERT(exec->locationAsBytecodeOffset()); - ASSERT(!byValInfo->stubRoutine); - JIT::compileGetByValWithCachedId(&vm, exec->codeBlock(), byValInfo, ReturnAddressPtr(OUR_RETURN_ADDRESS), propertyName); - } - - PropertySlot slot(baseValue); - bool hasResult = baseValue.getPropertySlot(exec, propertyName, slot); - return JSValue::encode(hasResult ? slot.getValue(exec, propertyName) : jsUndefined()); - } - - JSValue result = getByVal(exec, baseValue, subscript, byValInfo, ReturnAddressPtr(OUR_RETURN_ADDRESS)); - return JSValue::encode(result); -} - -EncodedJSValue JIT_OPERATION operationHasIndexedPropertyDefault(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo* byValInfo) -{ - VM& vm = exec->vm(); - NativeCallFrameTracer tracer(&vm, exec); - JSValue baseValue = JSValue::decode(encodedBase); - JSValue subscript = JSValue::decode(encodedSubscript); - - ASSERT(baseValue.isObject()); - ASSERT(subscript.isUInt32()); - - JSObject* object = asObject(baseValue); - bool didOptimize = false; - - ASSERT(exec->locationAsBytecodeOffset()); - ASSERT(!byValInfo->stubRoutine); - - if (hasOptimizableIndexing(object->structure(vm))) { - // Attempt to optimize. - JITArrayMode arrayMode = jitArrayModeForStructure(object->structure(vm)); - if (arrayMode != byValInfo->arrayMode) { - JIT::compileHasIndexedProperty(&vm, exec->codeBlock(), byValInfo, ReturnAddressPtr(OUR_RETURN_ADDRESS), arrayMode); - didOptimize = true; - } - } - - if (!didOptimize) { - // If we take slow path more than 10 times without patching then make sure we - // never make that mistake again. Or, if we failed to patch and we have some object - // that intercepts indexed get, then don't even wait until 10 times. For cases - // where we see non-index-intercepting objects, this gives 10 iterations worth of - // opportunity for us to observe that the get_by_val may be polymorphic. - if (++byValInfo->slowPathCount >= 10 - || object->structure(vm)->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero()) { - // Don't ever try to optimize. 
- ctiPatchCallByReturnAddress(exec->codeBlock(), ReturnAddressPtr(OUR_RETURN_ADDRESS), FunctionPtr(operationHasIndexedPropertyGeneric)); - } - } - - uint32_t index = subscript.asUInt32(); - if (object->canGetIndexQuickly(index)) - return JSValue::encode(JSValue(JSValue::JSTrue)); - - if (!canAccessArgumentIndexQuickly(*object, index)) - byValInfo->arrayProfile->setOutOfBounds(); - return JSValue::encode(jsBoolean(object->hasProperty(exec, index))); -} - -EncodedJSValue JIT_OPERATION operationHasIndexedPropertyGeneric(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo* byValInfo) -{ - VM& vm = exec->vm(); - NativeCallFrameTracer tracer(&vm, exec); - JSValue baseValue = JSValue::decode(encodedBase); - JSValue subscript = JSValue::decode(encodedSubscript); - ASSERT(baseValue.isObject()); - ASSERT(subscript.isUInt32()); - - JSObject* object = asObject(baseValue); - uint32_t index = subscript.asUInt32(); - if (object->canGetIndexQuickly(index)) - return JSValue::encode(JSValue(JSValue::JSTrue)); - - if (!canAccessArgumentIndexQuickly(*object, index)) - byValInfo->arrayProfile->setOutOfBounds(); - return JSValue::encode(jsBoolean(object->hasProperty(exec, subscript.asUInt32()))); + JSValue result = getByVal(exec, baseValue, subscript, ReturnAddressPtr(OUR_RETURN_ADDRESS)); + return JSValue::encode(result); } -EncodedJSValue JIT_OPERATION operationGetByValString(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo* byValInfo) +EncodedJSValue JIT_OPERATION operationGetByValString(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript) { VM& vm = exec->vm(); NativeCallFrameTracer tracer(&vm, exec); @@ -1622,23 +1456,37 @@ EncodedJSValue JIT_OPERATION operationGetByValString(ExecState* exec, EncodedJSV result = asString(baseValue)->getIndex(exec, i); else { result = baseValue.get(exec, i); - if (!isJSString(baseValue)) { - ASSERT(exec->locationAsBytecodeOffset()); - ctiPatchCallByReturnAddress(exec->codeBlock(), ReturnAddressPtr(OUR_RETURN_ADDRESS), FunctionPtr(byValInfo->stubRoutine ? 
operationGetByValGeneric : operationGetByValOptimize)); - } + if (!isJSString(baseValue)) + ctiPatchCallByReturnAddress(exec->codeBlock(), ReturnAddressPtr(OUR_RETURN_ADDRESS), FunctionPtr(operationGetByValDefault)); } - } else { - baseValue.requireObjectCoercible(exec); - if (exec->hadException()) - return JSValue::encode(jsUndefined()); - auto property = subscript.toPropertyKey(exec); - if (exec->hadException()) - return JSValue::encode(jsUndefined()); + } else if (isName(subscript)) + result = baseValue.get(exec, jsCast<NameInstance*>(subscript.asCell())->privateName()); + else { + Identifier property(exec, subscript.toString(exec)->value(exec)); result = baseValue.get(exec, property); } return JSValue::encode(result); } + +void JIT_OPERATION operationTearOffActivation(ExecState* exec, JSCell* activationCell) +{ + VM& vm = exec->vm(); + NativeCallFrameTracer tracer(&vm, exec); + + ASSERT(exec->codeBlock()->needsFullScopeChain()); + jsCast<JSActivation*>(activationCell)->tearOff(vm); +} + +void JIT_OPERATION operationTearOffArguments(ExecState* exec, JSCell* argumentsCell, JSCell* activationCell) +{ + ASSERT(exec->codeBlock()->usesArguments()); + if (activationCell) { + jsCast<Arguments*>(argumentsCell)->didTearOffActivation(exec, jsCast<JSActivation*>(activationCell)); + return; + } + jsCast<Arguments*>(argumentsCell)->tearOff(exec); +} EncodedJSValue JIT_OPERATION operationDeleteById(ExecState* exec, EncodedJSValue encodedBase, const Identifier* identifier) { @@ -1646,13 +1494,25 @@ EncodedJSValue JIT_OPERATION operationDeleteById(ExecState* exec, EncodedJSValue NativeCallFrameTracer tracer(&vm, exec); JSObject* baseObj = JSValue::decode(encodedBase).toObject(exec); - bool couldDelete = baseObj->methodTable(vm)->deleteProperty(baseObj, exec, *identifier); + bool couldDelete = baseObj->methodTable()->deleteProperty(baseObj, exec, *identifier); JSValue result = jsBoolean(couldDelete); if (!couldDelete && exec->codeBlock()->isStrictMode()) - vm.throwException(exec, createTypeError(exec, ASCIILiteral("Unable to delete property."))); + vm.throwException(exec, createTypeError(exec, "Unable to delete property.")); return JSValue::encode(result); } +JSCell* JIT_OPERATION operationGetPNames(ExecState* exec, JSObject* obj) +{ + VM& vm = exec->vm(); + NativeCallFrameTracer tracer(&vm, exec); + + Structure* structure = obj->structure(); + JSPropertyNameIterator* jsPropertyNameIterator = structure->enumerationCache(); + if (!jsPropertyNameIterator || jsPropertyNameIterator->cachedPrototypeChain() != structure->prototypeChain(exec)) + jsPropertyNameIterator = JSPropertyNameIterator::create(exec, obj); + return jsPropertyNameIterator; +} + EncodedJSValue JIT_OPERATION operationInstanceOf(ExecState* exec, EncodedJSValue encodedValue, EncodedJSValue encodedProto) { VM& vm = exec->vm(); @@ -1666,21 +1526,23 @@ EncodedJSValue JIT_OPERATION operationInstanceOf(ExecState* exec, EncodedJSValue return JSValue::encode(jsBoolean(result)); } -int32_t JIT_OPERATION operationSizeFrameForVarargs(ExecState* exec, EncodedJSValue encodedArguments, int32_t numUsedStackSlots, int32_t firstVarArgOffset) +CallFrame* JIT_OPERATION operationSizeAndAllocFrameForVarargs(ExecState* exec, EncodedJSValue encodedArguments, int32_t firstFreeRegister) { VM& vm = exec->vm(); NativeCallFrameTracer tracer(&vm, exec); JSStack* stack = &exec->interpreter()->stack(); JSValue arguments = JSValue::decode(encodedArguments); - return sizeFrameForVarargs(exec, stack, arguments, numUsedStackSlots, firstVarArgOffset); + CallFrame* newCallFrame 
= sizeAndAllocFrameForVarargs(exec, stack, arguments, firstFreeRegister); + return newCallFrame; } -CallFrame* JIT_OPERATION operationSetupVarargsFrame(ExecState* exec, CallFrame* newCallFrame, EncodedJSValue encodedArguments, int32_t firstVarArgOffset, int32_t length) +CallFrame* JIT_OPERATION operationLoadVarargs(ExecState* exec, CallFrame* newCallFrame, EncodedJSValue encodedThis, EncodedJSValue encodedArguments) { VM& vm = exec->vm(); NativeCallFrameTracer tracer(&vm, exec); + JSValue thisValue = JSValue::decode(encodedThis); JSValue arguments = JSValue::decode(encodedArguments); - setupVarargsFrame(exec, newCallFrame, arguments, firstVarArgOffset, length); + loadVarargs(exec, newCallFrame, thisValue, arguments); return newCallFrame; } @@ -1747,13 +1609,12 @@ char* JIT_OPERATION operationSwitchStringWithUnknownKeyType(ExecState* exec, Enc return reinterpret_cast<char*>(result); } -EncodedJSValue JIT_OPERATION operationResolveScope(ExecState* exec, int32_t scopeReg, int32_t identifierIndex) +EncodedJSValue JIT_OPERATION operationResolveScope(ExecState* exec, int32_t identifierIndex) { VM& vm = exec->vm(); NativeCallFrameTracer tracer(&vm, exec); const Identifier& ident = exec->codeBlock()->identifier(identifierIndex); - JSScope* scope = exec->uncheckedR(scopeReg).Register::scope(); - return JSValue::encode(JSScope::resolve(exec, scope, ident)); + return JSValue::encode(JSScope::resolve(exec, exec->scope(), ident)); } EncodedJSValue JIT_OPERATION operationGetFromScope(ExecState* exec, Instruction* bytecodePC) @@ -1775,15 +1636,11 @@ EncodedJSValue JIT_OPERATION operationGetFromScope(ExecState* exec, Instruction* } // Covers implicit globals. Since they don't exist until they first execute, we didn't know how to cache them at compile time. - if (slot.isCacheableValue() && slot.slotBase() == scope && scope->structure(vm)->propertyAccessesAreCacheable()) { + if (slot.isCacheableValue() && slot.slotBase() == scope && scope->structure()->propertyAccessesAreCacheable()) { if (modeAndType.type() == GlobalProperty || modeAndType.type() == GlobalPropertyWithVarInjectionChecks) { - Structure* structure = scope->structure(vm); - { - ConcurrentJITLocker locker(codeBlock->m_lock); - pc[5].u.structure.set(exec->vm(), codeBlock->ownerExecutable(), structure); - pc[6].u.operand = slot.cachedOffset(); - } - structure->startWatchingPropertyForReplacements(vm, slot.cachedOffset()); + ConcurrentJITLocker locker(codeBlock->m_lock); + pc[5].u.structure.set(exec->vm(), codeBlock->ownerExecutable(), scope->structure()); + pc[6].u.operand = slot.cachedOffset(); } } @@ -1801,13 +1658,7 @@ void JIT_OPERATION operationPutToScope(ExecState* exec, Instruction* bytecodePC) JSObject* scope = jsCast<JSObject*>(exec->uncheckedR(pc[1].u.operand).jsValue()); JSValue value = exec->r(pc[3].u.operand).jsValue(); ResolveModeAndType modeAndType = ResolveModeAndType(pc[4].u.operand); - if (modeAndType.type() == LocalClosureVar) { - JSLexicalEnvironment* environment = jsCast<JSLexicalEnvironment*>(scope); - environment->variableAt(ScopeOffset(pc[6].u.operand)).set(vm, environment, value); - if (WatchpointSet* set = pc[5].u.watchpointSet) - set->touch("Executed op_put_scope<LocalClosureVar>"); - return; - } + if (modeAndType.mode() == ThrowIfNotFound && !scope->hasProperty(exec, ident)) { exec->vm().throwException(exec, createUndefinedVariableError(exec, ident)); return; @@ -1819,7 +1670,14 @@ void JIT_OPERATION operationPutToScope(ExecState* exec, Instruction* bytecodePC) if (exec->vm().exception()) return; - 
CommonSlowPaths::tryCachePutToScopeGlobal(exec, codeBlock, pc, scope, modeAndType, slot); + // Covers implicit globals. Since they don't exist until they first execute, we didn't know how to cache them at compile time. + if (modeAndType.type() == GlobalProperty || modeAndType.type() == GlobalPropertyWithVarInjectionChecks) { + if (slot.isCacheable() && slot.base() == scope && scope->structure()->propertyAccessesAreCacheable()) { + ConcurrentJITLocker locker(codeBlock->m_lock); + pc[5].u.structure.set(exec->vm(), codeBlock->ownerExecutable(), scope->structure()); + pc[6].u.operand = slot.cachedOffset(); + } + } } void JIT_OPERATION operationThrow(ExecState* exec, EncodedJSValue encodedExceptionValue) @@ -1830,8 +1688,8 @@ void JIT_OPERATION operationThrow(ExecState* exec, EncodedJSValue encodedExcepti JSValue exceptionValue = JSValue::decode(encodedExceptionValue); vm->throwException(exec, exceptionValue); - // Results stored out-of-band in vm.targetMachinePCForThrow, vm.callFrameForThrow & vm.vmEntryFrameForThrow - genericUnwind(vm, exec); + // Results stored out-of-band in vm.targetMachinePCForThrow & vm.callFrameForThrow + genericUnwind(vm, exec, exceptionValue); } void JIT_OPERATION operationFlushWriteBarrierBuffer(ExecState* exec, JSCell* cell) @@ -1845,7 +1703,7 @@ void JIT_OPERATION operationOSRWriteBarrier(ExecState* exec, JSCell* cell) { VM* vm = &exec->vm(); NativeCallFrameTracer tracer(vm, exec); - vm->heap.writeBarrier(cell); + exec->heap()->writeBarrier(cell); } // NB: We don't include the value as part of the barrier because the write barrier elision @@ -1855,7 +1713,7 @@ void JIT_OPERATION operationUnconditionalWriteBarrier(ExecState* exec, JSCell* c { VM* vm = &exec->vm(); NativeCallFrameTracer tracer(vm, exec); - vm->heap.writeBarrier(cell); + Heap::writeBarrier(cell); } void JIT_OPERATION operationInitGlobalConst(ExecState* exec, Instruction* pc) @@ -1864,24 +1722,18 @@ void JIT_OPERATION operationInitGlobalConst(ExecState* exec, Instruction* pc) NativeCallFrameTracer tracer(vm, exec); JSValue value = exec->r(pc[2].u.operand).jsValue(); - pc[1].u.variablePointer->set(*vm, exec->codeBlock()->globalObject(), value); + pc[1].u.registerPointer->set(*vm, exec->codeBlock()->globalObject(), value); } -void JIT_OPERATION lookupExceptionHandler(VM* vm, ExecState* exec) +void JIT_OPERATION lookupExceptionHandler(ExecState* exec) { + VM* vm = &exec->vm(); NativeCallFrameTracer tracer(vm, exec); - genericUnwind(vm, exec); - ASSERT(vm->targetMachinePCForThrow); -} -void JIT_OPERATION lookupExceptionHandlerFromCallerFrame(VM* vm, ExecState* exec) -{ - VMEntryFrame* vmEntryFrame = vm->topVMEntryFrame; - CallFrame* callerFrame = exec->callerFrame(vmEntryFrame); - ASSERT(callerFrame); - - NativeCallFrameTracerWithRestore tracer(vm, vmEntryFrame, callerFrame); - genericUnwind(vm, callerFrame); + JSValue exceptionValue = exec->exception(); + ASSERT(exceptionValue); + + genericUnwind(vm, exec, exceptionValue); ASSERT(vm->targetMachinePCForThrow); } @@ -1889,72 +1741,9 @@ void JIT_OPERATION operationVMHandleException(ExecState* exec) { VM* vm = &exec->vm(); NativeCallFrameTracer tracer(vm, exec); - genericUnwind(vm, exec); -} - -// This function "should" just take the ExecState*, but doing so would make it more difficult -// to call from exception check sites. So, unlike all of our other functions, we allow -// ourselves to play some gnarly ABI tricks just to simplify the calling convention. 
This is -// particularly safe here since this is never called on the critical path - it's only for -// testing. -void JIT_OPERATION operationExceptionFuzz() -{ -#if COMPILER(GCC_OR_CLANG) - ExecState* exec = static_cast<ExecState*>(__builtin_frame_address(1)); - void* returnPC = __builtin_return_address(0); - doExceptionFuzzing(exec, "JITOperations", returnPC); -#endif // COMPILER(GCC_OR_CLANG) -} - -EncodedJSValue JIT_OPERATION operationHasGenericProperty(ExecState* exec, EncodedJSValue encodedBaseValue, JSCell* propertyName) -{ - VM& vm = exec->vm(); - NativeCallFrameTracer tracer(&vm, exec); - JSValue baseValue = JSValue::decode(encodedBaseValue); - if (baseValue.isUndefinedOrNull()) - return JSValue::encode(jsBoolean(false)); - - JSObject* base = baseValue.toObject(exec); - return JSValue::encode(jsBoolean(base->hasProperty(exec, asString(propertyName)->toIdentifier(exec)))); -} - -EncodedJSValue JIT_OPERATION operationHasIndexedProperty(ExecState* exec, JSCell* baseCell, int32_t subscript) -{ - VM& vm = exec->vm(); - NativeCallFrameTracer tracer(&vm, exec); - JSObject* object = baseCell->toObject(exec, exec->lexicalGlobalObject()); - return JSValue::encode(jsBoolean(object->hasProperty(exec, subscript))); -} - -JSCell* JIT_OPERATION operationGetPropertyEnumerator(ExecState* exec, JSCell* cell) -{ - VM& vm = exec->vm(); - NativeCallFrameTracer tracer(&vm, exec); - - JSObject* base = cell->toObject(exec, exec->lexicalGlobalObject()); - return propertyNameEnumerator(exec, base); -} - -EncodedJSValue JIT_OPERATION operationNextEnumeratorPname(ExecState* exec, JSCell* enumeratorCell, int32_t index) -{ - VM& vm = exec->vm(); - NativeCallFrameTracer tracer(&vm, exec); - JSPropertyNameEnumerator* enumerator = jsCast<JSPropertyNameEnumerator*>(enumeratorCell); - JSString* propertyName = enumerator->propertyNameAtIndex(index); - return JSValue::encode(propertyName ? 
propertyName : jsNull()); -} - -JSCell* JIT_OPERATION operationToIndexString(ExecState* exec, int32_t index) -{ - VM& vm = exec->vm(); - NativeCallFrameTracer tracer(&vm, exec); - return jsString(exec, Identifier::from(exec, index).string()); -} - -void JIT_OPERATION operationProcessTypeProfilerLog(ExecState* exec) -{ - exec->vm().typeProfilerLog()->processLogEntries(ASCIILiteral("Log Full, called from inside baseline JIT")); + ASSERT(!exec->isVMEntrySentinel()); + genericUnwind(vm, exec, vm->exception()); } } // extern "C" @@ -1969,31 +1758,28 @@ extern "C" EncodedJSValue HOST_CALL_RETURN_VALUE_OPTION getHostCallReturnValueWi return JSValue::encode(exec->vm().hostCallReturnValue); } -#if COMPILER(GCC_OR_CLANG) && CPU(X86_64) +#if COMPILER(GCC) && CPU(X86_64) asm ( ".globl " SYMBOL_STRING(getHostCallReturnValue) "\n" HIDE_SYMBOL(getHostCallReturnValue) "\n" SYMBOL_STRING(getHostCallReturnValue) ":" "\n" + "mov 0(%rbp), %rbp\n" // CallerFrameAndPC::callerFrame "mov %rbp, %rdi\n" "jmp " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n" ); -#elif COMPILER(GCC_OR_CLANG) && CPU(X86) +#elif COMPILER(GCC) && CPU(X86) asm ( ".text" "\n" \ ".globl " SYMBOL_STRING(getHostCallReturnValue) "\n" HIDE_SYMBOL(getHostCallReturnValue) "\n" SYMBOL_STRING(getHostCallReturnValue) ":" "\n" - "push %ebp\n" - "leal -4(%esp), %esp\n" - "push %ebp\n" - "call " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n" - "leal 8(%esp), %esp\n" - "pop %ebp\n" - "ret\n" + "mov 0(%ebp), %ebp\n" // CallerFrameAndPC::callerFrame + "mov %ebp, 4(%esp)\n" + "jmp " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n" ); -#elif COMPILER(GCC_OR_CLANG) && CPU(ARM_THUMB2) +#elif COMPILER(GCC) && CPU(ARM_THUMB2) asm ( ".text" "\n" ".align 2" "\n" @@ -2002,17 +1788,19 @@ HIDE_SYMBOL(getHostCallReturnValue) "\n" ".thumb" "\n" ".thumb_func " THUMB_FUNC_PARAM(getHostCallReturnValue) "\n" SYMBOL_STRING(getHostCallReturnValue) ":" "\n" + "ldr r7, [r7, #0]" "\n" // CallerFrameAndPC::callerFrame "mov r0, r7" "\n" "b " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n" ); -#elif COMPILER(GCC_OR_CLANG) && CPU(ARM_TRADITIONAL) +#elif COMPILER(GCC) && CPU(ARM_TRADITIONAL) asm ( ".text" "\n" ".globl " SYMBOL_STRING(getHostCallReturnValue) "\n" HIDE_SYMBOL(getHostCallReturnValue) "\n" INLINE_ARM_FUNCTION(getHostCallReturnValue) SYMBOL_STRING(getHostCallReturnValue) ":" "\n" + "ldr r11, [r11, #0]" "\n" // CallerFrameAndPC::callerFrame "mov r0, r11" "\n" "b " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n" ); @@ -2024,41 +1812,30 @@ asm ( ".globl " SYMBOL_STRING(getHostCallReturnValue) "\n" HIDE_SYMBOL(getHostCallReturnValue) "\n" SYMBOL_STRING(getHostCallReturnValue) ":" "\n" + "ldur x29, [x29, #0]" "\n" "mov x0, x29" "\n" "b " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n" ); -#elif COMPILER(GCC_OR_CLANG) && CPU(MIPS) - -#if WTF_MIPS_PIC -#define LOAD_FUNCTION_TO_T9(function) \ - ".set noreorder" "\n" \ - ".cpload $25" "\n" \ - ".set reorder" "\n" \ - "la $t9, " LOCAL_REFERENCE(function) "\n" -#else -#define LOAD_FUNCTION_TO_T9(function) "" "\n" -#endif - +#elif COMPILER(GCC) && CPU(MIPS) asm ( ".text" "\n" ".globl " SYMBOL_STRING(getHostCallReturnValue) "\n" HIDE_SYMBOL(getHostCallReturnValue) "\n" SYMBOL_STRING(getHostCallReturnValue) ":" "\n" LOAD_FUNCTION_TO_T9(getHostCallReturnValueWithExecState) + "lw $fp, 0($fp)" "\n" // CallerFrameAndPC::callerFrame "move $a0, $fp" "\n" "b " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n" ); -#elif COMPILER(GCC_OR_CLANG) && CPU(SH4) - -#define 
SH4_SCRATCH_REGISTER "r11" - +#elif COMPILER(GCC) && CPU(SH4) asm ( ".text" "\n" ".globl " SYMBOL_STRING(getHostCallReturnValue) "\n" HIDE_SYMBOL(getHostCallReturnValue) "\n" SYMBOL_STRING(getHostCallReturnValue) ":" "\n" + "mov.l @r14, r14" "\n" // CallerFrameAndPC::callerFrame "mov r14, r4" "\n" "mov.l 2f, " SH4_SCRATCH_REGISTER "\n" "braf " SH4_SCRATCH_REGISTER "\n" @@ -2071,6 +1848,7 @@ SYMBOL_STRING(getHostCallReturnValue) ":" "\n" extern "C" { __declspec(naked) EncodedJSValue HOST_CALL_RETURN_VALUE_OPTION getHostCallReturnValue() { + __asm mov ebp, [ebp + 0]; // CallerFrameAndPC::callerFrame __asm mov [esp + 4], ebp; __asm jmp getHostCallReturnValueWithExecState } diff --git a/Source/JavaScriptCore/jit/JITOperations.h b/Source/JavaScriptCore/jit/JITOperations.h index 3d83f590d..43ca6177b 100644 --- a/Source/JavaScriptCore/jit/JITOperations.h +++ b/Source/JavaScriptCore/jit/JITOperations.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013-2015 Apple Inc. All rights reserved. + * Copyright (C) 2013 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -29,15 +29,13 @@ #if ENABLE(JIT) #include "CallFrame.h" -#include "CommonSlowPaths.h" #include "JITExceptions.h" #include "JSArray.h" #include "JSCJSValue.h" #include "MacroAssembler.h" #include "PutKind.h" -#include "SpillRegistersMode.h" #include "StructureStubInfo.h" - +#include "VariableWatchpointSet.h" namespace JSC { @@ -57,42 +55,33 @@ extern "C" { Key: A: JSArray* Aap: ArrayAllocationProfile* - Ap: ArrayProfile* - By: ByValInfo* C: JSCell* Cb: CodeBlock* - Cli: CallLinkInfo* D: double E: ExecState* F: CallFrame* - G: JSGlobalObject* - I: UniquedStringImpl* - Icf: InlineCallFrame* + I: StringImpl* + Icf: InlineCalLFrame* Idc: const Identifier* J: EncodedJSValue Jcp: const JSValue* - Jsc: JSScope* - Jsf: JSFunction* + Jsa: JSActivation* Jss: JSString* - L: JSLexicalEnvironment* O: JSObject* P: pointer (char*) Pc: Instruction* i.e. 
bytecode PC - Q: int64_t R: Register S: size_t - Sprt: SlowPathReturnType Ssi: StructureStubInfo* St: Structure* - Symtab: SymbolTable* - T: StringImpl* V: void Vm: VM* - Ws: WatchpointSet* + Vws: VariableWatchpointSet* Z: int32_t */ -typedef CallFrame* JIT_OPERATION (*F_JITOperation_EFJZZ)(ExecState*, CallFrame*, EncodedJSValue, int32_t, int32_t); +typedef CallFrame* JIT_OPERATION (*F_JITOperation_EFJJ)(ExecState*, CallFrame*, EncodedJSValue, EncodedJSValue); +typedef CallFrame* JIT_OPERATION (*F_JITOperation_EJZ)(ExecState*, EncodedJSValue, int32_t); typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_E)(ExecState*); typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EA)(ExecState*, JSArray*); typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EAZ)(ExecState*, JSArray*, int32_t); @@ -100,104 +89,66 @@ typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EAapJ)(ExecState*, ArrayAl typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EAapJcpZ)(ExecState*, ArrayAllocationProfile*, const JSValue*, int32_t); typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EC)(ExecState*, JSCell*); typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_ECC)(ExecState*, JSCell*, JSCell*); -typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_ECI)(ExecState*, JSCell*, UniquedStringImpl*); +typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_ECI)(ExecState*, JSCell*, StringImpl*); typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_ECJ)(ExecState*, JSCell*, EncodedJSValue); -typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_ECZ)(ExecState*, JSCell*, int32_t); typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EDA)(ExecState*, double, JSArray*); -typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EE)(ExecState*, ExecState*); -typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EI)(ExecState*, UniquedStringImpl*); +typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EI)(ExecState*, StringImpl*); typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJ)(ExecState*, EncodedJSValue); -typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJZ)(ExecState*, EncodedJSValue, int32_t); -typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJC)(ExecState*, EncodedJSValue, JSCell*); typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJA)(ExecState*, EncodedJSValue, JSArray*); -typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJI)(ExecState*, EncodedJSValue, UniquedStringImpl*); typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJIdc)(ExecState*, EncodedJSValue, const Identifier*); typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJJ)(ExecState*, EncodedJSValue, EncodedJSValue); -typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJJAp)(ExecState*, EncodedJSValue, EncodedJSValue, ArrayProfile*); -typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJJBy)(ExecState*, EncodedJSValue, EncodedJSValue, ByValInfo*); typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJssZ)(ExecState*, JSString*, int32_t); typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJP)(ExecState*, EncodedJSValue, void*); typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EP)(ExecState*, void*); typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EPP)(ExecState*, void*, void*); typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EPS)(ExecState*, void*, size_t); typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EPc)(ExecState*, Instruction*); -typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJscC)(ExecState*, JSScope*, JSCell*); typedef EncodedJSValue JIT_OPERATION 
(*J_JITOperation_ESS)(ExecState*, size_t, size_t);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_ESsiCI)(ExecState*, StructureStubInfo*, JSCell*, UniquedStringImpl*);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_ESsiJI)(ExecState*, StructureStubInfo*, EncodedJSValue, UniquedStringImpl*);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_ESsiCI)(ExecState*, StructureStubInfo*, JSCell*, StringImpl*);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_ESsiJI)(ExecState*, StructureStubInfo*, EncodedJSValue, StringImpl*);
 typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EZ)(ExecState*, int32_t);
 typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EZIcfZ)(ExecState*, int32_t, InlineCallFrame*, int32_t);
 typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EZZ)(ExecState*, int32_t, int32_t);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EZSymtabJ)(ExecState*, int32_t, SymbolTable*, EncodedJSValue);
 typedef JSCell* JIT_OPERATION (*C_JITOperation_E)(ExecState*);
 typedef JSCell* JIT_OPERATION (*C_JITOperation_EZ)(ExecState*, int32_t);
 typedef JSCell* JIT_OPERATION (*C_JITOperation_EC)(ExecState*, JSCell*);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_ECZ)(ExecState*, JSCell*, int32_t);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_ECZC)(ExecState*, JSCell*, int32_t, JSCell*);
 typedef JSCell* JIT_OPERATION (*C_JITOperation_ECC)(ExecState*, JSCell*, JSCell*);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EGC)(ExecState*, JSGlobalObject*, JSCell*);
 typedef JSCell* JIT_OPERATION (*C_JITOperation_EIcf)(ExecState*, InlineCallFrame*);
 typedef JSCell* JIT_OPERATION (*C_JITOperation_EJ)(ExecState*, EncodedJSValue);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EJsc)(ExecState*, JSScope*);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EJscC)(ExecState*, JSScope*, JSCell*);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EJZ)(ExecState*, EncodedJSValue, int32_t);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EJZC)(ExecState*, EncodedJSValue, int32_t, JSCell*);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EJJC)(ExecState*, EncodedJSValue, EncodedJSValue, JSCell*);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EJscZ)(ExecState*, JSScope*, int32_t);
 typedef JSCell* JIT_OPERATION (*C_JITOperation_EJssSt)(ExecState*, JSString*, Structure*);
 typedef JSCell* JIT_OPERATION (*C_JITOperation_EJssJss)(ExecState*, JSString*, JSString*);
 typedef JSCell* JIT_OPERATION (*C_JITOperation_EJssJssJss)(ExecState*, JSString*, JSString*, JSString*);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EL)(ExecState*, JSLexicalEnvironment*);
 typedef JSCell* JIT_OPERATION (*C_JITOperation_EO)(ExecState*, JSObject*);
 typedef JSCell* JIT_OPERATION (*C_JITOperation_EOZ)(ExecState*, JSObject*, int32_t);
 typedef JSCell* JIT_OPERATION (*C_JITOperation_ESt)(ExecState*, Structure*);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EStJscSymtabJ)(ExecState*, Structure*, JSScope*, SymbolTable*, EncodedJSValue);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EStRZJsfL)(ExecState*, Structure*, Register*, int32_t, JSFunction*, JSLexicalEnvironment*);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EStRZJsf)(ExecState*, Structure*, Register*, int32_t, JSFunction*);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EStZ)(ExecState*, Structure*, int32_t);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EStZZ)(ExecState*, Structure*, int32_t, int32_t);
 typedef JSCell* JIT_OPERATION (*C_JITOperation_EZ)(ExecState*, int32_t);
 typedef double JIT_OPERATION (*D_JITOperation_D)(double);
 typedef double JIT_OPERATION (*D_JITOperation_DD)(double, double);
 typedef double JIT_OPERATION (*D_JITOperation_ZZ)(int32_t, int32_t);
 typedef double JIT_OPERATION (*D_JITOperation_EJ)(ExecState*, EncodedJSValue);
-typedef int64_t JIT_OPERATION(*Q_JITOperation_J)(EncodedJSValue);
-typedef int64_t JIT_OPERATION(*Q_JITOperation_D)(double);
 typedef int32_t JIT_OPERATION (*Z_JITOperation_D)(double);
 typedef int32_t JIT_OPERATION (*Z_JITOperation_E)(ExecState*);
-typedef int32_t JIT_OPERATION (*Z_JITOperation_EC)(ExecState*, JSCell*);
-typedef int32_t JIT_OPERATION (*Z_JITOperation_EGC)(ExecState*, JSGlobalObject*, JSCell*);
-typedef int32_t JIT_OPERATION (*Z_JITOperation_ESJss)(ExecState*, size_t, JSString*);
-typedef int32_t JIT_OPERATION (*Z_JITOperation_EJZ)(ExecState*, EncodedJSValue, int32_t);
-typedef int32_t JIT_OPERATION (*Z_JITOperation_EJZZ)(ExecState*, EncodedJSValue, int32_t, int32_t);
 typedef size_t JIT_OPERATION (*S_JITOperation_ECC)(ExecState*, JSCell*, JSCell*);
-typedef size_t JIT_OPERATION (*S_JITOperation_EGC)(ExecState*, JSGlobalObject*, JSCell*);
 typedef size_t JIT_OPERATION (*S_JITOperation_EJ)(ExecState*, EncodedJSValue);
 typedef size_t JIT_OPERATION (*S_JITOperation_EJJ)(ExecState*, EncodedJSValue, EncodedJSValue);
 typedef size_t JIT_OPERATION (*S_JITOperation_EOJss)(ExecState*, JSObject*, JSString*);
 typedef size_t JIT_OPERATION (*S_JITOperation_J)(EncodedJSValue);
-typedef SlowPathReturnType JIT_OPERATION (*Sprt_JITOperation_EZ)(ExecState*, int32_t);
-typedef void JIT_OPERATION (*V_JITOperation)();
 typedef void JIT_OPERATION (*V_JITOperation_E)(ExecState*);
 typedef void JIT_OPERATION (*V_JITOperation_EC)(ExecState*, JSCell*);
 typedef void JIT_OPERATION (*V_JITOperation_ECb)(ExecState*, CodeBlock*);
 typedef void JIT_OPERATION (*V_JITOperation_ECC)(ExecState*, JSCell*, JSCell*);
 typedef void JIT_OPERATION (*V_JITOperation_ECIcf)(ExecState*, JSCell*, InlineCallFrame*);
-typedef void JIT_OPERATION (*V_JITOperation_ECIC)(ExecState*, JSCell*, Identifier*, JSCell*);
 typedef void JIT_OPERATION (*V_JITOperation_ECICC)(ExecState*, JSCell*, Identifier*, JSCell*, JSCell*);
 typedef void JIT_OPERATION (*V_JITOperation_ECCIcf)(ExecState*, JSCell*, JSCell*, InlineCallFrame*);
 typedef void JIT_OPERATION (*V_JITOperation_ECJJ)(ExecState*, JSCell*, EncodedJSValue, EncodedJSValue);
 typedef void JIT_OPERATION (*V_JITOperation_ECPSPS)(ExecState*, JSCell*, void*, size_t, void*, size_t);
 typedef void JIT_OPERATION (*V_JITOperation_ECZ)(ExecState*, JSCell*, int32_t);
 typedef void JIT_OPERATION (*V_JITOperation_ECC)(ExecState*, JSCell*, JSCell*);
-typedef void JIT_OPERATION (*V_JITOperation_EZSymtabJ)(ExecState*, int32_t, SymbolTable*, EncodedJSValue);
+typedef void JIT_OPERATION (*V_JITOperation_EIdJZ)(ExecState*, Identifier*, EncodedJSValue, int32_t);
 typedef void JIT_OPERATION (*V_JITOperation_EJ)(ExecState*, EncodedJSValue);
-typedef void JIT_OPERATION (*V_JITOperation_EJCI)(ExecState*, EncodedJSValue, JSCell*, UniquedStringImpl*);
-typedef void JIT_OPERATION (*V_JITOperation_EJIdJ)(ExecState*, EncodedJSValue, Identifier*, EncodedJSValue);
+typedef void JIT_OPERATION (*V_JITOperation_EJCI)(ExecState*, EncodedJSValue, JSCell*, StringImpl*);
 typedef void JIT_OPERATION (*V_JITOperation_EJIdJJ)(ExecState*, EncodedJSValue, Identifier*, EncodedJSValue, EncodedJSValue);
 typedef void JIT_OPERATION (*V_JITOperation_EJJJ)(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue);
-typedef void JIT_OPERATION (*V_JITOperation_EJJJAp)(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue, ArrayProfile*);
-typedef void JIT_OPERATION (*V_JITOperation_EJJJBy)(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue, ByValInfo*);
 typedef void JIT_OPERATION (*V_JITOperation_EJPP)(ExecState*, EncodedJSValue, void*, void*);
 typedef void JIT_OPERATION (*V_JITOperation_EJZJ)(ExecState*, EncodedJSValue, int32_t, EncodedJSValue);
 typedef void JIT_OPERATION (*V_JITOperation_EJZ)(ExecState*, EncodedJSValue, int32_t);
@@ -205,17 +156,12 @@ typedef void JIT_OPERATION (*V_JITOperation_EOZD)(ExecState*, JSObject*, int32_t
 typedef void JIT_OPERATION (*V_JITOperation_EOZJ)(ExecState*, JSObject*, int32_t, EncodedJSValue);
 typedef void JIT_OPERATION (*V_JITOperation_EPc)(ExecState*, Instruction*);
 typedef void JIT_OPERATION (*V_JITOperation_EPZJ)(ExecState*, void*, int32_t, EncodedJSValue);
-typedef void JIT_OPERATION (*V_JITOperation_ESsiJJI)(ExecState*, StructureStubInfo*, EncodedJSValue, EncodedJSValue, UniquedStringImpl*);
-typedef void JIT_OPERATION (*V_JITOperation_EWs)(ExecState*, WatchpointSet*);
+typedef void JIT_OPERATION (*V_JITOperation_ESsiJJI)(ExecState*, StructureStubInfo*, EncodedJSValue, EncodedJSValue, StringImpl*);
+typedef void JIT_OPERATION (*V_JITOperation_EVws)(ExecState*, VariableWatchpointSet*);
 typedef void JIT_OPERATION (*V_JITOperation_EZ)(ExecState*, int32_t);
-typedef void JIT_OPERATION (*V_JITOperation_EZJ)(ExecState*, int32_t, EncodedJSValue);
-typedef void JIT_OPERATION (*V_JITOperation_EZJZZZ)(ExecState*, int32_t, EncodedJSValue, int32_t, int32_t, int32_t);
 typedef void JIT_OPERATION (*V_JITOperation_EVm)(ExecState*, VM*);
-typedef void JIT_OPERATION (*V_JITOperation_J)(EncodedJSValue);
-typedef void JIT_OPERATION (*V_JITOperation_Z)(int32_t);
 typedef char* JIT_OPERATION (*P_JITOperation_E)(ExecState*);
 typedef char* JIT_OPERATION (*P_JITOperation_EC)(ExecState*, JSCell*);
-typedef char* JIT_OPERATION (*P_JITOperation_ECli)(ExecState*, CallLinkInfo*);
 typedef char* JIT_OPERATION (*P_JITOperation_EJS)(ExecState*, EncodedJSValue, size_t);
 typedef char* JIT_OPERATION (*P_JITOperation_EO)(ExecState*, JSObject*);
 typedef char* JIT_OPERATION (*P_JITOperation_EOS)(ExecState*, JSObject*, size_t);
@@ -228,49 +174,51 @@ typedef char* JIT_OPERATION (*P_JITOperation_EStJ)(ExecState*, Structure*, Encod
 typedef char* JIT_OPERATION (*P_JITOperation_EStPS)(ExecState*, Structure*, void*, size_t);
 typedef char* JIT_OPERATION (*P_JITOperation_EStSS)(ExecState*, Structure*, size_t, size_t);
 typedef char* JIT_OPERATION (*P_JITOperation_EStZ)(ExecState*, Structure*, int32_t);
+typedef char* JIT_OPERATION (*P_JITOperation_EZ)(ExecState*, int32_t);
 typedef char* JIT_OPERATION (*P_JITOperation_EZZ)(ExecState*, int32_t, int32_t);
-typedef StringImpl* JIT_OPERATION (*T_JITOperation_EJss)(ExecState*, JSString*);
+typedef StringImpl* JIT_OPERATION (*I_JITOperation_EJss)(ExecState*, JSString*);
 typedef JSString* JIT_OPERATION (*Jss_JITOperation_EZ)(ExecState*, int32_t);
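The typedef grid above is mechanical: the letters before the underscore spell the return type (J for EncodedJSValue, C for JSCell*, P for char*, V for void, and so on), and each letter after "JITOperation_" spells one argument in order (E for ExecState*, J for EncodedJSValue, Z for int32_t, St for Structure*, ...). A minimal standalone sketch of the idea, using hypothetical names rather than the real JSC declarations:

    #include <cstdint>

    struct ExecState;                       // stand-in forward declaration
    typedef int64_t EncodedJSValue;         // stand-in for JSC's encoded value

    // Modeled on J_JITOperation_EZZ above: returns EncodedJSValue ("J"),
    // takes ExecState* ("E") and two int32_t arguments ("ZZ").
    typedef EncodedJSValue (*J_Operation_EZZ)(ExecState*, int32_t, int32_t);

    // Illustrative body only; a real operation would call back into the VM.
    static EncodedJSValue exampleOperation(ExecState*, int32_t a, int32_t b)
    {
        return a + b;
    }

    int main()
    {
        // The JIT keeps pointers of these exact shapes and emits calls to them.
        J_Operation_EZZ op = exampleOperation;
        return op(nullptr, 2, 3) == 5 ? 0 : 1;
    }

The strict typedef-per-signature style lets callOperation overloads stay type-checked even though the call itself is emitted as machine code.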
 
 // This method is used to lookup an exception handler, keyed by faultLocation, which is
 // the return location from one of the calls out to one of the helper operations above.
-
-void JIT_OPERATION lookupExceptionHandler(VM*, ExecState*) WTF_INTERNAL;
-void JIT_OPERATION lookupExceptionHandlerFromCallerFrame(VM*, ExecState*) WTF_INTERNAL;
+
+void JIT_OPERATION lookupExceptionHandler(ExecState*) WTF_INTERNAL;
 void JIT_OPERATION operationVMHandleException(ExecState*) WTF_INTERNAL;
-void JIT_OPERATION operationThrowStackOverflowError(ExecState*, CodeBlock*) WTF_INTERNAL;
+void JIT_OPERATION operationStackCheck(ExecState*, CodeBlock*) WTF_INTERNAL;
 int32_t JIT_OPERATION operationCallArityCheck(ExecState*) WTF_INTERNAL;
 int32_t JIT_OPERATION operationConstructArityCheck(ExecState*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationGetById(ExecState*, StructureStubInfo*, EncodedJSValue, UniquedStringImpl*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationGetByIdGeneric(ExecState*, EncodedJSValue, UniquedStringImpl*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationGetByIdBuildList(ExecState*, StructureStubInfo*, EncodedJSValue, UniquedStringImpl*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationGetByIdOptimize(ExecState*, StructureStubInfo*, EncodedJSValue, UniquedStringImpl*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationInOptimize(ExecState*, StructureStubInfo*, JSCell*, UniquedStringImpl*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationIn(ExecState*, StructureStubInfo*, JSCell*, UniquedStringImpl*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationGenericIn(ExecState*, JSCell*, EncodedJSValue) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdStrict(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdNonStrict(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdDirectStrict(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdDirectNonStrict(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdStrictOptimize(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdNonStrictOptimize(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdDirectStrictOptimize(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdDirectNonStrictOptimize(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdStrictBuildList(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdNonStrictBuildList(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdDirectStrictBuildList(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdDirectNonStrictBuildList(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationGetById(ExecState*, StructureStubInfo*, EncodedJSValue, StringImpl*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationGetByIdBuildList(ExecState*, StructureStubInfo*, EncodedJSValue, StringImpl*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationGetByIdOptimize(ExecState*, StructureStubInfo*, EncodedJSValue, StringImpl*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationInOptimize(ExecState*, StructureStubInfo*, JSCell*, StringImpl*);
+EncodedJSValue JIT_OPERATION operationIn(ExecState*, StructureStubInfo*, JSCell*, StringImpl*);
+EncodedJSValue JIT_OPERATION operationGenericIn(ExecState*, JSCell*, EncodedJSValue);
+EncodedJSValue JIT_OPERATION operationCallCustomGetter(ExecState*, JSCell*, PropertySlot::GetValueFunc, StringImpl*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationCallGetter(ExecState*, JSCell*, JSCell*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdStrict(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdNonStrict(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdDirectStrict(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdDirectNonStrict(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdStrictOptimize(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdNonStrictOptimize(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdDirectStrictOptimize(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdDirectNonStrictOptimize(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdStrictBuildList(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdNonStrictBuildList(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdDirectStrictBuildList(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdDirectNonStrictBuildList(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
 void JIT_OPERATION operationReallocateStorageAndFinishPut(ExecState*, JSObject*, Structure*, PropertyOffset, EncodedJSValue) WTF_INTERNAL;
-void JIT_OPERATION operationPutByVal(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue, ByValInfo*) WTF_INTERNAL;
-void JIT_OPERATION operationDirectPutByVal(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue, ByValInfo*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByValGeneric(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue, ByValInfo*) WTF_INTERNAL;
-void JIT_OPERATION operationDirectPutByValGeneric(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue, ByValInfo*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationCallEval(ExecState*, ExecState*) WTF_INTERNAL;
-char* JIT_OPERATION operationLinkCall(ExecState*, CallLinkInfo*) WTF_INTERNAL;
-char* JIT_OPERATION operationLinkPolymorphicCall(ExecState*, CallLinkInfo*) WTF_INTERNAL;
-char* JIT_OPERATION operationVirtualCall(ExecState*, CallLinkInfo*) WTF_INTERNAL;
-
+void JIT_OPERATION operationPutByVal(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
+void JIT_OPERATION operationDirectPutByVal(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
+void JIT_OPERATION operationPutByValGeneric(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
+void JIT_OPERATION operationDirectPutByValGeneric(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationCallEval(ExecState*) WTF_INTERNAL;
+char* JIT_OPERATION operationVirtualCall(ExecState*) WTF_INTERNAL;
+char* JIT_OPERATION operationLinkCall(ExecState*) WTF_INTERNAL;
+char* JIT_OPERATION operationLinkClosureCall(ExecState*) WTF_INTERNAL;
+char* JIT_OPERATION operationVirtualConstruct(ExecState*) WTF_INTERNAL;
+char* JIT_OPERATION operationLinkConstruct(ExecState*) WTF_INTERNAL;
 size_t JIT_OPERATION operationCompareLess(ExecState*, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
 size_t JIT_OPERATION operationCompareLessEq(ExecState*, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
 size_t JIT_OPERATION operationCompareGreater(ExecState*, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
@@ -286,8 +234,7 @@ size_t JIT_OPERATION operationHasProperty(ExecState*, JSObject*, JSString*) WTF_
 EncodedJSValue JIT_OPERATION operationNewArrayWithProfile(ExecState*, ArrayAllocationProfile*, const JSValue* values, int32_t size) WTF_INTERNAL;
 EncodedJSValue JIT_OPERATION operationNewArrayBufferWithProfile(ExecState*, ArrayAllocationProfile*, const JSValue* values, int32_t size) WTF_INTERNAL;
 EncodedJSValue JIT_OPERATION operationNewArrayWithSizeAndProfile(ExecState*, ArrayAllocationProfile*, EncodedJSValue size) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationNewFunction(ExecState*, JSScope*, JSCell*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationNewFunctionWithInvalidatedReallocationWatchpoint(ExecState*, JSScope*, JSCell*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationNewFunction(ExecState*, JSCell*) WTF_INTERNAL;
 JSCell* JIT_OPERATION operationNewObject(ExecState*, Structure*) WTF_INTERNAL;
 EncodedJSValue JIT_OPERATION operationNewRegexp(ExecState*, void*) WTF_INTERNAL;
 void JIT_OPERATION operationHandleWatchdogTimer(ExecState*) WTF_INTERNAL;
@@ -295,39 +242,39 @@ void JIT_OPERATION operationThrowStaticError(ExecState*, EncodedJSValue, int32_t
 void JIT_OPERATION operationThrow(ExecState*, EncodedJSValue) WTF_INTERNAL;
 void JIT_OPERATION operationDebug(ExecState*, int32_t) WTF_INTERNAL;
 #if ENABLE(DFG_JIT)
-SlowPathReturnType JIT_OPERATION operationOptimize(ExecState*, int32_t) WTF_INTERNAL;
+char* JIT_OPERATION operationOptimize(ExecState*, int32_t) WTF_INTERNAL;
 #endif
 void JIT_OPERATION operationPutByIndex(ExecState*, EncodedJSValue, int32_t, EncodedJSValue);
 #if USE(JSVALUE64)
-void JIT_OPERATION operationPutGetterById(ExecState*, EncodedJSValue, Identifier*, EncodedJSValue) WTF_INTERNAL;
-void JIT_OPERATION operationPutSetterById(ExecState*, EncodedJSValue, Identifier*, EncodedJSValue) WTF_INTERNAL;
 void JIT_OPERATION operationPutGetterSetter(ExecState*, EncodedJSValue, Identifier*, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
 #else
-void JIT_OPERATION operationPutGetterById(ExecState*, JSCell*, Identifier*, JSCell*) WTF_INTERNAL;
-void JIT_OPERATION operationPutSetterById(ExecState*, JSCell*, Identifier*, JSCell*) WTF_INTERNAL;
 void JIT_OPERATION operationPutGetterSetter(ExecState*, JSCell*, Identifier*, JSCell*, JSCell*) WTF_INTERNAL;
 #endif
-void JIT_OPERATION operationPushFunctionNameScope(ExecState*, int32_t, SymbolTable*, EncodedJSValue) WTF_INTERNAL;
-void JIT_OPERATION operationPopScope(ExecState*, int32_t) WTF_INTERNAL;
+void JIT_OPERATION operationPushNameScope(ExecState*, Identifier*, EncodedJSValue, int32_t) WTF_INTERNAL;
+void JIT_OPERATION operationPushWithScope(ExecState*, EncodedJSValue) WTF_INTERNAL;
+void JIT_OPERATION operationPopScope(ExecState*) WTF_INTERNAL;
 void JIT_OPERATION operationProfileDidCall(ExecState*, EncodedJSValue) WTF_INTERNAL;
 void JIT_OPERATION operationProfileWillCall(ExecState*, EncodedJSValue) WTF_INTERNAL;
 EncodedJSValue JIT_OPERATION operationCheckHasInstance(ExecState*, EncodedJSValue, EncodedJSValue baseVal) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationGetByValOptimize(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationGetByValGeneric(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationGetByValString(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationHasIndexedPropertyDefault(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationHasIndexedPropertyGeneric(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo*) WTF_INTERNAL;
+JSCell* JIT_OPERATION operationCreateActivation(ExecState*, int32_t offset) WTF_INTERNAL;
+JSCell* JIT_OPERATION operationCreateArguments(ExecState*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationGetArgumentsLength(ExecState*, int32_t) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationGetByValDefault(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationGetByValGeneric(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationGetByValString(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript) WTF_INTERNAL;
+void JIT_OPERATION operationTearOffActivation(ExecState*, JSCell*) WTF_INTERNAL;
+void JIT_OPERATION operationTearOffArguments(ExecState*, JSCell*, JSCell*) WTF_INTERNAL;
 EncodedJSValue JIT_OPERATION operationDeleteById(ExecState*, EncodedJSValue base, const Identifier*) WTF_INTERNAL;
 JSCell* JIT_OPERATION operationGetPNames(ExecState*, JSObject*) WTF_INTERNAL;
 EncodedJSValue JIT_OPERATION operationInstanceOf(ExecState*, EncodedJSValue, EncodedJSValue proto) WTF_INTERNAL;
-int32_t JIT_OPERATION operationSizeFrameForVarargs(ExecState*, EncodedJSValue arguments, int32_t numUsedStackSlots, int32_t firstVarArgOffset) WTF_INTERNAL;
-CallFrame* JIT_OPERATION operationSetupVarargsFrame(ExecState*, CallFrame*, EncodedJSValue arguments, int32_t firstVarArgOffset, int32_t length) WTF_INTERNAL;
+CallFrame* JIT_OPERATION operationSizeAndAllocFrameForVarargs(ExecState*, EncodedJSValue arguments, int32_t firstFreeRegister) WTF_INTERNAL;
+CallFrame* JIT_OPERATION operationLoadVarargs(ExecState*, CallFrame*, EncodedJSValue thisValue, EncodedJSValue arguments) WTF_INTERNAL;
 EncodedJSValue JIT_OPERATION operationToObject(ExecState*, EncodedJSValue) WTF_INTERNAL;
 char* JIT_OPERATION operationSwitchCharWithUnknownKeyType(ExecState*, EncodedJSValue key, size_t tableIndex) WTF_INTERNAL;
 char* JIT_OPERATION operationSwitchImmWithUnknownKeyType(ExecState*, EncodedJSValue key, size_t tableIndex) WTF_INTERNAL;
 char* JIT_OPERATION operationSwitchStringWithUnknownKeyType(ExecState*, EncodedJSValue key, size_t tableIndex) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationResolveScope(ExecState*, int32_t scope, int32_t identifierIndex) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationResolveScope(ExecState*, int32_t identifierIndex) WTF_INTERNAL;
 EncodedJSValue JIT_OPERATION operationGetFromScope(ExecState*, Instruction* bytecodePC) WTF_INTERNAL;
 void JIT_OPERATION operationPutToScope(ExecState*, Instruction* bytecodePC) WTF_INTERNAL;
 
@@ -338,16 +285,6 @@ void JIT_OPERATION operationOSRWriteBarrier(ExecState*, JSCell*);
 
 void JIT_OPERATION operationInitGlobalConst(ExecState*, Instruction*);
 
-void JIT_OPERATION operationExceptionFuzz();
-
-EncodedJSValue JIT_OPERATION operationHasGenericProperty(ExecState*, EncodedJSValue, JSCell*);
-EncodedJSValue JIT_OPERATION operationHasIndexedProperty(ExecState*, JSCell*, int32_t);
-JSCell* JIT_OPERATION operationGetPropertyEnumerator(ExecState*, JSCell*);
-EncodedJSValue JIT_OPERATION operationNextEnumeratorPname(ExecState*, JSCell*, int32_t);
-JSCell* JIT_OPERATION operationToIndexString(ExecState*, int32_t);
-
-void JIT_OPERATION operationProcessTypeProfilerLog(ExecState*) WTF_INTERNAL;
-
 } // extern "C"
 
 } // namespace JSC
diff --git a/Source/JavaScriptCore/jit/JITOperationsMSVC64.cpp b/Source/JavaScriptCore/jit/JITOperationsMSVC64.cpp
deleted file mode 100644
index 544bca394..000000000
--- a/Source/JavaScriptCore/jit/JITOperationsMSVC64.cpp
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- *    notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- *    notice, this list of conditions and the following disclaimer in the
- *    documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-
-#if !ENABLE(JIT) && COMPILER(MSVC) && CPU(X86_64)
-
-#include "CallFrame.h"
-#include "JSCJSValue.h"
-#include "JSCInlines.h"
-
-namespace JSC {
-
-// FIXME: The following is a workaround that is only needed because JITStubsMSVC64.asm
-// is built unconditionally even when the JIT is disabled, and it references this function.
-// We only need to provide a stub to satisfy the linkage. It will never be called.
-extern "C" EncodedJSValue getHostCallReturnValueWithExecState(ExecState*)
-{
-    return JSValue::encode(JSValue());
-}
-
-} // namespace JSC
-
-#endif // !ENABLE(JIT) && COMPILER(MSVC) && CPU(X86_64)
diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
index 172e013d5..4241baf32 100644
--- a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
+++ b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2008, 2009, 2014, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -29,20 +29,18 @@
 #include "JIT.h"
 #include "CodeBlock.h"
-#include "DirectArguments.h"
 #include "GCAwareJITStubRoutine.h"
 #include "GetterSetter.h"
 #include "Interpreter.h"
 #include "JITInlines.h"
 #include "JSArray.h"
-#include "JSEnvironmentRecord.h"
 #include "JSFunction.h"
+#include "JSPropertyNameIterator.h"
+#include "JSVariableObject.h"
 #include "LinkBuffer.h"
 #include "RepatchBuffer.h"
 #include "ResultType.h"
 #include "SamplingTool.h"
-#include "ScopedArguments.h"
-#include "ScopedArgumentsTable.h"
 #include <wtf/StringPrintStream.h>
@@ -53,10 +51,7 @@ JIT::CodeRef JIT::stringGetByValStubGenerator(VM* vm)
 {
     JSInterfaceJIT jit(vm);
     JumpList failures;
-    failures.append(JSC::branchStructure(jit,
-        NotEqual,
-        Address(regT0, JSCell::structureIDOffset()),
-        vm->stringStructure.get()));
+    failures.append(jit.branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(vm->stringStructure.get())));
 
     // Load string length to regT2, and start the process of loading the data pointer into regT0
     jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT2);
@@ -88,7 +83,7 @@ JIT::CodeRef JIT::stringGetByValStubGenerator(VM* vm)
     jit.move(TrustedImm32(0), regT0);
     jit.ret();
 
-    LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
+    LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
     return FINALIZE_CODE(patchBuffer, ("String get_by_val stub"));
 }
 
@@ -98,24 +93,21 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
     int base = currentInstruction[2].u.operand;
     int property = currentInstruction[3].u.operand;
     ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
-    ByValInfo* byValInfo = m_codeBlock->addByValInfo();
-
+
     emitGetVirtualRegisters(base, regT0, property, regT1);
-
-    emitJumpSlowCaseIfNotJSCell(regT0, base);
-
-    PatchableJump notIndex = emitPatchableJumpIfNotImmediateInteger(regT1);
-    addSlowCase(notIndex);
+    emitJumpSlowCaseIfNotImmediateInteger(regT1);
 
     // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
     // We check the value as if it was a uint32 against the m_vectorLength - which will always fail if
     // number was signed since m_vectorLength is always less than intmax (since the total allocation
-    // size is always less than 4Gb). As such zero extending will have been correct (and extending the value
-    // to 64-bits is necessary since it's used in the address calculation). We zero extend rather than sign
+    // size is always less than 4Gb). As such zero extending wil have been correct (and extending the value
+    // to 64-bits is necessary since it's used in the address calculation. We zero extend rather than sign
     // extending since it makes it easier to re-tag the value in the slow case.
     zeroExtend32ToPtr(regT1, regT1);
-    emitArrayProfilingSiteWithCell(regT0, regT2, profile);
+    emitJumpSlowCaseIfNotJSCell(regT0, base);
+    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+    emitArrayProfilingSite(regT2, regT3, profile);
     and32(TrustedImm32(IndexingShapeMask), regT2);
 
     PatchableJump badType;
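The comment in the hunk above rests on a small unsigned-arithmetic argument: after zeroExtend32ToPtr, the bounds check treats the int32 index as unsigned, so any negative index becomes a value of at least 2^31, which can never pass a comparison against a vector length that is itself below INT32_MAX. A self-contained demonstration of that argument (indexInBounds and its parameters are illustrative, not JSC API):

    #include <cassert>
    #include <cstdint>

    // Stand-in for the JIT's vector-length check; vectorLength plays the role
    // of m_vectorLength, which the comment above guarantees is < INT32_MAX.
    static bool indexInBounds(int32_t index, uint32_t vectorLength)
    {
        // Zero-extend the 32-bit index, as zeroExtend32ToPtr does, then compare
        // as unsigned. A negative index zero-extends to >= 2^31 and always fails.
        uint64_t extended = static_cast<uint32_t>(index);
        return extended < vectorLength;
    }

    int main()
    {
        assert(indexInBounds(3, 10));
        assert(!indexInBounds(10, 10));
        assert(!indexInBounds(-1, 10)); // -1 becomes 0xFFFFFFFF and fails the unsigned check
        return 0;
    }

This is why a single unsigned branch can reject both out-of-range and negative indices without a separate sign test on the hot path.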
@@ -145,21 +137,19 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
 
     Label done = label();
 
-    if (!ASSERT_DISABLED) {
-        Jump resultOK = branchTest64(NonZero, regT0);
-        abortWithReason(JITGetByValResultIsNotEmpty);
-        resultOK.link(this);
-    }
+#if !ASSERT_DISABLED
+    Jump resultOK = branchTest64(NonZero, regT0);
+    breakpoint();
+    resultOK.link(this);
+#endif
 
     emitValueProfilingSite();
     emitPutVirtualRegister(dst);
-
-    Label nextHotPath = label();
-
-    m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, notIndex, badType, mode, profile, done, nextHotPath));
+
+    m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
 }
 
-JIT::JumpList JIT::emitDoubleLoad(Instruction*, PatchableJump& badType)
+JIT::JumpList JIT::emitDoubleGetByVal(Instruction*, PatchableJump& badType)
 {
     JumpList slowCases;
 
@@ -168,11 +158,13 @@
     slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength())));
     loadDouble(BaseIndex(regT2, regT1, TimesEight), fpRegT0);
     slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0));
+    moveDoubleTo64(fpRegT0, regT0);
+    sub64(tagTypeNumberRegister, regT0);
 
     return slowCases;
 }
 
-JIT::JumpList JIT::emitContiguousLoad(Instruction*, PatchableJump& badType, IndexingType expectedShape)
+JIT::JumpList JIT::emitContiguousGetByVal(Instruction*, PatchableJump& badType, IndexingType expectedShape)
 {
     JumpList slowCases;
 
@@ -185,7 +177,7 @@
     return slowCases;
 }
 
-JIT::JumpList JIT::emitArrayStorageLoad(Instruction*, PatchableJump& badType)
+JIT::JumpList JIT::emitArrayStorageGetByVal(Instruction*, PatchableJump& badType)
 {
     JumpList slowCases;
 
@@ -201,55 +193,18 @@
     return slowCases;
 }
 
-JITGetByIdGenerator JIT::emitGetByValWithCachedId(Instruction* currentInstruction, const Identifier& propertyName, Jump& fastDoneCase, Jump& slowDoneCase, JumpList& slowCases)
-{
-    // base: regT0
-    // property: regT1
-    // scratch: regT3
-
-    int dst = currentInstruction[1].u.operand;
-
-    slowCases.append(emitJumpIfNotJSCell(regT1));
-    if (propertyName.isSymbol()) {
-        slowCases.append(branchStructure(NotEqual, Address(regT1, JSCell::structureIDOffset()), m_vm->symbolStructure.get()));
-        loadPtr(Address(regT1, Symbol::offsetOfPrivateName()), regT3);
-    } else {
-        slowCases.append(branchStructure(NotEqual, Address(regT1, JSCell::structureIDOffset()), m_vm->stringStructure.get()));
-        loadPtr(Address(regT1, JSString::offsetOfValue()), regT3);
-    }
-    slowCases.append(branchPtr(NotEqual, regT3, TrustedImmPtr(propertyName.impl())));
-
-    JITGetByIdGenerator gen(
-        m_codeBlock, CodeOrigin(m_bytecodeOffset), RegisterSet::specialRegisters(),
-        JSValueRegs(regT0), JSValueRegs(regT0), DontSpill);
-    gen.generateFastPath(*this);
-
-    fastDoneCase = jump();
-
-    Label coldPathBegin = label();
-    gen.slowPathJump().link(this);
-
-    Call call = callOperation(WithProfile, operationGetByIdOptimize, dst, gen.stubInfo(), regT0, propertyName.impl());
-    gen.reportSlowPathCall(coldPathBegin, call);
-    slowDoneCase = jump();
-
-    return gen;
-}
-
 void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     int dst = currentInstruction[1].u.operand;
     int base = currentInstruction[2].u.operand;
     int property = currentInstruction[3].u.operand;
-    ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;
+    ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
 
-    linkSlowCaseIfNotJSCell(iter, base); // base cell check
     linkSlowCase(iter); // property int32 check
+    linkSlowCaseIfNotJSCell(iter, base); // base cell check
     Jump nonCell = jump();
     linkSlowCase(iter); // base array check
-    Jump notString = branchStructure(NotEqual,
-        Address(regT0, JSCell::structureIDOffset()),
-        m_vm->stringStructure.get());
+    Jump notString = branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get()));
    emitNakedCall(CodeLocationLabel(m_vm->getCTIStub(stringGetByValStubGenerator).code()));
     Jump failed = branchTest64(Zero, regT0);
     emitPutVirtualRegister(dst, regT0);
@@ -258,14 +213,20 @@
     notString.link(this);
     nonCell.link(this);
 
+    Jump skipProfiling = jump();
+
     linkSlowCase(iter); // vector length check
     linkSlowCase(iter); // empty value
+    emitArrayProfileOutOfBoundsSpecialCase(profile);
+
+    skipProfiling.link(this);
+
     Label slowPath = label();
 
     emitGetVirtualRegister(base, regT0);
     emitGetVirtualRegister(property, regT1);
-    Call call = callOperation(operationGetByValOptimize, dst, regT0, regT1, byValInfo);
+    Call call = callOperation(operationGetByValDefault, dst, regT0, regT1);
 
     m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
     m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
@@ -287,11 +248,11 @@ void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID
         addPtr(TrustedImm32(JSObject::offsetOfInlineStorage() - (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), base, scratch);
         done.link(this);
     } else {
-        if (!ASSERT_DISABLED) {
-            Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(firstOutOfLineOffset));
-            abortWithReason(JITOffsetIsNotOutOfLine);
-            isOutOfLine.link(this);
-        }
+#if !ASSERT_DISABLED
+        Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(firstOutOfLineOffset));
+        breakpoint();
+        isOutOfLine.link(this);
+#endif
         loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
         neg32(offset);
     }
@@ -299,19 +260,64 @@
     load64(BaseIndex(scratch, offset, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), result);
 }
 
+void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
+{
+    int dst = currentInstruction[1].u.operand;
+    int base = currentInstruction[2].u.operand;
+    int property = currentInstruction[3].u.operand;
+    unsigned expected = currentInstruction[4].u.operand;
+    int iter = currentInstruction[5].u.operand;
+    int i = currentInstruction[6].u.operand;
+
+    emitGetVirtualRegister(property, regT0);
+    addSlowCase(branch64(NotEqual, regT0, addressFor(expected)));
+    emitGetVirtualRegisters(base, regT0, iter, regT1);
+    emitJumpSlowCaseIfNotJSCell(regT0, base);
+
+    // Test base's structure
+    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+    addSlowCase(branchPtr(NotEqual, regT2, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
+    load32(addressFor(i), regT3);
+    sub32(TrustedImm32(1), regT3);
+    addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
+    Jump inlineProperty = branch32(Below, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity)));
+    add32(TrustedImm32(firstOutOfLineOffset), regT3);
+    sub32(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity)), regT3);
+    inlineProperty.link(this);
+    compileGetDirectOffset(regT0, regT0, regT3, regT1);
+
+    emitPutVirtualRegister(dst, regT0);
+}
+
+void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+    int dst = currentInstruction[1].u.operand;
+    int base = currentInstruction[2].u.operand;
+    int property = currentInstruction[3].u.operand;
+
+    linkSlowCase(iter);
+    linkSlowCaseIfNotJSCell(iter, base);
+    linkSlowCase(iter);
+    linkSlowCase(iter);
+
+    emitGetVirtualRegister(base, regT0);
+    emitGetVirtualRegister(property, regT1);
+    callOperation(operationGetByValGeneric, dst, regT0, regT1);
+}
+
 void JIT::emit_op_put_by_val(Instruction* currentInstruction)
 {
     int base = currentInstruction[1].u.operand;
     int property = currentInstruction[2].u.operand;
     ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
-    ByValInfo* byValInfo = m_codeBlock->addByValInfo();
 
     emitGetVirtualRegisters(base, regT0, property, regT1);
     emitJumpSlowCaseIfNotImmediateInteger(regT1);
     // See comment in op_get_by_val.
     zeroExtend32ToPtr(regT1, regT1);
     emitJumpSlowCaseIfNotJSCell(regT0, base);
-    emitArrayProfilingSiteWithCell(regT0, regT2, profile);
+    loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+    emitArrayProfilingSite(regT2, regT3, profile);
     and32(TrustedImm32(IndexingShapeMask), regT2);
 
     PatchableJump badType;
@@ -341,7 +347,8 @@
 
     Label done = label();
 
-    m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, PatchableJump(), badType, mode, profile, done, done));
+    m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
+
 }
 
 JIT::JumpList JIT::emitGenericContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType, IndexingType indexingShape)
@@ -440,7 +447,6 @@
     int property = currentInstruction[2].u.operand;
     int value = currentInstruction[3].u.operand;
     ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
-    ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;
 
     linkSlowCase(iter); // property int32 check
     linkSlowCaseIfNotJSCell(iter, base); // base cell check
@@ -466,7 +472,7 @@
     emitGetVirtualRegister(property, regT1);
     emitGetVirtualRegister(value, regT2);
     bool isDirect = m_interpreter->getOpcodeID(currentInstruction->u.opcode) == op_put_by_val_direct;
-    Call call = callOperation(isDirect ? operationDirectPutByVal : operationPutByVal, regT0, regT1, regT2, byValInfo);
+    Call call = callOperation(isDirect ? operationDirectPutByVal : operationPutByVal, regT0, regT1, regT2);
 
     m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
     m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
@@ -480,20 +486,6 @@ void JIT::emit_op_put_by_index(Instruction* currentInstruction)
     callOperation(operationPutByIndex, regT0, currentInstruction[2].u.operand, regT1);
 }
 
-void JIT::emit_op_put_getter_by_id(Instruction* currentInstruction)
-{
-    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
-    emitGetVirtualRegister(currentInstruction[3].u.operand, regT1);
-    callOperation(operationPutGetterById, regT0, &m_codeBlock->identifier(currentInstruction[2].u.operand), regT1);
-}
-
-void JIT::emit_op_put_setter_by_id(Instruction* currentInstruction)
-{
-    emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
-    emitGetVirtualRegister(currentInstruction[3].u.operand, regT1);
-    callOperation(operationPutSetterById, regT0, &m_codeBlock->identifier(currentInstruction[2].u.operand), regT1);
-}
-
 void JIT::emit_op_put_getter_setter(Instruction* currentInstruction)
 {
     emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
@@ -521,19 +513,20 @@ void JIT::emit_op_get_by_id(Instruction* currentInstruction)
 
     emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
 
-    if (*ident == m_vm->propertyNames->length && shouldEmitProfiling())
-        emitArrayProfilingSiteForBytecodeIndexWithCell(regT0, regT1, m_bytecodeOffset);
+    if (*ident == m_vm->propertyNames->length && shouldEmitProfiling()) {
+        loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
+        emitArrayProfilingSiteForBytecodeIndex(regT1, regT2, m_bytecodeOffset);
+    }
 
     JITGetByIdGenerator gen(
         m_codeBlock, CodeOrigin(m_bytecodeOffset), RegisterSet::specialRegisters(),
-        JSValueRegs(regT0), JSValueRegs(regT0), DontSpill);
+        callFrameRegister, JSValueRegs(regT0), JSValueRegs(regT0), true);
     gen.generateFastPath(*this);
     addSlowCase(gen.slowPathJump());
     m_getByIds.append(gen);
 
     emitValueProfilingSite();
     emitPutVirtualRegister(resultVReg);
-    assertStackPointerOffset();
 }
 
 void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -560,7 +553,7 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
     int valueVReg = currentInstruction[3].u.operand;
     unsigned direct = currentInstruction[8].u.operand;
 
-    emitWriteBarrier(baseVReg, valueVReg, ShouldFilterBase);
+    emitWriteBarrier(baseVReg, valueVReg, ShouldFilterBaseAndValue);
 
     // In order to be able to patch both the Structure, and the object offset, we store one pointer,
     // to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
@@ -573,8 +566,8 @@
     JITPutByIdGenerator gen(
         m_codeBlock, CodeOrigin(m_bytecodeOffset), RegisterSet::specialRegisters(),
-        JSValueRegs(regT0), JSValueRegs(regT1), regT2, DontSpill, m_codeBlock->ecmaMode(),
-        direct ? Direct : NotDirect);
+        callFrameRegister, JSValueRegs(regT0), JSValueRegs(regT1), regT2, true,
+        m_codeBlock->ecmaMode(), direct ? Direct : NotDirect);
     gen.generateFastPath(*this);
     addSlowCase(gen.slowPathJump());
 
@@ -643,10 +636,16 @@ void JIT::emitVarInjectionCheck(bool needsVarInjectionChecks)
     addSlowCase(branch8(Equal, AbsoluteAddress(m_codeBlock->globalObject()->varInjectionWatchpoint()->addressOfState()), TrustedImm32(IsInvalidated)));
 }
 
-void JIT::emitResolveClosure(int dst, int scope, bool needsVarInjectionChecks, unsigned depth)
+void JIT::emitResolveClosure(int dst, bool needsVarInjectionChecks, unsigned depth)
 {
     emitVarInjectionCheck(needsVarInjectionChecks);
-    emitGetVirtualRegister(scope, regT0);
+    emitGetVirtualRegister(JSStack::ScopeChain, regT0);
+    if (m_codeBlock->needsActivation()) {
+        emitGetVirtualRegister(m_codeBlock->activationRegister(), regT1);
+        Jump noActivation = branchTestPtr(Zero, regT1);
+        loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
+        noActivation.link(this);
+    }
     for (unsigned i = 0; i < depth; ++i)
         loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
     emitPutVirtualRegister(dst);
@@ -655,9 +654,8 @@
 void JIT::emit_op_resolve_scope(Instruction* currentInstruction)
 {
     int dst = currentInstruction[1].u.operand;
-    int scope = currentInstruction[2].u.operand;
-    ResolveType resolveType = static_cast<ResolveType>(currentInstruction[4].u.operand);
-    unsigned depth = currentInstruction[5].u.operand;
+    ResolveType resolveType = static_cast<ResolveType>(currentInstruction[3].u.operand);
+    unsigned depth = currentInstruction[4].u.operand;
 
     switch (resolveType) {
     case GlobalProperty:
@@ -670,37 +668,32 @@ void JIT::emit_op_resolve_scope(Instruction* currentInstruction)
         break;
     case ClosureVar:
     case ClosureVarWithVarInjectionChecks:
-        emitResolveClosure(dst, scope, needsVarInjectionChecks(resolveType), depth);
+        emitResolveClosure(dst, needsVarInjectionChecks(resolveType), depth);
         break;
     case Dynamic:
         addSlowCase(jump());
         break;
-    case LocalClosureVar:
-        RELEASE_ASSERT_NOT_REACHED();
     }
 }
 
 void JIT::emitSlow_op_resolve_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
 {
     int dst = currentInstruction[1].u.operand;
-    ResolveType resolveType = static_cast<ResolveType>(currentInstruction[4].u.operand);
+    ResolveType resolveType = static_cast<ResolveType>(currentInstruction[3].u.operand);
 
     if (resolveType == GlobalProperty || resolveType == GlobalVar || resolveType == ClosureVar)
         return;
 
     linkSlowCase(iter);
-    int32_t scope = currentInstruction[2].u.operand;
-    int32_t identifierIndex = currentInstruction[3].u.operand;
-    callOperation(operationResolveScope, dst, scope, identifierIndex);
+    int32_t indentifierIndex = currentInstruction[2].u.operand;
+    callOperation(operationResolveScope, dst, indentifierIndex);
 }
 
 void JIT::emitLoadWithStructureCheck(int scope, Structure** structureSlot)
 {
     emitGetVirtualRegister(scope, regT0);
     loadPtr(structureSlot, regT1);
-    addSlowCase(branchTestPtr(Zero, regT1));
-    load32(Address(regT1, Structure::structureIDOffset()), regT1);
-    addSlowCase(branch32(NotEqual, Address(regT0, JSCell::structureIDOffset()), regT1));
+    addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), regT1));
 }
 
 void JIT::emitGetGlobalProperty(uintptr_t* operandSlot)
@@ -717,7 +710,8 @@ void JIT::emitGetGlobalVar(uintptr_t operand)
 void JIT::emitGetClosureVar(int scope, uintptr_t operand)
 {
     emitGetVirtualRegister(scope, regT0);
-    loadPtr(Address(regT0, JSEnvironmentRecord::offsetOfVariables() + operand * sizeof(Register)), regT0);
+    loadPtr(Address(regT0, JSVariableObject::offsetOfRegisters()), regT0);
+    loadPtr(Address(regT0, operand * sizeof(Register)), regT0);
 }
 
 void JIT::emit_op_get_from_scope(Instruction* currentInstruction)
@@ -747,8 +741,6 @@
     case Dynamic:
         addSlowCase(jump());
         break;
-    case LocalClosureVar:
-        RELEASE_ASSERT_NOT_REACHED();
     }
     emitPutVirtualRegister(dst);
     emitValueProfilingSite();
@@ -762,8 +754,6 @@
     if (resolveType == GlobalVar || resolveType == ClosureVar)
         return;
 
-    if (resolveType == GlobalProperty || resolveType == GlobalPropertyWithVarInjectionChecks)
-        linkSlowCase(iter);
     linkSlowCase(iter);
     callOperation(WithProfile, operationGetFromScope, dst, currentInstruction);
 }
@@ -778,19 +768,49 @@ void JIT::emitPutGlobalProperty(uintptr_t* operandSlot, int value)
     storePtr(regT2, BaseIndex(regT0, regT1, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)));
 }
 
-void JIT::emitPutGlobalVar(uintptr_t operand, int value, WatchpointSet* set)
+void JIT::emitNotifyWrite(RegisterID value, RegisterID scratch, VariableWatchpointSet* set)
+{
+    if (!set || set->state() == IsInvalidated)
+        return;
+
+    load8(set->addressOfState(), scratch);
+
+    JumpList ready;
+
+    ready.append(branch32(Equal, scratch, TrustedImm32(IsInvalidated)));
+
+    if (set->state() == ClearWatchpoint) {
+        Jump isWatched = branch32(NotEqual, scratch, TrustedImm32(ClearWatchpoint));
+
+        store64(value, set->addressOfInferredValue());
+        store8(TrustedImm32(IsWatched), set->addressOfState());
+        ready.append(jump());
+
+        isWatched.link(this);
+    }
+
+    ready.append(branch64(Equal, AbsoluteAddress(set->addressOfInferredValue()), value));
+    addSlowCase(branchTest8(NonZero, AbsoluteAddress(set->addressOfSetIsNotEmpty())));
+    store8(TrustedImm32(IsInvalidated), set->addressOfState());
+    move(TrustedImm64(JSValue::encode(JSValue())), scratch);
+    store64(scratch, set->addressOfInferredValue());
+
+    ready.link(this);
+}
+
+void JIT::emitPutGlobalVar(uintptr_t operand, int value, VariableWatchpointSet* set)
 {
     emitGetVirtualRegister(value, regT0);
-    emitNotifyWrite(set);
+    emitNotifyWrite(regT0, regT1, set);
     storePtr(regT0, reinterpret_cast<void*>(operand));
 }
 
-void JIT::emitPutClosureVar(int scope, uintptr_t operand, int value, WatchpointSet* set)
+void JIT::emitPutClosureVar(int scope, uintptr_t operand, int value)
 {
     emitGetVirtualRegister(value, regT1);
     emitGetVirtualRegister(scope, regT0);
-    emitNotifyWrite(set);
-    storePtr(regT1, Address(regT0, JSEnvironmentRecord::offsetOfVariables() + operand * sizeof(Register)));
+    loadPtr(Address(regT0, JSVariableObject::offsetOfRegisters()), regT0);
+    storePtr(regT1, Address(regT0, operand * sizeof(Register)));
 }
 
 void JIT::emit_op_put_to_scope(Instruction* currentInstruction)
@@ -814,12 +834,11 @@
         emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
         emitPutGlobalVar(*operandSlot, value, currentInstruction[5].u.watchpointSet);
         break;
-    case LocalClosureVar:
     case ClosureVar:
     case ClosureVarWithVarInjectionChecks:
         emitWriteBarrier(scope, value, ShouldFilterValue);
         emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
-        emitPutClosureVar(scope, *operandSlot, value, currentInstruction[5].u.watchpointSet);
+        emitPutClosureVar(scope, *operandSlot, value);
         break;
     case Dynamic:
        addSlowCase(jump());
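The emitNotifyWrite code added above inlines a three-state protocol around VariableWatchpointSet: a first write records an inferred value and moves ClearWatchpoint to IsWatched; a later write of a different value invalidates the set (firing watchpoints on the slow path) and clears the inferred value; IsInvalidated is terminal. A plain C++ model of that state machine, assuming only what the emitted code shows (the model type and its fields are illustrative, not JSC's class):

    #include <cassert>
    #include <cstdint>

    enum State { ClearWatchpoint, IsWatched, IsInvalidated };

    struct VariableWatchpointSetModel {
        State state = ClearWatchpoint;
        int64_t inferredValue = 0;       // stands in for the encoded "empty" JSValue

        void notifyWrite(int64_t value)
        {
            switch (state) {
            case ClearWatchpoint:
                inferredValue = value;   // first write: start watching this value
                state = IsWatched;
                return;
            case IsWatched:
                if (value == inferredValue)
                    return;              // same value: the inference still holds
                state = IsInvalidated;   // new value: fire watchpoints (JIT slow path)
                inferredValue = 0;
                return;
            case IsInvalidated:
                return;                  // nothing left to watch
            }
        }
    };

    int main()
    {
        VariableWatchpointSetModel set;
        set.notifyWrite(42);
        assert(set.state == IsWatched);
        set.notifyWrite(42);             // a repeated identical write stays watched
        assert(set.state == IsWatched);
        set.notifyWrite(7);              // a differing write invalidates
        assert(set.state == IsInvalidated);
        return 0;
    }

Baking the common transitions into straight-line JIT code means the out-of-line call is only taken when watchpoints actually need to fire.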
@@ -831,13 +850,11 @@ void JIT::emitSlow_op_put_to_scope(Instruction* currentInstruction, Vector<SlowC
 {
     ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
     unsigned linkCount = 0;
-    if (resolveType != GlobalVar && resolveType != ClosureVar && resolveType != LocalClosureVar)
+    if (resolveType != GlobalVar && resolveType != ClosureVar)
         linkCount++;
-    if ((resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks || resolveType == LocalClosureVar)
+    if ((resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
         && currentInstruction[5].u.watchpointSet->state() != IsInvalidated)
         linkCount++;
-    if (resolveType == GlobalProperty || resolveType == GlobalPropertyWithVarInjectionChecks)
-        linkCount++;
     if (!linkCount)
         return;
     while (linkCount--)
@@ -845,53 +862,57 @@
     callOperation(operationPutToScope, currentInstruction);
 }
 
-void JIT::emit_op_get_from_arguments(Instruction* currentInstruction)
+void JIT::emit_op_init_global_const(Instruction* currentInstruction)
 {
-    int dst = currentInstruction[1].u.operand;
-    int arguments = currentInstruction[2].u.operand;
-    int index = currentInstruction[3].u.operand;
-
-    emitGetVirtualRegister(arguments, regT0);
-    load64(Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>)), regT0);
-    emitValueProfilingSite();
-    emitPutVirtualRegister(dst);
+    JSGlobalObject* globalObject = m_codeBlock->globalObject();
+    emitWriteBarrier(globalObject, currentInstruction[2].u.operand, ShouldFilterValue);
+    emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
+    store64(regT0, currentInstruction[1].u.registerPointer);
 }
 
-void JIT::emit_op_put_to_arguments(Instruction* currentInstruction)
+#endif // USE(JSVALUE64)
+
+JIT::Jump JIT::checkMarkWord(RegisterID owner, RegisterID scratch1, RegisterID scratch2)
 {
-    int arguments = currentInstruction[1].u.operand;
-    int index = currentInstruction[2].u.operand;
-    int value = currentInstruction[3].u.operand;
-
-    emitWriteBarrier(arguments, value, ShouldFilterValue);
-
-    emitGetVirtualRegister(arguments, regT0);
-    emitGetVirtualRegister(value, regT1);
-    store64(regT1, Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>)));
+    move(owner, scratch1);
+    move(owner, scratch2);
+
+    andPtr(TrustedImmPtr(MarkedBlock::blockMask), scratch1);
+    andPtr(TrustedImmPtr(~MarkedBlock::blockMask), scratch2);
+
+    rshift32(TrustedImm32(3 + 4), scratch2);
+
+    return branchTest8(Zero, BaseIndex(scratch1, scratch2, TimesOne, MarkedBlock::offsetOfMarks()));
 }
 
-#endif // USE(JSVALUE64)
+JIT::Jump JIT::checkMarkWord(JSCell* owner)
+{
+    MarkedBlock* block = MarkedBlock::blockFor(owner);
+    size_t index = (reinterpret_cast<size_t>(owner) & ~MarkedBlock::blockMask) >> (3 + 4);
+    void* address = (reinterpret_cast<char*>(block) + MarkedBlock::offsetOfMarks()) + index;
+
+    return branchTest8(Zero, AbsoluteAddress(address));
+}
 
 #if USE(JSVALUE64)
 void JIT::emitWriteBarrier(unsigned owner, unsigned value, WriteBarrierMode mode)
 {
 #if ENABLE(GGC)
+    emitGetVirtualRegister(value, regT0);
     Jump valueNotCell;
-    if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue) {
-        emitGetVirtualRegister(value, regT0);
+    if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue)
         valueNotCell = branchTest64(NonZero, regT0, tagMaskRegister);
-    }
 
     emitGetVirtualRegister(owner, regT0);
     Jump ownerNotCell;
-    if (mode == ShouldFilterBaseAndValue || mode == ShouldFilterBase)
+    if (mode == ShouldFilterBaseAndValue)
         ownerNotCell = branchTest64(NonZero, regT0, tagMaskRegister);
 
-    Jump ownerIsRememberedOrInEden = jumpIfIsRememberedOrInEden(regT0);
+    Jump ownerNotMarked = checkMarkWord(regT0, regT1, regT2);
     callOperation(operationUnconditionalWriteBarrier, regT0);
-    ownerIsRememberedOrInEden.link(this);
+    ownerNotMarked.link(this);
 
-    if (mode == ShouldFilterBaseAndValue || mode == ShouldFilterBase)
+    if (mode == ShouldFilterBaseAndValue)
         ownerNotCell.link(this);
     if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue)
         valueNotCell.link(this);
@@ -910,7 +931,12 @@ void JIT::emitWriteBarrier(JSCell* owner, unsigned value, WriteBarrierMode mode)
     if (mode == ShouldFilterValue)
         valueNotCell = branchTest64(NonZero, regT0, tagMaskRegister);
 
-    emitWriteBarrier(owner);
+    if (!MarkedBlock::blockFor(owner)->isMarked(owner)) {
+        Jump ownerNotMarked = checkMarkWord(regT0, regT1, regT2);
+        callOperation(operationUnconditionalWriteBarrier, owner);
+        ownerNotMarked.link(this);
+    } else
+        callOperation(operationUnconditionalWriteBarrier, owner);
 
     if (mode == ShouldFilterValue)
         valueNotCell.link(this);
@@ -926,22 +952,21 @@ void JIT::emitWriteBarrier(unsigned owner, unsigned value, WriteBarrierMode mode)
 {
 #if ENABLE(GGC)
+    emitLoadTag(value, regT0);
     Jump valueNotCell;
-    if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue) {
-        emitLoadTag(value, regT0);
+    if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue)
         valueNotCell = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
-    }
 
     emitLoad(owner, regT0, regT1);
     Jump ownerNotCell;
-    if (mode == ShouldFilterBase || mode == ShouldFilterBaseAndValue)
+    if (mode == ShouldFilterBaseAndValue)
         ownerNotCell = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
 
-    Jump ownerIsRememberedOrInEden = jumpIfIsRememberedOrInEden(regT1);
+    Jump ownerNotMarked = checkMarkWord(regT1, regT0, regT2);
     callOperation(operationUnconditionalWriteBarrier, regT1);
-    ownerIsRememberedOrInEden.link(this);
+    ownerNotMarked.link(this);
 
-    if (mode == ShouldFilterBase || mode == ShouldFilterBaseAndValue)
+    if (mode == ShouldFilterBaseAndValue)
         ownerNotCell.link(this);
     if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue)
         valueNotCell.link(this);
@@ -955,13 +980,17 @@ void JIT::emitWriteBarrier(JSCell* owner, unsigned value, WriteBarrierMode mode)
 {
 #if ENABLE(GGC)
+    emitLoadTag(value, regT0);
     Jump valueNotCell;
-    if (mode == ShouldFilterValue) {
-        emitLoadTag(value, regT0);
+    if (mode == ShouldFilterValue)
         valueNotCell = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
-    }
 
-    emitWriteBarrier(owner);
+    if (!MarkedBlock::blockFor(owner)->isMarked(owner)) {
+        Jump ownerNotMarked = checkMarkWord(regT0, regT1, regT2);
+        callOperation(operationUnconditionalWriteBarrier, owner);
+        ownerNotMarked.link(this);
+    } else
+        callOperation(operationUnconditionalWriteBarrier, owner);
 
     if (mode == ShouldFilterValue)
         valueNotCell.link(this);
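The emitWriteBarrier variants above all share one shape: skip the barrier when the stored value is not a cell (and, under ShouldFilterBaseAndValue, when the owner is not a cell), and take the out-of-line operationUnconditionalWriteBarrier call only when the owner's mark bit is set, since the jump checkMarkWord returns is taken when the mark byte tests zero and skips past the call. A simplified C++ rendering of that filtering, with stand-in types and a counter in place of the real barrier (all names here are illustrative, not JSC API):

    #include <cassert>

    enum WriteBarrierMode { UnconditionalWriteBarrier, ShouldFilterValue, ShouldFilterBaseAndValue };

    struct Cellish { bool isCell; bool isMarked; };

    static int barrierCalls = 0;
    static void unconditionalWriteBarrier(Cellish&) { ++barrierCalls; }

    static void writeBarrier(Cellish& owner, const Cellish& value, WriteBarrierMode mode)
    {
        // Filter 1: storing a non-cell can never create a cell-to-cell edge.
        if ((mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue) && !value.isCell)
            return;
        // Filter 2: a non-cell owner has no mark word to consult.
        if (mode == ShouldFilterBaseAndValue && !owner.isCell)
            return;
        // checkMarkWord skips the slow call when the mark bit is clear, so only
        // already-marked owners reach the out-of-line barrier.
        if (owner.isMarked)
            unconditionalWriteBarrier(owner);
    }

    int main()
    {
        Cellish markedOwner { true, true }, newOwner { true, false };
        Cellish cellValue { true, false }, nonCell { false, false };
        writeBarrier(markedOwner, cellValue, ShouldFilterBaseAndValue);
        assert(barrierCalls == 1);                     // marked owner takes the barrier
        writeBarrier(newOwner, cellValue, ShouldFilterBaseAndValue);
        assert(barrierCalls == 1);                     // unmarked owner is skipped
        writeBarrier(markedOwner, nonCell, ShouldFilterValue);
        assert(barrierCalls == 1);                     // non-cell value is filtered out
        return 0;
    }

The point of the mode parameter is that call sites which statically know the value (or base) is a cell can ask for fewer dynamic checks.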
@@ -974,18 +1003,40 @@ void JIT::emitWriteBarrier(JSCell* owner, unsigned value, WriteBarrierMode mode)
 
 #endif // USE(JSVALUE64)
 
-void JIT::emitWriteBarrier(JSCell* owner)
+JIT::Jump JIT::addStructureTransitionCheck(JSCell* object, Structure* structure, StructureStubInfo* stubInfo, RegisterID scratch)
 {
-#if ENABLE(GGC)
-    if (!MarkedBlock::blockFor(owner)->isMarked(owner)) {
-        Jump ownerIsRememberedOrInEden = jumpIfIsRememberedOrInEden(owner);
-        callOperation(operationUnconditionalWriteBarrier, owner);
-        ownerIsRememberedOrInEden.link(this);
-    } else
-        callOperation(operationUnconditionalWriteBarrier, owner);
-#else
-    UNUSED_PARAM(owner);
-#endif // ENABLE(GGC)
+    if (object->structure() == structure && structure->transitionWatchpointSetIsStillValid()) {
+        structure->addTransitionWatchpoint(stubInfo->addWatchpoint(m_codeBlock));
+#if !ASSERT_DISABLED
+        move(TrustedImmPtr(object), scratch);
+        Jump ok = branchPtr(Equal, Address(scratch, JSCell::structureOffset()), TrustedImmPtr(structure));
+        breakpoint();
+        ok.link(this);
+#endif
+        Jump result; // Returning an unset jump this way because otherwise VC++ would complain.
+        return result;
+    }
+
+    move(TrustedImmPtr(object), scratch);
+    return branchPtr(NotEqual, Address(scratch, JSCell::structureOffset()), TrustedImmPtr(structure));
+}
+
+void JIT::addStructureTransitionCheck(JSCell* object, Structure* structure, StructureStubInfo* stubInfo, JumpList& failureCases, RegisterID scratch)
+{
+    Jump failureCase = addStructureTransitionCheck(object, structure, stubInfo, scratch);
+    if (!failureCase.isSet())
+        return;
+
+    failureCases.append(failureCase);
+}
+
+void JIT::testPrototype(JSValue prototype, JumpList& failureCases, StructureStubInfo* stubInfo)
+{
+    if (prototype.isNull())
+        return;
+
+    ASSERT(prototype.isCell());
+    addStructureTransitionCheck(prototype.asCell(), prototype.asCell()->structure(), stubInfo, failureCases, regT3);
 }
 
 void JIT::privateCompileGetByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
@@ -1008,12 +1059,6 @@
     case JITArrayStorage:
         slowCases = emitArrayStorageGetByVal(currentInstruction, badType);
         break;
-    case JITDirectArguments:
-        slowCases = emitDirectArgumentsGetByVal(currentInstruction, badType);
-        break;
-    case JITScopedArguments:
-        slowCases = emitScopedArgumentsGetByVal(currentInstruction, badType);
-        break;
     default:
         TypedArrayType type = typedArrayTypeForJITArrayMode(arrayMode);
         if (isInt(type))
@@ -1025,7 +1070,7 @@
 
     Jump done = jump();
 
-    LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
+    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
 
     patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
     patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
@@ -1033,7 +1078,7 @@
     patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
 
     byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
-        m_codeBlock, patchBuffer,
+        patchBuffer,
         ("Baseline get_by_val stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
 
     RepatchBuffer repatchBuffer(m_codeBlock);
@@ -1041,39 +1086,6 @@
     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(operationGetByValGeneric));
 }
 
-void JIT::privateCompileGetByValWithCachedId(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, const Identifier& propertyName)
-{
-    Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;
-
-    Jump fastDoneCase;
-    Jump slowDoneCase;
-    JumpList slowCases;
-
-    JITGetByIdGenerator gen = emitGetByValWithCachedId(currentInstruction, propertyName, fastDoneCase, slowDoneCase, slowCases);
-
-    ConcurrentJITLocker locker(m_codeBlock->m_lock);
-    LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
-    patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
-    patchBuffer.link(fastDoneCase, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
-    patchBuffer.link(slowDoneCase, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToNextHotPath));
-
-    for (const auto& callSite : m_calls) {
-        if (callSite.to)
-            patchBuffer.link(callSite.from, FunctionPtr(callSite.to));
-    }
-    gen.finalize(patchBuffer);
-
-    byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
-        m_codeBlock, patchBuffer,
-        ("Baseline get_by_val with cached property name '%s' stub for %s, return point %p", propertyName.impl()->utf8().data(), toCString(*m_codeBlock).data(), returnAddress.value()));
-    byValInfo->cachedId = propertyName;
-    byValInfo->stubInfo = gen.stubInfo();
-
-    RepatchBuffer repatchBuffer(m_codeBlock);
-    repatchBuffer.relink(byValInfo->notIndexJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
-    repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(operationGetByValGeneric));
-}
-
 void JIT::privateCompilePutByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
 {
     Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;
@@ -1115,7 +1127,7 @@
 
     Jump done = jump();
 
-    LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
+    LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
     patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
     patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
     patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
@@ -1129,12 +1141,12 @@
     bool isDirect = m_interpreter->getOpcodeID(currentInstruction->u.opcode) == op_put_by_val_direct;
     if (!isDirect) {
         byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
-            m_codeBlock, patchBuffer,
+            patchBuffer,
             ("Baseline put_by_val stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
     } else {
         byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
-            m_codeBlock, patchBuffer,
+            patchBuffer,
             ("Baseline put_by_val_direct stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
     }
     RepatchBuffer repatchBuffer(m_codeBlock);
@@ -1142,75 +1154,6 @@
     repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(isDirect ? operationDirectPutByValGeneric : operationPutByValGeneric));
 }
 
-JIT::JumpList JIT::emitDirectArgumentsGetByVal(Instruction*, PatchableJump& badType)
-{
-    JumpList slowCases;
-
-#if USE(JSVALUE64)
-    RegisterID base = regT0;
-    RegisterID property = regT1;
-    JSValueRegs result = JSValueRegs(regT0);
-    RegisterID scratch = regT3;
-#else
-    RegisterID base = regT0;
-    RegisterID property = regT2;
-    JSValueRegs result = JSValueRegs(regT1, regT0);
-    RegisterID scratch = regT3;
-#endif
-
-    load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
-    badType = patchableBranch32(NotEqual, scratch, TrustedImm32(DirectArgumentsType));
-
-    slowCases.append(branch32(AboveOrEqual, property, Address(base, DirectArguments::offsetOfLength())));
-    slowCases.append(branchTestPtr(NonZero, Address(base, DirectArguments::offsetOfOverrides())));
-
-    zeroExtend32ToPtr(property, scratch);
-    loadValue(BaseIndex(base, scratch, TimesEight, DirectArguments::storageOffset()), result);
-
-    return slowCases;
-}
-
-JIT::JumpList JIT::emitScopedArgumentsGetByVal(Instruction*, PatchableJump& badType)
-{
-    JumpList slowCases;
-
-#if USE(JSVALUE64)
-    RegisterID base = regT0;
-    RegisterID property = regT1;
-    JSValueRegs result = JSValueRegs(regT0);
-    RegisterID scratch = regT3;
-    RegisterID scratch2 = regT4;
-#else
-    RegisterID base = regT0;
-    RegisterID property = regT2;
-    JSValueRegs result = JSValueRegs(regT1, regT0);
-    RegisterID scratch = regT3;
-    RegisterID scratch2 = regT4;
-#endif
-
-    load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
-    badType = patchableBranch32(NotEqual, scratch, TrustedImm32(ScopedArgumentsType));
-    slowCases.append(branch32(AboveOrEqual, property, Address(base, ScopedArguments::offsetOfTotalLength())));
-
-    loadPtr(Address(base, ScopedArguments::offsetOfTable()), scratch);
-    load32(Address(scratch, ScopedArgumentsTable::offsetOfLength()), scratch2);
-    Jump overflowCase = branch32(AboveOrEqual, property, scratch2);
-    loadPtr(Address(base, ScopedArguments::offsetOfScope()), scratch2);
-    loadPtr(Address(scratch, ScopedArgumentsTable::offsetOfArguments()), scratch);
-    load32(BaseIndex(scratch, property, TimesFour), scratch);
-    slowCases.append(branch32(Equal, scratch, TrustedImm32(ScopeOffset::invalidOffset)));
-    loadValue(BaseIndex(scratch2, scratch, TimesEight, JSEnvironmentRecord::offsetOfVariables()), result);
-    Jump done = jump();
-    overflowCase.link(this);
-    sub32(property, scratch2);
-    neg32(scratch2);
-    loadValue(BaseIndex(base, scratch2, TimesEight, ScopedArguments::overflowStorageOffset()), result);
-    slowCases.append(branchIfEmpty(result));
-    done.link(this);
-
-    return slowCases;
-}
-
 JIT::JumpList JIT::emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badType, TypedArrayType type)
 {
     ASSERT(isInt(type));
@@ -1233,21 +1176,21 @@
 
     JumpList slowCases;
 
-    load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
-    badType = patchableBranch32(NotEqual, scratch, TrustedImm32(typeForTypedArrayType(type)));
+    loadPtr(Address(base, JSCell::structureOffset()), scratch);
+    badType = patchableBranchPtr(NotEqual, Address(scratch, Structure::classInfoOffset()), TrustedImmPtr(classInfoForType(type)));
     slowCases.append(branch32(AboveOrEqual, property, Address(base, JSArrayBufferView::offsetOfLength())));
     loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), base);
 
     switch (elementSize(type)) {
     case 1:
        if (isSigned(type))
-            load8SignedExtendTo32(BaseIndex(base, property, TimesOne), resultPayload);
+
load8Signed(BaseIndex(base, property, TimesOne), resultPayload); else load8(BaseIndex(base, property, TimesOne), resultPayload); break; case 2: if (isSigned(type)) - load16SignedExtendTo32(BaseIndex(base, property, TimesTwo), resultPayload); + load16Signed(BaseIndex(base, property, TimesTwo), resultPayload); else load16(BaseIndex(base, property, TimesTwo), resultPayload); break; @@ -1303,9 +1246,9 @@ JIT::JumpList JIT::emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badT #endif JumpList slowCases; - - load8(Address(base, JSCell::typeInfoTypeOffset()), scratch); - badType = patchableBranch32(NotEqual, scratch, TrustedImm32(typeForTypedArrayType(type))); + + loadPtr(Address(base, JSCell::structureOffset()), scratch); + badType = patchableBranchPtr(NotEqual, Address(scratch, Structure::classInfoOffset()), TrustedImmPtr(classInfoForType(type))); slowCases.append(branch32(AboveOrEqual, property, Address(base, JSArrayBufferView::offsetOfLength()))); loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), base); @@ -1323,8 +1266,8 @@ JIT::JumpList JIT::emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badT } Jump notNaN = branchDouble(DoubleEqual, fpRegT0, fpRegT0); - static const double NaN = PNaN; - loadDouble(TrustedImmPtr(&NaN), fpRegT0); + static const double NaN = QNaN; + loadDouble(&NaN, fpRegT0); notNaN.link(this); #if USE(JSVALUE64) @@ -1338,7 +1281,6 @@ JIT::JumpList JIT::emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badT JIT::JumpList JIT::emitIntTypedArrayPutByVal(Instruction* currentInstruction, PatchableJump& badType, TypedArrayType type) { - ArrayProfile* profile = currentInstruction[4].u.arrayProfile; ASSERT(isInt(type)); int value = currentInstruction[3].u.operand; @@ -1357,12 +1299,9 @@ JIT::JumpList JIT::emitIntTypedArrayPutByVal(Instruction* currentInstruction, Pa JumpList slowCases; - load8(Address(base, JSCell::typeInfoTypeOffset()), earlyScratch); - badType = patchableBranch32(NotEqual, earlyScratch, TrustedImm32(typeForTypedArrayType(type))); - Jump inBounds = branch32(Below, property, Address(base, JSArrayBufferView::offsetOfLength())); - emitArrayProfileOutOfBoundsSpecialCase(profile); - Jump done = jump(); - inBounds.link(this); + loadPtr(Address(base, JSCell::structureOffset()), earlyScratch); + badType = patchableBranchPtr(NotEqual, Address(earlyScratch, Structure::classInfoOffset()), TrustedImmPtr(classInfoForType(type))); + slowCases.append(branch32(AboveOrEqual, property, Address(base, JSArrayBufferView::offsetOfLength()))); #if USE(JSVALUE64) emitGetVirtualRegister(value, earlyScratch); @@ -1403,14 +1342,11 @@ JIT::JumpList JIT::emitIntTypedArrayPutByVal(Instruction* currentInstruction, Pa CRASH(); } - done.link(this); - return slowCases; } JIT::JumpList JIT::emitFloatTypedArrayPutByVal(Instruction* currentInstruction, PatchableJump& badType, TypedArrayType type) { - ArrayProfile* profile = currentInstruction[4].u.arrayProfile; ASSERT(isFloat(type)); int value = currentInstruction[3].u.operand; @@ -1429,12 +1365,9 @@ JIT::JumpList JIT::emitFloatTypedArrayPutByVal(Instruction* currentInstruction, JumpList slowCases; - load8(Address(base, JSCell::typeInfoTypeOffset()), earlyScratch); - badType = patchableBranch32(NotEqual, earlyScratch, TrustedImm32(typeForTypedArrayType(type))); - Jump inBounds = branch32(Below, property, Address(base, JSArrayBufferView::offsetOfLength())); - emitArrayProfileOutOfBoundsSpecialCase(profile); - Jump done = jump(); - inBounds.link(this); + loadPtr(Address(base, JSCell::structureOffset()), earlyScratch); 
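
// The two guards emitted at this point always come in the same order:
// first a patchable type check (badType), then an unsigned bounds check
// whose failure lands in slowCases, and only then the element load. A
// hedged C++ sketch of that sequence, with plain parameters standing in
// for the register file:
#include <cstdint>
template<typename Element>
static bool typedArrayAccess(const void* classInfo, const void* expectedClassInfo,
                             const Element* vector, uint32_t length,
                             uint32_t index, Element& out)
{
    if (classInfo != expectedClassInfo)
        return false;           // badType: bail to the patchable slow path
    if (index >= length)        // unsigned compare also rejects "negative" indices
        return false;           // slowCases: out of bounds
    out = vector[index];        // cf. the BaseIndex(base, property, ...) load
    return true;
}
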
+ badType = patchableBranchPtr(NotEqual, Address(earlyScratch, Structure::classInfoOffset()), TrustedImmPtr(classInfoForType(type))); + slowCases.append(branch32(AboveOrEqual, property, Address(base, JSArrayBufferView::offsetOfLength()))); #if USE(JSVALUE64) emitGetVirtualRegister(value, earlyScratch); @@ -1473,8 +1406,6 @@ JIT::JumpList JIT::emitFloatTypedArrayPutByVal(Instruction* currentInstruction, CRASH(); } - done.link(this); - return slowCases; } diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp index fb957a226..5bc8d1abb 100644 --- a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp +++ b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008, 2009, 2014, 2015 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2009 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -30,13 +30,13 @@ #include "JIT.h" #include "CodeBlock.h" -#include "DirectArguments.h" #include "GCAwareJITStubRoutine.h" #include "Interpreter.h" #include "JITInlines.h" #include "JSArray.h" -#include "JSEnvironmentRecord.h" #include "JSFunction.h" +#include "JSPropertyNameIterator.h" +#include "JSVariableObject.h" #include "LinkBuffer.h" #include "RepatchBuffer.h" #include "ResultType.h" @@ -57,28 +57,6 @@ void JIT::emit_op_put_by_index(Instruction* currentInstruction) callOperation(operationPutByIndex, regT1, regT0, property, regT3, regT2); } -void JIT::emit_op_put_getter_by_id(Instruction* currentInstruction) -{ - int base = currentInstruction[1].u.operand; - int property = currentInstruction[2].u.operand; - int getter = currentInstruction[3].u.operand; - - emitLoadPayload(base, regT1); - emitLoadPayload(getter, regT3); - callOperation(operationPutGetterById, regT1, &m_codeBlock->identifier(property), regT3); -} - -void JIT::emit_op_put_setter_by_id(Instruction* currentInstruction) -{ - int base = currentInstruction[1].u.operand; - int property = currentInstruction[2].u.operand; - int setter = currentInstruction[3].u.operand; - - emitLoadPayload(base, regT1); - emitLoadPayload(setter, regT3); - callOperation(operationPutSetterById, regT1, &m_codeBlock->identifier(property), regT3); -} - void JIT::emit_op_put_getter_setter(Instruction* currentInstruction) { int base = currentInstruction[1].u.operand; @@ -105,7 +83,7 @@ JIT::CodeRef JIT::stringGetByValStubGenerator(VM* vm) { JSInterfaceJIT jit(vm); JumpList failures; - failures.append(JSC::branchStructure(jit, NotEqual, Address(regT0, JSCell::structureIDOffset()), vm->stringStructure.get())); + failures.append(jit.branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(vm->stringStructure.get()))); // Load string length to regT1, and start the process of loading the data pointer into regT0 jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT1); @@ -139,7 +117,7 @@ JIT::CodeRef JIT::stringGetByValStubGenerator(VM* vm) jit.move(TrustedImm32(0), regT0); jit.ret(); - LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID); + LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID); return FINALIZE_CODE(patchBuffer, ("String get_by_val stub")); } @@ -149,14 +127,13 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction) int base = currentInstruction[2].u.operand; int property = currentInstruction[3].u.operand; ArrayProfile* profile = currentInstruction[4].u.arrayProfile; - ByValInfo* byValInfo = 
m_codeBlock->addByValInfo(); emitLoad2(base, regT1, regT0, property, regT3, regT2); + addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag))); emitJumpSlowCaseIfNotJSCell(base, regT1); - PatchableJump notIndex = patchableBranch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)); - addSlowCase(notIndex); - emitArrayProfilingSiteWithCell(regT0, regT1, profile); + loadPtr(Address(regT0, JSCell::structureOffset()), regT1); + emitArrayProfilingSite(regT1, regT3, profile); and32(TrustedImm32(IndexingShapeMask), regT1); PatchableJump badType; @@ -185,27 +162,27 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction) Label done = label(); - if (!ASSERT_DISABLED) { - Jump resultOK = branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag)); - abortWithReason(JITGetByValResultIsNotEmpty); - resultOK.link(this); - } +#if !ASSERT_DISABLED + Jump resultOK = branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag)); + breakpoint(); + resultOK.link(this); +#endif emitValueProfilingSite(); emitStore(dst, regT1, regT0); - - Label nextHotPath = label(); - m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, notIndex, badType, mode, profile, done, nextHotPath)); + m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done)); } -JIT::JumpList JIT::emitContiguousLoad(Instruction*, PatchableJump& badType, IndexingType expectedShape) +JIT::JumpList JIT::emitContiguousGetByVal(Instruction*, PatchableJump& badType, IndexingType expectedShape) { JumpList slowCases; badType = patchableBranch32(NotEqual, regT1, TrustedImm32(expectedShape)); + loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3); slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, Butterfly::offsetOfPublicLength()))); + load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload slowCases.append(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag))); @@ -213,82 +190,52 @@ JIT::JumpList JIT::emitContiguousLoad(Instruction*, PatchableJump& badType, Inde return slowCases; } -JIT::JumpList JIT::emitDoubleLoad(Instruction*, PatchableJump& badType) +JIT::JumpList JIT::emitDoubleGetByVal(Instruction*, PatchableJump& badType) { JumpList slowCases; badType = patchableBranch32(NotEqual, regT1, TrustedImm32(DoubleShape)); + loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3); slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, Butterfly::offsetOfPublicLength()))); + loadDouble(BaseIndex(regT3, regT2, TimesEight), fpRegT0); slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0)); + moveDoubleToInts(fpRegT0, regT0, regT1); return slowCases; } -JIT::JumpList JIT::emitArrayStorageLoad(Instruction*, PatchableJump& badType) +JIT::JumpList JIT::emitArrayStorageGetByVal(Instruction*, PatchableJump& badType) { JumpList slowCases; add32(TrustedImm32(-ArrayStorageShape), regT1, regT3); badType = patchableBranch32(Above, regT3, TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape)); + loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3); slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, ArrayStorage::vectorLengthOffset()))); + load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) 
+ OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload slowCases.append(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag))); return slowCases; } - -JITGetByIdGenerator JIT::emitGetByValWithCachedId(Instruction* currentInstruction, const Identifier& propertyName, Jump& fastDoneCase, Jump& slowDoneCase, JumpList& slowCases) -{ - int dst = currentInstruction[1].u.operand; - - // base: tag(regT1), payload(regT0) - // property: tag(regT3), payload(regT2) - // scratch: regT4 - - slowCases.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::CellTag))); - if (propertyName.isSymbol()) { - slowCases.append(branchStructure(NotEqual, Address(regT2, JSCell::structureIDOffset()), m_vm->symbolStructure.get())); - loadPtr(Address(regT2, Symbol::offsetOfPrivateName()), regT4); - } else { - slowCases.append(branchStructure(NotEqual, Address(regT2, JSCell::structureIDOffset()), m_vm->stringStructure.get())); - loadPtr(Address(regT2, JSString::offsetOfValue()), regT4); - } - slowCases.append(branchPtr(NotEqual, regT4, TrustedImmPtr(propertyName.impl()))); - - JITGetByIdGenerator gen( - m_codeBlock, CodeOrigin(m_bytecodeOffset), RegisterSet::specialRegisters(), - JSValueRegs::payloadOnly(regT0), JSValueRegs(regT1, regT0), DontSpill); - gen.generateFastPath(*this); - - fastDoneCase = jump(); - - Label coldPathBegin = label(); - gen.slowPathJump().link(this); - - Call call = callOperation(WithProfile, operationGetByIdOptimize, dst, gen.stubInfo(), regT1, regT0, propertyName.impl()); - gen.reportSlowPathCall(coldPathBegin, call); - slowDoneCase = jump(); - - return gen; -} - + void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) { int dst = currentInstruction[1].u.operand; int base = currentInstruction[2].u.operand; int property = currentInstruction[3].u.operand; - ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo; - - linkSlowCaseIfNotJSCell(iter, base); // base cell check + ArrayProfile* profile = currentInstruction[4].u.arrayProfile; + linkSlowCase(iter); // property int32 check + linkSlowCaseIfNotJSCell(iter, base); // base cell check Jump nonCell = jump(); linkSlowCase(iter); // base array check - Jump notString = branchStructure(NotEqual, Address(regT0, JSCell::structureIDOffset()), m_vm->stringStructure.get()); + Jump notString = branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())); emitNakedCall(m_vm->getCTIStub(stringGetByValStubGenerator).code()); Jump failed = branchTestPtr(Zero, regT0); emitStore(dst, regT1, regT0); @@ -296,15 +243,21 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas failed.link(this); notString.link(this); nonCell.link(this); + + Jump skipProfiling = jump(); linkSlowCase(iter); // vector length check linkSlowCase(iter); // empty value + emitArrayProfileOutOfBoundsSpecialCase(profile); + + skipProfiling.link(this); + Label slowPath = label(); emitLoad(base, regT1, regT0); emitLoad(property, regT3, regT2); - Call call = callOperation(operationGetByValOptimize, dst, regT1, regT0, regT3, regT2, byValInfo); + Call call = callOperation(operationGetByValDefault, dst, regT1, regT0, regT3, regT2); m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath; m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call; @@ -318,13 +271,13 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction) int base = currentInstruction[1].u.operand; int property = 
currentInstruction[2].u.operand; ArrayProfile* profile = currentInstruction[4].u.arrayProfile; - ByValInfo* byValInfo = m_codeBlock->addByValInfo(); emitLoad2(base, regT1, regT0, property, regT3, regT2); addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag))); emitJumpSlowCaseIfNotJSCell(base, regT1); - emitArrayProfilingSiteWithCell(regT0, regT1, profile); + loadPtr(Address(regT0, JSCell::structureOffset()), regT1); + emitArrayProfilingSite(regT1, regT3, profile); and32(TrustedImm32(IndexingShapeMask), regT1); PatchableJump badType; @@ -354,7 +307,7 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction) Label done = label(); - m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, PatchableJump(), badType, mode, profile, done, done)); + m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done)); } JIT::JumpList JIT::emitGenericContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType, IndexingType indexingShape) @@ -459,7 +412,6 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas int property = currentInstruction[2].u.operand; int value = currentInstruction[3].u.operand; ArrayProfile* profile = currentInstruction[4].u.arrayProfile; - ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo; linkSlowCase(iter); // property int32 check linkSlowCaseIfNotJSCell(iter, base); // base cell check @@ -499,7 +451,6 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas emitLoad(value, regT0, regT1); addCallArgument(regT1); addCallArgument(regT0); - addCallArgument(TrustedImmPtr(byValInfo)); Call call = appendCallWithExceptionCheck(isDirect ? operationDirectPutByVal : operationPutByVal); #else // The register selection below is chosen to reduce register swapping on ARM. @@ -507,7 +458,7 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas emitLoad(base, regT2, regT1); emitLoad(property, regT3, regT0); emitLoad(value, regT5, regT4); - Call call = callOperation(isDirect ? operationDirectPutByVal : operationPutByVal, regT2, regT1, regT3, regT0, regT5, regT4, byValInfo); + Call call = callOperation(isDirect ? 
operationDirectPutByVal : operationPutByVal, regT2, regT1, regT3, regT0, regT5, regT4); #endif m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath; @@ -524,12 +475,14 @@ void JIT::emit_op_get_by_id(Instruction* currentInstruction) emitLoad(base, regT1, regT0); emitJumpSlowCaseIfNotJSCell(base, regT1); - if (*ident == m_vm->propertyNames->length && shouldEmitProfiling()) - emitArrayProfilingSiteForBytecodeIndexWithCell(regT0, regT2, m_bytecodeOffset); + if (*ident == m_vm->propertyNames->length && shouldEmitProfiling()) { + loadPtr(Address(regT0, JSCell::structureOffset()), regT2); + emitArrayProfilingSiteForBytecodeIndex(regT2, regT3, m_bytecodeOffset); + } JITGetByIdGenerator gen( m_codeBlock, CodeOrigin(m_bytecodeOffset), RegisterSet::specialRegisters(), - JSValueRegs::payloadOnly(regT0), JSValueRegs(regT1, regT0), DontSpill); + callFrameRegister, JSValueRegs::payloadOnly(regT0), JSValueRegs(regT1, regT0), true); gen.generateFastPath(*this); addSlowCase(gen.slowPathJump()); m_getByIds.append(gen); @@ -566,16 +519,19 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction) int value = currentInstruction[3].u.operand; int direct = currentInstruction[8].u.operand; - emitWriteBarrier(base, value, ShouldFilterBase); + emitWriteBarrier(base, value, ShouldFilterBaseAndValue); emitLoad2(base, regT1, regT0, value, regT3, regT2); emitJumpSlowCaseIfNotJSCell(base, regT1); + + emitLoad(base, regT1, regT0); + emitLoad(value, regT3, regT2); JITPutByIdGenerator gen( m_codeBlock, CodeOrigin(m_bytecodeOffset), RegisterSet::specialRegisters(), - JSValueRegs::payloadOnly(regT0), JSValueRegs(regT3, regT2), - regT1, DontSpill, m_codeBlock->ecmaMode(), direct ? Direct : NotDirect); + callFrameRegister, JSValueRegs::payloadOnly(regT0), JSValueRegs(regT3, regT2), + regT1, true, m_codeBlock->ecmaMode(), direct ? Direct : NotDirect); gen.generateFastPath(*this); addSlowCase(gen.slowPathJump()); @@ -592,10 +548,7 @@ void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCase linkSlowCase(iter); Label coldPathBegin(this); - - // JITPutByIdGenerator only preserve the value and the base's payload, we have to reload the tag. 
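
// The tag reload mentioned above is needed because JSVALUE32_64 splits
// every value into a 32-bit tag and a 32-bit payload packed into one
// 64-bit slot -- hence the TimesEight scaling and the asBits.tag/payload
// offsets used throughout this file. A sketch of that layout; the tag
// constant is passed in because its concrete value is not part of this
// patch:
#include <cstdint>
struct ValueSlot {
    union {
        uint64_t asInt64;
        struct { uint32_t payload; uint32_t tag; } asBits; // little-endian order
    } u;
};
static bool isCell(const ValueSlot& v, uint32_t cellTag)
{
    return v.u.asBits.tag == cellTag;  // cf. branch32(NotEqual, tag, CellTag)
}
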
- emitLoadTag(base, regT1); - + JITPutByIdGenerator& gen = m_putByIds[m_putByIdIndex++]; Call call = callOperation( @@ -652,11 +605,11 @@ void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, Register addPtr(TrustedImmPtr(JSObject::offsetOfInlineStorage() - (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), base); done.link(this); } else { - if (!ASSERT_DISABLED) { - Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(firstOutOfLineOffset)); - abortWithReason(JITOffsetIsNotOutOfLine); - isOutOfLine.link(this); - } +#if !ASSERT_DISABLED + Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(firstOutOfLineOffset)); + breakpoint(); + isOutOfLine.link(this); +#endif loadPtr(Address(base, JSObject::butterflyOffset()), base); neg32(offset); } @@ -664,6 +617,54 @@ void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, Register load32(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), resultTag); } +void JIT::emit_op_get_by_pname(Instruction* currentInstruction) +{ + int dst = currentInstruction[1].u.operand; + int base = currentInstruction[2].u.operand; + int property = currentInstruction[3].u.operand; + unsigned expected = currentInstruction[4].u.operand; + int iter = currentInstruction[5].u.operand; + int i = currentInstruction[6].u.operand; + + emitLoad2(property, regT1, regT0, base, regT3, regT2); + emitJumpSlowCaseIfNotJSCell(property, regT1); + addSlowCase(branchPtr(NotEqual, regT0, payloadFor(expected))); + // Property registers are now available as the property is known + emitJumpSlowCaseIfNotJSCell(base, regT3); + emitLoadPayload(iter, regT1); + + // Test base's structure + loadPtr(Address(regT2, JSCell::structureOffset()), regT0); + addSlowCase(branchPtr(NotEqual, regT0, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure)))); + load32(addressFor(i), regT3); + sub32(TrustedImm32(1), regT3); + addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots)))); + Jump inlineProperty = branch32(Below, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity))); + add32(TrustedImm32(firstOutOfLineOffset), regT3); + sub32(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity)), regT3); + inlineProperty.link(this); + compileGetDirectOffset(regT2, regT1, regT0, regT3); + + emitStore(dst, regT1, regT0); +} + +void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) +{ + int dst = currentInstruction[1].u.operand; + int base = currentInstruction[2].u.operand; + int property = currentInstruction[3].u.operand; + + linkSlowCaseIfNotJSCell(iter, property); + linkSlowCase(iter); + linkSlowCaseIfNotJSCell(iter, base); + linkSlowCase(iter); + linkSlowCase(iter); + + emitLoad(base, regT1, regT0); + emitLoad(property, regT3, regT2); + callOperation(operationGetByValGeneric, dst, regT1, regT0, regT3, regT2); +} + void JIT::emitVarInjectionCheck(bool needsVarInjectionChecks) { if (!needsVarInjectionChecks) @@ -671,11 +672,17 @@ void JIT::emitVarInjectionCheck(bool needsVarInjectionChecks) addSlowCase(branch8(Equal, AbsoluteAddress(m_codeBlock->globalObject()->varInjectionWatchpoint()->addressOfState()), TrustedImm32(IsInvalidated))); } -void JIT::emitResolveClosure(int dst, int scope, bool needsVarInjectionChecks, unsigned depth) +void JIT::emitResolveClosure(int dst, 
bool needsVarInjectionChecks, unsigned depth) { emitVarInjectionCheck(needsVarInjectionChecks); move(TrustedImm32(JSValue::CellTag), regT1); - emitLoadPayload(scope, regT0); + emitLoadPayload(JSStack::ScopeChain, regT0); + if (m_codeBlock->needsActivation()) { + emitLoadPayload(m_codeBlock->activationRegister().offset(), regT2); + Jump noActivation = branchTestPtr(Zero, regT2); + loadPtr(Address(regT2, JSScope::offsetOfNext()), regT0); + noActivation.link(this); + } for (unsigned i = 0; i < depth; ++i) loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0); emitStore(dst, regT1, regT0); @@ -684,9 +691,8 @@ void JIT::emitResolveClosure(int dst, int scope, bool needsVarInjectionChecks, u void JIT::emit_op_resolve_scope(Instruction* currentInstruction) { int dst = currentInstruction[1].u.operand; - int scope = currentInstruction[2].u.operand; - ResolveType resolveType = static_cast<ResolveType>(currentInstruction[4].u.operand); - unsigned depth = currentInstruction[5].u.operand; + ResolveType resolveType = static_cast<ResolveType>(currentInstruction[3].u.operand); + unsigned depth = currentInstruction[4].u.operand; switch (resolveType) { case GlobalProperty: @@ -700,35 +706,32 @@ void JIT::emit_op_resolve_scope(Instruction* currentInstruction) break; case ClosureVar: case ClosureVarWithVarInjectionChecks: - emitResolveClosure(dst, scope, needsVarInjectionChecks(resolveType), depth); + emitResolveClosure(dst, needsVarInjectionChecks(resolveType), depth); break; case Dynamic: addSlowCase(jump()); break; - case LocalClosureVar: - RELEASE_ASSERT_NOT_REACHED(); } } void JIT::emitSlow_op_resolve_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) { int dst = currentInstruction[1].u.operand; - ResolveType resolveType = static_cast<ResolveType>(currentInstruction[4].u.operand); + ResolveType resolveType = static_cast<ResolveType>(currentInstruction[3].u.operand); if (resolveType == GlobalProperty || resolveType == GlobalVar || resolveType == ClosureVar) return; linkSlowCase(iter); - int32_t scope = currentInstruction[2].u.operand; - int32_t identifierIndex = currentInstruction[3].u.operand; - callOperation(operationResolveScope, dst, scope, identifierIndex); + int32_t indentifierIndex = currentInstruction[2].u.operand; + callOperation(operationResolveScope, dst, indentifierIndex); } void JIT::emitLoadWithStructureCheck(int scope, Structure** structureSlot) { emitLoad(scope, regT1, regT0); loadPtr(structureSlot, regT2); - addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::structureIDOffset()), regT2)); + addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), regT2)); } void JIT::emitGetGlobalProperty(uintptr_t* operandSlot) @@ -747,8 +750,9 @@ void JIT::emitGetGlobalVar(uintptr_t operand) void JIT::emitGetClosureVar(int scope, uintptr_t operand) { emitLoad(scope, regT1, regT0); - load32(Address(regT0, JSEnvironmentRecord::offsetOfVariables() + operand * sizeof(Register) + TagOffset), regT1); - load32(Address(regT0, JSEnvironmentRecord::offsetOfVariables() + operand * sizeof(Register) + PayloadOffset), regT0); + loadPtr(Address(regT0, JSVariableObject::offsetOfRegisters()), regT0); + load32(Address(regT0, operand * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), regT1); + load32(Address(regT0, operand * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), regT0); } void JIT::emit_op_get_from_scope(Instruction* currentInstruction) @@ -778,8 +782,6 @@ void JIT::emit_op_get_from_scope(Instruction* 
currentInstruction) case Dynamic: addSlowCase(jump()); break; - case LocalClosureVar: - RELEASE_ASSERT_NOT_REACHED(); } emitValueProfilingSite(); emitStore(dst, regT1, regT0); @@ -808,21 +810,57 @@ void JIT::emitPutGlobalProperty(uintptr_t* operandSlot, int value) store32(regT2, BaseIndex(regT0, regT1, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload))); } -void JIT::emitPutGlobalVar(uintptr_t operand, int value, WatchpointSet* set) +void JIT::emitNotifyWrite(RegisterID tag, RegisterID payload, RegisterID scratch, VariableWatchpointSet* set) +{ + if (!set || set->state() == IsInvalidated) + return; + + load8(set->addressOfState(), scratch); + + JumpList ready; + + ready.append(branch32(Equal, scratch, TrustedImm32(IsInvalidated))); + + if (set->state() == ClearWatchpoint) { + Jump isWatched = branch32(NotEqual, scratch, TrustedImm32(ClearWatchpoint)); + + store32(tag, &set->addressOfInferredValue()->u.asBits.tag); + store32(payload, &set->addressOfInferredValue()->u.asBits.payload); + store8(TrustedImm32(IsWatched), set->addressOfState()); + ready.append(jump()); + + isWatched.link(this); + } + + Jump definitelyNotEqual = branch32( + NotEqual, AbsoluteAddress(&set->addressOfInferredValue()->u.asBits.payload), payload); + ready.append(branch32( + Equal, AbsoluteAddress(&set->addressOfInferredValue()->u.asBits.tag), tag)); + definitelyNotEqual.link(this); + addSlowCase(branchTest8(NonZero, AbsoluteAddress(set->addressOfSetIsNotEmpty()))); + store8(TrustedImm32(IsInvalidated), set->addressOfState()); + store32( + TrustedImm32(JSValue::EmptyValueTag), &set->addressOfInferredValue()->u.asBits.tag); + store32(TrustedImm32(0), &set->addressOfInferredValue()->u.asBits.payload); + + ready.link(this); +} + +void JIT::emitPutGlobalVar(uintptr_t operand, int value, VariableWatchpointSet* set) { emitLoad(value, regT1, regT0); - emitNotifyWrite(set); + emitNotifyWrite(regT1, regT0, regT2, set); store32(regT1, reinterpret_cast<char*>(operand) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)); store32(regT0, reinterpret_cast<char*>(operand) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)); } -void JIT::emitPutClosureVar(int scope, uintptr_t operand, int value, WatchpointSet* set) +void JIT::emitPutClosureVar(int scope, uintptr_t operand, int value) { emitLoad(value, regT3, regT2); emitLoad(scope, regT1, regT0); - emitNotifyWrite(set); - store32(regT3, Address(regT0, JSEnvironmentRecord::offsetOfVariables() + operand * sizeof(Register) + TagOffset)); - store32(regT2, Address(regT0, JSEnvironmentRecord::offsetOfVariables() + operand * sizeof(Register) + PayloadOffset)); + loadPtr(Address(regT0, JSVariableObject::offsetOfRegisters()), regT0); + store32(regT3, Address(regT0, operand * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag))); + store32(regT2, Address(regT0, operand * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload))); } void JIT::emit_op_put_to_scope(Instruction* currentInstruction) @@ -846,12 +884,11 @@ void JIT::emit_op_put_to_scope(Instruction* currentInstruction) emitVarInjectionCheck(needsVarInjectionChecks(resolveType)); emitPutGlobalVar(*operandSlot, value, currentInstruction[5].u.watchpointSet); break; - case LocalClosureVar: case ClosureVar: case ClosureVarWithVarInjectionChecks: emitWriteBarrier(scope, value, ShouldFilterValue); emitVarInjectionCheck(needsVarInjectionChecks(resolveType)); - emitPutClosureVar(scope, *operandSlot, value, 
currentInstruction[5].u.watchpointSet); + emitPutClosureVar(scope, *operandSlot, value); break; case Dynamic: addSlowCase(jump()); @@ -863,9 +900,9 @@ void JIT::emitSlow_op_put_to_scope(Instruction* currentInstruction, Vector<SlowC { ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type(); unsigned linkCount = 0; - if (resolveType != GlobalVar && resolveType != ClosureVar && resolveType != LocalClosureVar) + if (resolveType != GlobalVar && resolveType != ClosureVar) linkCount++; - if ((resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks || resolveType == LocalClosureVar) + if ((resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks) && currentInstruction[5].u.watchpointSet->state() != IsInvalidated) linkCount++; if (!linkCount) @@ -875,31 +912,19 @@ void JIT::emitSlow_op_put_to_scope(Instruction* currentInstruction, Vector<SlowC callOperation(operationPutToScope, currentInstruction); } -void JIT::emit_op_get_from_arguments(Instruction* currentInstruction) +void JIT::emit_op_init_global_const(Instruction* currentInstruction) { - int dst = currentInstruction[1].u.operand; - int arguments = currentInstruction[2].u.operand; - int index = currentInstruction[3].u.operand; - - emitLoadPayload(arguments, regT0); - load32(Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>) + TagOffset), regT1); - load32(Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>) + PayloadOffset), regT0); - emitValueProfilingSite(); - emitStore(dst, regT1, regT0); -} + WriteBarrier<Unknown>* registerPointer = currentInstruction[1].u.registerPointer; + int value = currentInstruction[2].u.operand; -void JIT::emit_op_put_to_arguments(Instruction* currentInstruction) -{ - int arguments = currentInstruction[1].u.operand; - int index = currentInstruction[2].u.operand; - int value = currentInstruction[3].u.operand; - - emitWriteBarrier(arguments, value, ShouldFilterValue); + JSGlobalObject* globalObject = m_codeBlock->globalObject(); + + emitWriteBarrier(globalObject, value, ShouldFilterValue); + + emitLoad(value, regT1, regT0); - emitLoadPayload(arguments, regT0); - emitLoad(value, regT1, regT2); - store32(regT1, Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>) + TagOffset)); - store32(regT2, Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>) + PayloadOffset)); + store32(regT1, registerPointer->tagPointer()); + store32(regT0, registerPointer->payloadPointer()); } } // namespace JSC diff --git a/Source/JavaScriptCore/jit/JITStubRoutine.cpp b/Source/JavaScriptCore/jit/JITStubRoutine.cpp index 8a6839b62..28543a8b8 100644 --- a/Source/JavaScriptCore/jit/JITStubRoutine.cpp +++ b/Source/JavaScriptCore/jit/JITStubRoutine.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012, 2014 Apple Inc. All rights reserved. + * Copyright (C) 2012 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -29,18 +29,13 @@ #if ENABLE(JIT) #include "JSObject.h" -#include "JSCInlines.h" + #include "SlotVisitor.h" namespace JSC { JITStubRoutine::~JITStubRoutine() { } -bool JITStubRoutine::visitWeak(RepatchBuffer&) -{ - return true; -} - void JITStubRoutine::observeZeroRefCount() { RELEASE_ASSERT(!m_refCount); diff --git a/Source/JavaScriptCore/jit/JITStubRoutine.h b/Source/JavaScriptCore/jit/JITStubRoutine.h index b2113a65f..020ef6907 100644 --- a/Source/JavaScriptCore/jit/JITStubRoutine.h +++ b/Source/JavaScriptCore/jit/JITStubRoutine.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012, 2014 Apple Inc. All rights reserved. + * Copyright (C) 2012 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,6 +26,8 @@ #ifndef JITStubRoutine_h #define JITStubRoutine_h +#include <wtf/Platform.h> + #if ENABLE(JIT) #include "ExecutableAllocator.h" @@ -36,7 +38,6 @@ namespace JSC { class JITStubRoutineSet; -class RepatchBuffer; // This is a base-class for JIT stub routines, and also the class you want // to instantiate directly if you have a routine that does not need any @@ -60,10 +61,10 @@ public: // Use this if you want to pass a CodePtr to someone who insists on taking // a RefPtr<JITStubRoutine>. - static Ref<JITStubRoutine> createSelfManagedRoutine( + static PassRefPtr<JITStubRoutine> createSelfManagedRoutine( MacroAssemblerCodePtr rawCodePointer) { - return adoptRef(*new JITStubRoutine(MacroAssemblerCodeRef::createSelfManagedCodeRef(rawCodePointer))); + return adoptRef(new JITStubRoutine(MacroAssemblerCodeRef::createSelfManagedCodeRef(rawCodePointer))); } virtual ~JITStubRoutine(); @@ -140,11 +141,6 @@ public: return true; } - - // Return true if you are still valid after. Return false if you are now invalid. If you return - // false, you will usually not do any clearing because the idea is that you will simply be - // destroyed. - virtual bool visitWeak(RepatchBuffer&); protected: virtual void observeZeroRefCount(); @@ -154,8 +150,11 @@ protected: }; // Helper for the creation of simple stub routines that need no help from the GC. -#define FINALIZE_CODE_FOR_STUB(codeBlock, patchBuffer, dataLogFArguments) \ - (adoptRef(new JITStubRoutine(FINALIZE_CODE_FOR((codeBlock), (patchBuffer), dataLogFArguments)))) +#define FINALIZE_CODE_FOR_STUB(patchBuffer, dataLogFArguments) \ + (adoptRef(new JITStubRoutine(FINALIZE_CODE((patchBuffer), dataLogFArguments)))) + +#define FINALIZE_CODE_FOR_DFG_STUB(patchBuffer, dataLogFArguments) \ + (adoptRef(new JITStubRoutine(FINALIZE_DFG_CODE((patchBuffer), dataLogFArguments)))) } // namespace JSC diff --git a/Source/JavaScriptCore/jit/JITStubs.cpp b/Source/JavaScriptCore/jit/JITStubs.cpp index c9111aa33..47c509e3d 100644 --- a/Source/JavaScriptCore/jit/JITStubs.cpp +++ b/Source/JavaScriptCore/jit/JITStubs.cpp @@ -12,7 +12,7 @@ * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 3. Neither the name of Apple Inc. ("Apple") nor the names of + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
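
// FINALIZE_CODE_FOR_STUB above relies on the adopt-ref idiom: the stub
// routine is new'ed with a reference count of one and handed straight to
// adoptRef(), which takes ownership without a second increment. A
// self-contained sketch of the idiom (not WTF's actual RefPtr classes):
#include <cstddef>
struct Routine {
    size_t refCount = 1;                         // born already owned
    void deref() { if (!--refCount) delete this; }
};
struct RoutinePtr {
    Routine* ptr;
    explicit RoutinePtr(Routine* p) : ptr(p) {}            // adopt: no ref() call
    RoutinePtr(const RoutinePtr&) = delete;
    RoutinePtr(RoutinePtr&& o) : ptr(o.ptr) { o.ptr = nullptr; }
    ~RoutinePtr() { if (ptr) ptr->deref(); }
};
static RoutinePtr adoptRoutine(Routine* r) { return RoutinePtr(r); } // e.g. adoptRoutine(new Routine)
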
* @@ -29,12 +29,9 @@ */ #include "config.h" -#include "JITStubs.h" #if ENABLE(JIT) - -#include "JSCInlines.h" -#include <wtf/InlineASM.h> +#include "JITStubs.h" #if CPU(ARM_TRADITIONAL) #include "JITStubsARM.h" diff --git a/Source/JavaScriptCore/jit/JITStubs.h b/Source/JavaScriptCore/jit/JITStubs.h index 7206a1498..24d95dfd4 100644 --- a/Source/JavaScriptCore/jit/JITStubs.h +++ b/Source/JavaScriptCore/jit/JITStubs.h @@ -11,7 +11,7 @@ * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 3. Neither the name of Apple Inc. ("Apple") nor the names of + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * @@ -37,16 +37,18 @@ namespace JSC { #if ENABLE(JIT) #if OS(WINDOWS) +class ExecState; +class Register; struct ProtoCallFrame; -class VM; extern "C" { - EncodedJSValue vmEntryToJavaScript(void*, VM*, ProtoCallFrame*); - EncodedJSValue vmEntryToNative(void*, VM*, ProtoCallFrame*); + EncodedJSValue callToJavaScript(void*, ExecState**, ProtoCallFrame*, Register*); + void returnFromJavaScript(); + EncodedJSValue callToNativeFunction(void*, ExecState**, ProtoCallFrame*, Register*); } #endif -#if ENABLE(MASM_PROBE) +#if USE(MASM_PROBE) extern "C" void ctiMasmProbeTrampoline(); #endif diff --git a/Source/JavaScriptCore/jit/JITStubsARM.h b/Source/JavaScriptCore/jit/JITStubsARM.h index 38c968f83..fd59188f4 100644 --- a/Source/JavaScriptCore/jit/JITStubsARM.h +++ b/Source/JavaScriptCore/jit/JITStubsARM.h @@ -12,7 +12,7 @@ * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 3. Neither the name of Apple Inc. ("Apple") nor the names of + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * @@ -41,9 +41,9 @@ namespace JSC { -#if COMPILER(GCC_OR_CLANG) +#if COMPILER(GCC) -#if ENABLE(MASM_PROBE) +#if USE(MASM_PROBE) // The following are offsets for MacroAssembler::ProbeContext fields accessed // by the ctiMasmProbeTrampoline stub. @@ -143,10 +143,10 @@ COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d15) == PROBE_CPU_D15_OFFSET, ProbeContext_cpu COMPILE_ASSERT(sizeof(MacroAssembler::ProbeContext) == PROBE_SIZE, ProbeContext_size_matches_ctiMasmProbeTrampoline); #undef PROBE_OFFSETOF -#endif // ENABLE(MASM_PROBE) +#endif // USE(MASM_PROBE) -#if ENABLE(MASM_PROBE) +#if USE(MASM_PROBE) asm ( ".text" "\n" ".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n" @@ -291,11 +291,11 @@ SYMBOL_STRING(ctiMasmProbeTrampolineEnd) ":" "\n" "pop { pc }" "\n" ); -#endif // ENABLE(MASM_PROBE) +#endif // USE(MASM_PROBE) -#endif // COMPILER(GCC_OR_CLANG) +#endif // COMPILER(GCC) } // namespace JSC diff --git a/Source/JavaScriptCore/jit/JITStubsARMv7.h b/Source/JavaScriptCore/jit/JITStubsARMv7.h index 9a71b631b..28bbf8a92 100644 --- a/Source/JavaScriptCore/jit/JITStubsARMv7.h +++ b/Source/JavaScriptCore/jit/JITStubsARMv7.h @@ -12,7 +12,7 @@ * 2. 
Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 3. Neither the name of Apple Inc. ("Apple") nor the names of + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * @@ -41,9 +41,9 @@ namespace JSC { -#if COMPILER(GCC_OR_CLANG) +#if COMPILER(GCC) -#if ENABLE(MASM_PROBE) +#if USE(MASM_PROBE) // The following are offsets for MacroAssembler::ProbeContext fields accessed // by the ctiMasmProbeTrampoline stub. @@ -52,7 +52,7 @@ namespace JSC { #define PROBE_ARG1_OFFSET (1 * PTR_SIZE) #define PROBE_ARG2_OFFSET (2 * PTR_SIZE) -#define PROBE_FIRST_GPREG_OFFSET (3 * PTR_SIZE) +#define PROBE_FIRST_GPREG_OFFSET (4 * PTR_SIZE) #define GPREG_SIZE 4 #define PROBE_CPU_R0_OFFSET (PROBE_FIRST_GPREG_OFFSET + (0 * GPREG_SIZE)) @@ -94,6 +94,8 @@ namespace JSC { #define PROBE_CPU_D13_OFFSET (PROBE_FIRST_FPREG_OFFSET + (13 * FPREG_SIZE)) #define PROBE_CPU_D14_OFFSET (PROBE_FIRST_FPREG_OFFSET + (14 * FPREG_SIZE)) #define PROBE_CPU_D15_OFFSET (PROBE_FIRST_FPREG_OFFSET + (15 * FPREG_SIZE)) + +#if CPU(APPLE_ARMV7S) #define PROBE_CPU_D16_OFFSET (PROBE_FIRST_FPREG_OFFSET + (16 * FPREG_SIZE)) #define PROBE_CPU_D17_OFFSET (PROBE_FIRST_FPREG_OFFSET + (17 * FPREG_SIZE)) #define PROBE_CPU_D18_OFFSET (PROBE_FIRST_FPREG_OFFSET + (18 * FPREG_SIZE)) @@ -111,6 +113,10 @@ namespace JSC { #define PROBE_CPU_D30_OFFSET (PROBE_FIRST_FPREG_OFFSET + (30 * FPREG_SIZE)) #define PROBE_CPU_D31_OFFSET (PROBE_FIRST_FPREG_OFFSET + (31 * FPREG_SIZE)) #define PROBE_SIZE (PROBE_FIRST_FPREG_OFFSET + (32 * FPREG_SIZE)) +#else +#define PROBE_SIZE (PROBE_FIRST_FPREG_OFFSET + (16 * FPREG_SIZE)) +#endif // CPU(APPLE_ARMV7S) + // These ASSERTs remind you that if you change the layout of ProbeContext, // you need to change ctiMasmProbeTrampoline offsets above to match. 
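
// The COMPILE_ASSERTs in this header are the only contract between the
// hand-written ctiMasmProbeTrampoline offsets and the real layout of
// MacroAssembler::ProbeContext: move a field and the build fails instead
// of the probe silently reading garbage. The same idiom in standard C++,
// with an illustrative struct (PROBE_ARG1_OFFSET is 1 * PTR_SIZE above):
#include <cstddef>
struct ProbeContextSketch {
    void* probeFunction;   // 0 * sizeof(void*)
    void* arg1;            // 1 * sizeof(void*)
    void* arg2;            // 2 * sizeof(void*)
};
static_assert(offsetof(ProbeContextSketch, arg1) == 1 * sizeof(void*),
              "asm offset for arg1 must match the struct layout");
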
@@ -156,6 +162,7 @@ COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d13) == PROBE_CPU_D13_OFFSET, ProbeContext_cpu COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d14) == PROBE_CPU_D14_OFFSET, ProbeContext_cpu_d14_offset_matches_ctiMasmProbeTrampoline); COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d15) == PROBE_CPU_D15_OFFSET, ProbeContext_cpu_d15_offset_matches_ctiMasmProbeTrampoline); +#if CPU(APPLE_ARMV7S) COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d16) == PROBE_CPU_D16_OFFSET, ProbeContext_cpu_d16_offset_matches_ctiMasmProbeTrampoline); COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d17) == PROBE_CPU_D17_OFFSET, ProbeContext_cpu_d17_offset_matches_ctiMasmProbeTrampoline); COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d18) == PROBE_CPU_D18_OFFSET, ProbeContext_cpu_d18_offset_matches_ctiMasmProbeTrampoline); @@ -172,6 +179,7 @@ COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d28) == PROBE_CPU_D28_OFFSET, ProbeContext_cpu COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d29) == PROBE_CPU_D29_OFFSET, ProbeContext_cpu_d29_offset_matches_ctiMasmProbeTrampoline); COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d30) == PROBE_CPU_D30_OFFSET, ProbeContext_cpu_d30_offset_matches_ctiMasmProbeTrampoline); COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d31) == PROBE_CPU_D31_OFFSET, ProbeContext_cpu_d31_offset_matches_ctiMasmProbeTrampoline); +#endif // CPU(APPLE_ARMV7S) COMPILE_ASSERT(sizeof(MacroAssembler::ProbeContext) == PROBE_SIZE, ProbeContext_size_matches_ctiMasmProbeTrampoline); @@ -230,7 +238,11 @@ SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n" "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n" "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_D0_OFFSET) "\n" +#if CPU(APPLE_ARMV7S) "vstmia.64 ip, { d0-d31 }" "\n" +#else + "vstmia.64 ip, { d0-d15 }" "\n" +#endif "mov fp, sp" "\n" // Save the ProbeContext*. @@ -243,8 +255,13 @@ SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n" // To enable probes to modify register state, we copy all registers // out of the ProbeContext before returning. +#if CPU(APPLE_ARMV7S) "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_D31_OFFSET + FPREG_SIZE) "\n" "vldmdb.64 ip!, { d0-d31 }" "\n" +#else + "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_D15_OFFSET + FPREG_SIZE) "\n" + "vldmdb.64 ip!, { d0-d15 }" "\n" +#endif "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R11_OFFSET + GPREG_SIZE) "\n" "ldmdb ip, { r0-r11 }" "\n" "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSCR_OFFSET) "]" "\n" @@ -325,9 +342,9 @@ SYMBOL_STRING(ctiMasmProbeTrampolineEnd) ":" "\n" "pop { pc }" "\n" ); -#endif // ENABLE(MASM_PROBE) +#endif // USE(MASM_PROBE) -#endif // COMPILER(GCC_OR_CLANG) +#endif // COMPILER(GCC) } // namespace JSC diff --git a/Source/JavaScriptCore/jit/JITStubsMSVC64.asm b/Source/JavaScriptCore/jit/JITStubsMSVC64.asm deleted file mode 100644 index d073a2496..000000000 --- a/Source/JavaScriptCore/jit/JITStubsMSVC64.asm +++ /dev/null @@ -1,44 +0,0 @@ -;/* -; Copyright (C) 2014 Apple Inc. All rights reserved. -; -; Redistribution and use in source and binary forms, with or without -; modification, are permitted provided that the following conditions -; are met: -; 1. Redistributions of source code must retain the above copyright -; notice, this list of conditions and the following disclaimer. -; 2. Redistributions in binary form must reproduce the above copyright -; notice, this list of conditions and the following disclaimer in the -; documentation and/or other materials provided with the distribution. -; -; THIS SOFTWARE IS PROVIDED BY APPLE INC. 
``AS IS'' AND ANY -; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR -; PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR -; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, -; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, -; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR -; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY -; OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -;*/ - -EXTERN getHostCallReturnValueWithExecState : near - -PUBLIC getHostCallReturnValue - -_TEXT SEGMENT - -getHostCallReturnValue PROC - mov rcx, rbp - ; Allocate space for all 4 parameter registers, and align stack pointer to 16 bytes boundary by allocating another 8 bytes. - ; The stack alignment is needed to fix a crash in the CRT library on a floating point instruction. - sub rsp, 40 - call getHostCallReturnValueWithExecState - add rsp, 40 - ret -getHostCallReturnValue ENDP - -_TEXT ENDS - -END diff --git a/Source/JavaScriptCore/jit/JITStubsX86.h b/Source/JavaScriptCore/jit/JITStubsX86.h index d5bdcce26..7a26a5afa 100644 --- a/Source/JavaScriptCore/jit/JITStubsX86.h +++ b/Source/JavaScriptCore/jit/JITStubsX86.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008, 2009, 2013, 2014 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2009, 2013 Apple Inc. All rights reserved. * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca> * Copyright (C) Research In Motion Limited 2010, 2011. All rights reserved. * @@ -12,7 +12,7 @@ * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 3. Neither the name of Apple Inc. ("Apple") nor the names of + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* @@ -32,6 +32,7 @@ #define JITStubsX86_h #include "JITStubsX86Common.h" +#include <wtf/InlineASM.h> #if !CPU(X86) #error "JITStubsX86.h should only be #included if CPU(X86)" @@ -43,9 +44,9 @@ namespace JSC { -#if COMPILER(GCC_OR_CLANG) +#if COMPILER(GCC) -#if ENABLE(MASM_PROBE) +#if USE(MASM_PROBE) asm ( ".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n" HIDE_SYMBOL(ctiMasmProbeTrampoline) "\n" @@ -95,14 +96,14 @@ SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n" "movl 6 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n" "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%ebp)" "\n" - "movq %xmm0, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM0_OFFSET) "(%ebp)" "\n" - "movq %xmm1, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM1_OFFSET) "(%ebp)" "\n" - "movq %xmm2, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM2_OFFSET) "(%ebp)" "\n" - "movq %xmm3, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM3_OFFSET) "(%ebp)" "\n" - "movq %xmm4, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM4_OFFSET) "(%ebp)" "\n" - "movq %xmm5, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM5_OFFSET) "(%ebp)" "\n" - "movq %xmm6, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM6_OFFSET) "(%ebp)" "\n" - "movq %xmm7, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM7_OFFSET) "(%ebp)" "\n" + "movdqa %xmm0, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM0_OFFSET) "(%ebp)" "\n" + "movdqa %xmm1, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM1_OFFSET) "(%ebp)" "\n" + "movdqa %xmm2, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM2_OFFSET) "(%ebp)" "\n" + "movdqa %xmm3, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM3_OFFSET) "(%ebp)" "\n" + "movdqa %xmm4, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM4_OFFSET) "(%ebp)" "\n" + "movdqa %xmm5, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM5_OFFSET) "(%ebp)" "\n" + "movdqa %xmm6, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM6_OFFSET) "(%ebp)" "\n" + "movdqa %xmm7, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM7_OFFSET) "(%ebp)" "\n" // Reserve stack space for the arg while maintaining the required stack // pointer 32 byte alignment: @@ -119,14 +120,14 @@ SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n" "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ESI_OFFSET) "(%ebp), %esi" "\n" "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EDI_OFFSET) "(%ebp), %edi" "\n" - "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM0_OFFSET) "(%ebp), %xmm0" "\n" - "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM1_OFFSET) "(%ebp), %xmm1" "\n" - "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM2_OFFSET) "(%ebp), %xmm2" "\n" - "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM3_OFFSET) "(%ebp), %xmm3" "\n" - "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM4_OFFSET) "(%ebp), %xmm4" "\n" - "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM5_OFFSET) "(%ebp), %xmm5" "\n" - "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM6_OFFSET) "(%ebp), %xmm6" "\n" - "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM7_OFFSET) "(%ebp), %xmm7" "\n" + "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM0_OFFSET) "(%ebp), %xmm0" "\n" + "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM1_OFFSET) "(%ebp), %xmm1" "\n" + "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM2_OFFSET) "(%ebp), %xmm2" "\n" + "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM3_OFFSET) "(%ebp), %xmm3" "\n" + "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM4_OFFSET) "(%ebp), %xmm4" "\n" + "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM5_OFFSET) "(%ebp), %xmm5" "\n" + "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM6_OFFSET) "(%ebp), %xmm6" "\n" + "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM7_OFFSET) "(%ebp), %xmm7" "\n" // There are 6 more registers left to restore: // eax, ecx, ebp, esp, eip, and eflags. 
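
// The movq/movdqa distinction in the next hunk matters: movq moves only
// the low 64 bits of an XMM register (one double), while movdqa moves
// the full 128 bits but faults unless the address is 16-byte aligned --
// which is why the surrounding trampoline is careful to keep the stack
// pointer aligned. The usual align-down computation, as a sketch:
#include <cstdint>
static inline uintptr_t alignDown(uintptr_t p, uintptr_t alignment)
{
    // alignment must be a power of two, e.g. 16 for movdqa-sized slots
    return p & ~(alignment - 1);
}
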
@@ -196,9 +197,452 @@ SYMBOL_STRING(ctiMasmProbeTrampolineEnd) ":" "\n" "popl %ebp" "\n" "ret" "\n" ); -#endif // ENABLE(MASM_PROBE) +#endif // USE(MASM_PROBE) -#endif // COMPILER(GCC_OR_CLANG) +#if OS(WINDOWS) +extern "C" { + + // FIXME: Since Windows doesn't use the LLInt, we have inline stubs here. + // Until the LLInt is changed to support Windows, these stub needs to be updated. + asm ( + ".globl " SYMBOL_STRING(callToJavaScript) "\n" + HIDE_SYMBOL(callToJavaScript) "\n" + SYMBOL_STRING(callToJavaScript) ":" "\n" + "mov (%esp),%edx" "\n" + "push %ebp" "\n" + "mov %ebp,%eax" "\n" + "mov %esp,%ebp" "\n" + "push %esi" "\n" + "push %edi" "\n" + "push %ebx" "\n" + "sub $0x1c,%esp" "\n" + "mov 0x34(%esp),%ecx" "\n" + "mov 0x38(%esp),%esi" "\n" + "mov 0x3c(%esp),%ebp" "\n" + "sub $0x20,%ebp" "\n" + "movl $0x0,0x24(%ebp)" "\n" + "movl $0x0,0x20(%ebp)" "\n" + "movl $0x0,0x1c(%ebp)" "\n" + "mov %ecx,0x18(%ebp)" "\n" + "mov (%ecx),%ebx" "\n" + "movl $0x0,0x14(%ebp)" "\n" + "mov %ebx,0x10(%ebp)" "\n" + "movl $0x0,0xc(%ebp)" "\n" + "movl $0x1,0x8(%ebp)" "\n" + "mov %edx,0x4(%ebp)" "\n" + "mov %eax,0x0(%ebp)" "\n" + "mov %ebp,%eax" "\n" + + "mov 0x28(%esi),%edx" "\n" + "add $0x5,%edx" "\n" + "shl $0x3,%edx" "\n" + "sub %edx,%ebp" "\n" + "mov %eax,0x0(%ebp)" "\n" + + "mov $0x5,%eax" "\n" + + ".copyHeaderLoop:" "\n" + "sub $0x1,%eax" "\n" + "mov (%esi,%eax,8),%ecx" "\n" + "mov %ecx,0x8(%ebp,%eax,8)" "\n" + "mov 0x4(%esi,%eax,8),%ecx" "\n" + "mov %ecx,0xc(%ebp,%eax,8)" "\n" + "test %eax,%eax" "\n" + "jne .copyHeaderLoop" "\n" + + "mov 0x18(%esi),%edx" "\n" + "sub $0x1,%edx" "\n" + "mov 0x28(%esi),%ecx" "\n" + "sub $0x1,%ecx" "\n" + + "cmp %ecx,%edx" "\n" + "je .copyArgs" "\n" + + "xor %eax,%eax" "\n" + "mov $0xfffffffc,%ebx" "\n" + + ".fillExtraArgsLoop:" "\n" + "sub $0x1,%ecx" "\n" + "mov %eax,0x30(%ebp,%ecx,8)" "\n" + "mov %ebx,0x34(%ebp,%ecx,8)" "\n" + "cmp %ecx,%edx" "\n" + "jne .fillExtraArgsLoop" "\n" + + ".copyArgs:" "\n" + "mov 0x2c(%esi),%eax" "\n" + + ".copyArgsLoop:" "\n" + "test %edx,%edx" "\n" + "je .copyArgsDone" "\n" + "sub $0x1,%edx" "\n" + "mov (%eax,%edx,8),%ecx" "\n" + "mov 0x4(%eax,%edx,8),%ebx" "\n" + "mov %ecx,0x30(%ebp,%edx,8)" "\n" + "mov %ebx,0x34(%ebp,%edx,8)" "\n" + "jmp .copyArgsLoop" "\n" + + ".copyArgsDone:" "\n" + "mov 0x34(%esp),%ecx" "\n" + "mov %ebp,(%ecx)" "\n" + + "call *0x30(%esp)" "\n" + + "cmpl $0x1,0x8(%ebp)" "\n" + "je .calleeFramePopped" "\n" + "mov 0x0(%ebp),%ebp" "\n" + + ".calleeFramePopped:" "\n" + "mov 0x18(%ebp),%ecx" "\n" + "mov 0x10(%ebp),%ebx" "\n" + "mov %ebx,(%ecx)" "\n" + + "add $0x1c,%esp" "\n" + "pop %ebx" "\n" + "pop %edi" "\n" + "pop %esi" "\n" + "pop %ebp" "\n" + "ret" "\n" + + ".globl " SYMBOL_STRING(returnFromJavaScript) "\n" + HIDE_SYMBOL(returnFromJavaScript) "\n" + SYMBOL_STRING(returnFromJavaScript) ":" "\n" + "add $0x1c,%esp" "\n" + "pop %ebx" "\n" + "pop %edi" "\n" + "pop %esi" "\n" + "pop %ebp" "\n" + "ret" "\n" + + ".globl " SYMBOL_STRING(callToNativeFunction) "\n" + HIDE_SYMBOL(callToNativeFunction) "\n" + SYMBOL_STRING(callToNativeFunction) ":" "\n" + "mov (%esp),%edx" "\n" + "push %ebp" "\n" + "mov %ebp,%eax" "\n" + "mov %esp,%ebp" "\n" + "push %esi" "\n" + "push %edi" "\n" + "push %ebx" "\n" + "sub $0x1c,%esp" "\n" + "mov 0x34(%esp),%ecx" "\n" + "mov 0x38(%esp),%esi" "\n" + "mov 0x3c(%esp),%ebp" "\n" + "sub $0x20,%ebp" "\n" + "movl $0x0,0x24(%ebp)" "\n" + "movl $0x0,0x20(%ebp)" "\n" + "movl $0x0,0x1c(%ebp)" "\n" + "mov %ecx,0x18(%ebp)" "\n" + "mov (%ecx),%ebx" "\n" + "movl $0x0,0x14(%ebp)" "\n" + "mov %ebx,0x10(%ebp)" "\n" + 
"movl $0x0,0xc(%ebp)" "\n" + "movl $0x1,0x8(%ebp)" "\n" + "mov %edx,0x4(%ebp)" "\n" + "mov %eax,0x0(%ebp)" "\n" + "mov %ebp,%eax" "\n" + + "mov 0x28(%esi),%edx" "\n" + "add $0x5,%edx" "\n" + "shl $0x3,%edx" "\n" + "sub %edx,%ebp" "\n" + "mov %eax,0x0(%ebp)" "\n" + + "mov $0x5,%eax" "\n" + + "copyHeaderLoop:" "\n" + "sub $0x1,%eax" "\n" + "mov (%esi,%eax,8),%ecx" "\n" + "mov %ecx,0x8(%ebp,%eax,8)" "\n" + "mov 0x4(%esi,%eax,8),%ecx" "\n" + "mov %ecx,0xc(%ebp,%eax,8)" "\n" + "test %eax,%eax" "\n" + "jne copyHeaderLoop" "\n" + + "mov 0x18(%esi),%edx" "\n" + "sub $0x1,%edx" "\n" + "mov 0x28(%esi),%ecx" "\n" + "sub $0x1,%ecx" "\n" + + "cmp %ecx,%edx" "\n" + "je copyArgs" "\n" + + "xor %eax,%eax" "\n" + "mov $0xfffffffc,%ebx" "\n" + + "fillExtraArgsLoop:" "\n" + "sub $0x1,%ecx" "\n" + "mov %eax,0x30(%ebp,%ecx,8)" "\n" + "mov %ebx,0x34(%ebp,%ecx,8)" "\n" + "cmp %ecx,%edx" "\n" + "jne fillExtraArgsLoop" "\n" + + "copyArgs:" "\n" + "mov 0x2c(%esi),%eax" "\n" + + "copyArgsLoop:" "\n" + "test %edx,%edx" "\n" + "je copyArgsDone" "\n" + "sub $0x1,%edx" "\n" + "mov (%eax,%edx,8),%ecx" "\n" + "mov 0x4(%eax,%edx,8),%ebx" "\n" + "mov %ecx,0x30(%ebp,%edx,8)" "\n" + "mov %ebx,0x34(%ebp,%edx,8)" "\n" + "jmp copyArgsLoop" "\n" + + "copyArgsDone:" "\n" + "mov 0x34(%esp),%ecx" "\n" + "mov %ebp,(%ecx)" "\n" + + "mov 0x30(%esp),%edi" "\n" + "mov %ebp,0x30(%esp)" "\n" + "mov %ebp,%ecx" "\n" + "call *%edi" "\n" + + "cmpl $0x1,0x8(%ebp)" "\n" + "je calleeFramePopped" "\n" + "mov 0x0(%ebp),%ebp" "\n" + + "calleeFramePopped:" "\n" + "mov 0x18(%ebp),%ecx" "\n" + "mov 0x10(%ebp),%ebx" "\n" + "mov %ebx,(%ecx)" "\n" + + "add $0x1c,%esp" "\n" + "pop %ebx" "\n" + "pop %edi" "\n" + "pop %esi" "\n" + "pop %ebp" "\n" + "ret" "\n" + ); +} + +#endif // OS(WINDOWS) + +#endif // COMPILER(GCC) + +#if COMPILER(MSVC) + +extern "C" { + + // FIXME: Since Windows doesn't use the LLInt, we have inline stubs here. + // Until the LLInt is changed to support Windows, these stub needs to be updated. 
+ __declspec(naked) EncodedJSValue callToJavaScript(void* code, ExecState**, ProtoCallFrame*, Register*) + { + __asm { + mov edx, [esp] + push ebp; + mov eax, ebp; + mov ebp, esp; + push esi; + push edi; + push ebx; + sub esp, 0x1c; + mov ecx, dword ptr[esp + 0x34]; + mov esi, dword ptr[esp + 0x38]; + mov ebp, dword ptr[esp + 0x3c]; + sub ebp, 0x20; + mov dword ptr[ebp + 0x24], 0; + mov dword ptr[ebp + 0x20], 0; + mov dword ptr[ebp + 0x1c], 0; + mov dword ptr[ebp + 0x18], ecx; + mov ebx, [ecx]; + mov dword ptr[ebp + 0x14], 0; + mov dword ptr[ebp + 0x10], ebx; + mov dword ptr[ebp + 0xc], 0; + mov dword ptr[ebp + 0x8], 1; + mov dword ptr[ebp + 0x4], edx; + mov dword ptr[ebp], eax; + mov eax, ebp; + + mov edx, dword ptr[esi + 0x28]; + add edx, 5; + sal edx, 3; + sub ebp, edx; + mov dword ptr[ebp], eax; + + mov eax, 5; + + copyHeaderLoop: + sub eax, 1; + mov ecx, dword ptr[esi + eax * 8]; + mov dword ptr 8[ebp + eax * 8], ecx; + mov ecx, dword ptr 4[esi + eax * 8]; + mov dword ptr 12[ebp + eax * 8], ecx; + test eax, eax; + jnz copyHeaderLoop; + + mov edx, dword ptr[esi + 0x18]; + sub edx, 1; + mov ecx, dword ptr[esi + 0x28]; + sub ecx, 1; + + cmp edx, ecx; + je copyArgs; + + xor eax, eax; + mov ebx, -4; + + fillExtraArgsLoop: + sub ecx, 1; + mov dword ptr 0x30[ebp + ecx * 8], eax; + mov dword ptr 0x34[ebp + ecx * 8], ebx; + cmp edx, ecx; + jne fillExtraArgsLoop; + + copyArgs: + mov eax, dword ptr[esi + 0x2c]; + + copyArgsLoop: + test edx, edx; + jz copyArgsDone; + sub edx, 1; + mov ecx, dword ptr 0[eax + edx * 8]; + mov ebx, dword ptr 4[eax + edx * 8]; + mov dword ptr 0x30[ebp + edx * 8], ecx; + mov dword ptr 0x34[ebp + edx * 8], ebx; + jmp copyArgsLoop; + + copyArgsDone: + mov ecx, dword ptr[esp + 0x34]; + mov dword ptr[ecx], ebp; + + call dword ptr[esp + 0x30]; + + cmp dword ptr[ebp + 8], 1; + je calleeFramePopped; + mov ebp, dword ptr[ebp]; + + calleeFramePopped: + mov ecx, dword ptr[ebp + 0x18]; + mov ebx, dword ptr[ebp + 0x10]; + mov dword ptr[ecx], ebx; + + add esp, 0x1c; + pop ebx; + pop edi; + pop esi; + pop ebp; + ret; + } + } + + __declspec(naked) void returnFromJavaScript() + { + __asm { + add esp, 0x1c; + pop ebx; + pop edi; + pop esi; + pop ebp; + ret; + } + } + + __declspec(naked) EncodedJSValue callToNativeFunction(void* code, ExecState**, ProtoCallFrame*, Register*) + { + __asm { + mov edx, [esp] + push ebp; + mov eax, ebp; + mov ebp, esp; + push esi; + push edi; + push ebx; + sub esp, 0x1c; + mov ecx, [esp + 0x34]; + mov esi, [esp + 0x38]; + mov ebp, [esp + 0x3c]; + sub ebp, 0x20; + mov dword ptr[ebp + 0x24], 0; + mov dword ptr[ebp + 0x20], 0; + mov dword ptr[ebp + 0x1c], 0; + mov dword ptr[ebp + 0x18], ecx; + mov ebx, [ecx]; + mov dword ptr[ebp + 0x14], 0; + mov dword ptr[ebp + 0x10], ebx; + mov dword ptr[ebp + 0xc], 0; + mov dword ptr[ebp + 0x8], 1; + mov dword ptr[ebp + 0x4], edx; + mov dword ptr[ebp], eax; + mov eax, ebp; + + mov edx, dword ptr[esi + 0x28]; + add edx, 5; + sal edx, 3; + sub ebp, edx; + mov dword ptr[ebp], eax; + + mov eax, 5; + + copyHeaderLoop: + sub eax, 1; + mov ecx, dword ptr[esi + eax * 8]; + mov dword ptr 8[ebp + eax * 8], ecx; + mov ecx, dword ptr 4[esi + eax * 8]; + mov dword ptr 12[ebp + eax * 8], ecx; + test eax, eax; + jnz copyHeaderLoop; + + mov edx, dword ptr[esi + 0x18]; + sub edx, 1; + mov ecx, dword ptr[esi + 0x28]; + sub ecx, 1; + + cmp edx, ecx; + je copyArgs; + + xor eax, eax; + mov ebx, -4; + + fillExtraArgsLoop: + sub ecx, 1; + mov dword ptr 0x30[ebp + ecx * 8], eax; + mov dword ptr 0x34[ebp + ecx * 8], ebx; + cmp edx, ecx; + jne 
fillExtraArgsLoop; + + copyArgs: + mov eax, dword ptr[esi + 0x2c]; + + copyArgsLoop: + test edx, edx; + jz copyArgsDone; + sub edx, 1; + mov ecx, dword ptr 0[eax + edx * 8]; + mov ebx, dword ptr 4[eax + edx * 8]; + mov dword ptr 0x30[ebp + edx * 8], ecx; + mov dword ptr 0x34[ebp + edx * 8], ebx; + jmp copyArgsLoop; + + copyArgsDone: + mov ecx, dword ptr[esp + 0x34]; + mov dword ptr[ecx], ebp; + + mov edi, dword ptr[esp + 0x30]; + mov dword ptr[esp + 0x30], ebp; + mov ecx, ebp; + call edi; + + cmp dword ptr[ebp + 8], 1; + je calleeFramePopped; + mov ebp, dword ptr[ebp]; + + calleeFramePopped: + mov ecx, dword ptr[ebp + 0x18]; + mov ebx, dword ptr[ebp + 0x10]; + mov dword ptr[ecx], ebx; + + add esp, 0x1c; + pop ebx; + pop edi; + pop esi; + pop ebp; + ret; + } + } +} + +#endif // COMPILER(MSVC) } // namespace JSC diff --git a/Source/JavaScriptCore/jit/JITStubsX86Common.h b/Source/JavaScriptCore/jit/JITStubsX86Common.h index 786a30f2e..f102f3b25 100644 --- a/Source/JavaScriptCore/jit/JITStubsX86Common.h +++ b/Source/JavaScriptCore/jit/JITStubsX86Common.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. + * Copyright (C) 2013 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -34,9 +34,9 @@ namespace JSC { -#if COMPILER(GCC_OR_CLANG) +#if COMPILER(GCC) -#if ENABLE(MASM_PROBE) +#if USE(MASM_PROBE) // The following are offsets for MacroAssembler::ProbeContext fields accessed // by the ctiMasmProbeTrampoline stub. @@ -50,35 +50,39 @@ namespace JSC { #define PROBE_ARG1_OFFSET (1 * PTR_SIZE) #define PROBE_ARG2_OFFSET (2 * PTR_SIZE) -#define PROBE_FIRST_GPR_OFFSET (3 * PTR_SIZE) -#define PROBE_CPU_EAX_OFFSET (PROBE_FIRST_GPR_OFFSET + (0 * PTR_SIZE)) -#define PROBE_CPU_ECX_OFFSET (PROBE_FIRST_GPR_OFFSET + (1 * PTR_SIZE)) -#define PROBE_CPU_EDX_OFFSET (PROBE_FIRST_GPR_OFFSET + (2 * PTR_SIZE)) -#define PROBE_CPU_EBX_OFFSET (PROBE_FIRST_GPR_OFFSET + (3 * PTR_SIZE)) -#define PROBE_CPU_ESP_OFFSET (PROBE_FIRST_GPR_OFFSET + (4 * PTR_SIZE)) -#define PROBE_CPU_EBP_OFFSET (PROBE_FIRST_GPR_OFFSET + (5 * PTR_SIZE)) -#define PROBE_CPU_ESI_OFFSET (PROBE_FIRST_GPR_OFFSET + (6 * PTR_SIZE)) -#define PROBE_CPU_EDI_OFFSET (PROBE_FIRST_GPR_OFFSET + (7 * PTR_SIZE)) +#define PROBE_CPU_EAX_OFFSET (4 * PTR_SIZE) +#define PROBE_CPU_EBX_OFFSET (5 * PTR_SIZE) +#define PROBE_CPU_ECX_OFFSET (6 * PTR_SIZE) +#define PROBE_CPU_EDX_OFFSET (7 * PTR_SIZE) +#define PROBE_CPU_ESI_OFFSET (8 * PTR_SIZE) +#define PROBE_CPU_EDI_OFFSET (9 * PTR_SIZE) +#define PROBE_CPU_EBP_OFFSET (10 * PTR_SIZE) +#define PROBE_CPU_ESP_OFFSET (11 * PTR_SIZE) #if CPU(X86) -#define PROBE_FIRST_SPECIAL_OFFSET (PROBE_FIRST_GPR_OFFSET + (8 * PTR_SIZE)) +#define PROBE_FIRST_SPECIAL_OFFSET (12 * PTR_SIZE) #else // CPU(X86_64) -#define PROBE_CPU_R8_OFFSET (PROBE_FIRST_GPR_OFFSET + (8 * PTR_SIZE)) -#define PROBE_CPU_R9_OFFSET (PROBE_FIRST_GPR_OFFSET + (9 * PTR_SIZE)) -#define PROBE_CPU_R10_OFFSET (PROBE_FIRST_GPR_OFFSET + (10 * PTR_SIZE)) -#define PROBE_CPU_R11_OFFSET (PROBE_FIRST_GPR_OFFSET + (11 * PTR_SIZE)) -#define PROBE_CPU_R12_OFFSET (PROBE_FIRST_GPR_OFFSET + (12 * PTR_SIZE)) -#define PROBE_CPU_R13_OFFSET (PROBE_FIRST_GPR_OFFSET + (13 * PTR_SIZE)) -#define PROBE_CPU_R14_OFFSET (PROBE_FIRST_GPR_OFFSET + (14 * PTR_SIZE)) -#define PROBE_CPU_R15_OFFSET (PROBE_FIRST_GPR_OFFSET + (15 * PTR_SIZE)) -#define PROBE_FIRST_SPECIAL_OFFSET (PROBE_FIRST_GPR_OFFSET + (16 * PTR_SIZE)) +#define PROBE_CPU_R8_OFFSET (12 * 
PTR_SIZE) +#define PROBE_CPU_R9_OFFSET (13 * PTR_SIZE) +#define PROBE_CPU_R10_OFFSET (14 * PTR_SIZE) +#define PROBE_CPU_R11_OFFSET (15 * PTR_SIZE) +#define PROBE_CPU_R12_OFFSET (16 * PTR_SIZE) +#define PROBE_CPU_R13_OFFSET (17 * PTR_SIZE) +#define PROBE_CPU_R14_OFFSET (18 * PTR_SIZE) +#define PROBE_CPU_R15_OFFSET (19 * PTR_SIZE) +#define PROBE_FIRST_SPECIAL_OFFSET (20 * PTR_SIZE) #endif // CPU(X86_64) #define PROBE_CPU_EIP_OFFSET (PROBE_FIRST_SPECIAL_OFFSET + (0 * PTR_SIZE)) #define PROBE_CPU_EFLAGS_OFFSET (PROBE_FIRST_SPECIAL_OFFSET + (1 * PTR_SIZE)) -#define PROBE_FIRST_XMM_OFFSET (PROBE_FIRST_SPECIAL_OFFSET + (2 * PTR_SIZE)) -#define XMM_SIZE 8 +#if CPU(X86) +#define PROBE_FIRST_XMM_OFFSET (PROBE_FIRST_SPECIAL_OFFSET + (4 * PTR_SIZE)) // After padding. +#else // CPU(X86_64) +#define PROBE_FIRST_XMM_OFFSET (PROBE_FIRST_SPECIAL_OFFSET + (2 * PTR_SIZE)) // After padding. +#endif // CPU(X86_64) + +#define XMM_SIZE 16 #define PROBE_CPU_XMM0_OFFSET (PROBE_FIRST_XMM_OFFSET + (0 * XMM_SIZE)) #define PROBE_CPU_XMM1_OFFSET (PROBE_FIRST_XMM_OFFSET + (1 * XMM_SIZE)) #define PROBE_CPU_XMM2_OFFSET (PROBE_FIRST_XMM_OFFSET + (2 * XMM_SIZE)) @@ -88,19 +92,7 @@ namespace JSC { #define PROBE_CPU_XMM6_OFFSET (PROBE_FIRST_XMM_OFFSET + (6 * XMM_SIZE)) #define PROBE_CPU_XMM7_OFFSET (PROBE_FIRST_XMM_OFFSET + (7 * XMM_SIZE)) -#if CPU(X86) #define PROBE_SIZE (PROBE_CPU_XMM7_OFFSET + XMM_SIZE) -#else // CPU(X86_64) -#define PROBE_CPU_XMM8_OFFSET (PROBE_FIRST_XMM_OFFSET + (8 * XMM_SIZE)) -#define PROBE_CPU_XMM9_OFFSET (PROBE_FIRST_XMM_OFFSET + (9 * XMM_SIZE)) -#define PROBE_CPU_XMM10_OFFSET (PROBE_FIRST_XMM_OFFSET + (10 * XMM_SIZE)) -#define PROBE_CPU_XMM11_OFFSET (PROBE_FIRST_XMM_OFFSET + (11 * XMM_SIZE)) -#define PROBE_CPU_XMM12_OFFSET (PROBE_FIRST_XMM_OFFSET + (12 * XMM_SIZE)) -#define PROBE_CPU_XMM13_OFFSET (PROBE_FIRST_XMM_OFFSET + (13 * XMM_SIZE)) -#define PROBE_CPU_XMM14_OFFSET (PROBE_FIRST_XMM_OFFSET + (14 * XMM_SIZE)) -#define PROBE_CPU_XMM15_OFFSET (PROBE_FIRST_XMM_OFFSET + (15 * XMM_SIZE)) -#define PROBE_SIZE (PROBE_CPU_XMM15_OFFSET + XMM_SIZE) -#endif // CPU(X86_64) // These ASSERTs remind you that if you change the layout of ProbeContext, // you need to change ctiMasmProbeTrampoline offsets above to match. 
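The COMPILE_ASSERTs updated in the next hunk are the mechanical form of that reminder: each hand-maintained offset macro is checked against the real ProbeContext layout at compile time. Reduced to a standalone sketch (the struct and names are illustrative, not JSC's):

    #include <cstddef>

    struct Context {        // stands in for MacroAssembler::ProbeContext
        void* probeFunction;
        void* arg1;
        void* arg2;
    };

    // Mirrored by hand in the trampoline asm, like PROBE_ARG1_OFFSET.
    #define MY_ARG1_OFFSET (1 * sizeof(void*))

    // Refuses to compile the moment the struct layout and the asm offset diverge.
    static_assert(offsetof(Context, arg1) == MY_ARG1_OFFSET,
        "asm offset out of sync with struct layout");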
@@ -140,24 +132,16 @@ COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm5) == PROBE_CPU_XMM5_OFFSET, ProbeContext_c COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm6) == PROBE_CPU_XMM6_OFFSET, ProbeContext_cpu_xmm6_offset_matches_ctiMasmProbeTrampoline); COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm7) == PROBE_CPU_XMM7_OFFSET, ProbeContext_cpu_xmm7_offset_matches_ctiMasmProbeTrampoline); -#if CPU(X86_64) -COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm8) == PROBE_CPU_XMM8_OFFSET, ProbeContext_cpu_xmm8_offset_matches_ctiMasmProbeTrampoline); -COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm9) == PROBE_CPU_XMM9_OFFSET, ProbeContext_cpu_xmm9_offset_matches_ctiMasmProbeTrampoline); -COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm10) == PROBE_CPU_XMM10_OFFSET, ProbeContext_cpu_xmm10_offset_matches_ctiMasmProbeTrampoline); -COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm11) == PROBE_CPU_XMM11_OFFSET, ProbeContext_cpu_xmm11_offset_matches_ctiMasmProbeTrampoline); -COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm12) == PROBE_CPU_XMM12_OFFSET, ProbeContext_cpu_xmm12_offset_matches_ctiMasmProbeTrampoline); -COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm13) == PROBE_CPU_XMM13_OFFSET, ProbeContext_cpu_xmm13_offset_matches_ctiMasmProbeTrampoline); -COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm14) == PROBE_CPU_XMM14_OFFSET, ProbeContext_cpu_xmm14_offset_matches_ctiMasmProbeTrampoline); -COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm15) == PROBE_CPU_XMM15_OFFSET, ProbeContext_cpu_xmm15_offset_matches_ctiMasmProbeTrampoline); -#endif // CPU(X86_64) - COMPILE_ASSERT(sizeof(MacroAssembler::ProbeContext) == PROBE_SIZE, ProbeContext_size_matches_ctiMasmProbeTrampoline); +// Also double check that the xmm registers are 16 byte (128-bit) aligned as +// required by the movdqa instruction used in the trampoline. +COMPILE_ASSERT(!(PROBE_OFFSETOF(cpu.xmm0) % 16), ProbeContext_xmm0_offset_not_aligned_properly); #undef PROBE_OFFSETOF -#endif // ENABLE(MASM_PROBE) +#endif // USE(MASM_PROBE) -#endif // COMPILER(GCC_OR_CLANG) +#endif // COMPILER(GCC) } // namespace JSC diff --git a/Source/JavaScriptCore/jit/JITStubsX86_64.h b/Source/JavaScriptCore/jit/JITStubsX86_64.h index 061746566..f2ed206ab 100644 --- a/Source/JavaScriptCore/jit/JITStubsX86_64.h +++ b/Source/JavaScriptCore/jit/JITStubsX86_64.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008, 2009, 2013, 2014 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2009, 2013 Apple Inc. All rights reserved. * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca> * Copyright (C) Research In Motion Limited 2010, 2011. All rights reserved. * @@ -12,7 +12,7 @@ * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 3. Neither the name of Apple Inc. ("Apple") nor the names of + * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* @@ -43,9 +43,9 @@ namespace JSC { -#if COMPILER(GCC_OR_CLANG) +#if COMPILER(GCC) -#if ENABLE(MASM_PROBE) +#if USE(MASM_PROBE) asm ( ".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n" HIDE_SYMBOL(ctiMasmProbeTrampoline) "\n" @@ -104,22 +104,14 @@ SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n" "movq %r14, " STRINGIZE_VALUE_OF(PROBE_CPU_R14_OFFSET) "(%rbp)" "\n" "movq %r15, " STRINGIZE_VALUE_OF(PROBE_CPU_R15_OFFSET) "(%rbp)" "\n" - "movq %xmm0, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM0_OFFSET) "(%rbp)" "\n" - "movq %xmm1, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM1_OFFSET) "(%rbp)" "\n" - "movq %xmm2, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM2_OFFSET) "(%rbp)" "\n" - "movq %xmm3, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM3_OFFSET) "(%rbp)" "\n" - "movq %xmm4, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM4_OFFSET) "(%rbp)" "\n" - "movq %xmm5, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM5_OFFSET) "(%rbp)" "\n" - "movq %xmm6, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM6_OFFSET) "(%rbp)" "\n" - "movq %xmm7, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM7_OFFSET) "(%rbp)" "\n" - "movq %xmm8, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM8_OFFSET) "(%rbp)" "\n" - "movq %xmm9, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM9_OFFSET) "(%rbp)" "\n" - "movq %xmm10, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM10_OFFSET) "(%rbp)" "\n" - "movq %xmm11, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM11_OFFSET) "(%rbp)" "\n" - "movq %xmm12, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM12_OFFSET) "(%rbp)" "\n" - "movq %xmm13, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM13_OFFSET) "(%rbp)" "\n" - "movq %xmm14, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM14_OFFSET) "(%rbp)" "\n" - "movq %xmm15, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM15_OFFSET) "(%rbp)" "\n" + "movdqa %xmm0, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM0_OFFSET) "(%rbp)" "\n" + "movdqa %xmm1, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM1_OFFSET) "(%rbp)" "\n" + "movdqa %xmm2, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM2_OFFSET) "(%rbp)" "\n" + "movdqa %xmm3, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM3_OFFSET) "(%rbp)" "\n" + "movdqa %xmm4, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM4_OFFSET) "(%rbp)" "\n" + "movdqa %xmm5, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM5_OFFSET) "(%rbp)" "\n" + "movdqa %xmm6, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM6_OFFSET) "(%rbp)" "\n" + "movdqa %xmm7, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM7_OFFSET) "(%rbp)" "\n" "movq %rbp, %rdi" "\n" // the ProbeContext* arg. 
"call *" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "(%rbp)" "\n" @@ -141,22 +133,14 @@ SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n" "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R14_OFFSET) "(%rbp), %r14" "\n" "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R15_OFFSET) "(%rbp), %r15" "\n" - "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM0_OFFSET) "(%rbp), %xmm0" "\n" - "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM1_OFFSET) "(%rbp), %xmm1" "\n" - "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM2_OFFSET) "(%rbp), %xmm2" "\n" - "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM3_OFFSET) "(%rbp), %xmm3" "\n" - "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM4_OFFSET) "(%rbp), %xmm4" "\n" - "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM5_OFFSET) "(%rbp), %xmm5" "\n" - "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM6_OFFSET) "(%rbp), %xmm6" "\n" - "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM7_OFFSET) "(%rbp), %xmm7" "\n" - "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM8_OFFSET) "(%rbp), %xmm8" "\n" - "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM9_OFFSET) "(%rbp), %xmm9" "\n" - "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM10_OFFSET) "(%rbp), %xmm10" "\n" - "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM11_OFFSET) "(%rbp), %xmm11" "\n" - "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM12_OFFSET) "(%rbp), %xmm12" "\n" - "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM13_OFFSET) "(%rbp), %xmm13" "\n" - "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM14_OFFSET) "(%rbp), %xmm14" "\n" - "movq " STRINGIZE_VALUE_OF(PROBE_CPU_XMM15_OFFSET) "(%rbp), %xmm15" "\n" + "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM0_OFFSET) "(%rbp), %xmm0" "\n" + "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM1_OFFSET) "(%rbp), %xmm1" "\n" + "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM2_OFFSET) "(%rbp), %xmm2" "\n" + "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM3_OFFSET) "(%rbp), %xmm3" "\n" + "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM4_OFFSET) "(%rbp), %xmm4" "\n" + "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM5_OFFSET) "(%rbp), %xmm5" "\n" + "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM6_OFFSET) "(%rbp), %xmm6" "\n" + "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM7_OFFSET) "(%rbp), %xmm7" "\n" // There are 6 more registers left to restore: // rax, rcx, rbp, rsp, rip, and rflags. @@ -225,9 +209,9 @@ SYMBOL_STRING(ctiMasmProbeTrampolineEnd) ":" "\n" "popq %rbp" "\n" "ret" "\n" ); -#endif // ENABLE(MASM_PROBE) +#endif // USE(MASM_PROBE) -#endif // COMPILER(GCC_OR_CLANG) +#endif // COMPILER(GCC) } // namespace JSC diff --git a/Source/JavaScriptCore/jit/JITThunks.cpp b/Source/JavaScriptCore/jit/JITThunks.cpp index fa610444a..4c48163e9 100644 --- a/Source/JavaScriptCore/jit/JITThunks.cpp +++ b/Source/JavaScriptCore/jit/JITThunks.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012, 2013, 2015 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2013 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -31,12 +31,12 @@ #include "Executable.h" #include "JIT.h" #include "VM.h" -#include "JSCInlines.h" +#include "Operations.h" namespace JSC { JITThunks::JITThunks() - : m_hostFunctionStubMap(std::make_unique<HostFunctionStubMap>()) + : m_hostFunctionStubMap(adoptPtr(new HostFunctionStubMap)) { } @@ -46,27 +46,24 @@ JITThunks::~JITThunks() MacroAssemblerCodePtr JITThunks::ctiNativeCall(VM* vm) { +#if ENABLE(LLINT) if (!vm->canUseJIT()) return MacroAssemblerCodePtr::createLLIntCodePtr(llint_native_call_trampoline); +#endif return ctiStub(vm, nativeCallGenerator).code(); } - MacroAssemblerCodePtr JITThunks::ctiNativeConstruct(VM* vm) { +#if ENABLE(LLINT) if (!vm->canUseJIT()) return MacroAssemblerCodePtr::createLLIntCodePtr(llint_native_construct_trampoline); +#endif return ctiStub(vm, nativeConstructGenerator).code(); } -MacroAssemblerCodePtr JITThunks::ctiNativeTailCall(VM* vm) -{ - ASSERT(vm->canUseJIT()); - return ctiStub(vm, nativeTailCallGenerator).code(); -} - MacroAssemblerCodeRef JITThunks::ctiStub(VM* vm, ThunkGenerator generator) { - LockHolder locker(m_lock); + Locker locker(m_lock); CTIStubMap::AddResult entry = m_ctiStubMap.add(generator, MacroAssemblerCodeRef()); if (entry.isNewEntry) { // Compilation thread can only retrieve existing entries. @@ -76,12 +73,6 @@ MacroAssemblerCodeRef JITThunks::ctiStub(VM* vm, ThunkGenerator generator) return entry.iterator->value; } -void JITThunks::finalize(Handle<Unknown> handle, void*) -{ - auto* nativeExecutable = jsCast<NativeExecutable*>(handle.get().asCell()); - weakRemove(*m_hostFunctionStubMap, std::make_pair(nativeExecutable->function(), nativeExecutable->constructor()), nativeExecutable); -} - NativeExecutable* JITThunks::hostFunctionStub(VM* vm, NativeFunction function, NativeFunction constructor) { ASSERT(!isCompilationThread()); @@ -89,41 +80,35 @@ NativeExecutable* JITThunks::hostFunctionStub(VM* vm, NativeFunction function, N if (NativeExecutable* nativeExecutable = m_hostFunctionStubMap->get(std::make_pair(function, constructor))) return nativeExecutable; - NativeExecutable* nativeExecutable = NativeExecutable::create( - *vm, - adoptRef(new NativeJITCode(JIT::compileCTINativeCall(vm, function), JITCode::HostCallThunk)), - function, - adoptRef(new NativeJITCode(MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct(vm)), JITCode::HostCallThunk)), - constructor, NoIntrinsic); - weakAdd(*m_hostFunctionStubMap, std::make_pair(function, constructor), Weak<NativeExecutable>(nativeExecutable, this)); + NativeExecutable* nativeExecutable = NativeExecutable::create(*vm, JIT::compileCTINativeCall(vm, function), function, MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct(vm)), constructor, NoIntrinsic); + weakAdd(*m_hostFunctionStubMap, std::make_pair(function, constructor), Weak<NativeExecutable>(nativeExecutable)); return nativeExecutable; } NativeExecutable* JITThunks::hostFunctionStub(VM* vm, NativeFunction function, ThunkGenerator generator, Intrinsic intrinsic) { ASSERT(!isCompilationThread()); - ASSERT(vm->canUseJIT()); if (NativeExecutable* nativeExecutable = m_hostFunctionStubMap->get(std::make_pair(function, &callHostFunctionAsConstructor))) return nativeExecutable; - RefPtr<JITCode> forCall; + MacroAssemblerCodeRef code; if (generator) { - MacroAssemblerCodeRef entry = generator(vm); - forCall = adoptRef(new DirectJITCode(entry, entry.code(), 
JITCode::HostCallThunk)); + if (vm->canUseJIT()) + code = generator(vm); + else + code = MacroAssemblerCodeRef(); } else - forCall = adoptRef(new NativeJITCode(JIT::compileCTINativeCall(vm, function), JITCode::HostCallThunk)); - - RefPtr<JITCode> forConstruct = adoptRef(new NativeJITCode(MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct(vm)), JITCode::HostCallThunk)); - - NativeExecutable* nativeExecutable = NativeExecutable::create(*vm, forCall, function, forConstruct, callHostFunctionAsConstructor, intrinsic); - weakAdd(*m_hostFunctionStubMap, std::make_pair(function, &callHostFunctionAsConstructor), Weak<NativeExecutable>(nativeExecutable, this)); + code = JIT::compileCTINativeCall(vm, function); + + NativeExecutable* nativeExecutable = NativeExecutable::create(*vm, code, function, MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct(vm)), callHostFunctionAsConstructor, intrinsic); + weakAdd(*m_hostFunctionStubMap, std::make_pair(function, &callHostFunctionAsConstructor), Weak<NativeExecutable>(nativeExecutable)); return nativeExecutable; } void JITThunks::clearHostFunctionStubs() { - m_hostFunctionStubMap = nullptr; + m_hostFunctionStubMap.clear(); } } // namespace JSC diff --git a/Source/JavaScriptCore/jit/JITThunks.h b/Source/JavaScriptCore/jit/JITThunks.h index 2e02883fa..97e7ecd6b 100644 --- a/Source/JavaScriptCore/jit/JITThunks.h +++ b/Source/JavaScriptCore/jit/JITThunks.h @@ -26,6 +26,8 @@ #ifndef JITThunks_h #define JITThunks_h +#include <wtf/Platform.h> + #if ENABLE(JIT) #include "CallData.h" @@ -34,9 +36,9 @@ #include "MacroAssemblerCodeRef.h" #include "ThunkGenerator.h" #include "Weak.h" -#include "WeakHandleOwner.h" #include "WeakInlines.h" #include <wtf/HashMap.h> +#include <wtf/OwnPtr.h> #include <wtf/RefPtr.h> #include <wtf/ThreadingPrimitives.h> @@ -45,15 +47,13 @@ namespace JSC { class VM; class NativeExecutable; -class JITThunks final : private WeakHandleOwner { - WTF_MAKE_FAST_ALLOCATED; +class JITThunks { public: JITThunks(); - virtual ~JITThunks(); + ~JITThunks(); MacroAssemblerCodePtr ctiNativeCall(VM*); MacroAssemblerCodePtr ctiNativeConstruct(VM*); - MacroAssemblerCodePtr ctiNativeTailCall(VM*); MacroAssemblerCodeRef ctiStub(VM*, ThunkGenerator); @@ -63,12 +63,14 @@ public: void clearHostFunctionStubs(); private: - void finalize(Handle<Unknown>, void* context) override; + // Main thread can hold this lock for a while, so use an adaptive mutex. 
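The typedefs below select the lock that ctiStub holds while it lazily populates the stub map; compilation can happen under the lock, hence the adaptive mutex. A reduced sketch of that get-or-create pattern in standard C++ (types are simplified stand-ins, not JSC's API):

    #include <map>
    #include <mutex>

    static std::mutex stubLock;
    static std::map<int, int> stubMap; // generator -> compiled stub, simplified to ints

    int getOrCreateStub(int generator)
    {
        std::lock_guard<std::mutex> locker(stubLock); // may be held across compilation
        auto added = stubMap.emplace(generator, 0);
        if (added.second) {
            // New entry: "compile" it while still holding the lock, so other
            // threads only ever observe fully constructed entries.
            added.first->second = generator * 2; // placeholder for the thunk generator
        }
        return added.first->second;
    }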
+ typedef Mutex Lock; + typedef MutexLocker Locker; typedef HashMap<ThunkGenerator, MacroAssemblerCodeRef> CTIStubMap; CTIStubMap m_ctiStubMap; typedef HashMap<std::pair<NativeFunction, NativeFunction>, Weak<NativeExecutable>> HostFunctionStubMap; - std::unique_ptr<HostFunctionStubMap> m_hostFunctionStubMap; + OwnPtr<HostFunctionStubMap> m_hostFunctionStubMap; Lock m_lock; }; diff --git a/Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.cpp b/Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.cpp index f7f0ab9bc..c83125da4 100644 --- a/Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.cpp +++ b/Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.cpp @@ -30,16 +30,15 @@ #include "CodeBlock.h" #include "Executable.h" -#include "JSCInlines.h" namespace JSC { JITToDFGDeferredCompilationCallback::JITToDFGDeferredCompilationCallback() { } JITToDFGDeferredCompilationCallback::~JITToDFGDeferredCompilationCallback() { } -Ref<JITToDFGDeferredCompilationCallback> JITToDFGDeferredCompilationCallback::create() +PassRefPtr<JITToDFGDeferredCompilationCallback> JITToDFGDeferredCompilationCallback::create() { - return adoptRef(*new JITToDFGDeferredCompilationCallback()); + return adoptRef(new JITToDFGDeferredCompilationCallback()); } void JITToDFGDeferredCompilationCallback::compilationDidBecomeReadyAsynchronously( @@ -65,8 +64,6 @@ void JITToDFGDeferredCompilationCallback::compilationDidComplete( codeBlock->install(); codeBlock->alternative()->setOptimizationThresholdBasedOnCompilationResult(result); - - DeferredCompilationCallback::compilationDidComplete(codeBlock, result); } } // JSC diff --git a/Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.h b/Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.h index f64b979ce..cf1c0770c 100644 --- a/Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.h +++ b/Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.h @@ -26,6 +26,8 @@ #ifndef JITToDFGDeferredCompilationCallback_h #define JITToDFGDeferredCompilationCallback_h +#include <wtf/Platform.h> + #if ENABLE(DFG_JIT) #include "DeferredCompilationCallback.h" @@ -42,7 +44,7 @@ protected: public: virtual ~JITToDFGDeferredCompilationCallback(); - static Ref<JITToDFGDeferredCompilationCallback> create(); + static PassRefPtr<JITToDFGDeferredCompilationCallback> create(); virtual void compilationDidBecomeReadyAsynchronously(CodeBlock*) override; virtual void compilationDidComplete(CodeBlock*, CompilationResult) override; diff --git a/Source/JavaScriptCore/jit/JITWriteBarrier.h b/Source/JavaScriptCore/jit/JITWriteBarrier.h index b410ecadb..ca2ca6eb2 100644 --- a/Source/JavaScriptCore/jit/JITWriteBarrier.h +++ b/Source/JavaScriptCore/jit/JITWriteBarrier.h @@ -31,7 +31,6 @@ #include "MacroAssembler.h" #include "SlotVisitor.h" #include "UnusedPointer.h" -#include "VM.h" #include "WriteBarrier.h" namespace JSC { @@ -43,7 +42,8 @@ class VM; #define JITWriteBarrierFlag ((void*)2) class JITWriteBarrierBase { public: - explicit operator bool() const { return get(); } + typedef void* (JITWriteBarrierBase::*UnspecifiedBoolType); + operator UnspecifiedBoolType*() const { return get() ? 
reinterpret_cast<UnspecifiedBoolType*>(1) : 0; } bool operator!() const { return !get(); } void setFlagOnBarrier() @@ -77,9 +77,9 @@ protected: { } - void set(VM& vm, CodeLocationDataLabelPtr location, JSCell* owner, JSCell* value) + void set(VM&, CodeLocationDataLabelPtr location, JSCell* owner, JSCell* value) { - vm.heap.writeBarrier(owner, value); + Heap::writeBarrier(owner, value); m_location = location; ASSERT(((!!m_location) && m_location.executableAddress() != JITWriteBarrierFlag) || (location.executableAddress() == m_location.executableAddress())); MacroAssembler::repatchPointer(m_location, value); diff --git a/Source/JavaScriptCore/jit/JSInterfaceJIT.h b/Source/JavaScriptCore/jit/JSInterfaceJIT.h index 9c77118a8..ac1ab7965 100644 --- a/Source/JavaScriptCore/jit/JSInterfaceJIT.h +++ b/Source/JavaScriptCore/jit/JSInterfaceJIT.h @@ -50,7 +50,9 @@ namespace JSC { } #if USE(JSVALUE32_64) - static const unsigned Int32Tag = static_cast<unsigned>(JSValue::Int32Tag); + // Can't just propagate JSValue::Int32Tag as Visual Studio doesn't like it + static const unsigned Int32Tag = 0xffffffff; + COMPILE_ASSERT(Int32Tag == JSValue::Int32Tag, Int32Tag_out_of_sync); #else static const unsigned Int32Tag = static_cast<unsigned>(TagTypeNumber >> 32); #endif @@ -71,7 +73,7 @@ namespace JSC { void emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest); #endif - Jump emitJumpIfNotType(RegisterID baseReg, JSType); + Jump emitJumpIfNotType(RegisterID baseReg, RegisterID scratchReg, JSType); void emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister); void emitPutToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry); @@ -222,9 +224,10 @@ namespace JSC { } #endif - ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfNotType(RegisterID baseReg, JSType type) + ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfNotType(RegisterID baseReg, RegisterID scratchReg, JSType type) { - return branch8(NotEqual, Address(baseReg, JSCell::typeInfoTypeOffset()), TrustedImm32(type)); + loadPtr(Address(baseReg, JSCell::structureOffset()), scratchReg); + return branch8(NotEqual, Address(scratchReg, Structure::typeInfoTypeOffset()), TrustedImm32(type)); } ALWAYS_INLINE void JSInterfaceJIT::emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from) diff --git a/Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.cpp b/Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.cpp deleted file mode 100644 index 6e55e635f..000000000 --- a/Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.cpp +++ /dev/null @@ -1,137 +0,0 @@ -/* - * Copyright (C) 2015 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC.
OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "config.h" -#include "PolymorphicCallStubRoutine.h" - -#if ENABLE(JIT) - -#include "CallLinkInfo.h" -#include "CodeBlock.h" -#include "JSCInlines.h" -#include "LinkBuffer.h" - -namespace JSC { - -PolymorphicCallNode::~PolymorphicCallNode() -{ - if (isOnList()) - remove(); -} - -void PolymorphicCallNode::unlink(RepatchBuffer& repatchBuffer) -{ - if (m_callLinkInfo) { - if (Options::showDisassembly()) - dataLog("Unlinking polymorphic call at ", m_callLinkInfo->callReturnLocation(), ", ", m_callLinkInfo->codeOrigin(), "\n"); - - m_callLinkInfo->unlink(repatchBuffer); - } - - if (isOnList()) - remove(); -} - -void PolymorphicCallNode::clearCallLinkInfo() -{ - if (Options::showDisassembly()) - dataLog("Clearing call link info for polymorphic call at ", m_callLinkInfo->callReturnLocation(), ", ", m_callLinkInfo->codeOrigin(), "\n"); - - m_callLinkInfo = nullptr; -} - -void PolymorphicCallCase::dump(PrintStream& out) const -{ - out.print("<variant = ", m_variant, ", codeBlock = ", pointerDump(m_codeBlock), ">"); -} - -PolymorphicCallStubRoutine::PolymorphicCallStubRoutine( - const MacroAssemblerCodeRef& codeRef, VM& vm, const JSCell* owner, ExecState* callerFrame, - CallLinkInfo& info, const Vector<PolymorphicCallCase>& cases, - std::unique_ptr<uint32_t[]> fastCounts) - : GCAwareJITStubRoutine(codeRef, vm) - , m_fastCounts(WTF::move(fastCounts)) -{ - for (PolymorphicCallCase callCase : cases) { - m_variants.append(WriteBarrier<JSCell>(vm, owner, callCase.variant().rawCalleeCell())); - if (shouldShowDisassemblyFor(callerFrame->codeBlock())) - dataLog("Linking polymorphic call in ", *callerFrame->codeBlock(), " at ", callerFrame->codeOrigin(), " to ", callCase.variant(), ", codeBlock = ", pointerDump(callCase.codeBlock()), "\n"); - if (CodeBlock* codeBlock = callCase.codeBlock()) - codeBlock->linkIncomingPolymorphicCall(callerFrame, m_callNodes.add(&info)); - } - m_variants.shrinkToFit(); - WTF::storeStoreFence(); -} - -PolymorphicCallStubRoutine::~PolymorphicCallStubRoutine() { } - -CallVariantList PolymorphicCallStubRoutine::variants() const -{ - CallVariantList result; - for (size_t i = 0; i < m_variants.size(); ++i) - result.append(CallVariant(m_variants[i].get())); - return result; -} - -CallEdgeList PolymorphicCallStubRoutine::edges() const -{ - // We wouldn't have these if this was an FTL stub routine. We shouldn't be asking for profiling - // from the FTL. - RELEASE_ASSERT(m_fastCounts); - - CallEdgeList result; - for (size_t i = 0; i < m_variants.size(); ++i) - result.append(CallEdge(CallVariant(m_variants[i].get()), m_fastCounts[i])); - return result; -} - -void PolymorphicCallStubRoutine::clearCallNodesFor(CallLinkInfo* info) -{ - for (Bag<PolymorphicCallNode>::iterator iter = m_callNodes.begin(); !!iter; ++iter) { - PolymorphicCallNode& node = **iter; - // All nodes should point to info, but okay to be a little paranoid. 
- if (node.hasCallLinkInfo(info)) - node.clearCallLinkInfo(); - } -} - -bool PolymorphicCallStubRoutine::visitWeak(RepatchBuffer&) -{ - for (auto& variant : m_variants) { - if (!Heap::isMarked(variant.get())) - return false; - } - return true; -} - -void PolymorphicCallStubRoutine::markRequiredObjectsInternal(SlotVisitor& visitor) -{ - for (auto& variant : m_variants) - visitor.append(&variant); -} - -} // namespace JSC - -#endif // ENABLE(JIT) diff --git a/Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.h b/Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.h deleted file mode 100644 index 31492b614..000000000 --- a/Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.h +++ /dev/null @@ -1,115 +0,0 @@ -/* - * Copyright (C) 2015 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#ifndef PolymorphicCallStubRoutine_h -#define PolymorphicCallStubRoutine_h - -#if ENABLE(JIT) - -#include "CallEdge.h" -#include "CallVariant.h" -#include "CodeOrigin.h" -#include "GCAwareJITStubRoutine.h" -#include <wtf/FastMalloc.h> -#include <wtf/Noncopyable.h> -#include <wtf/Vector.h> - -namespace JSC { - -class CallLinkInfo; - -class PolymorphicCallNode : public BasicRawSentinelNode<PolymorphicCallNode> { - WTF_MAKE_NONCOPYABLE(PolymorphicCallNode); -public: - PolymorphicCallNode(CallLinkInfo* info) - : m_callLinkInfo(info) - { - } - - ~PolymorphicCallNode(); - - void unlink(RepatchBuffer&); - - bool hasCallLinkInfo(CallLinkInfo* info) { return m_callLinkInfo == info; } - void clearCallLinkInfo(); - -private: - CallLinkInfo* m_callLinkInfo; -}; - -class PolymorphicCallCase { -public: - PolymorphicCallCase() - : m_codeBlock(nullptr) - { - } - - PolymorphicCallCase(CallVariant variant, CodeBlock* codeBlock) - : m_variant(variant) - , m_codeBlock(codeBlock) - { - } - - CallVariant variant() const { return m_variant; } - CodeBlock* codeBlock() const { return m_codeBlock; } - - void dump(PrintStream&) const; - -private: - CallVariant m_variant; - CodeBlock* m_codeBlock; -}; - -class PolymorphicCallStubRoutine : public GCAwareJITStubRoutine { -public: - PolymorphicCallStubRoutine( - const MacroAssemblerCodeRef&, VM&, const JSCell* owner, - ExecState* callerFrame, CallLinkInfo&, const Vector<PolymorphicCallCase>&, - std::unique_ptr<uint32_t[]> fastCounts); - - virtual ~PolymorphicCallStubRoutine(); - - CallVariantList variants() const; - CallEdgeList edges() const; - - void clearCallNodesFor(CallLinkInfo*); - - bool visitWeak(RepatchBuffer&) override; - -protected: - virtual void markRequiredObjectsInternal(SlotVisitor&) override; - -private: - Vector<WriteBarrier<JSCell>, 2> m_variants; - std::unique_ptr<uint32_t[]> m_fastCounts; - Bag<PolymorphicCallNode> m_callNodes; -}; - -} // namespace JSC - -#endif // ENABLE(JIT) - -#endif // PolymorphicCallStubRoutine_h - diff --git a/Source/JavaScriptCore/jit/Reg.cpp b/Source/JavaScriptCore/jit/Reg.cpp deleted file mode 100644 index 6c0258061..000000000 --- a/Source/JavaScriptCore/jit/Reg.cpp +++ /dev/null @@ -1,49 +0,0 @@ -/* - * Copyright (C) 2014 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include "config.h" -#include "Reg.h" - -#if ENABLE(JIT) - -#include "FPRInfo.h" -#include "GPRInfo.h" - -namespace JSC { - -void Reg::dump(PrintStream& out) const -{ - if (!*this) - out.print("<none>"); - else if (isGPR()) - out.print(gpr()); - else - out.print(fpr()); -} - -} // namespace JSC - -#endif // ENABLE(JIT) - diff --git a/Source/JavaScriptCore/jit/Reg.h b/Source/JavaScriptCore/jit/Reg.h deleted file mode 100644 index 0704e2ec3..000000000 --- a/Source/JavaScriptCore/jit/Reg.h +++ /dev/null @@ -1,176 +0,0 @@ -/* - * Copyright (C) 2014 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef Reg_h -#define Reg_h - -#if ENABLE(JIT) - -#include "MacroAssembler.h" - -namespace JSC { - -// Reg is a polymorphic register class. It can refer to either integer or float registers. -// Here are some use cases: -// -// GPRReg gpr; -// Reg reg = gpr; -// reg.isSet() == true -// reg.isGPR() == true -// reg.isFPR() == false -// -// for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) { -// if (reg.isGPR()) { -// } else /* reg.isFPR() */ { -// } -// } -// -// The above loop could have also used !!reg or reg.isSet() as a condition. 
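Reduced to a sketch, the single-index encoding the class relies on packs GPR indices first and FPR indices after them, so both predicates are plain range checks (the register counts here are assumptions for illustration; the real ones come from MacroAssembler):

    #include <cassert>

    constexpr unsigned numGPRs = 16; // assumed counts, for illustration only
    constexpr unsigned numFPRs = 16;

    struct MiniReg {
        unsigned index; // GPRs occupy [0, numGPRs), FPRs the range after
        bool isGPR() const { return index < numGPRs; }
        // Unsigned wraparound makes this false for any GPR index.
        bool isFPR() const { return index - numGPRs < numFPRs; }
    };

    int main()
    {
        MiniReg gpr { 3 };
        MiniReg fpr { numGPRs + 1 };
        assert(gpr.isGPR() && !gpr.isFPR());
        assert(!fpr.isGPR() && fpr.isFPR());
        return 0;
    }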
- -class Reg { -public: - Reg() - : m_index(invalid()) - { - } - - Reg(MacroAssembler::RegisterID reg) - : m_index(MacroAssembler::registerIndex(reg)) - { - } - - Reg(MacroAssembler::FPRegisterID reg) - : m_index(MacroAssembler::registerIndex(reg)) - { - } - - static Reg fromIndex(unsigned index) - { - Reg result; - result.m_index = index; - return result; - } - - static Reg first() - { - Reg result; - result.m_index = 0; - return result; - } - - static Reg last() - { - Reg result; - result.m_index = MacroAssembler::numberOfRegisters() + MacroAssembler::numberOfFPRegisters() - 1; - return result; - } - - Reg next() const - { - ASSERT(!!*this); - if (*this == last()) - return Reg(); - Reg result; - result.m_index = m_index + 1; - return result; - } - - unsigned index() const { return m_index; } - - bool isSet() const { return m_index != invalid(); } - bool operator!() const { return !isSet(); } - - bool isGPR() const - { - return m_index < MacroAssembler::numberOfRegisters(); - } - - bool isFPR() const - { - return (m_index - MacroAssembler::numberOfRegisters()) < MacroAssembler::numberOfFPRegisters(); - } - - MacroAssembler::RegisterID gpr() const - { - ASSERT(isGPR()); - return static_cast<MacroAssembler::RegisterID>(MacroAssembler::firstRegister() + m_index); - } - - MacroAssembler::FPRegisterID fpr() const - { - ASSERT(isFPR()); - return static_cast<MacroAssembler::FPRegisterID>( - MacroAssembler::firstFPRegister() + (m_index - MacroAssembler::numberOfRegisters())); - } - - bool operator==(const Reg& other) const - { - return m_index == other.m_index; - } - - bool operator!=(const Reg& other) const - { - return m_index != other.m_index; - } - - bool operator<(const Reg& other) const - { - return m_index < other.m_index; - } - - bool operator>(const Reg& other) const - { - return m_index > other.m_index; - } - - bool operator<=(const Reg& other) const - { - return m_index <= other.m_index; - } - - bool operator>=(const Reg& other) const - { - return m_index >= other.m_index; - } - - unsigned hash() const - { - return m_index; - } - - void dump(PrintStream&) const; - -private: - static uint8_t invalid() { return 0xff; } - - uint8_t m_index; -}; - -} // namespace JSC - -#endif // ENABLE(JIT) - -#endif // Reg_h - diff --git a/Source/JavaScriptCore/jit/RegisterPreservationWrapperGenerator.cpp b/Source/JavaScriptCore/jit/RegisterPreservationWrapperGenerator.cpp deleted file mode 100644 index 070b5cb25..000000000 --- a/Source/JavaScriptCore/jit/RegisterPreservationWrapperGenerator.cpp +++ /dev/null @@ -1,236 +0,0 @@ -/* - * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "config.h" -#include "RegisterPreservationWrapperGenerator.h" - -#if ENABLE(JIT) - -#include "AssemblyHelpers.h" -#include "LinkBuffer.h" -#include "JSCInlines.h" -#include "StackAlignment.h" - -namespace JSC { - -RegisterSet registersToPreserve() -{ - RegisterSet calleeSaves = RegisterSet::calleeSaveRegisters(); - - // No need to preserve FP since that always gets preserved anyway. - calleeSaves.clear(GPRInfo::callFrameRegister); - - return calleeSaves; -} - -ptrdiff_t registerPreservationOffset() -{ - unsigned numberOfCalleeSaves = registersToPreserve().numberOfSetRegisters(); - - // Need to preserve the old return PC. - unsigned numberOfValuesToSave = numberOfCalleeSaves + 1; - - // Alignment. Preserve the same alignment invariants that the caller imposed. - unsigned numberOfNewStackSlots = - WTF::roundUpToMultipleOf(stackAlignmentRegisters(), numberOfValuesToSave); - - return sizeof(Register) * numberOfNewStackSlots; -} - -MacroAssemblerCodeRef generateRegisterPreservationWrapper(VM& vm, ExecutableBase* executable, MacroAssemblerCodePtr target) -{ -#if ENABLE(FTL_JIT) - // We shouldn't ever be generating wrappers for native functions. - RegisterSet toSave = registersToPreserve(); - ptrdiff_t offset = registerPreservationOffset(); - - AssemblyHelpers jit(&vm, 0); - - jit.preserveReturnAddressAfterCall(GPRInfo::regT1); - jit.load32( - AssemblyHelpers::Address( - AssemblyHelpers::stackPointerRegister, - (JSStack::ArgumentCount - JSStack::CallerFrameAndPCSize) * sizeof(Register) + PayloadOffset), - GPRInfo::regT2); - - // Place the stack pointer where we want it to be. - jit.subPtr(AssemblyHelpers::TrustedImm32(offset), AssemblyHelpers::stackPointerRegister); - - // Compute the number of things we will be copying. - jit.add32( - AssemblyHelpers::TrustedImm32( - JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize), - GPRInfo::regT2); - - ASSERT(!toSave.get(GPRInfo::regT4)); - jit.move(AssemblyHelpers::stackPointerRegister, GPRInfo::regT4); - - AssemblyHelpers::Label loop = jit.label(); - jit.sub32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2); - jit.load64(AssemblyHelpers::Address(GPRInfo::regT4, offset), GPRInfo::regT0); - jit.store64(GPRInfo::regT0, GPRInfo::regT4); - jit.addPtr(AssemblyHelpers::TrustedImm32(sizeof(Register)), GPRInfo::regT4); - jit.branchTest32(AssemblyHelpers::NonZero, GPRInfo::regT2).linkTo(loop, &jit); - - // At this point regT4 + offset points to where we save things. 
- ptrdiff_t currentOffset = 0; - jit.storePtr(GPRInfo::regT1, AssemblyHelpers::Address(GPRInfo::regT4, currentOffset)); - - for (GPRReg gpr = AssemblyHelpers::firstRegister(); gpr <= AssemblyHelpers::lastRegister(); gpr = static_cast<GPRReg>(gpr + 1)) { - if (!toSave.get(gpr)) - continue; - currentOffset += sizeof(Register); - jit.store64(gpr, AssemblyHelpers::Address(GPRInfo::regT4, currentOffset)); - } - for (FPRReg fpr = AssemblyHelpers::firstFPRegister(); fpr <= AssemblyHelpers::lastFPRegister(); fpr = static_cast<FPRReg>(fpr + 1)) { - if (!toSave.get(fpr)) - continue; - currentOffset += sizeof(Register); - jit.storeDouble(fpr, AssemblyHelpers::Address(GPRInfo::regT4, currentOffset)); - } - - // Assume that there aren't any saved FP registers. - - // Restore the tag registers. - jit.move(AssemblyHelpers::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister); - jit.add64(AssemblyHelpers::TrustedImm32(TagMask - TagTypeNumber), GPRInfo::tagTypeNumberRegister, GPRInfo::tagMaskRegister); - - jit.move( - AssemblyHelpers::TrustedImmPtr( - vm.getCTIStub(registerRestorationThunkGenerator).code().executableAddress()), - GPRInfo::nonArgGPR0); - jit.restoreReturnAddressBeforeReturn(GPRInfo::nonArgGPR0); - AssemblyHelpers::Jump jump = jit.jump(); - - LinkBuffer linkBuffer(vm, jit, GLOBAL_THUNK_ID); - linkBuffer.link(jump, CodeLocationLabel(target)); - - if (Options::verboseFTLToJSThunk()) - dataLog("Need a thunk for calls from FTL to non-FTL version of ", *executable, "\n"); - - return FINALIZE_DFG_CODE(linkBuffer, ("Register preservation wrapper for %s/%s, %p", toCString(executable->hashFor(CodeForCall)).data(), toCString(executable->hashFor(CodeForConstruct)).data(), target.executableAddress())); -#else // ENABLE(FTL_JIT) - UNUSED_PARAM(vm); - UNUSED_PARAM(executable); - UNUSED_PARAM(target); - // We don't support non-FTL builds for two reasons: - // - It just so happens that currently only the FTL bottoms out in this code. - // - The code above uses 64-bit instructions. It doesn't necessarily have to; it would be - // easy to change it so that it doesn't. But obviously making that change would be a - // prerequisite to removing this #if. - UNREACHABLE_FOR_PLATFORM(); - return MacroAssemblerCodeRef(); -#endif // ENABLE(FTL_JIT) -} - -static void generateRegisterRestoration(AssemblyHelpers& jit) -{ -#if ENABLE(FTL_JIT) - RegisterSet toSave = registersToPreserve(); - ptrdiff_t offset = registerPreservationOffset(); - - ASSERT(!toSave.get(GPRInfo::regT4)); - - // We need to place the stack pointer back to where the caller thought they left it. - // But also, in order to recover the registers, we need to figure out how big the - // arguments area is. 
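The offset being unwound here is the one registerPreservationOffset computed earlier in this file: the callee saves plus the old return PC, rounded up so the caller's stack alignment survives. A worked instance of that arithmetic (the register count is an assumed example matching x86-64's six callee-save GPRs):

    #include <cstddef>

    constexpr size_t roundUpToMultipleOf(size_t divisor, size_t x)
    {
        return (x + divisor - 1) / divisor * divisor;
    }

    constexpr size_t calleeSaves = 6;                // e.g. rbx, rbp, r12-r15 on x86-64
    constexpr size_t valuesToSave = calleeSaves + 1; // plus the old return PC
    constexpr size_t alignmentRegisters = 2;         // 16-byte alignment / 8-byte registers
    constexpr size_t slots = roundUpToMultipleOf(alignmentRegisters, valuesToSave);

    // Seven values round up to eight slots, so the wrapper shifts the stack by
    // 64 bytes (8 bytes per Register on 64-bit).
    static_assert(slots * 8 == 64, "expected a 64-byte preservation area");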
- - jit.load32( - AssemblyHelpers::Address( - AssemblyHelpers::stackPointerRegister, - (JSStack::ArgumentCount - JSStack::CallerFrameAndPCSize) * sizeof(Register) + PayloadOffset), - GPRInfo::regT4); - - jit.move(GPRInfo::regT4, GPRInfo::regT2); - jit.lshift32(AssemblyHelpers::TrustedImm32(3), GPRInfo::regT2); - - jit.addPtr(AssemblyHelpers::TrustedImm32(offset), AssemblyHelpers::stackPointerRegister); - jit.addPtr(AssemblyHelpers::stackPointerRegister, GPRInfo::regT2); - - // We saved things at: - // - // adjSP + (JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize + NumArgs) * 8 - // - // Where: - // - // adjSP = origSP - offset - // - // regT2 now points at: - // - // origSP + NumArgs * 8 - // = adjSP + offset + NumArgs * 8 - // - // So if we subtract offset and then add JSStack::CallFrameHeaderSize and subtract - // JSStack::CallerFrameAndPCSize, we'll get the thing we want. - ptrdiff_t currentOffset = -offset + sizeof(Register) * ( - JSStack::CallFrameHeaderSize - JSStack::CallerFrameAndPCSize); - jit.loadPtr(AssemblyHelpers::Address(GPRInfo::regT2, currentOffset), GPRInfo::regT1); - - for (GPRReg gpr = AssemblyHelpers::firstRegister(); gpr <= AssemblyHelpers::lastRegister(); gpr = static_cast<GPRReg>(gpr + 1)) { - if (!toSave.get(gpr)) - continue; - currentOffset += sizeof(Register); - jit.load64(AssemblyHelpers::Address(GPRInfo::regT2, currentOffset), gpr); - } - for (FPRReg fpr = AssemblyHelpers::firstFPRegister(); fpr <= AssemblyHelpers::lastFPRegister(); fpr = static_cast<FPRReg>(fpr + 1)) { - if (!toSave.get(fpr)) - continue; - currentOffset += sizeof(Register); - jit.loadDouble(AssemblyHelpers::Address(GPRInfo::regT2, currentOffset), fpr); - } - - // Thunks like this rely on the ArgumentCount being intact. Pay it forward. - jit.store32( - GPRInfo::regT4, - AssemblyHelpers::Address( - AssemblyHelpers::stackPointerRegister, - (JSStack::ArgumentCount - JSStack::CallerFrameAndPCSize) * sizeof(Register) + PayloadOffset)); - - if (!ASSERT_DISABLED) { - AssemblyHelpers::Jump ok = jit.branchPtr( - AssemblyHelpers::Above, GPRInfo::regT1, AssemblyHelpers::TrustedImmPtr(static_cast<size_t>(0x1000))); - jit.abortWithReason(RPWUnreasonableJumpTarget); - ok.link(&jit); - } - - jit.jump(GPRInfo::regT1); -#else // ENABLE(FTL_JIT) - UNUSED_PARAM(jit); - UNREACHABLE_FOR_PLATFORM(); -#endif // ENABLE(FTL_JIT) -} - -MacroAssemblerCodeRef registerRestorationThunkGenerator(VM* vm) -{ - AssemblyHelpers jit(vm, 0); - generateRegisterRestoration(jit); - LinkBuffer linkBuffer(*vm, jit, GLOBAL_THUNK_ID); - return FINALIZE_CODE(linkBuffer, ("Register restoration thunk")); -} - -} // namespace JSC - -#endif // ENABLE(JIT) - diff --git a/Source/JavaScriptCore/jit/RegisterPreservationWrapperGenerator.h b/Source/JavaScriptCore/jit/RegisterPreservationWrapperGenerator.h deleted file mode 100644 index 8ee1fc6af..000000000 --- a/Source/JavaScriptCore/jit/RegisterPreservationWrapperGenerator.h +++ /dev/null @@ -1,51 +0,0 @@ -/* - * Copyright (C) 2013 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. 
- * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef RegisterPreservationWrapperGenerator_h -#define RegisterPreservationWrapperGenerator_h - -#if ENABLE(JIT) - -#include "ArityCheckMode.h" -#include "AssemblyHelpers.h" -#include "MacroAssemblerCodeRef.h" -#include "VM.h" - -namespace JSC { - -RegisterSet registersToPreserve(); - -ptrdiff_t registerPreservationOffset(); - -MacroAssemblerCodeRef generateRegisterPreservationWrapper(VM&, ExecutableBase*, MacroAssemblerCodePtr target); - -MacroAssemblerCodeRef registerRestorationThunkGenerator(VM*); - -} // namespace JSC - -#endif // ENABLE(JIT) - -#endif // RegisterPreservationWrapperGenerator_h - diff --git a/Source/JavaScriptCore/jit/RegisterSet.cpp b/Source/JavaScriptCore/jit/RegisterSet.cpp index 6302e261d..362ada0de 100644 --- a/Source/JavaScriptCore/jit/RegisterSet.cpp +++ b/Source/JavaScriptCore/jit/RegisterSet.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. + * Copyright (C) 2013 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -30,92 +30,39 @@ #include "GPRInfo.h" #include "MacroAssembler.h" -#include "JSCInlines.h" -#include <wtf/CommaPrinter.h> namespace JSC { RegisterSet RegisterSet::stackRegisters() { - return RegisterSet( - MacroAssembler::stackPointerRegister, - MacroAssembler::framePointerRegister); -} - -RegisterSet RegisterSet::reservedHardwareRegisters() -{ -#if CPU(ARM64) - return RegisterSet(ARM64Registers::lr); -#else - return RegisterSet(); -#endif + RegisterSet result; + result.set(MacroAssembler::stackPointerRegister); + result.set(MacroAssembler::framePointerRegister); + return result; } -RegisterSet RegisterSet::runtimeRegisters() +RegisterSet RegisterSet::specialRegisters() { + RegisterSet result; + result.merge(stackRegisters()); + result.set(GPRInfo::callFrameRegister); #if USE(JSVALUE64) - return RegisterSet(GPRInfo::tagTypeNumberRegister, GPRInfo::tagMaskRegister); -#else - return RegisterSet(); + result.set(GPRInfo::tagTypeNumberRegister); + result.set(GPRInfo::tagMaskRegister); #endif -} - -RegisterSet RegisterSet::specialRegisters() -{ - return RegisterSet( - stackRegisters(), reservedHardwareRegisters(), runtimeRegisters()); + return result; } RegisterSet RegisterSet::calleeSaveRegisters() { RegisterSet result; -#if CPU(X86) - result.set(X86Registers::ebx); - result.set(X86Registers::ebp); - result.set(X86Registers::edi); - result.set(X86Registers::esi); -#elif CPU(X86_64) +#if CPU(X86_64) result.set(X86Registers::ebx); result.set(X86Registers::ebp); result.set(X86Registers::r12); result.set(X86Registers::r13); result.set(X86Registers::r14); result.set(X86Registers::r15); -#elif CPU(ARM_THUMB2) - result.set(ARMRegisters::r4); - 
result.set(ARMRegisters::r5); - result.set(ARMRegisters::r6); - result.set(ARMRegisters::r8); -#if !PLATFORM(IOS) - result.set(ARMRegisters::r9); -#endif - result.set(ARMRegisters::r10); - result.set(ARMRegisters::r11); -#elif CPU(ARM_TRADITIONAL) - result.set(ARMRegisters::r4); - result.set(ARMRegisters::r5); - result.set(ARMRegisters::r6); - result.set(ARMRegisters::r7); - result.set(ARMRegisters::r8); - result.set(ARMRegisters::r9); - result.set(ARMRegisters::r10); - result.set(ARMRegisters::r11); -#elif CPU(ARM64) - // We don't include LR in the set of callee-save registers even though it technically belongs - // there. This is because we use this set to describe the set of registers that need to be saved - // beyond what you would save by the platform-agnostic "preserve return address" and "restore - // return address" operations in CCallHelpers. - for ( - ARM64Registers::RegisterID reg = ARM64Registers::x19; - reg <= ARM64Registers::x28; - reg = static_cast<ARM64Registers::RegisterID>(reg + 1)) - result.set(reg); - result.set(ARM64Registers::fp); - for ( - ARM64Registers::FPRegisterID reg = ARM64Registers::q8; - reg <= ARM64Registers::q15; - reg = static_cast<ARM64Registers::FPRegisterID>(reg + 1)) - result.set(reg); #else UNREACHABLE_FOR_PLATFORM(); #endif @@ -146,29 +93,9 @@ RegisterSet RegisterSet::allRegisters() return result; } -size_t RegisterSet::numberOfSetGPRs() const -{ - RegisterSet temp = *this; - temp.filter(allGPRs()); - return temp.numberOfSetRegisters(); -} - -size_t RegisterSet::numberOfSetFPRs() const -{ - RegisterSet temp = *this; - temp.filter(allFPRs()); - return temp.numberOfSetRegisters(); -} - void RegisterSet::dump(PrintStream& out) const { - CommaPrinter comma; - out.print("["); - for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) { - if (get(reg)) - out.print(comma, reg); - } - out.print("]"); + m_vector.dump(out); } } // namespace JSC diff --git a/Source/JavaScriptCore/jit/RegisterSet.h b/Source/JavaScriptCore/jit/RegisterSet.h index 44bfecd87..84ad226ad 100644 --- a/Source/JavaScriptCore/jit/RegisterSet.h +++ b/Source/JavaScriptCore/jit/RegisterSet.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2013, 2014 Apple Inc. All rights reserved. + * Copyright (C) 2013 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,12 +26,13 @@ #ifndef RegisterSet_h #define RegisterSet_h +#include <wtf/Platform.h> + #if ENABLE(JIT) #include "FPRInfo.h" #include "GPRInfo.h" #include "MacroAssembler.h" -#include "Reg.h" #include "TempRegisterSet.h" #include <wtf/BitVector.h> @@ -39,25 +40,18 @@ namespace JSC { class RegisterSet { public: - template<typename... Regs> - explicit RegisterSet(Regs... regs) - { - setMany(regs...); - } + RegisterSet() { } static RegisterSet stackRegisters(); - static RegisterSet reservedHardwareRegisters(); - static RegisterSet runtimeRegisters(); - static RegisterSet specialRegisters(); // The union of stack, reserved hardware, and runtime registers. 
+ static RegisterSet specialRegisters(); static RegisterSet calleeSaveRegisters(); static RegisterSet allGPRs(); static RegisterSet allFPRs(); static RegisterSet allRegisters(); - - void set(Reg reg, bool value = true) + + void set(GPRReg reg, bool value = true) { - ASSERT(!!reg); - m_vector.set(reg.index(), value); + m_vector.set(MacroAssembler::registerIndex(reg), value); } void set(JSValueRegs regs) @@ -67,24 +61,29 @@ public: set(regs.payloadGPR()); } - void clear(Reg reg) + void clear(GPRReg reg) { - ASSERT(!!reg); set(reg, false); } - bool get(Reg reg) const + bool get(GPRReg reg) const { return m_vector.get(MacroAssembler::registerIndex(reg)); } + + void set(FPRReg reg, bool value = true) + { + m_vector.set(MacroAssembler::registerIndex(reg), value); + } + + void clear(FPRReg reg) { - ASSERT(!!reg); - return m_vector.get(reg.index()); + set(reg, false); } + bool get(FPRReg reg) const { return m_vector.get(MacroAssembler::registerIndex(reg)); } + void merge(const RegisterSet& other) { m_vector.merge(other.m_vector); } void filter(const RegisterSet& other) { m_vector.filter(other.m_vector); } void exclude(const RegisterSet& other) { m_vector.exclude(other.m_vector); } - size_t numberOfSetGPRs() const; - size_t numberOfSetFPRs() const; size_t numberOfSetRegisters() const { return m_vector.bitCount(); } void dump(PrintStream&) const; @@ -109,16 +108,6 @@ public: unsigned hash() const { return m_vector.hash(); } private: - void setAny(Reg reg) { set(reg); } - void setAny(const RegisterSet& set) { merge(set); } - void setMany() { } - template<typename RegType, typename... Regs> - void setMany(RegType reg, Regs... regs) - { - setAny(reg); - setMany(regs...); - } - BitVector m_vector; }; diff --git a/Source/JavaScriptCore/jit/Repatch.cpp b/Source/JavaScriptCore/jit/Repatch.cpp index 762f39145..9c31722e8 100644 --- a/Source/JavaScriptCore/jit/Repatch.cpp +++ b/Source/JavaScriptCore/jit/Repatch.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011-2015 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,29 +28,21 @@ #if ENABLE(JIT) -#include "AccessorCallJITStubRoutine.h" -#include "BinarySwitch.h" #include "CCallHelpers.h" +#include "CallFrameInlines.h" #include "DFGOperations.h" #include "DFGSpeculativeJIT.h" #include "FTLThunks.h" #include "GCAwareJITStubRoutine.h" -#include "GetterSetter.h" -#include "JIT.h" -#include "JITInlines.h" #include "LinkBuffer.h" -#include "JSCInlines.h" -#include "PolymorphicGetByIdList.h" +#include "Operations.h" #include "PolymorphicPutByIdList.h" -#include "RegExpMatchesArray.h" #include "RepatchBuffer.h" #include "ScratchRegisterAllocator.h" #include "StackAlignment.h" #include "StructureRareDataInlines.h" #include "StructureStubClearingWatchpoint.h" #include "ThunkGenerators.h" -#include <wtf/CommaPrinter.h> -#include <wtf/ListDump.h> #include <wtf/StringPrintStream.h> namespace JSC { @@ -59,6 +51,11 @@ namespace JSC { // that would ordinarily have well-known values: // - tagTypeNumberRegister // - tagMaskRegister +// - callFrameRegister ** +// +// We currently only use the callFrameRegister for closure call patching, and we're not going to +// give the FTL closure call patching support until we switch to the C stack - but when we do that, +// callFrameRegister will disappear. 
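// For orientation (editorial sketch, not part of this patch): in ordinary
// JSVALUE64 JIT code those registers are pinned to constants,
//
//     tagTypeNumberRegister == TagTypeNumber   // 0xffff000000000000
//     tagMaskRegister       == TagTypeNumber | TagBitTypeOther
//
// so boxing an int32 is a single OR and a cell check is a single mask-and-test.
// The stubs below cannot assume those values are still live, hence the warning.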
static FunctionPtr readCallTarget(RepatchBuffer& repatchBuffer, CodeLocationCall call) { @@ -100,21 +97,19 @@ static void repatchCall(CodeBlock* codeblock, CodeLocationCall call, FunctionPtr repatchCall(repatchBuffer, call, newCalleeFunction); } -static void repatchByIdSelfAccess( - VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, Structure* structure, - const Identifier& propertyName, PropertyOffset offset, const FunctionPtr &slowPathFunction, - bool compact) +static void repatchByIdSelfAccess(VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, Structure* structure, const Identifier& propertyName, PropertyOffset offset, + const FunctionPtr &slowPathFunction, bool compact) { - if (structure->needImpurePropertyWatchpoint()) + if (structure->typeInfo().newImpurePropertyFiresWatchpoints()) vm.registerWatchpointForImpureProperty(propertyName, stubInfo.addWatchpoint(codeBlock)); - + RepatchBuffer repatchBuffer(codeBlock); // Only optimize once! repatchCall(repatchBuffer, stubInfo.callReturnLocation, slowPathFunction); // Patch the structure check & the offset of the load. - repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(-(intptr_t)stubInfo.patch.deltaCheckImmToCall), bitwise_cast<int32_t>(structure->id())); + repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelPtrAtOffset(-(intptr_t)stubInfo.patch.deltaCheckImmToCall), structure); repatchBuffer.setLoadInstructionIsActive(stubInfo.callReturnLocation.convertibleLoadAtOffset(stubInfo.patch.deltaCallToStorageLoad), isOutOfLineOffset(offset)); #if USE(JSVALUE64) if (compact) @@ -132,41 +127,54 @@ static void repatchByIdSelfAccess( #endif } -static void checkObjectPropertyCondition( - const ObjectPropertyCondition& condition, CodeBlock* codeBlock, StructureStubInfo& stubInfo, +static void addStructureTransitionCheck( + JSCell* object, Structure* structure, CodeBlock* codeBlock, StructureStubInfo& stubInfo, MacroAssembler& jit, MacroAssembler::JumpList& failureCases, GPRReg scratchGPR) { - if (condition.isWatchableAssumingImpurePropertyWatchpoint()) { - condition.object()->structure()->addTransitionWatchpoint( - stubInfo.addWatchpoint(codeBlock, condition)); + if (object->structure() == structure && structure->transitionWatchpointSetIsStillValid()) { + structure->addTransitionWatchpoint(stubInfo.addWatchpoint(codeBlock)); +#if !ASSERT_DISABLED + // If we execute this code, the object must have the structure we expect. Assert + // this in debug modes. 
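// (The pattern that follows: load the known object, compare its structure
// against the expected one, and branch over a jit.breakpoint() on a match.
// The trap only exists in debug builds; the whole block compiles away when
// assertions are disabled.)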
+ jit.move(MacroAssembler::TrustedImmPtr(object), scratchGPR); + MacroAssembler::Jump ok = jit.branchPtr( + MacroAssembler::Equal, + MacroAssembler::Address(scratchGPR, JSCell::structureOffset()), + MacroAssembler::TrustedImmPtr(structure)); + jit.breakpoint(); + ok.link(&jit); +#endif return; } - - Structure* structure = condition.object()->structure(); - RELEASE_ASSERT(condition.structureEnsuresValidityAssumingImpurePropertyWatchpoint(structure)); - jit.move(MacroAssembler::TrustedImmPtr(condition.object()), scratchGPR); + + jit.move(MacroAssembler::TrustedImmPtr(object), scratchGPR); failureCases.append( - branchStructure( - jit, MacroAssembler::NotEqual, - MacroAssembler::Address(scratchGPR, JSCell::structureIDOffset()), structure)); + jit.branchPtr( + MacroAssembler::NotEqual, + MacroAssembler::Address(scratchGPR, JSCell::structureOffset()), + MacroAssembler::TrustedImmPtr(structure))); } -static void checkObjectPropertyConditions( - const ObjectPropertyConditionSet& set, CodeBlock* codeBlock, StructureStubInfo& stubInfo, +static void addStructureTransitionCheck( + JSValue prototype, CodeBlock* codeBlock, StructureStubInfo& stubInfo, MacroAssembler& jit, MacroAssembler::JumpList& failureCases, GPRReg scratchGPR) { - for (const ObjectPropertyCondition& condition : set) { - checkObjectPropertyCondition( - condition, codeBlock, stubInfo, jit, failureCases, scratchGPR); - } + if (prototype.isNull()) + return; + + ASSERT(prototype.isCell()); + + addStructureTransitionCheck( + prototype.asCell(), prototype.asCell()->structure(), codeBlock, stubInfo, jit, + failureCases, scratchGPR); } static void replaceWithJump(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo, const MacroAssemblerCodePtr target) { - if (MacroAssembler::canJumpReplacePatchableBranch32WithPatch()) { + if (MacroAssembler::canJumpReplacePatchableBranchPtrWithPatch()) { repatchBuffer.replaceWithJump( - RepatchBuffer::startOfPatchableBranch32WithPatchOnAddress( - stubInfo.callReturnLocation.dataLabel32AtOffset( + RepatchBuffer::startOfPatchableBranchPtrWithPatchOnAddress( + stubInfo.callReturnLocation.dataLabelPtrAtOffset( -(intptr_t)stubInfo.patch.deltaCheckImmToCall)), CodeLocationLabel(target)); return; @@ -215,554 +223,309 @@ static void linkRestoreScratch(LinkBuffer& patchBuffer, bool needToRestoreScratc linkRestoreScratch(patchBuffer, needToRestoreScratch, success, fail, failureCases, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone), stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase)); } -enum ByIdStubKind { - GetValue, - GetUndefined, - CallGetter, - CallCustomGetter, - CallSetter, - CallCustomSetter +enum ProtoChainGenerationResult { + ProtoChainGenerationFailed, + ProtoChainGenerationSucceeded }; -static const char* toString(ByIdStubKind kind) -{ - switch (kind) { - case GetValue: - return "GetValue"; - case GetUndefined: - return "GetUndefined"; - case CallGetter: - return "CallGetter"; - case CallCustomGetter: - return "CallCustomGetter"; - case CallSetter: - return "CallSetter"; - case CallCustomSetter: - return "CallCustomSetter"; - default: - RELEASE_ASSERT_NOT_REACHED(); - return nullptr; - } -} - -static ByIdStubKind kindFor(const PropertySlot& slot) -{ - if (slot.isCacheableValue()) - return GetValue; - if (slot.isUnset()) - return GetUndefined; - if (slot.isCacheableCustom()) - return CallCustomGetter; - RELEASE_ASSERT(slot.isCacheableGetter()); - return CallGetter; -} - -static FunctionPtr customFor(const PropertySlot& slot) -{ - if 
(!slot.isCacheableCustom()) - return FunctionPtr(); - return FunctionPtr(slot.customGetter()); -} - -static ByIdStubKind kindFor(const PutPropertySlot& slot) -{ - RELEASE_ASSERT(!slot.isCacheablePut()); - if (slot.isCacheableSetter()) - return CallSetter; - RELEASE_ASSERT(slot.isCacheableCustom()); - return CallCustomSetter; -} - -static FunctionPtr customFor(const PutPropertySlot& slot) -{ - if (!slot.isCacheableCustom()) - return FunctionPtr(); - return FunctionPtr(slot.customSetter()); -} - -static bool generateByIdStub( - ExecState* exec, ByIdStubKind kind, const Identifier& propertyName, - FunctionPtr custom, StructureStubInfo& stubInfo, const ObjectPropertyConditionSet& conditionSet, - JSObject* alternateBase, PropertyOffset offset, Structure* structure, bool loadTargetFromProxy, - WatchpointSet* watchpointSet, CodeLocationLabel successLabel, CodeLocationLabel slowCaseLabel, - RefPtr<JITStubRoutine>& stubRoutine) +static ProtoChainGenerationResult generateProtoChainAccessStub(ExecState*, const PropertySlot&, const Identifier&, StructureStubInfo&, StructureChain*, size_t, PropertyOffset, Structure*, CodeLocationLabel, CodeLocationLabel, RefPtr<JITStubRoutine>&) WARN_UNUSED_RETURN; +static ProtoChainGenerationResult generateProtoChainAccessStub(ExecState* exec, const PropertySlot& slot, const Identifier& propertyName, StructureStubInfo& stubInfo, StructureChain* chain, size_t count, PropertyOffset offset, Structure* structure, CodeLocationLabel successLabel, CodeLocationLabel slowCaseLabel, RefPtr<JITStubRoutine>& stubRoutine) { - ASSERT(conditionSet.structuresEnsureValidityAssumingImpurePropertyWatchpoint()); - VM* vm = &exec->vm(); GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR); - JSValueRegs valueRegs = JSValueRegs( #if USE(JSVALUE32_64) - static_cast<GPRReg>(stubInfo.patch.valueTagGPR), + GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.patch.valueTagGPR); #endif - static_cast<GPRReg>(stubInfo.patch.valueGPR)); + GPRReg resultGPR = static_cast<GPRReg>(stubInfo.patch.valueGPR); GPRReg scratchGPR = TempRegisterSet(stubInfo.patch.usedRegisters).getFreeGPR(); bool needToRestoreScratch = scratchGPR == InvalidGPRReg; - RELEASE_ASSERT(!needToRestoreScratch || (kind == GetValue || kind == GetUndefined)); + if (needToRestoreScratch && !slot.isCacheableValue()) + return ProtoChainGenerationFailed; CCallHelpers stubJit(&exec->vm(), exec->codeBlock()); if (needToRestoreScratch) { - scratchGPR = AssemblyHelpers::selectScratchGPR( - baseGPR, valueRegs.tagGPR(), valueRegs.payloadGPR()); +#if USE(JSVALUE64) + scratchGPR = AssemblyHelpers::selectScratchGPR(baseGPR, resultGPR); +#else + scratchGPR = AssemblyHelpers::selectScratchGPR(baseGPR, resultGPR, resultTagGPR); +#endif stubJit.pushToSave(scratchGPR); needToRestoreScratch = true; } MacroAssembler::JumpList failureCases; - - GPRReg baseForGetGPR; - if (loadTargetFromProxy) { - baseForGetGPR = valueRegs.payloadGPR(); - failureCases.append(stubJit.branch8( - MacroAssembler::NotEqual, - MacroAssembler::Address(baseGPR, JSCell::typeInfoTypeOffset()), - MacroAssembler::TrustedImm32(PureForwardingProxyType))); - - stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSProxy::targetOffset()), scratchGPR); - - failureCases.append(branchStructure(stubJit, - MacroAssembler::NotEqual, - MacroAssembler::Address(scratchGPR, JSCell::structureIDOffset()), - structure)); - } else { - baseForGetGPR = baseGPR; - - failureCases.append(branchStructure(stubJit, - MacroAssembler::NotEqual, - MacroAssembler::Address(baseForGetGPR, 
JSCell::structureIDOffset()), - structure)); - } + + failureCases.append(stubJit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(structure))); CodeBlock* codeBlock = exec->codeBlock(); - if (structure->needImpurePropertyWatchpoint() || conditionSet.needImpurePropertyWatchpoint()) + if (structure->typeInfo().newImpurePropertyFiresWatchpoints()) vm->registerWatchpointForImpureProperty(propertyName, stubInfo.addWatchpoint(codeBlock)); - if (watchpointSet) - watchpointSet->add(stubInfo.addWatchpoint(codeBlock)); - - checkObjectPropertyConditions( - conditionSet, codeBlock, stubInfo, stubJit, failureCases, scratchGPR); - - if (isValidOffset(offset)) { - Structure* currStructure; - if (conditionSet.isEmpty()) - currStructure = structure; - else - currStructure = conditionSet.slotBaseCondition().object()->structure(); - currStructure->startWatchingPropertyForReplacements(*vm, offset); + Structure* currStructure = structure; + WriteBarrier<Structure>* it = chain->head(); + JSObject* protoObject = 0; + for (unsigned i = 0; i < count; ++i, ++it) { + protoObject = asObject(currStructure->prototypeForLookup(exec)); + Structure* protoStructure = protoObject->structure(); + if (protoStructure->typeInfo().newImpurePropertyFiresWatchpoints()) + vm->registerWatchpointForImpureProperty(propertyName, stubInfo.addWatchpoint(codeBlock)); + addStructureTransitionCheck( + protoObject, protoStructure, codeBlock, stubInfo, stubJit, + failureCases, scratchGPR); + currStructure = it->get(); } - GPRReg baseForAccessGPR = InvalidGPRReg; - if (kind != GetUndefined) { - if (!conditionSet.isEmpty()) { - // We could have clobbered scratchGPR earlier, so we have to reload from baseGPR to get the target. - if (loadTargetFromProxy) - stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSProxy::targetOffset()), baseForGetGPR); - stubJit.move(MacroAssembler::TrustedImmPtr(alternateBase), scratchGPR); - baseForAccessGPR = scratchGPR; - } else { - // For proxy objects, we need to do all the Structure checks before moving the baseGPR into - // baseForGetGPR because if we fail any of the checks then we would have the wrong value in baseGPR - // on the slow path. 
- if (loadTargetFromProxy) - stubJit.move(scratchGPR, baseForGetGPR); - baseForAccessGPR = baseForGetGPR; - } - } + bool isAccessor = slot.isCacheableGetter() || slot.isCacheableCustom(); + if (isAccessor) + stubJit.move(baseGPR, scratchGPR); - GPRReg loadedValueGPR = InvalidGPRReg; - if (kind == GetUndefined) - stubJit.moveTrustedValue(jsUndefined(), valueRegs); - else if (kind != CallCustomGetter && kind != CallCustomSetter) { - if (kind == GetValue) - loadedValueGPR = valueRegs.payloadGPR(); - else - loadedValueGPR = scratchGPR; - - GPRReg storageGPR; - if (isInlineOffset(offset)) - storageGPR = baseForAccessGPR; - else { - stubJit.loadPtr(MacroAssembler::Address(baseForAccessGPR, JSObject::butterflyOffset()), loadedValueGPR); - storageGPR = loadedValueGPR; - } - + if (!slot.isCacheableCustom()) { + if (isInlineOffset(offset)) { #if USE(JSVALUE64) - stubJit.load64(MacroAssembler::Address(storageGPR, offsetRelativeToBase(offset)), loadedValueGPR); -#else - if (kind == GetValue) - stubJit.load32(MacroAssembler::Address(storageGPR, offsetRelativeToBase(offset) + TagOffset), valueRegs.tagGPR()); - stubJit.load32(MacroAssembler::Address(storageGPR, offsetRelativeToBase(offset) + PayloadOffset), loadedValueGPR); + stubJit.load64(protoObject->locationForOffset(offset), resultGPR); +#elif USE(JSVALUE32_64) + stubJit.move(MacroAssembler::TrustedImmPtr(protoObject->locationForOffset(offset)), resultGPR); + stubJit.load32(MacroAssembler::Address(resultGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR); + stubJit.load32(MacroAssembler::Address(resultGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR); #endif + } else { + stubJit.loadPtr(protoObject->butterflyAddress(), resultGPR); +#if USE(JSVALUE64) + stubJit.load64(MacroAssembler::Address(resultGPR, offsetInButterfly(offset) * sizeof(WriteBarrier<Unknown>)), resultGPR); +#elif USE(JSVALUE32_64) + stubJit.load32(MacroAssembler::Address(resultGPR, offsetInButterfly(offset) * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR); + stubJit.load32(MacroAssembler::Address(resultGPR, offsetInButterfly(offset) * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR); +#endif + } } - - // Stuff for custom getters. MacroAssembler::Call operationCall; MacroAssembler::Call handlerCall; - - // Stuff for JS getters. - MacroAssembler::DataLabelPtr addressOfLinkFunctionCheck; - MacroAssembler::Call fastPathCall; - MacroAssembler::Call slowPathCall; - std::unique_ptr<CallLinkInfo> callLinkInfo; - + FunctionPtr operationFunction; MacroAssembler::Jump success, fail; - if (kind != GetValue && kind != GetUndefined) { + if (isAccessor) { + GPRReg callFrameRegister = static_cast<GPRReg>(stubInfo.patch.callFrameRegister); + if (slot.isCacheableGetter()) { + stubJit.setupArguments(callFrameRegister, scratchGPR, resultGPR); + operationFunction = operationCallGetter; + } else { + stubJit.move(MacroAssembler::TrustedImmPtr(protoObject), scratchGPR); + stubJit.setupArguments(callFrameRegister, scratchGPR, + MacroAssembler::TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()), + MacroAssembler::TrustedImmPtr(propertyName.impl())); + operationFunction = operationCallCustomGetter; + } + // Need to make sure that whenever this call is made in the future, we remember the // place that we made it from. It just so happens to be the place that we are at // right now! 
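// (Concretely, the store below writes the raw call-site bits into the tag
// half of the ArgumentCount slot in the call frame header, space that is
// otherwise unused, so the runtime can attribute this stub's call when it
// walks the stack or unwinds for an exception.)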
stubJit.store32(MacroAssembler::TrustedImm32(exec->locationAsRawBits()), CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount))); - if (kind == CallGetter || kind == CallSetter) { - // Create a JS call using a JS call inline cache. Assume that: - // - // - SP is aligned and represents the extent of the calling compiler's stack usage. - // - // - FP is set correctly (i.e. it points to the caller's call frame header). - // - // - SP - FP is an aligned difference. - // - // - Any byte between FP (exclusive) and SP (inclusive) could be live in the calling - // code. - // - // Therefore, we temporarily grow the stack for the purpose of the call and then - // shrink it after. - - callLinkInfo = std::make_unique<CallLinkInfo>(); - callLinkInfo->setUpCall(CallLinkInfo::Call, stubInfo.codeOrigin, loadedValueGPR); - - MacroAssembler::JumpList done; - - // There is a 'this' argument but nothing else. - unsigned numberOfParameters = 1; - // ... unless we're calling a setter. - if (kind == CallSetter) - numberOfParameters++; - - // Get the accessor; if there ain't one then the result is jsUndefined(). - if (kind == CallSetter) { - stubJit.loadPtr( - MacroAssembler::Address(loadedValueGPR, GetterSetter::offsetOfSetter()), - loadedValueGPR); - } else { - stubJit.loadPtr( - MacroAssembler::Address(loadedValueGPR, GetterSetter::offsetOfGetter()), - loadedValueGPR); - } - MacroAssembler::Jump returnUndefined = stubJit.branchTestPtr( - MacroAssembler::Zero, loadedValueGPR); - - unsigned numberOfRegsForCall = - JSStack::CallFrameHeaderSize + numberOfParameters; - - unsigned numberOfBytesForCall = - numberOfRegsForCall * sizeof(Register) - sizeof(CallerFrameAndPC); - - unsigned alignedNumberOfBytesForCall = - WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall); - - stubJit.subPtr( - MacroAssembler::TrustedImm32(alignedNumberOfBytesForCall), - MacroAssembler::stackPointerRegister); - - MacroAssembler::Address calleeFrame = MacroAssembler::Address( - MacroAssembler::stackPointerRegister, - -static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))); - - stubJit.store32( - MacroAssembler::TrustedImm32(numberOfParameters), - calleeFrame.withOffset( - JSStack::ArgumentCount * sizeof(Register) + PayloadOffset)); - - stubJit.storeCell( - loadedValueGPR, calleeFrame.withOffset(JSStack::Callee * sizeof(Register))); - - stubJit.storeCell( - baseForGetGPR, - calleeFrame.withOffset( - virtualRegisterForArgument(0).offset() * sizeof(Register))); - - if (kind == CallSetter) { - stubJit.storeValue( - valueRegs, - calleeFrame.withOffset( - virtualRegisterForArgument(1).offset() * sizeof(Register))); - } - - MacroAssembler::Jump slowCase = stubJit.branchPtrWithPatch( - MacroAssembler::NotEqual, loadedValueGPR, addressOfLinkFunctionCheck, - MacroAssembler::TrustedImmPtr(0)); - - fastPathCall = stubJit.nearCall(); - - stubJit.addPtr( - MacroAssembler::TrustedImm32(alignedNumberOfBytesForCall), - MacroAssembler::stackPointerRegister); - if (kind == CallGetter) - stubJit.setupResults(valueRegs); - - done.append(stubJit.jump()); - slowCase.link(&stubJit); - - stubJit.move(loadedValueGPR, GPRInfo::regT0); -#if USE(JSVALUE32_64) - stubJit.move(MacroAssembler::TrustedImm32(JSValue::CellTag), GPRInfo::regT1); -#endif - stubJit.move(MacroAssembler::TrustedImmPtr(callLinkInfo.get()), GPRInfo::regT2); - slowPathCall = stubJit.nearCall(); - - stubJit.addPtr( - MacroAssembler::TrustedImm32(alignedNumberOfBytesForCall), - MacroAssembler::stackPointerRegister); - if (kind == CallGetter) - 
stubJit.setupResults(valueRegs); - - done.append(stubJit.jump()); - returnUndefined.link(&stubJit); - - if (kind == CallGetter) - stubJit.moveTrustedValue(jsUndefined(), valueRegs); - - done.link(&stubJit); - } else { - // getter: EncodedJSValue (*GetValueFunc)(ExecState*, JSObject* slotBase, EncodedJSValue thisValue, PropertyName); - // setter: void (*PutValueFunc)(ExecState*, JSObject* base, EncodedJSValue thisObject, EncodedJSValue value); + operationCall = stubJit.call(); #if USE(JSVALUE64) - if (kind == CallCustomGetter) - stubJit.setupArgumentsWithExecState(baseForAccessGPR, baseForGetGPR, MacroAssembler::TrustedImmPtr(propertyName.impl())); - else - stubJit.setupArgumentsWithExecState(baseForAccessGPR, baseForGetGPR, valueRegs.gpr()); + stubJit.move(GPRInfo::returnValueGPR, resultGPR); #else - if (kind == CallCustomGetter) - stubJit.setupArgumentsWithExecState(baseForAccessGPR, baseForGetGPR, MacroAssembler::TrustedImm32(JSValue::CellTag), MacroAssembler::TrustedImmPtr(propertyName.impl())); - else - stubJit.setupArgumentsWithExecState(baseForAccessGPR, baseForGetGPR, MacroAssembler::TrustedImm32(JSValue::CellTag), valueRegs.payloadGPR(), valueRegs.tagGPR()); + stubJit.setupResults(resultGPR, resultTagGPR); #endif - stubJit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame); + MacroAssembler::Jump noException = stubJit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck); - operationCall = stubJit.call(); - if (kind == CallCustomGetter) - stubJit.setupResults(valueRegs); - MacroAssembler::Jump noException = stubJit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck); - - stubJit.setupArguments(CCallHelpers::TrustedImmPtr(vm), GPRInfo::callFrameRegister); - handlerCall = stubJit.call(); - stubJit.jumpToExceptionHandler(); - - noException.link(&stubJit); - } + stubJit.setupArgumentsExecState(); + handlerCall = stubJit.call(); + stubJit.jumpToExceptionHandler(); + + noException.link(&stubJit); } emitRestoreScratch(stubJit, needToRestoreScratch, scratchGPR, success, fail, failureCases); - LinkBuffer patchBuffer(*vm, stubJit, exec->codeBlock(), JITCompilationCanFail); - if (patchBuffer.didFailToAllocate()) - return false; + LinkBuffer patchBuffer(*vm, &stubJit, exec->codeBlock()); linkRestoreScratch(patchBuffer, needToRestoreScratch, success, fail, failureCases, successLabel, slowCaseLabel); - if (kind == CallCustomGetter || kind == CallCustomSetter) { - patchBuffer.link(operationCall, custom); + if (isAccessor) { + patchBuffer.link(operationCall, operationFunction); patchBuffer.link(handlerCall, lookupExceptionHandler); - } else if (kind == CallGetter || kind == CallSetter) { - callLinkInfo->setCallLocations(patchBuffer.locationOfNearCall(slowPathCall), - patchBuffer.locationOf(addressOfLinkFunctionCheck), - patchBuffer.locationOfNearCall(fastPathCall)); - - patchBuffer.link( - slowPathCall, CodeLocationLabel(vm->getCTIStub(linkCallThunkGenerator).code())); - } - - MacroAssemblerCodeRef code = FINALIZE_CODE_FOR( - exec->codeBlock(), patchBuffer, - ("%s access stub for %s, return point %p", - toString(kind), toCString(*exec->codeBlock()).data(), - successLabel.executableAddress())); - - if (kind == CallGetter || kind == CallSetter) - stubRoutine = adoptRef(new AccessorCallJITStubRoutine(code, *vm, WTF::move(callLinkInfo))); - else - stubRoutine = createJITStubRoutine(code, *vm, codeBlock->ownerExecutable(), true); - - return true; -} - -enum InlineCacheAction { - GiveUpOnCache, - RetryCacheLater, - AttemptToCache -}; - -static InlineCacheAction actionForCell(VM& vm, JSCell* 
cell) -{ - Structure* structure = cell->structure(vm); - - TypeInfo typeInfo = structure->typeInfo(); - if (typeInfo.prohibitsPropertyCaching()) - return GiveUpOnCache; - - if (structure->isUncacheableDictionary()) { - if (structure->hasBeenFlattenedBefore()) - return GiveUpOnCache; - // Flattening could have changed the offset, so return early for another try. - asObject(cell)->flattenDictionaryObject(vm); - return RetryCacheLater; } - - if (!structure->propertyAccessesAreCacheable()) - return GiveUpOnCache; - - return AttemptToCache; + stubRoutine = FINALIZE_CODE_FOR_DFG_STUB( + patchBuffer, + ("DFG prototype chain access stub for %s, return point %p", + toCString(*exec->codeBlock()).data(), successLabel.executableAddress())); + return ProtoChainGenerationSucceeded; } -static InlineCacheAction tryCacheGetByID(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo) +static bool tryCacheGetByID(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo) { - if (Options::forceICFailure()) - return GiveUpOnCache; - // FIXME: Write a test that proves we need to check for recursion here just // like the interpreter does, then add a check for recursion. CodeBlock* codeBlock = exec->codeBlock(); VM* vm = &exec->vm(); - - if ((isJSArray(baseValue) || isJSString(baseValue)) && propertyName == exec->propertyNames().length) { + + if (isJSArray(baseValue) && propertyName == exec->propertyNames().length) { GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR); #if USE(JSVALUE32_64) GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.patch.valueTagGPR); #endif GPRReg resultGPR = static_cast<GPRReg>(stubInfo.patch.valueGPR); - + GPRReg scratchGPR = TempRegisterSet(stubInfo.patch.usedRegisters).getFreeGPR(); + bool needToRestoreScratch = false; + MacroAssembler stubJit; - - if (isJSArray(baseValue)) { - GPRReg scratchGPR = TempRegisterSet(stubInfo.patch.usedRegisters).getFreeGPR(); - bool needToRestoreScratch = false; - - if (scratchGPR == InvalidGPRReg) { + + if (scratchGPR == InvalidGPRReg) { #if USE(JSVALUE64) - scratchGPR = AssemblyHelpers::selectScratchGPR(baseGPR, resultGPR); + scratchGPR = AssemblyHelpers::selectScratchGPR(baseGPR, resultGPR); #else - scratchGPR = AssemblyHelpers::selectScratchGPR(baseGPR, resultGPR, resultTagGPR); -#endif - stubJit.pushToSave(scratchGPR); - needToRestoreScratch = true; - } - - MacroAssembler::JumpList failureCases; - - stubJit.load8(MacroAssembler::Address(baseGPR, JSCell::indexingTypeOffset()), scratchGPR); - failureCases.append(stubJit.branchTest32(MacroAssembler::Zero, scratchGPR, MacroAssembler::TrustedImm32(IsArray))); - failureCases.append(stubJit.branchTest32(MacroAssembler::Zero, scratchGPR, MacroAssembler::TrustedImm32(IndexingShapeMask))); - - stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR); - stubJit.load32(MacroAssembler::Address(scratchGPR, ArrayStorage::lengthOffset()), scratchGPR); - failureCases.append(stubJit.branch32(MacroAssembler::LessThan, scratchGPR, MacroAssembler::TrustedImm32(0))); - - stubJit.move(scratchGPR, resultGPR); -#if USE(JSVALUE64) - stubJit.or64(AssemblyHelpers::TrustedImm64(TagTypeNumber), resultGPR); -#elif USE(JSVALUE32_64) - stubJit.move(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), resultTagGPR); + scratchGPR = AssemblyHelpers::selectScratchGPR(baseGPR, resultGPR, resultTagGPR); #endif - - MacroAssembler::Jump success, fail; - - 
emitRestoreScratch(stubJit, needToRestoreScratch, scratchGPR, success, fail, failureCases); - - LinkBuffer patchBuffer(*vm, stubJit, codeBlock, JITCompilationCanFail); - if (patchBuffer.didFailToAllocate()) - return GiveUpOnCache; - - linkRestoreScratch(patchBuffer, needToRestoreScratch, stubInfo, success, fail, failureCases); - - stubInfo.stubRoutine = FINALIZE_CODE_FOR_STUB( - exec->codeBlock(), patchBuffer, - ("GetById array length stub for %s, return point %p", - toCString(*exec->codeBlock()).data(), stubInfo.callReturnLocation.labelAtOffset( - stubInfo.patch.deltaCallToDone).executableAddress())); - - RepatchBuffer repatchBuffer(codeBlock); - replaceWithJump(repatchBuffer, stubInfo, stubInfo.stubRoutine->code().code()); - repatchCall(repatchBuffer, stubInfo.callReturnLocation, operationGetById); - - return RetryCacheLater; + stubJit.pushToSave(scratchGPR); + needToRestoreScratch = true; } + + MacroAssembler::JumpList failureCases; + + stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSCell::structureOffset()), scratchGPR); + stubJit.load8(MacroAssembler::Address(scratchGPR, Structure::indexingTypeOffset()), scratchGPR); + failureCases.append(stubJit.branchTest32(MacroAssembler::Zero, scratchGPR, MacroAssembler::TrustedImm32(IsArray))); + failureCases.append(stubJit.branchTest32(MacroAssembler::Zero, scratchGPR, MacroAssembler::TrustedImm32(IndexingShapeMask))); + + stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR); + stubJit.load32(MacroAssembler::Address(scratchGPR, ArrayStorage::lengthOffset()), scratchGPR); + failureCases.append(stubJit.branch32(MacroAssembler::LessThan, scratchGPR, MacroAssembler::TrustedImm32(0))); - // String.length case - MacroAssembler::Jump failure = stubJit.branch8(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::typeInfoTypeOffset()), MacroAssembler::TrustedImm32(StringType)); - - stubJit.load32(MacroAssembler::Address(baseGPR, JSString::offsetOfLength()), resultGPR); - + stubJit.move(scratchGPR, resultGPR); #if USE(JSVALUE64) stubJit.or64(AssemblyHelpers::TrustedImm64(TagTypeNumber), resultGPR); #elif USE(JSVALUE32_64) - stubJit.move(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), resultTagGPR); + stubJit.move(AssemblyHelpers::TrustedImm32(0xffffffff), resultTagGPR); // JSValue::Int32Tag #endif - MacroAssembler::Jump success = stubJit.jump(); - - LinkBuffer patchBuffer(*vm, stubJit, codeBlock, JITCompilationCanFail); - if (patchBuffer.didFailToAllocate()) - return GiveUpOnCache; + MacroAssembler::Jump success, fail; - patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone)); - patchBuffer.link(failure, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase)); - - stubInfo.stubRoutine = FINALIZE_CODE_FOR_STUB( - exec->codeBlock(), patchBuffer, - ("GetById string length stub for %s, return point %p", + emitRestoreScratch(stubJit, needToRestoreScratch, scratchGPR, success, fail, failureCases); + + LinkBuffer patchBuffer(*vm, &stubJit, codeBlock); + + linkRestoreScratch(patchBuffer, needToRestoreScratch, stubInfo, success, fail, failureCases); + + stubInfo.stubRoutine = FINALIZE_CODE_FOR_DFG_STUB( + patchBuffer, + ("DFG GetById array length stub for %s, return point %p", toCString(*exec->codeBlock()).data(), stubInfo.callReturnLocation.labelAtOffset( stubInfo.patch.deltaCallToDone).executableAddress())); - + RepatchBuffer repatchBuffer(codeBlock); replaceWithJump(repatchBuffer, stubInfo, stubInfo.stubRoutine->code().code()); 
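// (At this point the stub is finalized and wired in: the fast path's patchable
// structure check is replaced with a jump straight into the stub. Note that
// the int32 boxing above is one or64 with TagTypeNumber on JSVALUE64, while
// JSVALUE32_64 simply writes JSValue::Int32Tag into the tag register.)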
repatchCall(repatchBuffer, stubInfo.callReturnLocation, operationGetById); - - return RetryCacheLater; + + return true; } + + // FIXME: should support length access for String. // FIXME: Cache property access for immediates. if (!baseValue.isCell()) - return GiveUpOnCache; - - if (!slot.isCacheable() && !slot.isUnset()) - return GiveUpOnCache; - + return false; JSCell* baseCell = baseValue.asCell(); - Structure* structure = baseCell->structure(*vm); - - InlineCacheAction action = actionForCell(*vm, baseCell); - if (action != AttemptToCache) - return action; + Structure* structure = baseCell->structure(); + if (!slot.isCacheable()) + return false; + if (!structure->propertyAccessesAreCacheable()) + return false; // Optimize self access. - if (slot.isCacheableValue() - && slot.slotBase() == baseValue - && !slot.watchpointSet() - && MacroAssembler::isCompactPtrAlignedAddressOffset(maxOffsetRelativeToPatchedStorage(slot.cachedOffset()))) { - structure->startWatchingPropertyForReplacements(*vm, slot.cachedOffset()); + if (slot.slotBase() == baseValue) { + if (!slot.isCacheableValue() + || !MacroAssembler::isCompactPtrAlignedAddressOffset(maxOffsetRelativeToPatchedStorage(slot.cachedOffset()))) { + repatchCall(codeBlock, stubInfo.callReturnLocation, operationGetByIdBuildList); + return true; + } + repatchByIdSelfAccess(*vm, codeBlock, stubInfo, structure, propertyName, slot.cachedOffset(), operationGetByIdBuildList, true); stubInfo.initGetByIdSelf(*vm, codeBlock->ownerExecutable(), structure); - return RetryCacheLater; + return true; + } + + if (structure->isDictionary()) + return false; + + if (!stubInfo.patch.registersFlushed) { + // We cannot do as much inline caching if the registers were not flushed prior to this GetById. In particular, + // non-Value cached properties require planting calls, which requires registers to have been flushed. Thus, + // if registers were not flushed, don't do non-Value caching. 
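// (The underlying constraint: getter and custom-getter stubs must plant a real
// call, and a call clobbers caller-saved registers, which is only sound if the
// values live at the IC site were already flushed to the stack.)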
+ if (!slot.isCacheableValue()) + return false; } + + PropertyOffset offset = slot.cachedOffset(); + size_t count = normalizePrototypeChainForChainAccess(exec, baseValue, slot.slotBase(), propertyName, offset); + if (count == InvalidPrototypeChain) + return false; - repatchCall(codeBlock, stubInfo.callReturnLocation, operationGetByIdBuildList); - return RetryCacheLater; + StructureChain* prototypeChain = structure->prototypeChain(exec); + if (generateProtoChainAccessStub(exec, slot, propertyName, stubInfo, prototypeChain, count, offset, + structure, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone), + stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase), stubInfo.stubRoutine) == ProtoChainGenerationFailed) + return false; + + RepatchBuffer repatchBuffer(codeBlock); + replaceWithJump(repatchBuffer, stubInfo, stubInfo.stubRoutine->code().code()); + repatchCall(repatchBuffer, stubInfo.callReturnLocation, operationGetByIdBuildList); + + stubInfo.initGetByIdChain(*vm, codeBlock->ownerExecutable(), structure, prototypeChain, count, slot.isCacheableValue()); + return true; } void repatchGetByID(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo) { GCSafeConcurrentJITLocker locker(exec->codeBlock()->m_lock, exec->vm().heap); - if (tryCacheGetByID(exec, baseValue, propertyName, slot, stubInfo) == GiveUpOnCache) + bool cached = tryCacheGetByID(exec, baseValue, propertyName, slot, stubInfo); + if (!cached) repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, operationGetById); } +static bool getPolymorphicStructureList( + VM* vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, + PolymorphicAccessStructureList*& polymorphicStructureList, int& listIndex, + CodeLocationLabel& slowCase) +{ + slowCase = stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase); + + if (stubInfo.accessType == access_unset) { + RELEASE_ASSERT(!stubInfo.stubRoutine); + polymorphicStructureList = new PolymorphicAccessStructureList(); + stubInfo.initGetByIdSelfList(polymorphicStructureList, 0, false); + listIndex = 0; + } else if (stubInfo.accessType == access_get_by_id_self) { + RELEASE_ASSERT(!stubInfo.stubRoutine); + polymorphicStructureList = new PolymorphicAccessStructureList(*vm, codeBlock->ownerExecutable(), JITStubRoutine::createSelfManagedRoutine(slowCase), stubInfo.u.getByIdSelf.baseObjectStructure.get(), true); + stubInfo.initGetByIdSelfList(polymorphicStructureList, 1, true); + listIndex = 1; + } else if (stubInfo.accessType == access_get_by_id_chain) { + RELEASE_ASSERT(!!stubInfo.stubRoutine); + slowCase = CodeLocationLabel(stubInfo.stubRoutine->code().code()); + polymorphicStructureList = new PolymorphicAccessStructureList(*vm, codeBlock->ownerExecutable(), stubInfo.stubRoutine, stubInfo.u.getByIdChain.baseObjectStructure.get(), stubInfo.u.getByIdChain.chain.get(), true); + stubInfo.stubRoutine.clear(); + stubInfo.initGetByIdSelfList(polymorphicStructureList, 1, false); + listIndex = 1; + } else { + RELEASE_ASSERT(stubInfo.accessType == access_get_by_id_self_list); + polymorphicStructureList = stubInfo.u.getByIdSelfList.structureList; + listIndex = stubInfo.u.getByIdSelfList.listSize; + slowCase = CodeLocationLabel(polymorphicStructureList->list[listIndex - 1].stubRoutine->code().code()); + } + + if (listIndex == POLYMORPHIC_LIST_CACHE_SIZE) + return false; + + RELEASE_ASSERT(listIndex < POLYMORPHIC_LIST_CACHE_SIZE); + return true; +} + static void 
patchJumpToGetByIdStub(CodeBlock* codeBlock, StructureStubInfo& stubInfo, JITStubRoutine* stubRoutine) { - RELEASE_ASSERT(stubInfo.accessType == access_get_by_id_list); + RELEASE_ASSERT(stubInfo.accessType == access_get_by_id_self_list); RepatchBuffer repatchBuffer(codeBlock); - if (stubInfo.u.getByIdList.list->didSelfPatching()) { + if (stubInfo.u.getByIdSelfList.didSelfPatching) { repatchBuffer.relink( stubInfo.callReturnLocation.jumpAtOffset( stubInfo.patch.deltaCallToJump), @@ -773,95 +536,205 @@ static void patchJumpToGetByIdStub(CodeBlock* codeBlock, StructureStubInfo& stub replaceWithJump(repatchBuffer, stubInfo, stubRoutine->code().code()); } -static InlineCacheAction tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identifier& ident, const PropertySlot& slot, StructureStubInfo& stubInfo) +static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identifier& ident, const PropertySlot& slot, StructureStubInfo& stubInfo) { if (!baseValue.isCell() - || (!slot.isCacheable() && !slot.isUnset())) - return GiveUpOnCache; - - JSCell* baseCell = baseValue.asCell(); - bool loadTargetFromProxy = false; - if (baseCell->type() == PureForwardingProxyType) { - baseValue = jsCast<JSProxy*>(baseCell)->target(); - baseCell = baseValue.asCell(); - loadTargetFromProxy = true; - } + || !slot.isCacheable() + || !baseValue.asCell()->structure()->propertyAccessesAreCacheable()) + return false; - VM* vm = &exec->vm(); CodeBlock* codeBlock = exec->codeBlock(); + VM* vm = &exec->vm(); + JSCell* baseCell = baseValue.asCell(); + Structure* structure = baseCell->structure(); + + if (slot.slotBase() == baseValue) { + if (!stubInfo.patch.registersFlushed) { + // We cannot do as much inline caching if the registers were not flushed prior to this GetById. In particular, + // non-Value cached properties require planting calls, which requires registers to have been flushed. Thus, + // if registers were not flushed, don't do non-Value caching. + if (!slot.isCacheableValue()) + return false; + } + + PolymorphicAccessStructureList* polymorphicStructureList; + int listIndex; + CodeLocationLabel slowCase; - InlineCacheAction action = actionForCell(*vm, baseCell); - if (action != AttemptToCache) - return action; - - Structure* structure = baseCell->structure(*vm); - TypeInfo typeInfo = structure->typeInfo(); + if (!getPolymorphicStructureList(vm, codeBlock, stubInfo, polymorphicStructureList, listIndex, slowCase)) + return false; + + stubInfo.u.getByIdSelfList.listSize++; + + GPRReg callFrameRegister = static_cast<GPRReg>(stubInfo.patch.callFrameRegister); + GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR); +#if USE(JSVALUE32_64) + GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.patch.valueTagGPR); +#endif + GPRReg resultGPR = static_cast<GPRReg>(stubInfo.patch.valueGPR); + GPRReg scratchGPR = TempRegisterSet(stubInfo.patch.usedRegisters).getFreeGPR(); + + CCallHelpers stubJit(vm, codeBlock); + + MacroAssembler::Jump wrongStruct = stubJit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(structure)); + + // The strategy we use for stubs is as follows: + // 1) Call DFG helper that calls the getter. + // 2) Check if there was an exception, and if there was, call yet another + // helper. 
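// Roughly, in C terms (illustrative sketch only, using this era's helpers):
//
//     EncodedJSValue result = operationCallGetter(exec, base, getter); // (1)
//     if (vm->exception())                                             // (2)
//         lookupExceptionHandler(...);  // unwind instead of returning
//
// The emitExceptionCheck / jumpToExceptionHandler sequence below lays this
// control flow out inline.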
+ + bool isDirect = false; + MacroAssembler::Call operationCall; + MacroAssembler::Call handlerCall; + FunctionPtr operationFunction; + MacroAssembler::Jump success; + + if (slot.isCacheableGetter() || slot.isCacheableCustom()) { + if (slot.isCacheableGetter()) { + ASSERT(scratchGPR != InvalidGPRReg); + ASSERT(baseGPR != scratchGPR); + if (isInlineOffset(slot.cachedOffset())) { +#if USE(JSVALUE64) + stubJit.load64(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR); +#else + stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR); +#endif + } else { + stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR); +#if USE(JSVALUE64) + stubJit.load64(MacroAssembler::Address(scratchGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR); +#else + stubJit.load32(MacroAssembler::Address(scratchGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR); +#endif + } + stubJit.setupArguments(callFrameRegister, baseGPR, scratchGPR); + operationFunction = operationCallGetter; + } else { + stubJit.setupArguments( + callFrameRegister, baseGPR, + MacroAssembler::TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()), + MacroAssembler::TrustedImmPtr(ident.impl())); + operationFunction = operationCallCustomGetter; + } + + // Need to make sure that whenever this call is made in the future, we remember the + // place that we made it from. It just so happens to be the place that we are at + // right now! + stubJit.store32( + MacroAssembler::TrustedImm32(exec->locationAsRawBits()), + CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount))); + + operationCall = stubJit.call(); +#if USE(JSVALUE64) + stubJit.move(GPRInfo::returnValueGPR, resultGPR); +#else + stubJit.setupResults(resultGPR, resultTagGPR); +#endif + success = stubJit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck); + + stubJit.setupArgumentsExecState(); + handlerCall = stubJit.call(); + stubJit.jumpToExceptionHandler(); + } else { + if (isInlineOffset(slot.cachedOffset())) { +#if USE(JSVALUE64) + stubJit.load64(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset())), resultGPR); +#else + if (baseGPR == resultTagGPR) { + stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR); + stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR); + } else { + stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR); + stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR); + } +#endif + } else { + stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), resultGPR); +#if USE(JSVALUE64) + stubJit.load64(MacroAssembler::Address(resultGPR, offsetRelativeToBase(slot.cachedOffset())), resultGPR); +#else + stubJit.load32(MacroAssembler::Address(resultGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR); + stubJit.load32(MacroAssembler::Address(resultGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR); +#endif + } + success = 
stubJit.jump(); + isDirect = true; + } - if (stubInfo.patch.spillMode == NeedToSpill) { + LinkBuffer patchBuffer(*vm, &stubJit, codeBlock); + + patchBuffer.link(wrongStruct, slowCase); + patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone)); + if (!isDirect) { + patchBuffer.link(operationCall, operationFunction); + patchBuffer.link(handlerCall, lookupExceptionHandler); + } + + RefPtr<JITStubRoutine> stubRoutine = + createJITStubRoutine( + FINALIZE_DFG_CODE( + patchBuffer, + ("DFG GetById polymorphic list access for %s, return point %p", + toCString(*exec->codeBlock()).data(), stubInfo.callReturnLocation.labelAtOffset( + stubInfo.patch.deltaCallToDone).executableAddress())), + *vm, + codeBlock->ownerExecutable(), + slot.isCacheableGetter() || slot.isCacheableCustom()); + + polymorphicStructureList->list[listIndex].set(*vm, codeBlock->ownerExecutable(), stubRoutine, structure, isDirect); + + patchJumpToGetByIdStub(codeBlock, stubInfo, stubRoutine.get()); + return listIndex < (POLYMORPHIC_LIST_CACHE_SIZE - 1); + } + + if (baseValue.asCell()->structure()->typeInfo().prohibitsPropertyCaching() + || baseValue.asCell()->structure()->isDictionary()) + return false; + + if (!stubInfo.patch.registersFlushed) { // We cannot do as much inline caching if the registers were not flushed prior to this GetById. In particular, // non-Value cached properties require planting calls, which requires registers to have been flushed. Thus, // if registers were not flushed, don't do non-Value caching. - if (!slot.isCacheableValue() && !slot.isUnset()) - return GiveUpOnCache; + if (!slot.isCacheableValue()) + return false; } - - PropertyOffset offset = slot.isUnset() ? invalidOffset : slot.cachedOffset(); - ObjectPropertyConditionSet conditionSet; - if (slot.isUnset() || slot.slotBase() != baseValue) { - if (typeInfo.prohibitsPropertyCaching() || structure->isDictionary()) - return GiveUpOnCache; - - if (slot.isUnset()) - conditionSet = generateConditionsForPropertyMiss(*vm, codeBlock->ownerExecutable(), exec, structure, ident.impl()); - else - conditionSet = generateConditionsForPrototypePropertyHit(*vm, codeBlock->ownerExecutable(), exec, structure, slot.slotBase(), ident.impl()); - if (!conditionSet.isValid()) - return GiveUpOnCache; + PropertyOffset offset = slot.cachedOffset(); + size_t count = normalizePrototypeChainForChainAccess(exec, baseValue, slot.slotBase(), ident, offset); + if (count == InvalidPrototypeChain) + return false; - offset = slot.isUnset() ? invalidOffset : conditionSet.slotBaseCondition().offset(); - } + StructureChain* prototypeChain = structure->prototypeChain(exec); - PolymorphicGetByIdList* list = PolymorphicGetByIdList::from(stubInfo); - if (list->isFull()) { - // We need this extra check because of recursion. 
- return GiveUpOnCache; - } + PolymorphicAccessStructureList* polymorphicStructureList; + int listIndex; + CodeLocationLabel slowCase; + if (!getPolymorphicStructureList(vm, codeBlock, stubInfo, polymorphicStructureList, listIndex, slowCase)) + return false; + + stubInfo.u.getByIdProtoList.listSize++; RefPtr<JITStubRoutine> stubRoutine; - bool result = generateByIdStub( - exec, kindFor(slot), ident, customFor(slot), stubInfo, conditionSet, slot.slotBase(), offset, - structure, loadTargetFromProxy, slot.watchpointSet(), + + if (generateProtoChainAccessStub(exec, slot, ident, stubInfo, prototypeChain, count, offset, structure, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone), - CodeLocationLabel(list->currentSlowPathTarget(stubInfo)), stubRoutine); - if (!result) - return GiveUpOnCache; - - GetByIdAccess::AccessType accessType; - if (slot.isCacheableValue()) - accessType = slot.watchpointSet() ? GetByIdAccess::WatchedStub : GetByIdAccess::SimpleStub; - else if (slot.isUnset()) - accessType = GetByIdAccess::SimpleMiss; - else if (slot.isCacheableGetter()) - accessType = GetByIdAccess::Getter; - else - accessType = GetByIdAccess::CustomGetter; + slowCase, stubRoutine) == ProtoChainGenerationFailed) + return false; - list->addAccess(GetByIdAccess( - *vm, codeBlock->ownerExecutable(), accessType, stubRoutine, structure, - conditionSet)); + polymorphicStructureList->list[listIndex].set(*vm, codeBlock->ownerExecutable(), stubRoutine, structure, slot.isCacheableValue()); patchJumpToGetByIdStub(codeBlock, stubInfo, stubRoutine.get()); - return list->isFull() ? GiveUpOnCache : RetryCacheLater; + return listIndex < (POLYMORPHIC_LIST_CACHE_SIZE - 1); } void buildGetByIDList(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo) { GCSafeConcurrentJITLocker locker(exec->codeBlock()->m_lock, exec->vm().heap); - if (tryBuildGetByIDList(exec, baseValue, propertyName, slot, stubInfo) == GiveUpOnCache) + bool dontChangeCall = tryBuildGetByIDList(exec, baseValue, propertyName, slot, stubInfo); + if (!dontChangeCall) repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, operationGetById); } @@ -889,16 +762,79 @@ static V_JITOperation_ESsiJJI appropriateListBuildingPutByIdFunction(const PutPr return operationPutByIdNonStrictBuildList; } -static bool emitPutReplaceStub( +#if ENABLE(GGC) +static MacroAssembler::Call storeToWriteBarrierBuffer(CCallHelpers& jit, GPRReg cell, GPRReg scratch1, GPRReg scratch2, GPRReg callFrameRegister, ScratchRegisterAllocator& allocator) +{ + ASSERT(scratch1 != scratch2); + WriteBarrierBuffer* writeBarrierBuffer = &jit.vm()->heap.writeBarrierBuffer(); + jit.move(MacroAssembler::TrustedImmPtr(writeBarrierBuffer), scratch1); + jit.load32(MacroAssembler::Address(scratch1, WriteBarrierBuffer::currentIndexOffset()), scratch2); + MacroAssembler::Jump needToFlush = jit.branch32(MacroAssembler::AboveOrEqual, scratch2, MacroAssembler::Address(scratch1, WriteBarrierBuffer::capacityOffset())); + + jit.add32(MacroAssembler::TrustedImm32(1), scratch2); + jit.store32(scratch2, MacroAssembler::Address(scratch1, WriteBarrierBuffer::currentIndexOffset())); + + jit.loadPtr(MacroAssembler::Address(scratch1, WriteBarrierBuffer::bufferOffset()), scratch1); + // We use an offset of -sizeof(void*) because we already added 1 to scratch2. 
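// (Spelled out: the BaseIndex store below computes
//      buffer + scratch2 * sizeof(void*) - sizeof(void*),
//  and since scratch2 was incremented just above, that addresses exactly the
//  slot at the pre-increment index.)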
+ jit.storePtr(cell, MacroAssembler::BaseIndex(scratch1, scratch2, MacroAssembler::ScalePtr, static_cast<int32_t>(-sizeof(void*)))); + + MacroAssembler::Jump done = jit.jump(); + needToFlush.link(&jit); + + ScratchBuffer* scratchBuffer = jit.vm()->scratchBufferForSize(allocator.desiredScratchBufferSize()); + allocator.preserveUsedRegistersToScratchBuffer(jit, scratchBuffer, scratch1); + + unsigned bytesFromBase = allocator.numberOfReusedRegisters() * sizeof(void*); + unsigned bytesToSubtract = 0; +#if CPU(X86) + bytesToSubtract += 2 * sizeof(void*); + bytesFromBase += bytesToSubtract; +#endif + unsigned currentAlignment = bytesFromBase % stackAlignmentBytes(); + bytesToSubtract += currentAlignment; + + if (bytesToSubtract) + jit.subPtr(MacroAssembler::TrustedImm32(bytesToSubtract), MacroAssembler::stackPointerRegister); + + jit.setupArguments(callFrameRegister, cell); + MacroAssembler::Call call = jit.call(); + + if (bytesToSubtract) + jit.addPtr(MacroAssembler::TrustedImm32(bytesToSubtract), MacroAssembler::stackPointerRegister); + allocator.restoreUsedRegistersFromScratchBuffer(jit, scratchBuffer, scratch1); + + done.link(&jit); + + return call; +} + +static MacroAssembler::Call writeBarrier(CCallHelpers& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2, GPRReg callFrameRegister, ScratchRegisterAllocator& allocator) +{ + ASSERT(owner != scratch1); + ASSERT(owner != scratch2); + + MacroAssembler::Jump definitelyNotMarked = DFG::SpeculativeJIT::genericWriteBarrier(jit, owner, scratch1, scratch2); + MacroAssembler::Call call = storeToWriteBarrierBuffer(jit, owner, scratch1, scratch2, callFrameRegister, allocator); + definitelyNotMarked.link(&jit); + return call; +} +#endif // ENABLE(GGC) + +static void emitPutReplaceStub( ExecState* exec, + JSValue, const Identifier&, const PutPropertySlot& slot, StructureStubInfo& stubInfo, + PutKind, Structure* structure, CodeLocationLabel failureLabel, RefPtr<JITStubRoutine>& stubRoutine) { VM* vm = &exec->vm(); +#if ENABLE(GGC) + GPRReg callFrameRegister = static_cast<GPRReg>(stubInfo.patch.callFrameRegister); +#endif GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR); #if USE(JSVALUE32_64) GPRReg valueTagGPR = static_cast<GPRReg>(stubInfo.patch.valueTagGPR); @@ -913,15 +849,18 @@ static bool emitPutReplaceStub( allocator.lock(valueGPR); GPRReg scratchGPR1 = allocator.allocateScratchGPR(); +#if ENABLE(GGC) + GPRReg scratchGPR2 = allocator.allocateScratchGPR(); +#endif CCallHelpers stubJit(vm, exec->codeBlock()); allocator.preserveReusedRegistersByPushing(stubJit); - MacroAssembler::Jump badStructure = branchStructure(stubJit, + MacroAssembler::Jump badStructure = stubJit.branchPtr( MacroAssembler::NotEqual, - MacroAssembler::Address(baseGPR, JSCell::structureIDOffset()), - structure); + MacroAssembler::Address(baseGPR, JSCell::structureOffset()), + MacroAssembler::TrustedImmPtr(structure)); #if USE(JSVALUE64) if (isInlineOffset(slot.cachedOffset())) @@ -941,6 +880,10 @@ static bool emitPutReplaceStub( } #endif +#if ENABLE(GGC) + MacroAssembler::Call writeBarrierOperation = writeBarrier(stubJit, baseGPR, scratchGPR1, scratchGPR2, callFrameRegister, allocator); +#endif + MacroAssembler::Jump success; MacroAssembler::Jump failure; @@ -956,63 +899,36 @@ static bool emitPutReplaceStub( failure = badStructure; } - LinkBuffer patchBuffer(*vm, stubJit, exec->codeBlock(), JITCompilationCanFail); - if (patchBuffer.didFailToAllocate()) - return false; - + LinkBuffer patchBuffer(*vm, &stubJit, exec->codeBlock()); +#if ENABLE(GGC) + 
patchBuffer.link(writeBarrierOperation, operationFlushWriteBarrierBuffer); +#endif patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone)); patchBuffer.link(failure, failureLabel); - stubRoutine = FINALIZE_CODE_FOR_STUB( - exec->codeBlock(), patchBuffer, - ("PutById replace stub for %s, return point %p", + stubRoutine = FINALIZE_CODE_FOR_DFG_STUB( + patchBuffer, + ("DFG PutById replace stub for %s, return point %p", toCString(*exec->codeBlock()).data(), stubInfo.callReturnLocation.labelAtOffset( stubInfo.patch.deltaCallToDone).executableAddress())); - - return true; } -static bool emitPutTransitionStub( - ExecState* exec, VM* vm, Structure*& structure, const Identifier& ident, - const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind, - Structure*& oldStructure, ObjectPropertyConditionSet& conditionSet) +static void emitPutTransitionStub( + ExecState* exec, + JSValue, + const Identifier&, + const PutPropertySlot& slot, + StructureStubInfo& stubInfo, + PutKind putKind, + Structure* structure, + Structure* oldStructure, + StructureChain* prototypeChain, + CodeLocationLabel failureLabel, + RefPtr<JITStubRoutine>& stubRoutine) { - PropertyName pname(ident); - oldStructure = structure; - if (!oldStructure->isObject() || oldStructure->isDictionary() || parseIndex(pname)) - return false; - - PropertyOffset propertyOffset; - structure = Structure::addPropertyTransitionToExistingStructureConcurrently(oldStructure, ident.impl(), 0, propertyOffset); - - if (!structure || !structure->isObject() || structure->isDictionary() || !structure->propertyAccessesAreCacheable()) - return false; - - // Skip optimizing the case where we need a realloc, if we don't have - // enough registers to make it happen. - if (GPRInfo::numberOfRegisters < 6 - && oldStructure->outOfLineCapacity() != structure->outOfLineCapacity() - && oldStructure->outOfLineCapacity()) { - return false; - } - - // Skip optimizing the case where we need realloc, and the structure has - // indexing storage. - // FIXME: We shouldn't skip this! Implement it! 
- // https://bugs.webkit.org/show_bug.cgi?id=130914 - if (oldStructure->couldHaveIndexingHeader()) - return false; - - if (putKind == NotDirect) { - conditionSet = generateConditionsForPropertySetterMiss( - *vm, exec->codeBlock()->ownerExecutable(), exec, structure, ident.impl()); - if (!conditionSet.isValid()) - return false; - } - - CodeLocationLabel failureLabel = stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase); - RefPtr<JITStubRoutine>& stubRoutine = stubInfo.stubRoutine; + VM* vm = &exec->vm(); + GPRReg callFrameRegister = static_cast<GPRReg>(stubInfo.patch.callFrameRegister); GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR); #if USE(JSVALUE32_64) GPRReg valueTagGPR = static_cast<GPRReg>(stubInfo.patch.valueTagGPR); @@ -1059,13 +975,19 @@ static bool emitPutTransitionStub( ASSERT(oldStructure->transitionWatchpointSetHasBeenInvalidated()); - failureCases.append(branchStructure(stubJit, - MacroAssembler::NotEqual, - MacroAssembler::Address(baseGPR, JSCell::structureIDOffset()), - oldStructure)); + failureCases.append(stubJit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(oldStructure))); - checkObjectPropertyConditions( - conditionSet, exec->codeBlock(), stubInfo, stubJit, failureCases, scratchGPR1); + addStructureTransitionCheck( + oldStructure->storedPrototype(), exec->codeBlock(), stubInfo, stubJit, failureCases, + scratchGPR1); + + if (putKind == NotDirect) { + for (WriteBarrier<Structure>* it = prototypeChain->head(); *it; ++it) { + addStructureTransitionCheck( + (*it)->storedPrototype(), exec->codeBlock(), stubInfo, stubJit, failureCases, + scratchGPR1); + } + } MacroAssembler::JumpList slowPath; @@ -1104,15 +1026,7 @@ static bool emitPutTransitionStub( scratchGPR1HasStorage = true; } - ASSERT(oldStructure->typeInfo().type() == structure->typeInfo().type()); - ASSERT(oldStructure->typeInfo().inlineTypeFlags() == structure->typeInfo().inlineTypeFlags()); - ASSERT(oldStructure->indexingType() == structure->indexingType()); -#if USE(JSVALUE64) - uint32_t val = structure->id(); -#else - uint32_t val = reinterpret_cast<uint32_t>(structure->id()); -#endif - stubJit.store32(MacroAssembler::TrustedImm32(val), MacroAssembler::Address(baseGPR, JSCell::structureIDOffset())); + stubJit.storePtr(MacroAssembler::TrustedImmPtr(structure), MacroAssembler::Address(baseGPR, JSCell::structureOffset())); #if USE(JSVALUE64) if (isInlineOffset(slot.cachedOffset())) stubJit.store64(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue))); @@ -1133,38 +1047,10 @@ static bool emitPutTransitionStub( } #endif - ScratchBuffer* scratchBuffer = nullptr; - #if ENABLE(GGC) - MacroAssembler::Call callFlushWriteBarrierBuffer; - MacroAssembler::Jump ownerIsRememberedOrInEden = stubJit.jumpIfIsRememberedOrInEden(baseGPR); - { - WriteBarrierBuffer& writeBarrierBuffer = stubJit.vm()->heap.writeBarrierBuffer(); - stubJit.load32(writeBarrierBuffer.currentIndexAddress(), scratchGPR2); - MacroAssembler::Jump needToFlush = - stubJit.branch32(MacroAssembler::AboveOrEqual, scratchGPR2, MacroAssembler::TrustedImm32(writeBarrierBuffer.capacity())); - - stubJit.add32(MacroAssembler::TrustedImm32(1), scratchGPR2); - stubJit.store32(scratchGPR2, writeBarrierBuffer.currentIndexAddress()); - - stubJit.move(MacroAssembler::TrustedImmPtr(writeBarrierBuffer.buffer()), scratchGPR1); - // We use an offset of -sizeof(void*) because we 
already added 1 to scratchGPR2. - stubJit.storePtr(baseGPR, MacroAssembler::BaseIndex(scratchGPR1, scratchGPR2, MacroAssembler::ScalePtr, static_cast<int32_t>(-sizeof(void*)))); - - MacroAssembler::Jump doneWithBarrier = stubJit.jump(); - needToFlush.link(&stubJit); - - scratchBuffer = vm->scratchBufferForSize(allocator.desiredScratchBufferSizeForCall()); - allocator.preserveUsedRegistersToScratchBufferForCall(stubJit, scratchBuffer, scratchGPR2); - stubJit.setupArgumentsWithExecState(baseGPR); - callFlushWriteBarrierBuffer = stubJit.call(); - allocator.restoreUsedRegistersFromScratchBufferForCall(stubJit, scratchBuffer, scratchGPR2); - - doneWithBarrier.link(&stubJit); - } - ownerIsRememberedOrInEden.link(&stubJit); + MacroAssembler::Call writeBarrierOperation = writeBarrier(stubJit, baseGPR, scratchGPR1, scratchGPR2, callFrameRegister, allocator); #endif - + MacroAssembler::Jump success; MacroAssembler::Jump failure; @@ -1185,31 +1071,27 @@ static bool emitPutTransitionStub( slowPath.link(&stubJit); allocator.restoreReusedRegistersByPopping(stubJit); - if (!scratchBuffer) - scratchBuffer = vm->scratchBufferForSize(allocator.desiredScratchBufferSizeForCall()); - allocator.preserveUsedRegistersToScratchBufferForCall(stubJit, scratchBuffer, scratchGPR1); + ScratchBuffer* scratchBuffer = vm->scratchBufferForSize(allocator.desiredScratchBufferSize()); + allocator.preserveUsedRegistersToScratchBuffer(stubJit, scratchBuffer, scratchGPR1); #if USE(JSVALUE64) - stubJit.setupArgumentsWithExecState(baseGPR, MacroAssembler::TrustedImmPtr(structure), MacroAssembler::TrustedImm32(slot.cachedOffset()), valueGPR); + stubJit.setupArguments(callFrameRegister, baseGPR, MacroAssembler::TrustedImmPtr(structure), MacroAssembler::TrustedImm32(slot.cachedOffset()), valueGPR); #else - stubJit.setupArgumentsWithExecState(baseGPR, MacroAssembler::TrustedImmPtr(structure), MacroAssembler::TrustedImm32(slot.cachedOffset()), valueGPR, valueTagGPR); + stubJit.setupArguments(callFrameRegister, baseGPR, MacroAssembler::TrustedImmPtr(structure), MacroAssembler::TrustedImm32(slot.cachedOffset()), valueGPR, valueTagGPR); #endif operationCall = stubJit.call(); - allocator.restoreUsedRegistersFromScratchBufferForCall(stubJit, scratchBuffer, scratchGPR1); + allocator.restoreUsedRegistersFromScratchBuffer(stubJit, scratchBuffer, scratchGPR1); successInSlowPath = stubJit.jump(); } - LinkBuffer patchBuffer(*vm, stubJit, exec->codeBlock(), JITCompilationCanFail); - if (patchBuffer.didFailToAllocate()) - return false; - + LinkBuffer patchBuffer(*vm, &stubJit, exec->codeBlock()); +#if ENABLE(GGC) + patchBuffer.link(writeBarrierOperation, operationFlushWriteBarrierBuffer); +#endif patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone)); if (allocator.didReuseRegisters()) patchBuffer.link(failure, failureLabel); else patchBuffer.link(failureCases, failureLabel); -#if ENABLE(GGC) - patchBuffer.link(callFlushWriteBarrierBuffer, operationFlushWriteBarrierBuffer); -#endif if (structure->outOfLineCapacity() != oldStructure->outOfLineCapacity()) { patchBuffer.link(operationCall, operationReallocateStorageAndFinishPut); patchBuffer.link(successInSlowPath, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone)); @@ -1217,9 +1099,9 @@ static bool emitPutTransitionStub( stubRoutine = createJITStubRoutine( - FINALIZE_CODE_FOR( - exec->codeBlock(), patchBuffer, - ("PutById %stransition stub (%p -> %p) for %s, return point %p", + FINALIZE_DFG_CODE( + patchBuffer, + ("DFG PutById 
%stransition stub (%p -> %p) for %s, return point %p", structure->outOfLineCapacity() != oldStructure->outOfLineCapacity() ? "reallocating " : "", oldStructure, structure, toCString(*exec->codeBlock()).data(), stubInfo.callReturnLocation.labelAtOffset( @@ -1228,35 +1110,52 @@ static bool emitPutTransitionStub( exec->codeBlock()->ownerExecutable(), structure->outOfLineCapacity() != oldStructure->outOfLineCapacity(), structure); - - return true; } -static InlineCacheAction tryCachePutByID(ExecState* exec, JSValue baseValue, Structure* structure, const Identifier& ident, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind) +static bool tryCachePutByID(ExecState* exec, JSValue baseValue, const Identifier& ident, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind) { - if (Options::forceICFailure()) - return GiveUpOnCache; - CodeBlock* codeBlock = exec->codeBlock(); VM* vm = &exec->vm(); if (!baseValue.isCell()) - return GiveUpOnCache; + return false; + JSCell* baseCell = baseValue.asCell(); + Structure* structure = baseCell->structure(); + Structure* oldStructure = structure->previousID(); - if (!slot.isCacheablePut() && !slot.isCacheableCustom() && !slot.isCacheableSetter()) - return GiveUpOnCache; - + if (!slot.isCacheable()) + return false; if (!structure->propertyAccessesAreCacheable()) - return GiveUpOnCache; + return false; // Optimize self access. - if (slot.base() == baseValue && slot.isCacheablePut()) { + if (slot.base() == baseValue) { if (slot.type() == PutPropertySlot::NewProperty) { - - Structure* oldStructure; - ObjectPropertyConditionSet conditionSet; - if (!emitPutTransitionStub(exec, vm, structure, ident, slot, stubInfo, putKind, oldStructure, conditionSet)) - return GiveUpOnCache; + if (structure->isDictionary()) + return false; + + // Skip optimizing the case where we need a realloc, if we don't have + // enough registers to make it happen. + if (GPRInfo::numberOfRegisters < 6 + && oldStructure->outOfLineCapacity() != structure->outOfLineCapacity() + && oldStructure->outOfLineCapacity()) + return false; + + // Skip optimizing the case where we need realloc, and the structure has + // indexing storage. 
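+            // (If the structure could have an indexing header, the butterfly
+            //  also holds indexed-property storage, and the stub's simple
+            //  reallocate-and-copy of the out-of-line slots would not be
+            //  enough; presumably why this case is skipped.)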
+ if (oldStructure->couldHaveIndexingHeader()) + return false; + + if (normalizePrototypeChain(exec, baseCell) == InvalidPrototypeChain) + return false; + + StructureChain* prototypeChain = structure->prototypeChain(exec); + + emitPutTransitionStub( + exec, baseValue, ident, slot, stubInfo, putKind, + structure, oldStructure, prototypeChain, + stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase), + stubInfo.stubRoutine); RepatchBuffer repatchBuffer(codeBlock); repatchBuffer.relink( @@ -1265,232 +1164,146 @@ static InlineCacheAction tryCachePutByID(ExecState* exec, JSValue baseValue, Str CodeLocationLabel(stubInfo.stubRoutine->code().code())); repatchCall(repatchBuffer, stubInfo.callReturnLocation, appropriateListBuildingPutByIdFunction(slot, putKind)); - stubInfo.initPutByIdTransition(*vm, codeBlock->ownerExecutable(), oldStructure, structure, conditionSet, putKind == Direct); + stubInfo.initPutByIdTransition(*vm, codeBlock->ownerExecutable(), oldStructure, structure, prototypeChain, putKind == Direct); - return RetryCacheLater; + return true; } if (!MacroAssembler::isPtrAlignedAddressOffset(offsetRelativeToPatchedStorage(slot.cachedOffset()))) - return GiveUpOnCache; + return false; - structure->didCachePropertyReplacement(*vm, slot.cachedOffset()); repatchByIdSelfAccess(*vm, codeBlock, stubInfo, structure, ident, slot.cachedOffset(), appropriateListBuildingPutByIdFunction(slot, putKind), false); stubInfo.initPutByIdReplace(*vm, codeBlock->ownerExecutable(), structure); - return RetryCacheLater; - } - - if ((slot.isCacheableCustom() || slot.isCacheableSetter()) - && stubInfo.patch.spillMode == DontSpill) { - RefPtr<JITStubRoutine> stubRoutine; - - ObjectPropertyConditionSet conditionSet; - PropertyOffset offset; - if (slot.base() != baseValue) { - if (slot.isCacheableCustom()) { - conditionSet = - generateConditionsForPrototypePropertyHitCustom( - *vm, codeBlock->ownerExecutable(), exec, structure, slot.base(), - ident.impl()); - } else { - conditionSet = - generateConditionsForPrototypePropertyHit( - *vm, codeBlock->ownerExecutable(), exec, structure, slot.base(), - ident.impl()); - } - if (!conditionSet.isValid()) - return GiveUpOnCache; - offset = slot.isCacheableCustom() ? invalidOffset : conditionSet.slotBaseCondition().offset(); - } else - offset = slot.cachedOffset(); - - PolymorphicPutByIdList* list; - list = PolymorphicPutByIdList::from(putKind, stubInfo); - - bool result = generateByIdStub( - exec, kindFor(slot), ident, customFor(slot), stubInfo, conditionSet, slot.base(), - offset, structure, false, nullptr, - stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone), - stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase), - stubRoutine); - if (!result) - return GiveUpOnCache; - - list->addAccess(PutByIdAccess::setter( - *vm, codeBlock->ownerExecutable(), - slot.isCacheableSetter() ? 
PutByIdAccess::Setter : PutByIdAccess::CustomSetter, - structure, conditionSet, slot.customSetter(), stubRoutine)); - - RepatchBuffer repatchBuffer(codeBlock); - repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), CodeLocationLabel(stubRoutine->code().code())); - repatchCall(repatchBuffer, stubInfo.callReturnLocation, appropriateListBuildingPutByIdFunction(slot, putKind)); - RELEASE_ASSERT(!list->isFull()); - return RetryCacheLater; + return true; } - return GiveUpOnCache; + return false; } -void repatchPutByID(ExecState* exec, JSValue baseValue, Structure* structure, const Identifier& propertyName, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind) +void repatchPutByID(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind) { GCSafeConcurrentJITLocker locker(exec->codeBlock()->m_lock, exec->vm().heap); - if (tryCachePutByID(exec, baseValue, structure, propertyName, slot, stubInfo, putKind) == GiveUpOnCache) + bool cached = tryCachePutByID(exec, baseValue, propertyName, slot, stubInfo, putKind); + if (!cached) repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, appropriateGenericPutByIdFunction(slot, putKind)); } -static InlineCacheAction tryBuildPutByIdList(ExecState* exec, JSValue baseValue, Structure* structure, const Identifier& propertyName, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind) +static bool tryBuildPutByIdList(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind) { CodeBlock* codeBlock = exec->codeBlock(); VM* vm = &exec->vm(); if (!baseValue.isCell()) - return GiveUpOnCache; - - if (!slot.isCacheablePut() && !slot.isCacheableCustom() && !slot.isCacheableSetter()) - return GiveUpOnCache; - + return false; + JSCell* baseCell = baseValue.asCell(); + Structure* structure = baseCell->structure(); + Structure* oldStructure = structure->previousID(); + + if (!slot.isCacheable()) + return false; if (!structure->propertyAccessesAreCacheable()) - return GiveUpOnCache; + return false; // Optimize self access. - if (slot.base() == baseValue && slot.isCacheablePut()) { + if (slot.base() == baseValue) { PolymorphicPutByIdList* list; RefPtr<JITStubRoutine> stubRoutine; if (slot.type() == PutPropertySlot::NewProperty) { - list = PolymorphicPutByIdList::from(putKind, stubInfo); - if (list->isFull()) - return GiveUpOnCache; // Will get here due to recursion. - - Structure* oldStructure; - ObjectPropertyConditionSet conditionSet; - if (!emitPutTransitionStub(exec, vm, structure, propertyName, slot, stubInfo, putKind, oldStructure, conditionSet)) - return GiveUpOnCache; - - stubRoutine = stubInfo.stubRoutine; + if (structure->isDictionary()) + return false; + + // Skip optimizing the case where we need a realloc, if we don't have + // enough registers to make it happen. + if (GPRInfo::numberOfRegisters < 6 + && oldStructure->outOfLineCapacity() != structure->outOfLineCapacity() + && oldStructure->outOfLineCapacity()) + return false; + + // Skip optimizing the case where we need realloc, and the structure has + // indexing storage. + if (oldStructure->couldHaveIndexingHeader()) + return false; + + if (normalizePrototypeChain(exec, baseCell) == InvalidPrototypeChain) + return false; + + StructureChain* prototypeChain = structure->prototypeChain(exec); + + // We're now committed to creating the stub. 
Mogrify the meta-data accordingly. + list = PolymorphicPutByIdList::from( + putKind, stubInfo, + stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase)); + + emitPutTransitionStub( + exec, baseValue, propertyName, slot, stubInfo, putKind, + structure, oldStructure, prototypeChain, + CodeLocationLabel(list->currentSlowPathTarget()), + stubRoutine); + list->addAccess( PutByIdAccess::transition( *vm, codeBlock->ownerExecutable(), - oldStructure, structure, conditionSet, + oldStructure, structure, prototypeChain, stubRoutine)); - } else { - list = PolymorphicPutByIdList::from(putKind, stubInfo); - if (list->isFull()) - return GiveUpOnCache; // Will get here due to recursion. - - structure->didCachePropertyReplacement(*vm, slot.cachedOffset()); - // We're now committed to creating the stub. Mogrify the meta-data accordingly. - bool result = emitPutReplaceStub( - exec, propertyName, slot, stubInfo, + list = PolymorphicPutByIdList::from( + putKind, stubInfo, + stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase)); + + emitPutReplaceStub( + exec, baseValue, propertyName, slot, stubInfo, putKind, structure, CodeLocationLabel(list->currentSlowPathTarget()), stubRoutine); - if (!result) - return GiveUpOnCache; list->addAccess( PutByIdAccess::replace( *vm, codeBlock->ownerExecutable(), structure, stubRoutine)); } - RepatchBuffer repatchBuffer(codeBlock); - repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), CodeLocationLabel(stubRoutine->code().code())); - if (list->isFull()) - repatchCall(repatchBuffer, stubInfo.callReturnLocation, appropriateGenericPutByIdFunction(slot, putKind)); - - return RetryCacheLater; - } - - if ((slot.isCacheableCustom() || slot.isCacheableSetter()) - && stubInfo.patch.spillMode == DontSpill) { - RefPtr<JITStubRoutine> stubRoutine; - - ObjectPropertyConditionSet conditionSet; - PropertyOffset offset; - if (slot.base() != baseValue) { - if (slot.isCacheableCustom()) { - conditionSet = - generateConditionsForPrototypePropertyHitCustom( - *vm, codeBlock->ownerExecutable(), exec, structure, slot.base(), - propertyName.impl()); - } else { - conditionSet = - generateConditionsForPrototypePropertyHit( - *vm, codeBlock->ownerExecutable(), exec, structure, slot.base(), - propertyName.impl()); - } - if (!conditionSet.isValid()) - return GiveUpOnCache; - offset = slot.isCacheableCustom() ? invalidOffset : conditionSet.slotBaseCondition().offset(); - } else - offset = slot.cachedOffset(); - - PolymorphicPutByIdList* list; - list = PolymorphicPutByIdList::from(putKind, stubInfo); - - bool result = generateByIdStub( - exec, kindFor(slot), propertyName, customFor(slot), stubInfo, conditionSet, slot.base(), - offset, structure, false, nullptr, - stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone), - CodeLocationLabel(list->currentSlowPathTarget()), - stubRoutine); - if (!result) - return GiveUpOnCache; - list->addAccess(PutByIdAccess::setter( - *vm, codeBlock->ownerExecutable(), - slot.isCacheableSetter() ? 
PutByIdAccess::Setter : PutByIdAccess::CustomSetter, - structure, conditionSet, slot.customSetter(), stubRoutine)); - RepatchBuffer repatchBuffer(codeBlock); repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), CodeLocationLabel(stubRoutine->code().code())); + if (list->isFull()) repatchCall(repatchBuffer, stubInfo.callReturnLocation, appropriateGenericPutByIdFunction(slot, putKind)); - - return RetryCacheLater; + + return true; } - return GiveUpOnCache; + + return false; } -void buildPutByIdList(ExecState* exec, JSValue baseValue, Structure* structure, const Identifier& propertyName, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind) +void buildPutByIdList(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind) { GCSafeConcurrentJITLocker locker(exec->codeBlock()->m_lock, exec->vm().heap); - if (tryBuildPutByIdList(exec, baseValue, structure, propertyName, slot, stubInfo, putKind) == GiveUpOnCache) + bool cached = tryBuildPutByIdList(exec, baseValue, propertyName, slot, stubInfo, putKind); + if (!cached) repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, appropriateGenericPutByIdFunction(slot, putKind)); } -static InlineCacheAction tryRepatchIn( +static bool tryRepatchIn( ExecState* exec, JSCell* base, const Identifier& ident, bool wasFound, const PropertySlot& slot, StructureStubInfo& stubInfo) { - if (Options::forceICFailure()) - return GiveUpOnCache; - if (!base->structure()->propertyAccessesAreCacheable()) - return GiveUpOnCache; + return false; if (wasFound) { if (!slot.isCacheable()) - return GiveUpOnCache; + return false; } CodeBlock* codeBlock = exec->codeBlock(); VM* vm = &exec->vm(); - Structure* structure = base->structure(*vm); + Structure* structure = base->structure(); - ObjectPropertyConditionSet conditionSet; - if (wasFound) { - if (slot.slotBase() != base) { - conditionSet = generateConditionsForPrototypePropertyHit( - *vm, codeBlock->ownerExecutable(), exec, structure, slot.slotBase(), ident.impl()); - } - } else { - conditionSet = generateConditionsForPropertyMiss( - *vm, codeBlock->ownerExecutable(), exec, structure, ident.impl()); - } - if (!conditionSet.isValid()) - return GiveUpOnCache; + PropertyOffset offsetIgnored; + size_t count = normalizePrototypeChainForChainAccess(exec, base, wasFound ? 
slot.slotBase() : JSValue(), ident, offsetIgnored); + if (count == InvalidPrototypeChain) + return false; PolymorphicAccessStructureList* polymorphicStructureList; int listIndex; @@ -1511,9 +1324,10 @@ static InlineCacheAction tryRepatchIn( slowCaseLabel = CodeLocationLabel(polymorphicStructureList->list[listIndex - 1].stubRoutine->code().code()); if (listIndex == POLYMORPHIC_LIST_CACHE_SIZE) - return GiveUpOnCache; + return false; } + StructureChain* chain = structure->prototypeChain(exec); RefPtr<JITStubRoutine> stubRoutine; { @@ -1532,20 +1346,27 @@ static InlineCacheAction tryRepatchIn( needToRestoreScratch = false; MacroAssembler::JumpList failureCases; - failureCases.append(branchStructure(stubJit, + failureCases.append(stubJit.branchPtr( MacroAssembler::NotEqual, - MacroAssembler::Address(baseGPR, JSCell::structureIDOffset()), - structure)); + MacroAssembler::Address(baseGPR, JSCell::structureOffset()), + MacroAssembler::TrustedImmPtr(structure))); CodeBlock* codeBlock = exec->codeBlock(); if (structure->typeInfo().newImpurePropertyFiresWatchpoints()) vm->registerWatchpointForImpureProperty(ident, stubInfo.addWatchpoint(codeBlock)); - if (slot.watchpointSet()) - slot.watchpointSet()->add(stubInfo.addWatchpoint(codeBlock)); - - checkObjectPropertyConditions( - conditionSet, exec->codeBlock(), stubInfo, stubJit, failureCases, scratchGPR); + Structure* currStructure = structure; + WriteBarrier<Structure>* it = chain->head(); + for (unsigned i = 0; i < count; ++i, ++it) { + JSObject* prototype = asObject(currStructure->prototypeForLookup(exec)); + Structure* protoStructure = prototype->structure(); + addStructureTransitionCheck( + prototype, protoStructure, exec->codeBlock(), stubInfo, stubJit, + failureCases, scratchGPR); + if (protoStructure->typeInfo().newImpurePropertyFiresWatchpoints()) + vm->registerWatchpointForImpureProperty(ident, stubInfo.addWatchpoint(codeBlock)); + currStructure = it->get(); + } #if USE(JSVALUE64) stubJit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(wasFound))), resultGPR); @@ -1557,15 +1378,13 @@ static InlineCacheAction tryRepatchIn( emitRestoreScratch(stubJit, needToRestoreScratch, scratchGPR, success, fail, failureCases); - LinkBuffer patchBuffer(*vm, stubJit, exec->codeBlock(), JITCompilationCanFail); - if (patchBuffer.didFailToAllocate()) - return GiveUpOnCache; - + LinkBuffer patchBuffer(*vm, &stubJit, exec->codeBlock()); + linkRestoreScratch(patchBuffer, needToRestoreScratch, success, fail, failureCases, successLabel, slowCaseLabel); - stubRoutine = FINALIZE_CODE_FOR_STUB( - exec->codeBlock(), patchBuffer, - ("In (found = %s) stub for %s, return point %p", + stubRoutine = FINALIZE_CODE_FOR_DFG_STUB( + patchBuffer, + ("DFG In (found = %s) stub for %s, return point %p", wasFound ? "yes" : "no", toCString(*exec->codeBlock()).data(), successLabel.executableAddress())); } @@ -1576,375 +1395,171 @@ static InlineCacheAction tryRepatchIn( RepatchBuffer repatchBuffer(codeBlock); repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), CodeLocationLabel(stubRoutine->code().code())); - return listIndex < (POLYMORPHIC_LIST_CACHE_SIZE - 1) ? 
RetryCacheLater : GiveUpOnCache; + return listIndex < (POLYMORPHIC_LIST_CACHE_SIZE - 1); } void repatchIn( ExecState* exec, JSCell* base, const Identifier& ident, bool wasFound, const PropertySlot& slot, StructureStubInfo& stubInfo) { - if (tryRepatchIn(exec, base, ident, wasFound, slot, stubInfo) == GiveUpOnCache) - repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, operationIn); -} - -static void linkSlowFor( - RepatchBuffer& repatchBuffer, VM*, CallLinkInfo& callLinkInfo, MacroAssemblerCodeRef codeRef) -{ - repatchBuffer.relink( - callLinkInfo.callReturnLocation(), codeRef.code()); -} - -static void linkSlowFor( - RepatchBuffer& repatchBuffer, VM* vm, CallLinkInfo& callLinkInfo, ThunkGenerator generator) -{ - linkSlowFor(repatchBuffer, vm, callLinkInfo, vm->getCTIStub(generator)); + if (tryRepatchIn(exec, base, ident, wasFound, slot, stubInfo)) + return; + repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, operationIn); } -static void linkSlowFor( - RepatchBuffer& repatchBuffer, VM* vm, CallLinkInfo& callLinkInfo) +static void linkSlowFor(RepatchBuffer& repatchBuffer, VM* vm, CallLinkInfo& callLinkInfo, CodeSpecializationKind kind) { - MacroAssemblerCodeRef virtualThunk = virtualThunkFor(vm, callLinkInfo); - linkSlowFor(repatchBuffer, vm, callLinkInfo, virtualThunk); - callLinkInfo.setSlowStub(createJITStubRoutine(virtualThunk, *vm, nullptr, true)); + if (kind == CodeForCall) { + repatchBuffer.relink(callLinkInfo.callReturnLocation, vm->getCTIStub(virtualCallThunkGenerator).code()); + return; + } + ASSERT(kind == CodeForConstruct); + repatchBuffer.relink(callLinkInfo.callReturnLocation, vm->getCTIStub(virtualConstructThunkGenerator).code()); } -void linkFor( - ExecState* exec, CallLinkInfo& callLinkInfo, CodeBlock* calleeCodeBlock, - JSFunction* callee, MacroAssemblerCodePtr codePtr) +void linkFor(ExecState* exec, CallLinkInfo& callLinkInfo, CodeBlock* calleeCodeBlock, JSFunction* callee, MacroAssemblerCodePtr codePtr, CodeSpecializationKind kind) { - ASSERT(!callLinkInfo.stub()); + ASSERT(!callLinkInfo.stub); + + // If you're being call-linked from a DFG caller then you obviously didn't get inlined. 
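+    // (Clearing m_shouldAlwaysBeInlined below records that evidence: a callee
+    //  being linked here was reached through an out-of-line call, so the DFG
+    //  should stop assuming every call to it gets inlined.)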
+ if (calleeCodeBlock) + calleeCodeBlock->m_shouldAlwaysBeInlined = false; CodeBlock* callerCodeBlock = exec->callerFrame()->codeBlock(); - VM* vm = callerCodeBlock->vm(); RepatchBuffer repatchBuffer(callerCodeBlock); ASSERT(!callLinkInfo.isLinked()); - callLinkInfo.setCallee(exec->callerFrame()->vm(), callLinkInfo.hotPathBegin(), callerCodeBlock->ownerExecutable(), callee); - callLinkInfo.setLastSeenCallee(exec->callerFrame()->vm(), callerCodeBlock->ownerExecutable(), callee); - if (shouldShowDisassemblyFor(callerCodeBlock)) - dataLog("Linking call in ", *callerCodeBlock, " at ", callLinkInfo.codeOrigin(), " to ", pointerDump(calleeCodeBlock), ", entrypoint at ", codePtr, "\n"); - repatchBuffer.relink(callLinkInfo.hotPathOther(), codePtr); + callLinkInfo.callee.set(exec->callerFrame()->vm(), callLinkInfo.hotPathBegin, callerCodeBlock->ownerExecutable(), callee); + callLinkInfo.lastSeenCallee.set(exec->callerFrame()->vm(), callerCodeBlock->ownerExecutable(), callee); + repatchBuffer.relink(callLinkInfo.hotPathOther, codePtr); if (calleeCodeBlock) calleeCodeBlock->linkIncomingCall(exec->callerFrame(), &callLinkInfo); - if (callLinkInfo.specializationKind() == CodeForCall) { - linkSlowFor( - repatchBuffer, vm, callLinkInfo, linkPolymorphicCallThunkGenerator); + if (kind == CodeForCall) { + repatchBuffer.relink(callLinkInfo.callReturnLocation, vm->getCTIStub(linkClosureCallThunkGenerator).code()); return; } - ASSERT(callLinkInfo.specializationKind() == CodeForConstruct); - linkSlowFor(repatchBuffer, vm, callLinkInfo); + ASSERT(kind == CodeForConstruct); + linkSlowFor(repatchBuffer, vm, callLinkInfo, CodeForConstruct); } -void linkSlowFor( - ExecState* exec, CallLinkInfo& callLinkInfo) +void linkSlowFor(ExecState* exec, CallLinkInfo& callLinkInfo, CodeSpecializationKind kind) { CodeBlock* callerCodeBlock = exec->callerFrame()->codeBlock(); VM* vm = callerCodeBlock->vm(); RepatchBuffer repatchBuffer(callerCodeBlock); - linkSlowFor(repatchBuffer, vm, callLinkInfo); + linkSlowFor(repatchBuffer, vm, callLinkInfo, kind); } -static void revertCall( - RepatchBuffer& repatchBuffer, VM* vm, CallLinkInfo& callLinkInfo, MacroAssemblerCodeRef codeRef) +void linkClosureCall(ExecState* exec, CallLinkInfo& callLinkInfo, CodeBlock* calleeCodeBlock, Structure* structure, ExecutableBase* executable, MacroAssemblerCodePtr codePtr) { - repatchBuffer.revertJumpReplacementToBranchPtrWithPatch( - RepatchBuffer::startOfBranchPtrWithPatchOnRegister(callLinkInfo.hotPathBegin()), - static_cast<MacroAssembler::RegisterID>(callLinkInfo.calleeGPR()), 0); - linkSlowFor(repatchBuffer, vm, callLinkInfo, codeRef); - callLinkInfo.clearSeen(); - callLinkInfo.clearCallee(); - callLinkInfo.clearStub(); - callLinkInfo.clearSlowStub(); - if (callLinkInfo.isOnList()) - callLinkInfo.remove(); -} - -void unlinkFor( - RepatchBuffer& repatchBuffer, CallLinkInfo& callLinkInfo) -{ - if (Options::showDisassembly()) - dataLog("Unlinking call from ", callLinkInfo.callReturnLocation(), " in request from ", pointerDump(repatchBuffer.codeBlock()), "\n"); + ASSERT(!callLinkInfo.stub); - VM* vm = repatchBuffer.codeBlock()->vm(); - revertCall(repatchBuffer, vm, callLinkInfo, vm->getCTIStub(linkCallThunkGenerator)); -} - -void linkVirtualFor( - ExecState* exec, CallLinkInfo& callLinkInfo) -{ CodeBlock* callerCodeBlock = exec->callerFrame()->codeBlock(); VM* vm = callerCodeBlock->vm(); - if (shouldShowDisassemblyFor(callerCodeBlock)) - dataLog("Linking virtual call at ", *callerCodeBlock, " ", exec->callerFrame()->codeOrigin(), "\n"); - - 
RepatchBuffer repatchBuffer(callerCodeBlock); - MacroAssemblerCodeRef virtualThunk = virtualThunkFor(vm, callLinkInfo); - revertCall(repatchBuffer, vm, callLinkInfo, virtualThunk); - callLinkInfo.setSlowStub(createJITStubRoutine(virtualThunk, *vm, nullptr, true)); -} - -namespace { -struct CallToCodePtr { - CCallHelpers::Call call; - MacroAssemblerCodePtr codePtr; -}; -} // annonymous namespace - -void linkPolymorphicCall( - ExecState* exec, CallLinkInfo& callLinkInfo, CallVariant newVariant) -{ - // Currently we can't do anything for non-function callees. - // https://bugs.webkit.org/show_bug.cgi?id=140685 - if (!newVariant || !newVariant.executable()) { - linkVirtualFor(exec, callLinkInfo); - return; - } - - CodeBlock* callerCodeBlock = exec->callerFrame()->codeBlock(); - VM* vm = callerCodeBlock->vm(); - - CallVariantList list; - if (PolymorphicCallStubRoutine* stub = callLinkInfo.stub()) - list = stub->variants(); - else if (JSFunction* oldCallee = callLinkInfo.callee()) - list = CallVariantList{ CallVariant(oldCallee) }; - - list = variantListWithVariant(list, newVariant); - - // If there are any closure calls then it makes sense to treat all of them as closure calls. - // This makes switching on callee cheaper. It also produces profiling that's easier on the DFG; - // the DFG doesn't really want to deal with a combination of closure and non-closure callees. - bool isClosureCall = false; - for (CallVariant variant : list) { - if (variant.isClosureCall()) { - list = despecifiedVariantList(list); - isClosureCall = true; - break; - } - } - - if (isClosureCall) - callLinkInfo.setHasSeenClosure(); - - Vector<PolymorphicCallCase> callCases; - - // Figure out what our cases are. - for (CallVariant variant : list) { - CodeBlock* codeBlock; - if (variant.executable()->isHostFunction()) - codeBlock = nullptr; - else { - codeBlock = jsCast<FunctionExecutable*>(variant.executable())->codeBlockForCall(); - - // If we cannot handle a callee, assume that it's better for this whole thing to be a - // virtual call. - if (exec->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters()) || callLinkInfo.callType() == CallLinkInfo::CallVarargs || callLinkInfo.callType() == CallLinkInfo::ConstructVarargs) { - linkVirtualFor(exec, callLinkInfo); - return; - } - } - - callCases.append(PolymorphicCallCase(variant, codeBlock)); - } - - // If we are over the limit, just use a normal virtual call. 
- unsigned maxPolymorphicCallVariantListSize; - if (callerCodeBlock->jitType() == JITCode::topTierJIT()) - maxPolymorphicCallVariantListSize = Options::maxPolymorphicCallVariantListSizeForTopTier(); - else - maxPolymorphicCallVariantListSize = Options::maxPolymorphicCallVariantListSize(); - if (list.size() > maxPolymorphicCallVariantListSize) { - linkVirtualFor(exec, callLinkInfo); - return; - } - - GPRReg calleeGPR = static_cast<GPRReg>(callLinkInfo.calleeGPR()); + GPRReg calleeGPR = static_cast<GPRReg>(callLinkInfo.calleeGPR); CCallHelpers stubJit(vm, callerCodeBlock); CCallHelpers::JumpList slowPath; - ptrdiff_t offsetToFrame = -sizeof(CallerFrameAndPC); - - if (!ASSERT_DISABLED) { - CCallHelpers::Jump okArgumentCount = stubJit.branch32( - CCallHelpers::Below, CCallHelpers::Address(CCallHelpers::stackPointerRegister, static_cast<ptrdiff_t>(sizeof(Register) * JSStack::ArgumentCount) + offsetToFrame + PayloadOffset), CCallHelpers::TrustedImm32(10000000)); - stubJit.abortWithReason(RepatchInsaneArgumentCount); - okArgumentCount.link(&stubJit); - } - - GPRReg scratch = AssemblyHelpers::selectScratchGPR(calleeGPR); - GPRReg comparisonValueGPR; - - if (isClosureCall) { - // Verify that we have a function and stash the executable in scratch. - #if USE(JSVALUE64) - // We can safely clobber everything except the calleeGPR. We can't rely on tagMaskRegister - // being set. So we do this the hard way. - stubJit.move(MacroAssembler::TrustedImm64(TagMask), scratch); - slowPath.append(stubJit.branchTest64(CCallHelpers::NonZero, calleeGPR, scratch)); + // We can safely clobber everything except the calleeGPR. We can't rely on tagMaskRegister + // being set. So we do this the hard way. + GPRReg scratch = AssemblyHelpers::selectScratchGPR(calleeGPR); + stubJit.move(MacroAssembler::TrustedImm64(TagMask), scratch); + slowPath.append(stubJit.branchTest64(CCallHelpers::NonZero, calleeGPR, scratch)); #else - // We would have already checked that the callee is a cell. + // We would have already checked that the callee is a cell. 
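+    // (On JSVALUE32_64 a non-cell callee always takes the caller's slow path,
+    //  so only cells reach this stub and the TagMask test above is
+    //  64-bit-only.)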
#endif - slowPath.append( - stubJit.branch8( - CCallHelpers::NotEqual, - CCallHelpers::Address(calleeGPR, JSCell::typeInfoTypeOffset()), - CCallHelpers::TrustedImm32(JSFunctionType))); + slowPath.append( + stubJit.branchPtr( + CCallHelpers::NotEqual, + CCallHelpers::Address(calleeGPR, JSCell::structureOffset()), + CCallHelpers::TrustedImmPtr(structure))); - stubJit.loadPtr( + slowPath.append( + stubJit.branchPtr( + CCallHelpers::NotEqual, CCallHelpers::Address(calleeGPR, JSFunction::offsetOfExecutable()), - scratch); - - comparisonValueGPR = scratch; - } else - comparisonValueGPR = calleeGPR; - - Vector<int64_t> caseValues(callCases.size()); - Vector<CallToCodePtr> calls(callCases.size()); - std::unique_ptr<uint32_t[]> fastCounts; + CCallHelpers::TrustedImmPtr(executable))); - if (callerCodeBlock->jitType() != JITCode::topTierJIT()) - fastCounts = std::make_unique<uint32_t[]>(callCases.size()); - - for (size_t i = 0; i < callCases.size(); ++i) { - if (fastCounts) - fastCounts[i] = 0; - - CallVariant variant = callCases[i].variant(); - int64_t newCaseValue; - if (isClosureCall) - newCaseValue = bitwise_cast<intptr_t>(variant.executable()); - else - newCaseValue = bitwise_cast<intptr_t>(variant.function()); - - if (!ASSERT_DISABLED) { - for (size_t j = 0; j < i; ++j) { - if (caseValues[j] != newCaseValue) - continue; - - dataLog("ERROR: Attempt to add duplicate case value.\n"); - dataLog("Existing case values: "); - CommaPrinter comma; - for (size_t k = 0; k < i; ++k) - dataLog(comma, caseValues[k]); - dataLog("\n"); - dataLog("Attempting to add: ", newCaseValue, "\n"); - dataLog("Variant list: ", listDump(callCases), "\n"); - RELEASE_ASSERT_NOT_REACHED(); - } - } - - caseValues[i] = newCaseValue; - } + stubJit.loadPtr( + CCallHelpers::Address(calleeGPR, JSFunction::offsetOfScopeChain()), + GPRInfo::returnValueGPR); + +#if USE(JSVALUE64) + stubJit.store64( + GPRInfo::returnValueGPR, + CCallHelpers::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register) * JSStack::ScopeChain))); +#else + stubJit.storePtr( + GPRInfo::returnValueGPR, + CCallHelpers::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register) * JSStack::ScopeChain) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload))); + stubJit.store32( + CCallHelpers::TrustedImm32(JSValue::CellTag), + CCallHelpers::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register) * JSStack::ScopeChain) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag))); +#endif - GPRReg fastCountsBaseGPR = - AssemblyHelpers::selectScratchGPR(calleeGPR, comparisonValueGPR, GPRInfo::regT3); - stubJit.move(CCallHelpers::TrustedImmPtr(fastCounts.get()), fastCountsBaseGPR); - - BinarySwitch binarySwitch(comparisonValueGPR, caseValues, BinarySwitch::IntPtr); - CCallHelpers::JumpList done; - while (binarySwitch.advance(stubJit)) { - size_t caseIndex = binarySwitch.caseIndex(); - - CallVariant variant = callCases[caseIndex].variant(); - - ASSERT(variant.executable()->hasJITCodeForCall()); - MacroAssemblerCodePtr codePtr = - variant.executable()->generatedJITCodeForCall()->addressForCall( - *vm, variant.executable(), ArityCheckNotRequired, callLinkInfo.registerPreservationMode()); - - if (fastCounts) { - stubJit.add32( - CCallHelpers::TrustedImm32(1), - CCallHelpers::Address(fastCountsBaseGPR, caseIndex * sizeof(uint32_t))); - } - calls[caseIndex].call = stubJit.nearCall(); - calls[caseIndex].codePtr = codePtr; - done.append(stubJit.jump()); - } + AssemblyHelpers::Call call = stubJit.nearCall(); + 
AssemblyHelpers::Jump done = stubJit.jump(); slowPath.link(&stubJit); - binarySwitch.fallThrough().link(&stubJit); stubJit.move(calleeGPR, GPRInfo::regT0); #if USE(JSVALUE32_64) stubJit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::regT1); #endif - stubJit.move(CCallHelpers::TrustedImmPtr(&callLinkInfo), GPRInfo::regT2); - stubJit.move(CCallHelpers::TrustedImmPtr(callLinkInfo.callReturnLocation().executableAddress()), GPRInfo::regT4); - - stubJit.restoreReturnAddressBeforeReturn(GPRInfo::regT4); + stubJit.move(CCallHelpers::TrustedImmPtr(callLinkInfo.callReturnLocation.executableAddress()), GPRInfo::nonArgGPR2); + stubJit.restoreReturnAddressBeforeReturn(GPRInfo::nonArgGPR2); AssemblyHelpers::Jump slow = stubJit.jump(); - - LinkBuffer patchBuffer(*vm, stubJit, callerCodeBlock, JITCompilationCanFail); - if (patchBuffer.didFailToAllocate()) { - linkVirtualFor(exec, callLinkInfo); - return; - } - RELEASE_ASSERT(callCases.size() == calls.size()); - for (CallToCodePtr callToCodePtr : calls) { - patchBuffer.link( - callToCodePtr.call, FunctionPtr(callToCodePtr.codePtr.executableAddress())); - } - if (JITCode::isOptimizingJIT(callerCodeBlock->jitType())) - patchBuffer.link(done, callLinkInfo.callReturnLocation().labelAtOffset(0)); - else - patchBuffer.link(done, callLinkInfo.hotPathOther().labelAtOffset(0)); - patchBuffer.link(slow, CodeLocationLabel(vm->getCTIStub(linkPolymorphicCallThunkGenerator).code())); - - RefPtr<PolymorphicCallStubRoutine> stubRoutine = adoptRef(new PolymorphicCallStubRoutine( - FINALIZE_CODE_FOR( - callerCodeBlock, patchBuffer, - ("Polymorphic call stub for %s, return point %p, targets %s", - toCString(*callerCodeBlock).data(), callLinkInfo.callReturnLocation().labelAtOffset(0).executableAddress(), - toCString(listDump(callCases)).data())), - *vm, callerCodeBlock->ownerExecutable(), exec->callerFrame(), callLinkInfo, callCases, - WTF::move(fastCounts))); + LinkBuffer patchBuffer(*vm, &stubJit, callerCodeBlock); + + patchBuffer.link(call, FunctionPtr(codePtr.executableAddress())); + patchBuffer.link(done, callLinkInfo.callReturnLocation.labelAtOffset(0)); + patchBuffer.link(slow, CodeLocationLabel(vm->getCTIStub(virtualCallThunkGenerator).code())); + + RefPtr<ClosureCallStubRoutine> stubRoutine = adoptRef(new ClosureCallStubRoutine( + FINALIZE_DFG_CODE( + patchBuffer, + ("DFG closure call stub for %s, return point %p, target %p (%s)", + toCString(*callerCodeBlock).data(), callLinkInfo.callReturnLocation.labelAtOffset(0).executableAddress(), + codePtr.executableAddress(), toCString(pointerDump(calleeCodeBlock)).data())), + *vm, callerCodeBlock->ownerExecutable(), structure, executable, callLinkInfo.codeOrigin)); RepatchBuffer repatchBuffer(callerCodeBlock); repatchBuffer.replaceWithJump( - RepatchBuffer::startOfBranchPtrWithPatchOnRegister(callLinkInfo.hotPathBegin()), + RepatchBuffer::startOfBranchPtrWithPatchOnRegister(callLinkInfo.hotPathBegin), CodeLocationLabel(stubRoutine->code().code())); - // The original slow path is unreachable on 64-bits, but still - // reachable on 32-bits since a non-cell callee will always - // trigger the slow path - linkSlowFor(repatchBuffer, vm, callLinkInfo); - - // If there had been a previous stub routine, that one will die as soon as the GC runs and sees - // that it's no longer on stack. - callLinkInfo.setStub(stubRoutine.release()); - - // The call link info no longer has a call cache apart from the jump to the polymorphic call - // stub. 
- if (callLinkInfo.isOnList()) - callLinkInfo.remove(); + linkSlowFor(repatchBuffer, vm, callLinkInfo, CodeForCall); + + callLinkInfo.stub = stubRoutine.release(); + + ASSERT(!calleeCodeBlock || calleeCodeBlock->isIncomingCallAlreadyLinked(&callLinkInfo)); } void resetGetByID(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo) { repatchCall(repatchBuffer, stubInfo.callReturnLocation, operationGetByIdOptimize); - CodeLocationDataLabel32 structureLabel = stubInfo.callReturnLocation.dataLabel32AtOffset(-(intptr_t)stubInfo.patch.deltaCheckImmToCall); - if (MacroAssembler::canJumpReplacePatchableBranch32WithPatch()) { - repatchBuffer.revertJumpReplacementToPatchableBranch32WithPatch( - RepatchBuffer::startOfPatchableBranch32WithPatchOnAddress(structureLabel), + CodeLocationDataLabelPtr structureLabel = stubInfo.callReturnLocation.dataLabelPtrAtOffset(-(intptr_t)stubInfo.patch.deltaCheckImmToCall); + if (MacroAssembler::canJumpReplacePatchableBranchPtrWithPatch()) { + repatchBuffer.revertJumpReplacementToPatchableBranchPtrWithPatch( + RepatchBuffer::startOfPatchableBranchPtrWithPatchOnAddress(structureLabel), MacroAssembler::Address( static_cast<MacroAssembler::RegisterID>(stubInfo.patch.baseGPR), - JSCell::structureIDOffset()), - static_cast<int32_t>(unusedPointer)); + JSCell::structureOffset()), + reinterpret_cast<void*>(unusedPointer)); } - repatchBuffer.repatch(structureLabel, static_cast<int32_t>(unusedPointer)); + repatchBuffer.repatch(structureLabel, reinterpret_cast<void*>(unusedPointer)); #if USE(JSVALUE64) repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToLoadOrStore), 0); #else @@ -1969,16 +1584,16 @@ void resetPutByID(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo) optimizedFunction = operationPutByIdDirectNonStrictOptimize; } repatchCall(repatchBuffer, stubInfo.callReturnLocation, optimizedFunction); - CodeLocationDataLabel32 structureLabel = stubInfo.callReturnLocation.dataLabel32AtOffset(-(intptr_t)stubInfo.patch.deltaCheckImmToCall); - if (MacroAssembler::canJumpReplacePatchableBranch32WithPatch()) { - repatchBuffer.revertJumpReplacementToPatchableBranch32WithPatch( - RepatchBuffer::startOfPatchableBranch32WithPatchOnAddress(structureLabel), + CodeLocationDataLabelPtr structureLabel = stubInfo.callReturnLocation.dataLabelPtrAtOffset(-(intptr_t)stubInfo.patch.deltaCheckImmToCall); + if (MacroAssembler::canJumpReplacePatchableBranchPtrWithPatch()) { + repatchBuffer.revertJumpReplacementToPatchableBranchPtrWithPatch( + RepatchBuffer::startOfPatchableBranchPtrWithPatchOnAddress(structureLabel), MacroAssembler::Address( static_cast<MacroAssembler::RegisterID>(stubInfo.patch.baseGPR), - JSCell::structureIDOffset()), - static_cast<int32_t>(unusedPointer)); + JSCell::structureOffset()), + reinterpret_cast<void*>(unusedPointer)); } - repatchBuffer.repatch(structureLabel, static_cast<int32_t>(unusedPointer)); + repatchBuffer.repatch(structureLabel, reinterpret_cast<void*>(unusedPointer)); #if USE(JSVALUE64) repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToLoadOrStore), 0); #else diff --git a/Source/JavaScriptCore/jit/Repatch.h b/Source/JavaScriptCore/jit/Repatch.h index 02bade1ff..faa787613 100644 --- a/Source/JavaScriptCore/jit/Repatch.h +++ b/Source/JavaScriptCore/jit/Repatch.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2015 Apple Inc. All rights reserved. + * Copyright (C) 2011 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,10 +26,11 @@ #ifndef Repatch_h #define Repatch_h +#include <wtf/Platform.h> + #if ENABLE(JIT) #include "CCallHelpers.h" -#include "CallVariant.h" #include "JITOperations.h" namespace JSC { @@ -37,14 +38,12 @@ namespace JSC { void repatchGetByID(ExecState*, JSValue, const Identifier&, const PropertySlot&, StructureStubInfo&); void buildGetByIDList(ExecState*, JSValue, const Identifier&, const PropertySlot&, StructureStubInfo&); void buildGetByIDProtoList(ExecState*, JSValue, const Identifier&, const PropertySlot&, StructureStubInfo&); -void repatchPutByID(ExecState*, JSValue, Structure*, const Identifier&, const PutPropertySlot&, StructureStubInfo&, PutKind); -void buildPutByIdList(ExecState*, JSValue, Structure*, const Identifier&, const PutPropertySlot&, StructureStubInfo&, PutKind); +void repatchPutByID(ExecState*, JSValue, const Identifier&, const PutPropertySlot&, StructureStubInfo&, PutKind); +void buildPutByIdList(ExecState*, JSValue, const Identifier&, const PutPropertySlot&, StructureStubInfo&, PutKind); void repatchIn(ExecState*, JSCell*, const Identifier&, bool wasFound, const PropertySlot&, StructureStubInfo&); -void linkFor(ExecState*, CallLinkInfo&, CodeBlock*, JSFunction* callee, MacroAssemblerCodePtr); -void linkSlowFor(ExecState*, CallLinkInfo&); -void unlinkFor(RepatchBuffer&, CallLinkInfo&); -void linkVirtualFor(ExecState*, CallLinkInfo&); -void linkPolymorphicCall(ExecState*, CallLinkInfo&, CallVariant); +void linkFor(ExecState*, CallLinkInfo&, CodeBlock*, JSFunction* callee, MacroAssemblerCodePtr, CodeSpecializationKind); +void linkSlowFor(ExecState*, CallLinkInfo&, CodeSpecializationKind); +void linkClosureCall(ExecState*, CallLinkInfo&, CodeBlock*, Structure*, ExecutableBase*, MacroAssemblerCodePtr); void resetGetByID(RepatchBuffer&, StructureStubInfo&); void resetPutByID(RepatchBuffer&, StructureStubInfo&); void resetIn(RepatchBuffer&, StructureStubInfo&); diff --git a/Source/JavaScriptCore/jit/ScratchRegisterAllocator.cpp b/Source/JavaScriptCore/jit/ScratchRegisterAllocator.cpp deleted file mode 100644 index b99f9400b..000000000 --- a/Source/JavaScriptCore/jit/ScratchRegisterAllocator.cpp +++ /dev/null @@ -1,208 +0,0 @@ -/* - * Copyright (C) 2014 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "config.h" -#include "ScratchRegisterAllocator.h" - -#if ENABLE(JIT) - -#include "JSCInlines.h" -#include "VM.h" - -namespace JSC { - -ScratchRegisterAllocator::ScratchRegisterAllocator(const RegisterSet& usedRegisters) - : m_usedRegisters(usedRegisters) - , m_numberOfReusedRegisters(0) -{ -} - -ScratchRegisterAllocator::~ScratchRegisterAllocator() { } - -void ScratchRegisterAllocator::lock(GPRReg reg) -{ - unsigned index = GPRInfo::toIndex(reg); - if (index == GPRInfo::InvalidIndex) - return; - m_lockedRegisters.setGPRByIndex(index); -} - -void ScratchRegisterAllocator::lock(FPRReg reg) -{ - unsigned index = FPRInfo::toIndex(reg); - if (index == FPRInfo::InvalidIndex) - return; - m_lockedRegisters.setFPRByIndex(index); -} - -template<typename BankInfo> -typename BankInfo::RegisterType ScratchRegisterAllocator::allocateScratch() -{ - // First try to allocate a register that is totally free. - for (unsigned i = 0; i < BankInfo::numberOfRegisters; ++i) { - typename BankInfo::RegisterType reg = BankInfo::toRegister(i); - if (!m_lockedRegisters.get(reg) - && !m_usedRegisters.get(reg) - && !m_scratchRegisters.get(reg)) { - m_scratchRegisters.set(reg); - return reg; - } - } - - // Since that failed, try to allocate a register that is not yet - // locked or used for scratch. - for (unsigned i = 0; i < BankInfo::numberOfRegisters; ++i) { - typename BankInfo::RegisterType reg = BankInfo::toRegister(i); - if (!m_lockedRegisters.get(reg) && !m_scratchRegisters.get(reg)) { - m_scratchRegisters.set(reg); - m_numberOfReusedRegisters++; - return reg; - } - } - - // We failed. - CRASH(); - // Make some silly compilers happy. 
- return static_cast<typename BankInfo::RegisterType>(-1); -} - -GPRReg ScratchRegisterAllocator::allocateScratchGPR() { return allocateScratch<GPRInfo>(); } -FPRReg ScratchRegisterAllocator::allocateScratchFPR() { return allocateScratch<FPRInfo>(); } - -void ScratchRegisterAllocator::preserveReusedRegistersByPushing(MacroAssembler& jit) -{ - if (!didReuseRegisters()) - return; - - for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) { - FPRReg reg = FPRInfo::toRegister(i); - if (m_scratchRegisters.getFPRByIndex(i) && m_usedRegisters.get(reg)) - jit.pushToSave(reg); - } - for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) { - GPRReg reg = GPRInfo::toRegister(i); - if (m_scratchRegisters.getGPRByIndex(i) && m_usedRegisters.get(reg)) - jit.pushToSave(reg); - } -} - -void ScratchRegisterAllocator::restoreReusedRegistersByPopping(MacroAssembler& jit) -{ - if (!didReuseRegisters()) - return; - - for (unsigned i = GPRInfo::numberOfRegisters; i--;) { - GPRReg reg = GPRInfo::toRegister(i); - if (m_scratchRegisters.getGPRByIndex(i) && m_usedRegisters.get(reg)) - jit.popToRestore(reg); - } - for (unsigned i = FPRInfo::numberOfRegisters; i--;) { - FPRReg reg = FPRInfo::toRegister(i); - if (m_scratchRegisters.getFPRByIndex(i) && m_usedRegisters.get(reg)) - jit.popToRestore(reg); - } -} - -RegisterSet ScratchRegisterAllocator::usedRegistersForCall() const -{ - RegisterSet result = m_usedRegisters; - result.exclude(RegisterSet::calleeSaveRegisters()); - result.exclude(RegisterSet::stackRegisters()); - result.exclude(RegisterSet::reservedHardwareRegisters()); - return result; -} - -unsigned ScratchRegisterAllocator::desiredScratchBufferSizeForCall() const -{ - return usedRegistersForCall().numberOfSetRegisters() * sizeof(JSValue); -} - -void ScratchRegisterAllocator::preserveUsedRegistersToScratchBufferForCall(MacroAssembler& jit, ScratchBuffer* scratchBuffer, GPRReg scratchGPR) -{ - RegisterSet usedRegisters = usedRegistersForCall(); - if (!usedRegisters.numberOfSetRegisters()) - return; - - unsigned count = 0; - for (GPRReg reg = MacroAssembler::firstRegister(); reg <= MacroAssembler::lastRegister(); reg = MacroAssembler::nextRegister(reg)) { - if (usedRegisters.get(reg)) - jit.storePtr(reg, static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++)); - if (GPRInfo::toIndex(reg) != GPRInfo::InvalidIndex - && scratchGPR == InvalidGPRReg - && !m_lockedRegisters.get(reg) && !m_scratchRegisters.get(reg)) - scratchGPR = reg; - } - RELEASE_ASSERT(scratchGPR != InvalidGPRReg); - for (FPRReg reg = MacroAssembler::firstFPRegister(); reg <= MacroAssembler::lastFPRegister(); reg = MacroAssembler::nextFPRegister(reg)) { - if (usedRegisters.get(reg)) { - jit.move(MacroAssembler::TrustedImmPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++)), scratchGPR); - jit.storeDouble(reg, scratchGPR); - } - } - RELEASE_ASSERT(count * sizeof(JSValue) == desiredScratchBufferSizeForCall()); - - jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->activeLengthPtr()), scratchGPR); - jit.storePtr(MacroAssembler::TrustedImmPtr(static_cast<size_t>(count * sizeof(JSValue))), scratchGPR); -} - -void ScratchRegisterAllocator::restoreUsedRegistersFromScratchBufferForCall(MacroAssembler& jit, ScratchBuffer* scratchBuffer, GPRReg scratchGPR) -{ - RegisterSet usedRegisters = usedRegistersForCall(); - if (!usedRegisters.numberOfSetRegisters()) - return; - - if (scratchGPR == InvalidGPRReg) { - // Find a scratch register. 
- for (unsigned i = GPRInfo::numberOfRegisters; i--;) { - if (m_lockedRegisters.getGPRByIndex(i) || m_scratchRegisters.getGPRByIndex(i)) - continue; - scratchGPR = GPRInfo::toRegister(i); - break; - } - } - RELEASE_ASSERT(scratchGPR != InvalidGPRReg); - - jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->activeLengthPtr()), scratchGPR); - jit.storePtr(MacroAssembler::TrustedImmPtr(0), scratchGPR); - - // Restore double registers first. - unsigned count = usedRegisters.numberOfSetGPRs(); - for (FPRReg reg = MacroAssembler::firstFPRegister(); reg <= MacroAssembler::lastFPRegister(); reg = MacroAssembler::nextFPRegister(reg)) { - if (usedRegisters.get(reg)) { - jit.move(MacroAssembler::TrustedImmPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++)), scratchGPR); - jit.loadDouble(scratchGPR, reg); - } - } - - count = 0; - for (GPRReg reg = MacroAssembler::firstRegister(); reg <= MacroAssembler::lastRegister(); reg = MacroAssembler::nextRegister(reg)) { - if (usedRegisters.get(reg)) - jit.loadPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++), reg); - } -} - -} // namespace JSC - -#endif // ENABLE(JIT) diff --git a/Source/JavaScriptCore/jit/ScratchRegisterAllocator.h b/Source/JavaScriptCore/jit/ScratchRegisterAllocator.h index f6b94f6c8..1967226c5 100644 --- a/Source/JavaScriptCore/jit/ScratchRegisterAllocator.h +++ b/Source/JavaScriptCore/jit/ScratchRegisterAllocator.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012, 2014 Apple Inc. All rights reserved. + * Copyright (C) 2012 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,31 +26,73 @@ #ifndef ScratchRegisterAllocator_h #define ScratchRegisterAllocator_h +#include <wtf/Platform.h> + #if ENABLE(JIT) #include "MacroAssembler.h" -#include "RegisterSet.h" #include "TempRegisterSet.h" namespace JSC { -struct ScratchBuffer; - // This class provides a low-level register allocator for use in stubs. class ScratchRegisterAllocator { public: - ScratchRegisterAllocator(const RegisterSet& usedRegisters); - ~ScratchRegisterAllocator(); + ScratchRegisterAllocator(const TempRegisterSet& usedRegisters) + : m_usedRegisters(usedRegisters) + , m_numberOfReusedRegisters(0) + { + } - void lock(GPRReg reg); - void lock(FPRReg reg); + void lock(GPRReg reg) + { + unsigned index = GPRInfo::toIndex(reg); + if (index == GPRInfo::InvalidIndex) + return; + m_lockedRegisters.setGPRByIndex(index); + } + void lock(FPRReg reg) + { + unsigned index = FPRInfo::toIndex(reg); + if (index == FPRInfo::InvalidIndex) + return; + m_lockedRegisters.setFPRByIndex(index); + } template<typename BankInfo> - typename BankInfo::RegisterType allocateScratch(); + typename BankInfo::RegisterType allocateScratch() + { + // First try to allocate a register that is totally free. + for (unsigned i = 0; i < BankInfo::numberOfRegisters; ++i) { + typename BankInfo::RegisterType reg = BankInfo::toRegister(i); + if (!m_lockedRegisters.get(reg) + && !m_usedRegisters.get(reg) + && !m_scratchRegisters.get(reg)) { + m_scratchRegisters.set(reg); + return reg; + } + } + + // Since that failed, try to allocate a register that is not yet + // locked or used for scratch. 
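+        // (Registers handed out by this second pass are "reused": they are
+        //  live in the compiled code, so preserveReusedRegistersByPushing
+        //  must spill them before the stub body runs and
+        //  restoreReusedRegistersByPopping must bring them back;
+        //  m_numberOfReusedRegisters counts how many were taken this way.)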
+ for (unsigned i = 0; i < BankInfo::numberOfRegisters; ++i) { + typename BankInfo::RegisterType reg = BankInfo::toRegister(i); + if (!m_lockedRegisters.get(reg) && !m_scratchRegisters.get(reg)) { + m_scratchRegisters.set(reg); + m_numberOfReusedRegisters++; + return reg; + } + } + + // We failed. + CRASH(); + // Make some silly compilers happy. + return static_cast<typename BankInfo::RegisterType>(-1); + } - GPRReg allocateScratchGPR(); - FPRReg allocateScratchFPR(); + GPRReg allocateScratchGPR() { return allocateScratch<GPRInfo>(); } + FPRReg allocateScratchFPR() { return allocateScratch<FPRInfo>(); } bool didReuseRegisters() const { @@ -62,19 +104,104 @@ public: return m_numberOfReusedRegisters; } - void preserveReusedRegistersByPushing(MacroAssembler& jit); - void restoreReusedRegistersByPopping(MacroAssembler& jit); + void preserveReusedRegistersByPushing(MacroAssembler& jit) + { + if (!didReuseRegisters()) + return; + + for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) { + if (m_scratchRegisters.getFPRByIndex(i) && m_usedRegisters.getFPRByIndex(i)) + jit.pushToSave(FPRInfo::toRegister(i)); + } + for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) { + if (m_scratchRegisters.getGPRByIndex(i) && m_usedRegisters.getGPRByIndex(i)) + jit.pushToSave(GPRInfo::toRegister(i)); + } + } - RegisterSet usedRegistersForCall() const; + void restoreReusedRegistersByPopping(MacroAssembler& jit) + { + if (!didReuseRegisters()) + return; + + for (unsigned i = GPRInfo::numberOfRegisters; i--;) { + if (m_scratchRegisters.getGPRByIndex(i) && m_usedRegisters.getGPRByIndex(i)) + jit.popToRestore(GPRInfo::toRegister(i)); + } + for (unsigned i = FPRInfo::numberOfRegisters; i--;) { + if (m_scratchRegisters.getFPRByIndex(i) && m_usedRegisters.getFPRByIndex(i)) + jit.popToRestore(FPRInfo::toRegister(i)); + } + } - unsigned desiredScratchBufferSizeForCall() const; + unsigned desiredScratchBufferSize() const { return m_usedRegisters.numberOfSetRegisters() * sizeof(JSValue); } - void preserveUsedRegistersToScratchBufferForCall(MacroAssembler& jit, ScratchBuffer* scratchBuffer, GPRReg scratchGPR = InvalidGPRReg); + void preserveUsedRegistersToScratchBuffer(MacroAssembler& jit, ScratchBuffer* scratchBuffer, GPRReg scratchGPR = InvalidGPRReg) + { + unsigned count = 0; + for (unsigned i = GPRInfo::numberOfRegisters; i--;) { + if (m_usedRegisters.getGPRByIndex(i)) { +#if USE(JSVALUE64) + jit.store64(GPRInfo::toRegister(i), static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++)); +#else + jit.store32(GPRInfo::toRegister(i), static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++)); +#endif + } + if (scratchGPR == InvalidGPRReg && !m_lockedRegisters.getGPRByIndex(i) && !m_scratchRegisters.getGPRByIndex(i)) + scratchGPR = GPRInfo::toRegister(i); + } + RELEASE_ASSERT(scratchGPR != InvalidGPRReg); + for (unsigned i = FPRInfo::numberOfRegisters; i--;) { + if (m_usedRegisters.getFPRByIndex(i)) { + jit.move(MacroAssembler::TrustedImmPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++)), scratchGPR); + jit.storeDouble(FPRInfo::toRegister(i), scratchGPR); + } + } + RELEASE_ASSERT(count * sizeof(JSValue) == desiredScratchBufferSize()); + + jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->activeLengthPtr()), scratchGPR); + jit.storePtr(MacroAssembler::TrustedImmPtr(static_cast<size_t>(count * sizeof(JSValue))), scratchGPR); + } - void restoreUsedRegistersFromScratchBufferForCall(MacroAssembler& jit, ScratchBuffer* scratchBuffer, GPRReg scratchGPR = InvalidGPRReg); + 
void restoreUsedRegistersFromScratchBuffer(MacroAssembler& jit, ScratchBuffer* scratchBuffer, GPRReg scratchGPR = InvalidGPRReg) + { + if (scratchGPR == InvalidGPRReg) { + // Find a scratch register. + for (unsigned i = GPRInfo::numberOfRegisters; i--;) { + if (m_lockedRegisters.getGPRByIndex(i) || m_scratchRegisters.getGPRByIndex(i)) + continue; + scratchGPR = GPRInfo::toRegister(i); + break; + } + } + RELEASE_ASSERT(scratchGPR != InvalidGPRReg); + + jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->activeLengthPtr()), scratchGPR); + jit.storePtr(MacroAssembler::TrustedImmPtr(0), scratchGPR); + + // Restore double registers first. + unsigned count = m_usedRegisters.numberOfSetGPRs(); + for (unsigned i = FPRInfo::numberOfRegisters; i--;) { + if (m_usedRegisters.getFPRByIndex(i)) { + jit.move(MacroAssembler::TrustedImmPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++)), scratchGPR); + jit.loadDouble(scratchGPR, FPRInfo::toRegister(i)); + } + } + + count = 0; + for (unsigned i = GPRInfo::numberOfRegisters; i--;) { + if (m_usedRegisters.getGPRByIndex(i)) { +#if USE(JSVALUE64) + jit.load64(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++), GPRInfo::toRegister(i)); +#else + jit.load32(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++), GPRInfo::toRegister(i)); +#endif + } + } + } private: - RegisterSet m_usedRegisters; + TempRegisterSet m_usedRegisters; TempRegisterSet m_lockedRegisters; TempRegisterSet m_scratchRegisters; unsigned m_numberOfReusedRegisters; @@ -85,3 +212,4 @@ private: #endif // ENABLE(JIT) #endif // ScratchRegisterAllocator_h + diff --git a/Source/JavaScriptCore/jit/SetupVarargsFrame.cpp b/Source/JavaScriptCore/jit/SetupVarargsFrame.cpp deleted file mode 100644 index 9e133ccda..000000000 --- a/Source/JavaScriptCore/jit/SetupVarargsFrame.cpp +++ /dev/null @@ -1,134 +0,0 @@ -/* - * Copyright (C) 2015 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- */ - -#include "config.h" -#include "SetupVarargsFrame.h" - -#if ENABLE(JIT) - -#include "Interpreter.h" -#include "JSCInlines.h" -#include "StackAlignment.h" - -namespace JSC { - -void emitSetVarargsFrame(CCallHelpers& jit, GPRReg lengthGPR, bool lengthIncludesThis, GPRReg numUsedSlotsGPR, GPRReg resultGPR) -{ - jit.move(numUsedSlotsGPR, resultGPR); - jit.addPtr(lengthGPR, resultGPR); - jit.addPtr(CCallHelpers::TrustedImm32(JSStack::CallFrameHeaderSize + (lengthIncludesThis? 0 : 1)), resultGPR); - - // resultGPR now has the required frame size in Register units - // Round resultGPR to next multiple of stackAlignmentRegisters() - jit.addPtr(CCallHelpers::TrustedImm32(stackAlignmentRegisters() - 1), resultGPR); - jit.andPtr(CCallHelpers::TrustedImm32(~(stackAlignmentRegisters() - 1)), resultGPR); - - // Now resultGPR has the right stack frame offset in Register units. - jit.negPtr(resultGPR); - jit.lshiftPtr(CCallHelpers::Imm32(3), resultGPR); - jit.addPtr(GPRInfo::callFrameRegister, resultGPR); -} - -void emitSetupVarargsFrameFastCase(CCallHelpers& jit, GPRReg numUsedSlotsGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, GPRReg scratchGPR3, ValueRecovery argCountRecovery, VirtualRegister firstArgumentReg, unsigned firstVarArgOffset, CCallHelpers::JumpList& slowCase) -{ - CCallHelpers::JumpList end; - - if (argCountRecovery.isConstant()) { - // FIXME: We could constant-fold a lot of the computation below in this case. - // https://bugs.webkit.org/show_bug.cgi?id=141486 - jit.move(CCallHelpers::TrustedImm32(argCountRecovery.constant().asInt32()), scratchGPR1); - } else - jit.load32(CCallHelpers::payloadFor(argCountRecovery.virtualRegister()), scratchGPR1); - if (firstVarArgOffset) { - CCallHelpers::Jump sufficientArguments = jit.branch32(CCallHelpers::GreaterThan, scratchGPR1, CCallHelpers::TrustedImm32(firstVarArgOffset + 1)); - jit.move(CCallHelpers::TrustedImm32(1), scratchGPR1); - CCallHelpers::Jump endVarArgs = jit.jump(); - sufficientArguments.link(&jit); - jit.sub32(CCallHelpers::TrustedImm32(firstVarArgOffset), scratchGPR1); - endVarArgs.link(&jit); - } - slowCase.append(jit.branch32(CCallHelpers::Above, scratchGPR1, CCallHelpers::TrustedImm32(maxArguments + 1))); - - emitSetVarargsFrame(jit, scratchGPR1, true, numUsedSlotsGPR, scratchGPR2); - - slowCase.append(jit.branchPtr(CCallHelpers::Above, CCallHelpers::AbsoluteAddress(jit.vm()->addressOfStackLimit()), scratchGPR2)); - - // Initialize ArgumentCount. - jit.store32(scratchGPR1, CCallHelpers::Address(scratchGPR2, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset)); - - // Copy arguments. 
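// The loop that follows copies the arguments downward, one JSValue per iteration,
// using BaseIndex addressing (base + index * 8). A condensed sketch of the 64-bit
// iteration, assuming scratchGPR1 counts down from the argument count and scratchGPR2
// points at the new frame:
//
//   copyLoop:
//       scratchGPR3 = load64(callFrameRegister + scratchGPR1 * 8 + argOffset)
//       store64(scratchGPR3, scratchGPR2 + scratchGPR1 * 8 + thisArgumentOffset)
//       if (--scratchGPR1) goto copyLoop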
- jit.signExtend32ToPtr(scratchGPR1, scratchGPR1); - CCallHelpers::Jump done = jit.branchSubPtr(CCallHelpers::Zero, CCallHelpers::TrustedImm32(1), scratchGPR1); - // scratchGPR1: argumentCount - - CCallHelpers::Label copyLoop = jit.label(); - int argOffset = (firstArgumentReg.offset() - 1 + firstVarArgOffset) * static_cast<int>(sizeof(Register)); -#if USE(JSVALUE64) - jit.load64(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight, argOffset), scratchGPR3); - jit.store64(scratchGPR3, CCallHelpers::BaseIndex(scratchGPR2, scratchGPR1, CCallHelpers::TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))); -#else // USE(JSVALUE64), so this begins the 32-bit case - jit.load32(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight, argOffset + TagOffset), scratchGPR3); - jit.store32(scratchGPR3, CCallHelpers::BaseIndex(scratchGPR2, scratchGPR1, CCallHelpers::TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)) + TagOffset)); - jit.load32(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight, argOffset + PayloadOffset), scratchGPR3); - jit.store32(scratchGPR3, CCallHelpers::BaseIndex(scratchGPR2, scratchGPR1, CCallHelpers::TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)) + PayloadOffset)); -#endif // USE(JSVALUE64), end of 32-bit case - jit.branchSubPtr(CCallHelpers::NonZero, CCallHelpers::TrustedImm32(1), scratchGPR1).linkTo(copyLoop, &jit); - - done.link(&jit); -} - -void emitSetupVarargsFrameFastCase(CCallHelpers& jit, GPRReg numUsedSlotsGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, GPRReg scratchGPR3, unsigned firstVarArgOffset, CCallHelpers::JumpList& slowCase) -{ - emitSetupVarargsFrameFastCase(jit, numUsedSlotsGPR, scratchGPR1, scratchGPR2, scratchGPR3, nullptr, firstVarArgOffset, slowCase); -} - -void emitSetupVarargsFrameFastCase(CCallHelpers& jit, GPRReg numUsedSlotsGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, GPRReg scratchGPR3, InlineCallFrame* inlineCallFrame, unsigned firstVarArgOffset, CCallHelpers::JumpList& slowCase) -{ - ValueRecovery argumentCountRecovery; - VirtualRegister firstArgumentReg; - if (inlineCallFrame) { - if (inlineCallFrame->isVarargs()) { - argumentCountRecovery = ValueRecovery::displacedInJSStack( - inlineCallFrame->argumentCountRegister, DataFormatInt32); - } else { - argumentCountRecovery = ValueRecovery::constant( - jsNumber(inlineCallFrame->arguments.size())); - } - if (inlineCallFrame->arguments.size() > 1) - firstArgumentReg = inlineCallFrame->arguments[1].virtualRegister(); - else - firstArgumentReg = VirtualRegister(0); - } else { - argumentCountRecovery = ValueRecovery::displacedInJSStack( - VirtualRegister(JSStack::ArgumentCount), DataFormatInt32); - firstArgumentReg = VirtualRegister(CallFrame::argumentOffset(0)); - } - emitSetupVarargsFrameFastCase(jit, numUsedSlotsGPR, scratchGPR1, scratchGPR2, scratchGPR3, argumentCountRecovery, firstArgumentReg, firstVarArgOffset, slowCase); -} - -} // namespace JSC - -#endif // ENABLE(JIT) - diff --git a/Source/JavaScriptCore/jit/SetupVarargsFrame.h b/Source/JavaScriptCore/jit/SetupVarargsFrame.h deleted file mode 100644 index 0e8933a29..000000000 --- a/Source/JavaScriptCore/jit/SetupVarargsFrame.h +++ /dev/null @@ -1,53 +0,0 @@ -/* - * Copyright (C) 2015 Apple Inc. All rights reserved. 
- * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef SetupVarargsFrame_h -#define SetupVarargsFrame_h - -#if ENABLE(JIT) - -#include "CCallHelpers.h" -#include "VirtualRegister.h" - -namespace JSC { - -void emitSetVarargsFrame(CCallHelpers&, GPRReg lengthGPR, bool lengthIncludesThis, GPRReg numUsedSlotsGPR, GPRReg resultGPR); - -// Assumes that SP refers to the last in-use stack location, and after this returns SP will point to -// the newly created frame plus the native header. scratchGPR2 may be the same as numUsedSlotsGPR. -void emitSetupVarargsFrameFastCase(CCallHelpers&, GPRReg numUsedSlotsGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, GPRReg scratchGPR3, ValueRecovery argCountRecovery, VirtualRegister firstArgumentReg, unsigned firstVarArgOffset, CCallHelpers::JumpList& slowCase); - -// Variant that assumes normal stack frame. -void emitSetupVarargsFrameFastCase(CCallHelpers&, GPRReg numUsedSlotsGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, GPRReg scratchGPR3, unsigned firstVarArgOffset, CCallHelpers::JumpList& slowCase); - -// Variant for potentially inlined stack frames. 
-void emitSetupVarargsFrameFastCase(CCallHelpers&, GPRReg numUsedSlotsGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, GPRReg scratchGPR3, InlineCallFrame*, unsigned firstVarArgOffset, CCallHelpers::JumpList& slowCase); - -} // namespace JSC - -#endif // ENABLE(JIT) - -#endif // SetupVarargsFrame_h - diff --git a/Source/JavaScriptCore/jit/SlowPathCall.h b/Source/JavaScriptCore/jit/SlowPathCall.h index 55da60cd0..f0aa28e83 100644 --- a/Source/JavaScriptCore/jit/SlowPathCall.h +++ b/Source/JavaScriptCore/jit/SlowPathCall.h @@ -45,7 +45,7 @@ public: JIT::Call call() { #if ENABLE(OPCODE_SAMPLING) - if (m_jit->m_bytecodeOffset != std::numeric_limits<unsigned>::max()) + if (m_jit->m_bytecodeOffset != (unsigned)-1) m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeOffset, true); #endif m_jit->updateTopCallFrame(); @@ -73,7 +73,7 @@ public: #endif #if ENABLE(OPCODE_SAMPLING) - if (m_jit->m_bytecodeOffset != std::numeric_limits<unsigned>::max()) + if (m_jit->m_bytecodeOffset != (unsigned)-1) m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeOffset, false); #endif diff --git a/Source/JavaScriptCore/jit/SpecializedThunkJIT.h b/Source/JavaScriptCore/jit/SpecializedThunkJIT.h index df27face9..6ec1e71a7 100644 --- a/Source/JavaScriptCore/jit/SpecializedThunkJIT.h +++ b/Source/JavaScriptCore/jit/SpecializedThunkJIT.h @@ -29,8 +29,6 @@ #if ENABLE(JIT) #include "Executable.h" -#include "JIT.h" -#include "JITInlines.h" #include "JSInterfaceJIT.h" #include "JSStack.h" #include "LinkBuffer.h" @@ -43,7 +41,6 @@ namespace JSC { SpecializedThunkJIT(VM* vm, int expectedArgCount) : JSInterfaceJIT(vm) { - emitFunctionPrologue(); // Check that we have the expected number of arguments m_failures.append(branch32(NotEqual, payloadFor(JSStack::ArgumentCount), TrustedImm32(expectedArgCount + 1))); } @@ -51,7 +48,6 @@ namespace JSC { explicit SpecializedThunkJIT(VM* vm) : JSInterfaceJIT(vm) { - emitFunctionPrologue(); } void loadDoubleArgument(int argument, FPRegisterID dst, RegisterID scratch) @@ -69,18 +65,14 @@ namespace JSC { void loadJSStringArgument(VM& vm, int argument, RegisterID dst) { loadCellArgument(argument, dst); - m_failures.append(branchStructure(*this, NotEqual, - Address(dst, JSCell::structureIDOffset()), - vm.stringStructure.get())); + m_failures.append(branchPtr(NotEqual, Address(dst, JSCell::structureOffset()), TrustedImmPtr(vm.stringStructure.get()))); } void loadArgumentWithSpecificClass(const ClassInfo* classInfo, int argument, RegisterID dst, RegisterID scratch) { loadCellArgument(argument, dst); - emitLoadStructure(dst, scratch, dst); + loadPtr(Address(dst, JSCell::structureOffset()), scratch); appendFailure(branchPtr(NotEqual, Address(scratch, Structure::classInfoOffset()), TrustedImmPtr(classInfo))); - // We have to reload the argument since emitLoadStructure clobbered it. 
- loadCellArgument(argument, dst); } void loadInt32Argument(int argument, RegisterID dst, Jump& failTarget) @@ -105,7 +97,7 @@ namespace JSC { { if (src != regT0) move(src, regT0); - emitFunctionEpilogue(); + loadPtr(Address(callFrameRegister, CallFrame::callerFrameOffset()), callFrameRegister); ret(); } #else @@ -113,7 +105,7 @@ namespace JSC { { ASSERT_UNUSED(payload, payload == regT0); ASSERT_UNUSED(tag, tag == regT1); - emitFunctionEpilogue(); + loadPtr(Address(callFrameRegister, CallFrame::callerFrameOffset()), callFrameRegister); ret(); } #endif @@ -129,7 +121,14 @@ namespace JSC { move(tagTypeNumberRegister, regT0); done.link(this); #else +#if !CPU(X86) + // The src register is not clobbered by moveDoubleToInts with ARM, MIPS and SH4 macro assemblers, so let's use it. moveDoubleToInts(src, regT0, regT1); +#else + storeDouble(src, Address(stackPointerRegister, -(int)sizeof(double))); + loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(JSValue, u.asBits.tag) - sizeof(double)), regT1); + loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(JSValue, u.asBits.payload) - sizeof(double)), regT0); +#endif Jump lowNonZero = branchTestPtr(NonZero, regT1); Jump highNonZero = branchTestPtr(NonZero, regT0); move(TrustedImm32(0), regT0); @@ -137,7 +136,7 @@ namespace JSC { lowNonZero.link(this); highNonZero.link(this); #endif - emitFunctionEpilogue(); + loadPtr(Address(callFrameRegister, CallFrame::callerFrameOffset()), callFrameRegister); ret(); } @@ -146,7 +145,7 @@ namespace JSC { if (src != regT0) move(src, regT0); tagReturnAsInt32(); - emitFunctionEpilogue(); + loadPtr(Address(callFrameRegister, CallFrame::callerFrameOffset()), callFrameRegister); ret(); } @@ -155,13 +154,13 @@ namespace JSC { if (src != regT0) move(src, regT0); tagReturnAsJSCell(); - emitFunctionEpilogue(); + loadPtr(Address(callFrameRegister, CallFrame::callerFrameOffset()), callFrameRegister); ret(); } MacroAssemblerCodeRef finalize(MacroAssemblerCodePtr fallback, const char* thunkKind) { - LinkBuffer patchBuffer(*m_vm, *this, GLOBAL_THUNK_ID); + LinkBuffer patchBuffer(*m_vm, this, GLOBAL_THUNK_ID); patchBuffer.link(m_failures, CodeLocationLabel(fallback)); for (unsigned i = 0; i < m_calls.size(); i++) patchBuffer.link(m_calls[i].first, m_calls[i].second); diff --git a/Source/JavaScriptCore/jit/SpillRegistersMode.h b/Source/JavaScriptCore/jit/SpillRegistersMode.h deleted file mode 100644 index 160df2c2e..000000000 --- a/Source/JavaScriptCore/jit/SpillRegistersMode.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Copyright (C) 2014 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef SpillRegistersMode_h -#define SpillRegistersMode_h - -namespace JSC { - -enum SpillRegistersMode { NeedToSpill, DontSpill }; - -} - -#endif diff --git a/Source/JavaScriptCore/jit/TempRegisterSet.cpp b/Source/JavaScriptCore/jit/TempRegisterSet.cpp index 9c2e73d43..9d80bbc57 100644 --- a/Source/JavaScriptCore/jit/TempRegisterSet.cpp +++ b/Source/JavaScriptCore/jit/TempRegisterSet.cpp @@ -28,15 +28,12 @@ #if ENABLE(JIT) -#include "JSCInlines.h" #include "RegisterSet.h" namespace JSC { TempRegisterSet::TempRegisterSet(const RegisterSet& other) { - clearAll(); - for (unsigned i = GPRInfo::numberOfRegisters; i--;) { GPRReg reg = GPRInfo::toRegister(i); if (other.get(reg)) diff --git a/Source/JavaScriptCore/jit/TempRegisterSet.h b/Source/JavaScriptCore/jit/TempRegisterSet.h index 0b2edf9d9..0915e796a 100644 --- a/Source/JavaScriptCore/jit/TempRegisterSet.h +++ b/Source/JavaScriptCore/jit/TempRegisterSet.h @@ -26,6 +26,8 @@ #ifndef TempRegisterSet_h #define TempRegisterSet_h +#include <wtf/Platform.h> + #if ENABLE(JIT) #include "FPRInfo.h" @@ -39,7 +41,8 @@ class TempRegisterSet { public: TempRegisterSet() { - clearAll(); + for (unsigned i = numberOfBytesInTempRegisterSet; i--;) + m_set[i] = 0; } TempRegisterSet(const RegisterSet&); @@ -161,12 +164,6 @@ public: } private: - void clearAll() - { - for (unsigned i = numberOfBytesInTempRegisterSet; i--;) - m_set[i] = 0; - } - void setBit(unsigned i) { ASSERT(i < totalNumberOfRegisters); diff --git a/Source/JavaScriptCore/jit/ThunkGenerator.h b/Source/JavaScriptCore/jit/ThunkGenerator.h index 031748cbe..a9d7e04ee 100644 --- a/Source/JavaScriptCore/jit/ThunkGenerator.h +++ b/Source/JavaScriptCore/jit/ThunkGenerator.h @@ -26,6 +26,8 @@ #ifndef ThunkGenerator_h #define ThunkGenerator_h +#include <wtf/Platform.h> + #if ENABLE(JIT) namespace JSC { diff --git a/Source/JavaScriptCore/jit/ThunkGenerators.cpp b/Source/JavaScriptCore/jit/ThunkGenerators.cpp index 34c6504dd..f8f5cbaf5 100644 --- a/Source/JavaScriptCore/jit/ThunkGenerators.cpp +++ b/Source/JavaScriptCore/jit/ThunkGenerators.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2010, 2012, 2013, 2014 Apple Inc. All rights reserved. + * Copyright (C) 2010, 2012, 2013 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -27,14 +27,11 @@ #include "ThunkGenerators.h" #include "CodeBlock.h" -#include "DFGSpeculativeJIT.h" #include "JITOperations.h" #include "JSArray.h" #include "JSArrayIterator.h" #include "JSStack.h" -#include "MathCommon.h" -#include "MaxFrameExtentForSlowPathCall.h" -#include "JSCInlines.h" +#include "Operations.h" #include "SpecializedThunkJIT.h" #include <wtf/InlineASM.h> #include <wtf/StringPrintStream.h> @@ -46,14 +43,17 @@ namespace JSC { inline void emitPointerValidation(CCallHelpers& jit, GPRReg pointerGPR) { - if (ASSERT_DISABLED) - return; +#if !ASSERT_DISABLED CCallHelpers::Jump isNonZero = jit.branchTestPtr(CCallHelpers::NonZero, pointerGPR); - jit.abortWithReason(TGInvalidPointer); + jit.breakpoint(); isNonZero.link(&jit); jit.pushToSave(pointerGPR); jit.load8(pointerGPR, pointerGPR); jit.popToRestore(pointerGPR); +#else + UNUSED_PARAM(jit); + UNUSED_PARAM(pointerGPR); +#endif } // We will jump here if the JIT code tries to make a call, but the @@ -66,40 +66,45 @@ MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(VM* vm) // even though we won't use it. jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR); - jit.setupArguments(CCallHelpers::TrustedImmPtr(vm), GPRInfo::callFrameRegister); + // The CallFrame register points to the (failed) callee frame, so we need to pop back one frame. + jit.emitGetCallerFrameFromCallFrameHeaderPtr(GPRInfo::callFrameRegister); + + jit.setupArgumentsExecState(); jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(lookupExceptionHandler)), GPRInfo::nonArgGPR0); emitPointerValidation(jit, GPRInfo::nonArgGPR0); jit.call(GPRInfo::nonArgGPR0); jit.jumpToExceptionHandler(); - LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID); + LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID); return FINALIZE_CODE(patchBuffer, ("Throw exception from call slow path thunk")); } static void slowPathFor( - CCallHelpers& jit, VM* vm, P_JITOperation_ECli slowPathFunction) + CCallHelpers& jit, VM* vm, P_JITOperation_E slowPathFunction) { - jit.emitFunctionPrologue(); + jit.preserveReturnAddressAfterCall(GPRInfo::nonArgGPR2); + emitPointerValidation(jit, GPRInfo::nonArgGPR2); + jit.emitPutReturnPCToCallFrameHeader(GPRInfo::nonArgGPR2); jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame); - if (maxFrameExtentForSlowPathCall) - jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister); - jit.setupArgumentsWithExecState(GPRInfo::regT2); + jit.setupArgumentsExecState(); jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0); emitPointerValidation(jit, GPRInfo::nonArgGPR0); jit.call(GPRInfo::nonArgGPR0); - if (maxFrameExtentForSlowPathCall) - jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister); // This slow call will return the address of one of the following: // 1) Exception throwing thunk. // 2) Host call return value returner thingy. // 3) The function to call. 
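// Whichever address the slow path returned, control transfers to it with a plain
// indirect jump, so the frame must first be restored to what the caller expects.
// In rough pseudo-assembly, assuming returnValueGPR holds the slow path's result:
//
//   reload the true return PC from the call frame header
//   clear the header slot and restore the return address for the eventual return
//   jmp *returnValueGPR     // tail-transfer: no new frame is created here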
+ jit.emitGetReturnPCFromCallFrameHeaderPtr(GPRInfo::nonPreservedNonReturnGPR); + jit.emitPutReturnPCToCallFrameHeader(CCallHelpers::TrustedImmPtr(0)); + emitPointerValidation(jit, GPRInfo::nonPreservedNonReturnGPR); + jit.restoreReturnAddressBeforeReturn(GPRInfo::nonPreservedNonReturnGPR); emitPointerValidation(jit, GPRInfo::returnValueGPR); - jit.emitFunctionEpilogue(); jit.jump(GPRInfo::returnValueGPR); } -MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm) +static MacroAssemblerCodeRef linkForThunkGenerator( + VM* vm, CodeSpecializationKind kind) { // The return address is on the stack or in the link register. We will hence // save the return address to the call frame while we make a C++ function call @@ -109,25 +114,38 @@ MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm) CCallHelpers jit(vm); - slowPathFor(jit, vm, operationLinkCall); + slowPathFor(jit, vm, kind == CodeForCall ? operationLinkCall : operationLinkConstruct); - LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID); - return FINALIZE_CODE(patchBuffer, ("Link call slow path thunk")); + LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID); + return FINALIZE_CODE( + patchBuffer, + ("Link %s slow path thunk", kind == CodeForCall ? "call" : "construct")); +} + +MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm) +{ + return linkForThunkGenerator(vm, CodeForCall); +} + +MacroAssemblerCodeRef linkConstructThunkGenerator(VM* vm) +{ + return linkForThunkGenerator(vm, CodeForConstruct); } // For closure optimizations, we only include calls, since if you're using closures for // object construction then you're going to lose big time anyway. -MacroAssemblerCodeRef linkPolymorphicCallThunkGenerator(VM* vm) +MacroAssemblerCodeRef linkClosureCallThunkGenerator(VM* vm) { CCallHelpers jit(vm); - slowPathFor(jit, vm, operationLinkPolymorphicCall); + slowPathFor(jit, vm, operationLinkClosureCall); - LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID); - return FINALIZE_CODE(patchBuffer, ("Link polymorphic call slow path thunk")); + LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID); + return FINALIZE_CODE(patchBuffer, ("Link closure call slow path thunk")); } -MacroAssemblerCodeRef virtualThunkFor(VM* vm, CallLinkInfo& callLinkInfo) +static MacroAssemblerCodeRef virtualForThunkGenerator( + VM* vm, CodeSpecializationKind kind) { // The callee is in regT0 (for JSVALUE32_64, the tag is in regT1). // The return address is on the stack, or in the link register. We will hence @@ -137,121 +155,166 @@ MacroAssemblerCodeRef virtualThunkFor(VM* vm, CallLinkInfo& callLinkInfo) CCallHelpers jit(vm); CCallHelpers::JumpList slowCase; - - // This is a slow path execution, and regT2 contains the CallLinkInfo. Count the - // slow path execution for the profiler. - jit.add32( - CCallHelpers::TrustedImm32(1), - CCallHelpers::Address(GPRInfo::regT2, CallLinkInfo::offsetOfSlowPathCount())); // FIXME: we should have a story for eliminating these checks. In many cases, // the DFG knows that the value is definitely a cell, or definitely a function. 
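// The branches below re-establish at runtime what the DFG may already have proven:
// the callee is a cell and its structure's classInfo is JSFunction::info(). Roughly,
// assuming JSVALUE64 tagging where any non-cell value has bits set under the tag mask:
//
//   if (callee & tagMask)                                       goto slowCase;  // not a cell
//   if (callee->structure()->classInfo() != JSFunction::info()) goto slowCase;  // not a function
//   // past this point JSFunction fields can be read directly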
#if USE(JSVALUE64) - jit.move(CCallHelpers::TrustedImm64(TagMask), GPRInfo::regT4); - slowCase.append( jit.branchTest64( - CCallHelpers::NonZero, GPRInfo::regT0, GPRInfo::regT4)); + CCallHelpers::NonZero, GPRInfo::regT0, GPRInfo::tagMaskRegister)); #else slowCase.append( jit.branch32( CCallHelpers::NotEqual, GPRInfo::regT1, CCallHelpers::TrustedImm32(JSValue::CellTag))); #endif - AssemblyHelpers::emitLoadStructure(jit, GPRInfo::regT0, GPRInfo::regT4, GPRInfo::regT1); + jit.loadPtr(CCallHelpers::Address(GPRInfo::regT0, JSCell::structureOffset()), GPRInfo::nonArgGPR2); slowCase.append( jit.branchPtr( CCallHelpers::NotEqual, - CCallHelpers::Address(GPRInfo::regT4, Structure::classInfoOffset()), + CCallHelpers::Address(GPRInfo::nonArgGPR2, Structure::classInfoOffset()), CCallHelpers::TrustedImmPtr(JSFunction::info()))); // Now we know we have a JSFunction. jit.loadPtr( CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfExecutable()), - GPRInfo::regT4); - jit.loadPtr( - CCallHelpers::Address( - GPRInfo::regT4, ExecutableBase::offsetOfJITCodeWithArityCheckFor( - callLinkInfo.specializationKind(), callLinkInfo.registerPreservationMode())), - GPRInfo::regT4); - slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT4)); + GPRInfo::nonArgGPR2); + slowCase.append( + jit.branch32( + CCallHelpers::LessThan, + CCallHelpers::Address( + GPRInfo::nonArgGPR2, ExecutableBase::offsetOfNumParametersFor(kind)), + CCallHelpers::TrustedImm32(0))); // Now we know that we have a CodeBlock, and we're committed to making a fast // call. + jit.loadPtr( + CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfScopeChain()), + GPRInfo::regT1); +#if USE(JSVALUE64) + jit.store64( + GPRInfo::regT1, + CCallHelpers::Address( + GPRInfo::callFrameRegister, + static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain)); +#else + jit.storePtr( + GPRInfo::regT1, + CCallHelpers::Address( + GPRInfo::callFrameRegister, + static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain + + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload))); + jit.store32( + CCallHelpers::TrustedImm32(JSValue::CellTag), + CCallHelpers::Address( + GPRInfo::callFrameRegister, + static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain + + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag))); +#endif + + jit.loadPtr( + CCallHelpers::Address(GPRInfo::nonArgGPR2, ExecutableBase::offsetOfJITCodeWithArityCheckFor(kind)), + GPRInfo::regT0); + // Make a tail call. This will return back to JIT code. - emitPointerValidation(jit, GPRInfo::regT4); - jit.jump(GPRInfo::regT4); + emitPointerValidation(jit, GPRInfo::regT0); + jit.jump(GPRInfo::regT0); slowCase.link(&jit); // Here we don't know anything, so revert to the full slow path. - slowPathFor(jit, vm, operationVirtualCall); + slowPathFor(jit, vm, kind == CodeForCall ? operationVirtualCall : operationVirtualConstruct); - LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID); + LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID); return FINALIZE_CODE( patchBuffer, - ("Virtual %s%s slow path thunk at CodePtr(%p)", - callLinkInfo.specializationKind() == CodeForCall ? "call" : "construct", - callLinkInfo.registerPreservationMode() == MustPreserveRegisters ? " that preserves registers" : "", - callLinkInfo.callReturnLocation().dataLocation())); + ("Virtual %s slow path thunk", kind == CodeForCall ? 
"call" : "construct")); } -enum ThunkEntryType { EnterViaCall, EnterViaJump }; +MacroAssemblerCodeRef virtualCallThunkGenerator(VM* vm) +{ + return virtualForThunkGenerator(vm, CodeForCall); +} + +MacroAssemblerCodeRef virtualConstructThunkGenerator(VM* vm) +{ + return virtualForThunkGenerator(vm, CodeForConstruct); +} -static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind, ThunkEntryType entryType = EnterViaCall) +static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind) { int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind); JSInterfaceJIT jit(vm); - - if (entryType == EnterViaCall) - jit.emitFunctionPrologue(); - + jit.emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock); jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame); #if CPU(X86) + // Load caller frame's scope chain into this callframe so that whatever we call can + // get to its global data. + jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT0); + jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0); + jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain); + + jit.peek(JSInterfaceJIT::regT1); + jit.emitPutReturnPCToCallFrameHeader(JSInterfaceJIT::regT1); + // Calling convention: f(ecx, edx, ...); // Host function signature: f(ExecState*); jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx); - jit.subPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister); // Align stack after prologue. + jit.subPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(void*)), JSInterfaceJIT::stackPointerRegister); // Align stack after call. // call the function jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT1); jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1); + jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack. jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction)); - jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister); + jit.addPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(void*)), JSInterfaceJIT::stackPointerRegister); #elif CPU(X86_64) + // Load caller frame's scope chain into this callframe so that whatever we call can + // get to its global data. + jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT0); + jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0); + jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain); + + jit.peek(JSInterfaceJIT::regT1); + jit.emitPutReturnPCToCallFrameHeader(JSInterfaceJIT::regT1); + #if !OS(WINDOWS) // Calling convention: f(edi, esi, edx, ecx, ...); // Host function signature: f(ExecState*); jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::edi); + jit.subPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister); // Align stack after call. + jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::esi); jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, JSFunction::offsetOfExecutable()), X86Registers::r9); + jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack. 
jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction)); + jit.addPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister); #else // Calling convention: f(ecx, edx, r8, r9, ...); // Host function signature: f(ExecState*); jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx); - // Leave space for the callee parameter home addresses. - // At this point the stack is aligned to 16 bytes, but if this changes at some point, we need to emit code to align it. - jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister); + // Leave space for the callee parameter home addresses and align the stack. + jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister); jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::edx); jit.loadPtr(JSInterfaceJIT::Address(X86Registers::edx, JSFunction::offsetOfExecutable()), X86Registers::r9); + jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack. jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction)); - jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister); + jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister); #endif #elif CPU(ARM64) @@ -261,13 +324,34 @@ static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind k COMPILE_ASSERT(ARM64Registers::x1 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_1); COMPILE_ASSERT(ARM64Registers::x2 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_2); + // Load caller frame's scope chain into this callframe so that whatever we call can + // get to its global data. + jit.emitGetCallerFrameFromCallFrameHeaderPtr(ARM64Registers::x3); + jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, ARM64Registers::x3); + jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain); + + jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved + jit.emitPutReturnPCToCallFrameHeader(ARM64Registers::lr); + // Host function signature: f(ExecState*); jit.move(JSInterfaceJIT::callFrameRegister, ARM64Registers::x0); jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARM64Registers::x1); jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x1, JSFunction::offsetOfExecutable()), ARM64Registers::x2); + jit.move(ARM64Registers::x3, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack. jit.call(JSInterfaceJIT::Address(ARM64Registers::x2, executableOffsetToFunction)); + + jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3); + #elif CPU(ARM) || CPU(SH4) || CPU(MIPS) + // Load caller frame's scope chain into this callframe so that whatever we call can get to its global data. + jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT2); + jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT2); + jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain); + + jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved + jit.emitPutReturnPCToCallFrameHeader(JSInterfaceJIT::regT3); + #if CPU(MIPS) // Allocate stack space for (unused) 16 bytes (8-byte aligned) for 4 arguments. 
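// The MIPS o32 ABI obliges the caller to reserve home slots for the four argument
// registers (a0-a3) even when they go unused, so the call is bracketed by a fixed
// 16-byte stack adjustment that also preserves 8-byte alignment:
//
//   subPtr(16, sp)     // reserve the a0-a3 home area
//   call hostFunction  // hostFunction is a stand-in name for the target
//   addPtr(16, sp)     // release it afterwards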
jit.subPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister); @@ -278,6 +362,7 @@ static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind k jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0); jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::argumentGPR1); + jit.move(JSInterfaceJIT::regT2, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack. jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::argumentGPR1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2); jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction)); @@ -285,10 +370,12 @@ static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind k // Restore stack space jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister); #endif + + jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3); #else #error "JIT not supported on this platform." UNUSED_PARAM(executableOffsetToFunction); - abortWithReason(TGNotSupported); + breakpoint(); #endif // Check for an exception @@ -298,42 +385,40 @@ static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind k #else JSInterfaceJIT::Jump exceptionHandler = jit.branch32( JSInterfaceJIT::NotEqual, - JSInterfaceJIT::AbsoluteAddress(vm->addressOfException()), - JSInterfaceJIT::TrustedImm32(0)); + JSInterfaceJIT::AbsoluteAddress(reinterpret_cast<char*>(vm->addressOfException()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), + JSInterfaceJIT::TrustedImm32(JSValue::EmptyValueTag)); #endif - jit.emitFunctionEpilogue(); // Return. jit.ret(); // Handle an exception exceptionHandler.link(&jit); + // Grab the return address. + jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT1); + + jit.move(JSInterfaceJIT::TrustedImmPtr(&vm->exceptionLocation), JSInterfaceJIT::regT2); + jit.storePtr(JSInterfaceJIT::regT1, JSInterfaceJIT::regT2); + jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame); #if CPU(X86) && USE(JSVALUE32_64) jit.addPtr(JSInterfaceJIT::TrustedImm32(-12), JSInterfaceJIT::stackPointerRegister); - jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister), JSInterfaceJIT::regT0); - jit.push(JSInterfaceJIT::regT0); + jit.push(JSInterfaceJIT::callFrameRegister); #else -#if OS(WINDOWS) - // Allocate space on stack for the 4 parameter registers. - jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister); -#endif - jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister), JSInterfaceJIT::argumentGPR0); + jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0); #endif jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(operationVMHandleException).value()), JSInterfaceJIT::regT3); jit.call(JSInterfaceJIT::regT3); #if CPU(X86) && USE(JSVALUE32_64) jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister); -#elif OS(WINDOWS) - jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister); #endif jit.jumpToExceptionHandler(); - LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID); - return FINALIZE_CODE(patchBuffer, ("native %s%s trampoline", entryType == EnterViaJump ? 
"Tail " : "", toCString(kind).data())); + LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID); + return FINALIZE_CODE(patchBuffer, ("native %s trampoline", toCString(kind).data())); } MacroAssemblerCodeRef nativeCallGenerator(VM* vm) @@ -341,60 +426,43 @@ MacroAssemblerCodeRef nativeCallGenerator(VM* vm) return nativeForGenerator(vm, CodeForCall); } -MacroAssemblerCodeRef nativeTailCallGenerator(VM* vm) -{ - return nativeForGenerator(vm, CodeForCall, EnterViaJump); -} - MacroAssemblerCodeRef nativeConstructGenerator(VM* vm) { return nativeForGenerator(vm, CodeForConstruct); } -MacroAssemblerCodeRef arityFixupGenerator(VM* vm) +MacroAssemblerCodeRef arityFixup(VM* vm) { JSInterfaceJIT jit(vm); - // We enter with fixup count, in aligned stack units, in regT0 and the return thunk in - // regT5 on 32-bit and regT7 on 64-bit. + // We enter with fixup count in regT0 #if USE(JSVALUE64) # if CPU(X86_64) jit.pop(JSInterfaceJIT::regT4); # endif - jit.lshift32(JSInterfaceJIT::TrustedImm32(logStackAlignmentRegisters()), JSInterfaceJIT::regT0); jit.neg64(JSInterfaceJIT::regT0); - jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT6); - jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::regT2); + jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3); + jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * 8), JSInterfaceJIT::regT2); jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2); // Move current frame down regT0 number of slots JSInterfaceJIT::Label copyLoop(jit.label()); - jit.load64(JSInterfaceJIT::regT6, JSInterfaceJIT::regT1); - jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight)); - jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT6); + jit.load64(JSInterfaceJIT::regT3, JSInterfaceJIT::regT1); + jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight)); + jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3); jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit); - // Fill in regT0 - 1 missing arg slots with undefined + // Fill in regT0 missing arg slots with undefined jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2); jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), JSInterfaceJIT::regT1); - jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2); JSInterfaceJIT::Label fillUndefinedLoop(jit.label()); - jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight)); - jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT6); + jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight)); + jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3); jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit); - - // Adjust call frame register and stack pointer to account for missing args - jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1); - jit.lshift64(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT1); - jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::callFrameRegister); - 
jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::stackPointerRegister); - // Save the original return PC. - jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT1); - jit.storePtr(GPRInfo::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT6, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight)); - - // Install the new return PC. - jit.storePtr(GPRInfo::regT7, JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset())); + // Adjust call frame register to account for missing args + jit.lshift64(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT0); + jit.addPtr(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); # if CPU(X86_64) jit.push(JSInterfaceJIT::regT4); @@ -404,10 +472,9 @@ MacroAssemblerCodeRef arityFixupGenerator(VM* vm) # if CPU(X86) jit.pop(JSInterfaceJIT::regT4); # endif - jit.lshift32(JSInterfaceJIT::TrustedImm32(logStackAlignmentRegisters()), JSInterfaceJIT::regT0); jit.neg32(JSInterfaceJIT::regT0); jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3); - jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::regT2); + jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * 8), JSInterfaceJIT::regT2); jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2); // Move current frame down regT0 number of slots @@ -419,9 +486,8 @@ MacroAssemblerCodeRef arityFixupGenerator(VM* vm) jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3); jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit); - // Fill in regT0 - 1 missing arg slots with undefined + // Fill in regT0 missing arg slots with undefined jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2); - jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2); JSInterfaceJIT::Label fillUndefinedLoop(jit.label()); jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT1); jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight)); @@ -431,106 +497,20 @@ MacroAssemblerCodeRef arityFixupGenerator(VM* vm) jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3); jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit); - // Adjust call frame register and stack pointer to account for missing args - jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1); - jit.lshift32(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT1); - jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::callFrameRegister); - jit.addPtr(JSInterfaceJIT::regT1, JSInterfaceJIT::stackPointerRegister); + // Adjust call frame register to account for missing args + jit.lshift32(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT0); + jit.addPtr(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); - // Save the original return PC. - jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset()), GPRInfo::regT1); - jit.storePtr(GPRInfo::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight)); - - // Install the new return PC. 
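// These lines implement a return-PC swap: the caller's real return address is parked
// in the slot just past the moved frame, and the thunk address (regT5 on 32-bit,
// regT7 on 64-bit, per the entry comment above) is installed in the header, so the
// callee "returns" into code that can undo the fixup. Schematically:
//
//   savedSlot       = frame->returnPC;   // remember where to really go back to
//   frame->returnPC = returnThunk;       // detour through the fixup-undo thunk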
- jit.storePtr(GPRInfo::regT5, JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrame::returnPCOffset())); - # if CPU(X86) jit.push(JSInterfaceJIT::regT4); # endif jit.ret(); #endif - LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID); + LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID); return FINALIZE_CODE(patchBuffer, ("fixup arity")); } -MacroAssemblerCodeRef baselineGetterReturnThunkGenerator(VM* vm) -{ - JSInterfaceJIT jit(vm); - -#if USE(JSVALUE64) - jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0); -#else - jit.setupResults(GPRInfo::regT0, GPRInfo::regT1); -#endif - - unsigned numberOfParameters = 0; - numberOfParameters++; // The 'this' argument. - numberOfParameters++; // The true return PC. - - unsigned numberOfRegsForCall = - JSStack::CallFrameHeaderSize + numberOfParameters; - - unsigned numberOfBytesForCall = - numberOfRegsForCall * sizeof(Register) - sizeof(CallerFrameAndPC); - - unsigned alignedNumberOfBytesForCall = - WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall); - - // The real return address is stored above the arguments. We passed one argument, which is - // 'this'. So argument at index 1 is the return address. - jit.loadPtr( - AssemblyHelpers::Address( - AssemblyHelpers::stackPointerRegister, - (virtualRegisterForArgument(1).offset() - JSStack::CallerFrameAndPCSize) * sizeof(Register)), - GPRInfo::regT2); - - jit.addPtr( - AssemblyHelpers::TrustedImm32(alignedNumberOfBytesForCall), - AssemblyHelpers::stackPointerRegister); - - jit.jump(GPRInfo::regT2); - - LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID); - return FINALIZE_CODE(patchBuffer, ("baseline getter return thunk")); -} - -MacroAssemblerCodeRef baselineSetterReturnThunkGenerator(VM* vm) -{ - JSInterfaceJIT jit(vm); - - unsigned numberOfParameters = 0; - numberOfParameters++; // The 'this' argument. - numberOfParameters++; // The value to set. - numberOfParameters++; // The true return PC. - - unsigned numberOfRegsForCall = - JSStack::CallFrameHeaderSize + numberOfParameters; - - unsigned numberOfBytesForCall = - numberOfRegsForCall * sizeof(Register) - sizeof(CallerFrameAndPC); - - unsigned alignedNumberOfBytesForCall = - WTF::roundUpToMultipleOf(stackAlignmentBytes(), numberOfBytesForCall); - - // The real return address is stored above the arguments. We passed two arguments, so - // the argument at index 2 is the return address. 
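// Both baseline accessor return thunks (getter and setter) follow the same recipe:
// recover the true return PC from above the passed arguments, pop the whole accessor
// frame with one aligned adjustment, and jump. A sketch, using the names from above:
//
//   returnPC = stack[virtualRegisterForArgument(2)]   // past 'this' and the value
//   sp += alignedNumberOfBytesForCall
//   jmp returnPC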
- jit.loadPtr( - AssemblyHelpers::Address( - AssemblyHelpers::stackPointerRegister, - (virtualRegisterForArgument(2).offset() - JSStack::CallerFrameAndPCSize) * sizeof(Register)), - GPRInfo::regT2); - - jit.addPtr( - AssemblyHelpers::TrustedImm32(alignedNumberOfBytesForCall), - AssemblyHelpers::stackPointerRegister); - - jit.jump(GPRInfo::regT2); - - LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID); - return FINALIZE_CODE(patchBuffer, ("baseline setter return thunk")); -} - static void stringCharLoad(SpecializedThunkJIT& jit, VM* vm) { // load string @@ -574,7 +554,7 @@ MacroAssemblerCodeRef charCodeAtThunkGenerator(VM* vm) SpecializedThunkJIT jit(vm, 1); stringCharLoad(jit, vm); jit.returnInt32(SpecializedThunkJIT::regT0); - return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charCodeAt"); + return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "charCodeAt"); } MacroAssemblerCodeRef charAtThunkGenerator(VM* vm) @@ -583,7 +563,7 @@ MacroAssemblerCodeRef charAtThunkGenerator(VM* vm) stringCharLoad(jit, vm); charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1); jit.returnJSCell(SpecializedThunkJIT::regT0); - return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charAt"); + return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "charAt"); } MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM* vm) @@ -593,28 +573,7 @@ MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM* vm) jit.loadInt32Argument(0, SpecializedThunkJIT::regT0); charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1); jit.returnJSCell(SpecializedThunkJIT::regT0); - return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "fromCharCode"); -} - -MacroAssemblerCodeRef clz32ThunkGenerator(VM* vm) -{ - SpecializedThunkJIT jit(vm, 1); - MacroAssembler::Jump nonIntArgJump; - jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArgJump); - - SpecializedThunkJIT::Label convertedArgumentReentry(&jit); - jit.countLeadingZeros32(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1); - jit.returnInt32(SpecializedThunkJIT::regT1); - - if (jit.supportsFloatingPointTruncate()) { - nonIntArgJump.link(&jit); - jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0); - jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(convertedArgumentReentry, &jit); - jit.appendFailure(jit.jump()); - } else - jit.appendFailure(nonIntArgJump); - - return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "clz32"); + return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "fromCharCode"); } MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm) @@ -626,15 +585,25 @@ MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm) jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0); jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0); jit.returnDouble(SpecializedThunkJIT::fpRegT0); - return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "sqrt"); + return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "sqrt"); } #define UnaryDoubleOpWrapper(function) function##Wrapper enum MathThunkCallingConvention { }; typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention); +extern "C" { -#if CPU(X86_64) && COMPILER(GCC_OR_CLANG) && (OS(DARWIN) || OS(LINUX)) +double jsRound(double) REFERENCED_FROM_ASM; +double jsRound(double d) +{ + double integer = ceil(d); + return integer - (integer - d > 
0.5); +} + +} + +#if CPU(X86_64) && COMPILER(GCC) && (OS(DARWIN) || OS(LINUX)) #define defineUnaryDoubleOpWrapper(function) \ asm( \ @@ -642,9 +611,7 @@ typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention); ".globl " SYMBOL_STRING(function##Thunk) "\n" \ HIDE_SYMBOL(function##Thunk) "\n" \ SYMBOL_STRING(function##Thunk) ":" "\n" \ - "pushq %rax\n" \ "call " GLOBAL_REFERENCE(function) "\n" \ - "popq %rcx\n" \ "ret\n" \ );\ extern "C" { \ @@ -652,7 +619,7 @@ typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention); } \ static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk; -#elif CPU(X86) && COMPILER(GCC_OR_CLANG) && OS(LINUX) && defined(__PIC__) +#elif CPU(X86) && COMPILER(GCC) && OS(LINUX) && defined(__PIC__) #define defineUnaryDoubleOpWrapper(function) \ asm( \ ".text\n" \ @@ -676,19 +643,19 @@ typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention); } \ static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk; -#elif CPU(X86) && COMPILER(GCC_OR_CLANG) && (OS(DARWIN) || OS(LINUX)) +#elif CPU(X86) && COMPILER(GCC) && (OS(DARWIN) || OS(LINUX)) #define defineUnaryDoubleOpWrapper(function) \ asm( \ ".text\n" \ ".globl " SYMBOL_STRING(function##Thunk) "\n" \ HIDE_SYMBOL(function##Thunk) "\n" \ SYMBOL_STRING(function##Thunk) ":" "\n" \ - "subl $20, %esp\n" \ + "subl $8, %esp\n" \ "movsd %xmm0, (%esp) \n" \ "call " GLOBAL_REFERENCE(function) "\n" \ "fstpl (%esp) \n" \ "movsd (%esp), %xmm0 \n" \ - "addl $20, %esp\n" \ + "addl $8, %esp\n" \ "ret\n" \ );\ extern "C" { \ @@ -696,7 +663,7 @@ typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention); } \ static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk; -#elif CPU(ARM_THUMB2) && COMPILER(GCC_OR_CLANG) && PLATFORM(IOS) +#elif CPU(ARM_THUMB2) && COMPILER(GCC) && PLATFORM(IOS) #define defineUnaryDoubleOpWrapper(function) \ asm( \ @@ -729,38 +696,12 @@ typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention); HIDE_SYMBOL(function##Thunk) "\n" \ SYMBOL_STRING(function##Thunk) ":" "\n" \ "b " GLOBAL_REFERENCE(function) "\n" \ - ".previous" \ ); \ extern "C" { \ MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \ } \ static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk; -#elif CPU(X86) && COMPILER(MSVC) && OS(WINDOWS) - -// MSVC does not accept floor, etc, to be called directly from inline assembly, so we need to wrap these functions. 
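// The workaround is one level of indirection: take each CRT function's address into
// a file-scope function pointer, then 'call' through that pointer, which MSVC inline
// asm does accept. The pattern, for a hypothetical unaryOp:
//
//   static double (_cdecl *unaryOpFunction)(double) = unaryOp;  // plain data symbol
//   __asm call unaryOpFunction                                  // indirect call is legal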
 
-#elif CPU(X86) && COMPILER(MSVC) && OS(WINDOWS)
-
-// MSVC does not accept floor, etc, to be called directly from inline assembly, so we need to wrap these functions.
-static double (_cdecl *floorFunction)(double) = floor;
-static double (_cdecl *ceilFunction)(double) = ceil;
-static double (_cdecl *expFunction)(double) = exp;
-static double (_cdecl *logFunction)(double) = log;
-static double (_cdecl *jsRoundFunction)(double) = jsRound;
-
-#define defineUnaryDoubleOpWrapper(function) \
-    extern "C" __declspec(naked) MathThunkCallingConvention function##Thunk(MathThunkCallingConvention) \
-    { \
-        __asm \
-        { \
-        __asm sub esp, 20 \
-        __asm movsd mmword ptr [esp], xmm0 \
-        __asm call function##Function \
-        __asm fstp qword ptr [esp] \
-        __asm movsd xmm0, mmword ptr [esp] \
-        __asm add esp, 20 \
-        __asm ret \
-        } \
-    } \
-    static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
-
 #else
 
 #define defineUnaryDoubleOpWrapper(function) \
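The defineUnaryDoubleOpWrapper variants above all solve the same problem: the JIT hands these thunks their operand in a floating-point register rather than per the platform C calling convention, so each wrapper bridges to the libm function and moves the result back. As a rough sketch, the 32-bit x86 (Darwin/Linux) variant expands approximately as follows for floor, with the SYMBOL_STRING/HIDE_SYMBOL decoration elided for readability:

    // Illustration only: approximate expansion of defineUnaryDoubleOpWrapper(floor)
    // on 32-bit x86. The thunk receives the operand in %xmm0, spills it to the
    // stack where cdecl expects the argument, calls floor, then moves the x87
    // return value back into %xmm0 for the JIT caller.
    asm(
        ".text\n"
        ".globl floorThunk\n"
        "floorThunk:\n"
        "subl $8, %esp\n"          // room for one outgoing double argument
        "movsd %xmm0, (%esp)\n"    // spill the SSE argument for the C callee
        "call floor\n"
        "fstpl (%esp)\n"           // pop the x87 result to memory...
        "movsd (%esp), %xmm0\n"    // ...and reload it into %xmm0
        "addl $8, %esp\n"
        "ret\n"
    );
    extern "C" {
        MathThunkCallingConvention floorThunk(MathThunkCallingConvention);
    }
    static MathThunk floorWrapper = &floorThunk;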
@@ -799,7 +740,7 @@ MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
     SpecializedThunkJIT::Jump intResult;
     SpecializedThunkJIT::JumpList doubleResult;
     if (jit.supportsFloatingPointTruncate()) {
-        jit.loadDouble(MacroAssembler::TrustedImmPtr(&zeroConstant), SpecializedThunkJIT::fpRegT1);
+        jit.loadDouble(&zeroConstant, SpecializedThunkJIT::fpRegT1);
         doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
         SpecializedThunkJIT::JumpList slowPath;
         // Handle the negative doubles in the slow path for now.
@@ -816,7 +757,7 @@ MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
     doubleResult.link(&jit);
     jit.returnDouble(SpecializedThunkJIT::fpRegT0);
 #endif // CPU(ARM64)
-    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "floor");
+    return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "floor");
 }
 
 MacroAssemblerCodeRef ceilThunkGenerator(VM* vm)
@@ -839,7 +780,7 @@ MacroAssemblerCodeRef ceilThunkGenerator(VM* vm)
     jit.returnInt32(SpecializedThunkJIT::regT0);
     doubleResult.link(&jit);
     jit.returnDouble(SpecializedThunkJIT::fpRegT0);
-    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "ceil");
+    return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "ceil");
 }
 
 MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
@@ -855,12 +796,12 @@ MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
     SpecializedThunkJIT::Jump intResult;
     SpecializedThunkJIT::JumpList doubleResult;
     if (jit.supportsFloatingPointTruncate()) {
-        jit.loadDouble(MacroAssembler::TrustedImmPtr(&zeroConstant), SpecializedThunkJIT::fpRegT1);
+        jit.loadDouble(&zeroConstant, SpecializedThunkJIT::fpRegT1);
         doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
         SpecializedThunkJIT::JumpList slowPath;
         // Handle the negative doubles in the slow path for now.
         slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
-        jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), SpecializedThunkJIT::fpRegT1);
+        jit.loadDouble(&halfConstant, SpecializedThunkJIT::fpRegT1);
         jit.addDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
         slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0));
         intResult = jit.jump();
@@ -873,7 +814,7 @@ MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
     jit.returnInt32(SpecializedThunkJIT::regT0);
     doubleResult.link(&jit);
     jit.returnDouble(SpecializedThunkJIT::fpRegT0);
-    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "round");
+    return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "round");
 }
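The round fast path above returns ±0 as a double unchanged, sends negative or NaN inputs to the slow path, and otherwise truncates d + 0.5. A hedged C++ model of that control flow (roundFastPath is a hypothetical helper; the range test only approximates branchTruncateDoubleToInt32's failure condition):

    #include <cassert>
    #include <cstdint>

    // Model of roundThunkGenerator's fast path. The real thunk also
    // special-cases d == 0.0 first, returning the double unchanged so
    // that -0 is preserved; that early-out is elided here.
    static bool roundFastPath(double d, int32_t& result)
    {
        if (!(d >= 0.0)) // DoubleLessThanOrUnordered: negative or NaN -> slow path
            return false;
        double sum = d + 0.5;
        if (sum > 2147483647.0) // truncation failure -> slow path (approximation)
            return false;
        result = static_cast<int32_t>(sum); // truncation == floor for non-negatives
        return true;
    }

    int main()
    {
        int32_t r;
        assert(roundFastPath(2.4, r) && r == 2);
        assert(roundFastPath(2.5, r) && r == 3);
        assert(!roundFastPath(-2.5, r)); // negatives take the slow path
    }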
 
 MacroAssemblerCodeRef expThunkGenerator(VM* vm)
@@ -886,7 +827,7 @@ MacroAssemblerCodeRef expThunkGenerator(VM* vm)
     jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
     jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(exp));
     jit.returnDouble(SpecializedThunkJIT::fpRegT0);
-    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "exp");
+    return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "exp");
 }
 
 MacroAssemblerCodeRef logThunkGenerator(VM* vm)
@@ -899,7 +840,7 @@ MacroAssemblerCodeRef logThunkGenerator(VM* vm)
     jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
     jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(log));
     jit.returnDouble(SpecializedThunkJIT::fpRegT0);
-    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "log");
+    return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "log");
 }
 
 MacroAssemblerCodeRef absThunkGenerator(VM* vm)
@@ -919,7 +860,7 @@ MacroAssemblerCodeRef absThunkGenerator(VM* vm)
     jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
     jit.absDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
     jit.returnDouble(SpecializedThunkJIT::fpRegT1);
-    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "abs");
+    return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "abs");
 }
 
 MacroAssemblerCodeRef powThunkGenerator(VM* vm)
@@ -928,7 +869,7 @@ MacroAssemblerCodeRef powThunkGenerator(VM* vm)
     if (!jit.supportsFloatingPoint())
         return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
 
-    jit.loadDouble(MacroAssembler::TrustedImmPtr(&oneConstant), SpecializedThunkJIT::fpRegT1);
+    jit.loadDouble(&oneConstant, SpecializedThunkJIT::fpRegT1);
     jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
     MacroAssembler::Jump nonIntExponent;
     jit.loadInt32Argument(1, SpecializedThunkJIT::regT0, nonIntExponent);
@@ -956,7 +897,7 @@ MacroAssemblerCodeRef powThunkGenerator(VM* vm)
 
     if (jit.supportsFloatingPointSqrt()) {
         nonIntExponent.link(&jit);
-        jit.loadDouble(MacroAssembler::TrustedImmPtr(&negativeHalfConstant), SpecializedThunkJIT::fpRegT3);
+        jit.loadDouble(&negativeHalfConstant, SpecializedThunkJIT::fpRegT3);
         jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::regT0);
         jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleLessThanOrEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
         jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::fpRegT3));
@@ -971,7 +912,7 @@ MacroAssemblerCodeRef powThunkGenerator(VM* vm)
     } else
         jit.appendFailure(nonIntExponent);
 
-    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "pow");
+    return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "pow");
 }
 
 MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
@@ -990,7 +931,8 @@ MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
         nonIntArg0Jump.link(&jit);
         jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
         jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg0, &jit);
-        jit.appendFailure(jit.jump());
+        jit.xor32(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0);
+        jit.jump(doneLoadingArg0);
     } else
         jit.appendFailure(nonIntArg0Jump);
 
@@ -998,13 +940,117 @@ MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
         nonIntArg1Jump.link(&jit);
         jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1);
         jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg1, &jit);
-        jit.appendFailure(jit.jump());
+        jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT1);
+        jit.jump(doneLoadingArg1);
     } else
         jit.appendFailure(nonIntArg1Jump);
 
-    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "imul");
+    return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "imul");
 }
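In the imul hunk above, the "+" lines change what happens when a double argument fails to truncate to int32: instead of bailing out of the thunk, the register is zeroed via xor32 and execution continues at the multiply. A small model of that coercion (imulCoerce is a hypothetical name; the multiply itself wraps modulo 2^32 like Math.imul):

    #include <cassert>
    #include <cstdint>
    #include <cmath>

    // Double arguments are truncated; a value that branchTruncateDoubleToInt32
    // cannot represent (NaN, infinity, out of int32 range) falls back to 0,
    // matching the xor32 path in the "+" lines.
    static int32_t imulCoerce(double d)
    {
        if (std::isnan(d) || std::isinf(d) || d < -2147483648.0 || d > 2147483647.0)
            return 0; // truncation failure -> register zeroed
        return static_cast<int32_t>(d);
    }

    int main()
    {
        assert(imulCoerce(3.7) == 3);
        assert(imulCoerce(NAN) == 0);
        // The multiply wraps modulo 2^32: (2^16 + 1)^2 mod 2^32 == 131073.
        int32_t product = static_cast<int32_t>(
            uint32_t(imulCoerce(65537.0)) * uint32_t(65537));
        assert(product == 131073);
    }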
 
+static MacroAssemblerCodeRef arrayIteratorNextThunkGenerator(VM* vm, ArrayIterationKind kind)
+{
+    typedef SpecializedThunkJIT::TrustedImm32 TrustedImm32;
+    typedef SpecializedThunkJIT::TrustedImmPtr TrustedImmPtr;
+    typedef SpecializedThunkJIT::Address Address;
+    typedef SpecializedThunkJIT::BaseIndex BaseIndex;
+    typedef SpecializedThunkJIT::Jump Jump;
+
+    SpecializedThunkJIT jit(vm);
+    // Make sure we're being called on an array iterator, and load m_iteratedObject, and m_nextIndex into regT0 and regT1 respectively
+    jit.loadArgumentWithSpecificClass(JSArrayIterator::info(), SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT4, SpecializedThunkJIT::regT1);
+
+    // Early exit if we don't have a thunk for this form of iteration
+    jit.appendFailure(jit.branch32(SpecializedThunkJIT::AboveOrEqual, Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfIterationKind()), TrustedImm32(ArrayIterateKeyValue)));
+
+    jit.loadPtr(Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfIteratedObject()), SpecializedThunkJIT::regT0);
+
+    jit.load32(Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()), SpecializedThunkJIT::regT1);
+
+    // Pull out the butterfly from iteratedObject
+    jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSCell::structureOffset()), SpecializedThunkJIT::regT2);
+
+    jit.load8(Address(SpecializedThunkJIT::regT2, Structure::indexingTypeOffset()), SpecializedThunkJIT::regT3);
+    jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);
+    Jump nullButterfly = jit.branchTestPtr(SpecializedThunkJIT::Zero, SpecializedThunkJIT::regT2);
+
+    Jump notDone = jit.branch32(SpecializedThunkJIT::Below, SpecializedThunkJIT::regT1, Address(SpecializedThunkJIT::regT2, Butterfly::offsetOfPublicLength()));
+
+    nullButterfly.link(&jit);
+
+    // Return the termination signal to indicate that we've finished
+    jit.move(TrustedImmPtr(vm->iterationTerminator.get()), SpecializedThunkJIT::regT0);
+    jit.returnJSCell(SpecializedThunkJIT::regT0);
+
+    notDone.link(&jit);
+
+    if (kind == ArrayIterateKey) {
+        jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
+        jit.returnInt32(SpecializedThunkJIT::regT1);
+        return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "array-iterator-next-key");
+
+    }
+    ASSERT(kind == ArrayIterateValue);
+
+    // Okay, now we're returning a value so make sure we're inside the vector size
+    jit.appendFailure(jit.branch32(SpecializedThunkJIT::AboveOrEqual, SpecializedThunkJIT::regT1, Address(SpecializedThunkJIT::regT2, Butterfly::offsetOfVectorLength())));
+
+    // So now we perform inline loads for int32, value/undecided, and double storage
+    Jump undecidedStorage = jit.branch32(SpecializedThunkJIT::Equal, SpecializedThunkJIT::regT3, TrustedImm32(ArrayWithUndecided));
+    Jump notContiguousStorage = jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(ArrayWithContiguous));
+
+    undecidedStorage.link(&jit);
+
+    jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);
+
+#if USE(JSVALUE64)
+    jit.load64(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight), SpecializedThunkJIT::regT0);
+    Jump notHole = jit.branchTest64(SpecializedThunkJIT::NonZero, SpecializedThunkJIT::regT0);
+    jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), JSInterfaceJIT::regT0);
+    notHole.link(&jit);
+    jit.addPtr(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
+    jit.returnJSValue(SpecializedThunkJIT::regT0);
+#else
+    jit.load32(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight, JSValue::offsetOfTag()), SpecializedThunkJIT::regT3);
+    Jump notHole = jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(JSValue::EmptyValueTag));
+    jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT1);
+    jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT0);
+    jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
+    jit.returnJSValue(SpecializedThunkJIT::regT0, JSInterfaceJIT::regT1);
+    notHole.link(&jit);
+    jit.load32(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight, JSValue::offsetOfPayload()), SpecializedThunkJIT::regT0);
+    jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
+    jit.move(SpecializedThunkJIT::regT3, SpecializedThunkJIT::regT1);
+    jit.returnJSValue(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
+#endif
+    notContiguousStorage.link(&jit);
+
+    Jump notInt32Storage = jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(ArrayWithInt32));
+    jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);
+    jit.load32(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight, JSValue::offsetOfPayload()), SpecializedThunkJIT::regT0);
+    jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
+    jit.returnInt32(SpecializedThunkJIT::regT0);
+    notInt32Storage.link(&jit);
+
+    jit.appendFailure(jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(ArrayWithDouble)));
+    jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);
+    jit.loadDouble(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight), SpecializedThunkJIT::fpRegT0);
+    jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
+    jit.returnDouble(SpecializedThunkJIT::fpRegT0);
+
+    return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "array-iterator-next-value");
+}
+
+MacroAssemblerCodeRef arrayIteratorNextKeyThunkGenerator(VM* vm)
+{
+    return arrayIteratorNextThunkGenerator(vm, ArrayIterateKey);
+}
+
+MacroAssemblerCodeRef arrayIteratorNextValueThunkGenerator(VM* vm)
+{
+    return arrayIteratorNextThunkGenerator(vm, ArrayIterateValue);
+}
+
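The generated fast path added above bails to the C++ implementation (appendFailure) unless `this` is a JSArrayIterator doing key or value iteration, returns vm->iterationTerminator once m_nextIndex reaches the butterfly's public length, and for value iteration loads int32, contiguous, or double storage inline, mapping holes to undefined and bumping m_nextIndex on every hit. A toy model of the value path (not JSC API; std::optional stands in for the hole tag):

    #include <cassert>
    #include <cstddef>
    #include <optional>
    #include <variant>
    #include <vector>

    // Bounds check against the array's public length, direct element load,
    // holes mapped to undefined, index bumped on every successful step.
    struct Undefined { };
    struct Done { };
    using Element = std::optional<double>;          // nullopt models a hole
    using NextResult = std::variant<Done, Undefined, double>;

    static NextResult arrayIteratorNextValue(const std::vector<Element>& storage, size_t& nextIndex)
    {
        if (nextIndex >= storage.size())            // publicLength check
            return Done{};                          // "iteration terminator"
        Element element = storage[nextIndex++];     // load and bump m_nextIndex
        if (!element)                               // hole -> undefined
            return Undefined{};
        return *element;                            // int32/contiguous/double load
    }

    int main()
    {
        std::vector<Element> array = { 1.0, std::nullopt, 3.0 };
        size_t index = 0;
        assert(std::get<double>(arrayIteratorNextValue(array, index)) == 1.0);
        assert(std::holds_alternative<Undefined>(arrayIteratorNextValue(array, index)));
        assert(std::get<double>(arrayIteratorNextValue(array, index)) == 3.0);
        assert(std::holds_alternative<Done>(arrayIteratorNextValue(array, index)));
    }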
 }
 
 #endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/ThunkGenerators.h b/Source/JavaScriptCore/jit/ThunkGenerators.h
index 01c2df6ac..0e2762890 100644
--- a/Source/JavaScriptCore/jit/ThunkGenerators.h
+++ b/Source/JavaScriptCore/jit/ThunkGenerators.h
@@ -26,33 +26,27 @@
 #ifndef ThunkGenerators_h
 #define ThunkGenerators_h
 
-#include "CodeSpecializationKind.h"
-#include "RegisterPreservationMode.h"
 #include "ThunkGenerator.h"
 
 #if ENABLE(JIT)
 namespace JSC {
 
-class CallLinkInfo;
-
 MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(VM*);
 MacroAssemblerCodeRef linkCallThunkGenerator(VM*);
-MacroAssemblerCodeRef linkPolymorphicCallThunkGenerator(VM*);
+MacroAssemblerCodeRef linkConstructThunkGenerator(VM*);
+
+MacroAssemblerCodeRef linkClosureCallThunkGenerator(VM*);
 
-MacroAssemblerCodeRef virtualThunkFor(VM*, CallLinkInfo&);
+MacroAssemblerCodeRef virtualCallThunkGenerator(VM*);
+MacroAssemblerCodeRef virtualConstructThunkGenerator(VM*);
 
 MacroAssemblerCodeRef nativeCallGenerator(VM*);
 MacroAssemblerCodeRef nativeConstructGenerator(VM*);
-MacroAssemblerCodeRef nativeTailCallGenerator(VM*);
-MacroAssemblerCodeRef arityFixupGenerator(VM*);
-
-MacroAssemblerCodeRef baselineGetterReturnThunkGenerator(VM* vm);
-MacroAssemblerCodeRef baselineSetterReturnThunkGenerator(VM* vm);
+MacroAssemblerCodeRef arityFixup(VM*);
 
 MacroAssemblerCodeRef charCodeAtThunkGenerator(VM*);
 MacroAssemblerCodeRef charAtThunkGenerator(VM*);
-MacroAssemblerCodeRef clz32ThunkGenerator(VM*);
 MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM*);
 MacroAssemblerCodeRef absThunkGenerator(VM*);
 MacroAssemblerCodeRef ceilThunkGenerator(VM*);
@@ -63,6 +57,8 @@ MacroAssemblerCodeRef roundThunkGenerator(VM*);
 MacroAssemblerCodeRef sqrtThunkGenerator(VM*);
 MacroAssemblerCodeRef powThunkGenerator(VM*);
 MacroAssemblerCodeRef imulThunkGenerator(VM*);
+MacroAssemblerCodeRef arrayIteratorNextKeyThunkGenerator(VM*);
+MacroAssemblerCodeRef arrayIteratorNextValueThunkGenerator(VM*);
 
 }
 #endif // ENABLE(JIT)
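For context on how the generators declared in ThunkGenerators.h are consumed: the VM lazily runs each generator once and caches the resulting code. A sketch under the assumption that VM::getCTIStub(ThunkGenerator) is the caching entry point in JSC of this vintage (treat the exact signature as an assumption):

    // Hedged sketch, not part of this patch: requesting a specialized thunk.
    #include "ThunkGenerators.h"
    #include "VM.h"

    namespace JSC {

    static MacroAssemblerCodePtr charCodeAtFastPath(VM& vm)
    {
        // Generated on first request, then served from the VM's thunk cache.
        MacroAssemblerCodeRef stub = vm.getCTIStub(charCodeAtThunkGenerator);
        return stub.code();
    }

    } // namespace JSC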
