From 6882a04fb36642862b11efe514251d32070c3d65 Mon Sep 17 00:00:00 2001
From: Konstantin Tokarev
Date: Thu, 25 Aug 2016 19:20:41 +0300
Subject: Imported QtWebKit TP3 (git b57bc6801f1876c3220d5a4bfea33d620d477443)

Change-Id: I3b1d8a2808782c9f34d50240000e20cb38d3680f
Reviewed-by: Konstantin Tokarev
---
 Source/JavaScriptCore/jit/AssemblyHelpers.cpp | 544 +++
 Source/JavaScriptCore/jit/AssemblyHelpers.h | 1394 ++++++++
 Source/JavaScriptCore/jit/BinarySwitch.cpp | 391 +++
 Source/JavaScriptCore/jit/BinarySwitch.h | 147 +
 Source/JavaScriptCore/jit/CCallHelpers.h | 2215 ++++++++++++
 Source/JavaScriptCore/jit/CachedRecovery.cpp | 71 +
 Source/JavaScriptCore/jit/CachedRecovery.h | 137 +
 Source/JavaScriptCore/jit/CallFrameShuffleData.cpp | 68 +
 Source/JavaScriptCore/jit/CallFrameShuffleData.h | 54 +
 Source/JavaScriptCore/jit/CallFrameShuffler.cpp | 774 +++++
 Source/JavaScriptCore/jit/CallFrameShuffler.h | 804 +++++
 .../JavaScriptCore/jit/CallFrameShuffler32_64.cpp | 305 ++
 Source/JavaScriptCore/jit/CallFrameShuffler64.cpp | 369 ++
 .../JavaScriptCore/jit/ClosureCallStubRoutine.cpp | 63 -
 Source/JavaScriptCore/jit/ClosureCallStubRoutine.h | 66 -
 Source/JavaScriptCore/jit/CompactJITCodeMap.h | 38 +-
 .../jit/ExecutableAllocationFuzz.cpp | 73 +
 .../JavaScriptCore/jit/ExecutableAllocationFuzz.h | 51 +
 Source/JavaScriptCore/jit/ExecutableAllocator.cpp | 45 +-
 Source/JavaScriptCore/jit/ExecutableAllocator.h | 72 +-
 .../jit/ExecutableAllocatorFixedVMPool.cpp | 84 +-
 Source/JavaScriptCore/jit/FPRInfo.h | 431 +++
 .../JavaScriptCore/jit/GCAwareJITStubRoutine.cpp | 61 +-
 Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h | 41 +-
 Source/JavaScriptCore/jit/GPRInfo.cpp | 42 +
 Source/JavaScriptCore/jit/GPRInfo.h | 918 +++++
 Source/JavaScriptCore/jit/HostCallReturnValue.cpp | 1 +
 Source/JavaScriptCore/jit/HostCallReturnValue.h | 7 +-
 Source/JavaScriptCore/jit/IntrinsicEmitter.cpp | 134 +
 Source/JavaScriptCore/jit/JIT.cpp | 705 ++--
 Source/JavaScriptCore/jit/JIT.h | 761 ++---
 Source/JavaScriptCore/jit/JITAddGenerator.cpp | 136 +
 Source/JavaScriptCore/jit/JITAddGenerator.h | 80 +
 Source/JavaScriptCore/jit/JITArithmetic.cpp | 1256 +++----
 Source/JavaScriptCore/jit/JITArithmetic32_64.cpp | 918 +----
 Source/JavaScriptCore/jit/JITBitAndGenerator.cpp | 85 +
 Source/JavaScriptCore/jit/JITBitAndGenerator.h | 49 +
 .../JavaScriptCore/jit/JITBitBinaryOpGenerator.h | 71 +
 Source/JavaScriptCore/jit/JITBitOrGenerator.cpp | 74 +
 Source/JavaScriptCore/jit/JITBitOrGenerator.h | 49 +
 Source/JavaScriptCore/jit/JITBitXorGenerator.cpp | 73 +
 Source/JavaScriptCore/jit/JITBitXorGenerator.h | 49 +
 Source/JavaScriptCore/jit/JITCall.cpp | 361 +-
 Source/JavaScriptCore/jit/JITCall32_64.cpp | 339 +-
 Source/JavaScriptCore/jit/JITCode.cpp | 220 +-
 Source/JavaScriptCore/jit/JITCode.h | 333 +-
 Source/JavaScriptCore/jit/JITCompilationEffort.h | 2 +-
 Source/JavaScriptCore/jit/JITDisassembler.cpp | 6 +-
 Source/JavaScriptCore/jit/JITDisassembler.h | 10 +-
 Source/JavaScriptCore/jit/JITDivGenerator.cpp | 116 +
 Source/JavaScriptCore/jit/JITDivGenerator.h | 85 +
 Source/JavaScriptCore/jit/JITDriver.h | 123 -
 Source/JavaScriptCore/jit/JITExceptions.cpp | 57 +-
 Source/JavaScriptCore/jit/JITExceptions.h | 18 +-
 .../JavaScriptCore/jit/JITInlineCacheGenerator.cpp | 175 +
 .../JavaScriptCore/jit/JITInlineCacheGenerator.h | 126 +
 Source/JavaScriptCore/jit/JITInlines.h | 1112 ++++--
 .../JavaScriptCore/jit/JITLeftShiftGenerator.cpp | 84 +
 Source/JavaScriptCore/jit/JITLeftShiftGenerator.h | 49 +
 Source/JavaScriptCore/jit/JITMulGenerator.cpp | 198 ++
Source/JavaScriptCore/jit/JITMulGenerator.h | 83 + Source/JavaScriptCore/jit/JITNegGenerator.cpp | 72 + Source/JavaScriptCore/jit/JITNegGenerator.h | 64 + Source/JavaScriptCore/jit/JITOpcodes.cpp | 1768 ++++------ Source/JavaScriptCore/jit/JITOpcodes32_64.cpp | 1238 ++++--- Source/JavaScriptCore/jit/JITOperations.cpp | 2237 ++++++++++++ Source/JavaScriptCore/jit/JITOperations.h | 393 +++ Source/JavaScriptCore/jit/JITOperationsMSVC64.cpp | 46 + Source/JavaScriptCore/jit/JITPropertyAccess.cpp | 1964 +++++------ .../JavaScriptCore/jit/JITPropertyAccess32_64.cpp | 1553 ++++----- .../JavaScriptCore/jit/JITRightShiftGenerator.cpp | 140 + Source/JavaScriptCore/jit/JITRightShiftGenerator.h | 63 + Source/JavaScriptCore/jit/JITStubCall.h | 303 -- Source/JavaScriptCore/jit/JITStubRoutine.cpp | 9 +- Source/JavaScriptCore/jit/JITStubRoutine.h | 21 +- Source/JavaScriptCore/jit/JITStubs.cpp | 3576 -------------------- Source/JavaScriptCore/jit/JITStubs.h | 460 --- Source/JavaScriptCore/jit/JITStubsMSVC64.asm | 66 +- Source/JavaScriptCore/jit/JITSubGenerator.cpp | 91 + Source/JavaScriptCore/jit/JITSubGenerator.h | 78 + Source/JavaScriptCore/jit/JITThunks.cpp | 67 +- Source/JavaScriptCore/jit/JITThunks.h | 24 +- .../jit/JITToDFGDeferredCompilationCallback.cpp | 76 + .../jit/JITToDFGDeferredCompilationCallback.h | 56 + Source/JavaScriptCore/jit/JITWriteBarrier.h | 10 +- Source/JavaScriptCore/jit/JSInterfaceJIT.h | 263 +- .../jit/JumpReplacementWatchpoint.cpp | 59 - .../JavaScriptCore/jit/JumpReplacementWatchpoint.h | 80 - Source/JavaScriptCore/jit/PCToCodeOriginMap.cpp | 301 ++ Source/JavaScriptCore/jit/PCToCodeOriginMap.h | 104 + .../jit/PolymorphicCallStubRoutine.cpp | 137 + .../jit/PolymorphicCallStubRoutine.h | 115 + Source/JavaScriptCore/jit/Reg.cpp | 49 + Source/JavaScriptCore/jit/Reg.h | 250 ++ Source/JavaScriptCore/jit/RegisterAtOffset.cpp | 45 + Source/JavaScriptCore/jit/RegisterAtOffset.h | 81 + Source/JavaScriptCore/jit/RegisterAtOffsetList.cpp | 80 + Source/JavaScriptCore/jit/RegisterAtOffsetList.h | 82 + Source/JavaScriptCore/jit/RegisterMap.h | 113 + Source/JavaScriptCore/jit/RegisterSet.cpp | 404 +++ Source/JavaScriptCore/jit/RegisterSet.h | 175 + Source/JavaScriptCore/jit/Repatch.cpp | 939 +++++ Source/JavaScriptCore/jit/Repatch.h | 56 + .../jit/ScratchRegisterAllocator.cpp | 302 ++ .../JavaScriptCore/jit/ScratchRegisterAllocator.h | 112 + Source/JavaScriptCore/jit/SetupVarargsFrame.cpp | 143 + Source/JavaScriptCore/jit/SetupVarargsFrame.h | 53 + Source/JavaScriptCore/jit/SlowPathCall.h | 94 + Source/JavaScriptCore/jit/SnippetOperand.h | 101 + Source/JavaScriptCore/jit/SpecializedThunkJIT.h | 90 +- Source/JavaScriptCore/jit/SpillRegistersMode.h | 35 + Source/JavaScriptCore/jit/TempRegisterSet.cpp | 54 + Source/JavaScriptCore/jit/TempRegisterSet.h | 223 ++ Source/JavaScriptCore/jit/ThunkGenerator.h | 2 - Source/JavaScriptCore/jit/ThunkGenerators.cpp | 856 +++-- Source/JavaScriptCore/jit/ThunkGenerators.h | 23 +- 116 files changed, 24617 insertions(+), 12382 deletions(-) create mode 100644 Source/JavaScriptCore/jit/AssemblyHelpers.cpp create mode 100644 Source/JavaScriptCore/jit/AssemblyHelpers.h create mode 100644 Source/JavaScriptCore/jit/BinarySwitch.cpp create mode 100644 Source/JavaScriptCore/jit/BinarySwitch.h create mode 100644 Source/JavaScriptCore/jit/CCallHelpers.h create mode 100644 Source/JavaScriptCore/jit/CachedRecovery.cpp create mode 100644 Source/JavaScriptCore/jit/CachedRecovery.h create mode 100644 Source/JavaScriptCore/jit/CallFrameShuffleData.cpp create mode 100644 
Source/JavaScriptCore/jit/CallFrameShuffleData.h create mode 100644 Source/JavaScriptCore/jit/CallFrameShuffler.cpp create mode 100644 Source/JavaScriptCore/jit/CallFrameShuffler.h create mode 100644 Source/JavaScriptCore/jit/CallFrameShuffler32_64.cpp create mode 100644 Source/JavaScriptCore/jit/CallFrameShuffler64.cpp delete mode 100644 Source/JavaScriptCore/jit/ClosureCallStubRoutine.cpp delete mode 100644 Source/JavaScriptCore/jit/ClosureCallStubRoutine.h create mode 100644 Source/JavaScriptCore/jit/ExecutableAllocationFuzz.cpp create mode 100644 Source/JavaScriptCore/jit/ExecutableAllocationFuzz.h create mode 100644 Source/JavaScriptCore/jit/FPRInfo.h create mode 100644 Source/JavaScriptCore/jit/GPRInfo.cpp create mode 100644 Source/JavaScriptCore/jit/GPRInfo.h create mode 100644 Source/JavaScriptCore/jit/IntrinsicEmitter.cpp create mode 100644 Source/JavaScriptCore/jit/JITAddGenerator.cpp create mode 100644 Source/JavaScriptCore/jit/JITAddGenerator.h create mode 100644 Source/JavaScriptCore/jit/JITBitAndGenerator.cpp create mode 100644 Source/JavaScriptCore/jit/JITBitAndGenerator.h create mode 100644 Source/JavaScriptCore/jit/JITBitBinaryOpGenerator.h create mode 100644 Source/JavaScriptCore/jit/JITBitOrGenerator.cpp create mode 100644 Source/JavaScriptCore/jit/JITBitOrGenerator.h create mode 100644 Source/JavaScriptCore/jit/JITBitXorGenerator.cpp create mode 100644 Source/JavaScriptCore/jit/JITBitXorGenerator.h create mode 100644 Source/JavaScriptCore/jit/JITDivGenerator.cpp create mode 100644 Source/JavaScriptCore/jit/JITDivGenerator.h delete mode 100644 Source/JavaScriptCore/jit/JITDriver.h create mode 100644 Source/JavaScriptCore/jit/JITInlineCacheGenerator.cpp create mode 100644 Source/JavaScriptCore/jit/JITInlineCacheGenerator.h create mode 100644 Source/JavaScriptCore/jit/JITLeftShiftGenerator.cpp create mode 100644 Source/JavaScriptCore/jit/JITLeftShiftGenerator.h create mode 100644 Source/JavaScriptCore/jit/JITMulGenerator.cpp create mode 100644 Source/JavaScriptCore/jit/JITMulGenerator.h create mode 100644 Source/JavaScriptCore/jit/JITNegGenerator.cpp create mode 100644 Source/JavaScriptCore/jit/JITNegGenerator.h create mode 100644 Source/JavaScriptCore/jit/JITOperations.cpp create mode 100644 Source/JavaScriptCore/jit/JITOperations.h create mode 100644 Source/JavaScriptCore/jit/JITOperationsMSVC64.cpp create mode 100644 Source/JavaScriptCore/jit/JITRightShiftGenerator.cpp create mode 100644 Source/JavaScriptCore/jit/JITRightShiftGenerator.h delete mode 100644 Source/JavaScriptCore/jit/JITStubCall.h delete mode 100644 Source/JavaScriptCore/jit/JITStubs.cpp delete mode 100644 Source/JavaScriptCore/jit/JITStubs.h create mode 100644 Source/JavaScriptCore/jit/JITSubGenerator.cpp create mode 100644 Source/JavaScriptCore/jit/JITSubGenerator.h create mode 100644 Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.cpp create mode 100644 Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.h delete mode 100644 Source/JavaScriptCore/jit/JumpReplacementWatchpoint.cpp delete mode 100644 Source/JavaScriptCore/jit/JumpReplacementWatchpoint.h create mode 100644 Source/JavaScriptCore/jit/PCToCodeOriginMap.cpp create mode 100644 Source/JavaScriptCore/jit/PCToCodeOriginMap.h create mode 100644 Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.cpp create mode 100644 Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.h create mode 100644 Source/JavaScriptCore/jit/Reg.cpp create mode 100644 Source/JavaScriptCore/jit/Reg.h create mode 100644 
Source/JavaScriptCore/jit/RegisterAtOffset.cpp create mode 100644 Source/JavaScriptCore/jit/RegisterAtOffset.h create mode 100644 Source/JavaScriptCore/jit/RegisterAtOffsetList.cpp create mode 100644 Source/JavaScriptCore/jit/RegisterAtOffsetList.h create mode 100644 Source/JavaScriptCore/jit/RegisterMap.h create mode 100644 Source/JavaScriptCore/jit/RegisterSet.cpp create mode 100644 Source/JavaScriptCore/jit/RegisterSet.h create mode 100644 Source/JavaScriptCore/jit/Repatch.cpp create mode 100644 Source/JavaScriptCore/jit/Repatch.h create mode 100644 Source/JavaScriptCore/jit/ScratchRegisterAllocator.cpp create mode 100644 Source/JavaScriptCore/jit/ScratchRegisterAllocator.h create mode 100644 Source/JavaScriptCore/jit/SetupVarargsFrame.cpp create mode 100644 Source/JavaScriptCore/jit/SetupVarargsFrame.h create mode 100644 Source/JavaScriptCore/jit/SlowPathCall.h create mode 100644 Source/JavaScriptCore/jit/SnippetOperand.h create mode 100644 Source/JavaScriptCore/jit/SpillRegistersMode.h create mode 100644 Source/JavaScriptCore/jit/TempRegisterSet.cpp create mode 100644 Source/JavaScriptCore/jit/TempRegisterSet.h (limited to 'Source/JavaScriptCore/jit') diff --git a/Source/JavaScriptCore/jit/AssemblyHelpers.cpp b/Source/JavaScriptCore/jit/AssemblyHelpers.cpp new file mode 100644 index 000000000..c1be5932c --- /dev/null +++ b/Source/JavaScriptCore/jit/AssemblyHelpers.cpp @@ -0,0 +1,544 @@ +/* + * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "AssemblyHelpers.h" + +#if ENABLE(JIT) + +#include "JITOperations.h" +#include "JSCInlines.h" + +namespace JSC { + +ExecutableBase* AssemblyHelpers::executableFor(const CodeOrigin& codeOrigin) +{ + if (!codeOrigin.inlineCallFrame) + return m_codeBlock->ownerExecutable(); + + return codeOrigin.inlineCallFrame->baselineCodeBlock->ownerExecutable(); +} + +Vector& AssemblyHelpers::decodedCodeMapFor(CodeBlock* codeBlock) +{ + ASSERT(codeBlock == codeBlock->baselineVersion()); + ASSERT(codeBlock->jitType() == JITCode::BaselineJIT); + ASSERT(codeBlock->jitCodeMap()); + + HashMap>::AddResult result = m_decodedCodeMaps.add(codeBlock, Vector()); + + if (result.isNewEntry) + codeBlock->jitCodeMap()->decode(result.iterator->value); + + return result.iterator->value; +} + +AssemblyHelpers::JumpList AssemblyHelpers::branchIfNotType( + JSValueRegs regs, GPRReg tempGPR, const InferredType::Descriptor& descriptor, TagRegistersMode mode) +{ + AssemblyHelpers::JumpList result; + + switch (descriptor.kind()) { + case InferredType::Bottom: + result.append(jump()); + break; + + case InferredType::Boolean: + result.append(branchIfNotBoolean(regs, tempGPR)); + break; + + case InferredType::Other: + result.append(branchIfNotOther(regs, tempGPR)); + break; + + case InferredType::Int32: + result.append(branchIfNotInt32(regs, mode)); + break; + + case InferredType::Number: + result.append(branchIfNotNumber(regs, tempGPR, mode)); + break; + + case InferredType::String: + result.append(branchIfNotCell(regs, mode)); + result.append(branchIfNotString(regs.payloadGPR())); + break; + + case InferredType::Symbol: + result.append(branchIfNotCell(regs, mode)); + result.append(branchIfNotSymbol(regs.payloadGPR())); + break; + + case InferredType::ObjectWithStructure: + result.append(branchIfNotCell(regs, mode)); + result.append( + branchStructure( + NotEqual, + Address(regs.payloadGPR(), JSCell::structureIDOffset()), + descriptor.structure())); + break; + + case InferredType::ObjectWithStructureOrOther: { + Jump ok = branchIfOther(regs, tempGPR); + result.append(branchIfNotCell(regs, mode)); + result.append( + branchStructure( + NotEqual, + Address(regs.payloadGPR(), JSCell::structureIDOffset()), + descriptor.structure())); + ok.link(this); + break; + } + + case InferredType::Object: + result.append(branchIfNotCell(regs, mode)); + result.append(branchIfNotObject(regs.payloadGPR())); + break; + + case InferredType::ObjectOrOther: { + Jump ok = branchIfOther(regs, tempGPR); + result.append(branchIfNotCell(regs, mode)); + result.append(branchIfNotObject(regs.payloadGPR())); + ok.link(this); + break; + } + + case InferredType::Top: + break; + } + + return result; +} + +AssemblyHelpers::Jump AssemblyHelpers::branchIfFastTypedArray(GPRReg baseGPR) +{ + return branch32( + Equal, + Address(baseGPR, JSArrayBufferView::offsetOfMode()), + TrustedImm32(FastTypedArray)); +} + +AssemblyHelpers::Jump AssemblyHelpers::branchIfNotFastTypedArray(GPRReg baseGPR) +{ + return branch32( + NotEqual, + Address(baseGPR, JSArrayBufferView::offsetOfMode()), + TrustedImm32(FastTypedArray)); +} + +AssemblyHelpers::Jump AssemblyHelpers::loadTypedArrayVector(GPRReg baseGPR, GPRReg resultGPR) +{ + RELEASE_ASSERT(baseGPR != resultGPR); + + loadPtr(Address(baseGPR, JSArrayBufferView::offsetOfVector()), resultGPR); + Jump ok = branchIfToSpace(resultGPR); + Jump result = branchIfFastTypedArray(baseGPR); + ok.link(this); + return result; +} + +void AssemblyHelpers::purifyNaN(FPRReg fpr) +{ + MacroAssembler::Jump notNaN = 
branchDouble(DoubleEqual, fpr, fpr); + static const double NaN = PNaN; + loadDouble(TrustedImmPtr(&NaN), fpr); + notNaN.link(this); +} + +#if ENABLE(SAMPLING_FLAGS) +void AssemblyHelpers::setSamplingFlag(int32_t flag) +{ + ASSERT(flag >= 1); + ASSERT(flag <= 32); + or32(TrustedImm32(1u << (flag - 1)), AbsoluteAddress(SamplingFlags::addressOfFlags())); +} + +void AssemblyHelpers::clearSamplingFlag(int32_t flag) +{ + ASSERT(flag >= 1); + ASSERT(flag <= 32); + and32(TrustedImm32(~(1u << (flag - 1))), AbsoluteAddress(SamplingFlags::addressOfFlags())); +} +#endif + +#if !ASSERT_DISABLED +#if USE(JSVALUE64) +void AssemblyHelpers::jitAssertIsInt32(GPRReg gpr) +{ +#if CPU(X86_64) + Jump checkInt32 = branch64(BelowOrEqual, gpr, TrustedImm64(static_cast(0xFFFFFFFFu))); + abortWithReason(AHIsNotInt32); + checkInt32.link(this); +#else + UNUSED_PARAM(gpr); +#endif +} + +void AssemblyHelpers::jitAssertIsJSInt32(GPRReg gpr) +{ + Jump checkJSInt32 = branch64(AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister); + abortWithReason(AHIsNotJSInt32); + checkJSInt32.link(this); +} + +void AssemblyHelpers::jitAssertIsJSNumber(GPRReg gpr) +{ + Jump checkJSNumber = branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister); + abortWithReason(AHIsNotJSNumber); + checkJSNumber.link(this); +} + +void AssemblyHelpers::jitAssertIsJSDouble(GPRReg gpr) +{ + Jump checkJSInt32 = branch64(AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister); + Jump checkJSNumber = branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister); + checkJSInt32.link(this); + abortWithReason(AHIsNotJSDouble); + checkJSNumber.link(this); +} + +void AssemblyHelpers::jitAssertIsCell(GPRReg gpr) +{ + Jump checkCell = branchTest64(MacroAssembler::Zero, gpr, GPRInfo::tagMaskRegister); + abortWithReason(AHIsNotCell); + checkCell.link(this); +} + +void AssemblyHelpers::jitAssertTagsInPlace() +{ + Jump ok = branch64(Equal, GPRInfo::tagTypeNumberRegister, TrustedImm64(TagTypeNumber)); + abortWithReason(AHTagTypeNumberNotInPlace); + breakpoint(); + ok.link(this); + + ok = branch64(Equal, GPRInfo::tagMaskRegister, TrustedImm64(TagMask)); + abortWithReason(AHTagMaskNotInPlace); + ok.link(this); +} +#elif USE(JSVALUE32_64) +void AssemblyHelpers::jitAssertIsInt32(GPRReg gpr) +{ + UNUSED_PARAM(gpr); +} + +void AssemblyHelpers::jitAssertIsJSInt32(GPRReg gpr) +{ + Jump checkJSInt32 = branch32(Equal, gpr, TrustedImm32(JSValue::Int32Tag)); + abortWithReason(AHIsNotJSInt32); + checkJSInt32.link(this); +} + +void AssemblyHelpers::jitAssertIsJSNumber(GPRReg gpr) +{ + Jump checkJSInt32 = branch32(Equal, gpr, TrustedImm32(JSValue::Int32Tag)); + Jump checkJSDouble = branch32(Below, gpr, TrustedImm32(JSValue::LowestTag)); + abortWithReason(AHIsNotJSNumber); + checkJSInt32.link(this); + checkJSDouble.link(this); +} + +void AssemblyHelpers::jitAssertIsJSDouble(GPRReg gpr) +{ + Jump checkJSDouble = branch32(Below, gpr, TrustedImm32(JSValue::LowestTag)); + abortWithReason(AHIsNotJSDouble); + checkJSDouble.link(this); +} + +void AssemblyHelpers::jitAssertIsCell(GPRReg gpr) +{ + Jump checkCell = branch32(Equal, gpr, TrustedImm32(JSValue::CellTag)); + abortWithReason(AHIsNotCell); + checkCell.link(this); +} + +void AssemblyHelpers::jitAssertTagsInPlace() +{ +} +#endif // USE(JSVALUE32_64) + +void AssemblyHelpers::jitAssertHasValidCallFrame() +{ + Jump checkCFR = branchTestPtr(Zero, GPRInfo::callFrameRegister, TrustedImm32(7)); + abortWithReason(AHCallFrameMisaligned); + checkCFR.link(this); +} + +void AssemblyHelpers::jitAssertIsNull(GPRReg gpr) +{ + 
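[Editor's note: the JSVALUE64 assertions above all test the standard 64-bit value encoding that the two tag registers hold (the same constants that emitMaterializeTagCheckRegisters() in AssemblyHelpers.h materializes): any TagTypeNumber bit set means "number", a value at or above TagTypeNumber is a boxed int32, and a value with no TagMask bit set is a cell pointer. A minimal host-side sketch of the predicates being asserted; the helper names are illustrative, not from the patch:

    #include <cstdint>

    static constexpr uint64_t TagTypeNumber   = 0xFFFF000000000000ull;
    static constexpr uint64_t TagBitTypeOther = 0x2;
    static constexpr uint64_t TagMask         = TagTypeNumber | TagBitTypeOther;

    static bool isInt32(uint64_t bits)  { return bits >= TagTypeNumber; }            // jitAssertIsJSInt32
    static bool isNumber(uint64_t bits) { return bits & TagTypeNumber; }             // jitAssertIsJSNumber
    static bool isDouble(uint64_t bits) { return isNumber(bits) && !isInt32(bits); } // jitAssertIsJSDouble
    static bool isCell(uint64_t bits)   { return !(bits & TagMask); }                // jitAssertIsCell
]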
Jump checkNull = branchTestPtr(Zero, gpr); + abortWithReason(AHIsNotNull); + checkNull.link(this); +} + +void AssemblyHelpers::jitAssertArgumentCountSane() +{ + Jump ok = branch32(Below, payloadFor(JSStack::ArgumentCount), TrustedImm32(10000000)); + abortWithReason(AHInsaneArgumentCount); + ok.link(this); +} + +#endif // !ASSERT_DISABLED + +void AssemblyHelpers::jitReleaseAssertNoException() +{ + Jump noException; +#if USE(JSVALUE64) + noException = branchTest64(Zero, AbsoluteAddress(vm()->addressOfException())); +#elif USE(JSVALUE32_64) + noException = branch32(Equal, AbsoluteAddress(vm()->addressOfException()), TrustedImm32(0)); +#endif + abortWithReason(JITUncoughtExceptionAfterCall); + noException.link(this); +} + +void AssemblyHelpers::callExceptionFuzz() +{ + if (!Options::useExceptionFuzz()) + return; + + EncodedJSValue* buffer = vm()->exceptionFuzzingBuffer(sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters)); + + for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) { +#if USE(JSVALUE64) + store64(GPRInfo::toRegister(i), buffer + i); +#else + store32(GPRInfo::toRegister(i), buffer + i); +#endif + } + for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) { + move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0); + storeDouble(FPRInfo::toRegister(i), Address(GPRInfo::regT0)); + } + + // Set up one argument. +#if CPU(X86) + poke(GPRInfo::callFrameRegister, 0); +#else + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); +#endif + move(TrustedImmPtr(bitwise_cast(operationExceptionFuzz)), GPRInfo::nonPreservedNonReturnGPR); + call(GPRInfo::nonPreservedNonReturnGPR); + + for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) { + move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0); + loadDouble(Address(GPRInfo::regT0), FPRInfo::toRegister(i)); + } + for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) { +#if USE(JSVALUE64) + load64(buffer + i, GPRInfo::toRegister(i)); +#else + load32(buffer + i, GPRInfo::toRegister(i)); +#endif + } +} + +AssemblyHelpers::Jump AssemblyHelpers::emitExceptionCheck(ExceptionCheckKind kind, ExceptionJumpWidth width) +{ + callExceptionFuzz(); + + if (width == FarJumpWidth) + kind = (kind == NormalExceptionCheck ? InvertedExceptionCheck : NormalExceptionCheck); + + Jump result; +#if USE(JSVALUE64) + result = branchTest64(kind == NormalExceptionCheck ? NonZero : Zero, AbsoluteAddress(vm()->addressOfException())); +#elif USE(JSVALUE32_64) + result = branch32(kind == NormalExceptionCheck ? 
NotEqual : Equal, AbsoluteAddress(vm()->addressOfException()), TrustedImm32(0)); +#endif + + if (width == NormalJumpWidth) + return result; + + PatchableJump realJump = patchableJump(); + result.link(this); + + return realJump.m_jump; +} + +AssemblyHelpers::Jump AssemblyHelpers::emitNonPatchableExceptionCheck() +{ + callExceptionFuzz(); + + Jump result; +#if USE(JSVALUE64) + result = branchTest64(NonZero, AbsoluteAddress(vm()->addressOfException())); +#elif USE(JSVALUE32_64) + result = branch32(NotEqual, AbsoluteAddress(vm()->addressOfException()), TrustedImm32(0)); +#endif + + return result; +} + +void AssemblyHelpers::emitStoreStructureWithTypeInfo(AssemblyHelpers& jit, TrustedImmPtr structure, RegisterID dest) +{ + const Structure* structurePtr = static_cast(structure.m_value); +#if USE(JSVALUE64) + jit.store64(TrustedImm64(structurePtr->idBlob()), MacroAssembler::Address(dest, JSCell::structureIDOffset())); + if (!ASSERT_DISABLED) { + Jump correctStructure = jit.branch32(Equal, MacroAssembler::Address(dest, JSCell::structureIDOffset()), TrustedImm32(structurePtr->id())); + jit.abortWithReason(AHStructureIDIsValid); + correctStructure.link(&jit); + + Jump correctIndexingType = jit.branch8(Equal, MacroAssembler::Address(dest, JSCell::indexingTypeOffset()), TrustedImm32(structurePtr->indexingType())); + jit.abortWithReason(AHIndexingTypeIsValid); + correctIndexingType.link(&jit); + + Jump correctType = jit.branch8(Equal, MacroAssembler::Address(dest, JSCell::typeInfoTypeOffset()), TrustedImm32(structurePtr->typeInfo().type())); + jit.abortWithReason(AHTypeInfoIsValid); + correctType.link(&jit); + + Jump correctFlags = jit.branch8(Equal, MacroAssembler::Address(dest, JSCell::typeInfoFlagsOffset()), TrustedImm32(structurePtr->typeInfo().inlineTypeFlags())); + jit.abortWithReason(AHTypeInfoInlineTypeFlagsAreValid); + correctFlags.link(&jit); + } +#else + // Do a 32-bit wide store to initialize the cell's fields. + jit.store32(TrustedImm32(structurePtr->objectInitializationBlob()), MacroAssembler::Address(dest, JSCell::indexingTypeOffset())); + jit.storePtr(structure, MacroAssembler::Address(dest, JSCell::structureIDOffset())); +#endif +} + +#if USE(JSVALUE64) +template +void emitRandomThunkImpl(AssemblyHelpers& jit, GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, FPRReg result, const LoadFromHigh& loadFromHigh, const StoreToHigh& storeToHigh, const LoadFromLow& loadFromLow, const StoreToLow& storeToLow) +{ + // Inlined WeakRandom::advance(). + // uint64_t x = m_low; + loadFromLow(scratch0); + // uint64_t y = m_high; + loadFromHigh(scratch1); + // m_low = y; + storeToLow(scratch1); + + // x ^= x << 23; + jit.move(scratch0, scratch2); + jit.lshift64(AssemblyHelpers::TrustedImm32(23), scratch2); + jit.xor64(scratch2, scratch0); + + // x ^= x >> 17; + jit.move(scratch0, scratch2); + jit.rshift64(AssemblyHelpers::TrustedImm32(17), scratch2); + jit.xor64(scratch2, scratch0); + + // x ^= y ^ (y >> 26); + jit.move(scratch1, scratch2); + jit.rshift64(AssemblyHelpers::TrustedImm32(26), scratch2); + jit.xor64(scratch1, scratch2); + jit.xor64(scratch2, scratch0); + + // m_high = x; + storeToHigh(scratch0); + + // return x + y; + jit.add64(scratch1, scratch0); + + // Extract random 53bit. [0, 53] bit is safe integer number ranges in double representation. + jit.move(AssemblyHelpers::TrustedImm64((1ULL << 53) - 1), scratch1); + jit.and64(scratch1, scratch0); + // Now, scratch0 is always in range of int64_t. Safe to convert it to double with cvtsi2sdq. 
+ jit.convertInt64ToDouble(scratch0, result); + + // Convert `(53bit double integer value) / (1 << 53)` to `(53bit double integer value) * (1.0 / (1 << 53))`. + // In latter case, `1.0 / (1 << 53)` will become a double value represented as (mantissa = 0 & exp = 970, it means 1e-(2**54)). + static const double scale = 1.0 / (1ULL << 53); + + // Multiplying 1e-(2**54) with the double integer does not change anything of the mantissa part of the double integer. + // It just reduces the exp part of the given 53bit double integer. + // (Except for 0.0. This is specially handled and in this case, exp just becomes 0.) + // Now we get 53bit precision random double value in [0, 1). + jit.move(AssemblyHelpers::TrustedImmPtr(&scale), scratch1); + jit.mulDouble(AssemblyHelpers::Address(scratch1), result); +} + +void AssemblyHelpers::emitRandomThunk(JSGlobalObject* globalObject, GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, FPRReg result) +{ + void* lowAddress = reinterpret_cast(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::lowOffset(); + void* highAddress = reinterpret_cast(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::highOffset(); + + auto loadFromHigh = [&](GPRReg high) { + load64(highAddress, high); + }; + auto storeToHigh = [&](GPRReg high) { + store64(high, highAddress); + }; + auto loadFromLow = [&](GPRReg low) { + load64(lowAddress, low); + }; + auto storeToLow = [&](GPRReg low) { + store64(low, lowAddress); + }; + + emitRandomThunkImpl(*this, scratch0, scratch1, scratch2, result, loadFromHigh, storeToHigh, loadFromLow, storeToLow); +} + +void AssemblyHelpers::emitRandomThunk(GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, GPRReg scratch3, FPRReg result) +{ + emitGetFromCallFrameHeaderPtr(JSStack::Callee, scratch3); + emitLoadStructure(scratch3, scratch3, scratch0); + loadPtr(Address(scratch3, Structure::globalObjectOffset()), scratch3); + // Now, scratch3 holds JSGlobalObject*. 
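[Editor's note: the sequence emitted by emitRandomThunkImpl() is the xorshift128+ update documented in the comments above (the same update WeakRandom::advance() performs), followed by masking to 53 bits and scaling into [0, 1). A minimal non-JIT sketch of the same computation; RandomState, advance and nextDouble are illustrative names, the real state lives in the JSGlobalObject's WeakRandom as addressed by the thunks:

    #include <cstdint>

    struct RandomState { uint64_t low; uint64_t high; }; // mirrors WeakRandom's m_low / m_high

    static uint64_t advance(RandomState& s) // xorshift128+
    {
        uint64_t x = s.low;
        uint64_t y = s.high;
        s.low = y;
        x ^= x << 23;
        x ^= x >> 17;
        x ^= y ^ (y >> 26);
        s.high = x;
        return x + y;
    }

    static double nextDouble(RandomState& s)
    {
        uint64_t bits = advance(s) & ((1ULL << 53) - 1);          // keep the 53 safe-integer bits
        return static_cast<double>(bits) * (1.0 / (1ULL << 53));  // scale into [0, 1)
    }
]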
+ + auto loadFromHigh = [&](GPRReg high) { + load64(Address(scratch3, JSGlobalObject::weakRandomOffset() + WeakRandom::highOffset()), high); + }; + auto storeToHigh = [&](GPRReg high) { + store64(high, Address(scratch3, JSGlobalObject::weakRandomOffset() + WeakRandom::highOffset())); + }; + auto loadFromLow = [&](GPRReg low) { + load64(Address(scratch3, JSGlobalObject::weakRandomOffset() + WeakRandom::lowOffset()), low); + }; + auto storeToLow = [&](GPRReg low) { + store64(low, Address(scratch3, JSGlobalObject::weakRandomOffset() + WeakRandom::lowOffset())); + }; + + emitRandomThunkImpl(*this, scratch0, scratch1, scratch2, result, loadFromHigh, storeToHigh, loadFromLow, storeToLow); +} +#endif + +void AssemblyHelpers::restoreCalleeSavesFromVMCalleeSavesBuffer() +{ +#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0 + char* sourceBuffer = bitwise_cast(m_vm->calleeSaveRegistersBuffer); + + RegisterAtOffsetList* allCalleeSaves = m_vm->getAllCalleeSaveRegisterOffsets(); + RegisterSet dontRestoreRegisters = RegisterSet::stackRegisters(); + unsigned registerCount = allCalleeSaves->size(); + + for (unsigned i = 0; i < registerCount; i++) { + RegisterAtOffset entry = allCalleeSaves->at(i); + if (dontRestoreRegisters.get(entry.reg())) + continue; + if (entry.reg().isGPR()) + loadPtr(static_cast(sourceBuffer + entry.offset()), entry.reg().gpr()); + else + loadDouble(TrustedImmPtr(sourceBuffer + entry.offset()), entry.reg().fpr()); + } +#endif +} + +} // namespace JSC + +#endif // ENABLE(JIT) + diff --git a/Source/JavaScriptCore/jit/AssemblyHelpers.h b/Source/JavaScriptCore/jit/AssemblyHelpers.h new file mode 100644 index 000000000..918af7dca --- /dev/null +++ b/Source/JavaScriptCore/jit/AssemblyHelpers.h @@ -0,0 +1,1394 @@ +/* + * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef AssemblyHelpers_h +#define AssemblyHelpers_h + +#if ENABLE(JIT) + +#include "CodeBlock.h" +#include "CopyBarrier.h" +#include "FPRInfo.h" +#include "GPRInfo.h" +#include "InlineCallFrame.h" +#include "JITCode.h" +#include "MacroAssembler.h" +#include "MaxFrameExtentForSlowPathCall.h" +#include "RegisterAtOffsetList.h" +#include "RegisterSet.h" +#include "TypeofType.h" +#include "VM.h" + +namespace JSC { + +typedef void (*V_DebugOperation_EPP)(ExecState*, void*, void*); + +class AssemblyHelpers : public MacroAssembler { +public: + AssemblyHelpers(VM* vm, CodeBlock* codeBlock) + : m_vm(vm) + , m_codeBlock(codeBlock) + , m_baselineCodeBlock(codeBlock ? codeBlock->baselineAlternative() : 0) + { + if (m_codeBlock) { + ASSERT(m_baselineCodeBlock); + ASSERT(!m_baselineCodeBlock->alternative()); + ASSERT(m_baselineCodeBlock->jitType() == JITCode::None || JITCode::isBaselineCode(m_baselineCodeBlock->jitType())); + } + } + + CodeBlock* codeBlock() { return m_codeBlock; } + VM* vm() { return m_vm; } + AssemblerType_T& assembler() { return m_assembler; } + + void checkStackPointerAlignment() + { + // This check is both unneeded and harder to write correctly for ARM64 +#if !defined(NDEBUG) && !CPU(ARM64) + Jump stackPointerAligned = branchTestPtr(Zero, stackPointerRegister, TrustedImm32(0xf)); + abortWithReason(AHStackPointerMisaligned); + stackPointerAligned.link(this); +#endif + } + + template + void storeCell(T cell, Address address) + { +#if USE(JSVALUE64) + store64(cell, address); +#else + store32(cell, address.withOffset(PayloadOffset)); + store32(TrustedImm32(JSValue::CellTag), address.withOffset(TagOffset)); +#endif + } + + void storeValue(JSValueRegs regs, Address address) + { +#if USE(JSVALUE64) + store64(regs.gpr(), address); +#else + store32(regs.payloadGPR(), address.withOffset(PayloadOffset)); + store32(regs.tagGPR(), address.withOffset(TagOffset)); +#endif + } + + void storeValue(JSValueRegs regs, BaseIndex address) + { +#if USE(JSVALUE64) + store64(regs.gpr(), address); +#else + store32(regs.payloadGPR(), address.withOffset(PayloadOffset)); + store32(regs.tagGPR(), address.withOffset(TagOffset)); +#endif + } + + void storeValue(JSValueRegs regs, void* address) + { +#if USE(JSVALUE64) + store64(regs.gpr(), address); +#else + store32(regs.payloadGPR(), bitwise_cast(bitwise_cast(address) + PayloadOffset)); + store32(regs.tagGPR(), bitwise_cast(bitwise_cast(address) + TagOffset)); +#endif + } + + void loadValue(Address address, JSValueRegs regs) + { +#if USE(JSVALUE64) + load64(address, regs.gpr()); +#else + if (address.base == regs.payloadGPR()) { + load32(address.withOffset(TagOffset), regs.tagGPR()); + load32(address.withOffset(PayloadOffset), regs.payloadGPR()); + } else { + load32(address.withOffset(PayloadOffset), regs.payloadGPR()); + load32(address.withOffset(TagOffset), regs.tagGPR()); + } +#endif + } + + void loadValue(BaseIndex address, JSValueRegs regs) + { +#if USE(JSVALUE64) + load64(address, regs.gpr()); +#else + if (address.base == regs.payloadGPR() || address.index == regs.payloadGPR()) { + // We actually could handle the case where the registers are aliased to both + // tag and payload, but we don't for now. 
+ RELEASE_ASSERT(address.base != regs.tagGPR()); + RELEASE_ASSERT(address.index != regs.tagGPR()); + + load32(address.withOffset(TagOffset), regs.tagGPR()); + load32(address.withOffset(PayloadOffset), regs.payloadGPR()); + } else { + load32(address.withOffset(PayloadOffset), regs.payloadGPR()); + load32(address.withOffset(TagOffset), regs.tagGPR()); + } +#endif + } + + void moveValueRegs(JSValueRegs srcRegs, JSValueRegs destRegs) + { +#if USE(JSVALUE32_64) + move(srcRegs.tagGPR(), destRegs.tagGPR()); +#endif + move(srcRegs.payloadGPR(), destRegs.payloadGPR()); + } + + void moveValue(JSValue value, JSValueRegs regs) + { +#if USE(JSVALUE64) + move(Imm64(JSValue::encode(value)), regs.gpr()); +#else + move(Imm32(value.tag()), regs.tagGPR()); + move(Imm32(value.payload()), regs.payloadGPR()); +#endif + } + + void moveTrustedValue(JSValue value, JSValueRegs regs) + { +#if USE(JSVALUE64) + move(TrustedImm64(JSValue::encode(value)), regs.gpr()); +#else + move(TrustedImm32(value.tag()), regs.tagGPR()); + move(TrustedImm32(value.payload()), regs.payloadGPR()); +#endif + } + + void storeTrustedValue(JSValue value, Address address) + { +#if USE(JSVALUE64) + store64(TrustedImm64(JSValue::encode(value)), address); +#else + store32(TrustedImm32(value.tag()), address.withOffset(TagOffset)); + store32(TrustedImm32(value.payload()), address.withOffset(PayloadOffset)); +#endif + } + + void storeTrustedValue(JSValue value, BaseIndex address) + { +#if USE(JSVALUE64) + store64(TrustedImm64(JSValue::encode(value)), address); +#else + store32(TrustedImm32(value.tag()), address.withOffset(TagOffset)); + store32(TrustedImm32(value.payload()), address.withOffset(PayloadOffset)); +#endif + } + + void emitSaveCalleeSavesFor(CodeBlock* codeBlock) + { + ASSERT(codeBlock); + + RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters(); + RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs()); + unsigned registerCount = calleeSaves->size(); + + for (unsigned i = 0; i < registerCount; i++) { + RegisterAtOffset entry = calleeSaves->at(i); + if (dontSaveRegisters.get(entry.reg())) + continue; + storePtr(entry.reg().gpr(), Address(framePointerRegister, entry.offset())); + } + } + + enum RestoreTagRegisterMode { UseExistingTagRegisterContents, CopyBaselineCalleeSavedRegistersFromBaseFrame }; + + void emitSaveOrCopyCalleeSavesFor(CodeBlock* codeBlock, VirtualRegister offsetVirtualRegister, RestoreTagRegisterMode tagRegisterMode, GPRReg temp) + { + ASSERT(codeBlock); + + RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters(); + RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs()); + unsigned registerCount = calleeSaves->size(); + +#if USE(JSVALUE64) + RegisterSet baselineCalleeSaves = RegisterSet::llintBaselineCalleeSaveRegisters(); +#endif + + for (unsigned i = 0; i < registerCount; i++) { + RegisterAtOffset entry = calleeSaves->at(i); + if (dontSaveRegisters.get(entry.reg())) + continue; + + GPRReg registerToWrite; + +#if USE(JSVALUE32_64) + UNUSED_PARAM(tagRegisterMode); + UNUSED_PARAM(temp); +#else + if (tagRegisterMode == CopyBaselineCalleeSavedRegistersFromBaseFrame && baselineCalleeSaves.get(entry.reg())) { + registerToWrite = temp; + loadPtr(AssemblyHelpers::Address(GPRInfo::callFrameRegister, entry.offset()), registerToWrite); + } else +#endif + registerToWrite = entry.reg().gpr(); + + storePtr(registerToWrite, Address(framePointerRegister, offsetVirtualRegister.offsetInBytes() + entry.offset())); + } + } + 
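[Editor's note: both save helpers above address the save slots relative to framePointerRegister. entry.offset() is the byte offset recorded in the CodeBlock's RegisterAtOffsetList, and emitSaveOrCopyCalleeSavesFor() additionally adds offsetVirtualRegister.offsetInBytes(), i.e. offset() * sizeof(Register), so the registers land in the callee-save area of a frame other than the one fp currently points at. With illustrative numbers and 8-byte Registers, an entry recorded at offset -24 combined with offsetVirtualRegister -10 is stored at

    fp + (-10 * 8) + (-24) = fp - 104
]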
+ void emitRestoreCalleeSavesFor(CodeBlock* codeBlock) + { + ASSERT(codeBlock); + + RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters(); + RegisterSet dontRestoreRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs()); + unsigned registerCount = calleeSaves->size(); + + for (unsigned i = 0; i < registerCount; i++) { + RegisterAtOffset entry = calleeSaves->at(i); + if (dontRestoreRegisters.get(entry.reg())) + continue; + loadPtr(Address(framePointerRegister, entry.offset()), entry.reg().gpr()); + } + } + + void emitSaveCalleeSaves() + { + emitSaveCalleeSavesFor(codeBlock()); + } + + void emitRestoreCalleeSaves() + { + emitRestoreCalleeSavesFor(codeBlock()); + } + + void copyCalleeSavesToVMCalleeSavesBuffer(const TempRegisterSet& usedRegisters = { RegisterSet::stubUnavailableRegisters() }) + { +#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0 + GPRReg temp1 = usedRegisters.getFreeGPR(0); + + move(TrustedImmPtr(m_vm->calleeSaveRegistersBuffer), temp1); + + RegisterAtOffsetList* allCalleeSaves = m_vm->getAllCalleeSaveRegisterOffsets(); + RegisterSet dontCopyRegisters = RegisterSet::stackRegisters(); + unsigned registerCount = allCalleeSaves->size(); + + for (unsigned i = 0; i < registerCount; i++) { + RegisterAtOffset entry = allCalleeSaves->at(i); + if (dontCopyRegisters.get(entry.reg())) + continue; + if (entry.reg().isGPR()) + storePtr(entry.reg().gpr(), Address(temp1, entry.offset())); + else + storeDouble(entry.reg().fpr(), Address(temp1, entry.offset())); + } +#else + UNUSED_PARAM(usedRegisters); +#endif + } + + void restoreCalleeSavesFromVMCalleeSavesBuffer(); + + void copyCalleeSavesFromFrameOrRegisterToVMCalleeSavesBuffer(const TempRegisterSet& usedRegisters = { RegisterSet::stubUnavailableRegisters() }) + { +#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0 + GPRReg temp1 = usedRegisters.getFreeGPR(0); + GPRReg temp2 = usedRegisters.getFreeGPR(1); + FPRReg fpTemp = usedRegisters.getFreeFPR(); + ASSERT(temp2 != InvalidGPRReg); + + ASSERT(codeBlock()); + + // Copy saved calleeSaves on stack or unsaved calleeSaves in register to vm calleeSave buffer + move(TrustedImmPtr(m_vm->calleeSaveRegistersBuffer), temp1); + + RegisterAtOffsetList* allCalleeSaves = m_vm->getAllCalleeSaveRegisterOffsets(); + RegisterAtOffsetList* currentCalleeSaves = codeBlock()->calleeSaveRegisters(); + RegisterSet dontCopyRegisters = RegisterSet::stackRegisters(); + unsigned registerCount = allCalleeSaves->size(); + + for (unsigned i = 0; i < registerCount; i++) { + RegisterAtOffset vmEntry = allCalleeSaves->at(i); + if (dontCopyRegisters.get(vmEntry.reg())) + continue; + RegisterAtOffset* currentFrameEntry = currentCalleeSaves->find(vmEntry.reg()); + + if (vmEntry.reg().isGPR()) { + GPRReg regToStore; + if (currentFrameEntry) { + // Load calleeSave from stack into temp register + regToStore = temp2; + loadPtr(Address(framePointerRegister, currentFrameEntry->offset()), regToStore); + } else + // Just store callee save directly + regToStore = vmEntry.reg().gpr(); + + storePtr(regToStore, Address(temp1, vmEntry.offset())); + } else { + FPRReg fpRegToStore; + if (currentFrameEntry) { + // Load calleeSave from stack into temp register + fpRegToStore = fpTemp; + loadDouble(Address(framePointerRegister, currentFrameEntry->offset()), fpRegToStore); + } else + // Just store callee save directly + fpRegToStore = vmEntry.reg().fpr(); + + storeDouble(fpRegToStore, Address(temp1, vmEntry.offset())); + } + } +#else + UNUSED_PARAM(usedRegisters); +#endif + } + + void emitMaterializeTagCheckRegisters() + { 
+#if USE(JSVALUE64) + move(MacroAssembler::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister); + orPtr(MacroAssembler::TrustedImm32(TagBitTypeOther), GPRInfo::tagTypeNumberRegister, GPRInfo::tagMaskRegister); +#endif + } + +#if CPU(X86_64) || CPU(X86) + static size_t prologueStackPointerDelta() + { + // Prologue only saves the framePointerRegister + return sizeof(void*); + } + + void emitFunctionPrologue() + { + push(framePointerRegister); + move(stackPointerRegister, framePointerRegister); + } + + void emitFunctionEpilogueWithEmptyFrame() + { + pop(framePointerRegister); + } + + void emitFunctionEpilogue() + { + move(framePointerRegister, stackPointerRegister); + pop(framePointerRegister); + } + + void preserveReturnAddressAfterCall(GPRReg reg) + { + pop(reg); + } + + void restoreReturnAddressBeforeReturn(GPRReg reg) + { + push(reg); + } + + void restoreReturnAddressBeforeReturn(Address address) + { + push(address); + } +#endif // CPU(X86_64) || CPU(X86) + +#if CPU(ARM) || CPU(ARM64) + static size_t prologueStackPointerDelta() + { + // Prologue saves the framePointerRegister and linkRegister + return 2 * sizeof(void*); + } + + void emitFunctionPrologue() + { + pushPair(framePointerRegister, linkRegister); + move(stackPointerRegister, framePointerRegister); + } + + void emitFunctionEpilogueWithEmptyFrame() + { + popPair(framePointerRegister, linkRegister); + } + + void emitFunctionEpilogue() + { + move(framePointerRegister, stackPointerRegister); + emitFunctionEpilogueWithEmptyFrame(); + } + + ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg) + { + move(linkRegister, reg); + } + + ALWAYS_INLINE void restoreReturnAddressBeforeReturn(RegisterID reg) + { + move(reg, linkRegister); + } + + ALWAYS_INLINE void restoreReturnAddressBeforeReturn(Address address) + { + loadPtr(address, linkRegister); + } +#endif + +#if CPU(MIPS) + static size_t prologueStackPointerDelta() + { + // Prologue saves the framePointerRegister and returnAddressRegister + return 2 * sizeof(void*); + } + + void emitFunctionPrologue() + { + pushPair(framePointerRegister, returnAddressRegister); + move(stackPointerRegister, framePointerRegister); + } + + void emitFunctionEpilogueWithEmptyFrame() + { + popPair(framePointerRegister, returnAddressRegister); + } + + void emitFunctionEpilogue() + { + move(framePointerRegister, stackPointerRegister); + emitFunctionEpilogueWithEmptyFrame(); + } + + ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg) + { + move(returnAddressRegister, reg); + } + + ALWAYS_INLINE void restoreReturnAddressBeforeReturn(RegisterID reg) + { + move(reg, returnAddressRegister); + } + + ALWAYS_INLINE void restoreReturnAddressBeforeReturn(Address address) + { + loadPtr(address, returnAddressRegister); + } +#endif + +#if CPU(SH4) + static size_t prologueStackPointerDelta() + { + // Prologue saves the framePointerRegister and link register + return 2 * sizeof(void*); + } + + void emitFunctionPrologue() + { + push(linkRegister); + push(framePointerRegister); + move(stackPointerRegister, framePointerRegister); + } + + void emitFunctionEpilogue() + { + move(framePointerRegister, stackPointerRegister); + pop(framePointerRegister); + pop(linkRegister); + } + + ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg) + { + m_assembler.stspr(reg); + } + + ALWAYS_INLINE void restoreReturnAddressBeforeReturn(RegisterID reg) + { + m_assembler.ldspr(reg); + } + + ALWAYS_INLINE void restoreReturnAddressBeforeReturn(Address address) + { + loadPtrLinkReg(address); + } +#endif + 
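[Editor's note: a minimal illustrative sketch of how the per-CPU prologue/epilogue helpers above pair with the callee-save and tag-register helpers; emitFrameSketch and the omitted body are not part of this patch, and the usual JIT plumbing (VM, CodeBlock, linking) is left out:

    // Assumes the AssemblyHelpers declared in this header, constructed with a
    // valid VM* and CodeBlock*.
    void emitFrameSketch(JSC::AssemblyHelpers& jit)
    {
        jit.emitFunctionPrologue();             // push fp (and lr/ra where the CPU has one)
        jit.emitSaveCalleeSaves();              // spill the CodeBlock's recorded callee saves
        jit.emitMaterializeTagCheckRegisters(); // JSVALUE64: load TagTypeNumber / TagMask
        jit.checkStackPointerAlignment();

        // ... emit the function body ...

        jit.emitRestoreCalleeSaves();
        jit.emitFunctionEpilogue();             // restore sp from fp, then pop fp (and lr/ra)
        jit.ret();
    }
]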
+ void emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry entry, GPRReg to, GPRReg from = GPRInfo::callFrameRegister) + { + loadPtr(Address(from, entry * sizeof(Register)), to); + } + void emitGetFromCallFrameHeader32(JSStack::CallFrameHeaderEntry entry, GPRReg to, GPRReg from = GPRInfo::callFrameRegister) + { + load32(Address(from, entry * sizeof(Register)), to); + } +#if USE(JSVALUE64) + void emitGetFromCallFrameHeader64(JSStack::CallFrameHeaderEntry entry, GPRReg to, GPRReg from = GPRInfo::callFrameRegister) + { + load64(Address(from, entry * sizeof(Register)), to); + } +#endif // USE(JSVALUE64) + void emitPutToCallFrameHeader(GPRReg from, JSStack::CallFrameHeaderEntry entry) + { + storePtr(from, Address(GPRInfo::callFrameRegister, entry * sizeof(Register))); + } + + void emitPutToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry entry) + { + storePtr(TrustedImmPtr(value), Address(GPRInfo::callFrameRegister, entry * sizeof(Register))); + } + + void emitGetCallerFrameFromCallFrameHeaderPtr(RegisterID to) + { + loadPtr(Address(GPRInfo::callFrameRegister, CallFrame::callerFrameOffset()), to); + } + void emitPutCallerFrameToCallFrameHeader(RegisterID from) + { + storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::callerFrameOffset())); + } + + void emitPutReturnPCToCallFrameHeader(RegisterID from) + { + storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset())); + } + void emitPutReturnPCToCallFrameHeader(TrustedImmPtr from) + { + storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset())); + } + + // emitPutToCallFrameHeaderBeforePrologue() and related are used to access callee frame header + // fields before the code from emitFunctionPrologue() has executed. + // First, the access is via the stack pointer. Second, the address calculation must also take + // into account that the stack pointer may not have been adjusted down for the return PC and/or + // caller's frame pointer. On some platforms, the callee is responsible for pushing the + // "link register" containing the return address in the function prologue. +#if USE(JSVALUE64) + void emitPutToCallFrameHeaderBeforePrologue(GPRReg from, JSStack::CallFrameHeaderEntry entry) + { + storePtr(from, Address(stackPointerRegister, entry * static_cast(sizeof(Register)) - prologueStackPointerDelta())); + } +#else + void emitPutPayloadToCallFrameHeaderBeforePrologue(GPRReg from, JSStack::CallFrameHeaderEntry entry) + { + storePtr(from, Address(stackPointerRegister, entry * static_cast(sizeof(Register)) - prologueStackPointerDelta() + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload))); + } + + void emitPutTagToCallFrameHeaderBeforePrologue(TrustedImm32 tag, JSStack::CallFrameHeaderEntry entry) + { + storePtr(tag, Address(stackPointerRegister, entry * static_cast(sizeof(Register)) - prologueStackPointerDelta() + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag))); + } +#endif + + JumpList branchIfNotEqual(JSValueRegs regs, JSValue value) + { +#if USE(JSVALUE64) + return branch64(NotEqual, regs.gpr(), TrustedImm64(JSValue::encode(value))); +#else + JumpList result; + result.append(branch32(NotEqual, regs.tagGPR(), TrustedImm32(value.tag()))); + if (value.isEmpty() || value.isUndefinedOrNull()) + return result; // These don't have anything interesting in the payload. 
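[Editor's note: the prologueStackPointerDelta() adjustment in the before-prologue accessors above works out as follows on x86_64 with 8-byte Registers: before emitFunctionPrologue() runs, only the return PC has been pushed, so the frame pointer the prologue will establish is sp - 8, and header entry k therefore lives at sp + k * 8 - 8, i.e. entry * sizeof(Register) - prologueStackPointerDelta(). On ARM/ARM64 the return address is still in the link register, but the prologue will push both fp and lr, so the delta is 16.]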
+ result.append(branch32(NotEqual, regs.payloadGPR(), TrustedImm32(value.payload()))); + return result; +#endif + } + + Jump branchIfEqual(JSValueRegs regs, JSValue value) + { +#if USE(JSVALUE64) + return branch64(Equal, regs.gpr(), TrustedImm64(JSValue::encode(value))); +#else + Jump notEqual; + // These don't have anything interesting in the payload. + if (!value.isEmpty() && !value.isUndefinedOrNull()) + notEqual = branch32(NotEqual, regs.payloadGPR(), TrustedImm32(value.payload())); + Jump result = branch32(Equal, regs.tagGPR(), TrustedImm32(value.tag())); + if (notEqual.isSet()) + notEqual.link(this); + return result; +#endif + } + + enum TagRegistersMode { + DoNotHaveTagRegisters, + HaveTagRegisters + }; + + Jump branchIfNotCell(GPRReg reg, TagRegistersMode mode = HaveTagRegisters) + { +#if USE(JSVALUE64) + if (mode == HaveTagRegisters) + return branchTest64(NonZero, reg, GPRInfo::tagMaskRegister); + return branchTest64(NonZero, reg, TrustedImm64(TagMask)); +#else + UNUSED_PARAM(mode); + return branch32(MacroAssembler::NotEqual, reg, TrustedImm32(JSValue::CellTag)); +#endif + } + Jump branchIfNotCell(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters) + { +#if USE(JSVALUE64) + return branchIfNotCell(regs.gpr(), mode); +#else + return branchIfNotCell(regs.tagGPR(), mode); +#endif + } + + Jump branchIfCell(GPRReg reg, TagRegistersMode mode = HaveTagRegisters) + { +#if USE(JSVALUE64) + if (mode == HaveTagRegisters) + return branchTest64(Zero, reg, GPRInfo::tagMaskRegister); + return branchTest64(Zero, reg, TrustedImm64(TagMask)); +#else + UNUSED_PARAM(mode); + return branch32(MacroAssembler::Equal, reg, TrustedImm32(JSValue::CellTag)); +#endif + } + Jump branchIfCell(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters) + { +#if USE(JSVALUE64) + return branchIfCell(regs.gpr(), mode); +#else + return branchIfCell(regs.tagGPR(), mode); +#endif + } + + Jump branchIfOther(JSValueRegs regs, GPRReg tempGPR) + { +#if USE(JSVALUE64) + move(regs.gpr(), tempGPR); + and64(TrustedImm32(~TagBitUndefined), tempGPR); + return branch64(Equal, tempGPR, TrustedImm64(ValueNull)); +#else + or32(TrustedImm32(1), regs.tagGPR(), tempGPR); + return branch32(Equal, tempGPR, TrustedImm32(JSValue::NullTag)); +#endif + } + + Jump branchIfNotOther(JSValueRegs regs, GPRReg tempGPR) + { +#if USE(JSVALUE64) + move(regs.gpr(), tempGPR); + and64(TrustedImm32(~TagBitUndefined), tempGPR); + return branch64(NotEqual, tempGPR, TrustedImm64(ValueNull)); +#else + or32(TrustedImm32(1), regs.tagGPR(), tempGPR); + return branch32(NotEqual, tempGPR, TrustedImm32(JSValue::NullTag)); +#endif + } + + Jump branchIfInt32(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters) + { +#if USE(JSVALUE64) + if (mode == HaveTagRegisters) + return branch64(AboveOrEqual, regs.gpr(), GPRInfo::tagTypeNumberRegister); + return branch64(AboveOrEqual, regs.gpr(), TrustedImm64(TagTypeNumber)); +#else + UNUSED_PARAM(mode); + return branch32(Equal, regs.tagGPR(), TrustedImm32(JSValue::Int32Tag)); +#endif + } + +#if USE(JSVALUE64) + Jump branchIfNotInt32(GPRReg gpr, TagRegistersMode mode = HaveTagRegisters) + { + if (mode == HaveTagRegisters) + return branch64(Below, gpr, GPRInfo::tagTypeNumberRegister); + return branch64(Below, gpr, TrustedImm64(TagTypeNumber)); + } +#endif + + Jump branchIfNotInt32(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters) + { +#if USE(JSVALUE64) + return branchIfNotInt32(regs.gpr(), mode); +#else + UNUSED_PARAM(mode); + return branch32(NotEqual, regs.tagGPR(), TrustedImm32(JSValue::Int32Tag)); 
+#endif + } + + // Note that the tempGPR is not used in 64-bit mode. + Jump branchIfNumber(JSValueRegs regs, GPRReg tempGPR, TagRegistersMode mode = HaveTagRegisters) + { +#if USE(JSVALUE64) + UNUSED_PARAM(tempGPR); + if (mode == HaveTagRegisters) + return branchTest64(NonZero, regs.gpr(), GPRInfo::tagTypeNumberRegister); + return branchTest64(NonZero, regs.gpr(), TrustedImm64(TagTypeNumber)); +#else + UNUSED_PARAM(mode); + add32(TrustedImm32(1), regs.tagGPR(), tempGPR); + return branch32(Below, tempGPR, TrustedImm32(JSValue::LowestTag + 1)); +#endif + } + + // Note that the tempGPR is not used in 64-bit mode. + Jump branchIfNotNumber(JSValueRegs regs, GPRReg tempGPR, TagRegistersMode mode = HaveTagRegisters) + { +#if USE(JSVALUE64) + UNUSED_PARAM(tempGPR); + if (mode == HaveTagRegisters) + return branchTest64(Zero, regs.gpr(), GPRInfo::tagTypeNumberRegister); + return branchTest64(Zero, regs.gpr(), TrustedImm64(TagTypeNumber)); +#else + UNUSED_PARAM(mode); + add32(TrustedImm32(1), regs.tagGPR(), tempGPR); + return branch32(AboveOrEqual, tempGPR, TrustedImm32(JSValue::LowestTag + 1)); +#endif + } + + // Note that the tempGPR is not used in 32-bit mode. + Jump branchIfBoolean(JSValueRegs regs, GPRReg tempGPR) + { +#if USE(JSVALUE64) + move(regs.gpr(), tempGPR); + xor64(TrustedImm32(static_cast(ValueFalse)), tempGPR); + return branchTest64(Zero, tempGPR, TrustedImm32(static_cast(~1))); +#else + UNUSED_PARAM(tempGPR); + return branch32(Equal, regs.tagGPR(), TrustedImm32(JSValue::BooleanTag)); +#endif + } + + // Note that the tempGPR is not used in 32-bit mode. + Jump branchIfNotBoolean(JSValueRegs regs, GPRReg tempGPR) + { +#if USE(JSVALUE64) + move(regs.gpr(), tempGPR); + xor64(TrustedImm32(static_cast(ValueFalse)), tempGPR); + return branchTest64(NonZero, tempGPR, TrustedImm32(static_cast(~1))); +#else + UNUSED_PARAM(tempGPR); + return branch32(NotEqual, regs.tagGPR(), TrustedImm32(JSValue::BooleanTag)); +#endif + } + + Jump branchIfObject(GPRReg cellGPR) + { + return branch8( + AboveOrEqual, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType)); + } + + Jump branchIfNotObject(GPRReg cellGPR) + { + return branch8( + Below, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType)); + } + + Jump branchIfType(GPRReg cellGPR, JSType type) + { + return branch8(Equal, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(type)); + } + + Jump branchIfNotType(GPRReg cellGPR, JSType type) + { + return branch8(NotEqual, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(type)); + } + + Jump branchIfString(GPRReg cellGPR) { return branchIfType(cellGPR, StringType); } + Jump branchIfNotString(GPRReg cellGPR) { return branchIfNotType(cellGPR, StringType); } + Jump branchIfSymbol(GPRReg cellGPR) { return branchIfType(cellGPR, SymbolType); } + Jump branchIfNotSymbol(GPRReg cellGPR) { return branchIfNotType(cellGPR, SymbolType); } + Jump branchIfFunction(GPRReg cellGPR) { return branchIfType(cellGPR, JSFunctionType); } + Jump branchIfNotFunction(GPRReg cellGPR) { return branchIfNotType(cellGPR, JSFunctionType); } + + Jump branchIfEmpty(JSValueRegs regs) + { +#if USE(JSVALUE64) + return branchTest64(Zero, regs.gpr()); +#else + return branch32(Equal, regs.tagGPR(), TrustedImm32(JSValue::EmptyValueTag)); +#endif + } + + JumpList branchIfNotType( + JSValueRegs, GPRReg tempGPR, const InferredType::Descriptor&, TagRegistersMode); + + template + Jump branchStructure(RelationalCondition condition, T leftHandSide, Structure* structure) + { +#if USE(JSVALUE64) + 
return branch32(condition, leftHandSide, TrustedImm32(structure->id())); +#else + return branchPtr(condition, leftHandSide, TrustedImmPtr(structure)); +#endif + } + + Jump branchIfToSpace(GPRReg storageGPR) + { + return branchTest32(Zero, storageGPR, TrustedImm32(CopyBarrierBase::spaceBits)); + } + + Jump branchIfNotToSpace(GPRReg storageGPR) + { + return branchTest32(NonZero, storageGPR, TrustedImm32(CopyBarrierBase::spaceBits)); + } + + void removeSpaceBits(GPRReg storageGPR) + { + andPtr(TrustedImmPtr(~static_cast(CopyBarrierBase::spaceBits)), storageGPR); + } + + Jump branchIfFastTypedArray(GPRReg baseGPR); + Jump branchIfNotFastTypedArray(GPRReg baseGPR); + + // Returns a jump to slow path for when we need to execute the barrier. Note that baseGPR and + // resultGPR must be different. + Jump loadTypedArrayVector(GPRReg baseGPR, GPRReg resultGPR); + + static Address addressForByteOffset(ptrdiff_t byteOffset) + { + return Address(GPRInfo::callFrameRegister, byteOffset); + } + static Address addressFor(VirtualRegister virtualRegister, GPRReg baseReg) + { + ASSERT(virtualRegister.isValid()); + return Address(baseReg, virtualRegister.offset() * sizeof(Register)); + } + static Address addressFor(VirtualRegister virtualRegister) + { + // NB. It's tempting on some architectures to sometimes use an offset from the stack + // register because for some offsets that will encode to a smaller instruction. But we + // cannot do this. We use this in places where the stack pointer has been moved to some + // unpredictable location. + ASSERT(virtualRegister.isValid()); + return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register)); + } + static Address addressFor(int operand) + { + return addressFor(static_cast(operand)); + } + + static Address tagFor(VirtualRegister virtualRegister) + { + ASSERT(virtualRegister.isValid()); + return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register) + TagOffset); + } + static Address tagFor(int operand) + { + return tagFor(static_cast(operand)); + } + + static Address payloadFor(VirtualRegister virtualRegister) + { + ASSERT(virtualRegister.isValid()); + return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register) + PayloadOffset); + } + static Address payloadFor(int operand) + { + return payloadFor(static_cast(operand)); + } + + // Access to our fixed callee CallFrame. + static Address calleeFrameSlot(int slot) + { + ASSERT(slot >= JSStack::CallerFrameAndPCSize); + return Address(stackPointerRegister, sizeof(Register) * (slot - JSStack::CallerFrameAndPCSize)); + } + + // Access to our fixed callee CallFrame. 
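[Editor's note: the frame accessors above reduce to simple offset arithmetic (64-bit shown, sizeof(Register) == 8). The outgoing frame is materialized so that its caller-frame-pointer and return-PC slots sit just below sp, to be filled by the call instruction and the callee's prologue:

    addressFor(r)         == callFrameRegister   + 8 * r.offset()
    tagFor(r)             == addressFor(r) + TagOffset      (payloadFor: + PayloadOffset)
    calleeFrameSlot(s)    == stackPointerRegister + 8 * (s - JSStack::CallerFrameAndPCSize)
    calleeArgumentSlot(a) == calleeFrameSlot(virtualRegisterForArgument(a).offset())
]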
+ static Address calleeArgumentSlot(int argument) + { + return calleeFrameSlot(virtualRegisterForArgument(argument).offset()); + } + + static Address calleeFrameTagSlot(int slot) + { + return calleeFrameSlot(slot).withOffset(TagOffset); + } + + static Address calleeFramePayloadSlot(int slot) + { + return calleeFrameSlot(slot).withOffset(PayloadOffset); + } + + static Address calleeArgumentTagSlot(int argument) + { + return calleeArgumentSlot(argument).withOffset(TagOffset); + } + + static Address calleeArgumentPayloadSlot(int argument) + { + return calleeArgumentSlot(argument).withOffset(PayloadOffset); + } + + static Address calleeFrameCallerFrame() + { + return calleeFrameSlot(0).withOffset(CallFrame::callerFrameOffset()); + } + + static GPRReg selectScratchGPR(GPRReg preserve1 = InvalidGPRReg, GPRReg preserve2 = InvalidGPRReg, GPRReg preserve3 = InvalidGPRReg, GPRReg preserve4 = InvalidGPRReg, GPRReg preserve5 = InvalidGPRReg) + { + if (preserve1 != GPRInfo::regT0 && preserve2 != GPRInfo::regT0 && preserve3 != GPRInfo::regT0 && preserve4 != GPRInfo::regT0 && preserve5 != GPRInfo::regT0) + return GPRInfo::regT0; + + if (preserve1 != GPRInfo::regT1 && preserve2 != GPRInfo::regT1 && preserve3 != GPRInfo::regT1 && preserve4 != GPRInfo::regT1 && preserve5 != GPRInfo::regT1) + return GPRInfo::regT1; + + if (preserve1 != GPRInfo::regT2 && preserve2 != GPRInfo::regT2 && preserve3 != GPRInfo::regT2 && preserve4 != GPRInfo::regT2 && preserve5 != GPRInfo::regT2) + return GPRInfo::regT2; + + if (preserve1 != GPRInfo::regT3 && preserve2 != GPRInfo::regT3 && preserve3 != GPRInfo::regT3 && preserve4 != GPRInfo::regT3 && preserve5 != GPRInfo::regT3) + return GPRInfo::regT3; + + if (preserve1 != GPRInfo::regT4 && preserve2 != GPRInfo::regT4 && preserve3 != GPRInfo::regT4 && preserve4 != GPRInfo::regT4 && preserve5 != GPRInfo::regT4) + return GPRInfo::regT4; + + return GPRInfo::regT5; + } + + // Add a debug call. This call has no effect on JIT code execution state. + void debugCall(V_DebugOperation_EPP function, void* argument) + { + size_t scratchSize = sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters); + ScratchBuffer* scratchBuffer = m_vm->scratchBufferForSize(scratchSize); + EncodedJSValue* buffer = static_cast(scratchBuffer->dataBuffer()); + + for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) { +#if USE(JSVALUE64) + store64(GPRInfo::toRegister(i), buffer + i); +#else + store32(GPRInfo::toRegister(i), buffer + i); +#endif + } + + for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) { + move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0); + storeDouble(FPRInfo::toRegister(i), GPRInfo::regT0); + } + + // Tell GC mark phase how much of the scratch buffer is active during call. + move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT0); + storePtr(TrustedImmPtr(scratchSize), GPRInfo::regT0); + +#if CPU(X86_64) || CPU(ARM) || CPU(ARM64) || CPU(MIPS) || CPU(SH4) + move(TrustedImmPtr(buffer), GPRInfo::argumentGPR2); + move(TrustedImmPtr(argument), GPRInfo::argumentGPR1); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + GPRReg scratch = selectScratchGPR(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, GPRInfo::argumentGPR2); +#elif CPU(X86) + poke(GPRInfo::callFrameRegister, 0); + poke(TrustedImmPtr(argument), 1); + poke(TrustedImmPtr(buffer), 2); + GPRReg scratch = GPRInfo::regT0; +#else +#error "JIT not supported on this platform." 
+#endif + move(TrustedImmPtr(reinterpret_cast(function)), scratch); + call(scratch); + + move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT0); + storePtr(TrustedImmPtr(0), GPRInfo::regT0); + + for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) { + move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0); + loadDouble(GPRInfo::regT0, FPRInfo::toRegister(i)); + } + for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) { +#if USE(JSVALUE64) + load64(buffer + i, GPRInfo::toRegister(i)); +#else + load32(buffer + i, GPRInfo::toRegister(i)); +#endif + } + } + + // These methods JIT generate dynamic, debug-only checks - akin to ASSERTs. +#if !ASSERT_DISABLED + void jitAssertIsInt32(GPRReg); + void jitAssertIsJSInt32(GPRReg); + void jitAssertIsJSNumber(GPRReg); + void jitAssertIsJSDouble(GPRReg); + void jitAssertIsCell(GPRReg); + void jitAssertHasValidCallFrame(); + void jitAssertIsNull(GPRReg); + void jitAssertTagsInPlace(); + void jitAssertArgumentCountSane(); +#else + void jitAssertIsInt32(GPRReg) { } + void jitAssertIsJSInt32(GPRReg) { } + void jitAssertIsJSNumber(GPRReg) { } + void jitAssertIsJSDouble(GPRReg) { } + void jitAssertIsCell(GPRReg) { } + void jitAssertHasValidCallFrame() { } + void jitAssertIsNull(GPRReg) { } + void jitAssertTagsInPlace() { } + void jitAssertArgumentCountSane() { } +#endif + + void jitReleaseAssertNoException(); + + void purifyNaN(FPRReg); + + // These methods convert between doubles, and doubles boxed and JSValues. +#if USE(JSVALUE64) + GPRReg boxDouble(FPRReg fpr, GPRReg gpr) + { + moveDoubleTo64(fpr, gpr); + sub64(GPRInfo::tagTypeNumberRegister, gpr); + jitAssertIsJSDouble(gpr); + return gpr; + } + FPRReg unboxDoubleWithoutAssertions(GPRReg gpr, GPRReg resultGPR, FPRReg fpr) + { + add64(GPRInfo::tagTypeNumberRegister, gpr, resultGPR); + move64ToDouble(resultGPR, fpr); + return fpr; + } + FPRReg unboxDouble(GPRReg gpr, GPRReg resultGPR, FPRReg fpr) + { + jitAssertIsJSDouble(gpr); + return unboxDoubleWithoutAssertions(gpr, resultGPR, fpr); + } + + void boxDouble(FPRReg fpr, JSValueRegs regs) + { + boxDouble(fpr, regs.gpr()); + } + + void unboxDoubleNonDestructive(JSValueRegs regs, FPRReg destFPR, GPRReg resultGPR, FPRReg) + { + unboxDouble(regs.payloadGPR(), resultGPR, destFPR); + } + + // Here are possible arrangements of source, target, scratch: + // - source, target, scratch can all be separate registers. + // - source and target can be the same but scratch is separate. + // - target and scratch can be the same but source is separate. + void boxInt52(GPRReg source, GPRReg target, GPRReg scratch, FPRReg fpScratch) + { + // Is it an int32? + signExtend32ToPtr(source, scratch); + Jump isInt32 = branch64(Equal, source, scratch); + + // Nope, it's not, but regT0 contains the int64 value. 
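            // The int52 in `source` does not fit in an int32, so it is boxed as a double
            // instead: convert the full 64-bit value and NaN-box the result via boxDouble() below.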
+ convertInt64ToDouble(source, fpScratch); + boxDouble(fpScratch, target); + Jump done = jump(); + + isInt32.link(this); + zeroExtend32ToPtr(source, target); + or64(GPRInfo::tagTypeNumberRegister, target); + + done.link(this); + } +#endif + +#if USE(JSVALUE32_64) + void boxDouble(FPRReg fpr, GPRReg tagGPR, GPRReg payloadGPR) + { + moveDoubleToInts(fpr, payloadGPR, tagGPR); + } + void unboxDouble(GPRReg tagGPR, GPRReg payloadGPR, FPRReg fpr, FPRReg scratchFPR) + { + moveIntsToDouble(payloadGPR, tagGPR, fpr, scratchFPR); + } + + void boxDouble(FPRReg fpr, JSValueRegs regs) + { + boxDouble(fpr, regs.tagGPR(), regs.payloadGPR()); + } + void unboxDouble(JSValueRegs regs, FPRReg fpr, FPRReg scratchFPR) + { + unboxDouble(regs.tagGPR(), regs.payloadGPR(), fpr, scratchFPR); + } + + void unboxDoubleNonDestructive(const JSValueRegs regs, FPRReg destFPR, GPRReg, FPRReg scratchFPR) + { + unboxDouble(regs, destFPR, scratchFPR); + } +#endif + + void boxBooleanPayload(GPRReg boolGPR, GPRReg payloadGPR) + { +#if USE(JSVALUE64) + add32(TrustedImm32(ValueFalse), boolGPR, payloadGPR); +#else + move(boolGPR, payloadGPR); +#endif + } + + void boxBooleanPayload(bool value, GPRReg payloadGPR) + { +#if USE(JSVALUE64) + move(TrustedImm32(ValueFalse + value), payloadGPR); +#else + move(TrustedImm32(value), payloadGPR); +#endif + } + + void boxBoolean(GPRReg boolGPR, JSValueRegs boxedRegs) + { + boxBooleanPayload(boolGPR, boxedRegs.payloadGPR()); +#if USE(JSVALUE32_64) + move(TrustedImm32(JSValue::BooleanTag), boxedRegs.tagGPR()); +#endif + } + + void boxInt32(GPRReg intGPR, JSValueRegs boxedRegs, TagRegistersMode mode = HaveTagRegisters) + { +#if USE(JSVALUE64) + if (mode == DoNotHaveTagRegisters) { + move(intGPR, boxedRegs.gpr()); + or64(TrustedImm64(TagTypeNumber), boxedRegs.gpr()); + } else + or64(GPRInfo::tagTypeNumberRegister, intGPR, boxedRegs.gpr()); +#else + UNUSED_PARAM(mode); + move(intGPR, boxedRegs.payloadGPR()); + move(TrustedImm32(JSValue::Int32Tag), boxedRegs.tagGPR()); +#endif + } + + void callExceptionFuzz(); + + enum ExceptionCheckKind { NormalExceptionCheck, InvertedExceptionCheck }; + enum ExceptionJumpWidth { NormalJumpWidth, FarJumpWidth }; + Jump emitExceptionCheck( + ExceptionCheckKind = NormalExceptionCheck, ExceptionJumpWidth = NormalJumpWidth); + Jump emitNonPatchableExceptionCheck(); + +#if ENABLE(SAMPLING_COUNTERS) + static void emitCount(MacroAssembler& jit, AbstractSamplingCounter& counter, int32_t increment = 1) + { + jit.add64(TrustedImm32(increment), AbsoluteAddress(counter.addressOfCounter())); + } + void emitCount(AbstractSamplingCounter& counter, int32_t increment = 1) + { + add64(TrustedImm32(increment), AbsoluteAddress(counter.addressOfCounter())); + } +#endif + +#if ENABLE(SAMPLING_FLAGS) + void setSamplingFlag(int32_t); + void clearSamplingFlag(int32_t flag); +#endif + + JSGlobalObject* globalObjectFor(CodeOrigin codeOrigin) + { + return codeBlock()->globalObjectFor(codeOrigin); + } + + bool isStrictModeFor(CodeOrigin codeOrigin) + { + if (!codeOrigin.inlineCallFrame) + return codeBlock()->isStrictMode(); + return codeOrigin.inlineCallFrame->isStrictMode(); + } + + ECMAMode ecmaModeFor(CodeOrigin codeOrigin) + { + return isStrictModeFor(codeOrigin) ? 
StrictMode : NotStrictMode; + } + + ExecutableBase* executableFor(const CodeOrigin& codeOrigin); + + CodeBlock* baselineCodeBlockFor(const CodeOrigin& codeOrigin) + { + return baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, baselineCodeBlock()); + } + + CodeBlock* baselineCodeBlockFor(InlineCallFrame* inlineCallFrame) + { + if (!inlineCallFrame) + return baselineCodeBlock(); + return baselineCodeBlockForInlineCallFrame(inlineCallFrame); + } + + CodeBlock* baselineCodeBlock() + { + return m_baselineCodeBlock; + } + + static VirtualRegister argumentsStart(InlineCallFrame* inlineCallFrame) + { + if (!inlineCallFrame) + return VirtualRegister(CallFrame::argumentOffset(0)); + if (inlineCallFrame->arguments.size() <= 1) + return virtualRegisterForLocal(0); + ValueRecovery recovery = inlineCallFrame->arguments[1]; + RELEASE_ASSERT(recovery.technique() == DisplacedInJSStack); + return recovery.virtualRegister(); + } + + static VirtualRegister argumentsStart(const CodeOrigin& codeOrigin) + { + return argumentsStart(codeOrigin.inlineCallFrame); + } + + void emitLoadStructure(RegisterID source, RegisterID dest, RegisterID scratch) + { +#if USE(JSVALUE64) + load32(MacroAssembler::Address(source, JSCell::structureIDOffset()), dest); + loadPtr(vm()->heap.structureIDTable().base(), scratch); + loadPtr(MacroAssembler::BaseIndex(scratch, dest, MacroAssembler::TimesEight), dest); +#else + UNUSED_PARAM(scratch); + loadPtr(MacroAssembler::Address(source, JSCell::structureIDOffset()), dest); +#endif + } + + static void emitLoadStructure(AssemblyHelpers& jit, RegisterID base, RegisterID dest, RegisterID scratch) + { +#if USE(JSVALUE64) + jit.load32(MacroAssembler::Address(base, JSCell::structureIDOffset()), dest); + jit.loadPtr(jit.vm()->heap.structureIDTable().base(), scratch); + jit.loadPtr(MacroAssembler::BaseIndex(scratch, dest, MacroAssembler::TimesEight), dest); +#else + UNUSED_PARAM(scratch); + jit.loadPtr(MacroAssembler::Address(base, JSCell::structureIDOffset()), dest); +#endif + } + + void emitStoreStructureWithTypeInfo(TrustedImmPtr structure, RegisterID dest, RegisterID) + { + emitStoreStructureWithTypeInfo(*this, structure, dest); + } + + void emitStoreStructureWithTypeInfo(RegisterID structure, RegisterID dest, RegisterID scratch) + { +#if USE(JSVALUE64) + load64(MacroAssembler::Address(structure, Structure::structureIDOffset()), scratch); + store64(scratch, MacroAssembler::Address(dest, JSCell::structureIDOffset())); +#else + // Store all the info flags using a single 32-bit wide load and store. + load32(MacroAssembler::Address(structure, Structure::indexingTypeOffset()), scratch); + store32(scratch, MacroAssembler::Address(dest, JSCell::indexingTypeOffset())); + + // Store the StructureID + storePtr(structure, MacroAssembler::Address(dest, JSCell::structureIDOffset())); +#endif + } + + static void emitStoreStructureWithTypeInfo(AssemblyHelpers& jit, TrustedImmPtr structure, RegisterID dest); + + Jump jumpIfIsRememberedOrInEden(GPRReg cell) + { + return branchTest8(MacroAssembler::NonZero, MacroAssembler::Address(cell, JSCell::cellStateOffset())); + } + + Jump jumpIfIsRememberedOrInEden(JSCell* cell) + { + uint8_t* address = reinterpret_cast(cell) + JSCell::cellStateOffset(); + return branchTest8(MacroAssembler::NonZero, MacroAssembler::AbsoluteAddress(address)); + } + + // Emits the branch structure for typeof. The code emitted by this doesn't fall through. The + // functor is called at those points where we have pinpointed a type. 
One way to use this is to
+    // have the functor emit the code to put the type string into an appropriate register and then
+    // jump out. A secondary functor is used for the call trap and masquerades-as-undefined slow
+    // case. It is passed the unlinked jump to the slow case.
+    template<typename Functor, typename SlowPathFunctor>
+    void emitTypeOf(
+        JSValueRegs regs, GPRReg tempGPR, const Functor& functor,
+        const SlowPathFunctor& slowPathFunctor)
+    {
+        // Implements the following branching structure:
+        //
+        // if (is cell) {
+        //     if (is object) {
+        //         if (is function) {
+        //             return function;
+        //         } else if (doesn't have call trap and doesn't masquerade as undefined) {
+        //             return object
+        //         } else {
+        //             return slowPath();
+        //         }
+        //     } else if (is string) {
+        //         return string
+        //     } else {
+        //         return symbol
+        //     }
+        // } else if (is number) {
+        //     return number
+        // } else if (is null) {
+        //     return object
+        // } else if (is boolean) {
+        //     return boolean
+        // } else {
+        //     return undefined
+        // }
+
+        Jump notCell = branchIfNotCell(regs);
+
+        GPRReg cellGPR = regs.payloadGPR();
+        Jump notObject = branchIfNotObject(cellGPR);
+
+        Jump notFunction = branchIfNotFunction(cellGPR);
+        functor(TypeofType::Function, false);
+
+        notFunction.link(this);
+        slowPathFunctor(
+            branchTest8(
+                NonZero,
+                Address(cellGPR, JSCell::typeInfoFlagsOffset()),
+                TrustedImm32(MasqueradesAsUndefined | TypeOfShouldCallGetCallData)));
+        functor(TypeofType::Object, false);
+
+        notObject.link(this);
+
+        Jump notString = branchIfNotString(cellGPR);
+        functor(TypeofType::String, false);
+        notString.link(this);
+        functor(TypeofType::Symbol, false);
+
+        notCell.link(this);
+
+        Jump notNumber = branchIfNotNumber(regs, tempGPR);
+        functor(TypeofType::Number, false);
+        notNumber.link(this);
+
+        JumpList notNull = branchIfNotEqual(regs, jsNull());
+        functor(TypeofType::Object, false);
+        notNull.link(this);
+
+        Jump notBoolean = branchIfNotBoolean(regs, tempGPR);
+        functor(TypeofType::Boolean, false);
+        notBoolean.link(this);
+
+        functor(TypeofType::Undefined, true);
+    }
+
+    Vector<BytecodeAndMachineOffset>& decodedCodeMapFor(CodeBlock*);
+
+    void makeSpaceOnStackForCCall()
+    {
+        unsigned stackOffset = WTF::roundUpToMultipleOf(stackAlignmentBytes(), maxFrameExtentForSlowPathCall);
+        if (stackOffset)
+            subPtr(TrustedImm32(stackOffset), stackPointerRegister);
+    }
+
+    void reclaimSpaceOnStackForCCall()
+    {
+        unsigned stackOffset = WTF::roundUpToMultipleOf(stackAlignmentBytes(), maxFrameExtentForSlowPathCall);
+        if (stackOffset)
+            addPtr(TrustedImm32(stackOffset), stackPointerRegister);
+    }
+
+#if USE(JSVALUE64)
+    void emitRandomThunk(JSGlobalObject*, GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, FPRReg result);
+    void emitRandomThunk(GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, GPRReg scratch3, FPRReg result);
+#endif
+
+protected:
+    VM* m_vm;
+    CodeBlock* m_codeBlock;
+    CodeBlock* m_baselineCodeBlock;
+
+    HashMap<CodeBlock*, Vector<BytecodeAndMachineOffset>> m_decodedCodeMaps;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // AssemblyHelpers_h
+
diff --git a/Source/JavaScriptCore/jit/BinarySwitch.cpp b/Source/JavaScriptCore/jit/BinarySwitch.cpp
new file mode 100644
index 000000000..f3ddcfca9
--- /dev/null
+++ b/Source/JavaScriptCore/jit/BinarySwitch.cpp
@@ -0,0 +1,391 @@
+/*
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1.
Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "BinarySwitch.h"
+
+#if ENABLE(JIT)
+
+#include "JSCInlines.h"
+#include <wtf/ListDump.h>
+
+namespace JSC {
+
+static const bool verbose = false;
+
+static unsigned globalCounter; // We use a different seed every time we are invoked.
+
+BinarySwitch::BinarySwitch(GPRReg value, const Vector<int64_t>& cases, Type type)
+    : m_value(value)
+    , m_weakRandom(globalCounter++)
+    , m_index(0)
+    , m_caseIndex(UINT_MAX)
+    , m_type(type)
+{
+    if (cases.isEmpty())
+        return;
+
+    if (verbose)
+        dataLog("Original cases: ", listDump(cases), "\n");
+
+    for (unsigned i = 0; i < cases.size(); ++i)
+        m_cases.append(Case(cases[i], i));
+
+    std::sort(m_cases.begin(), m_cases.end());
+
+    if (verbose)
+        dataLog("Sorted cases: ", listDump(m_cases), "\n");
+
+    for (unsigned i = 1; i < m_cases.size(); ++i)
+        RELEASE_ASSERT(m_cases[i - 1] < m_cases[i]);
+
+    build(0, false, m_cases.size());
+}
+
+BinarySwitch::~BinarySwitch()
+{
+}
+
+bool BinarySwitch::advance(MacroAssembler& jit)
+{
+    if (m_cases.isEmpty()) {
+        m_fallThrough.append(jit.jump());
+        return false;
+    }
+
+    if (m_index == m_branches.size()) {
+        RELEASE_ASSERT(m_jumpStack.isEmpty());
+        return false;
+    }
+
+    for (;;) {
+        const BranchCode& code = m_branches[m_index++];
+        switch (code.kind) {
+        case NotEqualToFallThrough:
+            switch (m_type) {
+            case Int32:
+                m_fallThrough.append(jit.branch32(
+                    MacroAssembler::NotEqual, m_value,
+                    MacroAssembler::Imm32(static_cast<int32_t>(m_cases[code.index].value))));
+                break;
+            case IntPtr:
+                m_fallThrough.append(jit.branchPtr(
+                    MacroAssembler::NotEqual, m_value,
+                    MacroAssembler::ImmPtr(bitwise_cast<void*>(static_cast<intptr_t>(m_cases[code.index].value)))));
+                break;
+            }
+            break;
+        case NotEqualToPush:
+            switch (m_type) {
+            case Int32:
+                m_jumpStack.append(jit.branch32(
+                    MacroAssembler::NotEqual, m_value,
+                    MacroAssembler::Imm32(static_cast<int32_t>(m_cases[code.index].value))));
+                break;
+            case IntPtr:
+                m_jumpStack.append(jit.branchPtr(
+                    MacroAssembler::NotEqual, m_value,
+                    MacroAssembler::ImmPtr(bitwise_cast<void*>(static_cast<intptr_t>(m_cases[code.index].value)))));
+                break;
+            }
+            break;
+        case LessThanToPush:
+            switch (m_type) {
+            case Int32:
+                m_jumpStack.append(jit.branch32(
+                    MacroAssembler::LessThan, m_value,
+                    MacroAssembler::Imm32(static_cast<int32_t>(m_cases[code.index].value))));
+                break;
+            case IntPtr:
+                m_jumpStack.append(jit.branchPtr(
+                    MacroAssembler::LessThan, m_value,
+                    MacroAssembler::ImmPtr(bitwise_cast<void*>(static_cast<intptr_t>(m_cases[code.index].value)))));
break; + } + break; + case Pop: + m_jumpStack.takeLast().link(&jit); + break; + case ExecuteCase: + m_caseIndex = code.index; + return true; + } + } +} + +void BinarySwitch::build(unsigned start, bool hardStart, unsigned end) +{ + if (verbose) + dataLog("Building with start = ", start, ", hardStart = ", hardStart, ", end = ", end, "\n"); + + auto append = [&] (const BranchCode& code) { + if (verbose) + dataLog("==> ", code, "\n"); + m_branches.append(code); + }; + + unsigned size = end - start; + + RELEASE_ASSERT(size); + + // This code uses some random numbers to keep things balanced. It's important to keep in mind + // that this does not improve average-case throughput under the assumption that all cases fire + // with equal probability. It just ensures that there will not be some switch structure that + // when combined with some input will always produce pathologically good or pathologically bad + // performance. + + const unsigned leafThreshold = 3; + + if (size <= leafThreshold) { + if (verbose) + dataLog("It's a leaf.\n"); + + // It turns out that for exactly three cases or less, it's better to just compare each + // case individually. This saves 1/6 of a branch on average, and up to 1/3 of a branch in + // extreme cases where the divide-and-conquer bottoms out in a lot of 3-case subswitches. + // + // This assumes that we care about the cost of hitting some case more than we care about + // bottoming out in a default case. I believe that in most places where we use switch + // statements, we are more likely to hit one of the cases than we are to fall through to + // default. Intuitively, if we wanted to improve the performance of default, we would + // reduce the value of leafThreshold to 2 or even to 1. See below for a deeper discussion. + + bool allConsecutive = false; + + if ((hardStart || (start && m_cases[start - 1].value == m_cases[start].value - 1)) + && start + size < m_cases.size() + && m_cases[start + size - 1].value == m_cases[start + size].value - 1) { + allConsecutive = true; + for (unsigned i = 0; i < size - 1; ++i) { + if (m_cases[start + i].value + 1 != m_cases[start + i + 1].value) { + allConsecutive = false; + break; + } + } + } + + if (verbose) + dataLog("allConsecutive = ", allConsecutive, "\n"); + + Vector localCaseIndices; + for (unsigned i = 0; i < size; ++i) + localCaseIndices.append(start + i); + + std::random_shuffle( + localCaseIndices.begin(), localCaseIndices.end(), + [this] (unsigned n) { + // We use modulo to get a random number in the range we want fully knowing that + // this introduces a tiny amount of bias, but we're fine with such tiny bias. + return m_weakRandom.getUint32() % n; + }); + + for (unsigned i = 0; i < size - 1; ++i) { + append(BranchCode(NotEqualToPush, localCaseIndices[i])); + append(BranchCode(ExecuteCase, localCaseIndices[i])); + append(BranchCode(Pop)); + } + + if (!allConsecutive) + append(BranchCode(NotEqualToFallThrough, localCaseIndices.last())); + + append(BranchCode(ExecuteCase, localCaseIndices.last())); + return; + } + + if (verbose) + dataLog("It's not a leaf.\n"); + + // There are two different strategies we could consider here: + // + // Isolate median and split: pick a median and check if the comparison value is equal to it; + // if so, execute the median case. Otherwise check if the value is less than the median, and + // recurse left or right based on this. 
This has two subvariants: we could either first test + // equality for the median and then do the less-than, or we could first do the less-than and + // then check equality on the not-less-than path. + // + // Ignore median and split: do a less-than comparison on a value that splits the cases in two + // equal-sized halves. Recurse left or right based on the comparison. Do not test for equality + // against the median (or anything else); let the recursion handle those equality comparisons + // once we bottom out in a list that case 3 cases or less (see above). + // + // I'll refer to these strategies as Isolate and Ignore. I initially believed that Isolate + // would be faster since it leads to less branching for some lucky cases. It turns out that + // Isolate is almost a total fail in the average, assuming all cases are equally likely. How + // bad Isolate is depends on whether you believe that doing two consecutive branches based on + // the same comparison is cheaper than doing the compare/branches separately. This is + // difficult to evaluate. For small immediates that aren't blinded, we just care about + // avoiding a second compare instruction. For large immediates or when blinding is in play, we + // also care about the instructions used to materialize the immediate a second time. Isolate + // can help with both costs since it involves first doing a < compare+branch on some value, + // followed by a == compare+branch on the same exact value (or vice-versa). Ignore will do a < + // compare+branch on some value, and then the == compare+branch on that same value will happen + // much later. + // + // To evaluate these costs, I wrote the recurrence relation for Isolate and Ignore, assuming + // that ComparisonCost is the cost of a compare+branch and ChainedComparisonCost is the cost + // of a compare+branch on some value that you've just done another compare+branch for. These + // recurrence relations compute the total cost incurred if you executed the switch statement + // on each matching value. So the average cost of hitting some case can be computed as + // Isolate[n]/n or Ignore[n]/n, respectively for the two relations. + // + // Isolate[1] = ComparisonCost + // Isolate[2] = (2 + 1) * ComparisonCost + // Isolate[3] = (3 + 2 + 1) * ComparisonCost + // Isolate[n_] := With[ + // {medianIndex = Floor[n/2] + If[EvenQ[n], RandomInteger[], 1]}, + // ComparisonCost + ChainedComparisonCost + + // (ComparisonCost * (medianIndex - 1) + Isolate[medianIndex - 1]) + + // (2 * ComparisonCost * (n - medianIndex) + Isolate[n - medianIndex])] + // + // Ignore[1] = ComparisonCost + // Ignore[2] = (2 + 1) * ComparisonCost + // Ignore[3] = (3 + 2 + 1) * ComparisonCost + // Ignore[n_] := With[ + // {medianIndex = If[EvenQ[n], n/2, Floor[n/2] + RandomInteger[]]}, + // (medianIndex * ComparisonCost + Ignore[medianIndex]) + + // ((n - medianIndex) * ComparisonCost + Ignore[n - medianIndex])] + // + // This does not account for the average cost of hitting the default case. See further below + // for a discussion of that. + // + // It turns out that for ComparisonCost = 1 and ChainedComparisonCost = 1, Ignore is always + // better than Isolate. If we assume that ChainedComparisonCost = 0, then Isolate wins for + // switch statements that have 20 cases or fewer, though the margin of victory is never large + // - it might sometimes save an average of 0.3 ComparisonCost. For larger switch statements, + // we see divergence between the two with Ignore winning. 
This is of course rather + // unrealistic since the chained comparison is never free. For ChainedComparisonCost = 0.5, we + // see Isolate winning for 10 cases or fewer, by maybe 0.2 ComparisonCost. Again we see + // divergence for large switches with Ignore winning, for example if a switch statement has + // 100 cases then Ignore saves one branch on average. + // + // Our current JIT backends don't provide for optimization for chained comparisons, except for + // reducing the code for materializing the immediate if the immediates are large or blinding + // comes into play. Probably our JIT backends live somewhere north of + // ChainedComparisonCost = 0.5. + // + // This implies that using the Ignore strategy is likely better. If we wanted to incorporate + // the Isolate strategy, we'd want to determine the switch size threshold at which the two + // cross over and then use Isolate for switches that are smaller than that size. + // + // The average cost of hitting the default case is similar, but involves a different cost for + // the base cases: you have to assume that you will always fail each branch. For the Ignore + // strategy we would get this recurrence relation; the same kind of thing happens to the + // Isolate strategy: + // + // Ignore[1] = ComparisonCost + // Ignore[2] = (2 + 2) * ComparisonCost + // Ignore[3] = (3 + 3 + 3) * ComparisonCost + // Ignore[n_] := With[ + // {medianIndex = If[EvenQ[n], n/2, Floor[n/2] + RandomInteger[]]}, + // (medianIndex * ComparisonCost + Ignore[medianIndex]) + + // ((n - medianIndex) * ComparisonCost + Ignore[n - medianIndex])] + // + // This means that if we cared about the default case more, we would likely reduce + // leafThreshold. Reducing it to 2 would reduce the average cost of the default case by 1/3 + // in the most extreme cases (num switch cases = 3, 6, 12, 24, ...). But it would also + // increase the average cost of taking one of the non-default cases by 1/3. Typically the + // difference is 1/6 in either direction. This makes it a very simple trade-off: if we believe + // that the default case is more important then we would want leafThreshold to be 2, and the + // default case would become 1/6 faster on average. But we believe that most switch statements + // are more likely to take one of the cases than the default, so we use leafThreshold = 3 + // and get a 1/6 speed-up on average for taking an explicit case. + + unsigned medianIndex = (start + end) / 2; + + if (verbose) + dataLog("medianIndex = ", medianIndex, "\n"); + + // We want medianIndex to point to the thing we will do a less-than compare against. We want + // this less-than compare to split the current sublist into equal-sized sublists, or + // nearly-equal-sized with some randomness if we're in the odd case. With the above + // calculation, in the odd case we will have medianIndex pointing at either the element we + // want or the element to the left of the one we want. Consider the case of five elements: + // + // 0 1 2 3 4 + // + // start will be 0, end will be 5. The average is 2.5, which rounds down to 2. If we do + // value < 2, then we will split the list into 2 elements on the left and three on the right. + // That's pretty good, but in this odd case we'd like to at random choose 3 instead to ensure + // that we don't become unbalanced on the right. This does not improve throughput since one + // side will always get shafted, and that side might still be odd, in which case it will also + // have two sides and one of them will get shafted - and so on. 
We just want to avoid + // deterministic pathologies. + // + // In the even case, we will always end up pointing at the element we want: + // + // 0 1 2 3 + // + // start will be 0, end will be 4. So, the average is 2, which is what we'd like. + if (size & 1) { + RELEASE_ASSERT(medianIndex - start + 1 == end - medianIndex); + medianIndex += m_weakRandom.getUint32() & 1; + } else + RELEASE_ASSERT(medianIndex - start == end - medianIndex); + + RELEASE_ASSERT(medianIndex > start); + RELEASE_ASSERT(medianIndex + 1 < end); + + if (verbose) + dataLog("fixed medianIndex = ", medianIndex, "\n"); + + append(BranchCode(LessThanToPush, medianIndex)); + build(medianIndex, true, end); + append(BranchCode(Pop)); + build(start, hardStart, medianIndex); +} + +void BinarySwitch::Case::dump(PrintStream& out) const +{ + out.print(""); +} + +void BinarySwitch::BranchCode::dump(PrintStream& out) const +{ + switch (kind) { + case NotEqualToFallThrough: + out.print("NotEqualToFallThrough"); + break; + case NotEqualToPush: + out.print("NotEqualToPush"); + break; + case LessThanToPush: + out.print("LessThanToPush"); + break; + case Pop: + out.print("Pop"); + break; + case ExecuteCase: + out.print("ExecuteCase"); + break; + } + + if (index != UINT_MAX) + out.print("(", index, ")"); +} + +} // namespace JSC + +#endif // ENABLE(JIT) + diff --git a/Source/JavaScriptCore/jit/BinarySwitch.h b/Source/JavaScriptCore/jit/BinarySwitch.h new file mode 100644 index 000000000..3ac08b701 --- /dev/null +++ b/Source/JavaScriptCore/jit/BinarySwitch.h @@ -0,0 +1,147 @@ +/* + * Copyright (C) 2013, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef BinarySwitch_h +#define BinarySwitch_h + +#if ENABLE(JIT) + +#include "GPRInfo.h" +#include "MacroAssembler.h" +#include + +namespace JSC { + +// The BinarySwitch class makes it easy to emit a switch statement over either +// 32-bit integers or pointers, where the switch uses a tree of branches +// rather than a jump table. This makes it particularly useful if the case +// values are too far apart to make a jump table practical, or if there are +// sufficiently few cases that the total cost of log(numCases) branches is +// less than the cost of an indirected jump. 
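//
// As a concrete illustration (the numbers here are made up for the example): for the case
// values { 0, 100, 10000, 1000000 }, a dense jump table would need on the order of a million
// entries, whereas this class emits a single less-than split followed by a couple of equality
// compares in each two-case leaf - a handful of branches in total.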
+//
+// In an effort to simplify the logic of emitting code for each case, this
+// uses an iterator style, rather than a functor callback style. This makes
+// sense because even the iterator implementation found herein is relatively
+// simple, whereas the code it's used from is usually quite complex - one
+// example being the trie-of-trees string switch implementation, where the
+// code emitted for each case involves recursing to emit code for a sub-trie.
+//
+// Use this like so:
+//
+// BinarySwitch switch(valueReg, casesVector, BinarySwitch::Int32);
+// while (switch.advance(jit)) {
+//     int value = switch.caseValue();
+//     unsigned index = switch.caseIndex(); // index into casesVector, above
+//     ... // generate code for this case
+//     ... = jit.jump(); // you have to jump out yourself; falling through causes undefined behavior
+// }
+// switch.fallThrough().link(&jit);
+
+class BinarySwitch {
+public:
+    enum Type {
+        Int32,
+        IntPtr
+    };
+
+    BinarySwitch(GPRReg value, const Vector<int64_t>& cases, Type);
+    ~BinarySwitch();
+
+    unsigned caseIndex() const { return m_cases[m_caseIndex].index; }
+    int64_t caseValue() const { return m_cases[m_caseIndex].value; }
+
+    bool advance(MacroAssembler&);
+
+    MacroAssembler::JumpList& fallThrough() { return m_fallThrough; }
+
+private:
+    void build(unsigned start, bool hardStart, unsigned end);
+
+    GPRReg m_value;
+
+    struct Case {
+        Case() { }
+
+        Case(int64_t value, unsigned index)
+            : value(value)
+            , index(index)
+        {
+        }
+
+        bool operator<(const Case& other) const
+        {
+            return value < other.value;
+        }
+
+        void dump(PrintStream& out) const;
+
+        int64_t value;
+        unsigned index;
+    };
+
+    Vector<Case> m_cases;
+
+    enum BranchKind {
+        NotEqualToFallThrough,
+        NotEqualToPush,
+        LessThanToPush,
+        Pop,
+        ExecuteCase
+    };
+
+    struct BranchCode {
+        BranchCode() { }
+
+        BranchCode(BranchKind kind, unsigned index = UINT_MAX)
+            : kind(kind)
+            , index(index)
+        {
+        }
+
+        void dump(PrintStream& out) const;
+
+        BranchKind kind;
+        unsigned index;
+    };
+
+    WeakRandom m_weakRandom;
+
+    Vector<BranchCode> m_branches;
+
+    unsigned m_index;
+    unsigned m_caseIndex;
+    Vector<MacroAssembler::Jump> m_jumpStack;
+
+    MacroAssembler::JumpList m_fallThrough;
+
+    Type m_type;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // BinarySwitch_h
+
diff --git a/Source/JavaScriptCore/jit/CCallHelpers.h b/Source/JavaScriptCore/jit/CCallHelpers.h
new file mode 100644
index 000000000..e649d39e6
--- /dev/null
+++ b/Source/JavaScriptCore/jit/CCallHelpers.h
@@ -0,0 +1,2215 @@
+/*
+ * Copyright (C) 2011, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC.
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef CCallHelpers_h +#define CCallHelpers_h + +#if ENABLE(JIT) + +#include "AssemblyHelpers.h" +#include "GPRInfo.h" +#include "StackAlignment.h" + +namespace JSC { + +#if CPU(MIPS) || (OS(WINDOWS) && CPU(X86_64)) +#define POKE_ARGUMENT_OFFSET 4 +#else +#define POKE_ARGUMENT_OFFSET 0 +#endif + +class CCallHelpers : public AssemblyHelpers { +public: + CCallHelpers(VM* vm, CodeBlock* codeBlock = 0) + : AssemblyHelpers(vm, codeBlock) + { + } + + // The most general helper for setting arguments that fit in a GPR, if you can compute each + // argument without using any argument registers. You usually want one of the setupArguments*() + // methods below instead of this. This thing is most useful if you have *a lot* of arguments. + template + void setupArgument(unsigned argumentIndex, const Functor& functor) + { + unsigned numberOfRegs = GPRInfo::numberOfArgumentRegisters; // Disguise the constant from clang's tautological compare warning. + if (argumentIndex < numberOfRegs) { + functor(GPRInfo::toArgumentRegister(argumentIndex)); + return; + } + + functor(GPRInfo::nonArgGPR0); + poke(GPRInfo::nonArgGPR0, POKE_ARGUMENT_OFFSET + argumentIndex - GPRInfo::numberOfArgumentRegisters); + } + + void setupArgumentsWithExecState() { setupArgumentsExecState(); } + + // These methods used to sort arguments into the correct registers. + // On X86 we use cdecl calling conventions, which pass all arguments on the + // stack. On other architectures we may need to sort values into the + // correct registers. +#if !NUMBER_OF_ARGUMENT_REGISTERS + unsigned m_callArgumentOffset; + void resetCallArguments() { m_callArgumentOffset = 0; } + + // These methods are using internally to implement the callOperation methods. 
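    // For instance (register and constant chosen arbitrarily for the example), a call such as
    // setupArgumentsWithExecState(GPRInfo::regT0, TrustedImm32(42)) reduces, on a convention
    // with no argument registers, to three pokes into the outgoing argument area:
    //
    //     poke(GPRInfo::callFrameRegister, 0); // the implicit ExecState* argument
    //     poke(GPRInfo::regT0, 1);
    //     poke(TrustedImm32(42), 2);
    //
    // i.e. arguments are written left to right at increasing word offsets from the stack pointer.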
+ void addCallArgument(GPRReg value) + { + poke(value, m_callArgumentOffset++); + } + void addCallArgument(TrustedImm32 imm) + { + poke(imm, m_callArgumentOffset++); + } + void addCallArgument(TrustedImmPtr pointer) + { + poke(pointer, m_callArgumentOffset++); + } + void addCallArgument(FPRReg value) + { + storeDouble(value, Address(stackPointerRegister, m_callArgumentOffset * sizeof(void*))); + m_callArgumentOffset += sizeof(double) / sizeof(void*); + } + + ALWAYS_INLINE void setupArguments(FPRReg arg1) + { + resetCallArguments(); + addCallArgument(arg1); + } + + ALWAYS_INLINE void setupArguments(FPRReg arg1, FPRReg arg2) + { + resetCallArguments(); + addCallArgument(arg1); + addCallArgument(arg2); + } + + ALWAYS_INLINE void setupArguments(GPRReg arg1) + { + resetCallArguments(); + addCallArgument(arg1); + } + + ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2) + { + resetCallArguments(); + addCallArgument(arg1); + addCallArgument(arg2); + } + + ALWAYS_INLINE void setupArguments(TrustedImmPtr arg1, GPRReg arg2) + { + resetCallArguments(); + addCallArgument(arg1); + addCallArgument(arg2); + } + + ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2, GPRReg arg3) + { + resetCallArguments(); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + } + + ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImmPtr arg4) + { + resetCallArguments(); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + } + + ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4, GPRReg arg5) + { + resetCallArguments(); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + addCallArgument(arg5); + } + + ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4, GPRReg arg5, GPRReg arg6) + { + resetCallArguments(); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + addCallArgument(arg5); + addCallArgument(arg6); + } + + ALWAYS_INLINE void setupArguments(TrustedImmPtr arg1) + { + resetCallArguments(); + addCallArgument(arg1); + } + + ALWAYS_INLINE void setupArgumentsExecState() + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + } 
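    // The overloads below spell out each argument-type combination explicitly, which keeps the
    // set of supported signatures easy to audit. Purely as an illustrative sketch (this is not
    // the shape this header actually uses), the same marshalling could be written variadically:
    //
    //     template<typename... Args>
    //     void setupArgumentsWithExecStateGeneric(Args... args)
    //     {
    //         resetCallArguments();
    //         addCallArgument(GPRInfo::callFrameRegister);
    //         // Fold over the pack, poking each argument in order.
    //         int fold[] = { 0, (addCallArgument(args), 0)... };
    //         (void)fold;
    //     }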
+ + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImmPtr arg2) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImm32 arg2) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImm32 arg2, TrustedImm32 arg3) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, GPRReg arg3) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, TrustedImm32 arg4) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, TrustedImmPtr arg3) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, TrustedImm32 arg3) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + 
addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, TrustedImm32 arg5) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + addCallArgument(arg5); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4, TrustedImm32 arg5) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + addCallArgument(arg5); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, GPRReg arg5, TrustedImm32 arg6) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + addCallArgument(arg5); + addCallArgument(arg6); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImmPtr arg3) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImmPtr arg2, TrustedImm32 arg3) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImmPtr arg2, TrustedImmPtr arg3) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImmPtr arg3) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImm32 arg2, GPRReg arg3, TrustedImmPtr arg4) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImmPtr arg4) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + 
addCallArgument(arg4); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + addCallArgument(arg5); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + addCallArgument(arg5); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4, TrustedImm32 arg5) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + addCallArgument(arg5); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, GPRReg arg5) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + addCallArgument(arg5); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImm32 arg4) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2, GPRReg arg3, GPRReg arg4) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + 
addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + addCallArgument(arg5); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImmPtr arg5) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + addCallArgument(arg5); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, TrustedImmPtr arg6) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + addCallArgument(arg5); + addCallArgument(arg6); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, TrustedImm32 arg5, TrustedImmPtr arg6) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + addCallArgument(arg5); + addCallArgument(arg6); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImmPtr arg4) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, GPRReg arg3, GPRReg arg4) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + addCallArgument(arg5); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + 
addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + addCallArgument(arg5); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, TrustedImmPtr arg5) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + addCallArgument(arg5); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImmPtr arg5) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + addCallArgument(arg5); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, TrustedImm32 arg5) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + addCallArgument(arg5); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + addCallArgument(arg5); + addCallArgument(arg6); + } + + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, TrustedImm32 arg6) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + addCallArgument(arg5); + addCallArgument(arg6); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6, TrustedImmPtr arg7) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + addCallArgument(arg5); + addCallArgument(arg6); + addCallArgument(arg7); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(FPRReg arg1, GPRReg arg2) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, FPRReg arg3) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + } +#endif // !NUMBER_OF_ARGUMENT_REGISTERS + // These methods are suitable for any calling convention that provides for + // at least 4 argument registers, e.g. X86_64, ARMv7. +#if NUMBER_OF_ARGUMENT_REGISTERS >= 4 + template + void setupTwoStubArgsGPR(GPRReg srcA, GPRReg srcB) + { + // Assuming that srcA != srcB, there are 7 interesting states the registers may be in: + // (1) both are already in arg regs, the right way around. + // (2) both are already in arg regs, the wrong way around. + // (3) neither are currently in arg registers. + // (4) srcA in in its correct reg. + // (5) srcA in in the incorrect reg. + // (6) srcB in in its correct reg. + // (7) srcB in in the incorrect reg. + // + // The trivial approach is to simply emit two moves, to put srcA in place then srcB in + // place (the MacroAssembler will omit redundant moves). 
This approach will be safe in + // cases 1, 3, 4, 5, 6, and in cases where srcA==srcB. The two problem cases are 2 + // (requires a swap) and 7 (must move srcB first, to avoid trampling.) + + if (srcB != destA) { + // Handle the easy cases - two simple moves. + move(srcA, destA); + move(srcB, destB); + } else if (srcA != destB) { + // Handle the non-swap case - just put srcB in place first. + move(srcB, destB); + move(srcA, destA); + } else + swap(destA, destB); + } + + template<GPRReg destA, GPRReg destB, GPRReg destC> + void setupThreeStubArgsGPR(GPRReg srcA, GPRReg srcB, GPRReg srcC) + { + // If neither of srcB/srcC is in our way, then we can move srcA into place. + // Then we can use setupTwoStubArgs to fix srcB/srcC. + if (srcB != destA && srcC != destA) { + move(srcA, destA); + setupTwoStubArgsGPR<destB, destC>(srcB, srcC); + return; + } + + // If neither of srcA/srcC is in our way, then we can move srcB into place. + // Then we can use setupTwoStubArgs to fix srcA/srcC. + if (srcA != destB && srcC != destB) { + move(srcB, destB); + setupTwoStubArgsGPR<destA, destC>(srcA, srcC); + return; + } + + // If neither of srcA/srcB is in our way, then we can move srcC into place. + // Then we can use setupTwoStubArgs to fix srcA/srcB. + if (srcA != destC && srcB != destC) { + move(srcC, destC); + setupTwoStubArgsGPR<destA, destB>(srcA, srcB); + return; + } + + // If we get here, we haven't been able to move any of srcA/srcB/srcC. + // Since all three are blocked, all three must already be in argument registers. + // But are they in the right ones? + + // First, ensure srcA is in place. + if (srcA != destA) { + swap(srcA, destA); + + // If srcA wasn't in argumentGPR1, one of srcB/srcC must be. + ASSERT(srcB == destA || srcC == destA); + // If srcB was in argumentGPR1 it no longer is (due to the swap). + // Otherwise srcC must have been. Mark it as moved. + if (srcB == destA) + srcB = srcA; + else + srcC = srcA; + } + + // Either srcB & srcC need swapping, or we're all done. + ASSERT((srcB == destB || srcC == destC) + || (srcB == destC || srcC == destB)); + + if (srcB != destB) + swap(destB, destC); + } + +#if CPU(X86_64) || CPU(ARM64) + template<FPRReg destA, FPRReg destB> + void setupTwoStubArgsFPR(FPRReg srcA, FPRReg srcB) + { + // Assuming that srcA != srcB, there are 7 interesting states the registers may be in: + // (1) both are already in arg regs, the right way around. + // (2) both are already in arg regs, the wrong way around. + // (3) neither are currently in arg registers. + // (4) srcA is in its correct reg. + // (5) srcA is in the incorrect reg. + // (6) srcB is in its correct reg. + // (7) srcB is in the incorrect reg. + // + // The trivial approach is to simply emit two moves, to put srcA in place then srcB in + // place (the MacroAssembler will omit redundant moves). This approach will be safe in + // cases 1, 3, 4, 5, 6, and in cases where srcA==srcB. The two problem cases are 2 + // (requires a swap) and 7 (must move srcB first, to avoid trampling.) + + if (srcB != destA) { + // Handle the easy cases - two simple moves. + moveDouble(srcA, destA); + moveDouble(srcB, destB); + return; + } + + if (srcA != destB) { + // Handle the non-swap case - just put srcB in place first. + moveDouble(srcB, destB); + moveDouble(srcA, destA); + return; + } + + ASSERT(srcB == destA && srcA == destB); + // Need to swap; pick a temporary register.
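+ // (As instantiated below with destA/destB being argumentFPR0/argumentFPR1, the scan settles on argumentFPR3; the remaining branches only matter if other destination pairs are ever used.)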
+ FPRReg temp; + if (destA != FPRInfo::argumentFPR3 && destB != FPRInfo::argumentFPR3) + temp = FPRInfo::argumentFPR3; + else if (destA != FPRInfo::argumentFPR2 && destB != FPRInfo::argumentFPR2) + temp = FPRInfo::argumentFPR2; + else { + ASSERT(destA != FPRInfo::argumentFPR1 && destB != FPRInfo::argumentFPR1); + temp = FPRInfo::argumentFPR1; + } + moveDouble(destA, temp); + moveDouble(destB, destA); + moveDouble(temp, destB); + } +#endif + void setupStubArguments(GPRReg arg1, GPRReg arg2) + { + setupTwoStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2>(arg1, arg2); + } + + void setupStubArguments(GPRReg arg1, GPRReg arg2, GPRReg arg3) + { + setupThreeStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2, GPRInfo::argumentGPR3>(arg1, arg2, arg3); + } + +#if CPU(X86_64) || CPU(ARM64) + ALWAYS_INLINE void setupArguments(FPRReg arg1) + { + moveDouble(arg1, FPRInfo::argumentFPR0); + } + + ALWAYS_INLINE void setupArguments(FPRReg arg1, FPRReg arg2) + { + setupTwoStubArgsFPR<FPRInfo::argumentFPR0, FPRInfo::argumentFPR1>(arg1, arg2); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(FPRReg arg1, GPRReg arg2) + { +#if OS(WINDOWS) && CPU(X86_64) + // On Windows, arguments map to designated registers based on the argument positions, even when there are interlaced scalar and floating point arguments. + // See http://msdn.microsoft.com/en-us/library/zthk2dkh.aspx + moveDouble(arg1, FPRInfo::argumentFPR1); + move(arg2, GPRInfo::argumentGPR2); +#else + moveDouble(arg1, FPRInfo::argumentFPR0); + move(arg2, GPRInfo::argumentGPR1); +#endif + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, FPRReg arg3) + { +#if OS(WINDOWS) && CPU(X86_64) + // On Windows, arguments map to designated registers based on the argument positions, even when there are interlaced scalar and floating point arguments. + // See http://msdn.microsoft.com/en-us/library/zthk2dkh.aspx + moveDouble(arg3, FPRInfo::argumentFPR3); +#else + moveDouble(arg3, FPRInfo::argumentFPR0); +#endif + setupStubArguments(arg1, arg2); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } +#elif CPU(ARM) +#if CPU(ARM_HARDFP) + ALWAYS_INLINE void setupArguments(FPRReg arg1) + { + moveDouble(arg1, FPRInfo::argumentFPR0); + } + + ALWAYS_INLINE void setupArguments(FPRReg arg1, FPRReg arg2) + { + if (arg2 != FPRInfo::argumentFPR0) { + moveDouble(arg1, FPRInfo::argumentFPR0); + moveDouble(arg2, FPRInfo::argumentFPR1); + } else if (arg1 != FPRInfo::argumentFPR1) { + moveDouble(arg2, FPRInfo::argumentFPR1); + moveDouble(arg1, FPRInfo::argumentFPR0); + } else { + // Swap arg1, arg2.
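+ // arg1 is in d0 and arg2 in d1 at this point; d2 serves as scratch below, since only two FP arguments are being passed and it is not otherwise live.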
+ moveDouble(FPRInfo::argumentFPR0, ARMRegisters::d2); + moveDouble(FPRInfo::argumentFPR1, FPRInfo::argumentFPR0); + moveDouble(ARMRegisters::d2, FPRInfo::argumentFPR1); + } + } + + ALWAYS_INLINE void setupArgumentsWithExecState(FPRReg arg1, GPRReg arg2) + { + moveDouble(arg1, FPRInfo::argumentFPR0); + move(arg2, GPRInfo::argumentGPR1); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, FPRReg arg3) + { + moveDouble(arg3, FPRInfo::argumentFPR0); + setupStubArguments(arg1, arg2); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32, FPRReg arg2, GPRReg arg3) + { + moveDouble(arg2, FPRInfo::argumentFPR0); + move(arg3, GPRInfo::argumentGPR1); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32, FPRReg arg4) + { + moveDouble(arg4, FPRInfo::argumentFPR0); + setupStubArguments(arg1, arg2); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + +#else + ALWAYS_INLINE void setupArguments(FPRReg arg1) + { + assembler().vmov(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, arg1); + } + + ALWAYS_INLINE void setupArguments(FPRReg arg1, FPRReg arg2) + { + assembler().vmov(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, arg1); + assembler().vmov(GPRInfo::argumentGPR2, GPRInfo::argumentGPR3, arg2); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(FPRReg arg1, GPRReg arg2) + { + move(arg2, GPRInfo::argumentGPR3); + assembler().vmov(GPRInfo::argumentGPR1, GPRInfo::argumentGPR2, arg1); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, FPRReg arg3) + { + setupStubArguments(arg1, arg2); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + assembler().vmov(GPRInfo::argumentGPR3, GPRInfo::nonArgGPR0, arg3); + poke(GPRInfo::nonArgGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, FPRReg arg2, GPRReg arg3) + { + poke(arg3, POKE_ARGUMENT_OFFSET); + move(arg1, GPRInfo::argumentGPR1); + assembler().vmov(GPRInfo::argumentGPR2, GPRInfo::argumentGPR3, arg2); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, FPRReg arg4) + { + setupStubArguments(arg1, arg2); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + move(arg3, GPRInfo::argumentGPR3); + assembler().vmov(GPRInfo::nonArgGPR0, GPRInfo::nonArgGPR1, arg4); + poke(GPRInfo::nonArgGPR0, POKE_ARGUMENT_OFFSET); + poke(GPRInfo::nonArgGPR1, POKE_ARGUMENT_OFFSET + 1); + } +#endif // CPU(ARM_HARDFP) +#elif CPU(MIPS) + ALWAYS_INLINE void setupArguments(FPRReg arg1) + { + moveDouble(arg1, FPRInfo::argumentFPR0); + } + + ALWAYS_INLINE void setupArguments(FPRReg arg1, FPRReg arg2) + { + if (arg2 != FPRInfo::argumentFPR0) { + moveDouble(arg1, FPRInfo::argumentFPR0); + moveDouble(arg2, FPRInfo::argumentFPR1); + } else if (arg1 != FPRInfo::argumentFPR1) { + moveDouble(arg2, FPRInfo::argumentFPR1); + moveDouble(arg1, FPRInfo::argumentFPR0); + } else { + // Swap arg1, arg2. 
+ swapDouble(FPRInfo::argumentFPR0, FPRInfo::argumentFPR1); + } + } + + ALWAYS_INLINE void setupArgumentsWithExecState(FPRReg arg1, GPRReg arg2) + { + assembler().vmov(GPRInfo::argumentGPR2, GPRInfo::argumentGPR3, arg1); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + poke(arg2, 4); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, FPRReg arg3) + { + setupStubArguments(arg1, arg2); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + poke(arg3, 4); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32, FPRReg arg2, GPRReg arg3) + { + setupArgumentsWithExecState(arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32, FPRReg arg4) + { + setupArgumentsWithExecState(arg1, arg2, arg4); + } +#elif CPU(SH4) + ALWAYS_INLINE void setupArguments(FPRReg arg1) + { + moveDouble(arg1, FPRInfo::argumentFPR0); + } + + ALWAYS_INLINE void setupArguments(FPRReg arg1, FPRReg arg2) + { + if (arg2 != FPRInfo::argumentFPR0) { + moveDouble(arg1, FPRInfo::argumentFPR0); + moveDouble(arg2, FPRInfo::argumentFPR1); + } else if (arg1 != FPRInfo::argumentFPR1) { + moveDouble(arg2, FPRInfo::argumentFPR1); + moveDouble(arg1, FPRInfo::argumentFPR0); + } else + swapDouble(FPRInfo::argumentFPR0, FPRInfo::argumentFPR1); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(FPRReg arg1, GPRReg arg2) + { + moveDouble(arg1, FPRInfo::argumentFPR0); + move(arg2, GPRInfo::argumentGPR1); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, FPRReg arg3) + { + moveDouble(arg3, FPRInfo::argumentFPR0); + setupStubArguments(arg1, arg2); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } +#else +#error "JIT not supported on this platform." 
+#endif + + ALWAYS_INLINE void setupArguments(GPRReg arg1) + { + move(arg1, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArguments(TrustedImmPtr arg1, GPRReg arg2) + { + move(arg2, GPRInfo::argumentGPR1); + move(arg1, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2) + { + setupTwoStubArgsGPR(arg1, arg2); + } + + ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2, GPRReg arg3) + { + setupThreeStubArgsGPR(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImmPtr arg4) + { + setupTwoStubArgsGPR(arg1, arg2); + move(arg3, GPRInfo::argumentGPR2); + move(arg4, GPRInfo::argumentGPR3); + } + + ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4) + { + setupThreeStubArgsGPR(arg1, arg2, arg3); + move(arg4, GPRInfo::argumentGPR3); + } + + ALWAYS_INLINE void setupArguments(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, TrustedImmPtr arg4) + { + setupTwoStubArgsGPR(arg1, arg3); + move(arg2, GPRInfo::argumentGPR1); + move(arg4, GPRInfo::argumentGPR3); + } + + ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4, GPRReg arg5, GPRReg arg6) + { + poke(arg6, POKE_ARGUMENT_OFFSET + 1); + poke(arg5, POKE_ARGUMENT_OFFSET); + setupTwoStubArgsGPR(arg1, arg2); + move(arg3, GPRInfo::argumentGPR2); + move(arg4, GPRInfo::argumentGPR3); + } + + ALWAYS_INLINE void setupArguments(TrustedImmPtr arg1) + { + move(arg1, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsExecState() + { + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1) + { + move(arg1, GPRInfo::argumentGPR1); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1) + { + move(arg1, GPRInfo::argumentGPR1); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1) + { + move(arg1, GPRInfo::argumentGPR1); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + +#if OS(WINDOWS) && CPU(X86_64) + ALWAYS_INLINE void setupArgumentsWithExecStateForCallWithSlowPathReturnType(TrustedImm32 arg1) + { + move(arg1, GPRInfo::argumentGPR2); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1); + } +#endif + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2) + { + setupStubArguments(arg1, arg2); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2) + { + move(arg1, GPRInfo::argumentGPR1); + move(arg2, GPRInfo::argumentGPR2); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } +#if CPU(X86_64) || CPU(ARM64) + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm64 arg2) + { + move(arg1, GPRInfo::argumentGPR1); + move(arg2, GPRInfo::argumentGPR2); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm64 arg1, GPRReg arg2) + { + move(arg2, GPRInfo::argumentGPR2); // Move this first, so setting arg1 does not trample! 
+ move(arg1, GPRInfo::argumentGPR1); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } +#endif + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2) + { + move(arg1, GPRInfo::argumentGPR1); + move(arg2, GPRInfo::argumentGPR2); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, ImmPtr arg2) + { + move(arg1, GPRInfo::argumentGPR1); + move(arg2, GPRInfo::argumentGPR2); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2) + { + move(arg2, GPRInfo::argumentGPR2); // Move this first, so setting arg1 does not trample! + move(arg1, GPRInfo::argumentGPR1); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2) + { + move(arg2, GPRInfo::argumentGPR2); // Move this first, so setting arg1 does not trample! + move(arg1, GPRInfo::argumentGPR1); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(ImmPtr arg1, GPRReg arg2) + { + move(arg2, GPRInfo::argumentGPR2); // Move this first, so setting arg1 does not trample! + move(arg1, GPRInfo::argumentGPR1); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2) + { + move(arg1, GPRInfo::argumentGPR1); + move(arg2, GPRInfo::argumentGPR2); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImmPtr arg2) + { + move(arg1, GPRInfo::argumentGPR1); + move(arg2, GPRInfo::argumentGPR2); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImm32 arg2) + { + move(arg1, GPRInfo::argumentGPR1); + move(arg2, GPRInfo::argumentGPR2); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImm32 arg2, TrustedImm32 arg3) + { + move(arg1, GPRInfo::argumentGPR1); + move(arg2, GPRInfo::argumentGPR2); + move(arg3, GPRInfo::argumentGPR3); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3) + { + setupStubArguments(arg1, arg2, arg3); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3) + { + setupStubArguments(arg1, arg2); + move(arg3, GPRInfo::argumentGPR3); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, GPRReg arg3) + { + setupTwoStubArgsGPR(arg1, arg3); + move(arg2, GPRInfo::argumentGPR2); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3) + { + setupTwoStubArgsGPR(arg1, arg3); + move(arg2, GPRInfo::argumentGPR2); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, TrustedImmPtr arg3) + { + move(arg1, GPRInfo::argumentGPR1); + move(arg2, GPRInfo::argumentGPR2); + move(arg3, GPRInfo::argumentGPR3); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + 
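The repeated "Move this first, so setting arg1 does not trample!" comments above encode a single rule: when a source value may already sit in one of the destination argument registers, it must be parked in its own destination before anything else is written there. The following is a minimal standalone sketch of that rule in ordinary C++, not JSC code; the register-file map and the names setupWrong/setupRight are invented purely for illustration.

    #include <cassert>
    #include <cstdint>
    #include <map>
    #include <string>

    // Toy register file: maps a register name to its current contents.
    using RegFile = std::map<std::string, uint64_t>;

    // Wrong ordering: writing the immediate into GPR1 first clobbers arg2
    // whenever arg2 currently lives in GPR1.
    void setupWrong(RegFile& regs, uint64_t imm, const std::string& arg2Reg)
    {
        regs["GPR1"] = imm;           // may overwrite arg2!
        regs["GPR2"] = regs[arg2Reg]; // now copies the immediate, not arg2
    }

    // Right ordering (what the helpers above do): park arg2 in its final
    // register before anything is written into GPR1.
    void setupRight(RegFile& regs, uint64_t imm, const std::string& arg2Reg)
    {
        regs["GPR2"] = regs[arg2Reg]; // arg2 is now safe in GPR2
        regs["GPR1"] = imm;
    }

    int main()
    {
        RegFile a { { "GPR1", 42 } }, b { { "GPR1", 42 } };
        setupWrong(a, 7, "GPR1");
        setupRight(b, 7, "GPR1");
        assert(a["GPR2"] == 7);  // clobbered: the original 42 is lost
        assert(b["GPR2"] == 42); // preserved
        return 0;
    }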
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3) + { + move(arg1, GPRInfo::argumentGPR1); + move(arg2, GPRInfo::argumentGPR2); + move(arg3, GPRInfo::argumentGPR3); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, TrustedImm32 arg3) + { + move(arg1, GPRInfo::argumentGPR1); + move(arg2, GPRInfo::argumentGPR2); + move(arg3, GPRInfo::argumentGPR3); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImmPtr arg3) + { + move(arg1, GPRInfo::argumentGPR1); + move(arg2, GPRInfo::argumentGPR2); + move(arg3, GPRInfo::argumentGPR3); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3) + { + setupStubArguments(arg1, arg2); + move(arg3, GPRInfo::argumentGPR3); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2, GPRReg arg3) + { + move(arg3, GPRInfo::argumentGPR3); + move(arg1, GPRInfo::argumentGPR1); + move(arg2, GPRInfo::argumentGPR2); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImmPtr arg2, GPRReg arg3) + { + move(arg3, GPRInfo::argumentGPR3); + move(arg1, GPRInfo::argumentGPR1); + move(arg2, GPRInfo::argumentGPR2); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3) + { + move(arg3, GPRInfo::argumentGPR3); + move(arg1, GPRInfo::argumentGPR1); + move(arg2, GPRInfo::argumentGPR2); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, TrustedImm32 arg3) + { + move(arg3, GPRInfo::argumentGPR3); + move(arg1, GPRInfo::argumentGPR1); + move(arg2, GPRInfo::argumentGPR2); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, TrustedImm32 arg3) + { + move(arg2, GPRInfo::argumentGPR2); + move(arg1, GPRInfo::argumentGPR1); + move(arg3, GPRInfo::argumentGPR3); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3) + { + setupTwoStubArgsGPR(arg2, arg3); + move(arg1, GPRInfo::argumentGPR1); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3) + { + setupTwoStubArgsGPR(arg2, arg3); + move(arg1, GPRInfo::argumentGPR1); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3) + { + move(arg2, GPRInfo::argumentGPR2); // In case arg2 is argumentGPR1. + move(arg1, GPRInfo::argumentGPR1); + move(arg3, GPRInfo::argumentGPR3); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImmPtr arg3) + { + move(arg2, GPRInfo::argumentGPR2); // In case arg2 is argumentGPR1. 
+ move(arg1, GPRInfo::argumentGPR1); + move(arg3, GPRInfo::argumentGPR3); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImmPtr arg2, TrustedImm32 arg3) + { + move(arg1, GPRInfo::argumentGPR1); + move(arg2, GPRInfo::argumentGPR2); + move(arg3, GPRInfo::argumentGPR3); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImmPtr arg2, TrustedImmPtr arg3) + { + move(arg1, GPRInfo::argumentGPR1); + move(arg2, GPRInfo::argumentGPR2); + move(arg3, GPRInfo::argumentGPR3); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2, TrustedImm32 arg3) + { + move(arg1, GPRInfo::argumentGPR1); + move(arg2, GPRInfo::argumentGPR2); + move(arg3, GPRInfo::argumentGPR3); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + +#endif // NUMBER_OF_ARGUMENT_REGISTERS >= 4 + // These methods are suitable for any calling convention that provides for + // exactly 4 argument registers, e.g. ARMv7. +#if NUMBER_OF_ARGUMENT_REGISTERS == 4 + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4) + { + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4) + { + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, GPRReg arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, TrustedImmPtr arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4) + { + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4) + { + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4) + { + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4) + { + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImm32 arg4) + { + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4, TrustedImm32 arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + +#if CPU(X86_64) + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImmPtr 
arg3, TrustedImm64 arg4) + { + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } +#endif + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImmPtr arg4) + { + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5, TrustedImm32 arg6) + { + poke(arg6, POKE_ARGUMENT_OFFSET + 2); + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImmPtr arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImm32 arg4) + { + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImm32 arg4, TrustedImm32 arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2, GPRReg arg3, GPRReg arg4) + { + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4) + { + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, TrustedImm32 arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4) + { + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4) + { + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + 
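With exactly four argument registers, ExecState plus the first three operands fill them all, so arg4 and beyond are poked onto the stack: arg4 at slot POKE_ARGUMENT_OFFSET, arg5 at POKE_ARGUMENT_OFFSET + 1, and so on. The pokes are emitted before the three-register shuffle so that the registers still holding arg4/arg5 cannot be clobbered first. A rough standalone model of the slot arithmetic follows; the offset value 4 and the one-word slot size are assumptions for illustration only, the real values come from the target ABI.

    #include <cstddef>
    #include <cstdio>

    // Illustrative constants; the real values depend on the target ABI.
    constexpr size_t kPokeArgumentOffset = 4;   // first free outgoing stack slot
    constexpr size_t kSlotSize = sizeof(void*); // one machine word per slot

    // Byte offset from the stack pointer for the i-th spilled argument
    // (i == 0 corresponds to arg4 in the helpers above).
    constexpr size_t spillOffset(size_t i)
    {
        return (kPokeArgumentOffset + i) * kSlotSize;
    }

    int main()
    {
        // arg4 and arg5 of a six-operand call, as poked by the helpers above.
        std::printf("arg4 -> [sp + %zu]\n", spillOffset(0));
        std::printf("arg5 -> [sp + %zu]\n", spillOffset(1));
        return 0;
    }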
ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImmPtr arg4) + { + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4) + { + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4) + { + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImmPtr arg4) + { + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4) + { + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, GPRReg arg3, GPRReg arg4) + { + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, TrustedImm32 arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, TrustedImmPtr arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4) + { + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4, TrustedImm32 arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + 
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImmPtr arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, GPRReg arg4, TrustedImm32 arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6) + { + poke(arg6, POKE_ARGUMENT_OFFSET + 2); + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, TrustedImm32 arg6) + { + poke(arg6, POKE_ARGUMENT_OFFSET + 2); + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5, TrustedImm32 arg6) + { + poke(arg6, POKE_ARGUMENT_OFFSET + 2); + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6, TrustedImmPtr arg7) + { + poke(arg7, POKE_ARGUMENT_OFFSET + 3); + poke(arg6, POKE_ARGUMENT_OFFSET + 2); + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, GPRReg arg4, GPRReg arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, TrustedImm32 arg5, TrustedImmPtr arg6) + { + poke(arg6, POKE_ARGUMENT_OFFSET + 2); + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, TrustedImmPtr arg6) + { + poke(arg6, POKE_ARGUMENT_OFFSET + 2); + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, TrustedImm32 arg5, TrustedImmPtr arg6) + { + poke(arg6, POKE_ARGUMENT_OFFSET + 2); + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); 
+ } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, TrustedImm32 arg6) + { + poke(arg6, POKE_ARGUMENT_OFFSET + 2); + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, GPRReg arg5, GPRReg arg6) + { + poke(arg6, POKE_ARGUMENT_OFFSET + 2); + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, GPRReg arg5, TrustedImm32 arg6) + { + poke(arg6, POKE_ARGUMENT_OFFSET + 2); + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, TrustedImmPtr arg6) + { + poke(arg6, POKE_ARGUMENT_OFFSET + 2); + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6, GPRReg arg7) + { + poke(arg7, POKE_ARGUMENT_OFFSET + 3); + poke(arg6, POKE_ARGUMENT_OFFSET + 2); + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6, GPRReg arg7) + { + poke(arg7, POKE_ARGUMENT_OFFSET + 3); + poke(arg6, POKE_ARGUMENT_OFFSET + 2); + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6, GPRReg arg7, TrustedImmPtr arg8) + { + poke(arg8, POKE_ARGUMENT_OFFSET + 4); + poke(arg7, POKE_ARGUMENT_OFFSET + 3); + poke(arg6, POKE_ARGUMENT_OFFSET + 2); + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5, GPRReg arg6, GPRReg arg7) + { + poke(arg7, POKE_ARGUMENT_OFFSET + 3); + poke(arg6, POKE_ARGUMENT_OFFSET + 2); + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, TrustedImm32 arg5, GPRReg arg6, GPRReg arg7) + { + poke(arg7, POKE_ARGUMENT_OFFSET + 3); + poke(arg6, POKE_ARGUMENT_OFFSET + 2); + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4, GPRReg arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET); + setupTwoStubArgsGPR(arg1, arg2); + move(arg3, GPRInfo::argumentGPR2); + move(arg4, GPRInfo::argumentGPR3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, TrustedImm32 arg5) + 
{ + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } +#endif // NUMBER_OF_ARGUMENT_REGISTERS == 4 + +#if NUMBER_OF_ARGUMENT_REGISTERS >= 5 + void setupStubArguments134(GPRReg arg1, GPRReg arg3, GPRReg arg4) + { + setupThreeStubArgsGPR(arg1, arg3, arg4); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4) + { + setupThreeStubArgsGPR(arg1, arg2, arg3); + move(arg4, GPRInfo::argumentGPR4); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4) + { + setupTwoStubArgsGPR(arg1, arg4); + move(arg2, GPRInfo::argumentGPR2); + move(arg3, GPRInfo::argumentGPR3); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5) + { + setupThreeStubArgsGPR(arg1, arg4, arg5); + move(arg2, GPRInfo::argumentGPR2); + move(arg3, GPRInfo::argumentGPR3); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4) + { + setupStubArguments134(arg1, arg3, arg4); + move(arg2, GPRInfo::argumentGPR2); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4) + { + setupTwoStubArgsGPR(arg2, arg3); + move(arg1, GPRInfo::argumentGPR1); + move(arg4, GPRInfo::argumentGPR4); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImm32 arg4) + { + move(arg2, GPRInfo::argumentGPR2); // In case arg2 is argumentGPR1. + move(arg1, GPRInfo::argumentGPR1); + move(arg3, GPRInfo::argumentGPR3); + move(arg4, GPRInfo::argumentGPR4); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm64 arg4) + { + move(arg2, GPRInfo::argumentGPR2); // In case arg2 is argumentGPR1. + move(arg1, GPRInfo::argumentGPR1); + move(arg3, GPRInfo::argumentGPR3); + move(arg4, GPRInfo::argumentGPR4); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4, TrustedImm32 arg5) + { + move(arg2, GPRInfo::argumentGPR2); // In case arg2 is argumentGPR1. 
+ move(arg1, GPRInfo::argumentGPR1); + move(arg3, GPRInfo::argumentGPR3); + move(arg4, GPRInfo::argumentGPR4); + move(arg5, GPRInfo::argumentGPR5); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5) + { + move(arg3, GPRInfo::argumentGPR3); + move(arg1, GPRInfo::argumentGPR1); + move(arg2, GPRInfo::argumentGPR2); + move(arg4, GPRInfo::argumentGPR4); + move(arg5, GPRInfo::argumentGPR5); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, TrustedImm32 arg5) + { + setupTwoStubArgsGPR(arg2, arg4); + move(arg1, GPRInfo::argumentGPR1); + move(arg3, GPRInfo::argumentGPR3); + move(arg5, GPRInfo::argumentGPR5); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5) + { + setupTwoStubArgsGPR(arg2, arg3); + move(arg1, GPRInfo::argumentGPR1); + move(arg4, GPRInfo::argumentGPR4); + move(arg5, GPRInfo::argumentGPR5); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4) + { + setupThreeStubArgsGPR(arg1, arg2, arg3); + move(arg4, GPRInfo::argumentGPR4); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4) + { + setupThreeStubArgsGPR(arg1, arg2, arg4); + move(arg3, GPRInfo::argumentGPR3); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArguments(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4, TrustedImmPtr arg5) + { + setupThreeStubArgsGPR(arg1, arg3, arg4); + move(arg2, GPRInfo::argumentGPR1); + move(arg5, GPRInfo::argumentGPR4); + } + + ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4, GPRReg arg5) + { + setupThreeStubArgsGPR(arg1, arg2, arg5); + move(arg3, GPRInfo::argumentGPR2); + move(arg4, GPRInfo::argumentGPR3); + } +#endif + + void setupArguments(JSValueRegs arg1) + { +#if USE(JSVALUE64) + setupArguments(arg1.gpr()); +#else + setupArguments(arg1.payloadGPR(), arg1.tagGPR()); +#endif + } + + void setupResults(GPRReg destA, GPRReg destB) + { + GPRReg srcA = GPRInfo::returnValueGPR; + GPRReg srcB = GPRInfo::returnValueGPR2; + + if (destA == InvalidGPRReg) + move(srcB, destB); + else if (destB == InvalidGPRReg) + move(srcA, destA); + else if (srcB != destA) { + // Handle the easy cases - two simple moves. + move(srcA, destA); + move(srcB, destB); + } else if (srcA != destB) { + // Handle the non-swap case - just put srcB in place first. + move(srcB, destB); + move(srcA, destA); + } else + swap(destA, destB); + } + + void setupResults(JSValueRegs regs) + { +#if USE(JSVALUE64) + move(GPRInfo::returnValueGPR, regs.gpr()); +#else + setupResults(regs.payloadGPR(), regs.tagGPR()); +#endif + } + + void jumpToExceptionHandler() + { + // genericUnwind() leaves the handler CallFrame* in vm->callFrameForCatch, + // and the address of the handler in vm->targetMachinePCForThrow. 
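+ // All this helper does is transfer control; the catch handler is expected to re-establish its call frame from vm->callFrameForCatch.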
+ loadPtr(&vm()->targetMachinePCForThrow, GPRInfo::regT1); + jump(GPRInfo::regT1); + } + + void prepareForTailCallSlow(GPRReg calleeGPR = InvalidGPRReg) + { + GPRReg temp1 = calleeGPR == GPRInfo::regT0 ? GPRInfo::regT3 : GPRInfo::regT0; + GPRReg temp2 = calleeGPR == GPRInfo::regT1 ? GPRInfo::regT3 : GPRInfo::regT1; + GPRReg temp3 = calleeGPR == GPRInfo::regT2 ? GPRInfo::regT3 : GPRInfo::regT2; + + GPRReg newFramePointer = temp1; + GPRReg newFrameSizeGPR = temp2; + { + // The old frame size is its number of arguments (or number of + // parameters in case of arity fixup), plus the frame header size, + // aligned + GPRReg oldFrameSizeGPR = temp2; + { + GPRReg argCountGPR = oldFrameSizeGPR; + load32(Address(framePointerRegister, JSStack::ArgumentCount * static_cast(sizeof(Register)) + PayloadOffset), argCountGPR); + + { + GPRReg numParametersGPR = temp1; + { + GPRReg codeBlockGPR = numParametersGPR; + loadPtr(Address(framePointerRegister, JSStack::CodeBlock * static_cast(sizeof(Register))), codeBlockGPR); + load32(Address(codeBlockGPR, CodeBlock::offsetOfNumParameters()), numParametersGPR); + } + + ASSERT(numParametersGPR != argCountGPR); + Jump argumentCountWasNotFixedUp = branch32(BelowOrEqual, numParametersGPR, argCountGPR); + move(numParametersGPR, argCountGPR); + argumentCountWasNotFixedUp.link(this); + } + + add32(TrustedImm32(stackAlignmentRegisters() + JSStack::CallFrameHeaderSize - 1), argCountGPR, oldFrameSizeGPR); + and32(TrustedImm32(-stackAlignmentRegisters()), oldFrameSizeGPR); + // We assume < 2^28 arguments + mul32(TrustedImm32(sizeof(Register)), oldFrameSizeGPR, oldFrameSizeGPR); + } + + // The new frame pointer is at framePointer + oldFrameSize - newFrameSize + ASSERT(newFramePointer != oldFrameSizeGPR); + addPtr(framePointerRegister, oldFrameSizeGPR, newFramePointer); + + // The new frame size is just the number of arguments plus the + // frame header size, aligned + ASSERT(newFrameSizeGPR != newFramePointer); + load32(Address(stackPointerRegister, JSStack::ArgumentCount * static_cast(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)), + newFrameSizeGPR); + add32(TrustedImm32(stackAlignmentRegisters() + JSStack::CallFrameHeaderSize - 1), newFrameSizeGPR); + and32(TrustedImm32(-stackAlignmentRegisters()), newFrameSizeGPR); + // We assume < 2^28 arguments + mul32(TrustedImm32(sizeof(Register)), newFrameSizeGPR, newFrameSizeGPR); + } + + GPRReg tempGPR = temp3; + ASSERT(tempGPR != newFramePointer && tempGPR != newFrameSizeGPR); + + // We don't need the current frame beyond this point. Masquerade as our + // caller. +#if CPU(ARM) || CPU(SH4) || CPU(ARM64) + loadPtr(Address(framePointerRegister, sizeof(void*)), linkRegister); + subPtr(TrustedImm32(2 * sizeof(void*)), newFrameSizeGPR); +#elif CPU(MIPS) + loadPtr(Address(framePointerRegister, sizeof(void*)), returnAddressRegister); + subPtr(TrustedImm32(2 * sizeof(void*)), newFrameSizeGPR); +#elif CPU(X86) || CPU(X86_64) + loadPtr(Address(framePointerRegister, sizeof(void*)), tempGPR); + push(tempGPR); + subPtr(TrustedImm32(sizeof(void*)), newFrameSizeGPR); +#else + UNREACHABLE_FOR_PLATFORM(); +#endif + subPtr(newFrameSizeGPR, newFramePointer); + loadPtr(Address(framePointerRegister), framePointerRegister); + + + // We need to move the newFrameSizeGPR slots above the stack pointer by + // newFramePointer registers. We use pointer-sized chunks. 
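+ // Example: for a 5-slot (40-byte) frame on a 64-bit target, the loop below copies offsets 32, 24, 16, 8 and 0, in that order, from [sp + offset] to [newFramePointer + offset], and falls through once newFrameSizeGPR reaches zero.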
+ MacroAssembler::Label copyLoop(label()); + + subPtr(TrustedImm32(sizeof(void*)), newFrameSizeGPR); + loadPtr(BaseIndex(stackPointerRegister, newFrameSizeGPR, TimesOne), tempGPR); + storePtr(tempGPR, BaseIndex(newFramePointer, newFrameSizeGPR, TimesOne)); + + branchTest32(MacroAssembler::NonZero, newFrameSizeGPR).linkTo(copyLoop, this); + + // Ready for a jump! + move(newFramePointer, stackPointerRegister); + } +}; + +} // namespace JSC + +#endif // ENABLE(JIT) + +#endif // CCallHelpers_h + diff --git a/Source/JavaScriptCore/jit/CachedRecovery.cpp b/Source/JavaScriptCore/jit/CachedRecovery.cpp new file mode 100644 index 000000000..f4aacc6c8 --- /dev/null +++ b/Source/JavaScriptCore/jit/CachedRecovery.cpp @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "CachedRecovery.h" + +#if ENABLE(JIT) + +namespace JSC { + +// We prefer loading doubles and undetermined JSValues into FPRs +// because it would otherwise use up GPRs. Two in JSVALUE32_64. +bool CachedRecovery::loadsIntoFPR() const +{ + switch (recovery().technique()) { + case DoubleDisplacedInJSStack: + case DisplacedInJSStack: +#if USE(JSVALUE64) + case CellDisplacedInJSStack: +#endif + return true; + + default: + return false; + } +} + +// Integers, booleans and cells can be loaded into GPRs +bool CachedRecovery::loadsIntoGPR() const +{ + switch (recovery().technique()) { + case Int32DisplacedInJSStack: +#if USE(JSVALUE64) + case Int52DisplacedInJSStack: + case StrictInt52DisplacedInJSStack: + case DisplacedInJSStack: +#endif + case BooleanDisplacedInJSStack: + case CellDisplacedInJSStack: + return true; + + default: + return false; + } +} + +} // namespace JSC + +#endif // ENABLE(JIT) diff --git a/Source/JavaScriptCore/jit/CachedRecovery.h b/Source/JavaScriptCore/jit/CachedRecovery.h new file mode 100644 index 000000000..5fe39dee7 --- /dev/null +++ b/Source/JavaScriptCore/jit/CachedRecovery.h @@ -0,0 +1,137 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef CachedRecovery_h +#define CachedRecovery_h + +#if ENABLE(JIT) + +#include "ValueRecovery.h" +#include "VirtualRegister.h" +#include + +namespace JSC { + +// A CachedRecovery is a wrapper around a ValueRecovery that records where said +// value should go on the stack and/or in registers. Whenever we perform an +// operation changing the ValueRecovery, we update the CachedRecovery's member +// in place. +class CachedRecovery { +public: + CachedRecovery(ValueRecovery recovery) + : m_recovery { recovery } + { + } + + CachedRecovery(CachedRecovery&) = delete; + CachedRecovery(CachedRecovery&&) = delete; + CachedRecovery& operator=(CachedRecovery&) = delete; + CachedRecovery& operator=(CachedRecovery&&) = delete; + + const Vector& targets() const { return m_targets; } + + void addTarget(VirtualRegister reg) + { + ASSERT(m_targets.isEmpty() || m_targets.last() < reg); + m_targets.append(reg); + } + + void removeTarget(VirtualRegister reg) + { + ASSERT_UNUSED(reg, m_targets.last() == reg); + m_targets.shrink(m_targets.size() - 1); + } + + void clearTargets() + { + m_targets.clear(); + } + + void setWantedJSValueRegs(JSValueRegs jsValueRegs) + { + ASSERT(m_wantedFPR == InvalidFPRReg); + m_wantedJSValueRegs = jsValueRegs; + } + + void setWantedFPR(FPRReg fpr) + { + ASSERT(!m_wantedJSValueRegs); + m_wantedFPR = fpr; + } + + // Determine whether converting this recovery into a JSValue will + // require additional GPRs and/or FPRs. + // This is guaranteed to only depend on the DataFormat, and the + // result of these calls will stay valid after loads and/or stores. + bool boxingRequiresGPR() const + { +#if USE(JSVALUE64) + return recovery().dataFormat() == DataFormatDouble; +#else + return false; +#endif + } + bool boxingRequiresFPR() const + { +#if USE(JSVALUE64) + switch (recovery().dataFormat()) { + case DataFormatInt52: + case DataFormatStrictInt52: + return true; + + default: + return false; + } +#else + return false; +#endif + } + + // This is used to determine what kind of register we need to be + // able to load a recovery. We only use it when a direct load is + // currently impossible, to determine whether we should spill a + // GPR or an FPR for loading this value. 
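+ // For example, a DoubleDisplacedInJSStack recovery loads into an FPR, while an Int32DisplacedInJSStack recovery loads into a GPR (see CachedRecovery.cpp).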
+ bool loadsIntoGPR() const; + bool loadsIntoFPR() const; + + ValueRecovery recovery() const { return m_recovery; } + + void setRecovery(ValueRecovery recovery) { m_recovery = recovery; } + + JSValueRegs wantedJSValueRegs() const { return m_wantedJSValueRegs; } + + FPRReg wantedFPR() const { return m_wantedFPR; } +private: + ValueRecovery m_recovery; + JSValueRegs m_wantedJSValueRegs; + FPRReg m_wantedFPR { InvalidFPRReg }; + Vector m_targets; +}; + +} // namespace JSC + +#endif // ENABLE(JIT) + +#endif // CachedRecovery_h diff --git a/Source/JavaScriptCore/jit/CallFrameShuffleData.cpp b/Source/JavaScriptCore/jit/CallFrameShuffleData.cpp new file mode 100644 index 000000000..567202c15 --- /dev/null +++ b/Source/JavaScriptCore/jit/CallFrameShuffleData.cpp @@ -0,0 +1,68 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "CallFrameShuffleData.h" + +#if ENABLE(JIT) + +#include "CCallHelpers.h" +#include "CodeBlock.h" + +namespace JSC { + +#if USE(JSVALUE64) + +void CallFrameShuffleData::setupCalleeSaveRegisters(CodeBlock* codeBlock) +{ + RegisterSet calleeSaveRegisters { RegisterSet::vmCalleeSaveRegisters() }; + RegisterAtOffsetList* registerSaveLocations = codeBlock->calleeSaveRegisters(); + + for (size_t i = 0; i < registerSaveLocations->size(); ++i) { + RegisterAtOffset entry { registerSaveLocations->at(i) }; + if (!calleeSaveRegisters.get(entry.reg())) + continue; + + VirtualRegister saveSlot { entry.offsetAsIndex() }; + registers[entry.reg()] + = ValueRecovery::displacedInJSStack(saveSlot, DataFormatJS); + } + + for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) { + if (!calleeSaveRegisters.get(reg)) + continue; + + if (registers[reg]) + continue; + + registers[reg] = ValueRecovery::inRegister(reg, DataFormatJS); + } +} + +#endif // USE(JSVALUE64) + +} // namespace JSC + +#endif // ENABLE(JIT) diff --git a/Source/JavaScriptCore/jit/CallFrameShuffleData.h b/Source/JavaScriptCore/jit/CallFrameShuffleData.h new file mode 100644 index 000000000..d85e55b3e --- /dev/null +++ b/Source/JavaScriptCore/jit/CallFrameShuffleData.h @@ -0,0 +1,54 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef CallFrameShuffleData_h +#define CallFrameShuffleData_h + +#if ENABLE(JIT) + +#include "RegisterMap.h" +#include "ValueRecovery.h" + +namespace JSC { + +struct CallFrameShuffleData { + WTF_MAKE_FAST_ALLOCATED; +public: + unsigned numLocals; + ValueRecovery callee; + Vector args; +#if USE(JSVALUE64) + RegisterMap registers; + GPRReg tagTypeNumber { InvalidGPRReg }; + + void setupCalleeSaveRegisters(CodeBlock*); +#endif +}; + +} // namespace JSC + +#endif // ENABLE(JIT) + +#endif // CallFrameShuffleData_h diff --git a/Source/JavaScriptCore/jit/CallFrameShuffler.cpp b/Source/JavaScriptCore/jit/CallFrameShuffler.cpp new file mode 100644 index 000000000..45af55dd6 --- /dev/null +++ b/Source/JavaScriptCore/jit/CallFrameShuffler.cpp @@ -0,0 +1,774 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "CallFrameShuffler.h" + +#if ENABLE(JIT) + +#include "CachedRecovery.h" +#include "CCallHelpers.h" +#include "CodeBlock.h" + +namespace JSC { + +CallFrameShuffler::CallFrameShuffler(CCallHelpers& jit, const CallFrameShuffleData& data) + : m_jit(jit) + , m_oldFrame(data.numLocals + JSStack::CallerFrameAndPCSize, nullptr) + , m_newFrame(data.args.size() + JSStack::CallFrameHeaderSize, nullptr) + , m_alignedOldFrameSize(JSStack::CallFrameHeaderSize + + roundArgumentCountToAlignFrame(jit.codeBlock()->numParameters())) + , m_alignedNewFrameSize(JSStack::CallFrameHeaderSize + + roundArgumentCountToAlignFrame(data.args.size())) + , m_frameDelta(m_alignedNewFrameSize - m_alignedOldFrameSize) + , m_lockedRegisters(RegisterSet::allRegisters()) +{ + // We are allowed all the usual registers... + for (unsigned i = GPRInfo::numberOfRegisters; i--; ) + m_lockedRegisters.clear(GPRInfo::toRegister(i)); + for (unsigned i = FPRInfo::numberOfRegisters; i--; ) + m_lockedRegisters.clear(FPRInfo::toRegister(i)); + // ... as well as the runtime registers. + m_lockedRegisters.exclude(RegisterSet::vmCalleeSaveRegisters()); + + ASSERT(!data.callee.isInJSStack() || data.callee.virtualRegister().isLocal()); + addNew(VirtualRegister(JSStack::Callee), data.callee); + + for (size_t i = 0; i < data.args.size(); ++i) { + ASSERT(!data.args[i].isInJSStack() || data.args[i].virtualRegister().isLocal()); + addNew(virtualRegisterForArgument(i), data.args[i]); + } + +#if USE(JSVALUE64) + for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) { + if (!data.registers[reg].isSet()) + continue; + + if (reg.isGPR()) + addNew(JSValueRegs(reg.gpr()), data.registers[reg]); + else + addNew(reg.fpr(), data.registers[reg]); + } + + m_tagTypeNumber = data.tagTypeNumber; + if (m_tagTypeNumber != InvalidGPRReg) + lockGPR(m_tagTypeNumber); +#endif +} + +void CallFrameShuffler::dump(PrintStream& out) const +{ + static const char* delimiter = " +-------------------------------+ "; + static const char* dangerDelimiter = " X-------------------------------X "; + static const char* dangerBoundsDelimiter = " XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX "; + static const char* emptySpace = " "; + out.print(" "); + out.print(" Old frame "); + out.print(" New frame "); + out.print("\n"); + int totalSize = m_alignedOldFrameSize + std::max(numLocals(), m_alignedNewFrameSize) + 3; + for (int i = 0; i < totalSize; ++i) { + VirtualRegister old { m_alignedOldFrameSize - i - 1 }; + VirtualRegister newReg { old + m_frameDelta }; + + if (!isValidOld(old) && old != firstOld() - 1 + && !isValidNew(newReg) && newReg != firstNew() - 1) + continue; + + out.print(" "); + if (dangerFrontier() >= firstNew() + && (newReg == dangerFrontier() || newReg == firstNew() - 1)) + out.print(dangerBoundsDelimiter); + else if (isValidOld(old)) + out.print(isValidNew(newReg) && isDangerNew(newReg) ? dangerDelimiter : delimiter); + else if (old == firstOld() - 1) + out.print(delimiter); + else + out.print(emptySpace); + if (dangerFrontier() >= firstNew() + && (newReg == dangerFrontier() || newReg == firstNew() - 1)) + out.print(dangerBoundsDelimiter); + else if (isValidNew(newReg) || newReg == firstNew() - 1) + out.print(isDangerNew(newReg) ? 
dangerDelimiter : delimiter); + else + out.print(emptySpace); + out.print("\n"); + if (old == firstOld()) + out.print(" sp --> "); + else if (!old.offset()) + out.print(" fp --> "); + else + out.print(" "); + if (isValidOld(old)) { + if (getOld(old)) { + auto str = toCString(old); + if (isValidNew(newReg) && isDangerNew(newReg)) + out.printf(" X %18s X ", str.data()); + else + out.printf(" | %18s | ", str.data()); + } else if (isValidNew(newReg) && isDangerNew(newReg)) + out.printf(" X%30s X ", ""); + else + out.printf(" |%30s | ", ""); + } else + out.print(emptySpace); + if (isValidNew(newReg)) { + const char d = isDangerNew(newReg) ? 'X' : '|'; + auto str = toCString(newReg); + if (getNew(newReg)) { + if (getNew(newReg)->recovery().isConstant()) + out.printf(" %c%8s <- constant %c ", d, str.data(), d); + else { + auto recoveryStr = toCString(getNew(newReg)->recovery()); + out.printf(" %c%8s <- %18s %c ", d, str.data(), + recoveryStr.data(), d); + } + } else if (newReg == VirtualRegister { JSStack::ArgumentCount }) + out.printf(" %c%8s <- %18zu %c ", d, str.data(), argCount(), d); + else + out.printf(" %c%30s %c ", d, "", d); + } else + out.print(emptySpace); + if (newReg == firstNew() - m_newFrameOffset && !isSlowPath()) + out.print(" <-- new sp before jump (current ", m_newFrameBase, ") "); + if (newReg == firstNew()) + out.print(" <-- new fp after prologue"); + out.print("\n"); + } + out.print(" "); + out.print(" Live registers "); + out.print(" Wanted registers "); + out.print("\n"); + for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) { + CachedRecovery* oldCachedRecovery { m_registers[reg] }; + CachedRecovery* newCachedRecovery { m_newRegisters[reg] }; + if (!oldCachedRecovery && !newCachedRecovery) + continue; + out.print(" "); + if (oldCachedRecovery) { + auto str = toCString(reg); + out.printf(" %8s ", str.data()); + } else + out.print(emptySpace); +#if USE(JSVALUE32_64) + if (newCachedRecovery) { + JSValueRegs wantedJSValueRegs { newCachedRecovery->wantedJSValueRegs() }; + if (reg.isFPR()) + out.print(reg, " <- ", newCachedRecovery->recovery()); + else { + if (reg.gpr() == wantedJSValueRegs.tagGPR()) + out.print(reg.gpr(), " <- tag(", newCachedRecovery->recovery(), ")"); + else + out.print(reg.gpr(), " <- payload(", newCachedRecovery->recovery(), ")"); + } + } +#else + if (newCachedRecovery) + out.print(" ", reg, " <- ", newCachedRecovery->recovery()); +#endif + out.print("\n"); + } + out.print(" Locked registers: "); + bool firstLocked { true }; + for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) { + if (m_lockedRegisters.get(reg)) { + out.print(firstLocked ? 
"" : ", ", reg); + firstLocked = false; + } + } + out.print("\n"); + + if (isSlowPath()) + out.print(" Using fp-relative addressing for slow path call\n"); + else + out.print(" Using sp-relative addressing for jump (using ", m_newFrameBase, " as new sp)\n"); + if (m_oldFrameOffset) + out.print(" Old frame offset is ", m_oldFrameOffset, "\n"); + if (m_newFrameOffset) + out.print(" New frame offset is ", m_newFrameOffset, "\n"); +#if USE(JSVALUE64) + if (m_tagTypeNumber != InvalidGPRReg) + out.print(" TagTypeNumber is currently in ", m_tagTypeNumber, "\n"); +#endif +} + +CachedRecovery* CallFrameShuffler::getCachedRecovery(ValueRecovery recovery) +{ + ASSERT(!recovery.isConstant()); + if (recovery.isInGPR()) + return m_registers[recovery.gpr()]; + if (recovery.isInFPR()) + return m_registers[recovery.fpr()]; +#if USE(JSVALUE32_64) + if (recovery.technique() == InPair) { + ASSERT(m_registers[recovery.tagGPR()] == m_registers[recovery.payloadGPR()]); + return m_registers[recovery.payloadGPR()]; + } +#endif + ASSERT(recovery.isInJSStack()); + return getOld(recovery.virtualRegister()); +} + +CachedRecovery* CallFrameShuffler::setCachedRecovery(ValueRecovery recovery, CachedRecovery* cachedRecovery) +{ + ASSERT(!recovery.isConstant()); + if (recovery.isInGPR()) + return m_registers[recovery.gpr()] = cachedRecovery; + if (recovery.isInFPR()) + return m_registers[recovery.fpr()] = cachedRecovery; +#if USE(JSVALUE32_64) + if (recovery.technique() == InPair) { + m_registers[recovery.tagGPR()] = cachedRecovery; + return m_registers[recovery.payloadGPR()] = cachedRecovery; + } +#endif + ASSERT(recovery.isInJSStack()); + setOld(recovery.virtualRegister(), cachedRecovery); + return cachedRecovery; +} + +void CallFrameShuffler::spill(CachedRecovery& cachedRecovery) +{ + ASSERT(!isSlowPath()); + ASSERT(cachedRecovery.recovery().isInRegisters()); + + VirtualRegister spillSlot { 0 }; + for (VirtualRegister slot = firstOld(); slot <= lastOld(); slot += 1) { + if (slot >= newAsOld(firstNew())) + break; + + if (getOld(slot)) + continue; + + spillSlot = slot; + break; + } + // We must have enough slots to be able to fit the whole callee's + // frame for the slow path - unless we are in the FTL. In that + // case, we are allowed to extend the frame *once*, since we are + // guaranteed to have enough available space for that. 
+ if (spillSlot >= newAsOld(firstNew()) || !spillSlot.isLocal()) { + RELEASE_ASSERT(!m_didExtendFrame); + extendFrameIfNeeded(); + spill(cachedRecovery); + return; + } + + if (verbose) + dataLog(" * Spilling ", cachedRecovery.recovery(), " into ", spillSlot, "\n"); + auto format = emitStore(cachedRecovery, addressForOld(spillSlot)); + ASSERT(format != DataFormatNone); + updateRecovery(cachedRecovery, ValueRecovery::displacedInJSStack(spillSlot, format)); +} + +void CallFrameShuffler::emitDeltaCheck() +{ + if (ASSERT_DISABLED) + return; + + GPRReg scratchGPR { getFreeGPR() }; + if (scratchGPR != InvalidGPRReg) { + if (verbose) + dataLog(" Using ", scratchGPR, " for the fp-sp delta check\n"); + m_jit.move(MacroAssembler::stackPointerRegister, scratchGPR); + m_jit.subPtr(GPRInfo::callFrameRegister, scratchGPR); + MacroAssembler::Jump ok = m_jit.branch32( + MacroAssembler::Equal, scratchGPR, + MacroAssembler::TrustedImm32(-numLocals() * sizeof(Register))); + m_jit.abortWithReason(JITUnexpectedCallFrameSize); + ok.link(&m_jit); + } else if (verbose) + dataLog(" Skipping the fp-sp delta check since there is too much pressure"); +} + +void CallFrameShuffler::extendFrameIfNeeded() +{ + ASSERT(!m_didExtendFrame); + + VirtualRegister firstRead { firstOld() }; + for (; firstRead <= virtualRegisterForLocal(0); firstRead += 1) { + if (getOld(firstRead)) + break; + } + size_t availableSize = static_cast(firstRead.offset() - firstOld().offset()); + size_t wantedSize = m_newFrame.size() + m_newFrameOffset; + + if (availableSize < wantedSize) { + size_t delta = WTF::roundUpToMultipleOf(stackAlignmentRegisters(), wantedSize - availableSize); + m_oldFrame.grow(m_oldFrame.size() + delta); + for (size_t i = 0; i < delta; ++i) + m_oldFrame[m_oldFrame.size() - i - 1] = nullptr; + m_jit.subPtr(MacroAssembler::TrustedImm32(delta * sizeof(Register)), MacroAssembler::stackPointerRegister); + + if (isSlowPath()) + m_frameDelta = numLocals() + JSStack::CallerFrameAndPCSize; + else + m_oldFrameOffset = numLocals(); + + if (verbose) + dataLogF(" Not enough space - extending the old frame %zu slot\n", delta); + } + + m_didExtendFrame = true; +} + +void CallFrameShuffler::prepareForSlowPath() +{ + ASSERT(isUndecided()); + emitDeltaCheck(); + + m_frameDelta = numLocals() + JSStack::CallerFrameAndPCSize; + m_newFrameBase = MacroAssembler::stackPointerRegister; + m_newFrameOffset = -JSStack::CallerFrameAndPCSize; + + if (verbose) + dataLog("\n\nPreparing frame for slow path call:\n"); + + // When coming from the FTL, we need to extend the frame. In other + // cases, we may end up extending the frame if we previously + // spilled things (e.g. in polymorphic cache). + extendFrameIfNeeded(); + + if (verbose) + dataLog(*this); + + prepareAny(); + + if (verbose) + dataLog("Ready for slow path call!\n"); +} + +void CallFrameShuffler::prepareForTailCall() +{ + ASSERT(isUndecided()); + emitDeltaCheck(); + + // We'll use sp-based indexing so that we can load the + // caller's frame pointer into the fpr immediately + m_oldFrameBase = MacroAssembler::stackPointerRegister; + m_oldFrameOffset = numLocals(); + m_newFrameBase = acquireGPR(); +#if CPU(X86) + // We load the frame pointer manually, but we need to ask the + // algorithm to move the return PC for us (it'd probably + // require a write to the danger zone). Since it'd be awkward + // to ask for half a value move, we ask that the whole thing + // be moved for us. 
+ addNew(VirtualRegister { 0 }, + ValueRecovery::displacedInJSStack(VirtualRegister(0), DataFormatJS)); + + // sp will point to head0 and we will move it up half a slot + // manually + m_newFrameOffset = 0; +#elif CPU(ARM) || CPU(SH4) || CPU(MIPS) + // We load the the frame pointer and link register + // manually. We could ask the algorithm to load them for us, + // and it would allow us to use the link register as an extra + // temporary - but it'd mean that the frame pointer can also + // be used as an extra temporary, so we keep the link register + // locked instead. + + // sp will point to head1 since the callee's prologue pushes + // the call frame and link register. + m_newFrameOffset = -1; +#elif CPU(ARM64) + // We load the frame pointer and link register manually. We + // could ask the algorithm to load the link register for us + // (which would allow for its use as an extra temporary), but + // since its not in GPRInfo, we can't do it. + + // sp will point to head2 since the callee's prologue pushes the + // call frame and link register + m_newFrameOffset = -2; +#elif CPU(X86_64) + // We load the frame pointer manually, but we ask the + // algorithm to move the return PC for us (it'd probably + // require a write in the danger zone) + addNew(VirtualRegister { 1 }, + ValueRecovery::displacedInJSStack(VirtualRegister(1), DataFormatJS)); + + // sp will point to head1 since the callee's prologue pushes + // the call frame register + m_newFrameOffset = -1; +#else + UNREACHABLE_FOR_PLATFORM(); +#endif + + if (verbose) + dataLog(" Emitting code for computing the new frame base\n"); + + // We compute the new frame base by first computing the top of the + // old frame (taking into account an argument count higher than + // the number of parameters), then substracting to it the aligned + // new frame size (adjusted). + m_jit.load32(MacroAssembler::Address(GPRInfo::callFrameRegister, JSStack::ArgumentCount * static_cast(sizeof(Register)) + PayloadOffset), m_newFrameBase); + MacroAssembler::Jump argumentCountOK = + m_jit.branch32(MacroAssembler::BelowOrEqual, m_newFrameBase, + MacroAssembler::TrustedImm32(m_jit.codeBlock()->numParameters())); + m_jit.add32(MacroAssembler::TrustedImm32(stackAlignmentRegisters() - 1 + JSStack::CallFrameHeaderSize), m_newFrameBase); + m_jit.and32(MacroAssembler::TrustedImm32(-stackAlignmentRegisters()), m_newFrameBase); + m_jit.mul32(MacroAssembler::TrustedImm32(sizeof(Register)), m_newFrameBase, m_newFrameBase); + MacroAssembler::Jump done = m_jit.jump(); + argumentCountOK.link(&m_jit); + m_jit.move( + MacroAssembler::TrustedImm32(m_alignedOldFrameSize * sizeof(Register)), + m_newFrameBase); + done.link(&m_jit); + + m_jit.addPtr(GPRInfo::callFrameRegister, m_newFrameBase); + m_jit.subPtr( + MacroAssembler::TrustedImm32( + (m_alignedNewFrameSize + m_newFrameOffset) * sizeof(Register)), + m_newFrameBase); + + // We load the link register manually for architectures that have one +#if CPU(ARM) || CPU(SH4) || CPU(ARM64) + m_jit.loadPtr(MacroAssembler::Address(MacroAssembler::framePointerRegister, sizeof(void*)), + MacroAssembler::linkRegister); +#elif CPU(MIPS) + m_jit.loadPtr(MacroAssembler::Address(MacroAssembler::framePointerRegister, sizeof(void*)), + MacroAssembler::returnAddressRegister); +#endif + + // We want the frame pointer to always point to a valid frame, and + // we are going to trash the current one. Let's make it point to + // our caller's frame, since that's what we want to end up with. 
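As an editorial aid, not taken from the imported sources: the new-frame-base computation emitted a few lines above can be restated in plain C++ as follows. The parameter names are illustrative stand-ins for the values the generated code reads at run time (argument count, parameter count, header size, stack alignment in slots, and sizeof(Register)).

    #include <cstdint>

    // Illustrative restatement of the emitted arithmetic: find the top of the
    // old frame (rounding up when more arguments than parameters were passed),
    // then subtract the aligned size of the new frame plus its offset.
    intptr_t computeNewFrameBase(intptr_t callFrameRegister, int argumentCount,
        int numParameters, int headerSize, int alignmentSlots,
        int alignedOldFrameSize, int alignedNewFrameSize, int newFrameOffset, int slotSize)
    {
        intptr_t oldFrameTop;
        if (argumentCount <= numParameters) {
            // Common case: the caller did not pass extra arguments.
            oldFrameTop = callFrameRegister + alignedOldFrameSize * slotSize;
        } else {
            // Extra arguments: round header + argument count up to the stack alignment.
            int slots = (argumentCount + headerSize + alignmentSlots - 1) & -alignmentSlots;
            oldFrameTop = callFrameRegister + slots * slotSize;
        }
        return oldFrameTop - (alignedNewFrameSize + newFrameOffset) * slotSize;
    }
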
+ m_jit.loadPtr(MacroAssembler::Address(MacroAssembler::framePointerRegister), + MacroAssembler::framePointerRegister); + + if (verbose) + dataLog("Preparing frame for tail call:\n", *this); + + prepareAny(); + +#if CPU(X86) + if (verbose) + dataLog(" Simulating pop of the call frame register\n"); + m_jit.addPtr(MacroAssembler::TrustedImm32(sizeof(void*)), MacroAssembler::stackPointerRegister); +#endif + + if (verbose) + dataLog("Ready for tail call!\n"); +} + +bool CallFrameShuffler::tryWrites(CachedRecovery& cachedRecovery) +{ + ASSERT(m_newFrameBase != InvalidGPRReg); + + // If the value is already set up correctly, we don't have + // anything to do. + if (isSlowPath() && cachedRecovery.recovery().isInJSStack() + && cachedRecovery.targets().size() == 1 + && newAsOld(cachedRecovery.targets()[0]) == cachedRecovery.recovery().virtualRegister()) { + cachedRecovery.clearTargets(); + if (!cachedRecovery.wantedJSValueRegs() && cachedRecovery.wantedFPR() == InvalidFPRReg) + clearCachedRecovery(cachedRecovery.recovery()); + return true; + } + + if (!canLoadAndBox(cachedRecovery)) + return false; + + emitLoad(cachedRecovery); + emitBox(cachedRecovery); + ASSERT(cachedRecovery.recovery().isInRegisters() + || cachedRecovery.recovery().isConstant()); + + if (verbose) + dataLog(" * Storing ", cachedRecovery.recovery()); + for (size_t i = 0; i < cachedRecovery.targets().size(); ++i) { + VirtualRegister target { cachedRecovery.targets()[i] }; + ASSERT(!isDangerNew(target)); + if (verbose) + dataLog(!i ? " into " : ", and ", "NEW ", target); + emitStore(cachedRecovery, addressForNew(target)); + setNew(target, nullptr); + } + if (verbose) + dataLog("\n"); + cachedRecovery.clearTargets(); + if (!cachedRecovery.wantedJSValueRegs() && cachedRecovery.wantedFPR() == InvalidFPRReg) + clearCachedRecovery(cachedRecovery.recovery()); + + return true; +} + +bool CallFrameShuffler::performSafeWrites() +{ + VirtualRegister firstSafe; + VirtualRegister end { lastNew() + 1 }; + Vector failures; + + // For all cachedRecoveries that writes to the safe zone, if it + // doesn't also write to the danger zone, we try to perform + // the writes. This may free up danger slots, so we iterate + // again until it doesn't happen anymore. + // + // Note that even though we have a while block, we look at + // each slot of the new call frame at most once since in each + // iteration beyond the first, we only load up the portion of + // the new call frame that was dangerous and became safe due + // to the previous iteration. 
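An illustrative aside, not part of the imported code: the fixpoint loop that follows can be modelled over plain data, ignoring register pressure and the retry of failed writes. Here 'pending', 'tryWrite' and 'recomputeFrontier' are hypothetical stand-ins for the shuffler's own state and helpers.

    #include <functional>
    #include <vector>

    // Model of the loop below: write every pending slot above the danger
    // frontier; successful writes can lower the frontier, exposing more safe
    // slots, so only the newly safe region is rescanned on the next pass.
    void writeSafeSlots(std::vector<bool>& pending, int& frontier,
        const std::function<bool(int)>& tryWrite,
        const std::function<int()>& recomputeFrontier)
    {
        int end = static_cast<int>(pending.size());
        int firstSafe;
        do {
            firstSafe = frontier + 1;
            for (int slot = firstSafe; slot < end; ++slot) {
                if (pending[slot] && tryWrite(slot))
                    pending[slot] = false;
            }
            end = firstSafe;
            frontier = recomputeFrontier();
        } while (firstSafe != frontier + 1);
    }
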
+ do { + firstSafe = dangerFrontier() + 1; + if (verbose) + dataLog(" Trying safe writes (between NEW ", firstSafe, " and NEW ", end - 1, ")\n"); + bool didProgress = false; + for (VirtualRegister reg = firstSafe; reg < end; reg += 1) { + CachedRecovery* cachedRecovery = getNew(reg); + if (!cachedRecovery) { + if (verbose) + dataLog(" + ", reg, " is OK.\n"); + continue; + } + if (!hasOnlySafeWrites(*cachedRecovery)) { + if (verbose) { + dataLog(" - ", cachedRecovery->recovery(), " writes to NEW ", reg, + " but also has dangerous writes.\n"); + } + continue; + } + if (cachedRecovery->wantedJSValueRegs()) { + if (verbose) { + dataLog(" - ", cachedRecovery->recovery(), " writes to NEW ", reg, + " but is also needed in registers.\n"); + } + continue; + } + if (cachedRecovery->wantedFPR() != InvalidFPRReg) { + if (verbose) { + dataLog(" - ", cachedRecovery->recovery(), " writes to NEW ", reg, + " but is also needed in an FPR.\n"); + } + continue; + } + if (!tryWrites(*cachedRecovery)) { + if (verbose) + dataLog(" - Unable to write to NEW ", reg, " from ", cachedRecovery->recovery(), "\n"); + failures.append(reg); + } + didProgress = true; + } + end = firstSafe; + + // If we have cachedRecoveries that failed to write, it is + // because they are on the stack and we didn't have enough + // registers available at the time to load them into. If + // we have a free register, we should try again because it + // could free up some danger slots. + if (didProgress && hasFreeRegister()) { + Vector stillFailing; + for (VirtualRegister failed : failures) { + CachedRecovery* cachedRecovery = getNew(failed); + // It could have been handled later if it had + // several targets + if (!cachedRecovery) + continue; + + ASSERT(hasOnlySafeWrites(*cachedRecovery) + && !cachedRecovery->wantedJSValueRegs() + && cachedRecovery->wantedFPR() == InvalidFPRReg); + if (!tryWrites(*cachedRecovery)) + stillFailing.append(failed); + } + failures = WTFMove(stillFailing); + } + if (verbose && firstSafe != dangerFrontier() + 1) + dataLog(" We freed up danger slots!\n"); + } while (firstSafe != dangerFrontier() + 1); + + return failures.isEmpty(); +} + +void CallFrameShuffler::prepareAny() +{ + ASSERT(!isUndecided()); + + updateDangerFrontier(); + + // First, we try to store any value that goes above the danger + // frontier. This will never use more registers since we are only + // loading+storing if we ensure that any register used for the load + // will be freed up after the stores (i.e., all stores are above + // the danger frontier, and there is no wanted register). + performSafeWrites(); + + // At this point, we couldn't have more available registers than + // we have withouth spilling: all values currently in registers + // either require a write to the danger zone, or have a wanted + // register, which means that in any case they will have to go + // through registers again. + + // We now slowly free up the danger zone by first loading the old + // value on the danger frontier, spilling as many registers as + // needed to do so and ensuring that the corresponding slot in the + // new frame is now ready to be written. Then, we store the old + // value to its target location if possible (we could have failed + // to load it previously due to high pressure). Finally, we write + // to any of the newly safe slots that we can, which could free up + // registers (hence why we do it eagerly). 
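A simplified model, added editorially and not part of the imported code: the danger-zone loop that follows, reduced to plain indices. The callbacks stand in for the load/box/spill and store logic of the real code; rescuing the old value at the frontier is what makes the frontier recede.

    #include <functional>

    // Model of the loop below: walk from the danger frontier down to the base
    // of the new frame. At the frontier, first rescue the old value that the
    // upcoming store would clobber (this lowers 'frontier'); the slot is then
    // outside the danger zone and may be written if registers allow.
    void drainDangerZone(int& frontier, int firstNewSlot,
        const std::function<void(int)>& rescueOldValueAt,
        const std::function<void(int)>& writeNewSlotIfPossible)
    {
        for (int slot = frontier; slot >= firstNewSlot; --slot) {
            if (slot == frontier)
                rescueOldValueAt(slot); // the only remaining conflicting read
            writeNewSlotIfPossible(slot); // now a safe write
        }
    }
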
+ for (VirtualRegister reg = dangerFrontier(); reg >= firstNew(); reg -= 1) { + if (reg == dangerFrontier()) { + if (verbose) + dataLog(" Next slot (NEW ", reg, ") is the danger frontier\n"); + CachedRecovery* cachedRecovery { getOld(newAsOld(dangerFrontier())) }; + ASSERT(cachedRecovery); + ensureLoad(*cachedRecovery); + emitLoad(*cachedRecovery); + ensureBox(*cachedRecovery); + emitBox(*cachedRecovery); + if (hasOnlySafeWrites(*cachedRecovery)) + tryWrites(*cachedRecovery); + } else if (verbose) + dataLog(" Next slot is NEW ", reg, "\n"); + + ASSERT(!isDangerNew(reg)); + CachedRecovery* cachedRecovery = getNew(reg); + // This could be one of the header slots we don't care about. + if (!cachedRecovery) { + if (verbose) + dataLog(" + ", reg, " is OK\n"); + continue; + } + + if (canLoadAndBox(*cachedRecovery) && hasOnlySafeWrites(*cachedRecovery) + && !cachedRecovery->wantedJSValueRegs() + && cachedRecovery->wantedFPR() == InvalidFPRReg) { + emitLoad(*cachedRecovery); + emitBox(*cachedRecovery); + bool writesOK = tryWrites(*cachedRecovery); + ASSERT_UNUSED(writesOK, writesOK); + } else if (verbose) + dataLog(" - ", cachedRecovery->recovery(), " can't be handled just yet.\n"); + } + ASSERT(dangerFrontier() < firstNew()); + + // Now, the danger zone is empty, but we still have a couple of + // things to do: + // + // 1) There could be remaining safe writes that failed earlier due + // to high register pressure and had nothing to do with the + // danger zone whatsoever. + // + // 2) Some wanted registers could have to be loaded (this could + // happen either when making a call to a new function with a + // lower number of arguments - since above here, we only load + // wanted registers when they are at the danger frontier -, or + // if a wanted register got spilled). + // + // 3) Some wanted registers could have been loaded in the wrong + // registers + // + // 4) We have to take care of some bookkeeping - namely, storing + // the argument count and updating the stack pointer. + + // At this point, we must have enough registers available for + // handling 1). None of the loads can fail because we have been + // eagerly freeing up registers in all the previous phases - so + // the only values that are in registers at this point must have + // wanted registers. + if (verbose) + dataLog(" Danger zone is clear, performing remaining writes.\n"); + for (VirtualRegister reg = firstNew(); reg <= lastNew(); reg += 1) { + CachedRecovery* cachedRecovery { getNew(reg) }; + if (!cachedRecovery) + continue; + + emitLoad(*cachedRecovery); + emitBox(*cachedRecovery); + bool writesOK = tryWrites(*cachedRecovery); + ASSERT_UNUSED(writesOK, writesOK); + } + +#if USE(JSVALUE64) + if (m_tagTypeNumber != InvalidGPRReg && m_newRegisters[m_tagTypeNumber]) + releaseGPR(m_tagTypeNumber); +#endif + + // Handle 2) by loading all registers. We don't have to do any + // writes, since they have been taken care of above. + if (verbose) + dataLog(" Loading wanted registers into registers\n"); + for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) { + CachedRecovery* cachedRecovery { m_newRegisters[reg] }; + if (!cachedRecovery) + continue; + + emitLoad(*cachedRecovery); + emitBox(*cachedRecovery); + ASSERT(cachedRecovery->targets().isEmpty()); + } + +#if USE(JSVALUE64) + if (m_tagTypeNumber != InvalidGPRReg) + releaseGPR(m_tagTypeNumber); +#endif + + // At this point, we have read everything we cared about from the + // stack, and written everything we had to to the stack. 
+ if (verbose) + dataLog(" Callee frame is fully set up\n"); + if (!ASSERT_DISABLED) { + for (VirtualRegister reg = firstNew(); reg <= lastNew(); reg += 1) + ASSERT_UNUSED(reg, !getNew(reg)); + + for (CachedRecovery* cachedRecovery : m_cachedRecoveries) { + ASSERT_UNUSED(cachedRecovery, cachedRecovery->targets().isEmpty()); + ASSERT(!cachedRecovery->recovery().isInJSStack()); + } + } + + // We need to handle 4) first because it implies releasing + // m_newFrameBase, which could be a wanted register. + if (verbose) + dataLog(" * Storing the argument count into ", VirtualRegister { JSStack::ArgumentCount }, "\n"); + m_jit.store32(MacroAssembler::TrustedImm32(0), + addressForNew(VirtualRegister { JSStack::ArgumentCount }).withOffset(TagOffset)); + m_jit.store32(MacroAssembler::TrustedImm32(argCount()), + addressForNew(VirtualRegister { JSStack::ArgumentCount }).withOffset(PayloadOffset)); + + if (!isSlowPath()) { + ASSERT(m_newFrameBase != MacroAssembler::stackPointerRegister); + if (verbose) + dataLog(" Releasing the new frame base pointer\n"); + m_jit.move(m_newFrameBase, MacroAssembler::stackPointerRegister); + releaseGPR(m_newFrameBase); + } + + // Finally we handle 3) + if (verbose) + dataLog(" Ensuring wanted registers are in the right register\n"); + for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) { + CachedRecovery* cachedRecovery { m_newRegisters[reg] }; + if (!cachedRecovery) + continue; + + emitDisplace(*cachedRecovery); + } +} + +} // namespace JSC + +#endif // ENABLE(JIT) diff --git a/Source/JavaScriptCore/jit/CallFrameShuffler.h b/Source/JavaScriptCore/jit/CallFrameShuffler.h new file mode 100644 index 000000000..d5e6f4253 --- /dev/null +++ b/Source/JavaScriptCore/jit/CallFrameShuffler.h @@ -0,0 +1,804 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef CallFrameShuffler_h +#define CallFrameShuffler_h + +#if ENABLE(JIT) + +#include "CachedRecovery.h" +#include "CallFrameShuffleData.h" +#include "MacroAssembler.h" +#include "RegisterSet.h" +#include "StackAlignment.h" +#include + +namespace JSC { + +class CallFrameShuffler { + WTF_MAKE_FAST_ALLOCATED; +public: + CallFrameShuffler(CCallHelpers&, const CallFrameShuffleData&); + + void dump(PrintStream&) const; + + // Any register that has been locked or acquired must be released + // before calling prepareForTailCall() or prepareForSlowPath(). + void lockGPR(GPRReg gpr) + { + ASSERT(!m_lockedRegisters.get(gpr)); + m_lockedRegisters.set(gpr); + if (verbose) + dataLog(" * Locking ", gpr, "\n"); + } + + GPRReg acquireGPR() + { + ensureGPR(); + GPRReg gpr { getFreeGPR() }; + ASSERT(!m_registers[gpr]); + lockGPR(gpr); + return gpr; + } + + void releaseGPR(GPRReg gpr) + { + if (verbose) { + if (m_lockedRegisters.get(gpr)) + dataLog(" * Releasing ", gpr, "\n"); + else + dataLog(" * ", gpr, " was not locked\n"); + } + m_lockedRegisters.clear(gpr); + } + + void restoreGPR(GPRReg gpr) + { + if (!m_newRegisters[gpr]) + return; + + ensureGPR(); +#if USE(JSVALUE32_64) + GPRReg tempGPR { getFreeGPR() }; + lockGPR(tempGPR); + ensureGPR(); + releaseGPR(tempGPR); +#endif + emitDisplace(*m_newRegisters[gpr]); + } + + // You can only take a snapshot if the recovery has not started + // yet. The only operations that are valid before taking a + // snapshot are lockGPR(), acquireGPR() and releaseGPR(). + // + // Locking status is *NOT* preserved by the snapshot: it only + // contains information about where the + // arguments/callee/callee-save registers are by taking into + // account any spilling that acquireGPR() could have done. + CallFrameShuffleData snapshot() const + { + ASSERT(isUndecided()); + + CallFrameShuffleData data; + data.numLocals = numLocals(); + data.callee = getNew(VirtualRegister { JSStack::Callee })->recovery(); + data.args.resize(argCount()); + for (size_t i = 0; i < argCount(); ++i) + data.args[i] = getNew(virtualRegisterForArgument(i))->recovery(); + for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) { + CachedRecovery* cachedRecovery { m_newRegisters[reg] }; + if (!cachedRecovery) + continue; + +#if USE(JSVALUE64) + data.registers[reg] = cachedRecovery->recovery(); +#else + RELEASE_ASSERT_NOT_REACHED(); +#endif + } + return data; + } + + // Ask the shuffler to put the callee into some registers once the + // shuffling is done. You should call this before any of the + // prepare() methods, and must not take a snapshot afterwards, as + // this would crash 32bits platforms. + void setCalleeJSValueRegs(JSValueRegs jsValueRegs) + { + ASSERT(isUndecided()); + ASSERT(!getNew(jsValueRegs)); + CachedRecovery* cachedRecovery { getNew(VirtualRegister(JSStack::Callee)) }; + ASSERT(cachedRecovery); + addNew(jsValueRegs, cachedRecovery->recovery()); + } + + // Ask the suhffler to assume the callee has already be checked to + // be a cell. This is a no-op on 64bit platforms, but allows to + // free up a GPR on 32bit platforms. + // You obviously must have ensured that this is the case before + // running any of the prepare methods. 
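A hypothetical call site, added purely to illustrate the public interface above; it is not taken from this patch. It assumes a check that the callee is a cell has already been emitted before relying on assumeCalleeIsCell(), and that 'jit', 'shuffleData' and 'calleeRegs' exist at the call site.

    // Hypothetical driver for a tail call using the interface declared here.
    void emitTailCallShuffle(CCallHelpers& jit, const CallFrameShuffleData& shuffleData, JSValueRegs calleeRegs)
    {
        CallFrameShuffler shuffler(jit, shuffleData);
        shuffler.setCalleeJSValueRegs(calleeRegs); // keep the callee in registers for the jump
        shuffler.assumeCalleeIsCell();             // may free a tag GPR on 32-bit targets
        shuffler.prepareForTailCall();             // builds the new frame over the old one
        // ... emit the branch to the callee's entry point here ...
    }
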
+ void assumeCalleeIsCell() + { +#if USE(JSVALUE32_64) + CachedRecovery& calleeCachedRecovery = *getNew(VirtualRegister(JSStack::Callee)); + switch (calleeCachedRecovery.recovery().technique()) { + case InPair: + updateRecovery( + calleeCachedRecovery, + ValueRecovery::inGPR( + calleeCachedRecovery.recovery().payloadGPR(), + DataFormatCell)); + break; + case DisplacedInJSStack: + updateRecovery( + calleeCachedRecovery, + ValueRecovery::displacedInJSStack( + calleeCachedRecovery.recovery().virtualRegister(), + DataFormatCell)); + break; + case InFPR: + case UnboxedCellInGPR: + case CellDisplacedInJSStack: + break; + case Constant: + ASSERT(calleeCachedRecovery.recovery().constant().isCell()); + break; + default: + RELEASE_ASSERT_NOT_REACHED(); + break; + } +#endif + } + + // This will emit code to build the new frame over the old one. + void prepareForTailCall(); + + // This will emit code to build the new frame as if performing a + // regular call. However, the callee save registers will be + // restored, and any locals (not the header or arguments) of the + // current frame can be overwritten. + // + // A frame built using prepareForSlowPath() should be used either + // to throw an exception in, or destroyed using + // CCallHelpers::prepareForTailCallSlow() followed by a tail call. + void prepareForSlowPath(); + +private: + static const bool verbose = false; + + CCallHelpers& m_jit; + + void prepareAny(); + + void spill(CachedRecovery&); + + // "box" is arguably a bad name here. The meaning is that after + // calling emitBox(), your ensure that subsequently calling + // emitStore() will be able to store the value without additional + // transformation. In particular, this is a no-op for constants, + // and is a complete no-op on 32bits since any unboxed value can + // still be stored by storing the payload and a statically known + // tag. 
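An editorial note with a standalone sketch, not code from this patch: on 64-bit, "boxing" means producing the NaN-boxed EncodedJSValue bit pattern so the value can be stored as a JSValue. The constants below reflect the editor's understanding of the JSVALUE64 scheme of this era and should be treated as an assumption.

    #include <cstdint>
    #include <cstring>

    // Assumed JSVALUE64 encoding: all boxed numbers carry the high tag bits;
    // doubles are offset by 2^48 to keep them out of the pointer range.
    static const uint64_t TagTypeNumber = 0xffff000000000000ull;
    static const uint64_t DoubleEncodeOffset = 1ull << 48;

    uint64_t boxInt32(int32_t value)
    {
        return TagTypeNumber | static_cast<uint32_t>(value);
    }

    uint64_t boxDouble(double value)
    {
        uint64_t bits;
        std::memcpy(&bits, &value, sizeof(bits));
        return bits + DoubleEncodeOffset;
    }

On 32-bit builds, as the comment above says, no bit manipulation is needed: the statically known tag word is simply stored alongside the payload.
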
+ void emitBox(CachedRecovery&); + + bool canBox(CachedRecovery& cachedRecovery) + { + if (cachedRecovery.boxingRequiresGPR() && getFreeGPR() == InvalidGPRReg) + return false; + + if (cachedRecovery.boxingRequiresFPR() && getFreeFPR() == InvalidFPRReg) + return false; + + return true; + } + + void ensureBox(CachedRecovery& cachedRecovery) + { + if (canBox(cachedRecovery)) + return; + + if (cachedRecovery.boxingRequiresGPR()) + ensureGPR(); + + if (cachedRecovery.boxingRequiresFPR()) + ensureFPR(); + } + + void emitLoad(CachedRecovery&); + + bool canLoad(CachedRecovery&); + + void ensureLoad(CachedRecovery& cachedRecovery) + { + if (canLoad(cachedRecovery)) + return; + + ASSERT(cachedRecovery.loadsIntoGPR() || cachedRecovery.loadsIntoFPR()); + + if (cachedRecovery.loadsIntoFPR()) { + if (cachedRecovery.loadsIntoGPR()) + ensureRegister(); + else + ensureFPR(); + } else + ensureGPR(); + } + + bool canLoadAndBox(CachedRecovery& cachedRecovery) + { + // We don't have interfering loads & boxes + ASSERT(!cachedRecovery.loadsIntoFPR() || !cachedRecovery.boxingRequiresFPR()); + ASSERT(!cachedRecovery.loadsIntoGPR() || !cachedRecovery.boxingRequiresGPR()); + + return canLoad(cachedRecovery) && canBox(cachedRecovery); + } + + DataFormat emitStore(CachedRecovery&, MacroAssembler::Address); + + void emitDisplace(CachedRecovery&); + + void emitDeltaCheck(); + + Bag m_cachedRecoveries; + + void updateRecovery(CachedRecovery& cachedRecovery, ValueRecovery recovery) + { + clearCachedRecovery(cachedRecovery.recovery()); + cachedRecovery.setRecovery(recovery); + setCachedRecovery(recovery, &cachedRecovery); + } + + CachedRecovery* getCachedRecovery(ValueRecovery); + + CachedRecovery* setCachedRecovery(ValueRecovery, CachedRecovery*); + + void clearCachedRecovery(ValueRecovery recovery) + { + if (!recovery.isConstant()) + setCachedRecovery(recovery, nullptr); + } + + CachedRecovery* addCachedRecovery(ValueRecovery recovery) + { + if (recovery.isConstant()) + return m_cachedRecoveries.add(recovery); + CachedRecovery* cachedRecovery = getCachedRecovery(recovery); + if (!cachedRecovery) + return setCachedRecovery(recovery, m_cachedRecoveries.add(recovery)); + return cachedRecovery; + } + + // This is the current recoveries present in the old frame's + // slots. A null CachedRecovery means we can trash the current + // value as we don't care about it. + Vector m_oldFrame; + + int numLocals() const + { + return m_oldFrame.size() - JSStack::CallerFrameAndPCSize; + } + + CachedRecovery* getOld(VirtualRegister reg) const + { + return m_oldFrame[JSStack::CallerFrameAndPCSize - reg.offset() - 1]; + } + + void setOld(VirtualRegister reg, CachedRecovery* cachedRecovery) + { + m_oldFrame[JSStack::CallerFrameAndPCSize - reg.offset() - 1] = cachedRecovery; + } + + VirtualRegister firstOld() const + { + return VirtualRegister { static_cast(-numLocals()) }; + } + + VirtualRegister lastOld() const + { + return VirtualRegister { JSStack::CallerFrameAndPCSize - 1 }; + } + + bool isValidOld(VirtualRegister reg) const + { + return reg >= firstOld() && reg <= lastOld(); + } + + bool m_didExtendFrame { false }; + + void extendFrameIfNeeded(); + + // This stores, for each slot in the new frame, information about + // the recovery for the value that should eventually go into that + // slot. + // + // Once the slot has been written, the corresponding entry in + // m_newFrame will be empty. 
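Illustrative only, not part of the imported sources: the invariant stated above, that a finished shuffle leaves every new-frame entry cleared, is what prepareAny() asserts at the end. As a plain-data check it amounts to the following; the vector of pointers is a hypothetical stand-in for m_newFrame.

    #include <vector>

    // Each element stands for one slot of the new frame; a non-null entry is
    // a value that still has to be written there.
    bool newFrameFullyWritten(const std::vector<const void*>& newFrame)
    {
        for (const void* pendingRecovery : newFrame) {
            if (pendingRecovery)
                return false; // some slot is still waiting for its value
        }
        return true;
    }
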
+ Vector m_newFrame; + + size_t argCount() const + { + return m_newFrame.size() - JSStack::CallFrameHeaderSize; + } + + CachedRecovery* getNew(VirtualRegister newRegister) const + { + return m_newFrame[newRegister.offset()]; + } + + void setNew(VirtualRegister newRegister, CachedRecovery* cachedRecovery) + { + m_newFrame[newRegister.offset()] = cachedRecovery; + } + + void addNew(VirtualRegister newRegister, ValueRecovery recovery) + { + CachedRecovery* cachedRecovery = addCachedRecovery(recovery); + cachedRecovery->addTarget(newRegister); + setNew(newRegister, cachedRecovery); + } + + VirtualRegister firstNew() const + { + return VirtualRegister { 0 }; + } + + VirtualRegister lastNew() const + { + return VirtualRegister { static_cast(m_newFrame.size()) - 1 }; + } + + bool isValidNew(VirtualRegister reg) const + { + return reg >= firstNew() && reg <= lastNew(); + } + + + int m_alignedOldFrameSize; + int m_alignedNewFrameSize; + + // This is the distance, in slots, between the base of the new + // frame and the base of the old frame. It could be negative when + // preparing for a tail call to a function with smaller argument + // count. + // + // We will overwrite this appropriately for slow path calls, but + // we initialize it as if doing a fast path for the spills we + // could do while undecided (typically while calling acquireGPR() + // for a polymorphic call). + int m_frameDelta; + + VirtualRegister newAsOld(VirtualRegister reg) const + { + return reg - m_frameDelta; + } + + // This stores the set of locked registers, i.e. registers for + // which we have an implicit requirement that they are not changed. + // + // This will usually contains the link register on architectures + // that have one, any scratch register used by the macro assembler + // (e.g. r11 on X86_64), as well as any register that we use for + // addressing (see m_oldFrameBase and m_newFrameBase). + // + // We also use this to lock registers temporarily, for instance to + // ensure that we have at least 2 available registers for loading + // a pair on 32bits. + mutable RegisterSet m_lockedRegisters; + + // This stores the current recoveries present in registers. A null + // CachedRecovery means we can trash the current value as we don't + // care about it. + RegisterMap m_registers; + +#if USE(JSVALUE64) + mutable GPRReg m_tagTypeNumber; + + bool tryAcquireTagTypeNumber(); +#endif + + // This stores, for each register, information about the recovery + // for the value that should eventually go into that register. The + // only registers that have a target recovery will be callee-save + // registers, as well as possibly one JSValueRegs for holding the + // callee. + // + // Once the correct value has been put into the registers, and + // contrary to what we do with m_newFrame, we keep the entry in + // m_newRegisters to simplify spilling. 
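An editorial restatement, not part of the imported code: the selection policy of getFreeRegister() just below, over plain arrays. Here 'locked', 'holdsValue' and 'wanted' are hypothetical stand-ins for m_lockedRegisters, m_registers and m_newRegisters.

    #include <cstddef>

    // Prefer a register that neither holds a live value nor is a wanted
    // target; failing that, fall back to an empty but wanted register, whose
    // final value can still be loaded later ("nonTemp" in the real code).
    int pickFreeRegister(const bool* locked, const bool* holdsValue, const bool* wanted, size_t count)
    {
        int fallback = -1;
        for (size_t i = 0; i < count; ++i) {
            if (locked[i] || holdsValue[i])
                continue;
            if (!wanted[i])
                return static_cast<int>(i); // ideal: completely free
            if (fallback < 0)
                fallback = static_cast<int>(i); // usable, but will need a reload
        }
        return fallback; // -1 means nothing is available
    }
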
+ RegisterMap m_newRegisters; + + template + Reg getFreeRegister(const CheckFunctor& check) const + { + Reg nonTemp { }; + for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) { + if (m_lockedRegisters.get(reg)) + continue; + + if (!check(reg)) + continue; + + if (!m_registers[reg]) { + if (!m_newRegisters[reg]) + return reg; + if (!nonTemp) + nonTemp = reg; + } + } + +#if USE(JSVALUE64) + if (!nonTemp && m_tagTypeNumber != InvalidGPRReg && check(Reg { m_tagTypeNumber })) { + ASSERT(m_lockedRegisters.get(m_tagTypeNumber)); + m_lockedRegisters.clear(m_tagTypeNumber); + nonTemp = Reg { m_tagTypeNumber }; + m_tagTypeNumber = InvalidGPRReg; + } +#endif + return nonTemp; + } + + GPRReg getFreeTempGPR() const + { + Reg freeTempGPR { getFreeRegister([this] (Reg reg) { return reg.isGPR() && !m_newRegisters[reg]; }) }; + if (!freeTempGPR) + return InvalidGPRReg; + return freeTempGPR.gpr(); + } + + GPRReg getFreeGPR() const + { + Reg freeGPR { getFreeRegister([] (Reg reg) { return reg.isGPR(); }) }; + if (!freeGPR) + return InvalidGPRReg; + return freeGPR.gpr(); + } + + FPRReg getFreeFPR() const + { + Reg freeFPR { getFreeRegister([] (Reg reg) { return reg.isFPR(); }) }; + if (!freeFPR) + return InvalidFPRReg; + return freeFPR.fpr(); + } + + bool hasFreeRegister() const + { + return static_cast(getFreeRegister([] (Reg) { return true; })); + } + + // This frees up a register satisfying the check functor (this + // functor could theoretically have any kind of logic, but it must + // ensure that it will only return true for registers - spill + // assumes and asserts that it is passed a cachedRecovery stored in a + // register). + template + void ensureRegister(const CheckFunctor& check) + { + // If we can spill a callee-save, that's best, because it will + // free up a register that would otherwise been taken for the + // longest amount of time. + // + // We could try to bias towards those that are not in their + // target registers yet, but the gain is probably super + // small. Unless you have a huge number of argument (at least + // around twice the number of available registers on your + // architecture), no spilling is going to take place anyways. + for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) { + if (m_lockedRegisters.get(reg)) + continue; + + CachedRecovery* cachedRecovery { m_newRegisters[reg] }; + if (!cachedRecovery) + continue; + + if (check(*cachedRecovery)) { + if (verbose) + dataLog(" ", cachedRecovery->recovery(), " looks like a good spill candidate\n"); + spill(*cachedRecovery); + return; + } + } + + // We use the cachedRecovery associated with the first new slot we + // can, because that is the one for which a write will be + // possible the latest, i.e. that is the one that we would + // have had to retain in registers for the longest. 
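For illustration only, not part of the imported sources: the two-tier spill-victim choice described above, reduced to its ordering. The containers are hypothetical; in the real code the first tier is the scan over m_newRegisters above, and the second tier is the loop that follows.

    #include <vector>

    // Prefer spilling a value destined for a callee-save register (it would
    // otherwise pin a register for the whole shuffle); otherwise spill the
    // value destined for the lowest new-frame slot, whose store we would have
    // had to delay the longest.
    int chooseSpillVictim(const std::vector<int>& wantedRegisterCandidates,
        const std::vector<int>& newSlotCandidates) // ordered from firstNew() upward
    {
        if (!wantedRegisterCandidates.empty())
            return wantedRegisterCandidates.front();
        if (!newSlotCandidates.empty())
            return newSlotCandidates.front();
        return -1; // the real code treats this as unreachable
    }
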
+ for (VirtualRegister reg = firstNew(); reg <= lastNew(); reg += 1) { + CachedRecovery* cachedRecovery { getNew(reg) }; + if (!cachedRecovery) + continue; + + if (check(*cachedRecovery)) { + spill(*cachedRecovery); + return; + } + } + + RELEASE_ASSERT_NOT_REACHED(); + } + + void ensureRegister() + { + if (hasFreeRegister()) + return; + + if (verbose) + dataLog(" Finding a register to spill\n"); + ensureRegister( + [this] (const CachedRecovery& cachedRecovery) { + if (cachedRecovery.recovery().isInGPR()) + return !m_lockedRegisters.get(cachedRecovery.recovery().gpr()); + if (cachedRecovery.recovery().isInFPR()) + return !m_lockedRegisters.get(cachedRecovery.recovery().fpr()); +#if USE(JSVALUE32_64) + if (cachedRecovery.recovery().technique() == InPair) { + return !m_lockedRegisters.get(cachedRecovery.recovery().tagGPR()) + && !m_lockedRegisters.get(cachedRecovery.recovery().payloadGPR()); + } +#endif + return false; + }); + } + + void ensureTempGPR() + { + if (getFreeTempGPR() != InvalidGPRReg) + return; + + if (verbose) + dataLog(" Finding a temp GPR to spill\n"); + ensureRegister( + [this] (const CachedRecovery& cachedRecovery) { + if (cachedRecovery.recovery().isInGPR()) { + return !m_lockedRegisters.get(cachedRecovery.recovery().gpr()) + && !m_newRegisters[cachedRecovery.recovery().gpr()]; + } +#if USE(JSVALUE32_64) + if (cachedRecovery.recovery().technique() == InPair) { + return !m_lockedRegisters.get(cachedRecovery.recovery().tagGPR()) + && !m_lockedRegisters.get(cachedRecovery.recovery().payloadGPR()) + && !m_newRegisters[cachedRecovery.recovery().tagGPR()] + && !m_newRegisters[cachedRecovery.recovery().payloadGPR()]; + } +#endif + return false; + }); + } + + void ensureGPR() + { + if (getFreeGPR() != InvalidGPRReg) + return; + + if (verbose) + dataLog(" Finding a GPR to spill\n"); + ensureRegister( + [this] (const CachedRecovery& cachedRecovery) { + if (cachedRecovery.recovery().isInGPR()) + return !m_lockedRegisters.get(cachedRecovery.recovery().gpr()); +#if USE(JSVALUE32_64) + if (cachedRecovery.recovery().technique() == InPair) { + return !m_lockedRegisters.get(cachedRecovery.recovery().tagGPR()) + && !m_lockedRegisters.get(cachedRecovery.recovery().payloadGPR()); + } +#endif + return false; + }); + } + + void ensureFPR() + { + if (getFreeFPR() != InvalidFPRReg) + return; + + if (verbose) + dataLog(" Finding an FPR to spill\n"); + ensureRegister( + [this] (const CachedRecovery& cachedRecovery) { + if (cachedRecovery.recovery().isInFPR()) + return !m_lockedRegisters.get(cachedRecovery.recovery().fpr()); + return false; + }); + } + + CachedRecovery* getNew(JSValueRegs jsValueRegs) const + { +#if USE(JSVALUE64) + return m_newRegisters[jsValueRegs.gpr()]; +#else + ASSERT( + jsValueRegs.tagGPR() == InvalidGPRReg || jsValueRegs.payloadGPR() == InvalidGPRReg + || m_newRegisters[jsValueRegs.payloadGPR()] == m_newRegisters[jsValueRegs.tagGPR()]); + if (jsValueRegs.payloadGPR() == InvalidGPRReg) + return m_newRegisters[jsValueRegs.tagGPR()]; + return m_newRegisters[jsValueRegs.payloadGPR()]; +#endif + } + + void addNew(JSValueRegs jsValueRegs, ValueRecovery recovery) + { + ASSERT(jsValueRegs && !getNew(jsValueRegs)); + CachedRecovery* cachedRecovery = addCachedRecovery(recovery); +#if USE(JSVALUE64) + if (cachedRecovery->wantedJSValueRegs()) + m_newRegisters[cachedRecovery->wantedJSValueRegs().gpr()] = nullptr; + m_newRegisters[jsValueRegs.gpr()] = cachedRecovery; +#else + if (JSValueRegs oldRegs { cachedRecovery->wantedJSValueRegs() }) { + if (oldRegs.payloadGPR()) + 
m_newRegisters[oldRegs.payloadGPR()] = nullptr; + if (oldRegs.tagGPR()) + m_newRegisters[oldRegs.tagGPR()] = nullptr; + } + if (jsValueRegs.payloadGPR() != InvalidGPRReg) + m_newRegisters[jsValueRegs.payloadGPR()] = cachedRecovery; + if (jsValueRegs.tagGPR() != InvalidGPRReg) + m_newRegisters[jsValueRegs.tagGPR()] = cachedRecovery; +#endif + ASSERT(!cachedRecovery->wantedJSValueRegs()); + cachedRecovery->setWantedJSValueRegs(jsValueRegs); + } + + void addNew(FPRReg fpr, ValueRecovery recovery) + { + ASSERT(fpr != InvalidFPRReg && !m_newRegisters[fpr]); + CachedRecovery* cachedRecovery = addCachedRecovery(recovery); + m_newRegisters[fpr] = cachedRecovery; + ASSERT(cachedRecovery->wantedFPR() == InvalidFPRReg); + cachedRecovery->setWantedFPR(fpr); + } + + // m_oldFrameBase is the register relative to which we access + // slots in the old call frame, with an additional offset of + // m_oldFrameOffset. + // + // - For an actual tail call, m_oldFrameBase is the stack + // pointer, and m_oldFrameOffset is the number of locals of the + // tail caller's frame. We use such stack pointer-based + // addressing because it allows us to load the tail caller's + // caller's frame pointer in the frame pointer register + // immediately instead of awkwardly keeping it around on the + // stack. + // + // - For a slow path call, m_oldFrameBase is just the frame + // pointer, and m_oldFrameOffset is 0. + GPRReg m_oldFrameBase { MacroAssembler::framePointerRegister }; + int m_oldFrameOffset { 0 }; + + MacroAssembler::Address addressForOld(VirtualRegister reg) const + { + return MacroAssembler::Address(m_oldFrameBase, + (m_oldFrameOffset + reg.offset()) * sizeof(Register)); + } + + // m_newFrameBase is the register relative to which we access + // slots in the new call frame, and we always make it point to + // wherever the stack pointer will be right before making the + // actual call/jump. The actual base of the new frame is at offset + // m_newFrameOffset relative to m_newFrameBase. + // + // - For an actual tail call, m_newFrameBase is computed + // dynamically, and m_newFrameOffset varies between 0 and -2 + // depending on the architecture's calling convention (see + // prepareForTailCall). + // + // - For a slow path call, m_newFrameBase is the actual stack + // pointer, and m_newFrameOffset is - CallerFrameAndPCSize, + // following the convention for a regular call. + GPRReg m_newFrameBase { InvalidGPRReg }; + int m_newFrameOffset { 0}; + + bool isUndecided() const + { + return m_newFrameBase == InvalidGPRReg; + } + + bool isSlowPath() const + { + return m_newFrameBase == MacroAssembler::stackPointerRegister; + } + + MacroAssembler::Address addressForNew(VirtualRegister reg) const + { + return MacroAssembler::Address(m_newFrameBase, + (m_newFrameOffset + reg.offset()) * sizeof(Register)); + } + + // We use a concept of "danger zone". The danger zone consists of + // all the writes in the new frame that could overlap with reads + // in the old frame. + // + // Because we could have a higher actual number of arguments than + // parameters, when preparing a tail call, we need to assume that + // writing to a slot on the new frame could overlap not only with + // the corresponding slot in the old frame, but also with any slot + // above it. Thus, the danger zone consists of all writes between + // the first write and what I call the "danger frontier": the + // highest slot in the old frame we still care about. 
Thus, the + // danger zone contains all the slots between the first slot of + // the new frame and the danger frontier. Because the danger + // frontier is related to the new frame, it is stored as a virtual + // register *in the new frame*. + VirtualRegister m_dangerFrontier; + + VirtualRegister dangerFrontier() const + { + ASSERT(!isUndecided()); + + return m_dangerFrontier; + } + + bool isDangerNew(VirtualRegister reg) const + { + ASSERT(!isUndecided() && isValidNew(reg)); + return reg <= dangerFrontier(); + } + + void updateDangerFrontier() + { + ASSERT(!isUndecided()); + + m_dangerFrontier = firstNew() - 1; + for (VirtualRegister reg = lastNew(); reg >= firstNew(); reg -= 1) { + if (!getNew(reg) || !isValidOld(newAsOld(reg)) || !getOld(newAsOld(reg))) + continue; + + m_dangerFrontier = reg; + if (verbose) + dataLog(" Danger frontier now at NEW ", m_dangerFrontier, "\n"); + break; + } + if (verbose) + dataLog(" All clear! Danger zone is empty.\n"); + } + + // A safe write is a write that never writes into the danger zone. + bool hasOnlySafeWrites(CachedRecovery& cachedRecovery) const + { + for (VirtualRegister target : cachedRecovery.targets()) { + if (isDangerNew(target)) + return false; + } + return true; + } + + // You must ensure that there is no dangerous writes before + // calling this function. + bool tryWrites(CachedRecovery&); + + // This function tries to ensure that there is no longer any + // possible safe write, i.e. all remaining writes are either to + // the danger zone or callee save restorations. + // + // It returns false if it was unable to perform some safe writes + // due to high register pressure. + bool performSafeWrites(); +}; + +} // namespace JSC + +#endif // ENABLE(JIT) + +#endif // CallFrameShuffler_h diff --git a/Source/JavaScriptCore/jit/CallFrameShuffler32_64.cpp b/Source/JavaScriptCore/jit/CallFrameShuffler32_64.cpp new file mode 100644 index 000000000..5dfe96e81 --- /dev/null +++ b/Source/JavaScriptCore/jit/CallFrameShuffler32_64.cpp @@ -0,0 +1,305 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "CallFrameShuffler.h" + +#if ENABLE(JIT) && USE(JSVALUE32_64) + +#include "CCallHelpers.h" +#include "DataFormat.h" +#include "JSCInlines.h" + +namespace JSC { + +DataFormat CallFrameShuffler::emitStore(CachedRecovery& location, MacroAssembler::Address address) +{ + ASSERT(!location.recovery().isInJSStack()); + + switch (location.recovery().technique()) { + case UnboxedInt32InGPR: + m_jit.store32(MacroAssembler::TrustedImm32(JSValue::Int32Tag), + address.withOffset(TagOffset)); + m_jit.store32(location.recovery().gpr(), address.withOffset(PayloadOffset)); + return DataFormatInt32; + case UnboxedCellInGPR: + m_jit.store32(MacroAssembler::TrustedImm32(JSValue::CellTag), + address.withOffset(TagOffset)); + m_jit.store32(location.recovery().gpr(), address.withOffset(PayloadOffset)); + return DataFormatCell; + case Constant: + m_jit.storeTrustedValue(location.recovery().constant(), address); + return DataFormatJS; + case InPair: + m_jit.storeValue(location.recovery().jsValueRegs(), address); + return DataFormatJS; + case UnboxedBooleanInGPR: + m_jit.store32(MacroAssembler::TrustedImm32(JSValue::BooleanTag), + address.withOffset(TagOffset)); + m_jit.store32(location.recovery().gpr(), address.withOffset(PayloadOffset)); + return DataFormatBoolean; + case InFPR: + case UnboxedDoubleInFPR: + m_jit.storeDouble(location.recovery().fpr(), address); + return DataFormatJS; + default: + RELEASE_ASSERT_NOT_REACHED(); + } +} + +void CallFrameShuffler::emitBox(CachedRecovery& location) +{ + // Nothing to do, we're good! JSValues and doubles can be stored + // immediately, and other formats don't need any transformation - + // just storing a constant tag separately. + ASSERT_UNUSED(location, canBox(location)); +} + +void CallFrameShuffler::emitLoad(CachedRecovery& location) +{ + if (!location.recovery().isInJSStack()) + return; + + if (verbose) + dataLog(" * Loading ", location.recovery(), " into "); + VirtualRegister reg { location.recovery().virtualRegister() }; + MacroAssembler::Address address { addressForOld(reg) }; + + bool tryFPR { true }; + JSValueRegs wantedJSValueRegs { location.wantedJSValueRegs() }; + if (wantedJSValueRegs) { + if (wantedJSValueRegs.payloadGPR() != InvalidGPRReg + && !m_registers[wantedJSValueRegs.payloadGPR()] + && !m_lockedRegisters.get(wantedJSValueRegs.payloadGPR())) + tryFPR = false; + if (wantedJSValueRegs.tagGPR() != InvalidGPRReg + && !m_registers[wantedJSValueRegs.tagGPR()] + && !m_lockedRegisters.get(wantedJSValueRegs.tagGPR())) + tryFPR = false; + } + + if (tryFPR && location.loadsIntoFPR()) { + FPRReg resultFPR = location.wantedFPR(); + if (resultFPR == InvalidFPRReg || m_registers[resultFPR] || m_lockedRegisters.get(resultFPR)) + resultFPR = getFreeFPR(); + if (resultFPR != InvalidFPRReg) { + m_jit.loadDouble(address, resultFPR); + DataFormat dataFormat = DataFormatJS; + if (location.recovery().dataFormat() == DataFormatDouble) + dataFormat = DataFormatDouble; + updateRecovery(location, + ValueRecovery::inFPR(resultFPR, dataFormat)); + if (verbose) + dataLog(location.recovery(), "\n"); + if (reg == newAsOld(dangerFrontier())) + updateDangerFrontier(); + return; + } + } + + if (location.loadsIntoGPR()) { + GPRReg resultGPR { wantedJSValueRegs.payloadGPR() }; + if (resultGPR == InvalidGPRReg || m_registers[resultGPR] || m_lockedRegisters.get(resultGPR)) + resultGPR = getFreeGPR(); + ASSERT(resultGPR != InvalidGPRReg); + m_jit.loadPtr(address.withOffset(PayloadOffset), resultGPR); + updateRecovery(location, + 
ValueRecovery::inGPR(resultGPR, location.recovery().dataFormat())); + if (verbose) + dataLog(location.recovery(), "\n"); + if (reg == newAsOld(dangerFrontier())) + updateDangerFrontier(); + return; + } + + ASSERT(location.recovery().technique() == DisplacedInJSStack); + GPRReg payloadGPR { wantedJSValueRegs.payloadGPR() }; + GPRReg tagGPR { wantedJSValueRegs.tagGPR() }; + if (payloadGPR == InvalidGPRReg || m_registers[payloadGPR] || m_lockedRegisters.get(payloadGPR)) + payloadGPR = getFreeGPR(); + m_lockedRegisters.set(payloadGPR); + if (tagGPR == InvalidGPRReg || m_registers[tagGPR] || m_lockedRegisters.get(tagGPR)) + tagGPR = getFreeGPR(); + m_lockedRegisters.clear(payloadGPR); + ASSERT(payloadGPR != InvalidGPRReg && tagGPR != InvalidGPRReg && tagGPR != payloadGPR); + m_jit.loadPtr(address.withOffset(PayloadOffset), payloadGPR); + m_jit.loadPtr(address.withOffset(TagOffset), tagGPR); + updateRecovery(location, + ValueRecovery::inPair(tagGPR, payloadGPR)); + if (verbose) + dataLog(location.recovery(), "\n"); + if (reg == newAsOld(dangerFrontier())) + updateDangerFrontier(); +} + +bool CallFrameShuffler::canLoad(CachedRecovery& location) +{ + if (!location.recovery().isInJSStack()) + return true; + + if (location.loadsIntoFPR() && getFreeFPR() != InvalidFPRReg) + return true; + + if (location.loadsIntoGPR() && getFreeGPR() != InvalidGPRReg) + return true; + + if (location.recovery().technique() == DisplacedInJSStack) { + GPRReg payloadGPR { getFreeGPR() }; + if (payloadGPR == InvalidGPRReg) + return false; + m_lockedRegisters.set(payloadGPR); + GPRReg tagGPR { getFreeGPR() }; + m_lockedRegisters.clear(payloadGPR); + return tagGPR != InvalidGPRReg; + } + + return false; +} + +void CallFrameShuffler::emitDisplace(CachedRecovery& location) +{ + ASSERT(location.recovery().isInRegisters()); + JSValueRegs wantedJSValueRegs { location.wantedJSValueRegs() }; + ASSERT(wantedJSValueRegs); // We don't support wanted FPRs on 32bit platforms + + GPRReg wantedTagGPR { wantedJSValueRegs.tagGPR() }; + GPRReg wantedPayloadGPR { wantedJSValueRegs.payloadGPR() }; + + if (wantedTagGPR != InvalidGPRReg) { + ASSERT(!m_lockedRegisters.get(wantedTagGPR)); + if (CachedRecovery* currentTag { m_registers[wantedTagGPR] }) { + if (currentTag == &location) { + if (verbose) + dataLog(" + ", wantedTagGPR, " is OK\n"); + } else { + // This can never happen on 32bit platforms since we + // have at most one wanted JSValueRegs, for the + // callee, and no callee-save registers. 
+ RELEASE_ASSERT_NOT_REACHED(); + } + } + } + + if (wantedPayloadGPR != InvalidGPRReg) { + ASSERT(!m_lockedRegisters.get(wantedPayloadGPR)); + if (CachedRecovery* currentPayload { m_registers[wantedPayloadGPR] }) { + if (currentPayload == &location) { + if (verbose) + dataLog(" + ", wantedPayloadGPR, " is OK\n"); + } else { + // See above + RELEASE_ASSERT_NOT_REACHED(); + } + } + } + + if (location.recovery().technique() == InPair + || location.recovery().isInGPR()) { + GPRReg payloadGPR; + if (location.recovery().technique() == InPair) + payloadGPR = location.recovery().payloadGPR(); + else + payloadGPR = location.recovery().gpr(); + + if (wantedPayloadGPR == InvalidGPRReg) + wantedPayloadGPR = payloadGPR; + + if (payloadGPR != wantedPayloadGPR) { + if (location.recovery().technique() == InPair + && wantedPayloadGPR == location.recovery().tagGPR()) { + if (verbose) + dataLog(" * Swapping ", payloadGPR, " and ", wantedPayloadGPR, "\n"); + m_jit.swap(payloadGPR, wantedPayloadGPR); + updateRecovery(location, + ValueRecovery::inPair(payloadGPR, wantedPayloadGPR)); + } else { + if (verbose) + dataLog(" * Moving ", payloadGPR, " into ", wantedPayloadGPR, "\n"); + m_jit.move(payloadGPR, wantedPayloadGPR); + if (location.recovery().technique() == InPair) { + updateRecovery(location, + ValueRecovery::inPair(location.recovery().tagGPR(), + wantedPayloadGPR)); + } else { + updateRecovery(location, + ValueRecovery::inGPR(wantedPayloadGPR, location.recovery().dataFormat())); + } + } + } + + if (wantedTagGPR == InvalidGPRReg) + wantedTagGPR = getFreeGPR(); + switch (location.recovery().dataFormat()) { + case DataFormatInt32: + if (verbose) + dataLog(" * Moving int32 tag into ", wantedTagGPR, "\n"); + m_jit.move(MacroAssembler::TrustedImm32(JSValue::Int32Tag), + wantedTagGPR); + break; + case DataFormatCell: + if (verbose) + dataLog(" * Moving cell tag into ", wantedTagGPR, "\n"); + m_jit.move(MacroAssembler::TrustedImm32(JSValue::CellTag), + wantedTagGPR); + break; + case DataFormatBoolean: + if (verbose) + dataLog(" * Moving boolean tag into ", wantedTagGPR, "\n"); + m_jit.move(MacroAssembler::TrustedImm32(JSValue::BooleanTag), + wantedTagGPR); + break; + case DataFormatJS: + ASSERT(wantedTagGPR != location.recovery().payloadGPR()); + if (wantedTagGPR != location.recovery().tagGPR()) { + if (verbose) + dataLog(" * Moving ", location.recovery().tagGPR(), " into ", wantedTagGPR, "\n"); + m_jit.move(location.recovery().tagGPR(), wantedTagGPR); + } + break; + + default: + RELEASE_ASSERT_NOT_REACHED(); + } + } else { + ASSERT(location.recovery().isInFPR()); + if (wantedTagGPR == InvalidGPRReg) { + ASSERT(wantedPayloadGPR != InvalidGPRReg); + m_lockedRegisters.set(wantedPayloadGPR); + wantedTagGPR = getFreeGPR(); + m_lockedRegisters.clear(wantedPayloadGPR); + } + if (wantedPayloadGPR == InvalidGPRReg) { + m_lockedRegisters.set(wantedTagGPR); + wantedPayloadGPR = getFreeGPR(); + m_lockedRegisters.clear(wantedTagGPR); + } + m_jit.boxDouble(location.recovery().fpr(), wantedTagGPR, wantedPayloadGPR); + } + updateRecovery(location, ValueRecovery::inPair(wantedTagGPR, wantedPayloadGPR)); +} + +} // namespace JSC + +#endif // ENABLE(JIT) && USE(JSVALUE32_64) diff --git a/Source/JavaScriptCore/jit/CallFrameShuffler64.cpp b/Source/JavaScriptCore/jit/CallFrameShuffler64.cpp new file mode 100644 index 000000000..2ef6ed111 --- /dev/null +++ b/Source/JavaScriptCore/jit/CallFrameShuffler64.cpp @@ -0,0 +1,369 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. 
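The 32-bit shuffler above leans entirely on the tag/payload split of the JSVALUE32_64 representation. Here is a small standalone sketch of that layout; the Slot struct, the tag constants and both helper functions are illustrative stand-ins (the real tag values are defined elsewhere in JSC and a little-endian layout is assumed). The point is only that storing an unboxed value is one payload store plus one constant tag store, and boxing a double fills both halves with the raw bit pattern.

#include <cstdint>
#include <cstring>

// Illustrative model of a 32_64 stack slot: payload word at offset 0, tag word
// at offset 4, matching the PayloadOffset/TagOffset stores in emitStore().
struct Slot {
    int32_t payload;
    int32_t tag;
};

enum : int32_t { Int32Tag = -1, BooleanTag = -2, CellTag = -3 }; // placeholder values

void storeUnboxedInt32(Slot& slot, int32_t value)
{
    slot.payload = value;  // store32(gpr, address + PayloadOffset)
    slot.tag = Int32Tag;   // store32(TrustedImm32(Int32Tag), address + TagOffset)
}

void boxDoubleIntoPair(double value, int32_t& tag, int32_t& payload)
{
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof(bits));
    payload = static_cast<int32_t>(bits);   // low word
    tag = static_cast<int32_t>(bits >> 32); // high word
}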
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "CallFrameShuffler.h" + +#if ENABLE(JIT) && USE(JSVALUE64) + +#include "CCallHelpers.h" +#include "DataFormat.h" +#include "JSCInlines.h" + +namespace JSC { + +DataFormat CallFrameShuffler::emitStore( + CachedRecovery& cachedRecovery, MacroAssembler::Address address) +{ + ASSERT(!cachedRecovery.recovery().isInJSStack()); + + switch (cachedRecovery.recovery().technique()) { + case InGPR: + m_jit.storePtr(cachedRecovery.recovery().gpr(), address); + return DataFormatJS; + case UnboxedInt32InGPR: + m_jit.store32(cachedRecovery.recovery().gpr(), address.withOffset(PayloadOffset)); + return DataFormatInt32; + case UnboxedInt52InGPR: + m_jit.rshift64(MacroAssembler::TrustedImm32(JSValue::int52ShiftAmount), + cachedRecovery.recovery().gpr()); + FALLTHROUGH; + case UnboxedStrictInt52InGPR: + m_jit.storePtr(cachedRecovery.recovery().gpr(), address); + return DataFormatStrictInt52; + case UnboxedBooleanInGPR: + m_jit.storePtr(cachedRecovery.recovery().gpr(), address); + return DataFormatBoolean; + case UnboxedCellInGPR: + m_jit.storePtr(cachedRecovery.recovery().gpr(), address); + return DataFormatCell; + case UnboxedDoubleInFPR: + m_jit.storeDouble(cachedRecovery.recovery().fpr(), address); + return DataFormatDouble; + case InFPR: + m_jit.storeDouble(cachedRecovery.recovery().fpr(), address); + return DataFormatJS; + case Constant: + m_jit.storeTrustedValue(cachedRecovery.recovery().constant(), address); + return DataFormatJS; + default: + RELEASE_ASSERT_NOT_REACHED(); + } +} + +void CallFrameShuffler::emitBox(CachedRecovery& cachedRecovery) +{ + ASSERT(canBox(cachedRecovery)); + if (cachedRecovery.recovery().isConstant()) + return; + + if (cachedRecovery.recovery().isInGPR()) { + switch (cachedRecovery.recovery().dataFormat()) { + case DataFormatInt32: + if (verbose) + dataLog(" * Boxing ", cachedRecovery.recovery()); + m_jit.zeroExtend32ToPtr( + cachedRecovery.recovery().gpr(), + cachedRecovery.recovery().gpr()); + m_lockedRegisters.set(cachedRecovery.recovery().gpr()); + if (tryAcquireTagTypeNumber()) + m_jit.or64(m_tagTypeNumber, cachedRecovery.recovery().gpr()); + else { + // We have to do this the hard way + m_jit.or64(MacroAssembler::TrustedImm64(TagTypeNumber), + cachedRecovery.recovery().gpr()); 
+ } + m_lockedRegisters.clear(cachedRecovery.recovery().gpr()); + cachedRecovery.setRecovery( + ValueRecovery::inGPR(cachedRecovery.recovery().gpr(), DataFormatJS)); + if (verbose) + dataLog(" into ", cachedRecovery.recovery(), "\n"); + return; + case DataFormatInt52: + if (verbose) + dataLog(" * Boxing ", cachedRecovery.recovery()); + m_jit.rshift64(MacroAssembler::TrustedImm32(JSValue::int52ShiftAmount), + cachedRecovery.recovery().gpr()); + cachedRecovery.setRecovery( + ValueRecovery::inGPR(cachedRecovery.recovery().gpr(), DataFormatStrictInt52)); + if (verbose) + dataLog(" into ", cachedRecovery.recovery(), "\n"); + FALLTHROUGH; + case DataFormatStrictInt52: { + if (verbose) + dataLog(" * Boxing ", cachedRecovery.recovery()); + FPRReg resultFPR = getFreeFPR(); + ASSERT(resultFPR != InvalidFPRReg); + m_jit.convertInt64ToDouble(cachedRecovery.recovery().gpr(), resultFPR); + updateRecovery(cachedRecovery, ValueRecovery::inFPR(resultFPR, DataFormatDouble)); + if (verbose) + dataLog(" into ", cachedRecovery.recovery(), "\n"); + break; + } + case DataFormatBoolean: + if (verbose) + dataLog(" * Boxing ", cachedRecovery.recovery()); + m_jit.add32(MacroAssembler::TrustedImm32(ValueFalse), + cachedRecovery.recovery().gpr()); + cachedRecovery.setRecovery( + ValueRecovery::inGPR(cachedRecovery.recovery().gpr(), DataFormatJS)); + if (verbose) + dataLog(" into ", cachedRecovery.recovery(), "\n"); + return; + default: + return; + } + } + + if (cachedRecovery.recovery().isInFPR()) { + if (cachedRecovery.recovery().dataFormat() == DataFormatDouble) { + if (verbose) + dataLog(" * Boxing ", cachedRecovery.recovery()); + GPRReg resultGPR = cachedRecovery.wantedJSValueRegs().gpr(); + if (resultGPR == InvalidGPRReg || m_registers[resultGPR]) + resultGPR = getFreeGPR(); + ASSERT(resultGPR != InvalidGPRReg); + m_jit.purifyNaN(cachedRecovery.recovery().fpr()); + m_jit.moveDoubleTo64(cachedRecovery.recovery().fpr(), resultGPR); + m_lockedRegisters.set(resultGPR); + if (tryAcquireTagTypeNumber()) + m_jit.sub64(m_tagTypeNumber, resultGPR); + else + m_jit.sub64(MacroAssembler::TrustedImm64(TagTypeNumber), resultGPR); + m_lockedRegisters.clear(resultGPR); + updateRecovery(cachedRecovery, ValueRecovery::inGPR(resultGPR, DataFormatJS)); + if (verbose) + dataLog(" into ", cachedRecovery.recovery(), "\n"); + return; + } + ASSERT(cachedRecovery.recovery().dataFormat() == DataFormatJS); + return; + } + + RELEASE_ASSERT_NOT_REACHED(); +} + +void CallFrameShuffler::emitLoad(CachedRecovery& cachedRecovery) +{ + if (!cachedRecovery.recovery().isInJSStack()) + return; + + if (verbose) + dataLog(" * Loading ", cachedRecovery.recovery(), " into "); + + VirtualRegister reg = cachedRecovery.recovery().virtualRegister(); + MacroAssembler::Address address { addressForOld(reg) }; + bool tryFPR { true }; + GPRReg resultGPR { cachedRecovery.wantedJSValueRegs().gpr() }; + + // If we want a GPR and it's available, that's better than loading + // into an FPR. 
+ if (resultGPR != InvalidGPRReg && !m_registers[resultGPR] + && !m_lockedRegisters.get(resultGPR) && cachedRecovery.loadsIntoGPR()) + tryFPR = false; + + // Otherwise, we prefer loading into FPRs if possible + if (tryFPR && cachedRecovery.loadsIntoFPR()) { + FPRReg resultFPR { cachedRecovery.wantedFPR() }; + if (resultFPR == InvalidFPRReg || m_registers[resultFPR] || m_lockedRegisters.get(resultFPR)) + resultFPR = getFreeFPR(); + if (resultFPR != InvalidFPRReg) { + m_jit.loadDouble(address, resultFPR); + DataFormat dataFormat = DataFormatJS; + // We could be transforming a DataFormatCell into a + // DataFormatJS here - but that's OK. + if (cachedRecovery.recovery().dataFormat() == DataFormatDouble) + dataFormat = DataFormatDouble; + updateRecovery(cachedRecovery, + ValueRecovery::inFPR(resultFPR, dataFormat)); + if (verbose) + dataLog(cachedRecovery.recovery(), "\n"); + if (reg == newAsOld(dangerFrontier())) + updateDangerFrontier(); + return; + } + } + + ASSERT(cachedRecovery.loadsIntoGPR()); + if (resultGPR == InvalidGPRReg || m_registers[resultGPR] || m_lockedRegisters.get(resultGPR)) + resultGPR = getFreeGPR(); + ASSERT(resultGPR != InvalidGPRReg); + m_jit.loadPtr(address, resultGPR); + updateRecovery(cachedRecovery, + ValueRecovery::inGPR(resultGPR, cachedRecovery.recovery().dataFormat())); + if (verbose) + dataLog(cachedRecovery.recovery(), "\n"); + if (reg == newAsOld(dangerFrontier())) + updateDangerFrontier(); +} + +bool CallFrameShuffler::canLoad(CachedRecovery& cachedRecovery) +{ + if (!cachedRecovery.recovery().isInJSStack()) + return true; + + ASSERT(cachedRecovery.loadsIntoFPR() || cachedRecovery.loadsIntoGPR()); + + if (cachedRecovery.loadsIntoFPR() && getFreeFPR() != InvalidFPRReg) + return true; + + if (cachedRecovery.loadsIntoGPR() && getFreeGPR() != InvalidGPRReg) + return true; + + return false; +} + +void CallFrameShuffler::emitDisplace(CachedRecovery& cachedRecovery) +{ + Reg wantedReg; + if (!(wantedReg = Reg { cachedRecovery.wantedJSValueRegs().gpr() })) + wantedReg = Reg { cachedRecovery.wantedFPR() }; + ASSERT(wantedReg); + ASSERT(!m_lockedRegisters.get(wantedReg)); + + if (CachedRecovery* current = m_registers[wantedReg]) { + if (current == &cachedRecovery) { + if (verbose) + dataLog(" + ", wantedReg, " is OK\n"); + return; + } + // We could do a more complex thing by finding cycles + // etc. in that case. + // However, ending up in this situation will be super + // rare, and should actually be outright impossible for + // non-FTL tiers, since: + // (a) All doubles have been converted into JSValues with + // ValueRep nodes, so FPRs are initially free + // + // (b) The only recoveries with wanted registers are the + // callee (which always starts out in a register) and + // the callee-save registers + // + // (c) The callee-save registers are the first things we + // load (after the return PC), and they are loaded as JSValues + // + // (d) We prefer loading JSValues into FPRs if their + // wanted GPR is not available + // + // (e) If we end up spilling some registers with a + // target, we won't load them again before the very + // end of the algorithm + // + // Combined, this means that we will never load a recovery + // with a wanted GPR into any GPR other than its wanted + // GPR. The callee could however have been initially in + // one of the callee-save registers - but since the wanted + // GPR for the callee is always regT0, it will be the + // first one to be displaced, and we won't see it when + // handling any of the callee-save registers. 
+ // + // Thus, the only way we could ever reach this path is in + // the FTL, when there is so much pressure that we + // absolutely need to load the callee-save registers into + // different GPRs initially but not enough pressure to + // then have to spill all of them. And even in that case, + // depending on the order in which B3 saves the + // callee-saves, we will probably still be safe. Anyway, + // the couple extra move instructions compared to an + // efficient cycle-based algorithm are not going to hurt + // us. + if (wantedReg.isFPR()) { + FPRReg tempFPR = getFreeFPR(); + if (verbose) + dataLog(" * Moving ", wantedReg, " into ", tempFPR, "\n"); + m_jit.moveDouble(wantedReg.fpr(), tempFPR); + updateRecovery(*current, + ValueRecovery::inFPR(tempFPR, current->recovery().dataFormat())); + } else { + GPRReg tempGPR = getFreeGPR(); + if (verbose) + dataLog(" * Moving ", wantedReg.gpr(), " into ", tempGPR, "\n"); + m_jit.move(wantedReg.gpr(), tempGPR); + updateRecovery(*current, + ValueRecovery::inGPR(tempGPR, current->recovery().dataFormat())); + } + } + ASSERT(!m_registers[wantedReg]); + + if (cachedRecovery.recovery().isConstant()) { + // We only care about callee saves for wanted FPRs, and those are never constants + ASSERT(wantedReg.isGPR()); + if (verbose) + dataLog(" * Loading ", cachedRecovery.recovery().constant(), " into ", wantedReg, "\n"); + m_jit.moveTrustedValue(cachedRecovery.recovery().constant(), JSValueRegs { wantedReg.gpr() }); + updateRecovery( + cachedRecovery, + ValueRecovery::inRegister(wantedReg, DataFormatJS)); + } else if (cachedRecovery.recovery().isInGPR()) { + if (verbose) + dataLog(" * Moving ", cachedRecovery.recovery(), " into ", wantedReg, "\n"); + if (wantedReg.isGPR()) + m_jit.move(cachedRecovery.recovery().gpr(), wantedReg.gpr()); + else + m_jit.move64ToDouble(cachedRecovery.recovery().gpr(), wantedReg.fpr()); + RELEASE_ASSERT(cachedRecovery.recovery().dataFormat() == DataFormatJS); + updateRecovery(cachedRecovery, + ValueRecovery::inRegister(wantedReg, DataFormatJS)); + } else { + ASSERT(cachedRecovery.recovery().isInFPR()); + if (cachedRecovery.recovery().dataFormat() == DataFormatDouble) { + // We only care about callee saves for wanted FPRs, and those are always DataFormatJS + ASSERT(wantedReg.isGPR()); + // This will automatically pick the wanted GPR + emitBox(cachedRecovery); + } else { + if (verbose) + dataLog(" * Moving ", cachedRecovery.recovery().fpr(), " into ", wantedReg, "\n"); + if (wantedReg.isGPR()) + m_jit.moveDoubleTo64(cachedRecovery.recovery().fpr(), wantedReg.gpr()); + else + m_jit.moveDouble(cachedRecovery.recovery().fpr(), wantedReg.fpr()); + RELEASE_ASSERT(cachedRecovery.recovery().dataFormat() == DataFormatJS); + updateRecovery(cachedRecovery, + ValueRecovery::inRegister(wantedReg, DataFormatJS)); + } + } + + ASSERT(m_registers[wantedReg] == &cachedRecovery); +} + +bool CallFrameShuffler::tryAcquireTagTypeNumber() +{ + if (m_tagTypeNumber != InvalidGPRReg) + return true; + + m_tagTypeNumber = getFreeGPR(); + + if (m_tagTypeNumber == InvalidGPRReg) + return false; + + m_lockedRegisters.set(m_tagTypeNumber); + m_jit.move(MacroAssembler::TrustedImm64(TagTypeNumber), m_tagTypeNumber); + return true; +} + +} // namespace JSC + +#endif // ENABLE(JIT) && USE(JSVALUE64) diff --git a/Source/JavaScriptCore/jit/ClosureCallStubRoutine.cpp b/Source/JavaScriptCore/jit/ClosureCallStubRoutine.cpp deleted file mode 100644 index 1588f7fea..000000000 --- a/Source/JavaScriptCore/jit/ClosureCallStubRoutine.cpp +++ /dev/null @@ -1,63 +0,0 @@ -/* - 
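The boxing arithmetic that the 64-bit emitBox() above performs with or64/sub64 can be checked in isolation. The sketch below is a standalone model rather than JSC code; it assumes TagTypeNumber has the usual value 0xffff000000000000 (the constant loaded into m_tagTypeNumber by tryAcquireTagTypeNumber()) and skips the purifyNaN() step.

#include <cstdint>
#include <cstring>

constexpr uint64_t TagTypeNumber = 0xffff000000000000ull; // assumed value of the JSC constant

// Int32 boxing: zero-extend, then OR in the tag, mirroring the
// zeroExtend32ToPtr + or64 sequence in emitBox().
uint64_t boxInt32(int32_t value)
{
    return static_cast<uint64_t>(static_cast<uint32_t>(value)) | TagTypeNumber;
}

// Double boxing: reinterpret the bits, then subtract the tag, mirroring the
// moveDoubleTo64 + sub64 sequence in emitBox() (NaN purification omitted).
uint64_t boxDouble(double value)
{
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof(bits));
    return bits - TagTypeNumber;
}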
* Copyright (C) 2012 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "config.h" -#include "ClosureCallStubRoutine.h" - -#if ENABLE(JIT) - -#include "Executable.h" -#include "Heap.h" -#include "VM.h" -#include "Operations.h" -#include "SlotVisitor.h" -#include "Structure.h" - -namespace JSC { - -ClosureCallStubRoutine::ClosureCallStubRoutine( - const MacroAssemblerCodeRef& code, VM& vm, const JSCell* owner, - Structure* structure, ExecutableBase* executable, const CodeOrigin& codeOrigin) - : GCAwareJITStubRoutine(code, vm, true) - , m_structure(vm, owner, structure) - , m_executable(vm, owner, executable) - , m_codeOrigin(codeOrigin) -{ -} - -ClosureCallStubRoutine::~ClosureCallStubRoutine() -{ -} - -void ClosureCallStubRoutine::markRequiredObjectsInternal(SlotVisitor& visitor) -{ - visitor.append(&m_structure); - visitor.append(&m_executable); -} - -} // namespace JSC - -#endif // ENABLE(JIT) - diff --git a/Source/JavaScriptCore/jit/ClosureCallStubRoutine.h b/Source/JavaScriptCore/jit/ClosureCallStubRoutine.h deleted file mode 100644 index d951075e2..000000000 --- a/Source/JavaScriptCore/jit/ClosureCallStubRoutine.h +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright (C) 2012 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef ClosureCallStubRoutine_h -#define ClosureCallStubRoutine_h - -#include - -#if ENABLE(JIT) - -#include "CodeOrigin.h" -#include "GCAwareJITStubRoutine.h" - -namespace JSC { - -class ClosureCallStubRoutine : public GCAwareJITStubRoutine { -public: - ClosureCallStubRoutine( - const MacroAssemblerCodeRef&, VM&, const JSCell* owner, - Structure*, ExecutableBase*, const CodeOrigin&); - - virtual ~ClosureCallStubRoutine(); - - Structure* structure() const { return m_structure.get(); } - ExecutableBase* executable() const { return m_executable.get(); } - const CodeOrigin& codeOrigin() const { return m_codeOrigin; } - -protected: - virtual void markRequiredObjectsInternal(SlotVisitor&); - -private: - WriteBarrier m_structure; - WriteBarrier m_executable; - // This allows us to figure out who a call is linked to by searching through - // stub routines. - CodeOrigin m_codeOrigin; -}; - -} // namespace JSC - -#endif // ENABLE(JIT) - -#endif // ClosureCallStubRoutine_h - diff --git a/Source/JavaScriptCore/jit/CompactJITCodeMap.h b/Source/JavaScriptCore/jit/CompactJITCodeMap.h index 45ab175ec..d5eaa4072 100644 --- a/Source/JavaScriptCore/jit/CompactJITCodeMap.h +++ b/Source/JavaScriptCore/jit/CompactJITCodeMap.h @@ -10,7 +10,7 @@ * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * 3. Neither the name of Apple Inc. ("Apple") nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. 
* @@ -30,10 +30,8 @@ #define CompactJITCodeMap_h #include -#include #include -#include -#include +#include #include namespace JSC { @@ -47,7 +45,7 @@ namespace JSC { // CompactJITCodeMap::Encoder encoder(map); // encoder.append(a, b); // encoder.append(c, d); // preconditions: c >= a, d >= b -// OwnPtr map = encoder.finish(); +// auto map = encoder.finish(); // // At some later time: // @@ -80,6 +78,16 @@ struct BytecodeAndMachineOffset { class CompactJITCodeMap { WTF_MAKE_FAST_ALLOCATED; public: + CompactJITCodeMap(uint8_t* buffer, unsigned size, unsigned numberOfEntries) + : m_buffer(buffer) +#if !ASSERT_DISABLED + , m_size(size) +#endif + , m_numberOfEntries(numberOfEntries) + { + UNUSED_PARAM(size); + } + ~CompactJITCodeMap() { if (m_buffer) @@ -94,16 +102,6 @@ public: void decode(Vector& result) const; private: - CompactJITCodeMap(uint8_t* buffer, unsigned size, unsigned numberOfEntries) - : m_buffer(buffer) -#if !ASSERT_DISABLED - , m_size(size) -#endif - , m_numberOfEntries(numberOfEntries) - { - UNUSED_PARAM(size); - } - uint8_t at(unsigned index) const { ASSERT(index < m_size); @@ -138,8 +136,8 @@ public: void ensureCapacityFor(unsigned numberOfEntriesToAdd); void append(unsigned bytecodeIndex, unsigned machineCodeOffset); - PassOwnPtr finish(); - + std::unique_ptr finish(); + private: void appendByte(uint8_t value); void encodeNumber(uint32_t value); @@ -212,18 +210,18 @@ inline void CompactJITCodeMap::Encoder::append(unsigned bytecodeIndex, unsigned m_numberOfEntries++; } -inline PassOwnPtr CompactJITCodeMap::Encoder::finish() +inline std::unique_ptr CompactJITCodeMap::Encoder::finish() { m_capacity = m_size; m_buffer = static_cast(fastRealloc(m_buffer, m_capacity)); - OwnPtr result = adoptPtr(new CompactJITCodeMap(m_buffer, m_size, m_numberOfEntries)); + auto result = std::make_unique(m_buffer, m_size, m_numberOfEntries); m_buffer = 0; m_size = 0; m_capacity = 0; m_numberOfEntries = 0; m_previousBytecodeIndex = 0; m_previousMachineCodeOffset = 0; - return result.release(); + return result; } inline void CompactJITCodeMap::Encoder::appendByte(uint8_t value) diff --git a/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.cpp b/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.cpp new file mode 100644 index 000000000..b4f56650b --- /dev/null +++ b/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.cpp @@ -0,0 +1,73 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "ExecutableAllocationFuzz.h" + +#include "TestRunnerUtils.h" +#include +#include + +namespace JSC { + +static Atomic s_numberOfExecutableAllocationFuzzChecks; +unsigned numberOfExecutableAllocationFuzzChecks() +{ + return s_numberOfExecutableAllocationFuzzChecks.load(); +} + +ExecutableAllocationFuzzResult doExecutableAllocationFuzzing() +{ + ASSERT(Options::useExecutableAllocationFuzz()); + + unsigned oldValue; + unsigned newValue; + do { + oldValue = s_numberOfExecutableAllocationFuzzChecks.load(); + newValue = oldValue + 1; + } while (!s_numberOfExecutableAllocationFuzzChecks.compareExchangeWeak(oldValue, newValue)); + + if (newValue == Options::fireExecutableAllocationFuzzAt()) { + if (Options::verboseExecutableAllocationFuzz()) { + dataLog("Will pretend to fail executable allocation.\n"); + WTFReportBacktrace(); + } + return PretendToFailExecutableAllocation; + } + + if (Options::fireExecutableAllocationFuzzAtOrAfter() + && newValue >= Options::fireExecutableAllocationFuzzAtOrAfter()) { + if (Options::verboseExecutableAllocationFuzz()) { + dataLog("Will pretend to fail executable allocation.\n"); + WTFReportBacktrace(); + } + return PretendToFailExecutableAllocation; + } + + return AllowNormalExecutableAllocation; +} + +} // namespace JSC + diff --git a/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.h b/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.h new file mode 100644 index 000000000..b15cdef44 --- /dev/null +++ b/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.h @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
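The core of doExecutableAllocationFuzzing() above is a lock-free counter bump followed by a threshold check. A rough standalone equivalent using std::atomic looks like this; the parameters and the FuzzResult enum below are stand-ins for the real Options-driven values, not JSC API.

#include <atomic>
#include <cstdio>

enum FuzzResult { AllowAllocation, PretendToFail };

std::atomic<unsigned> s_checks{0};

// fireAt/fireAtOrAfter play the role of the fireExecutableAllocationFuzz*
// options; a value of zero never fires, as in the patch.
FuzzResult fuzzCheck(unsigned fireAt, unsigned fireAtOrAfter)
{
    unsigned oldValue = s_checks.load();
    unsigned newValue;
    do {
        newValue = oldValue + 1;
        // compare_exchange_weak refreshes oldValue on failure, so each retry
        // uses the latest counter value, like the compareExchangeWeak loop above.
    } while (!s_checks.compare_exchange_weak(oldValue, newValue));

    if (newValue == fireAt || (fireAtOrAfter && newValue >= fireAtOrAfter)) {
        std::fprintf(stderr, "Will pretend to fail executable allocation.\n");
        return PretendToFail;
    }
    return AllowAllocation;
}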
+ */ + +#ifndef ExecutableAllocationFuzz_h +#define ExecutableAllocationFuzz_h + +#include "Options.h" + +namespace JSC { + +enum ExecutableAllocationFuzzResult { + AllowNormalExecutableAllocation, + PretendToFailExecutableAllocation +}; + +ExecutableAllocationFuzzResult doExecutableAllocationFuzzing(); + +inline ExecutableAllocationFuzzResult doExecutableAllocationFuzzingIfEnabled() +{ + if (LIKELY(!Options::useExecutableAllocationFuzz())) + return AllowNormalExecutableAllocation; + + return doExecutableAllocationFuzzing(); +} + +} // namespace JSC + +#endif // ExecutableAllocationFuzz_h + diff --git a/Source/JavaScriptCore/jit/ExecutableAllocator.cpp b/Source/JavaScriptCore/jit/ExecutableAllocator.cpp index 5ac6cc412..4ede23531 100644 --- a/Source/JavaScriptCore/jit/ExecutableAllocator.cpp +++ b/Source/JavaScriptCore/jit/ExecutableAllocator.cpp @@ -24,18 +24,17 @@ */ #include "config.h" - #include "ExecutableAllocator.h" +#include "JSCInlines.h" + #if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND) #include "CodeProfiling.h" #include +#include #include +#include #include -#if ENABLE(ASSEMBLER_WX_EXCLUSIVE) -#include -#endif -#include #include #endif @@ -57,7 +56,7 @@ public: DemandExecutableAllocator() : MetaAllocator(jitAllocationGranule) { - MutexLocker lock(allocatorsMutex()); + std::lock_guard lock(allocatorsMutex()); allocators().add(this); // Don't preallocate any memory here. } @@ -65,7 +64,7 @@ public: virtual ~DemandExecutableAllocator() { { - MutexLocker lock(allocatorsMutex()); + std::lock_guard lock(allocatorsMutex()); allocators().remove(this); } for (unsigned i = 0; i < reservations.size(); ++i) @@ -75,7 +74,7 @@ public: static size_t bytesAllocatedByAllAllocators() { size_t total = 0; - MutexLocker lock(allocatorsMutex()); + std::lock_guard lock(allocatorsMutex()); for (HashSet::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator) total += (*allocator)->bytesAllocated(); return total; @@ -84,7 +83,7 @@ public: static size_t bytesCommittedByAllocactors() { size_t total = 0; - MutexLocker lock(allocatorsMutex()); + std::lock_guard lock(allocatorsMutex()); for (HashSet::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator) total += (*allocator)->bytesCommitted(); return total; @@ -93,7 +92,7 @@ public: #if ENABLE(META_ALLOCATOR_PROFILE) static void dumpProfileFromAllAllocators() { - MutexLocker lock(allocatorsMutex()); + std::lock_guard lock(allocatorsMutex()); for (HashSet::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator) (*allocator)->dumpProfile(); } @@ -135,12 +134,14 @@ private: Vector reservations; static HashSet& allocators() { - DEFINE_STATIC_LOCAL(HashSet, sAllocators, ()); - return sAllocators; + static NeverDestroyed> set; + return set; } - static Mutex& allocatorsMutex() + + static StaticLock& allocatorsMutex() { - DEFINE_STATIC_LOCAL(Mutex, mutex, ()); + static StaticLock mutex; + return mutex; } }; @@ -169,7 +170,7 @@ void ExecutableAllocator::initializeAllocator() ExecutableAllocator::ExecutableAllocator(VM&) #if ENABLE(ASSEMBLER_WX_EXCLUSIVE) - : m_allocator(adoptPtr(new DemandExecutableAllocator())) + : m_allocator(std::make_unique()) #endif { ASSERT(allocator()); @@ -212,11 +213,11 @@ double ExecutableAllocator::memoryPressureMultiplier(size_t addedMemoryUsage) } -PassRefPtr ExecutableAllocator::allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort) +RefPtr ExecutableAllocator::allocate(VM&, size_t sizeInBytes, void* 
ownerUID, JITCompilationEffort effort) { RefPtr result = allocator()->allocate(sizeInBytes, ownerUID); RELEASE_ASSERT(result || effort != JITCompilationMustSucceed); - return result.release(); + return result; } size_t ExecutableAllocator::committedByteCount() @@ -231,6 +232,16 @@ void ExecutableAllocator::dumpProfile() } #endif +Lock& ExecutableAllocator::getLock() const +{ + return gAllocator->getLock(); +} + +bool ExecutableAllocator::isValidExecutableMemory(const LockHolder& locker, void* address) +{ + return gAllocator->isInAllocatedMemory(locker, address); +} + #endif // ENABLE(EXECUTABLE_ALLOCATOR_DEMAND) #if ENABLE(ASSEMBLER_WX_EXCLUSIVE) diff --git a/Source/JavaScriptCore/jit/ExecutableAllocator.h b/Source/JavaScriptCore/jit/ExecutableAllocator.h index 42e1f9594..09b768bed 100644 --- a/Source/JavaScriptCore/jit/ExecutableAllocator.h +++ b/Source/JavaScriptCore/jit/ExecutableAllocator.h @@ -29,10 +29,10 @@ #include // for ptrdiff_t #include #include +#include #include #include #include -#include #include #include @@ -40,7 +40,7 @@ #include #endif -#if OS(IOS) || OS(QNX) +#if OS(IOS) #include #endif @@ -55,44 +55,16 @@ #include #endif -#if OS(WINCE) -// From pkfuncs.h (private header file from the Platform Builder) -#define CACHE_SYNC_ALL 0x07F -extern "C" __declspec(dllimport) void CacheRangeFlush(LPVOID pAddr, DWORD dwLength, DWORD dwFlags); -#endif - #define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (pageSize() * 4) -#if ENABLE(ASSEMBLER_WX_EXCLUSIVE) -#define PROTECTION_FLAGS_RW (PROT_READ | PROT_WRITE) -#define PROTECTION_FLAGS_RX (PROT_READ | PROT_EXEC) -#define EXECUTABLE_POOL_WRITABLE false -#else #define EXECUTABLE_POOL_WRITABLE true -#endif namespace JSC { class VM; -void releaseExecutableMemory(VM&); static const unsigned jitAllocationGranule = 32; -inline size_t roundUpAllocationSize(size_t request, size_t granularity) -{ - RELEASE_ASSERT((std::numeric_limits::max() - granularity) > request); - - // Round up to next page boundary - size_t size = request + (granularity - 1); - size = size & ~(granularity - 1); - ASSERT(size >= request); - return size; -} - -} - -namespace JSC { - typedef WTF::MetaAllocatorHandle ExecutableMemoryHandle; #if ENABLE(ASSEMBLER) @@ -102,13 +74,20 @@ class DemandExecutableAllocator; #endif #if ENABLE(EXECUTABLE_ALLOCATOR_FIXED) -#if CPU(ARM) || CPU(MIPS) +#if CPU(ARM) static const size_t fixedExecutableMemoryPoolSize = 16 * 1024 * 1024; -#elif CPU(X86_64) && !CPU(X32) +#elif CPU(ARM64) +static const size_t fixedExecutableMemoryPoolSize = 32 * 1024 * 1024; +#elif CPU(X86_64) static const size_t fixedExecutableMemoryPoolSize = 1024 * 1024 * 1024; #else static const size_t fixedExecutableMemoryPoolSize = 32 * 1024 * 1024; #endif +#if CPU(ARM) +static const double executablePoolReservationFraction = 0.15; +#else +static const double executablePoolReservationFraction = 0.25; +#endif extern uintptr_t startOfFixedExecutableMemoryPool; #endif @@ -134,36 +113,13 @@ public: static void dumpProfile() { } #endif - PassRefPtr allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort); - -#if ENABLE(ASSEMBLER_WX_EXCLUSIVE) - static void makeWritable(void* start, size_t size) - { - reprotectRegion(start, size, Writable); - } + RefPtr allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort); - static void makeExecutable(void* start, size_t size) - { - reprotectRegion(start, size, Executable); - } -#else - static void makeWritable(void*, size_t) {} - static void makeExecutable(void*, size_t) {} -#endif + bool isValidExecutableMemory(const 
LockHolder&, void* address); static size_t committedByteCount(); -private: - -#if ENABLE(ASSEMBLER_WX_EXCLUSIVE) - static void reprotectRegion(void*, size_t, ProtectionSetting); -#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND) - // We create a MetaAllocator for each JS global object. - OwnPtr m_allocator; - DemandExecutableAllocator* allocator() { return m_allocator.get(); } -#endif -#endif - + Lock& getLock() const; }; #endif // ENABLE(JIT) && ENABLE(ASSEMBLER) diff --git a/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp b/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp index ea2217924..5f601767e 100644 --- a/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp +++ b/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2009 Apple Inc. All rights reserved. + * Copyright (C) 2009, 2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -24,21 +24,19 @@ */ #include "config.h" - #include "ExecutableAllocator.h" +#include "JSCInlines.h" + #if ENABLE(EXECUTABLE_ALLOCATOR_FIXED) #include "CodeProfiling.h" -#include +#include "ExecutableAllocationFuzz.h" #include #include -#include -#if !PLATFORM(IOS) && PLATFORM(MAC) && __MAC_OS_X_VERSION_MIN_REQUIRED < 1090 +#if OS(DARWIN) #include -// MADV_FREE_REUSABLE does not work for JIT memory on older OSes so use MADV_FREE in that case. -#define WTF_USE_MADV_FREE_FOR_JIT_MEMORY 1 #endif using namespace WTF; @@ -53,12 +51,15 @@ public: FixedVMPoolExecutableAllocator() : MetaAllocator(jitAllocationGranule) // round up all allocations to 32 bytes { - m_reservation = PageReservation::reserveWithGuardPages(fixedExecutableMemoryPoolSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true); -#if !ENABLE(LLINT) - RELEASE_ASSERT(m_reservation); -#endif + size_t reservationSize; + if (Options::jitMemoryReservationSize()) + reservationSize = Options::jitMemoryReservationSize(); + else + reservationSize = fixedExecutableMemoryPoolSize; + reservationSize = roundUpToMultipleOf(pageSize(), reservationSize); + m_reservation = PageReservation::reserveWithGuardPages(reservationSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true); if (m_reservation) { - ASSERT(m_reservation.size() == fixedExecutableMemoryPoolSize); + ASSERT(m_reservation.size() == reservationSize); addFreshFreeSpace(m_reservation.base(), m_reservation.size()); startOfFixedExecutableMemoryPool = reinterpret_cast(m_reservation.base()); @@ -68,13 +69,13 @@ public: virtual ~FixedVMPoolExecutableAllocator(); protected: - virtual void* allocateNewSpace(size_t&) + virtual void* allocateNewSpace(size_t&) override { // We're operating in a fixed pool, so new allocation is always prohibited. 
return 0; } - virtual void notifyNeedPage(void* page) + virtual void notifyNeedPage(void* page) override { #if USE(MADV_FREE_FOR_JIT_MEMORY) UNUSED_PARAM(page); @@ -83,7 +84,7 @@ protected: #endif } - virtual void notifyPageIsFree(void* page) + virtual void notifyPageIsFree(void* page) override { #if USE(MADV_FREE_FOR_JIT_MEMORY) for (;;) { @@ -144,28 +145,59 @@ double ExecutableAllocator::memoryPressureMultiplier(size_t addedMemoryUsage) MetaAllocator::Statistics statistics = allocator->currentStatistics(); ASSERT(statistics.bytesAllocated <= statistics.bytesReserved); size_t bytesAllocated = statistics.bytesAllocated + addedMemoryUsage; - if (bytesAllocated >= statistics.bytesReserved) - bytesAllocated = statistics.bytesReserved; + size_t bytesAvailable = static_cast( + statistics.bytesReserved * (1 - executablePoolReservationFraction)); + if (bytesAllocated >= bytesAvailable) + bytesAllocated = bytesAvailable; double result = 1.0; - size_t divisor = statistics.bytesReserved - bytesAllocated; + size_t divisor = bytesAvailable - bytesAllocated; if (divisor) - result = static_cast(statistics.bytesReserved) / divisor; + result = static_cast(bytesAvailable) / divisor; if (result < 1.0) result = 1.0; return result; } -PassRefPtr ExecutableAllocator::allocate(VM& vm, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort) +RefPtr ExecutableAllocator::allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort) { + if (effort != JITCompilationCanFail && Options::reportMustSucceedExecutableAllocations()) { + dataLog("Allocating ", sizeInBytes, " bytes of executable memory with JITCompilationMustSucceed.\n"); + WTFReportBacktrace(); + } + + if (effort == JITCompilationCanFail + && doExecutableAllocationFuzzingIfEnabled() == PretendToFailExecutableAllocation) + return nullptr; + + if (effort == JITCompilationCanFail) { + // Don't allow allocations if we are down to reserve. + MetaAllocator::Statistics statistics = allocator->currentStatistics(); + size_t bytesAllocated = statistics.bytesAllocated + sizeInBytes; + size_t bytesAvailable = static_cast( + statistics.bytesReserved * (1 - executablePoolReservationFraction)); + if (bytesAllocated > bytesAvailable) + return nullptr; + } + RefPtr result = allocator->allocate(sizeInBytes, ownerUID); if (!result) { - if (effort == JITCompilationCanFail) - return result; - releaseExecutableMemory(vm); - result = allocator->allocate(sizeInBytes, ownerUID); - RELEASE_ASSERT(result); + if (effort != JITCompilationCanFail) { + dataLog("Ran out of executable memory while allocating ", sizeInBytes, " bytes.\n"); + CRASH(); + } + return nullptr; } - return result.release(); + return result; +} + +bool ExecutableAllocator::isValidExecutableMemory(const LockHolder& locker, void* address) +{ + return allocator->isInAllocatedMemory(locker, address); +} + +Lock& ExecutableAllocator::getLock() const +{ + return allocator->getLock(); } size_t ExecutableAllocator::committedByteCount() diff --git a/Source/JavaScriptCore/jit/FPRInfo.h b/Source/JavaScriptCore/jit/FPRInfo.h new file mode 100644 index 000000000..a19a1ac38 --- /dev/null +++ b/Source/JavaScriptCore/jit/FPRInfo.h @@ -0,0 +1,431 @@ +/* + * Copyright (C) 2011, 2013 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. 
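The reserve-fraction logic added to the fixed VM pool above reduces to a little arithmetic that is easy to sanity-check on its own. A minimal sketch, assuming the non-ARM executablePoolReservationFraction of 0.25 from the patch (the free functions below are illustrative, not JSC API):

#include <cstddef>

constexpr double reservationFraction = 0.25; // non-ARM value in the patch

// Mirrors the updated memoryPressureMultiplier(): pressure grows as allocation
// approaches the pool minus its reserve, and never drops below 1.0.
double memoryPressureMultiplier(size_t bytesReserved, size_t bytesAllocated)
{
    size_t bytesAvailable = static_cast<size_t>(bytesReserved * (1 - reservationFraction));
    if (bytesAllocated >= bytesAvailable)
        bytesAllocated = bytesAvailable;
    size_t divisor = bytesAvailable - bytesAllocated;
    double result = divisor ? static_cast<double>(bytesAvailable) / divisor : 1.0;
    return result < 1.0 ? 1.0 : result;
}

// Mirrors the new JITCompilationCanFail early-out: a fallible allocation is
// refused once it would eat into the reserved slice of the pool.
bool canFailAllocationFits(size_t bytesReserved, size_t bytesAllocated, size_t request)
{
    size_t bytesAvailable = static_cast<size_t>(bytesReserved * (1 - reservationFraction));
    return bytesAllocated + request <= bytesAvailable;
}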
+ * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef FPRInfo_h +#define FPRInfo_h + +#include "MacroAssembler.h" +#include + +namespace JSC { + +typedef MacroAssembler::FPRegisterID FPRReg; +#define InvalidFPRReg ((::JSC::FPRReg)-1) + +#if ENABLE(JIT) + +#if CPU(X86) || CPU(X86_64) + +class FPRInfo { +public: + typedef FPRReg RegisterType; + static const unsigned numberOfRegisters = 6; + static const unsigned numberOfArgumentRegisters = 8; + + // Temporary registers. + static const FPRReg fpRegT0 = X86Registers::xmm0; + static const FPRReg fpRegT1 = X86Registers::xmm1; + static const FPRReg fpRegT2 = X86Registers::xmm2; + static const FPRReg fpRegT3 = X86Registers::xmm3; + static const FPRReg fpRegT4 = X86Registers::xmm4; + static const FPRReg fpRegT5 = X86Registers::xmm5; +#if CPU(X86_64) + // Only X86_64 passes aguments in xmm registers + static const FPRReg argumentFPR0 = X86Registers::xmm0; // fpRegT0 + static const FPRReg argumentFPR1 = X86Registers::xmm1; // fpRegT1 + static const FPRReg argumentFPR2 = X86Registers::xmm2; // fpRegT2 + static const FPRReg argumentFPR3 = X86Registers::xmm3; // fpRegT3 + static const FPRReg argumentFPR4 = X86Registers::xmm4; // fpRegT4 + static const FPRReg argumentFPR5 = X86Registers::xmm5; // fpRegT5 + static const FPRReg argumentFPR6 = X86Registers::xmm6; + static const FPRReg argumentFPR7 = X86Registers::xmm7; +#endif + // On X86 the return will actually be on the x87 stack, + // so we'll copy to xmm0 for sanity! + static const FPRReg returnValueFPR = X86Registers::xmm0; // fpRegT0 + + // FPRReg mapping is direct, the machine regsiter numbers can + // be used directly as indices into the FPR RegisterBank. 
+ COMPILE_ASSERT(X86Registers::xmm0 == 0, xmm0_is_0); + COMPILE_ASSERT(X86Registers::xmm1 == 1, xmm1_is_1); + COMPILE_ASSERT(X86Registers::xmm2 == 2, xmm2_is_2); + COMPILE_ASSERT(X86Registers::xmm3 == 3, xmm3_is_3); + COMPILE_ASSERT(X86Registers::xmm4 == 4, xmm4_is_4); + COMPILE_ASSERT(X86Registers::xmm5 == 5, xmm5_is_5); + static FPRReg toRegister(unsigned index) + { + return (FPRReg)index; + } + static unsigned toIndex(FPRReg reg) + { + unsigned result = (unsigned)reg; + if (result >= numberOfRegisters) + return InvalidIndex; + return result; + } + + static FPRReg toArgumentRegister(unsigned index) + { + return (FPRReg)index; + } + + static const char* debugName(FPRReg reg) + { + ASSERT(reg != InvalidFPRReg); +#if CPU(X86_64) + ASSERT(static_cast(reg) < 16); + static const char* nameForRegister[16] = { + "xmm0", "xmm1", "xmm2", "xmm3", + "xmm4", "xmm5", "xmm6", "xmm7", + "xmm8", "xmm9", "xmm10", "xmm11", + "xmm12", "xmm13", "xmm14", "xmm15" + }; +#elif CPU(X86) + ASSERT(static_cast(reg) < 8); + static const char* nameForRegister[8] = { + "xmm0", "xmm1", "xmm2", "xmm3", + "xmm4", "xmm5", "xmm6", "xmm7" + }; +#endif + return nameForRegister[reg]; + } + + static const unsigned InvalidIndex = 0xffffffff; +}; + +#endif // CPU(X86) || CPU(X86_64) + +#if CPU(ARM) + +class FPRInfo { +public: + typedef FPRReg RegisterType; + static const unsigned numberOfRegisters = 6; + + // Temporary registers. + // d7 is use by the MacroAssembler as fpTempRegister. + static const FPRReg fpRegT0 = ARMRegisters::d0; + static const FPRReg fpRegT1 = ARMRegisters::d1; + static const FPRReg fpRegT2 = ARMRegisters::d2; + static const FPRReg fpRegT3 = ARMRegisters::d3; + static const FPRReg fpRegT4 = ARMRegisters::d4; + static const FPRReg fpRegT5 = ARMRegisters::d5; + // ARMv7 doesn't pass arguments in fp registers. The return + // value is also actually in integer registers, for now + // we'll return in d0 for simplicity. + static const FPRReg returnValueFPR = ARMRegisters::d0; // fpRegT0 + +#if CPU(ARM_HARDFP) + static const FPRReg argumentFPR0 = ARMRegisters::d0; // fpRegT0 + static const FPRReg argumentFPR1 = ARMRegisters::d1; // fpRegT1 +#endif + + // FPRReg mapping is direct, the machine regsiter numbers can + // be used directly as indices into the FPR RegisterBank. + COMPILE_ASSERT(ARMRegisters::d0 == 0, d0_is_0); + COMPILE_ASSERT(ARMRegisters::d1 == 1, d1_is_1); + COMPILE_ASSERT(ARMRegisters::d2 == 2, d2_is_2); + COMPILE_ASSERT(ARMRegisters::d3 == 3, d3_is_3); + COMPILE_ASSERT(ARMRegisters::d4 == 4, d4_is_4); + COMPILE_ASSERT(ARMRegisters::d5 == 5, d5_is_5); + static FPRReg toRegister(unsigned index) + { + return (FPRReg)index; + } + static unsigned toIndex(FPRReg reg) + { + return (unsigned)reg; + } + + static const char* debugName(FPRReg reg) + { + ASSERT(reg != InvalidFPRReg); + ASSERT(static_cast(reg) < 32); + static const char* nameForRegister[32] = { + "d0", "d1", "d2", "d3", + "d4", "d5", "d6", "d7", + "d8", "d9", "d10", "d11", + "d12", "d13", "d14", "d15", + "d16", "d17", "d18", "d19", + "d20", "d21", "d22", "d23", + "d24", "d25", "d26", "d27", + "d28", "d29", "d30", "d31" + }; + return nameForRegister[reg]; + } + + static const unsigned InvalidIndex = 0xffffffff; +}; + +#endif // CPU(ARM) + +#if CPU(ARM64) + +class FPRInfo { +public: + typedef FPRReg RegisterType; + static const unsigned numberOfRegisters = 23; + static const unsigned numberOfArgumentRegisters = 8; + + // Temporary registers. + // q8-q15 are callee saved, q31 is use by the MacroAssembler as fpTempRegister. 
+ static const FPRReg fpRegT0 = ARM64Registers::q0; + static const FPRReg fpRegT1 = ARM64Registers::q1; + static const FPRReg fpRegT2 = ARM64Registers::q2; + static const FPRReg fpRegT3 = ARM64Registers::q3; + static const FPRReg fpRegT4 = ARM64Registers::q4; + static const FPRReg fpRegT5 = ARM64Registers::q5; + static const FPRReg fpRegT6 = ARM64Registers::q6; + static const FPRReg fpRegT7 = ARM64Registers::q7; + static const FPRReg fpRegT8 = ARM64Registers::q16; + static const FPRReg fpRegT9 = ARM64Registers::q17; + static const FPRReg fpRegT10 = ARM64Registers::q18; + static const FPRReg fpRegT11 = ARM64Registers::q19; + static const FPRReg fpRegT12 = ARM64Registers::q20; + static const FPRReg fpRegT13 = ARM64Registers::q21; + static const FPRReg fpRegT14 = ARM64Registers::q22; + static const FPRReg fpRegT15 = ARM64Registers::q23; + static const FPRReg fpRegT16 = ARM64Registers::q24; + static const FPRReg fpRegT17 = ARM64Registers::q25; + static const FPRReg fpRegT18 = ARM64Registers::q26; + static const FPRReg fpRegT19 = ARM64Registers::q27; + static const FPRReg fpRegT20 = ARM64Registers::q28; + static const FPRReg fpRegT21 = ARM64Registers::q29; + static const FPRReg fpRegT22 = ARM64Registers::q30; + static const FPRReg fpRegCS0 = ARM64Registers::q8; + static const FPRReg fpRegCS1 = ARM64Registers::q9; + static const FPRReg fpRegCS2 = ARM64Registers::q10; + static const FPRReg fpRegCS3 = ARM64Registers::q11; + static const FPRReg fpRegCS4 = ARM64Registers::q12; + static const FPRReg fpRegCS5 = ARM64Registers::q13; + static const FPRReg fpRegCS6 = ARM64Registers::q14; + static const FPRReg fpRegCS7 = ARM64Registers::q15; + + static const FPRReg argumentFPR0 = ARM64Registers::q0; // fpRegT0 + static const FPRReg argumentFPR1 = ARM64Registers::q1; // fpRegT1 + static const FPRReg argumentFPR2 = ARM64Registers::q2; // fpRegT2 + static const FPRReg argumentFPR3 = ARM64Registers::q3; // fpRegT3 + static const FPRReg argumentFPR4 = ARM64Registers::q4; // fpRegT4 + static const FPRReg argumentFPR5 = ARM64Registers::q5; // fpRegT5 + static const FPRReg argumentFPR6 = ARM64Registers::q6; // fpRegT6 + static const FPRReg argumentFPR7 = ARM64Registers::q7; // fpRegT7 + + static const FPRReg returnValueFPR = ARM64Registers::q0; // fpRegT0 + + static FPRReg toRegister(unsigned index) + { + ASSERT(index < numberOfRegisters); + static const FPRReg registerForIndex[numberOfRegisters] = { + fpRegT0, fpRegT1, fpRegT2, fpRegT3, fpRegT4, fpRegT5, fpRegT6, fpRegT7, + fpRegT8, fpRegT9, fpRegT10, fpRegT11, fpRegT12, fpRegT13, fpRegT14, fpRegT15, + fpRegT16, fpRegT17, fpRegT18, fpRegT19, fpRegT20, fpRegT21, fpRegT22 + }; + return registerForIndex[index]; + } + + static unsigned toIndex(FPRReg reg) + { + ASSERT(reg != InvalidFPRReg); + ASSERT(static_cast(reg) < 32); + static const unsigned indexForRegister[32] = { + 0, 1, 2, 3, 4, 5, 6, 7, + InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, + 8, 9, 10, 11, 12, 13, 14, 15, + 16, 17, 18, 19, 20, 21, 22, InvalidIndex + }; + unsigned result = indexForRegister[reg]; + return result; + } + + static FPRReg toArgumentRegister(unsigned index) + { + ASSERT(index < 8); + return static_cast(index); + } + + static const char* debugName(FPRReg reg) + { + ASSERT(reg != InvalidFPRReg); + ASSERT(static_cast(reg) < 32); + static const char* nameForRegister[32] = { + "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7", + "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15", + "q16", "q17", "q18", "q19", "q20", "q21", 
"q22", "q23", + "q24", "q25", "q26", "q27", "q28", "q29", "q30", "q31" + }; + return nameForRegister[reg]; + } + + static const unsigned InvalidIndex = 0xffffffff; +}; + +#endif // CPU(ARM64) + +#if CPU(MIPS) + +class FPRInfo { +public: + typedef FPRReg RegisterType; + static const unsigned numberOfRegisters = 7; + + // Temporary registers. + static const FPRReg fpRegT0 = MIPSRegisters::f0; + static const FPRReg fpRegT1 = MIPSRegisters::f2; + static const FPRReg fpRegT2 = MIPSRegisters::f4; + static const FPRReg fpRegT3 = MIPSRegisters::f6; + static const FPRReg fpRegT4 = MIPSRegisters::f8; + static const FPRReg fpRegT5 = MIPSRegisters::f10; + static const FPRReg fpRegT6 = MIPSRegisters::f18; + + static const FPRReg returnValueFPR = MIPSRegisters::f0; + + static const FPRReg argumentFPR0 = MIPSRegisters::f12; + static const FPRReg argumentFPR1 = MIPSRegisters::f14; + + static FPRReg toRegister(unsigned index) + { + static const FPRReg registerForIndex[numberOfRegisters] = { + fpRegT0, fpRegT1, fpRegT2, fpRegT3, fpRegT4, fpRegT5, fpRegT6 }; + + ASSERT(index < numberOfRegisters); + return registerForIndex[index]; + } + + static unsigned toIndex(FPRReg reg) + { + ASSERT(reg != InvalidFPRReg); + ASSERT(reg < 20); + static const unsigned indexForRegister[20] = { + 0, InvalidIndex, 1, InvalidIndex, + 2, InvalidIndex, 3, InvalidIndex, + 4, InvalidIndex, 5, InvalidIndex, + InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, + InvalidIndex, InvalidIndex, 6, InvalidIndex, + }; + unsigned result = indexForRegister[reg]; + return result; + } + + static const char* debugName(FPRReg reg) + { + ASSERT(reg != InvalidFPRReg); + ASSERT(reg < 32); + static const char* nameForRegister[32] = { + "f0", "f1", "f2", "f3", + "f4", "f5", "f6", "f7", + "f8", "f9", "f10", "f11", + "f12", "f13", "f14", "f15" + "f16", "f17", "f18", "f19" + "f20", "f21", "f22", "f23" + "f24", "f25", "f26", "f27" + "f28", "f29", "f30", "f31" + }; + return nameForRegister[reg]; + } + + static const unsigned InvalidIndex = 0xffffffff; +}; + +#endif // CPU(MIPS) + +#if CPU(SH4) + +class FPRInfo { +public: + typedef FPRReg RegisterType; + static const unsigned numberOfRegisters = 6; + + // Temporary registers. 
+ static const FPRReg fpRegT0 = SH4Registers::dr0; + static const FPRReg fpRegT1 = SH4Registers::dr2; + static const FPRReg fpRegT2 = SH4Registers::dr4; + static const FPRReg fpRegT3 = SH4Registers::dr6; + static const FPRReg fpRegT4 = SH4Registers::dr8; + static const FPRReg fpRegT5 = SH4Registers::dr10; + + static const FPRReg returnValueFPR = SH4Registers::dr0; + + static const FPRReg argumentFPR0 = SH4Registers::dr4; + static const FPRReg argumentFPR1 = SH4Registers::dr6; + + static FPRReg toRegister(unsigned index) + { + static const FPRReg registerForIndex[numberOfRegisters] = { + fpRegT0, fpRegT1, fpRegT2, fpRegT3, fpRegT4, fpRegT5 }; + + ASSERT(index < numberOfRegisters); + return registerForIndex[index]; + } + + static unsigned toIndex(FPRReg reg) + { + ASSERT(reg != InvalidFPRReg); + ASSERT(reg < 16); + static const unsigned indexForRegister[16] = { + 0, InvalidIndex, 1, InvalidIndex, + 2, InvalidIndex, 3, InvalidIndex, + 4, InvalidIndex, 5, InvalidIndex, + InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex + }; + unsigned result = indexForRegister[reg]; + return result; + } + + static const char* debugName(FPRReg reg) + { + ASSERT(reg != InvalidFPRReg); + ASSERT(reg < 16); + static const char* nameForRegister[16] = { + "dr0", "fr1", "dr2", "fr3", + "dr4", "fr5", "dr6", "fr7", + "dr8", "fr9", "dr10", "fr11", + "dr12", "fr13", "dr14", "fr15" + }; + return nameForRegister[reg]; + } + + static const unsigned InvalidIndex = 0xffffffff; +}; + +#endif // CPU(SH4) + +#endif // ENABLE(JIT) + +} // namespace JSC + +namespace WTF { + +inline void printInternal(PrintStream& out, JSC::FPRReg reg) +{ +#if ENABLE(JIT) + out.print("%", JSC::FPRInfo::debugName(reg)); +#else + out.printf("%%fr%d", reg); +#endif +} + +} // namespace WTF + +#endif diff --git a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp index f681dd847..60c0c5514 100644 --- a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp +++ b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp @@ -28,24 +28,25 @@ #if ENABLE(JIT) +#include "CodeBlock.h" +#include "DFGCommonData.h" #include "Heap.h" #include "VM.h" -#include "Operations.h" +#include "JSCInlines.h" #include "SlotVisitor.h" #include "Structure.h" namespace JSC { GCAwareJITStubRoutine::GCAwareJITStubRoutine( - const MacroAssemblerCodeRef& code, VM& vm, bool isClosureCall) + const MacroAssemblerCodeRef& code, VM& vm) : JITStubRoutine(code) , m_mayBeExecuting(false) , m_isJettisoned(false) - , m_isClosureCall(isClosureCall) { vm.heap.m_jitStubRoutines.add(this); } - + GCAwareJITStubRoutine::~GCAwareJITStubRoutine() { } void GCAwareJITStubRoutine::observeZeroRefCount() @@ -95,29 +96,61 @@ void MarkingGCAwareJITStubRoutineWithOneObject::markRequiredObjectsInternal(Slot visitor.append(&m_object); } -PassRefPtr createJITStubRoutine( - const MacroAssemblerCodeRef& code, - VM& vm, - const JSCell*, - bool makesCalls) + +GCAwareJITStubRoutineWithExceptionHandler::GCAwareJITStubRoutineWithExceptionHandler( + const MacroAssemblerCodeRef& code, VM& vm, + CodeBlock* codeBlockForExceptionHandlers, CallSiteIndex exceptionHandlerCallSiteIndex) + : GCAwareJITStubRoutine(code, vm) + , m_codeBlockWithExceptionHandler(codeBlockForExceptionHandlers) + , m_exceptionHandlerCallSiteIndex(exceptionHandlerCallSiteIndex) { - if (!makesCalls) - return adoptRef(new JITStubRoutine(code)); + RELEASE_ASSERT(m_codeBlockWithExceptionHandler); + ASSERT(!!m_codeBlockWithExceptionHandler->handlerForIndex(exceptionHandlerCallSiteIndex.bits())); +} - 
return static_pointer_cast( - adoptRef(new GCAwareJITStubRoutine(code, vm))); +void GCAwareJITStubRoutineWithExceptionHandler::aboutToDie() +{ + m_codeBlockWithExceptionHandler = nullptr; } +void GCAwareJITStubRoutineWithExceptionHandler::observeZeroRefCount() +{ +#if ENABLE(DFG_JIT) + if (m_codeBlockWithExceptionHandler) { + m_codeBlockWithExceptionHandler->jitCode()->dfgCommon()->removeCallSiteIndex(m_exceptionHandlerCallSiteIndex); + m_codeBlockWithExceptionHandler->removeExceptionHandlerForCallSite(m_exceptionHandlerCallSiteIndex); + m_codeBlockWithExceptionHandler = nullptr; + } +#endif + + Base::observeZeroRefCount(); +} + + PassRefPtr createJITStubRoutine( const MacroAssemblerCodeRef& code, VM& vm, const JSCell* owner, bool makesCalls, - JSCell* object) + JSCell* object, + CodeBlock* codeBlockForExceptionHandlers, + CallSiteIndex exceptionHandlerCallSiteIndex) { if (!makesCalls) return adoptRef(new JITStubRoutine(code)); + if (codeBlockForExceptionHandlers) { + RELEASE_ASSERT(!object); // We're not a marking stub routine. + RELEASE_ASSERT(JITCode::isOptimizingJIT(codeBlockForExceptionHandlers->jitType())); + return static_pointer_cast( + adoptRef(new GCAwareJITStubRoutineWithExceptionHandler(code, vm, codeBlockForExceptionHandlers, exceptionHandlerCallSiteIndex))); + } + + if (!object) { + return static_pointer_cast( + adoptRef(new GCAwareJITStubRoutine(code, vm))); + } + return static_pointer_cast( adoptRef(new MarkingGCAwareJITStubRoutineWithOneObject(code, vm, owner, object))); } diff --git a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h index f0b282cf1..97d9016d6 100644 --- a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h +++ b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2014 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,8 +26,6 @@ #ifndef GCAwareJITStubRoutine_h #define GCAwareJITStubRoutine_h -#include - #if ENABLE(JIT) #include "JITStubRoutine.h" @@ -54,7 +52,7 @@ class JITStubRoutineSet; // list which does not get reclaimed all at once). class GCAwareJITStubRoutine : public JITStubRoutine { public: - GCAwareJITStubRoutine(const MacroAssemblerCodeRef&, VM&, bool isClosureCall = false); + GCAwareJITStubRoutine(const MacroAssemblerCodeRef&, VM&); virtual ~GCAwareJITStubRoutine(); void markRequiredObjects(SlotVisitor& visitor) @@ -64,10 +62,8 @@ public: void deleteFromGC(); - bool isClosureCall() const { return m_isClosureCall; } - protected: - virtual void observeZeroRefCount(); + virtual void observeZeroRefCount() override; virtual void markRequiredObjectsInternal(SlotVisitor&); @@ -76,7 +72,6 @@ private: bool m_mayBeExecuting; bool m_isJettisoned; - bool m_isClosureCall; }; // Use this if you want to mark one additional object during GC if your stub @@ -88,12 +83,30 @@ public: virtual ~MarkingGCAwareJITStubRoutineWithOneObject(); protected: - virtual void markRequiredObjectsInternal(SlotVisitor&); + virtual void markRequiredObjectsInternal(SlotVisitor&) override; private: WriteBarrier m_object; }; + +// The stub has exception handlers in it. So it clears itself from exception +// handling table when it dies. It also frees space in CodeOrigin table +// for new exception handlers to use the same CallSiteIndex. 
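+//
+// Such a routine is what createJITStubRoutine() (declared below) hands back when it is
+// given a codeBlockForExceptionHandlers and a CallSiteIndex; that path requires an
+// optimizing-JIT code block and no extra marked object. An illustrative call site, with
+// every argument name a placeholder rather than something taken from these sources, is:
+//
+//     createJITStubRoutine(code, vm, owner, /* makesCalls */ true,
+//         /* object */ nullptr, codeBlockWithHandlers, callSiteIndex);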
+class GCAwareJITStubRoutineWithExceptionHandler : public GCAwareJITStubRoutine {
+public:
+    typedef GCAwareJITStubRoutine Base;
+
+    GCAwareJITStubRoutineWithExceptionHandler(const MacroAssemblerCodeRef&, VM&, CodeBlock*, CallSiteIndex);
+
+    void aboutToDie() override;
+    void observeZeroRefCount() override;
+
+private:
+    CodeBlock* m_codeBlockWithExceptionHandler;
+    CallSiteIndex m_exceptionHandlerCallSiteIndex;
+};
+
 // Helper for easily creating a GC-aware JIT stub routine. For the varargs,
 // pass zero or more JSCell*'s. This will either create a JITStubRoutine, a
 // GCAwareJITStubRoutine, or an ObjectMarkingGCAwareJITStubRoutine as
@@ -113,11 +126,15 @@ private:
 // this function using varargs, I ended up with more code than this simple
 // way.

-PassRefPtr<JITStubRoutine> createJITStubRoutine(
-    const MacroAssemblerCodeRef&, VM&, const JSCell* owner, bool makesCalls);
 PassRefPtr<JITStubRoutine> createJITStubRoutine(
     const MacroAssemblerCodeRef&, VM&, const JSCell* owner, bool makesCalls,
-    JSCell*);
+    JSCell* = nullptr,
+    CodeBlock* codeBlockForExceptionHandlers = nullptr, CallSiteIndex exceptionHandlingCallSiteIndex = CallSiteIndex(std::numeric_limits<unsigned>::max()));
+
+// Helper for the creation of simple stub routines that need no help from the GC. Note
+// that codeBlock gets "executed" more than once.
+#define FINALIZE_CODE_FOR_GC_AWARE_STUB(codeBlock, patchBuffer, makesCalls, cell, dataLogFArguments) \
+    (createJITStubRoutine(FINALIZE_CODE_FOR((codeBlock), (patchBuffer), dataLogFArguments), *(codeBlock)->vm(), (codeBlock), (makesCalls), (cell)))

 } // namespace JSC

diff --git a/Source/JavaScriptCore/jit/GPRInfo.cpp b/Source/JavaScriptCore/jit/GPRInfo.cpp
new file mode 100644
index 000000000..849354854
--- /dev/null
+++ b/Source/JavaScriptCore/jit/GPRInfo.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "GPRInfo.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+// This is in the .cpp file to work around clang issues.
+#if CPU(X86_64)
+const GPRReg GPRInfo::patchpointScratchRegister = MacroAssembler::s_scratchRegister;
+#elif CPU(ARM64)
+const GPRReg GPRInfo::patchpointScratchRegister = ARM64Registers::ip0;
+#endif
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/GPRInfo.h b/Source/JavaScriptCore/jit/GPRInfo.h
new file mode 100644
index 000000000..14a2ebd3d
--- /dev/null
+++ b/Source/JavaScriptCore/jit/GPRInfo.h
@@ -0,0 +1,918 @@
+/*
+ * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef GPRInfo_h
+#define GPRInfo_h
+
+#include "MacroAssembler.h"
+#include <array>
+#include <wtf/PrintStream.h>
+
+namespace JSC {
+
+// We use the same conventions in the baseline JIT as in the LLInt. If you
+// change mappings in the GPRInfo, you should change them in the offlineasm
+// compiler accordingly. The register naming conventions are described at the
+// top of the LowLevelInterpreter.asm file.
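+//
+// A GPRReg is simply the MacroAssembler's RegisterID, with InvalidGPRReg (-1) standing
+// for "no register". With USE(JSVALUE64) a whole JSValue fits in a single GPR, so the
+// JSValueRegs defined below wraps one register; with USE(JSVALUE32_64) it carries a
+// separate tag/payload pair. See the two JSValueRegs definitions below.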
+ +typedef MacroAssembler::RegisterID GPRReg; +#define InvalidGPRReg ((::JSC::GPRReg)-1) + +#if ENABLE(JIT) + +#if USE(JSVALUE64) +class JSValueRegs { +public: + JSValueRegs() + : m_gpr(InvalidGPRReg) + { + } + + explicit JSValueRegs(GPRReg gpr) + : m_gpr(gpr) + { + } + + static JSValueRegs payloadOnly(GPRReg gpr) + { + return JSValueRegs(gpr); + } + + static JSValueRegs withTwoAvailableRegs(GPRReg gpr, GPRReg) + { + return JSValueRegs(gpr); + } + + bool operator!() const { return m_gpr == InvalidGPRReg; } + explicit operator bool() const { return m_gpr != InvalidGPRReg; } + + bool operator==(JSValueRegs other) { return m_gpr == other.m_gpr; } + bool operator!=(JSValueRegs other) { return !(*this == other); } + + GPRReg gpr() const { return m_gpr; } + GPRReg tagGPR() const { return InvalidGPRReg; } + GPRReg payloadGPR() const { return m_gpr; } + + bool uses(GPRReg gpr) const { return m_gpr == gpr; } + +private: + GPRReg m_gpr; +}; + +class JSValueSource { +public: + JSValueSource() + : m_offset(notAddress()) + , m_base(InvalidGPRReg) + { + } + + JSValueSource(JSValueRegs regs) + : m_offset(notAddress()) + , m_base(regs.gpr()) + { + } + + explicit JSValueSource(GPRReg gpr) + : m_offset(notAddress()) + , m_base(gpr) + { + } + + JSValueSource(MacroAssembler::Address address) + : m_offset(address.offset) + , m_base(address.base) + { + ASSERT(m_offset != notAddress()); + ASSERT(m_base != InvalidGPRReg); + } + + static JSValueSource unboxedCell(GPRReg payloadGPR) + { + return JSValueSource(payloadGPR); + } + + bool operator!() const { return m_base == InvalidGPRReg; } + explicit operator bool() const { return m_base != InvalidGPRReg; } + + bool isAddress() const { return m_offset != notAddress(); } + + int32_t offset() const + { + ASSERT(isAddress()); + return m_offset; + } + + GPRReg base() const + { + ASSERT(isAddress()); + return m_base; + } + + GPRReg gpr() const + { + ASSERT(!isAddress()); + return m_base; + } + + MacroAssembler::Address asAddress() const { return MacroAssembler::Address(base(), offset()); } + +private: + static inline int32_t notAddress() { return 0x80000000; } + + int32_t m_offset; + GPRReg m_base; +}; +#endif // USE(JSVALUE64) + +#if USE(JSVALUE32_64) +class JSValueRegs { +public: + JSValueRegs() + : m_tagGPR(static_cast(InvalidGPRReg)) + , m_payloadGPR(static_cast(InvalidGPRReg)) + { + } + + JSValueRegs(GPRReg tagGPR, GPRReg payloadGPR) + : m_tagGPR(tagGPR) + , m_payloadGPR(payloadGPR) + { + } + + static JSValueRegs withTwoAvailableRegs(GPRReg gpr1, GPRReg gpr2) + { + return JSValueRegs(gpr1, gpr2); + } + + static JSValueRegs payloadOnly(GPRReg gpr) + { + return JSValueRegs(InvalidGPRReg, gpr); + } + + bool operator!() const { return !static_cast(*this); } + explicit operator bool() const + { + return static_cast(m_tagGPR) != InvalidGPRReg + || static_cast(m_payloadGPR) != InvalidGPRReg; + } + + bool operator==(JSValueRegs other) const + { + return m_tagGPR == other.m_tagGPR + && m_payloadGPR == other.m_payloadGPR; + } + bool operator!=(JSValueRegs other) const { return !(*this == other); } + + GPRReg tagGPR() const { return static_cast(m_tagGPR); } + GPRReg payloadGPR() const { return static_cast(m_payloadGPR); } + GPRReg gpr(WhichValueWord which) const + { + switch (which) { + case TagWord: + return tagGPR(); + case PayloadWord: + return payloadGPR(); + } + ASSERT_NOT_REACHED(); + return tagGPR(); + } + + bool uses(GPRReg gpr) const { return m_tagGPR == gpr || m_payloadGPR == gpr; } + +private: + int8_t m_tagGPR; + int8_t m_payloadGPR; +}; + +class JSValueSource { 
+public: + JSValueSource() + : m_offset(notAddress()) + , m_baseOrTag(static_cast(InvalidGPRReg)) + , m_payload(static_cast(InvalidGPRReg)) + , m_tagType(0) + { + } + + JSValueSource(JSValueRegs regs) + : m_offset(notAddress()) + , m_baseOrTag(regs.tagGPR()) + , m_payload(regs.payloadGPR()) + , m_tagType(0) + { + } + + JSValueSource(GPRReg tagGPR, GPRReg payloadGPR) + : m_offset(notAddress()) + , m_baseOrTag(static_cast(tagGPR)) + , m_payload(static_cast(payloadGPR)) + , m_tagType(0) + { + } + + JSValueSource(MacroAssembler::Address address) + : m_offset(address.offset) + , m_baseOrTag(static_cast(address.base)) + , m_payload(static_cast(InvalidGPRReg)) + , m_tagType(0) + { + ASSERT(m_offset != notAddress()); + ASSERT(static_cast(m_baseOrTag) != InvalidGPRReg); + } + + static JSValueSource unboxedCell(GPRReg payloadGPR) + { + JSValueSource result; + result.m_offset = notAddress(); + result.m_baseOrTag = static_cast(InvalidGPRReg); + result.m_payload = static_cast(payloadGPR); + result.m_tagType = static_cast(JSValue::CellTag); + return result; + } + + bool operator!() const { return !static_cast(*this); } + explicit operator bool() const + { + return static_cast(m_baseOrTag) != InvalidGPRReg + || static_cast(m_payload) != InvalidGPRReg; + } + + bool isAddress() const + { + ASSERT(!!*this); + return m_offset != notAddress(); + } + + int32_t offset() const + { + ASSERT(isAddress()); + return m_offset; + } + + GPRReg base() const + { + ASSERT(isAddress()); + return static_cast(m_baseOrTag); + } + + GPRReg tagGPR() const + { + ASSERT(!isAddress() && static_cast(m_baseOrTag) != InvalidGPRReg); + return static_cast(m_baseOrTag); + } + + GPRReg payloadGPR() const + { + ASSERT(!isAddress()); + return static_cast(m_payload); + } + + bool hasKnownTag() const + { + ASSERT(!!*this); + ASSERT(!isAddress()); + return static_cast(m_baseOrTag) == InvalidGPRReg; + } + + uint32_t tag() const + { + return static_cast(m_tagType); + } + + MacroAssembler::Address asAddress(unsigned additionalOffset = 0) const { return MacroAssembler::Address(base(), offset() + additionalOffset); } + +private: + static inline int32_t notAddress() { return 0x80000000; } + + int32_t m_offset; + int8_t m_baseOrTag; + int8_t m_payload; + int8_t m_tagType; // Contains the low bits of the tag. +}; +#endif // USE(JSVALUE32_64) + +#if CPU(X86) +#define NUMBER_OF_ARGUMENT_REGISTERS 0u +#define NUMBER_OF_CALLEE_SAVES_REGISTERS 0u + +class GPRInfo { +public: + typedef GPRReg RegisterType; + static const unsigned numberOfRegisters = 6; + static const unsigned numberOfArgumentRegisters = NUMBER_OF_ARGUMENT_REGISTERS; + + // Temporary registers. + static const GPRReg regT0 = X86Registers::eax; + static const GPRReg regT1 = X86Registers::edx; + static const GPRReg regT2 = X86Registers::ecx; + static const GPRReg regT3 = X86Registers::ebx; // Callee-save + static const GPRReg regT4 = X86Registers::esi; // Callee-save + static const GPRReg regT5 = X86Registers::edi; // Callee-save + static const GPRReg callFrameRegister = X86Registers::ebp; + // These constants provide the names for the general purpose argument & return value registers. 
+ static const GPRReg argumentGPR0 = X86Registers::ecx; // regT2 + static const GPRReg argumentGPR1 = X86Registers::edx; // regT1 + static const GPRReg argumentGPR2 = X86Registers::eax; // regT0 + static const GPRReg argumentGPR3 = X86Registers::ebx; // regT3 + static const GPRReg nonArgGPR0 = X86Registers::esi; // regT4 + static const GPRReg returnValueGPR = X86Registers::eax; // regT0 + static const GPRReg returnValueGPR2 = X86Registers::edx; // regT1 + static const GPRReg nonPreservedNonReturnGPR = X86Registers::ecx; + + static GPRReg toRegister(unsigned index) + { + ASSERT(index < numberOfRegisters); + static const GPRReg registerForIndex[numberOfRegisters] = { regT0, regT1, regT2, regT3, regT4, regT5 }; + return registerForIndex[index]; + } + + static GPRReg toArgumentRegister(unsigned) + { + UNREACHABLE_FOR_PLATFORM(); + return InvalidGPRReg; + } + + static unsigned toIndex(GPRReg reg) + { + ASSERT(reg != InvalidGPRReg); + ASSERT(static_cast(reg) < 8); + static const unsigned indexForRegister[8] = { 0, 2, 1, 3, InvalidIndex, InvalidIndex, 4, 5 }; + unsigned result = indexForRegister[reg]; + return result; + } + + static const char* debugName(GPRReg reg) + { + ASSERT(reg != InvalidGPRReg); + ASSERT(static_cast(reg) < 8); + static const char* nameForRegister[8] = { + "eax", "ecx", "edx", "ebx", + "esp", "ebp", "esi", "edi", + }; + return nameForRegister[reg]; + } + + static const unsigned InvalidIndex = 0xffffffff; +}; + +#endif // CPU(X86) + +#if CPU(X86_64) +#if !OS(WINDOWS) +#define NUMBER_OF_ARGUMENT_REGISTERS 6u +#define NUMBER_OF_CALLEE_SAVES_REGISTERS 5u +#else +#define NUMBER_OF_ARGUMENT_REGISTERS 4u +#define NUMBER_OF_CALLEE_SAVES_REGISTERS 7u +#endif + +class GPRInfo { +public: + typedef GPRReg RegisterType; + static const unsigned numberOfRegisters = 11; + static const unsigned numberOfArgumentRegisters = NUMBER_OF_ARGUMENT_REGISTERS; + + // These registers match the baseline JIT. + static const GPRReg callFrameRegister = X86Registers::ebp; + static const GPRReg tagTypeNumberRegister = X86Registers::r14; + static const GPRReg tagMaskRegister = X86Registers::r15; + + // Temporary registers. + static const GPRReg regT0 = X86Registers::eax; +#if !OS(WINDOWS) + static const GPRReg regT1 = X86Registers::esi; + static const GPRReg regT2 = X86Registers::edx; + static const GPRReg regT3 = X86Registers::ecx; + static const GPRReg regT4 = X86Registers::r8; + static const GPRReg regT5 = X86Registers::r10; + static const GPRReg regT6 = X86Registers::edi; + static const GPRReg regT7 = X86Registers::r9; +#else + static const GPRReg regT1 = X86Registers::edx; + static const GPRReg regT2 = X86Registers::r8; + static const GPRReg regT3 = X86Registers::r9; + static const GPRReg regT4 = X86Registers::r10; + static const GPRReg regT5 = X86Registers::ecx; +#endif + + static const GPRReg regCS0 = X86Registers::ebx; + +#if !OS(WINDOWS) + static const GPRReg regCS1 = X86Registers::r12; + static const GPRReg regCS2 = X86Registers::r13; + static const GPRReg regCS3 = X86Registers::r14; + static const GPRReg regCS4 = X86Registers::r15; +#else + static const GPRReg regCS1 = X86Registers::esi; + static const GPRReg regCS2 = X86Registers::edi; + static const GPRReg regCS3 = X86Registers::r12; + static const GPRReg regCS4 = X86Registers::r13; + static const GPRReg regCS5 = X86Registers::r14; + static const GPRReg regCS6 = X86Registers::r15; +#endif + + // These constants provide the names for the general purpose argument & return value registers. 
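+    // System V x86-64 passes the first six integer arguments in rdi, rsi, rdx, rcx, r8, r9,
+    // whereas the Windows x64 convention uses rcx, rdx, r8, r9; that is why the two blocks
+    // below differ and why NUMBER_OF_ARGUMENT_REGISTERS is 6 on one and 4 on the other.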
+#if !OS(WINDOWS) + static const GPRReg argumentGPR0 = X86Registers::edi; // regT6 + static const GPRReg argumentGPR1 = X86Registers::esi; // regT1 + static const GPRReg argumentGPR2 = X86Registers::edx; // regT2 + static const GPRReg argumentGPR3 = X86Registers::ecx; // regT3 + static const GPRReg argumentGPR4 = X86Registers::r8; // regT4 + static const GPRReg argumentGPR5 = X86Registers::r9; // regT7 +#else + static const GPRReg argumentGPR0 = X86Registers::ecx; // regT5 + static const GPRReg argumentGPR1 = X86Registers::edx; // regT1 + static const GPRReg argumentGPR2 = X86Registers::r8; // regT2 + static const GPRReg argumentGPR3 = X86Registers::r9; // regT3 +#endif + static const GPRReg nonArgGPR0 = X86Registers::r10; // regT5 (regT4 on Windows) + static const GPRReg returnValueGPR = X86Registers::eax; // regT0 + static const GPRReg returnValueGPR2 = X86Registers::edx; // regT1 or regT2 + static const GPRReg nonPreservedNonReturnGPR = X86Registers::r10; // regT5 (regT4 on Windows) + static const GPRReg nonPreservedNonArgumentGPR = X86Registers::r10; // regT5 (regT4 on Windows) + + // FIXME: I believe that all uses of this are dead in the sense that it just causes the scratch + // register allocator to select a different register and potentially spill things. It would be better + // if we instead had a more explicit way of saying that we don't have a scratch register. + static const GPRReg patchpointScratchRegister; + + static GPRReg toRegister(unsigned index) + { + ASSERT(index < numberOfRegisters); +#if !OS(WINDOWS) + static const GPRReg registerForIndex[numberOfRegisters] = { regT0, regT1, regT2, regT3, regT4, regT5, regT6, regT7, regCS0, regCS1, regCS2 }; +#else + static const GPRReg registerForIndex[numberOfRegisters] = { regT0, regT1, regT2, regT3, regT4, regT5, regCS0, regCS1, regCS2, regCS3, regCS4 }; +#endif + return registerForIndex[index]; + } + + static GPRReg toArgumentRegister(unsigned index) + { + ASSERT(index < numberOfArgumentRegisters); +#if !OS(WINDOWS) + static const GPRReg registerForIndex[numberOfArgumentRegisters] = { argumentGPR0, argumentGPR1, argumentGPR2, argumentGPR3, argumentGPR4, argumentGPR5 }; +#else + static const GPRReg registerForIndex[numberOfArgumentRegisters] = { argumentGPR0, argumentGPR1, argumentGPR2, argumentGPR3 }; +#endif + return registerForIndex[index]; + } + + static unsigned toIndex(GPRReg reg) + { + ASSERT(reg != InvalidGPRReg); + ASSERT(static_cast(reg) < 16); +#if !OS(WINDOWS) + static const unsigned indexForRegister[16] = { 0, 3, 2, 8, InvalidIndex, InvalidIndex, 1, 6, 4, 7, 5, InvalidIndex, 9, 10, InvalidIndex, InvalidIndex }; +#else + static const unsigned indexForRegister[16] = { 0, 5, 1, 6, InvalidIndex, InvalidIndex, 7, 8, 2, 3, 4, InvalidIndex, 9, 10, InvalidIndex, InvalidIndex }; +#endif + return indexForRegister[reg]; + } + + static const char* debugName(GPRReg reg) + { + ASSERT(reg != InvalidGPRReg); + ASSERT(static_cast(reg) < 16); + static const char* nameForRegister[16] = { + "rax", "rcx", "rdx", "rbx", + "rsp", "rbp", "rsi", "rdi", + "r8", "r9", "r10", "r11", + "r12", "r13", "r14", "r15" + }; + return nameForRegister[reg]; + } + + static const std::array& reservedRegisters() + { + static const std::array reservedRegisters { { + MacroAssembler::s_scratchRegister, + tagTypeNumberRegister, + tagMaskRegister, + } }; + return reservedRegisters; + } + + static const unsigned InvalidIndex = 0xffffffff; +}; + +#endif // CPU(X86_64) + +#if CPU(ARM) +#define NUMBER_OF_ARGUMENT_REGISTERS 4u +#define NUMBER_OF_CALLEE_SAVES_REGISTERS 
0u + +class GPRInfo { +public: + typedef GPRReg RegisterType; + static const unsigned numberOfRegisters = 9; + static const unsigned numberOfArgumentRegisters = NUMBER_OF_ARGUMENT_REGISTERS; + + // Temporary registers. + static const GPRReg regT0 = ARMRegisters::r0; + static const GPRReg regT1 = ARMRegisters::r1; + static const GPRReg regT2 = ARMRegisters::r2; + static const GPRReg regT3 = ARMRegisters::r3; + static const GPRReg regT4 = ARMRegisters::r8; + static const GPRReg regT5 = ARMRegisters::r9; + static const GPRReg regT6 = ARMRegisters::r10; +#if CPU(ARM_THUMB2) + static const GPRReg regT7 = ARMRegisters::r11; +#else + static const GPRReg regT7 = ARMRegisters::r7; +#endif + static const GPRReg regT8 = ARMRegisters::r4; + // These registers match the baseline JIT. + static const GPRReg callFrameRegister = ARMRegisters::fp; + // These constants provide the names for the general purpose argument & return value registers. + static const GPRReg argumentGPR0 = ARMRegisters::r0; // regT0 + static const GPRReg argumentGPR1 = ARMRegisters::r1; // regT1 + static const GPRReg argumentGPR2 = ARMRegisters::r2; // regT2 + static const GPRReg argumentGPR3 = ARMRegisters::r3; // regT3 + static const GPRReg nonArgGPR0 = ARMRegisters::r4; // regT8 + static const GPRReg nonArgGPR1 = ARMRegisters::r8; // regT4 + static const GPRReg returnValueGPR = ARMRegisters::r0; // regT0 + static const GPRReg returnValueGPR2 = ARMRegisters::r1; // regT1 + static const GPRReg nonPreservedNonReturnGPR = ARMRegisters::r5; + + static GPRReg toRegister(unsigned index) + { + ASSERT(index < numberOfRegisters); + static const GPRReg registerForIndex[numberOfRegisters] = { regT0, regT1, regT2, regT3, regT4, regT5, regT6, regT7, regT8 }; + return registerForIndex[index]; + } + + static GPRReg toArgumentRegister(unsigned index) + { + ASSERT(index < numberOfArgumentRegisters); + static const GPRReg registerForIndex[numberOfArgumentRegisters] = { argumentGPR0, argumentGPR1, argumentGPR2, argumentGPR3 }; + return registerForIndex[index]; + } + + static unsigned toIndex(GPRReg reg) + { + ASSERT(reg != InvalidGPRReg); + ASSERT(static_cast(reg) < 16); + static const unsigned indexForRegister[16] = +#if CPU(ARM_THUMB2) + { 0, 1, 2, 3, 8, InvalidIndex, InvalidIndex, InvalidIndex, 4, 5, 6, 7, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex }; +#else + { 0, 1, 2, 3, 8, InvalidIndex, InvalidIndex, 7, 4, 5, 6, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex }; +#endif + unsigned result = indexForRegister[reg]; + return result; + } + + static const char* debugName(GPRReg reg) + { + ASSERT(reg != InvalidGPRReg); + ASSERT(static_cast(reg) < 16); + static const char* nameForRegister[16] = { + "r0", "r1", "r2", "r3", + "r4", "r5", "r6", "r7", + "r8", "r9", "r10", "r11", + "r12", "r13", "r14", "r15" + }; + return nameForRegister[reg]; + } + + static const unsigned InvalidIndex = 0xffffffff; +}; + +#endif // CPU(ARM) + +#if CPU(ARM64) +#define NUMBER_OF_ARGUMENT_REGISTERS 8u +// Callee Saves includes x19..x28 and FP registers q8..q15 +#define NUMBER_OF_CALLEE_SAVES_REGISTERS 18u + +class GPRInfo { +public: + typedef GPRReg RegisterType; + static const unsigned numberOfRegisters = 16; + static const unsigned numberOfArgumentRegisters = 8; + + // These registers match the baseline JIT. 
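+    // In AAPCS64, x19-x28 are callee-saved, which is why the tag registers below live in
+    // x27/x28: their values survive calls into C++ without explicit spills. x29 is the
+    // frame pointer, used here as callFrameRegister.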
+ static const GPRReg callFrameRegister = ARM64Registers::fp; + static const GPRReg tagTypeNumberRegister = ARM64Registers::x27; + static const GPRReg tagMaskRegister = ARM64Registers::x28; + static const GPRReg dataTempRegister = MacroAssembler::dataTempRegister; + static const GPRReg memoryTempRegister = MacroAssembler::memoryTempRegister; + // Temporary registers. + static const GPRReg regT0 = ARM64Registers::x0; + static const GPRReg regT1 = ARM64Registers::x1; + static const GPRReg regT2 = ARM64Registers::x2; + static const GPRReg regT3 = ARM64Registers::x3; + static const GPRReg regT4 = ARM64Registers::x4; + static const GPRReg regT5 = ARM64Registers::x5; + static const GPRReg regT6 = ARM64Registers::x6; + static const GPRReg regT7 = ARM64Registers::x7; + static const GPRReg regT8 = ARM64Registers::x8; + static const GPRReg regT9 = ARM64Registers::x9; + static const GPRReg regT10 = ARM64Registers::x10; + static const GPRReg regT11 = ARM64Registers::x11; + static const GPRReg regT12 = ARM64Registers::x12; + static const GPRReg regT13 = ARM64Registers::x13; + static const GPRReg regT14 = ARM64Registers::x14; + static const GPRReg regT15 = ARM64Registers::x15; + static const GPRReg regCS0 = ARM64Registers::x19; // Used by FTL only + static const GPRReg regCS1 = ARM64Registers::x20; // Used by FTL only + static const GPRReg regCS2 = ARM64Registers::x21; // Used by FTL only + static const GPRReg regCS3 = ARM64Registers::x22; // Used by FTL only + static const GPRReg regCS4 = ARM64Registers::x23; // Used by FTL only + static const GPRReg regCS5 = ARM64Registers::x24; // Used by FTL only + static const GPRReg regCS6 = ARM64Registers::x25; // Used by FTL only + static const GPRReg regCS7 = ARM64Registers::x26; + static const GPRReg regCS8 = ARM64Registers::x27; // tagTypeNumber + static const GPRReg regCS9 = ARM64Registers::x28; // tagMask + // These constants provide the names for the general purpose argument & return value registers. + static const GPRReg argumentGPR0 = ARM64Registers::x0; // regT0 + static const GPRReg argumentGPR1 = ARM64Registers::x1; // regT1 + static const GPRReg argumentGPR2 = ARM64Registers::x2; // regT2 + static const GPRReg argumentGPR3 = ARM64Registers::x3; // regT3 + static const GPRReg argumentGPR4 = ARM64Registers::x4; // regT4 + static const GPRReg argumentGPR5 = ARM64Registers::x5; // regT5 + static const GPRReg argumentGPR6 = ARM64Registers::x6; // regT6 + static const GPRReg argumentGPR7 = ARM64Registers::x7; // regT7 + static const GPRReg nonArgGPR0 = ARM64Registers::x8; // regT8 + static const GPRReg nonArgGPR1 = ARM64Registers::x9; // regT9 + static const GPRReg returnValueGPR = ARM64Registers::x0; // regT0 + static const GPRReg returnValueGPR2 = ARM64Registers::x1; // regT1 + static const GPRReg nonPreservedNonReturnGPR = ARM64Registers::x2; + static const GPRReg nonPreservedNonArgumentGPR = ARM64Registers::x8; + static const GPRReg patchpointScratchRegister; + + // GPRReg mapping is direct, the machine register numbers can + // be used directly as indices into the GPR RegisterBank. 
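+    // For example, toIndex(ARM64Registers::x3) == 3 and toRegister(3) == ARM64Registers::x3;
+    // anything above regT15 (x16 and up) maps to InvalidIndex.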
+ COMPILE_ASSERT(ARM64Registers::q0 == 0, q0_is_0); + COMPILE_ASSERT(ARM64Registers::q1 == 1, q1_is_1); + COMPILE_ASSERT(ARM64Registers::q2 == 2, q2_is_2); + COMPILE_ASSERT(ARM64Registers::q3 == 3, q3_is_3); + COMPILE_ASSERT(ARM64Registers::q4 == 4, q4_is_4); + COMPILE_ASSERT(ARM64Registers::q5 == 5, q5_is_5); + COMPILE_ASSERT(ARM64Registers::q6 == 6, q6_is_6); + COMPILE_ASSERT(ARM64Registers::q7 == 7, q7_is_7); + COMPILE_ASSERT(ARM64Registers::q8 == 8, q8_is_8); + COMPILE_ASSERT(ARM64Registers::q9 == 9, q9_is_9); + COMPILE_ASSERT(ARM64Registers::q10 == 10, q10_is_10); + COMPILE_ASSERT(ARM64Registers::q11 == 11, q11_is_11); + COMPILE_ASSERT(ARM64Registers::q12 == 12, q12_is_12); + COMPILE_ASSERT(ARM64Registers::q13 == 13, q13_is_13); + COMPILE_ASSERT(ARM64Registers::q14 == 14, q14_is_14); + COMPILE_ASSERT(ARM64Registers::q15 == 15, q15_is_15); + static GPRReg toRegister(unsigned index) + { + return (GPRReg)index; + } + static unsigned toIndex(GPRReg reg) + { + if (reg > regT15) + return InvalidIndex; + return (unsigned)reg; + } + + static GPRReg toArgumentRegister(unsigned index) + { + ASSERT(index < numberOfArgumentRegisters); + return toRegister(index); + } + + static const char* debugName(GPRReg reg) + { + ASSERT(reg != InvalidGPRReg); + ASSERT(static_cast(reg) < 32); + static const char* nameForRegister[32] = { + "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", + "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15", + "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23", + "r24", "r25", "r26", "r27", "r28", "fp", "lr", "sp" + }; + return nameForRegister[reg]; + } + + static const std::array& reservedRegisters() + { + static const std::array reservedRegisters { { + dataTempRegister, + memoryTempRegister, + tagTypeNumberRegister, + tagMaskRegister, + } }; + return reservedRegisters; + } + + static const unsigned InvalidIndex = 0xffffffff; +}; + +#endif // CPU(ARM64) + +#if CPU(MIPS) +#define NUMBER_OF_ARGUMENT_REGISTERS 4u +#define NUMBER_OF_CALLEE_SAVES_REGISTERS 0u + +class GPRInfo { +public: + typedef GPRReg RegisterType; + static const unsigned numberOfRegisters = 7; + static const unsigned numberOfArgumentRegisters = NUMBER_OF_ARGUMENT_REGISTERS; + + // regT0 must be v0 for returning a 32-bit value. + // regT1 must be v1 for returning a pair of 32-bit value. + + // Temporary registers. + static const GPRReg regT0 = MIPSRegisters::v0; + static const GPRReg regT1 = MIPSRegisters::v1; + static const GPRReg regT2 = MIPSRegisters::t2; + static const GPRReg regT3 = MIPSRegisters::t3; + static const GPRReg regT4 = MIPSRegisters::t4; + static const GPRReg regT5 = MIPSRegisters::t5; + static const GPRReg regT6 = MIPSRegisters::t6; + // These registers match the baseline JIT. + static const GPRReg callFrameRegister = MIPSRegisters::fp; + // These constants provide the names for the general purpose argument & return value registers. 
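+    // MIPS o32 convention: a0-a3 carry the first four arguments, and v0/v1 carry results
+    // (v1 holding the second word of a 64-bit pair), which is what the constants below encode.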
+ static const GPRReg argumentGPR0 = MIPSRegisters::a0; + static const GPRReg argumentGPR1 = MIPSRegisters::a1; + static const GPRReg argumentGPR2 = MIPSRegisters::a2; + static const GPRReg argumentGPR3 = MIPSRegisters::a3; + static const GPRReg nonArgGPR0 = regT0; + static const GPRReg returnValueGPR = regT0; + static const GPRReg returnValueGPR2 = regT1; + static const GPRReg nonPreservedNonReturnGPR = regT2; + + static GPRReg toRegister(unsigned index) + { + ASSERT(index < numberOfRegisters); + static const GPRReg registerForIndex[numberOfRegisters] = { regT0, regT1, regT2, regT3, regT4, regT5, regT6 }; + return registerForIndex[index]; + } + + static GPRReg toArgumentRegister(unsigned index) + { + ASSERT(index < numberOfArgumentRegisters); + static const GPRReg registerForIndex[numberOfArgumentRegisters] = { argumentGPR0, argumentGPR1, argumentGPR2, argumentGPR3 }; + return registerForIndex[index]; + } + + static unsigned toIndex(GPRReg reg) + { + ASSERT(reg != InvalidGPRReg); + ASSERT(reg < 32); + static const unsigned indexForRegister[32] = { + InvalidIndex, InvalidIndex, 0, 1, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, + InvalidIndex, InvalidIndex, 2, 3, 4, 5, 6, InvalidIndex, + InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, + InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex + }; + unsigned result = indexForRegister[reg]; + return result; + } + + static const char* debugName(GPRReg reg) + { + ASSERT(reg != InvalidGPRReg); + ASSERT(reg < 16); + static const char* nameForRegister[16] = { + "zero", "at", "v0", "v1", + "a0", "a1", "a2", "a3", + "t0", "t1", "t2", "t3", + "t4", "t5", "t6", "t7" + }; + return nameForRegister[reg]; + } + + static const unsigned InvalidIndex = 0xffffffff; +}; + +#endif // CPU(MIPS) + +#if CPU(SH4) +#define NUMBER_OF_ARGUMENT_REGISTERS 4u +#define NUMBER_OF_CALLEE_SAVES_REGISTERS 0u + +class GPRInfo { +public: + typedef GPRReg RegisterType; + static const unsigned numberOfRegisters = 10; + + // Note: regT3 is required to be callee-preserved. + + // Temporary registers. + static const GPRReg regT0 = SH4Registers::r0; + static const GPRReg regT1 = SH4Registers::r1; + static const GPRReg regT2 = SH4Registers::r6; + static const GPRReg regT3 = SH4Registers::r7; + static const GPRReg regT4 = SH4Registers::r2; + static const GPRReg regT5 = SH4Registers::r3; + static const GPRReg regT6 = SH4Registers::r4; + static const GPRReg regT7 = SH4Registers::r5; + static const GPRReg regT8 = SH4Registers::r8; + static const GPRReg regT9 = SH4Registers::r9; + // These registers match the baseline JIT. + static const GPRReg cachedResultRegister = regT0; + static const GPRReg cachedResultRegister2 = regT1; + static const GPRReg callFrameRegister = SH4Registers::fp; + // These constants provide the names for the general purpose argument & return value registers. 
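+    // On SH4 the C calling convention passes the first four integer arguments in r4-r7 and
+    // returns values in r0/r1, which the constants below mirror.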
+ static const GPRReg argumentGPR0 = SH4Registers::r4; // regT6 + static const GPRReg argumentGPR1 = SH4Registers::r5; // regT7 + static const GPRReg argumentGPR2 = SH4Registers::r6; // regT2 + static const GPRReg argumentGPR3 = SH4Registers::r7; // regT3 + static const GPRReg nonArgGPR0 = regT4; + static const GPRReg returnValueGPR = regT0; + static const GPRReg returnValueGPR2 = regT1; + static const GPRReg nonPreservedNonReturnGPR = regT2; + + static GPRReg toRegister(unsigned index) + { + ASSERT(index < numberOfRegisters); + static const GPRReg registerForIndex[numberOfRegisters] = { regT0, regT1, regT2, regT3, regT4, regT5, regT6, regT7, regT8, regT9 }; + return registerForIndex[index]; + } + + static unsigned toIndex(GPRReg reg) + { + ASSERT(reg != InvalidGPRReg); + ASSERT(reg < 14); + static const unsigned indexForRegister[14] = { 0, 1, 4, 5, 6, 7, 2, 3, 8, 9, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex }; + unsigned result = indexForRegister[reg]; + return result; + } + + static const char* debugName(GPRReg reg) + { + ASSERT(reg != InvalidGPRReg); + ASSERT(reg < 16); + static const char* nameForRegister[16] = { + "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", + "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" + }; + return nameForRegister[reg]; + } + + static const unsigned InvalidIndex = 0xffffffff; +}; + +#endif // CPU(SH4) + +inline GPRReg argumentRegisterFor(unsigned argumentIndex) +{ +#if USE(JSVALUE64) + if (argumentIndex >= NUMBER_OF_ARGUMENT_REGISTERS) + return InvalidGPRReg; + + return GPRInfo::toArgumentRegister(argumentIndex); +#else + UNUSED_PARAM(argumentIndex); + + return InvalidGPRReg; +#endif +} + +// The baseline JIT uses "accumulator" style execution with regT0 (for 64-bit) +// and regT0 + regT1 (for 32-bit) serving as the accumulator register(s) for +// passing results of one opcode to the next. Hence: +COMPILE_ASSERT(GPRInfo::regT0 == GPRInfo::returnValueGPR, regT0_must_equal_returnValueGPR); +#if USE(JSVALUE32_64) +COMPILE_ASSERT(GPRInfo::regT1 == GPRInfo::returnValueGPR2, regT1_must_equal_returnValueGPR2); +#endif + +#endif // ENABLE(JIT) + +} // namespace JSC + +namespace WTF { + +inline void printInternal(PrintStream& out, JSC::GPRReg reg) +{ +#if ENABLE(JIT) + out.print("%", JSC::GPRInfo::debugName(reg)); +#else + out.printf("%%r%d", reg); +#endif +} + +} // namespace WTF + +#endif diff --git a/Source/JavaScriptCore/jit/HostCallReturnValue.cpp b/Source/JavaScriptCore/jit/HostCallReturnValue.cpp index 528fb2bc4..e8d01916b 100644 --- a/Source/JavaScriptCore/jit/HostCallReturnValue.cpp +++ b/Source/JavaScriptCore/jit/HostCallReturnValue.cpp @@ -29,6 +29,7 @@ #include "CallFrame.h" #include "JSCJSValueInlines.h" #include "JSObject.h" +#include "JSCInlines.h" #include diff --git a/Source/JavaScriptCore/jit/HostCallReturnValue.h b/Source/JavaScriptCore/jit/HostCallReturnValue.h index f4c8bc703..71ff4e5bd 100644 --- a/Source/JavaScriptCore/jit/HostCallReturnValue.h +++ b/Source/JavaScriptCore/jit/HostCallReturnValue.h @@ -28,7 +28,6 @@ #include "JSCJSValue.h" #include "MacroAssemblerCodeRef.h" -#include #if ENABLE(JIT) @@ -42,7 +41,7 @@ namespace JSC { extern "C" EncodedJSValue HOST_CALL_RETURN_VALUE_OPTION getHostCallReturnValue() REFERENCED_FROM_ASM WTF_INTERNAL; -#if COMPILER(GCC) +#if COMPILER(GCC_OR_CLANG) // This is a public declaration only to convince CLANG not to elide it. 
extern "C" EncodedJSValue HOST_CALL_RETURN_VALUE_OPTION getHostCallReturnValueWithExecState(ExecState*) REFERENCED_FROM_ASM WTF_INTERNAL; @@ -52,11 +51,11 @@ inline void initializeHostCallReturnValue() getHostCallReturnValueWithExecState(0); } -#else // COMPILER(GCC) +#else // COMPILER(GCC_OR_CLANG) inline void initializeHostCallReturnValue() { } -#endif // COMPILER(GCC) +#endif // COMPILER(GCC_OR_CLANG) } // namespace JSC diff --git a/Source/JavaScriptCore/jit/IntrinsicEmitter.cpp b/Source/JavaScriptCore/jit/IntrinsicEmitter.cpp new file mode 100644 index 000000000..5243b49ea --- /dev/null +++ b/Source/JavaScriptCore/jit/IntrinsicEmitter.cpp @@ -0,0 +1,134 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" + +#if ENABLE(JIT) + +#include "CCallHelpers.h" +#include "CallFrame.h" +#include "CodeBlock.h" +#include "JSArrayBufferView.h" +#include "JSCJSValueInlines.h" +#include "JSCellInlines.h" +#include "PolymorphicAccess.h" + +namespace JSC { + +typedef CCallHelpers::TrustedImm32 TrustedImm32; +typedef CCallHelpers::Imm32 Imm32; +typedef CCallHelpers::TrustedImmPtr TrustedImmPtr; +typedef CCallHelpers::ImmPtr ImmPtr; +typedef CCallHelpers::TrustedImm64 TrustedImm64; +typedef CCallHelpers::Imm64 Imm64; + +bool AccessCase::canEmitIntrinsicGetter(JSFunction* getter, Structure* structure) +{ + + switch (getter->intrinsic()) { + case TypedArrayByteOffsetIntrinsic: + case TypedArrayByteLengthIntrinsic: + case TypedArrayLengthIntrinsic: { + TypedArrayType type = structure->classInfo()->typedArrayStorageType; + + if (!isTypedView(type)) + return false; + + return true; + } + default: + return false; + } + RELEASE_ASSERT_NOT_REACHED(); +} + +void AccessCase::emitIntrinsicGetter(AccessGenerationState& state) +{ + CCallHelpers& jit = *state.jit; + JSValueRegs valueRegs = state.valueRegs; + GPRReg baseGPR = state.baseGPR; + GPRReg valueGPR = valueRegs.payloadGPR(); + + switch (intrinsic()) { + case TypedArrayLengthIntrinsic: { + jit.load32(MacroAssembler::Address(state.baseGPR, JSArrayBufferView::offsetOfLength()), valueGPR); + jit.boxInt32(valueGPR, valueRegs, CCallHelpers::DoNotHaveTagRegisters); + state.succeed(); + return; + } + + case TypedArrayByteLengthIntrinsic: { + TypedArrayType type = structure()->classInfo()->typedArrayStorageType; + + jit.load32(MacroAssembler::Address(state.baseGPR, JSArrayBufferView::offsetOfLength()), valueGPR); + + if (elementSize(type) > 1) { + // We can use a bitshift here since we TypedArrays cannot have byteLength that overflows an int32. + jit.lshift32(valueGPR, Imm32(logElementSize(type)), valueGPR); + } + + jit.boxInt32(valueGPR, valueRegs, CCallHelpers::DoNotHaveTagRegisters); + state.succeed(); + return; + } + + case TypedArrayByteOffsetIntrinsic: { + GPRReg scratchGPR = state.scratchGPR; + + CCallHelpers::Jump emptyByteOffset = jit.branch32( + MacroAssembler::NotEqual, + MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfMode()), + TrustedImm32(WastefulTypedArray)); + + jit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR); + jit.loadPtr(MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfVector()), valueGPR); + jit.loadPtr(MacroAssembler::Address(scratchGPR, Butterfly::offsetOfArrayBuffer()), scratchGPR); + jit.loadPtr(MacroAssembler::Address(scratchGPR, ArrayBuffer::offsetOfData()), scratchGPR); + jit.subPtr(scratchGPR, valueGPR); + + CCallHelpers::Jump done = jit.jump(); + + emptyByteOffset.link(&jit); + jit.move(TrustedImmPtr(0), valueGPR); + + done.link(&jit); + + jit.boxInt32(valueGPR, valueRegs, CCallHelpers::DoNotHaveTagRegisters); + state.succeed(); + return; + } + + default: + break; + } + RELEASE_ASSERT_NOT_REACHED(); +} + +} // namespace JSC + +#endif // ENABLE(JIT) diff --git a/Source/JavaScriptCore/jit/JIT.cpp b/Source/JavaScriptCore/jit/JIT.cpp index 9b46d8792..ac8c132aa 100644 --- a/Source/JavaScriptCore/jit/JIT.cpp +++ b/Source/JavaScriptCore/jit/JIT.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008, 2009, 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2009, 2012-2015 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,77 +26,54 @@ #include "config.h" #if ENABLE(JIT) -#include "JIT.h" -// This probably does not belong here; adding here for now as a quick Windows build fix. -#if ENABLE(ASSEMBLER) && CPU(X86) && !OS(MAC_OS_X) -#include "MacroAssembler.h" -JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2; -#endif +#include "JIT.h" #include "CodeBlock.h" -#include -#include "DFGNode.h" // for DFG_SUCCESS_STATS +#include "CodeBlockWithJITType.h" +#include "DFGCapabilities.h" #include "Interpreter.h" #include "JITInlines.h" -#include "JITStubCall.h" +#include "JITOperations.h" #include "JSArray.h" #include "JSFunction.h" #include "LinkBuffer.h" -#include "Operations.h" -#include "RepatchBuffer.h" +#include "MaxFrameExtentForSlowPathCall.h" +#include "JSCInlines.h" +#include "PCToCodeOriginMap.h" +#include "ProfilerDatabase.h" #include "ResultType.h" #include "SamplingTool.h" +#include "SlowPathCall.h" +#include "StackAlignment.h" +#include "TypeProfilerLog.h" +#include using namespace std; namespace JSC { -void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction) +void ctiPatchCallByReturnAddress(ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction) { - RepatchBuffer repatchBuffer(codeblock); - repatchBuffer.relinkNearCallerToTrampoline(returnAddress, newCalleeFunction); -} - -void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction) -{ - RepatchBuffer repatchBuffer(codeblock); - repatchBuffer.relinkCallerToTrampoline(returnAddress, newCalleeFunction); -} - -void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction) -{ - RepatchBuffer repatchBuffer(codeblock); - repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction); + MacroAssembler::repatchCall( + CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), + newCalleeFunction); } JIT::JIT(VM* vm, CodeBlock* codeBlock) - : m_interpreter(vm->interpreter) - , m_vm(vm) - , m_codeBlock(codeBlock) - , m_labels(0) - , m_bytecodeOffset((unsigned)-1) - , m_propertyAccessInstructionIndex(UINT_MAX) + : JSInterfaceJIT(vm, codeBlock) + , m_interpreter(vm->interpreter) + , m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0) + , m_bytecodeOffset(std::numeric_limits::max()) + , m_getByIdIndex(UINT_MAX) + , m_putByIdIndex(UINT_MAX) , m_byValInstructionIndex(UINT_MAX) - , m_globalResolveInfoIndex(UINT_MAX) , m_callLinkInfoIndex(UINT_MAX) -#if USE(JSVALUE32_64) - , m_jumpTargetIndex(0) - , m_mappedBytecodeOffset((unsigned)-1) - , m_mappedVirtualRegisterIndex(JSStack::ReturnPC) - , m_mappedTag((RegisterID)-1) - , m_mappedPayload((RegisterID)-1) -#else - , m_lastResultBytecodeRegister(std::numeric_limits::max()) - , m_jumpTargetsPosition(0) -#endif , m_randomGenerator(cryptographicallyRandomNumber()) -#if ENABLE(VALUE_PROFILER) + , m_pcToCodeOriginMapBuilder(*vm) , m_canBeOptimized(false) , m_shouldEmitProfiling(false) -#endif { - m_labels.reserveCapacity(codeBlock ? 
codeBlock->numberOfInstructions() : 0); } #if ENABLE(DFG_JIT) @@ -105,56 +82,55 @@ void JIT::emitEnterOptimizationCheck() if (!canBeOptimized()) return; - Jump skipOptimize = branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForReturn()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())); - JITStubCall stubCall(this, cti_optimize); - stubCall.addArgument(TrustedImm32(m_bytecodeOffset)); + JumpList skipOptimize; + + skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter()))); ASSERT(!m_bytecodeOffset); - stubCall.call(); + + copyCalleeSavesFromFrameOrRegisterToVMCalleeSavesBuffer(); + + callOperation(operationOptimize, m_bytecodeOffset); + skipOptimize.append(branchTestPtr(Zero, returnValueGPR)); + move(returnValueGPR2, stackPointerRegister); + jump(returnValueGPR); skipOptimize.link(this); } #endif -#define NEXT_OPCODE(name) \ - m_bytecodeOffset += OPCODE_LENGTH(name); \ - break; - -#if USE(JSVALUE32_64) -#define DEFINE_BINARY_OP(name) \ - case name: { \ - JITStubCall stubCall(this, cti_##name); \ - stubCall.addArgument(currentInstruction[2].u.operand); \ - stubCall.addArgument(currentInstruction[3].u.operand); \ - stubCall.call(currentInstruction[1].u.operand); \ - NEXT_OPCODE(name); \ - } +void JIT::emitNotifyWrite(WatchpointSet* set) +{ + if (!set || set->state() == IsInvalidated) + return; + + addSlowCase(branch8(NotEqual, AbsoluteAddress(set->addressOfState()), TrustedImm32(IsInvalidated))); +} -#define DEFINE_UNARY_OP(name) \ - case name: { \ - JITStubCall stubCall(this, cti_##name); \ - stubCall.addArgument(currentInstruction[2].u.operand); \ - stubCall.call(currentInstruction[1].u.operand); \ - NEXT_OPCODE(name); \ - } +void JIT::emitNotifyWrite(GPRReg pointerToSet) +{ + addSlowCase(branch8(NotEqual, Address(pointerToSet, WatchpointSet::offsetOfState()), TrustedImm32(IsInvalidated))); +} -#else // USE(JSVALUE32_64) +void JIT::assertStackPointerOffset() +{ + if (ASSERT_DISABLED) + return; + + addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT0); + Jump ok = branchPtr(Equal, regT0, stackPointerRegister); + breakpoint(); + ok.link(this); +} -#define DEFINE_BINARY_OP(name) \ - case name: { \ - JITStubCall stubCall(this, cti_##name); \ - stubCall.addArgument(currentInstruction[2].u.operand, regT2); \ - stubCall.addArgument(currentInstruction[3].u.operand, regT2); \ - stubCall.call(currentInstruction[1].u.operand); \ - NEXT_OPCODE(name); \ - } +#define NEXT_OPCODE(name) \ + m_bytecodeOffset += OPCODE_LENGTH(name); \ + break; -#define DEFINE_UNARY_OP(name) \ - case name: { \ - JITStubCall stubCall(this, cti_##name); \ - stubCall.addArgument(currentInstruction[2].u.operand, regT2); \ - stubCall.call(currentInstruction[1].u.operand); \ - NEXT_OPCODE(name); \ +#define DEFINE_SLOW_OP(name) \ + case op_##name: { \ + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_##name); \ + slowPathCall.call(); \ + NEXT_OPCODE(op_##name); \ } -#endif // USE(JSVALUE32_64) #define DEFINE_OP(name) \ case name: { \ @@ -170,12 +146,13 @@ void JIT::emitEnterOptimizationCheck() void JIT::privateCompileMainPass() { + jitAssertTagsInPlace(); + jitAssertArgumentCountSane(); + Instruction* instructionsBegin = m_codeBlock->instructions().begin(); unsigned instructionCount = m_codeBlock->instructions().size(); - m_globalResolveInfoIndex = 0; m_callLinkInfoIndex = 0; - m_labels.resize(instructionCount); for (m_bytecodeOffset = 0; 
m_bytecodeOffset < instructionCount; ) { if (m_disassembler) @@ -183,16 +160,13 @@ void JIT::privateCompileMainPass() Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset; ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset); + m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeOffset)); + #if ENABLE(OPCODE_SAMPLING) if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice. sampleInstruction(currentInstruction); #endif -#if USE(JSVALUE64) - if (atJumpTarget()) - killLastResultRegister(); -#endif - m_labels[m_bytecodeOffset] = label(); #if ENABLE(JIT_VERBOSE) @@ -201,60 +175,71 @@ void JIT::privateCompileMainPass() OpcodeID opcodeID = m_interpreter->getOpcodeID(currentInstruction->u.opcode); - if (m_compilation && opcodeID != op_call_put_result) { + if (m_compilation) { add64( TrustedImm32(1), AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin( m_compilation->bytecodes(), m_bytecodeOffset)))->address())); } + + if (Options::eagerlyUpdateTopCallFrame()) + updateTopCallFrame(); + unsigned bytecodeOffset = m_bytecodeOffset; + switch (opcodeID) { - DEFINE_BINARY_OP(op_del_by_val) - DEFINE_BINARY_OP(op_in) - DEFINE_BINARY_OP(op_less) - DEFINE_BINARY_OP(op_lesseq) - DEFINE_BINARY_OP(op_greater) - DEFINE_BINARY_OP(op_greatereq) - DEFINE_UNARY_OP(op_is_function) - DEFINE_UNARY_OP(op_is_object) - DEFINE_UNARY_OP(op_typeof) + DEFINE_SLOW_OP(del_by_val) + DEFINE_SLOW_OP(in) + DEFINE_SLOW_OP(less) + DEFINE_SLOW_OP(lesseq) + DEFINE_SLOW_OP(greater) + DEFINE_SLOW_OP(greatereq) + DEFINE_SLOW_OP(is_function) + DEFINE_SLOW_OP(is_object_or_null) + DEFINE_SLOW_OP(typeof) DEFINE_OP(op_add) DEFINE_OP(op_bitand) DEFINE_OP(op_bitor) DEFINE_OP(op_bitxor) DEFINE_OP(op_call) + DEFINE_OP(op_tail_call) DEFINE_OP(op_call_eval) DEFINE_OP(op_call_varargs) + DEFINE_OP(op_tail_call_varargs) + DEFINE_OP(op_construct_varargs) DEFINE_OP(op_catch) DEFINE_OP(op_construct) - DEFINE_OP(op_get_callee) DEFINE_OP(op_create_this) - DEFINE_OP(op_convert_this) - DEFINE_OP(op_init_lazy_reg) - DEFINE_OP(op_create_arguments) + DEFINE_OP(op_to_this) + DEFINE_OP(op_create_direct_arguments) + DEFINE_OP(op_create_scoped_arguments) + DEFINE_OP(op_create_out_of_band_arguments) + DEFINE_OP(op_copy_rest) + DEFINE_OP(op_get_rest_length) + DEFINE_OP(op_check_tdz) + DEFINE_OP(op_assert) + DEFINE_OP(op_save) + DEFINE_OP(op_resume) DEFINE_OP(op_debug) DEFINE_OP(op_del_by_id) DEFINE_OP(op_div) DEFINE_OP(op_end) DEFINE_OP(op_enter) - DEFINE_OP(op_create_activation) + DEFINE_OP(op_get_scope) DEFINE_OP(op_eq) DEFINE_OP(op_eq_null) - case op_get_by_id_out_of_line: case op_get_array_length: DEFINE_OP(op_get_by_id) - DEFINE_OP(op_get_arguments_length) DEFINE_OP(op_get_by_val) - DEFINE_OP(op_get_argument_by_val) - DEFINE_OP(op_get_by_pname) - DEFINE_OP(op_get_pnames) - DEFINE_OP(op_check_has_instance) + DEFINE_OP(op_overrides_has_instance) DEFINE_OP(op_instanceof) + DEFINE_OP(op_instanceof_custom) DEFINE_OP(op_is_undefined) DEFINE_OP(op_is_boolean) DEFINE_OP(op_is_number) DEFINE_OP(op_is_string) + DEFINE_OP(op_is_object) DEFINE_OP(op_jeq_null) DEFINE_OP(op_jfalse) DEFINE_OP(op_jmp) @@ -270,6 +255,7 @@ void JIT::privateCompileMainPass() DEFINE_OP(op_jngreatereq) DEFINE_OP(op_jtrue) DEFINE_OP(op_loop_hint) + DEFINE_OP(op_watchdog) DEFINE_OP(op_lshift) DEFINE_OP(op_mod) DEFINE_OP(op_mov) @@ -282,54 +268,35 @@ void JIT::privateCompileMainPass() DEFINE_OP(op_new_array_buffer) 
DEFINE_OP(op_new_func) DEFINE_OP(op_new_func_exp) + DEFINE_OP(op_new_generator_func) + DEFINE_OP(op_new_generator_func_exp) + DEFINE_OP(op_new_arrow_func_exp) DEFINE_OP(op_new_object) DEFINE_OP(op_new_regexp) - DEFINE_OP(op_next_pname) DEFINE_OP(op_not) DEFINE_OP(op_nstricteq) - DEFINE_OP(op_pop_scope) DEFINE_OP(op_dec) DEFINE_OP(op_inc) DEFINE_OP(op_profile_did_call) DEFINE_OP(op_profile_will_call) - DEFINE_OP(op_push_name_scope) + DEFINE_OP(op_profile_type) + DEFINE_OP(op_profile_control_flow) DEFINE_OP(op_push_with_scope) - case op_put_by_id_out_of_line: - case op_put_by_id_transition_direct: - case op_put_by_id_transition_normal: - case op_put_by_id_transition_direct_out_of_line: - case op_put_by_id_transition_normal_out_of_line: + DEFINE_OP(op_create_lexical_environment) + DEFINE_OP(op_get_parent_scope) DEFINE_OP(op_put_by_id) DEFINE_OP(op_put_by_index) + case op_put_by_val_direct: DEFINE_OP(op_put_by_val) - DEFINE_OP(op_put_getter_setter) - case op_init_global_const_nop: - NEXT_OPCODE(op_init_global_const_nop); - DEFINE_OP(op_init_global_const) - DEFINE_OP(op_init_global_const_check) - - case op_resolve_global_property: - case op_resolve_global_var: - case op_resolve_scoped_var: - case op_resolve_scoped_var_on_top_scope: - case op_resolve_scoped_var_with_top_scope_check: - DEFINE_OP(op_resolve) - - case op_resolve_base_to_global: - case op_resolve_base_to_global_dynamic: - case op_resolve_base_to_scope: - case op_resolve_base_to_scope_with_top_scope_check: - DEFINE_OP(op_resolve_base) - - case op_put_to_base_variable: - DEFINE_OP(op_put_to_base) - - DEFINE_OP(op_resolve_with_base) - DEFINE_OP(op_resolve_with_this) + DEFINE_OP(op_put_getter_by_id) + DEFINE_OP(op_put_setter_by_id) + DEFINE_OP(op_put_getter_setter_by_id) + DEFINE_OP(op_put_getter_by_val) + DEFINE_OP(op_put_setter_by_val) + DEFINE_OP(op_ret) - DEFINE_OP(op_call_put_result) - DEFINE_OP(op_ret_object_or_this) DEFINE_OP(op_rshift) + DEFINE_OP(op_unsigned) DEFINE_OP(op_urshift) DEFINE_OP(op_strcat) DEFINE_OP(op_stricteq) @@ -337,39 +304,40 @@ void JIT::privateCompileMainPass() DEFINE_OP(op_switch_char) DEFINE_OP(op_switch_imm) DEFINE_OP(op_switch_string) - DEFINE_OP(op_tear_off_activation) - DEFINE_OP(op_tear_off_arguments) DEFINE_OP(op_throw) DEFINE_OP(op_throw_static_error) DEFINE_OP(op_to_number) + DEFINE_OP(op_to_string) DEFINE_OP(op_to_primitive) - DEFINE_OP(op_get_scoped_var) - DEFINE_OP(op_put_scoped_var) - - case op_get_by_id_chain: - case op_get_by_id_generic: - case op_get_by_id_proto: - case op_get_by_id_self: - case op_get_by_id_getter_chain: - case op_get_by_id_getter_proto: - case op_get_by_id_getter_self: - case op_get_by_id_custom_chain: - case op_get_by_id_custom_proto: - case op_get_by_id_custom_self: - case op_get_string_length: - case op_put_by_id_generic: - case op_put_by_id_replace: - case op_put_by_id_transition: + DEFINE_OP(op_resolve_scope) + DEFINE_OP(op_get_from_scope) + DEFINE_OP(op_put_to_scope) + DEFINE_OP(op_get_from_arguments) + DEFINE_OP(op_put_to_arguments) + + DEFINE_OP(op_get_enumerable_length) + DEFINE_OP(op_has_generic_property) + DEFINE_OP(op_has_structure_property) + DEFINE_OP(op_has_indexed_property) + DEFINE_OP(op_get_direct_pname) + DEFINE_OP(op_get_property_enumerator) + DEFINE_OP(op_enumerator_structure_pname) + DEFINE_OP(op_enumerator_generic_pname) + DEFINE_OP(op_to_index_string) + default: RELEASE_ASSERT_NOT_REACHED(); } + + if (false) + dataLog("At ", bytecodeOffset, ": ", m_slowCases.size(), "\n"); } - RELEASE_ASSERT(m_callLinkInfoIndex == 
m_callStructureStubCompilationInfo.size()); + RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size()); #ifndef NDEBUG // Reset this, in order to guard its use with ASSERTs. - m_bytecodeOffset = (unsigned)-1; + m_bytecodeOffset = std::numeric_limits::max(); #endif } @@ -385,12 +353,11 @@ void JIT::privateCompileSlowCases() { Instruction* instructionsBegin = m_codeBlock->instructions().begin(); - m_propertyAccessInstructionIndex = 0; + m_getByIdIndex = 0; + m_putByIdIndex = 0; m_byValInstructionIndex = 0; - m_globalResolveInfoIndex = 0; m_callLinkInfoIndex = 0; -#if ENABLE(VALUE_PROFILER) // Use this to assert that slow-path code associates new profiling sites with existing // ValueProfiles rather than creating new ones. This ensures that for a given instruction // (say, get_by_id) we get combined statistics for both the fast-path executions of that @@ -398,24 +365,19 @@ void JIT::privateCompileSlowCases() // new ValueProfiles then the ValueProfiles would no longer be sorted by bytecode offset, // which would break the invariant necessary to use CodeBlock::valueProfileForBytecodeOffset(). unsigned numberOfValueProfiles = m_codeBlock->numberOfValueProfiles(); -#endif for (Vector::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) { -#if USE(JSVALUE64) - killLastResultRegister(); -#endif - m_bytecodeOffset = iter->to; + m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeOffset)); + unsigned firstTo = m_bytecodeOffset; Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset; -#if ENABLE(VALUE_PROFILER) RareCaseProfile* rareCaseProfile = 0; if (shouldEmitProfiling()) rareCaseProfile = m_codeBlock->addRareCaseProfile(m_bytecodeOffset); -#endif #if ENABLE(JIT_VERBOSE) dataLogF("Old JIT emitting slow code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset()); @@ -430,22 +392,22 @@ void JIT::privateCompileSlowCases() DEFINE_SLOWCASE_OP(op_bitor) DEFINE_SLOWCASE_OP(op_bitxor) DEFINE_SLOWCASE_OP(op_call) + DEFINE_SLOWCASE_OP(op_tail_call) DEFINE_SLOWCASE_OP(op_call_eval) DEFINE_SLOWCASE_OP(op_call_varargs) + DEFINE_SLOWCASE_OP(op_tail_call_varargs) + DEFINE_SLOWCASE_OP(op_construct_varargs) DEFINE_SLOWCASE_OP(op_construct) - DEFINE_SLOWCASE_OP(op_convert_this) + DEFINE_SLOWCASE_OP(op_to_this) + DEFINE_SLOWCASE_OP(op_check_tdz) DEFINE_SLOWCASE_OP(op_create_this) DEFINE_SLOWCASE_OP(op_div) DEFINE_SLOWCASE_OP(op_eq) - case op_get_by_id_out_of_line: case op_get_array_length: DEFINE_SLOWCASE_OP(op_get_by_id) - DEFINE_SLOWCASE_OP(op_get_arguments_length) DEFINE_SLOWCASE_OP(op_get_by_val) - DEFINE_SLOWCASE_OP(op_get_argument_by_val) - DEFINE_SLOWCASE_OP(op_get_by_pname) - DEFINE_SLOWCASE_OP(op_check_has_instance) DEFINE_SLOWCASE_OP(op_instanceof) + DEFINE_SLOWCASE_OP(op_instanceof_custom) DEFINE_SLOWCASE_OP(op_jfalse) DEFINE_SLOWCASE_OP(op_jless) DEFINE_SLOWCASE_OP(op_jlesseq) @@ -457,6 +419,7 @@ void JIT::privateCompileSlowCases() DEFINE_SLOWCASE_OP(op_jngreatereq) DEFINE_SLOWCASE_OP(op_jtrue) DEFINE_SLOWCASE_OP(op_loop_hint) + DEFINE_SLOWCASE_OP(op_watchdog) DEFINE_SLOWCASE_OP(op_lshift) DEFINE_SLOWCASE_OP(op_mod) DEFINE_SLOWCASE_OP(op_mul) @@ -467,122 +430,63 @@ void JIT::privateCompileSlowCases() DEFINE_SLOWCASE_OP(op_nstricteq) DEFINE_SLOWCASE_OP(op_dec) DEFINE_SLOWCASE_OP(op_inc) - case op_put_by_id_out_of_line: - case op_put_by_id_transition_direct: - case op_put_by_id_transition_normal: - case op_put_by_id_transition_direct_out_of_line: - case op_put_by_id_transition_normal_out_of_line: DEFINE_SLOWCASE_OP(op_put_by_id) + case 
op_put_by_val_direct: DEFINE_SLOWCASE_OP(op_put_by_val) - DEFINE_SLOWCASE_OP(op_init_global_const_check); DEFINE_SLOWCASE_OP(op_rshift) + DEFINE_SLOWCASE_OP(op_unsigned) DEFINE_SLOWCASE_OP(op_urshift) DEFINE_SLOWCASE_OP(op_stricteq) DEFINE_SLOWCASE_OP(op_sub) DEFINE_SLOWCASE_OP(op_to_number) + DEFINE_SLOWCASE_OP(op_to_string) DEFINE_SLOWCASE_OP(op_to_primitive) + DEFINE_SLOWCASE_OP(op_has_indexed_property) + DEFINE_SLOWCASE_OP(op_has_structure_property) + DEFINE_SLOWCASE_OP(op_get_direct_pname) - case op_resolve_global_property: - case op_resolve_global_var: - case op_resolve_scoped_var: - case op_resolve_scoped_var_on_top_scope: - case op_resolve_scoped_var_with_top_scope_check: - DEFINE_SLOWCASE_OP(op_resolve) - - case op_resolve_base_to_global: - case op_resolve_base_to_global_dynamic: - case op_resolve_base_to_scope: - case op_resolve_base_to_scope_with_top_scope_check: - DEFINE_SLOWCASE_OP(op_resolve_base) - DEFINE_SLOWCASE_OP(op_resolve_with_base) - DEFINE_SLOWCASE_OP(op_resolve_with_this) - - case op_put_to_base_variable: - DEFINE_SLOWCASE_OP(op_put_to_base) + DEFINE_SLOWCASE_OP(op_resolve_scope) + DEFINE_SLOWCASE_OP(op_get_from_scope) + DEFINE_SLOWCASE_OP(op_put_to_scope) default: RELEASE_ASSERT_NOT_REACHED(); } + if (false) + dataLog("At ", firstTo, " slow: ", iter - m_slowCases.begin(), "\n"); + RELEASE_ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen."); RELEASE_ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen."); -#if ENABLE(VALUE_PROFILER) if (shouldEmitProfiling()) add32(TrustedImm32(1), AbsoluteAddress(&rareCaseProfile->m_counter)); -#endif emitJumpSlowToHot(jump(), 0); } - RELEASE_ASSERT(m_propertyAccessInstructionIndex == m_propertyAccessCompilationInfo.size()); - RELEASE_ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size()); -#if ENABLE(VALUE_PROFILER) + RELEASE_ASSERT(m_getByIdIndex == m_getByIds.size()); + RELEASE_ASSERT(m_putByIdIndex == m_putByIds.size()); + RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size()); RELEASE_ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles()); -#endif #ifndef NDEBUG // Reset this, in order to guard its use with ASSERTs. 
- m_bytecodeOffset = (unsigned)-1; -#endif -} - -ALWAYS_INLINE void PropertyStubCompilationInfo::copyToStubInfo(StructureStubInfo& info, LinkBuffer &linkBuffer) -{ - ASSERT(bytecodeIndex != std::numeric_limits::max()); - info.bytecodeIndex = bytecodeIndex; - info.callReturnLocation = linkBuffer.locationOf(callReturnLocation); - info.hotPathBegin = linkBuffer.locationOf(hotPathBegin); - - switch (m_type) { - case GetById: { - CodeLocationLabel hotPathBeginLocation = linkBuffer.locationOf(hotPathBegin); - info.patch.baseline.u.get.structureToCompare = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getStructureToCompare)); - info.patch.baseline.u.get.structureCheck = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getStructureCheck)); - info.patch.baseline.u.get.propertyStorageLoad = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(propertyStorageLoad)); -#if USE(JSVALUE64) - info.patch.baseline.u.get.displacementLabel = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getDisplacementLabel)); -#else - info.patch.baseline.u.get.displacementLabel1 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getDisplacementLabel1)); - info.patch.baseline.u.get.displacementLabel2 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getDisplacementLabel2)); + m_bytecodeOffset = std::numeric_limits::max(); #endif - info.patch.baseline.u.get.putResult = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getPutResult)); - info.patch.baseline.u.get.coldPathBegin = MacroAssembler::differenceBetweenCodePtr(linkBuffer.locationOf(getColdPathBegin), linkBuffer.locationOf(callReturnLocation)); - break; - } - case PutById: - CodeLocationLabel hotPathBeginLocation = linkBuffer.locationOf(hotPathBegin); - info.patch.baseline.u.put.structureToCompare = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putStructureToCompare)); - info.patch.baseline.u.put.propertyStorageLoad = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(propertyStorageLoad)); -#if USE(JSVALUE64) - info.patch.baseline.u.put.displacementLabel = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putDisplacementLabel)); -#else - info.patch.baseline.u.put.displacementLabel1 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putDisplacementLabel1)); - info.patch.baseline.u.put.displacementLabel2 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putDisplacementLabel2)); -#endif - break; - } } -JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffort effort) +CompilationResult JIT::privateCompile(JITCompilationEffort effort) { -#if ENABLE(JIT_VERBOSE_OSR) - printf("Compiling JIT code!\n"); -#endif - -#if ENABLE(VALUE_PROFILER) - DFG::CapabilityLevel level = m_codeBlock->canCompileWithDFG(); + DFG::CapabilityLevel level = m_codeBlock->capabilityLevel(); switch (level) { case DFG::CannotCompile: m_canBeOptimized = false; + m_canBeOptimizedOrInlined = false; m_shouldEmitProfiling = false; break; - case DFG::MayInline: - m_canBeOptimized = false; - m_canBeOptimizedOrInlined = true; - m_shouldEmitProfiling = true; - break; case DFG::CanCompile: + case DFG::CanCompileAndInline: m_canBeOptimized = true; 
m_canBeOptimizedOrInlined = true; m_shouldEmitProfiling = true; @@ -591,15 +495,38 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo RELEASE_ASSERT_NOT_REACHED(); break; } -#endif - if (Options::showDisassembly() || m_vm->m_perBytecodeProfiler) - m_disassembler = adoptPtr(new JITDisassembler(m_codeBlock)); + switch (m_codeBlock->codeType()) { + case GlobalCode: + case ModuleCode: + case EvalCode: + m_codeBlock->m_shouldAlwaysBeInlined = false; + break; + case FunctionCode: + // We could have already set it to false because we detected an uninlineable call. + // Don't override that observation. + m_codeBlock->m_shouldAlwaysBeInlined &= canInline(level) && DFG::mightInlineFunction(m_codeBlock); + break; + } + + m_codeBlock->setCalleeSaveRegisters(RegisterSet::llintBaselineCalleeSaveRegisters()); // Might be able to remove as this is probably already set to this value. + + // This ensures that we have the most up to date type information when performing typecheck optimizations for op_profile_type. + if (m_vm->typeProfiler()) + m_vm->typeProfilerLog()->processLogEntries(ASCIILiteral("Preparing for JIT compilation.")); + + if (Options::dumpDisassembly() || m_vm->m_perBytecodeProfiler) + m_disassembler = std::make_unique(m_codeBlock); if (m_vm->m_perBytecodeProfiler) { - m_compilation = m_vm->m_perBytecodeProfiler->newCompilation(m_codeBlock, Profiler::Baseline); + m_compilation = adoptRef( + new Profiler::Compilation( + m_vm->m_perBytecodeProfiler->ensureBytecodesFor(m_codeBlock), + Profiler::Baseline)); m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock); } + m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(0, nullptr)); + if (m_disassembler) m_disassembler->setStartOfCode(label()); @@ -607,9 +534,8 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo if (m_randomGenerator.getUint32() & 1) nop(); - preserveReturnAddressAfterCall(regT2); - emitPutToCallFrameHeader(regT2, JSStack::ReturnPC); - emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock); + emitFunctionPrologue(); + emitPutToCallFrameHeader(m_codeBlock, JSStack::CodeBlock); Label beginLabel(this); @@ -618,17 +544,8 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo sampleInstruction(m_codeBlock->instructions().begin()); #endif - Jump stackCheck; if (m_codeBlock->codeType() == FunctionCode) { -#if ENABLE(DFG_JIT) -#if DFG_ENABLE(SUCCESS_STATS) - static SamplingCounter counter("orignalJIT"); - emitCount(counter); -#endif -#endif - -#if ENABLE(VALUE_PROFILER) - ASSERT(m_bytecodeOffset == (unsigned)-1); + ASSERT(m_bytecodeOffset == std::numeric_limits::max()); if (shouldEmitProfiling()) { for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) { // If this is a constructor, then we want to put in a dummy profiling site (to @@ -645,43 +562,54 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument)); } } -#endif - - addPtr(TrustedImm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1); - stackCheck = branchPtr(Below, AbsoluteAddress(m_vm->interpreter->stack().addressOfEnd()), regT1); } - Label functionBody = label(); - + addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT1); + Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), regT1); + + move(regT1, stackPointerRegister); 
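// Rough runtime behaviour of the entry sequence emitted above, written out as a C++ sketch.
// Register names follow the hunk; setCodeBlock(), stackLimit(), throwStackOverflowError() and
// setStackPointer() are illustrative stand-ins for what the emitted instructions and the
// linked operationThrowStackOverflowError call do, not JSC API quoted from headers.
void baselineEntrySketch(CallFrame* callFrame, CodeBlock* codeBlock, VM* vm)
{
    // emitFunctionPrologue(); emitPutToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
    callFrame->setCodeBlock(codeBlock);

    // addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT1);
    // the offset is negative, so regT1 ends up below the frame on the downward-growing stack
    char* newStackPointer = reinterpret_cast<char*>(callFrame)
        + JIT::stackPointerOffsetFor(codeBlock) * static_cast<int>(sizeof(Register));

    // Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), regT1);
    if (reinterpret_cast<char*>(vm->stackLimit()) > newStackPointer)
        throwStackOverflowError(callFrame);   // the stackOverflow jump is linked further down
                                              // to operationThrowStackOverflowError

    setStackPointer(newStackPointer);         // move(regT1, stackPointerRegister)
}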
+ checkStackPointerAlignment(); + + emitSaveCalleeSaves(); + emitMaterializeTagCheckRegisters(); + privateCompileMainPass(); privateCompileLinkPass(); privateCompileSlowCases(); if (m_disassembler) m_disassembler->setEndOfSlowPath(label()); + m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin()); + + stackOverflow.link(this); + m_bytecodeOffset = 0; + if (maxFrameExtentForSlowPathCall) + addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister); + callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock); Label arityCheck; if (m_codeBlock->codeType() == FunctionCode) { - stackCheck.link(this); - m_bytecodeOffset = 0; - JITStubCall(this, cti_stack_check).call(); -#ifndef NDEBUG - m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs. -#endif - jump(functionBody); - arityCheck = label(); - preserveReturnAddressAfterCall(regT2); - emitPutToCallFrameHeader(regT2, JSStack::ReturnPC); - emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock); + store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined); + emitFunctionPrologue(); + emitPutToCallFrameHeader(m_codeBlock, JSStack::CodeBlock); load32(payloadFor(JSStack::ArgumentCount), regT1); branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this); m_bytecodeOffset = 0; - JITStubCall(this, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck).call(callFrameRegister); + + if (maxFrameExtentForSlowPathCall) + addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister); + callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck); + if (maxFrameExtentForSlowPathCall) + addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister); + branchTest32(Zero, returnValueGPR).linkTo(beginLabel, this); + move(returnValueGPR, GPRInfo::argumentGPR0); + emitNakedCall(m_vm->getCTIStub(arityFixupGenerator).code()); + #if !ASSERT_DISABLED - m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs. + m_bytecodeOffset = std::numeric_limits::max(); // Reset this, in order to guard its use with ASSERTs. #endif jump(beginLabel); @@ -689,14 +617,17 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo ASSERT(m_jmpTable.isEmpty()); + privateCompileExceptionHandlers(); + if (m_disassembler) m_disassembler->setEndOfCode(label()); + m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin()); - LinkBuffer patchBuffer(*m_vm, this, m_codeBlock, effort); + + LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock, effort); if (patchBuffer.didFailToAllocate()) - return JITCode(); + return CompilationFailed; - ASSERT(m_labels.size() >= m_codeBlock->instructionCount()); // Translate vPC offsets into addresses in JIT generated code, for switch tables. 
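// Control flow of the arity-check entry emitted above, as an illustrative sketch. Only what is
// visible in this hunk is described; argumentCount, numParameters and callArityFixupThunk() are
// placeholder names, and the internals of the arityFixupGenerator thunk are not shown here.
//
//   prologue again; store the CodeBlock into the frame; clear m_shouldAlwaysBeInlined;
//   if (argumentCount >= numParameters) goto beginLabel;   // branch32(AboveOrEqual, ...)
//   fixup = operationCallArityCheck();     // operationConstructArityCheck for constructors,
//                                          // called with call-frame rollback on exception
//   if (fixup == 0) goto beginLabel;       // branchTest32(Zero, returnValueGPR)
//   argumentGPR0 = fixup;                  // move(returnValueGPR, GPRInfo::argumentGPR0)
//   callArityFixupThunk();                 // emitNakedCall(m_vm->getCTIStub(arityFixupGenerator).code())
//   goto beginLabel;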
for (unsigned i = 0; i < m_switches.size(); ++i) { SwitchRecord record = m_switches[i]; @@ -735,110 +666,132 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo patchBuffer.link(iter->from, FunctionPtr(iter->to)); } - m_codeBlock->callReturnIndexVector().reserveCapacity(m_calls.size()); - for (Vector::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) - m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(patchBuffer.returnAddressOffset(iter->from), iter->bytecodeOffset)); - - m_codeBlock->setNumberOfStructureStubInfos(m_propertyAccessCompilationInfo.size()); - for (unsigned i = 0; i < m_propertyAccessCompilationInfo.size(); ++i) - m_propertyAccessCompilationInfo[i].copyToStubInfo(m_codeBlock->structureStubInfo(i), patchBuffer); - m_codeBlock->setNumberOfByValInfos(m_byValCompilationInfo.size()); - for (unsigned i = 0; i < m_byValCompilationInfo.size(); ++i) { - CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(m_byValCompilationInfo[i].badTypeJump)); - CodeLocationLabel doneTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].doneTarget); - CodeLocationLabel slowPathTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].slowPathTarget); - CodeLocationCall returnAddress = patchBuffer.locationOf(m_byValCompilationInfo[i].returnAddress); - - m_codeBlock->byValInfo(i) = ByValInfo( - m_byValCompilationInfo[i].bytecodeIndex, + for (unsigned i = m_getByIds.size(); i--;) + m_getByIds[i].finalize(patchBuffer); + for (unsigned i = m_putByIds.size(); i--;) + m_putByIds[i].finalize(patchBuffer); + + for (const auto& byValCompilationInfo : m_byValCompilationInfo) { + PatchableJump patchableNotIndexJump = byValCompilationInfo.notIndexJump; + CodeLocationJump notIndexJump = CodeLocationJump(); + if (Jump(patchableNotIndexJump).isSet()) + notIndexJump = CodeLocationJump(patchBuffer.locationOf(patchableNotIndexJump)); + CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(byValCompilationInfo.badTypeJump)); + CodeLocationLabel doneTarget = patchBuffer.locationOf(byValCompilationInfo.doneTarget); + CodeLocationLabel nextHotPathTarget = patchBuffer.locationOf(byValCompilationInfo.nextHotPathTarget); + CodeLocationLabel slowPathTarget = patchBuffer.locationOf(byValCompilationInfo.slowPathTarget); + CodeLocationCall returnAddress = patchBuffer.locationOf(byValCompilationInfo.returnAddress); + + *byValCompilationInfo.byValInfo = ByValInfo( + byValCompilationInfo.bytecodeIndex, + notIndexJump, badTypeJump, - m_byValCompilationInfo[i].arrayMode, + byValCompilationInfo.arrayMode, + byValCompilationInfo.arrayProfile, differenceBetweenCodePtr(badTypeJump, doneTarget), + differenceBetweenCodePtr(badTypeJump, nextHotPathTarget), differenceBetweenCodePtr(returnAddress, slowPathTarget)); } - m_codeBlock->setNumberOfCallLinkInfos(m_callStructureStubCompilationInfo.size()); - for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) { - CallLinkInfo& info = m_codeBlock->callLinkInfo(i); - info.callType = m_callStructureStubCompilationInfo[i].callType; - info.codeOrigin = CodeOrigin(m_callStructureStubCompilationInfo[i].bytecodeIndex); - info.callReturnLocation = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation); - info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin); - info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther); - info.calleeGPR = regT0; + for (unsigned i = 
0; i < m_callCompilationInfo.size(); ++i) { + CallCompilationInfo& compilationInfo = m_callCompilationInfo[i]; + CallLinkInfo& info = *compilationInfo.callLinkInfo; + info.setCallLocations(patchBuffer.locationOfNearCall(compilationInfo.callReturnLocation), + patchBuffer.locationOf(compilationInfo.hotPathBegin), + patchBuffer.locationOfNearCall(compilationInfo.hotPathOther)); } -#if ENABLE(DFG_JIT) || ENABLE(LLINT) - if (canBeOptimizedOrInlined() -#if ENABLE(LLINT) - || true -#endif - ) { - CompactJITCodeMap::Encoder jitCodeMapEncoder; - for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) { - if (m_labels[bytecodeOffset].isSet()) - jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset])); - } - m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish()); + CompactJITCodeMap::Encoder jitCodeMapEncoder; + for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) { + if (m_labels[bytecodeOffset].isSet()) + jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset])); } -#endif + m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish()); - if (m_codeBlock->codeType() == FunctionCode && functionEntryArityCheck) - *functionEntryArityCheck = patchBuffer.locationOf(arityCheck); + MacroAssemblerCodePtr withArityCheck; + if (m_codeBlock->codeType() == FunctionCode) + withArityCheck = patchBuffer.locationOf(arityCheck); - if (Options::showDisassembly()) + if (Options::dumpDisassembly()) { m_disassembler->dump(patchBuffer); - if (m_compilation) + patchBuffer.didAlreadyDisassemble(); + } + if (m_compilation) { m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer); + m_vm->m_perBytecodeProfiler->addCompilation(m_compilation); + } + + if (m_pcToCodeOriginMapBuilder.didBuildMapping()) + m_codeBlock->setPCToCodeOriginMap(std::make_unique(WTFMove(m_pcToCodeOriginMapBuilder), patchBuffer)); - CodeRef result = patchBuffer.finalizeCodeWithoutDisassembly(); + CodeRef result = FINALIZE_CODE( + patchBuffer, + ("Baseline JIT code for %s", toCString(CodeBlockWithJITType(m_codeBlock, JITCode::BaselineJIT)).data())); m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.add( static_cast(result.size()) / static_cast(m_codeBlock->instructions().size())); - + m_codeBlock->shrinkToFit(CodeBlock::LateShrink); + m_codeBlock->setJITCode( + adoptRef(new DirectJITCode(result, withArityCheck, JITCode::BaselineJIT))); #if ENABLE(JIT_VERBOSE) dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end()); #endif - return JITCode(result, JITCode::BaselineJIT); + return CompilationSuccessful; } -void JIT::linkFor(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, VM* vm, CodeSpecializationKind kind) +void JIT::privateCompileExceptionHandlers() { - RepatchBuffer repatchBuffer(callerCodeBlock); - - ASSERT(!callLinkInfo->isLinked()); - callLinkInfo->callee.set(*vm, callLinkInfo->hotPathBegin, callerCodeBlock->ownerExecutable(), callee); - callLinkInfo->lastSeenCallee.set(*vm, callerCodeBlock->ownerExecutable(), callee); - repatchBuffer.relink(callLinkInfo->hotPathOther, code); - - if (calleeCodeBlock) - calleeCodeBlock->linkIncomingCall(callLinkInfo); - - // Patch the slow patch so we do not continue to try to link. 
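// Net effect of the rewritten tail of privateCompile() above (sketch): instead of returning a
// JITCode value and reporting the arity-check entry through the old functionEntryArityCheck
// out-parameter, the method now installs the finished code on the CodeBlock and only reports
// success or failure to its caller:
//   m_codeBlock->setJITCode(adoptRef(new DirectJITCode(result, withArityCheck, JITCode::BaselineJIT)));
//   return CompilationSuccessful;   // or CompilationFailed when the LinkBuffer failed to allocate
// Here `result` is the FINALIZE_CODE'd body entry and `withArityCheck` (FunctionCode only) is
// the entry that runs the arity check and fixup first.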
- if (kind == CodeForCall) { - ASSERT(callLinkInfo->callType == CallLinkInfo::Call - || callLinkInfo->callType == CallLinkInfo::CallVarargs); - if (callLinkInfo->callType == CallLinkInfo::Call) { - repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(linkClosureCallGenerator).code()); - return; - } + if (!m_exceptionChecksWithCallFrameRollback.empty()) { + m_exceptionChecksWithCallFrameRollback.link(this); - repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(virtualCallGenerator).code()); - return; + copyCalleeSavesToVMCalleeSavesBuffer(); + + // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*). + + move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1); + +#if CPU(X86) + // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer! + poke(GPRInfo::argumentGPR0); + poke(GPRInfo::argumentGPR1, 1); +#endif + m_calls.append(CallRecord(call(), std::numeric_limits::max(), FunctionPtr(lookupExceptionHandlerFromCallerFrame).value())); + jumpToExceptionHandler(); } - ASSERT(kind == CodeForConstruct); - repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(virtualConstructGenerator).code()); + if (!m_exceptionChecks.empty()) { + m_exceptionChecks.link(this); + + copyCalleeSavesToVMCalleeSavesBuffer(); + + // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*). + move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1); + +#if CPU(X86) + // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer! + poke(GPRInfo::argumentGPR0); + poke(GPRInfo::argumentGPR1, 1); +#endif + m_calls.append(CallRecord(call(), std::numeric_limits::max(), FunctionPtr(lookupExceptionHandler).value())); + jumpToExceptionHandler(); + } } -void JIT::linkSlowCall(CodeBlock* callerCodeBlock, CallLinkInfo* callLinkInfo) +unsigned JIT::frameRegisterCountFor(CodeBlock* codeBlock) { - RepatchBuffer repatchBuffer(callerCodeBlock); + ASSERT(static_cast(codeBlock->m_numCalleeLocals) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast(codeBlock->m_numCalleeLocals))); + + return roundLocalRegisterCountForFramePointerOffset(codeBlock->m_numCalleeLocals + maxFrameExtentForSlowPathCallInRegisters); +} - repatchBuffer.relink(callLinkInfo->callReturnLocation, callerCodeBlock->vm()->getCTIStub(virtualCallGenerator).code()); +int JIT::stackPointerOffsetFor(CodeBlock* codeBlock) +{ + return virtualRegisterForLocal(frameRegisterCountFor(codeBlock) - 1).offset(); } } // namespace JSC diff --git a/Source/JavaScriptCore/jit/JIT.h b/Source/JavaScriptCore/jit/JIT.h index df8a19fd8..e81824268 100644 --- a/Source/JavaScriptCore/jit/JIT.h +++ b/Source/JavaScriptCore/jit/JIT.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008, 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2012-2015 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,14 +28,9 @@ #if ENABLE(JIT) -// Verbose logging of code generation -#define ENABLE_JIT_VERBOSE 0 -// Verbose logging for OSR-related code. -#define ENABLE_JIT_VERBOSE_OSR 0 - // We've run into some problems where changing the size of the class JIT leads to // performance fluctuations. Try forcing alignment in an attempt to stabalize this. 
-#if COMPILER(GCC) +#if COMPILER(GCC_OR_CLANG) #define JIT_CLASS_ALIGNMENT __attribute__ ((aligned (32))) #else #define JIT_CLASS_ALIGNMENT @@ -47,33 +42,34 @@ #include "CompactJITCodeMap.h" #include "Interpreter.h" #include "JITDisassembler.h" +#include "JITInlineCacheGenerator.h" #include "JSInterfaceJIT.h" -#include "LegacyProfiler.h" #include "Opcode.h" +#include "PCToCodeOriginMap.h" #include "ResultType.h" +#include "SamplingTool.h" #include "UnusedPointer.h" -#include namespace JSC { + class ArrayAllocationProfile; + class CallLinkInfo; class CodeBlock; class FunctionExecutable; class JIT; - class JSPropertyNameIterator; + class Identifier; class Interpreter; class JSScope; class JSStack; class MarkedAllocator; class Register; class StructureChain; + class StructureStubInfo; - struct CallLinkInfo; struct Instruction; struct OperandTypes; - struct PolymorphicAccessStructureList; struct SimpleJumpTable; struct StringJumpTable; - struct StructureStubInfo; struct CallRecord { MacroAssembler::Call from; @@ -150,146 +146,45 @@ namespace JSC { } }; - enum PropertyStubGetById_T { PropertyStubGetById }; - enum PropertyStubPutById_T { PropertyStubPutById }; - - struct PropertyStubCompilationInfo { - enum Type { GetById, PutById } m_type; - - unsigned bytecodeIndex; - MacroAssembler::Call callReturnLocation; - MacroAssembler::Label hotPathBegin; - MacroAssembler::DataLabelPtr getStructureToCompare; - MacroAssembler::PatchableJump getStructureCheck; - MacroAssembler::ConvertibleLoadLabel propertyStorageLoad; -#if USE(JSVALUE64) - MacroAssembler::DataLabelCompact getDisplacementLabel; -#else - MacroAssembler::DataLabelCompact getDisplacementLabel1; - MacroAssembler::DataLabelCompact getDisplacementLabel2; -#endif - MacroAssembler::Label getPutResult; - MacroAssembler::Label getColdPathBegin; - MacroAssembler::DataLabelPtr putStructureToCompare; -#if USE(JSVALUE64) - MacroAssembler::DataLabel32 putDisplacementLabel; -#else - MacroAssembler::DataLabel32 putDisplacementLabel1; - MacroAssembler::DataLabel32 putDisplacementLabel2; -#endif - -#if !ASSERT_DISABLED - PropertyStubCompilationInfo() - : bytecodeIndex(std::numeric_limits::max()) - { - } -#endif - - - PropertyStubCompilationInfo( - PropertyStubGetById_T, unsigned bytecodeIndex, MacroAssembler::Label hotPathBegin, - MacroAssembler::DataLabelPtr structureToCompare, - MacroAssembler::PatchableJump structureCheck, - MacroAssembler::ConvertibleLoadLabel propertyStorageLoad, -#if USE(JSVALUE64) - MacroAssembler::DataLabelCompact displacementLabel, -#else - MacroAssembler::DataLabelCompact displacementLabel1, - MacroAssembler::DataLabelCompact displacementLabel2, -#endif - MacroAssembler::Label putResult) - : m_type(GetById) - , bytecodeIndex(bytecodeIndex) - , hotPathBegin(hotPathBegin) - , getStructureToCompare(structureToCompare) - , getStructureCheck(structureCheck) - , propertyStorageLoad(propertyStorageLoad) -#if USE(JSVALUE64) - , getDisplacementLabel(displacementLabel) -#else - , getDisplacementLabel1(displacementLabel1) - , getDisplacementLabel2(displacementLabel2) -#endif - , getPutResult(putResult) - { - } - - PropertyStubCompilationInfo( - PropertyStubPutById_T, unsigned bytecodeIndex, MacroAssembler::Label hotPathBegin, - MacroAssembler::DataLabelPtr structureToCompare, - MacroAssembler::ConvertibleLoadLabel propertyStorageLoad, -#if USE(JSVALUE64) - MacroAssembler::DataLabel32 displacementLabel -#else - MacroAssembler::DataLabel32 displacementLabel1, - MacroAssembler::DataLabel32 displacementLabel2 -#endif - ) - : m_type(PutById) - , 
bytecodeIndex(bytecodeIndex) - , hotPathBegin(hotPathBegin) - , propertyStorageLoad(propertyStorageLoad) - , putStructureToCompare(structureToCompare) -#if USE(JSVALUE64) - , putDisplacementLabel(displacementLabel) -#else - , putDisplacementLabel1(displacementLabel1) - , putDisplacementLabel2(displacementLabel2) -#endif - { - } - - void slowCaseInfo(PropertyStubGetById_T, MacroAssembler::Label coldPathBegin, MacroAssembler::Call call) - { - ASSERT(m_type == GetById); - callReturnLocation = call; - getColdPathBegin = coldPathBegin; - } - - void slowCaseInfo(PropertyStubPutById_T, MacroAssembler::Call call) - { - ASSERT(m_type == PutById); - callReturnLocation = call; - } - - void copyToStubInfo(StructureStubInfo& info, LinkBuffer &patchBuffer); - }; - struct ByValCompilationInfo { ByValCompilationInfo() { } - ByValCompilationInfo(unsigned bytecodeIndex, MacroAssembler::PatchableJump badTypeJump, JITArrayMode arrayMode, MacroAssembler::Label doneTarget) - : bytecodeIndex(bytecodeIndex) + ByValCompilationInfo(ByValInfo* byValInfo, unsigned bytecodeIndex, MacroAssembler::PatchableJump notIndexJump, MacroAssembler::PatchableJump badTypeJump, JITArrayMode arrayMode, ArrayProfile* arrayProfile, MacroAssembler::Label doneTarget, MacroAssembler::Label nextHotPathTarget) + : byValInfo(byValInfo) + , bytecodeIndex(bytecodeIndex) + , notIndexJump(notIndexJump) , badTypeJump(badTypeJump) , arrayMode(arrayMode) + , arrayProfile(arrayProfile) , doneTarget(doneTarget) + , nextHotPathTarget(nextHotPathTarget) { } - + + ByValInfo* byValInfo; unsigned bytecodeIndex; + MacroAssembler::PatchableJump notIndexJump; MacroAssembler::PatchableJump badTypeJump; JITArrayMode arrayMode; + ArrayProfile* arrayProfile; MacroAssembler::Label doneTarget; + MacroAssembler::Label nextHotPathTarget; MacroAssembler::Label slowPathTarget; MacroAssembler::Call returnAddress; }; - struct StructureStubCompilationInfo { + struct CallCompilationInfo { MacroAssembler::DataLabelPtr hotPathBegin; MacroAssembler::Call hotPathOther; MacroAssembler::Call callReturnLocation; - CallLinkInfo::CallType callType; - unsigned bytecodeIndex; + CallLinkInfo* callLinkInfo; }; - // Near calls can only be patched to other JIT code, regular calls can be patched to JIT code or relinked to stub functions. 
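// How one of the CallCompilationInfo records declared above is consumed at link time,
// condensed from the JIT.cpp hunk earlier in this patch (the index loop is rewritten as a
// range-for only for brevity):
for (CallCompilationInfo& compilationInfo : m_callCompilationInfo) {
    CallLinkInfo& info = *compilationInfo.callLinkInfo;
    info.setCallLocations(
        patchBuffer.locationOfNearCall(compilationInfo.callReturnLocation),
        patchBuffer.locationOf(compilationInfo.hotPathBegin),
        patchBuffer.locationOfNearCall(compilationInfo.hotPathOther));
}
// The old StructureStubCompilationInfo carried callType and bytecodeIndex so the CodeBlock
// could populate its own CallLinkInfo table; the new record points straight at the CallLinkInfo.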
- void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction); - void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction); - void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction); + void ctiPatchCallByReturnAddress(ReturnAddressPtr, FunctionPtr newCalleeFunction); class JIT : private JSInterfaceJIT { + friend class JITSlowPathCall; friend class JITStubCall; - friend struct PropertyStubCompilationInfo; using MacroAssembler::Jump; using MacroAssembler::JumpList; @@ -302,104 +197,64 @@ namespace JSC { static const int patchPutByIdDefaultOffset = 256; public: - static JITCode compile(VM* vm, CodeBlock* codeBlock, JITCompilationEffort effort, CodePtr* functionEntryArityCheck = 0) + static CompilationResult compile(VM* vm, CodeBlock* codeBlock, JITCompilationEffort effort) { - return JIT(vm, codeBlock).privateCompile(functionEntryArityCheck, effort); + return JIT(vm, codeBlock).privateCompile(effort); } - static void compileClosureCall(VM* vm, CallLinkInfo* callLinkInfo, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, Structure* expectedStructure, ExecutableBase* expectedExecutable, MacroAssemblerCodePtr codePtr) - { - JIT jit(vm, callerCodeBlock); - jit.m_bytecodeOffset = callLinkInfo->codeOrigin.bytecodeIndex; - jit.privateCompileClosureCall(callLinkInfo, calleeCodeBlock, expectedStructure, expectedExecutable, codePtr); - } - - static void compileGetByIdProto(VM* vm, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress) + static void compileGetByVal(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode) { JIT jit(vm, codeBlock); - jit.m_bytecodeOffset = stubInfo->bytecodeIndex; - jit.privateCompileGetByIdProto(stubInfo, structure, prototypeStructure, ident, slot, cachedOffset, returnAddress, callFrame); + jit.m_bytecodeOffset = byValInfo->bytecodeIndex; + jit.privateCompileGetByVal(byValInfo, returnAddress, arrayMode); } - static void compileGetByIdSelfList(VM* vm, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset) - { - JIT jit(vm, codeBlock); - jit.m_bytecodeOffset = stubInfo->bytecodeIndex; - jit.privateCompileGetByIdSelfList(stubInfo, polymorphicStructures, currentIndex, structure, ident, slot, cachedOffset); - } - static void compileGetByIdProtoList(VM* vm, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset) + static void compileGetByValWithCachedId(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, const Identifier& propertyName) { JIT jit(vm, codeBlock); - jit.m_bytecodeOffset = stubInfo->bytecodeIndex; - jit.privateCompileGetByIdProtoList(stubInfo, prototypeStructureList, currentIndex, structure, prototypeStructure, ident, slot, cachedOffset, callFrame); - } - static void compileGetByIdChainList(VM* vm, CallFrame* 
callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset) - { - JIT jit(vm, codeBlock); - jit.m_bytecodeOffset = stubInfo->bytecodeIndex; - jit.privateCompileGetByIdChainList(stubInfo, prototypeStructureList, currentIndex, structure, chain, count, ident, slot, cachedOffset, callFrame); + jit.m_bytecodeOffset = byValInfo->bytecodeIndex; + jit.privateCompileGetByValWithCachedId(byValInfo, returnAddress, propertyName); } - static void compileGetByIdChain(VM* vm, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress) + static void compilePutByVal(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode) { JIT jit(vm, codeBlock); - jit.m_bytecodeOffset = stubInfo->bytecodeIndex; - jit.privateCompileGetByIdChain(stubInfo, structure, chain, count, ident, slot, cachedOffset, returnAddress, callFrame); + jit.m_bytecodeOffset = byValInfo->bytecodeIndex; + jit.privateCompilePutByVal(byValInfo, returnAddress, arrayMode); } - static void compilePutByIdTransition(VM* vm, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, PropertyOffset cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct) + static void compileDirectPutByVal(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode) { JIT jit(vm, codeBlock); - jit.m_bytecodeOffset = stubInfo->bytecodeIndex; - jit.privateCompilePutByIdTransition(stubInfo, oldStructure, newStructure, cachedOffset, chain, returnAddress, direct); + jit.m_bytecodeOffset = byValInfo->bytecodeIndex; + jit.privateCompilePutByVal(byValInfo, returnAddress, arrayMode); } - - static void compileGetByVal(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode) + + static void compilePutByValWithCachedId(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, PutKind putKind, const Identifier& propertyName) { JIT jit(vm, codeBlock); jit.m_bytecodeOffset = byValInfo->bytecodeIndex; - jit.privateCompileGetByVal(byValInfo, returnAddress, arrayMode); + jit.privateCompilePutByValWithCachedId(byValInfo, returnAddress, putKind, propertyName); } - static void compilePutByVal(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode) + static void compileHasIndexedProperty(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode) { JIT jit(vm, codeBlock); jit.m_bytecodeOffset = byValInfo->bytecodeIndex; - jit.privateCompilePutByVal(byValInfo, returnAddress, arrayMode); + jit.privateCompileHasIndexedProperty(byValInfo, returnAddress, arrayMode); } static CodeRef compileCTINativeCall(VM* vm, NativeFunction func) { if (!vm->canUseJIT()) { -#if ENABLE(LLINT) return CodeRef::createLLIntCodeRef(llint_native_call_trampoline); -#else - return CodeRef(); -#endif } JIT jit(vm, 0); return jit.privateCompileCTINativeCall(vm, func); } - static void resetPatchGetById(RepatchBuffer&, StructureStubInfo*); - static void resetPatchPutById(RepatchBuffer&, 
StructureStubInfo*); - static void patchGetByIdSelf(CodeBlock*, StructureStubInfo*, Structure*, PropertyOffset cachedOffset, ReturnAddressPtr); - static void patchPutByIdReplace(CodeBlock*, StructureStubInfo*, Structure*, PropertyOffset cachedOffset, ReturnAddressPtr, bool direct); - - static void compilePatchGetArrayLength(VM* vm, CodeBlock* codeBlock, ReturnAddressPtr returnAddress) - { - JIT jit(vm, codeBlock); -#if ENABLE(DFG_JIT) - // Force profiling to be enabled during stub generation. - jit.m_canBeOptimized = true; - jit.m_canBeOptimizedOrInlined = true; - jit.m_shouldEmitProfiling = true; -#endif // ENABLE(DFG_JIT) - return jit.privateCompilePatchGetArrayLength(returnAddress); - } - - static void linkFor(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, CodePtr, CallLinkInfo*, VM*, CodeSpecializationKind); - static void linkSlowCall(CodeBlock* callerCodeBlock, CallLinkInfo*); + static unsigned frameRegisterCountFor(CodeBlock*); + static int stackPointerOffsetFor(CodeBlock*); private: JIT(VM*, CodeBlock* = 0); @@ -407,25 +262,52 @@ namespace JSC { void privateCompileMainPass(); void privateCompileLinkPass(); void privateCompileSlowCases(); - JITCode privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffort); - - void privateCompileClosureCall(CallLinkInfo*, CodeBlock* calleeCodeBlock, Structure*, ExecutableBase*, MacroAssemblerCodePtr); - - void privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, PropertyOffset cachedOffset, ReturnAddressPtr, CallFrame*); - void privateCompileGetByIdSelfList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, const Identifier&, const PropertySlot&, PropertyOffset cachedOffset); - void privateCompileGetByIdProtoList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, PropertyOffset cachedOffset, CallFrame*); - void privateCompileGetByIdChainList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, StructureChain*, size_t count, const Identifier&, const PropertySlot&, PropertyOffset cachedOffset, CallFrame*); - void privateCompileGetByIdChain(StructureStubInfo*, Structure*, StructureChain*, size_t count, const Identifier&, const PropertySlot&, PropertyOffset cachedOffset, ReturnAddressPtr, CallFrame*); - void privateCompilePutByIdTransition(StructureStubInfo*, Structure*, Structure*, PropertyOffset cachedOffset, StructureChain*, ReturnAddressPtr, bool direct); + CompilationResult privateCompile(JITCompilationEffort); void privateCompileGetByVal(ByValInfo*, ReturnAddressPtr, JITArrayMode); + void privateCompileGetByValWithCachedId(ByValInfo*, ReturnAddressPtr, const Identifier&); void privateCompilePutByVal(ByValInfo*, ReturnAddressPtr, JITArrayMode); + void privateCompilePutByValWithCachedId(ByValInfo*, ReturnAddressPtr, PutKind, const Identifier&); + + void privateCompileHasIndexedProperty(ByValInfo*, ReturnAddressPtr, JITArrayMode); Label privateCompileCTINativeCall(VM*, bool isConstruct = false); CodeRef privateCompileCTINativeCall(VM*, NativeFunction); void privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress); - static bool isDirectPutById(StructureStubInfo*); + // Add a call out from JIT code, without an exception check. 
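// The helpers added below keep call emission and exception handling separate: appendCall()
// only records the call in m_calls, while exceptionCheck()/exceptionCheckWithCallFrameRollback()
// append the emitted check to m_exceptionChecks or m_exceptionChecksWithCallFrameRollback.
// Those two JumpLists are exactly what privateCompileExceptionHandlers() in the JIT.cpp hunk
// above links to the stubs that call lookupExceptionHandler() or
// lookupExceptionHandlerFromCallerFrame() and then jumpToExceptionHandler(). A call site then
// looks roughly like the following (illustrative; operationFoo is a placeholder, and whether
// the check is issued inside the callOperation() wrapper or spelled out at the call site is
// not shown in this hunk):
//   callOperation(operationFoo, ...);
//   exceptionCheck();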
+ Call appendCall(const FunctionPtr& function) + { + Call functionCall = call(); + m_calls.append(CallRecord(functionCall, m_bytecodeOffset, function.value())); + return functionCall; + } + +#if OS(WINDOWS) && CPU(X86_64) + Call appendCallWithSlowPathReturnType(const FunctionPtr& function) + { + Call functionCall = callWithSlowPathReturnType(); + m_calls.append(CallRecord(functionCall, m_bytecodeOffset, function.value())); + return functionCall; + } +#endif + + void exceptionCheck(Jump jumpToHandler) + { + m_exceptionChecks.append(jumpToHandler); + } + + void exceptionCheck() + { + m_exceptionChecks.append(emitExceptionCheck()); + } + + void exceptionCheckWithCallFrameRollback() + { + m_exceptionChecksWithCallFrameRollback.append(emitExceptionCheck()); + } + + void privateCompileExceptionHandlers(); void addSlowCase(Jump); void addSlowCase(JumpList); @@ -435,43 +317,37 @@ namespace JSC { void compileOpCall(OpcodeID, Instruction*, unsigned callLinkInfoIndex); void compileOpCallSlowCase(OpcodeID, Instruction*, Vector::iterator&, unsigned callLinkInfoIndex); - void compileLoadVarargs(Instruction*); - void compileCallEval(); - void compileCallEvalSlowCase(Vector::iterator&); + void compileSetupVarargsFrame(Instruction*, CallLinkInfo*); + void compileCallEval(Instruction*); + void compileCallEvalSlowCase(Instruction*, Vector::iterator&); + void emitPutCallResult(Instruction*); enum CompileOpStrictEqType { OpStrictEq, OpNStrictEq }; void compileOpStrictEq(Instruction* instruction, CompileOpStrictEqType type); - bool isOperandConstantImmediateDouble(unsigned src); + bool isOperandConstantDouble(int src); void emitLoadDouble(int index, FPRegisterID value); void emitLoadInt32ToDouble(int index, FPRegisterID value); - Jump emitJumpIfNotObject(RegisterID structureReg); - - Jump addStructureTransitionCheck(JSCell*, Structure*, StructureStubInfo*, RegisterID scratch); - void addStructureTransitionCheck(JSCell*, Structure*, StructureStubInfo*, JumpList& failureCases, RegisterID scratch); - void testPrototype(JSValue, JumpList& failureCases, StructureStubInfo*); + Jump emitJumpIfCellObject(RegisterID cellReg); + Jump emitJumpIfCellNotObject(RegisterID cellReg); - enum WriteBarrierMode { UnconditionalWriteBarrier, ShouldFilterImmediates }; + enum WriteBarrierMode { UnconditionalWriteBarrier, ShouldFilterBase, ShouldFilterValue, ShouldFilterBaseAndValue }; // value register in write barrier is used before any scratch registers // so may safely be the same as either of the scratch registers. - void emitWriteBarrier(RegisterID owner, RegisterID valueTag, RegisterID scratch, RegisterID scratch2, WriteBarrierMode, WriteBarrierUseKind); - void emitWriteBarrier(JSCell* owner, RegisterID value, RegisterID scratch, WriteBarrierMode, WriteBarrierUseKind); + void emitWriteBarrier(unsigned owner, unsigned value, WriteBarrierMode); + void emitWriteBarrier(JSCell* owner, unsigned value, WriteBarrierMode); + void emitWriteBarrier(JSCell* owner); template // StructureType can be RegisterID or ImmPtr. void emitAllocateJSObject(RegisterID allocator, StructureType, RegisterID result, RegisterID scratch); -#if ENABLE(VALUE_PROFILER) // This assumes that the value to profile is in regT0 and that regT3 is available for // scratch. 
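// In other words, the profiling helpers declared just below expect the fast path's result to
// already be in regT0 and may clobber regT3. A fast path therefore typically ends with
// something like this (illustrative sketch, not a call site quoted from the sources; dst and
// src are placeholder operands):
//   emitGetVirtualRegister(src, regT0);   // or the result is simply left in regT0
//   emitValueProfilingSite();             // records regT0 into this bytecode's ValueProfile
//   emitPutVirtualRegister(dst);          // regT0 is the default source register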
void emitValueProfilingSite(ValueProfile*); void emitValueProfilingSite(unsigned bytecodeOffset); void emitValueProfilingSite(); -#else - void emitValueProfilingSite(unsigned) { } - void emitValueProfilingSite() { } -#endif - void emitArrayProfilingSite(RegisterID structureAndIndexingType, RegisterID scratch, ArrayProfile*); - void emitArrayProfilingSiteForBytecodeIndex(RegisterID structureAndIndexingType, RegisterID scratch, unsigned bytecodeIndex); + void emitArrayProfilingSiteWithCell(RegisterID cell, RegisterID indexingType, ArrayProfile*); + void emitArrayProfilingSiteForBytecodeIndexWithCell(RegisterID cell, RegisterID indexingType, unsigned bytecodeIndex); void emitArrayProfileStoreToHoleSpecialCase(ArrayProfile*); void emitArrayProfileOutOfBoundsSpecialCase(ArrayProfile*); @@ -481,14 +357,22 @@ namespace JSC { // Property is int-checked and zero extended. Base is cell checked. // Structure is already profiled. Returns the slow cases. Fall-through // case contains result in regT0, and it is not yet profiled. + JumpList emitInt32Load(Instruction* instruction, PatchableJump& badType) { return emitContiguousLoad(instruction, badType, Int32Shape); } + JumpList emitDoubleLoad(Instruction*, PatchableJump& badType); + JumpList emitContiguousLoad(Instruction*, PatchableJump& badType, IndexingType expectedShape = ContiguousShape); + JumpList emitArrayStorageLoad(Instruction*, PatchableJump& badType); + JumpList emitLoadForArrayMode(Instruction*, JITArrayMode, PatchableJump& badType); + JumpList emitInt32GetByVal(Instruction* instruction, PatchableJump& badType) { return emitContiguousGetByVal(instruction, badType, Int32Shape); } JumpList emitDoubleGetByVal(Instruction*, PatchableJump& badType); JumpList emitContiguousGetByVal(Instruction*, PatchableJump& badType, IndexingType expectedShape = ContiguousShape); JumpList emitArrayStorageGetByVal(Instruction*, PatchableJump& badType); - JumpList emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor&, size_t elementSize, TypedArraySignedness); - JumpList emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor&, size_t elementSize); + JumpList emitDirectArgumentsGetByVal(Instruction*, PatchableJump& badType); + JumpList emitScopedArgumentsGetByVal(Instruction*, PatchableJump& badType); + JumpList emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badType, TypedArrayType); + JumpList emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badType, TypedArrayType); - // Property is in regT0, base is in regT0. regT2 contains indecing type. + // Property is in regT1, base is in regT0. regT2 contains indecing type. // The value to store is not yet loaded. Property is int-checked and // zero-extended. Base is cell checked. Structure is already profiled. // returns the slow cases. 
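// Typical shape of a caller of the by-val emitters declared around here (illustrative sketch;
// the real call sites are not part of this hunk):
//   PatchableJump badType;
//   JumpList slowCases = emitContiguousGetByVal(currentInstruction, badType);
//   // the fast path falls through with the result in regT0, not yet profiled (see above);
//   addSlowCase(slowCases);
//   // badType, together with the done and slow-path labels, ends up in a ByValCompilationInfo
//   // record (declared earlier in this header diff), from which privateCompile() builds the
//   // patchable ByValInfo.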
@@ -506,13 +390,25 @@ namespace JSC {
         }
         JumpList emitGenericContiguousPutByVal(Instruction*, PatchableJump& badType, IndexingType indexingShape = ContiguousShape);
         JumpList emitArrayStoragePutByVal(Instruction*, PatchableJump& badType);
-        JumpList emitIntTypedArrayPutByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor&, size_t elementSize, TypedArraySignedness, TypedArrayRounding);
-        JumpList emitFloatTypedArrayPutByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor&, size_t elementSize);
-
+        JumpList emitIntTypedArrayPutByVal(Instruction*, PatchableJump& badType, TypedArrayType);
+        JumpList emitFloatTypedArrayPutByVal(Instruction*, PatchableJump& badType, TypedArrayType);
+
+        // Identifier check helper for GetByVal and PutByVal.
+        void emitIdentifierCheck(RegisterID cell, RegisterID scratch, const Identifier&, JumpList& slowCases);
+
+        JITGetByIdGenerator emitGetByValWithCachedId(Instruction*, const Identifier&, Jump& fastDoneCase, Jump& slowDoneCase, JumpList& slowCases);
+        JITPutByIdGenerator emitPutByValWithCachedId(Instruction*, PutKind, const Identifier&, JumpList& doneCases, JumpList& slowCases);
+
         enum FinalObjectMode { MayBeFinal, KnownNotFinal };
 
+        void emitGetVirtualRegister(int src, JSValueRegs dst);
+        void emitPutVirtualRegister(int dst, JSValueRegs src);
+
+        int32_t getOperandConstantInt(int src);
+        double getOperandConstantDouble(int src);
+
 #if USE(JSVALUE32_64)
-        bool getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant);
+        bool getOperandConstantInt(int op1, int op2, int& op, int32_t& constant);
 
         void emitLoadTag(int index, RegisterID tag);
         void emitLoadPayload(int index, RegisterID payload);
@@ -525,156 +421,103 @@ namespace JSC {
         void emitStore(int index, const JSValue constant, RegisterID base = callFrameRegister);
         void emitStoreInt32(int index, RegisterID payload, bool indexIsInt32 = false);
         void emitStoreInt32(int index, TrustedImm32 payload, bool indexIsInt32 = false);
-        void emitStoreAndMapInt32(int index, RegisterID tag, RegisterID payload, bool indexIsInt32, size_t opcodeLength);
         void emitStoreCell(int index, RegisterID payload, bool indexIsCell = false);
         void emitStoreBool(int index, RegisterID payload, bool indexIsBool = false);
         void emitStoreDouble(int index, FPRegisterID value);
 
-        bool isLabeled(unsigned bytecodeOffset);
-        void map(unsigned bytecodeOffset, int virtualRegisterIndex, RegisterID tag, RegisterID payload);
-        void unmap(RegisterID);
-        void unmap();
-        bool isMapped(int virtualRegisterIndex);
-        bool getMappedPayload(int virtualRegisterIndex, RegisterID& payload);
-        bool getMappedTag(int virtualRegisterIndex, RegisterID& tag);
-
         void emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex);
         void emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex, RegisterID tag);
 
-        void compileGetByIdHotPath(Identifier*);
-        void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier*, Vector<SlowCaseEntry>::iterator&);
-        void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, PropertyOffset cachedOffset);
-        void compileGetDirectOffset(JSObject* base, RegisterID resultTag, RegisterID resultPayload, PropertyOffset cachedOffset);
-        void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID offset, FinalObjectMode = MayBeFinal);
-        void compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, PropertyOffset cachedOffset);
+        void compileGetByIdHotPath(const Identifier*);
 
         // Arithmetic opcode helpers
-        void emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType);
-        void emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType);
-        void emitBinaryDoubleOp(OpcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters = true, bool op2IsInRegisters = true);
-
-#if CPU(ARM_TRADITIONAL)
-        // sequenceOpCall
-        static const int sequenceOpCallInstructionSpace = 12;
-        static const int sequenceOpCallConstantSpace = 2;
-        // sequenceGetByIdHotPath
-        static const int sequenceGetByIdHotPathInstructionSpace = 36;
-        static const int sequenceGetByIdHotPathConstantSpace = 4;
-        // sequenceGetByIdSlowCase
-        static const int sequenceGetByIdSlowCaseInstructionSpace = 80;
-        static const int sequenceGetByIdSlowCaseConstantSpace = 4;
-        // sequencePutById
-        static const int sequencePutByIdInstructionSpace = 36;
-        static const int sequencePutByIdConstantSpace = 4;
-#elif CPU(SH4)
-        // sequenceOpCall
-        static const int sequenceOpCallInstructionSpace = 12;
-        static const int sequenceOpCallConstantSpace = 2;
-        // sequenceGetByIdHotPath
-        static const int sequenceGetByIdHotPathInstructionSpace = 36;
-        static const int sequenceGetByIdHotPathConstantSpace = 5;
-        // sequenceGetByIdSlowCase
-        static const int sequenceGetByIdSlowCaseInstructionSpace = 38;
-        static const int sequenceGetByIdSlowCaseConstantSpace = 4;
-        // sequencePutById
-        static const int sequencePutByIdInstructionSpace = 36;
-        static const int sequencePutByIdConstantSpace = 5;
-#endif
+        void emitSub32Constant(int dst, int op, int32_t constant, ResultType opType);
+        void emitBinaryDoubleOp(OpcodeID, int dst, int op1, int op2, OperandTypes, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters = true, bool op2IsInRegisters = true);
 
 #else // USE(JSVALUE32_64)
-        /* This function is deprecated. */
-        void emitGetJITStubArg(unsigned argumentNumber, RegisterID dst);
-
         void emitGetVirtualRegister(int src, RegisterID dst);
+        void emitGetVirtualRegister(VirtualRegister src, RegisterID dst);
         void emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2);
-        void emitPutVirtualRegister(unsigned dst, RegisterID from = regT0);
-        void emitStoreCell(unsigned dst, RegisterID payload, bool /* only used in JSValue32_64 */ = false)
+        void emitGetVirtualRegisters(VirtualRegister src1, RegisterID dst1, VirtualRegister src2, RegisterID dst2);
+        void emitPutVirtualRegister(int dst, RegisterID from = regT0);
+        void emitPutVirtualRegister(VirtualRegister dst, RegisterID from = regT0);
+        void emitStoreCell(int dst, RegisterID payload, bool /* only used in JSValue32_64 */ = false)
+        {
+            emitPutVirtualRegister(dst, payload);
+        }
+        void emitStoreCell(VirtualRegister dst, RegisterID payload)
         {
             emitPutVirtualRegister(dst, payload);
         }
-
-        int32_t getConstantOperandImmediateInt(unsigned src);
-
-        void killLastResultRegister();
 
         Jump emitJumpIfJSCell(RegisterID);
         Jump emitJumpIfBothJSCells(RegisterID, RegisterID, RegisterID);
         void emitJumpSlowCaseIfJSCell(RegisterID);
         void emitJumpSlowCaseIfNotJSCell(RegisterID);
         void emitJumpSlowCaseIfNotJSCell(RegisterID, int VReg);
-        Jump emitJumpIfImmediateInteger(RegisterID);
-        Jump emitJumpIfNotImmediateInteger(RegisterID);
-        Jump emitJumpIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);
-        void emitJumpSlowCaseIfNotImmediateInteger(RegisterID);
-        void emitJumpSlowCaseIfNotImmediateNumber(RegisterID);
-        void emitJumpSlowCaseIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);
-
-        void emitFastArithReTagImmediate(RegisterID src, RegisterID dest);
-
-        void emitTagAsBoolImmediate(RegisterID reg);
-        void compileBinaryArithOp(OpcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes opi);
-        void compileBinaryArithOpSlowCase(OpcodeID, Vector<SlowCaseEntry>::iterator&, unsigned dst, unsigned src1, unsigned src2, OperandTypes, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase);
-
-        void compileGetByIdHotPath(int baseVReg, Identifier*);
-        void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier*, Vector<SlowCaseEntry>::iterator&);
-        void compileGetDirectOffset(RegisterID base, RegisterID result, PropertyOffset cachedOffset);
-        void compileGetDirectOffset(JSObject* base, RegisterID result, PropertyOffset cachedOffset);
-        void compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch, FinalObjectMode = MayBeFinal);
-        void compilePutDirectOffset(RegisterID base, RegisterID value, PropertyOffset cachedOffset);
+        Jump emitJumpIfInt(RegisterID);
+        Jump emitJumpIfNotInt(RegisterID);
+        Jump emitJumpIfNotInt(RegisterID, RegisterID, RegisterID scratch);
+        PatchableJump emitPatchableJumpIfNotInt(RegisterID);
+        void emitJumpSlowCaseIfNotInt(RegisterID);
+        void emitJumpSlowCaseIfNotNumber(RegisterID);
+        void emitJumpSlowCaseIfNotInt(RegisterID, RegisterID, RegisterID scratch);
 
-#endif // USE(JSVALUE32_64)
-
-#if (defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL)
-#define BEGIN_UNINTERRUPTED_SEQUENCE(name) do { beginUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace); } while (false)
-#define END_UNINTERRUPTED_SEQUENCE_FOR_PUT(name, dst) do { endUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace, dst); } while (false)
-#define END_UNINTERRUPTED_SEQUENCE(name) END_UNINTERRUPTED_SEQUENCE_FOR_PUT(name, 0)
+        void emitTagBool(RegisterID);
 
-        void beginUninterruptedSequence(int, int);
-        void endUninterruptedSequence(int, int, int);
+        void compileGetByIdHotPath(int baseVReg, const Identifier*);
 
-#else
-#define BEGIN_UNINTERRUPTED_SEQUENCE(name)
-#define END_UNINTERRUPTED_SEQUENCE(name)
-#define END_UNINTERRUPTED_SEQUENCE_FOR_PUT(name, dst)
-#endif
+#endif // USE(JSVALUE32_64)
 
-        void emit_compareAndJump(OpcodeID, unsigned op1, unsigned op2, unsigned target, RelationalCondition);
-        void emit_compareAndJumpSlow(unsigned op1, unsigned op2, unsigned target, DoubleCondition, int (JIT_STUB *stub)(STUB_ARGS_DECLARATION), bool invert, Vector<SlowCaseEntry>::iterator&);
+        void emit_compareAndJump(OpcodeID, int op1, int op2, unsigned target, RelationalCondition);
+        void emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondition, size_t (JIT_OPERATION *operation)(ExecState*, EncodedJSValue, EncodedJSValue), bool invert, Vector<SlowCaseEntry>::iterator&);
+
+        void assertStackPointerOffset();
 
         void emit_op_add(Instruction*);
         void emit_op_bitand(Instruction*);
         void emit_op_bitor(Instruction*);
         void emit_op_bitxor(Instruction*);
         void emit_op_call(Instruction*);
+        void emit_op_tail_call(Instruction*);
         void emit_op_call_eval(Instruction*);
         void emit_op_call_varargs(Instruction*);
-        void emit_op_call_put_result(Instruction*);
+        void emit_op_tail_call_varargs(Instruction*);
+        void emit_op_construct_varargs(Instruction*);
         void emit_op_catch(Instruction*);
         void emit_op_construct(Instruction*);
-        void emit_op_get_callee(Instruction*);
         void emit_op_create_this(Instruction*);
-        void emit_op_convert_this(Instruction*);
-        void emit_op_create_arguments(Instruction*);
+        void emit_op_to_this(Instruction*);
+        void emit_op_create_direct_arguments(Instruction*);
+        void emit_op_create_scoped_arguments(Instruction*);
+        void emit_op_create_out_of_band_arguments(Instruction*);
+        void emit_op_copy_rest(Instruction*);
+        void emit_op_get_rest_length(Instruction*);
+        void emit_op_check_tdz(Instruction*);
+        void emit_op_assert(Instruction*);
+        void emit_op_save(Instruction*);
+        void emit_op_resume(Instruction*);
         void emit_op_debug(Instruction*);
         void emit_op_del_by_id(Instruction*);
         void emit_op_div(Instruction*);
         void emit_op_end(Instruction*);
         void emit_op_enter(Instruction*);
-        void emit_op_create_activation(Instruction*);
+        void emit_op_get_scope(Instruction*);
         void emit_op_eq(Instruction*);
         void emit_op_eq_null(Instruction*);
         void emit_op_get_by_id(Instruction*);
         void emit_op_get_arguments_length(Instruction*);
         void emit_op_get_by_val(Instruction*);
         void emit_op_get_argument_by_val(Instruction*);
-        void emit_op_get_by_pname(Instruction*);
         void emit_op_init_lazy_reg(Instruction*);
-        void emit_op_check_has_instance(Instruction*);
+        void emit_op_overrides_has_instance(Instruction*);
         void emit_op_instanceof(Instruction*);
+        void emit_op_instanceof_custom(Instruction*);
         void emit_op_is_undefined(Instruction*);
         void emit_op_is_boolean(Instruction*);
         void emit_op_is_number(Instruction*);
         void emit_op_is_string(Instruction*);
+        void emit_op_is_object(Instruction*);
         void emit_op_jeq_null(Instruction*);
         void emit_op_jfalse(Instruction*);
         void emit_op_jmp(Instruction*);
@@ -690,6 +533,7 @@ namespace JSC {
         void emit_op_jngreatereq(Instruction*);
         void emit_op_jtrue(Instruction*);
         void emit_op_loop_hint(Instruction*);
+        void emit_op_watchdog(Instruction*);
         void emit_op_lshift(Instruction*);
         void emit_op_mod(Instruction*);
         void emit_op_mov(Instruction*);
@@ -702,34 +546,31 @@ namespace JSC {
         void emit_op_new_array_buffer(Instruction*);
         void emit_op_new_func(Instruction*);
         void emit_op_new_func_exp(Instruction*);
+        void emit_op_new_generator_func(Instruction*);
+        void emit_op_new_generator_func_exp(Instruction*);
+        void emit_op_new_arrow_func_exp(Instruction*);
         void emit_op_new_object(Instruction*);
         void emit_op_new_regexp(Instruction*);
-        void emit_op_get_pnames(Instruction*);
-        void emit_op_next_pname(Instruction*);
         void emit_op_not(Instruction*);
         void emit_op_nstricteq(Instruction*);
-        void emit_op_pop_scope(Instruction*);
        void emit_op_dec(Instruction*);
         void emit_op_inc(Instruction*);
         void emit_op_profile_did_call(Instruction*);
         void emit_op_profile_will_call(Instruction*);
-        void emit_op_push_name_scope(Instruction*);
+        void emit_op_profile_type(Instruction*);
+        void emit_op_profile_control_flow(Instruction*);
         void emit_op_push_with_scope(Instruction*);
+        void emit_op_create_lexical_environment(Instruction*);
+        void emit_op_get_parent_scope(Instruction*);
         void emit_op_put_by_id(Instruction*);
         void emit_op_put_by_index(Instruction*);
         void emit_op_put_by_val(Instruction*);
-        void emit_op_put_getter_setter(Instruction*);
-        void emit_op_init_global_const(Instruction*);
-        void emit_op_init_global_const_check(Instruction*);
-        void emit_resolve_operations(ResolveOperations*, const int* base, const int* value);
-        void emitSlow_link_resolve_operations(ResolveOperations*, Vector<SlowCaseEntry>::iterator&);
-        void emit_op_resolve(Instruction*);
-        void emit_op_resolve_base(Instruction*);
-        void emit_op_resolve_with_base(Instruction*);
-        void emit_op_resolve_with_this(Instruction*);
-        void emit_op_put_to_base(Instruction*);
+        void emit_op_put_getter_by_id(Instruction*);
+        void emit_op_put_setter_by_id(Instruction*);
+        void emit_op_put_getter_setter_by_id(Instruction*);
+        void emit_op_put_getter_by_val(Instruction*);
+        void emit_op_put_setter_by_val(Instruction*);
         void emit_op_ret(Instruction*);
-        void emit_op_ret_object_or_this(Instruction*);
         void emit_op_rshift(Instruction*);
         void emit_op_strcat(Instruction*);
         void emit_op_stricteq(Instruction*);
@@ -737,36 +578,48 @@ namespace JSC {
         void emit_op_switch_char(Instruction*);
         void emit_op_switch_imm(Instruction*);
         void emit_op_switch_string(Instruction*);
-        void emit_op_tear_off_activation(Instruction*);
         void emit_op_tear_off_arguments(Instruction*);
         void emit_op_throw(Instruction*);
         void emit_op_throw_static_error(Instruction*);
         void emit_op_to_number(Instruction*);
+        void emit_op_to_string(Instruction*);
         void emit_op_to_primitive(Instruction*);
         void emit_op_unexpected_load(Instruction*);
+        void emit_op_unsigned(Instruction*);
         void emit_op_urshift(Instruction*);
-        void emit_op_get_scoped_var(Instruction*);
-        void emit_op_put_scoped_var(Instruction*);
+        void emit_op_get_enumerable_length(Instruction*);
+        void emit_op_has_generic_property(Instruction*);
+        void emit_op_has_structure_property(Instruction*);
+        void emit_op_has_indexed_property(Instruction*);
+        void emit_op_get_direct_pname(Instruction*);
+        void emit_op_get_property_enumerator(Instruction*);
+        void emit_op_enumerator_structure_pname(Instruction*);
+        void emit_op_enumerator_generic_pname(Instruction*);
+        void emit_op_to_index_string(Instruction*);
 
         void emitSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_bitand(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_bitor(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_bitxor(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_call(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_tail_call(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_call_eval(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_call_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_tail_call_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_construct_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_construct(Instruction*, Vector<SlowCaseEntry>::iterator&);
-        void emitSlow_op_convert_this(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_to_this(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_create_this(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_check_tdz(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_div(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_eq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_get_callee(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_get_arguments_length(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_get_argument_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
-        void emitSlow_op_get_by_pname(Instruction*, Vector<SlowCaseEntry>::iterator&);
-        void emitSlow_op_check_has_instance(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_instanceof(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_instanceof_custom(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_jfalse(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_jless(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_jlesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
@@ -778,6 +631,7 @@ namespace JSC {
         void emitSlow_op_jngreatereq(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_jtrue(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_loop_hint(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_watchdog(Instruction*, Vector<SlowCaseEntry>::iterator&);
        void emitSlow_op_lshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&);
@@ -790,37 +644,56 @@ namespace JSC {
         void emitSlow_op_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
-        void emitSlow_op_init_global_const_check(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_rshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_stricteq(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_to_number(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_to_string(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_to_primitive(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_unsigned(Instruction*, Vector<SlowCaseEntry>::iterator&);
         void emitSlow_op_urshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
-
-        void emitSlow_op_resolve(Instruction*, Vector<SlowCaseEntry>::iterator&);
-        void emitSlow_op_resolve_base(Instruction*, Vector<SlowCaseEntry>::iterator&);
-        void emitSlow_op_resolve_with_base(Instruction*, Vector<SlowCaseEntry>::iterator&);
-        void emitSlow_op_resolve_with_this(Instruction*, Vector<SlowCaseEntry>::iterator&);
-        void emitSlow_op_put_to_base(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_has_indexed_property(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_has_structure_property(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_get_direct_pname(Instruction*, Vector<SlowCaseEntry>::iterator&);
+
+        void emit_op_resolve_scope(Instruction*);
+        void emit_op_get_from_scope(Instruction*);
+        void emit_op_put_to_scope(Instruction*);
+        void emit_op_get_from_arguments(Instruction*);
+        void emit_op_put_to_arguments(Instruction*);
+        void emitSlow_op_resolve_scope(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_get_from_scope(Instruction*, Vector<SlowCaseEntry>::iterator&);
+        void emitSlow_op_put_to_scope(Instruction*, Vector<SlowCaseEntry>::iterator&);
 
         void emitRightShift(Instruction*, bool isUnsigned);
         void emitRightShiftSlowCase(Instruction*, Vector<SlowCaseEntry>::iterator&, bool isUnsigned);
 
-        void emitInitRegister(unsigned dst);
-
-        void emitPutIntToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry);
-        void emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister);
-        void emitGetFromCallFrameHeader32(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister);
+        void emitNewFuncCommon(Instruction*);
+        void emitNewFuncExprCommon(Instruction*);
+        void emitVarInjectionCheck(bool needsVarInjectionChecks);
+        void emitResolveClosure(int dst, int scope, bool needsVarInjectionChecks, unsigned depth);
+        void emitLoadWithStructureCheck(int scope, Structure** structureSlot);
 #if USE(JSVALUE64)
-        void emitGetFromCallFrameHeader64(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister);
+        void emitGetVarFromPointer(JSValue* operand, GPRReg);
+        void emitGetVarFromIndirectPointer(JSValue** operand, GPRReg);
+#else
+        void emitGetVarFromIndirectPointer(JSValue** operand, GPRReg tag, GPRReg payload);
+        void emitGetVarFromPointer(JSValue* operand, GPRReg tag, GPRReg payload);
 #endif
+        void emitGetClosureVar(int scope, uintptr_t operand);
+        void emitNotifyWrite(WatchpointSet*);
+        void emitNotifyWrite(GPRReg pointerToSet);
+        void emitPutGlobalVariable(JSValue* operand, int value, WatchpointSet*);
+        void emitPutGlobalVariableIndirect(JSValue** addressOfOperand, int value, WatchpointSet**);
+        void emitPutClosureVar(int scope, uintptr_t operand, int value, WatchpointSet*);
 
-        JSValue getConstantOperand(unsigned src);
-        bool isOperandConstantImmediateInt(unsigned src);
-        bool isOperandConstantImmediateChar(unsigned src);
+        void emitInitRegister(int dst);
 
-        bool atJumpTarget();
+        void emitPutIntToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry);
+
+        JSValue getConstantOperand(int src);
+        bool isOperandConstantInt(int src);
+        bool isOperandConstantChar(int src);
 
         Jump getSlowCase(Vector<SlowCaseEntry>::iterator& iter)
         {
@@ -837,13 +710,138 @@ namespace JSC {
             ++iter;
         }
         void linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&, int virtualRegisterIndex);
+        void linkAllSlowCasesForBytecodeOffset(Vector<SlowCaseEntry>& slowCases,
+            Vector<SlowCaseEntry>::iterator&, unsigned bytecodeOffset);
+
+        MacroAssembler::Call appendCallWithExceptionCheck(const FunctionPtr&);
+#if OS(WINDOWS) && CPU(X86_64)
+        MacroAssembler::Call appendCallWithExceptionCheckAndSlowPathReturnType(const FunctionPtr&);
+#endif
+        MacroAssembler::Call appendCallWithCallFrameRollbackOnException(const FunctionPtr&);
+        MacroAssembler::Call appendCallWithExceptionCheckSetJSValueResult(const FunctionPtr&, int);
+        MacroAssembler::Call appendCallWithExceptionCheckSetJSValueResultWithProfile(const FunctionPtr&, int);
+
+        enum WithProfileTag { WithProfile };
+
+        MacroAssembler::Call callOperation(C_JITOperation_E);
+        MacroAssembler::Call callOperation(C_JITOperation_EO, GPRReg);
+        MacroAssembler::Call callOperation(C_JITOperation_EL, GPRReg);
+        MacroAssembler::Call callOperation(C_JITOperation_EL, TrustedImmPtr);
+        MacroAssembler::Call callOperation(C_JITOperation_ESt, Structure*);
+        MacroAssembler::Call callOperation(C_JITOperation_EZ, int32_t);
+        MacroAssembler::Call callOperation(Z_JITOperation_EJZZ, GPRReg, int32_t, int32_t);
+        MacroAssembler::Call callOperation(J_JITOperation_E, int);
+        MacroAssembler::Call callOperation(J_JITOperation_EAapJ, int, ArrayAllocationProfile*, GPRReg);
+        MacroAssembler::Call callOperation(J_JITOperation_EAapJcpZ, int, ArrayAllocationProfile*, GPRReg, int32_t);
+        MacroAssembler::Call callOperation(J_JITOperation_EAapJcpZ, int, ArrayAllocationProfile*, const JSValue*, int32_t);
+        MacroAssembler::Call callOperation(J_JITOperation_EC, int, JSCell*);
+        MacroAssembler::Call callOperation(V_JITOperation_EC, JSCell*);
+        MacroAssembler::Call callOperation(J_JITOperation_EJ, int, GPRReg);
+#if USE(JSVALUE64)
+        MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_ESsiJI, int, StructureStubInfo*, GPRReg, UniquedStringImpl*);
+#else
+        MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_ESsiJI, int, StructureStubInfo*, GPRReg, GPRReg, UniquedStringImpl*);
+#endif
+        MacroAssembler::Call callOperation(J_JITOperation_EJIdc, int, GPRReg, const Identifier*);
+        MacroAssembler::Call callOperation(J_JITOperation_EJJ, int, GPRReg, GPRReg);
+        MacroAssembler::Call callOperation(J_JITOperation_EJJAp, int, GPRReg, GPRReg, ArrayProfile*);
+        MacroAssembler::Call callOperation(J_JITOperation_EJJBy, int, GPRReg, GPRReg, ByValInfo*);
+        MacroAssembler::Call callOperation(Z_JITOperation_EJOJ, GPRReg, GPRReg, GPRReg);
+        MacroAssembler::Call callOperation(C_JITOperation_EJsc, GPRReg);
+        MacroAssembler::Call callOperation(J_JITOperation_EJscC, int, GPRReg, JSCell*);
+        MacroAssembler::Call callOperation(J_JITOperation_EJscCJ, int, GPRReg, JSCell*, GPRReg);
+        MacroAssembler::Call callOperation(C_JITOperation_EJscZ, GPRReg, int32_t);
+        MacroAssembler::Call callOperation(C_JITOperation_EJscZ, int, GPRReg, int32_t);
+#if USE(JSVALUE64)
+        MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_EJJ, int, GPRReg, GPRReg);
+#else
+        MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_EJJ, int, GPRReg, GPRReg, GPRReg, GPRReg);
+#endif
+        MacroAssembler::Call callOperation(J_JITOperation_EP, int, void*);
+        MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_EPc, int, Instruction*);
+        MacroAssembler::Call callOperation(J_JITOperation_EPc, int, Instruction*);
+        MacroAssembler::Call callOperation(J_JITOperation_EZ, int, int32_t);
+        MacroAssembler::Call callOperation(J_JITOperation_EZZ, int, int32_t, int32_t);
+        MacroAssembler::Call callOperation(P_JITOperation_E);
+        MacroAssembler::Call callOperation(P_JITOperation_EJS, GPRReg, size_t);
+        MacroAssembler::Call callOperation(S_JITOperation_ECC, RegisterID, RegisterID);
+        MacroAssembler::Call callOperation(S_JITOperation_EJ, RegisterID);
+        MacroAssembler::Call callOperation(S_JITOperation_EJJ, RegisterID, RegisterID);
+        MacroAssembler::Call callOperation(S_JITOperation_EOJss, RegisterID, RegisterID);
+        MacroAssembler::Call callOperation(Sprt_JITOperation_EZ, int32_t);
+        MacroAssembler::Call callOperation(V_JITOperation_E);
+        MacroAssembler::Call callOperation(V_JITOperation_EC, RegisterID);
+        MacroAssembler::Call callOperation(V_JITOperation_ECC, RegisterID, RegisterID);
+        MacroAssembler::Call callOperation(V_JITOperation_ECIZC, RegisterID, UniquedStringImpl*, int32_t, RegisterID);
+        MacroAssembler::Call callOperation(V_JITOperation_ECIZCC, RegisterID, UniquedStringImpl*, int32_t, RegisterID, RegisterID);
+#if USE(JSVALUE64)
+        MacroAssembler::Call callOperation(V_JITOperation_ECJZC, RegisterID, RegisterID, int32_t, RegisterID);
+#else
+        MacroAssembler::Call callOperation(V_JITOperation_ECJZC, RegisterID, RegisterID, RegisterID, int32_t, RegisterID);
+#endif
+        MacroAssembler::Call callOperation(J_JITOperation_EE, RegisterID);
+        MacroAssembler::Call callOperation(V_JITOperation_EZSymtabJ, int, SymbolTable*, RegisterID);
+        MacroAssembler::Call callOperation(J_JITOperation_EZSymtabJ, int, SymbolTable*, RegisterID);
+        MacroAssembler::Call callOperation(V_JITOperation_EJ, RegisterID);
+        MacroAssembler::Call callOperationNoExceptionCheck(Z_JITOperation_E);
+#if USE(JSVALUE64)
+        MacroAssembler::Call callOperationNoExceptionCheck(V_JITOperation_EJ, RegisterID);
+#else
+        MacroAssembler::Call callOperationNoExceptionCheck(V_JITOperation_EJ, RegisterID, RegisterID);
+#endif
+#if USE(JSVALUE64)
+        MacroAssembler::Call callOperation(F_JITOperation_EFJZZ, RegisterID, RegisterID, int32_t, RegisterID);
+        MacroAssembler::Call callOperation(V_JITOperation_ESsiJJI, StructureStubInfo*, RegisterID, RegisterID, UniquedStringImpl*);
+        MacroAssembler::Call callOperation(V_JITOperation_ECIZJJ, RegisterID, UniquedStringImpl*, int32_t, RegisterID, RegisterID);
+#else
+        MacroAssembler::Call callOperation(V_JITOperation_ESsiJJI, StructureStubInfo*, RegisterID, RegisterID, RegisterID, RegisterID, UniquedStringImpl*);
+#endif
+        MacroAssembler::Call callOperation(V_JITOperation_EJJJ, RegisterID, RegisterID, RegisterID);
+        MacroAssembler::Call callOperation(V_JITOperation_EJJJAp, RegisterID, RegisterID, RegisterID, ArrayProfile*);
+        MacroAssembler::Call callOperation(V_JITOperation_EJJJBy, RegisterID, RegisterID, RegisterID, ByValInfo*);
+        MacroAssembler::Call callOperation(V_JITOperation_EJZJ, RegisterID, int32_t, RegisterID);
+        MacroAssembler::Call callOperation(V_JITOperation_EJZ, RegisterID, int32_t);
+        MacroAssembler::Call callOperation(V_JITOperation_EPc, Instruction*);
+        MacroAssembler::Call callOperation(V_JITOperation_EZ, int32_t);
+        MacroAssembler::Call callOperation(V_JITOperation_EZJ, int, GPRReg);
+        MacroAssembler::Call callOperationWithCallFrameRollbackOnException(J_JITOperation_E);
+        MacroAssembler::Call callOperationWithCallFrameRollbackOnException(V_JITOperation_ECb, CodeBlock*);
+        MacroAssembler::Call callOperationWithCallFrameRollbackOnException(Z_JITOperation_E);
+#if USE(JSVALUE32_64)
+        MacroAssembler::Call callOperation(F_JITOperation_EFJZZ, RegisterID, RegisterID, RegisterID, int32_t, RegisterID);
+        MacroAssembler::Call callOperation(Z_JITOperation_EJZZ, GPRReg, GPRReg, int32_t, int32_t);
+        MacroAssembler::Call callOperation(J_JITOperation_EAapJ, int, ArrayAllocationProfile*, GPRReg, GPRReg);
+        MacroAssembler::Call callOperation(J_JITOperation_EJ, int, GPRReg, GPRReg);
+        MacroAssembler::Call callOperation(J_JITOperation_EJIdc, int, GPRReg, GPRReg, const Identifier*);
+        MacroAssembler::Call callOperation(J_JITOperation_EJJ, int, GPRReg, GPRReg, GPRReg, GPRReg);
+        MacroAssembler::Call callOperation(Z_JITOperation_EJOJ, GPRReg, GPRReg, GPRReg, GPRReg, GPRReg);
+        MacroAssembler::Call callOperation(J_JITOperation_EJJAp, int, GPRReg, GPRReg, GPRReg, GPRReg, ArrayProfile*);
+        MacroAssembler::Call callOperation(J_JITOperation_EJJBy, int, GPRReg, GPRReg, GPRReg, GPRReg, ByValInfo*);
+        MacroAssembler::Call callOperation(P_JITOperation_EJS, GPRReg, GPRReg, size_t);
+        MacroAssembler::Call callOperation(S_JITOperation_EJ, RegisterID, RegisterID);
+        MacroAssembler::Call callOperation(S_JITOperation_EJJ, RegisterID, RegisterID, RegisterID, RegisterID);
+        MacroAssembler::Call callOperation(V_JITOperation_EZSymtabJ, int, SymbolTable*, RegisterID, RegisterID);
+        MacroAssembler::Call callOperation(V_JITOperation_EJ, RegisterID, RegisterID);
+        MacroAssembler::Call callOperation(V_JITOperation_EJJJ, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID);
+        MacroAssembler::Call callOperation(V_JITOperation_EJJJAp, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID, ArrayProfile*);
+        MacroAssembler::Call callOperation(V_JITOperation_EJJJBy, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID, ByValInfo*);
+        MacroAssembler::Call callOperation(V_JITOperation_EJZ, RegisterID, RegisterID, int32_t);
+        MacroAssembler::Call callOperation(V_JITOperation_EJZJ, RegisterID, RegisterID, int32_t, RegisterID, RegisterID);
+        MacroAssembler::Call callOperation(V_JITOperation_EZJ, int32_t, RegisterID, RegisterID);
+        MacroAssembler::Call callOperation(J_JITOperation_EJscCJ, int, GPRReg, JSCell*, GPRReg, GPRReg);
+#endif
+
+        template<typename SnippetGenerator>
+        void emitBitBinaryOpFastPath(Instruction* currentInstruction);
+
+        void emitRightShiftFastPath(Instruction* currentInstruction, OpcodeID);
 
         Jump checkStructure(RegisterID reg, Structure* structure);
-        void restoreArgumentReferenceForTrampoline();
         void updateTopCallFrame();
 
         Call emitNakedCall(CodePtr function = CodePtr());
+        Call emitNakedTailCall(CodePtr function = CodePtr());
 
         // Loads the character value of a single character string into dst.
         void emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures);
@@ -855,7 +853,7 @@ namespace JSC {
 #endif
 
 #ifndef NDEBUG
-        void printBytecodeOperandTypes(unsigned src1, unsigned src2);
+        void printBytecodeOperandTypes(int src1, int src2);
 #endif
 
 #if ENABLE(SAMPLING_FLAGS)
@@ -890,52 +888,37 @@ namespace JSC {
 #endif
 
         Interpreter* m_interpreter;
-        VM* m_vm;
-        CodeBlock* m_codeBlock;
 
        Vector<CallRecord> m_calls;
         Vector