From 1bf1084f2b10c3b47fd1a588d85d21ed0eb41d0c Mon Sep 17 00:00:00 2001 From: Lorry Tar Creator Date: Tue, 27 Jun 2017 06:07:23 +0000 Subject: webkitgtk-2.16.5 --- Source/JavaScriptCore/jit/AssemblyHelpers.cpp | 630 ++++- Source/JavaScriptCore/jit/AssemblyHelpers.h | 1342 ++++++++++- Source/JavaScriptCore/jit/BinarySwitch.cpp | 391 +++ Source/JavaScriptCore/jit/BinarySwitch.h | 143 ++ Source/JavaScriptCore/jit/CCallHelpers.cpp | 73 + Source/JavaScriptCore/jit/CCallHelpers.h | 1185 +++++++++- Source/JavaScriptCore/jit/CachedRecovery.cpp | 71 + Source/JavaScriptCore/jit/CachedRecovery.h | 134 ++ Source/JavaScriptCore/jit/CallFrameShuffleData.cpp | 68 + Source/JavaScriptCore/jit/CallFrameShuffleData.h | 52 + Source/JavaScriptCore/jit/CallFrameShuffler.cpp | 776 ++++++ Source/JavaScriptCore/jit/CallFrameShuffler.h | 804 +++++++ .../JavaScriptCore/jit/CallFrameShuffler32_64.cpp | 305 +++ Source/JavaScriptCore/jit/CallFrameShuffler64.cpp | 369 +++ .../JavaScriptCore/jit/ClosureCallStubRoutine.cpp | 63 - Source/JavaScriptCore/jit/ClosureCallStubRoutine.h | 66 - Source/JavaScriptCore/jit/CompactJITCodeMap.h | 41 +- .../jit/ExecutableAllocationFuzz.cpp | 73 + .../JavaScriptCore/jit/ExecutableAllocationFuzz.h | 47 + Source/JavaScriptCore/jit/ExecutableAllocator.cpp | 463 ++-- Source/JavaScriptCore/jit/ExecutableAllocator.h | 113 +- .../jit/ExecutableAllocatorFixedVMPool.cpp | 194 -- Source/JavaScriptCore/jit/FPRInfo.h | 112 +- .../JavaScriptCore/jit/GCAwareJITStubRoutine.cpp | 85 +- Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h | 57 +- Source/JavaScriptCore/jit/GPRInfo.cpp | 51 + Source/JavaScriptCore/jit/GPRInfo.h | 392 +-- Source/JavaScriptCore/jit/HostCallReturnValue.cpp | 1 + Source/JavaScriptCore/jit/HostCallReturnValue.h | 12 +- Source/JavaScriptCore/jit/ICStats.cpp | 128 + Source/JavaScriptCore/jit/ICStats.h | 194 ++ Source/JavaScriptCore/jit/IntrinsicEmitter.cpp | 136 ++ Source/JavaScriptCore/jit/JIT.cpp | 655 +++-- Source/JavaScriptCore/jit/JIT.h | 462 ++-- Source/JavaScriptCore/jit/JITAddGenerator.cpp | 187 ++ Source/JavaScriptCore/jit/JITAddGenerator.h | 79 + Source/JavaScriptCore/jit/JITArithmetic.cpp | 1065 +++++---- Source/JavaScriptCore/jit/JITArithmetic32_64.cpp | 782 +----- Source/JavaScriptCore/jit/JITBitAndGenerator.cpp | 85 + Source/JavaScriptCore/jit/JITBitAndGenerator.h | 46 + .../JavaScriptCore/jit/JITBitBinaryOpGenerator.h | 68 + Source/JavaScriptCore/jit/JITBitOrGenerator.cpp | 74 + Source/JavaScriptCore/jit/JITBitOrGenerator.h | 46 + Source/JavaScriptCore/jit/JITBitXorGenerator.cpp | 73 + Source/JavaScriptCore/jit/JITBitXorGenerator.h | 46 + Source/JavaScriptCore/jit/JITCall.cpp | 297 ++- Source/JavaScriptCore/jit/JITCall32_64.cpp | 319 ++- Source/JavaScriptCore/jit/JITCode.cpp | 172 +- Source/JavaScriptCore/jit/JITCode.h | 82 +- Source/JavaScriptCore/jit/JITCompilationEffort.h | 8 +- Source/JavaScriptCore/jit/JITDisassembler.cpp | 9 +- Source/JavaScriptCore/jit/JITDisassembler.h | 35 +- Source/JavaScriptCore/jit/JITDivGenerator.cpp | 139 ++ Source/JavaScriptCore/jit/JITDivGenerator.h | 82 + Source/JavaScriptCore/jit/JITExceptions.cpp | 51 +- Source/JavaScriptCore/jit/JITExceptions.h | 17 +- .../JavaScriptCore/jit/JITInlineCacheGenerator.cpp | 126 +- .../JavaScriptCore/jit/JITInlineCacheGenerator.h | 59 +- Source/JavaScriptCore/jit/JITInlines.h | 619 +++-- .../JavaScriptCore/jit/JITLeftShiftGenerator.cpp | 84 + Source/JavaScriptCore/jit/JITLeftShiftGenerator.h | 46 + Source/JavaScriptCore/jit/JITMathIC.h | 290 +++ Source/JavaScriptCore/jit/JITMathICForwards.h | 46 + 
Source/JavaScriptCore/jit/JITMathICInlineResult.h | 40 + Source/JavaScriptCore/jit/JITMulGenerator.cpp | 254 ++ Source/JavaScriptCore/jit/JITMulGenerator.h | 79 + Source/JavaScriptCore/jit/JITNegGenerator.cpp | 127 + Source/JavaScriptCore/jit/JITNegGenerator.h | 57 + Source/JavaScriptCore/jit/JITOpcodes.cpp | 1283 ++++++---- Source/JavaScriptCore/jit/JITOpcodes32_64.cpp | 946 ++++---- Source/JavaScriptCore/jit/JITOperationWrappers.h | 413 ---- Source/JavaScriptCore/jit/JITOperations.cpp | 2496 ++++++++++++++------ Source/JavaScriptCore/jit/JITOperations.h | 552 +++-- Source/JavaScriptCore/jit/JITOperationsMSVC64.cpp | 46 + Source/JavaScriptCore/jit/JITPropertyAccess.cpp | 1297 ++++++---- .../JavaScriptCore/jit/JITPropertyAccess32_64.cpp | 901 ++++--- .../JavaScriptCore/jit/JITRightShiftGenerator.cpp | 140 ++ Source/JavaScriptCore/jit/JITRightShiftGenerator.h | 60 + Source/JavaScriptCore/jit/JITStubRoutine.cpp | 9 +- Source/JavaScriptCore/jit/JITStubRoutine.h | 48 +- Source/JavaScriptCore/jit/JITStubs.cpp | 54 - Source/JavaScriptCore/jit/JITStubs.h | 59 - Source/JavaScriptCore/jit/JITStubsARM.h | 302 --- Source/JavaScriptCore/jit/JITStubsARMv7.h | 351 --- Source/JavaScriptCore/jit/JITStubsMSVC64.asm | 44 + Source/JavaScriptCore/jit/JITStubsX86.h | 649 ----- Source/JavaScriptCore/jit/JITStubsX86Common.h | 148 -- Source/JavaScriptCore/jit/JITStubsX86_64.h | 218 -- Source/JavaScriptCore/jit/JITSubGenerator.cpp | 142 ++ Source/JavaScriptCore/jit/JITSubGenerator.h | 76 + Source/JavaScriptCore/jit/JITThunks.cpp | 72 +- Source/JavaScriptCore/jit/JITThunks.h | 72 +- .../jit/JITToDFGDeferredCompilationCallback.cpp | 16 +- .../jit/JITToDFGDeferredCompilationCallback.h | 15 +- Source/JavaScriptCore/jit/JITWorklist.cpp | 330 +++ Source/JavaScriptCore/jit/JITWorklist.h | 83 + Source/JavaScriptCore/jit/JITWriteBarrier.h | 147 -- Source/JavaScriptCore/jit/JSInterfaceJIT.h | 56 +- Source/JavaScriptCore/jit/PCToCodeOriginMap.cpp | 306 +++ Source/JavaScriptCore/jit/PCToCodeOriginMap.h | 101 + .../jit/PolymorphicCallStubRoutine.cpp | 137 ++ .../jit/PolymorphicCallStubRoutine.h | 111 + Source/JavaScriptCore/jit/Reg.cpp | 58 + Source/JavaScriptCore/jit/Reg.h | 248 ++ Source/JavaScriptCore/jit/RegisterAtOffset.cpp | 45 + Source/JavaScriptCore/jit/RegisterAtOffset.h | 77 + Source/JavaScriptCore/jit/RegisterAtOffsetList.cpp | 72 + Source/JavaScriptCore/jit/RegisterAtOffsetList.h | 72 + Source/JavaScriptCore/jit/RegisterMap.h | 110 + Source/JavaScriptCore/jit/RegisterSet.cpp | 301 ++- Source/JavaScriptCore/jit/RegisterSet.h | 135 +- Source/JavaScriptCore/jit/Repatch.cpp | 2095 ++++++---------- Source/JavaScriptCore/jit/Repatch.h | 52 +- .../jit/ScratchRegisterAllocator.cpp | 302 +++ .../JavaScriptCore/jit/ScratchRegisterAllocator.h | 198 +- Source/JavaScriptCore/jit/SetupVarargsFrame.cpp | 141 ++ Source/JavaScriptCore/jit/SetupVarargsFrame.h | 43 + Source/JavaScriptCore/jit/SlowPathCall.h | 9 +- Source/JavaScriptCore/jit/SnippetOperand.h | 104 + Source/JavaScriptCore/jit/SpecializedThunkJIT.h | 51 +- Source/JavaScriptCore/jit/SpillRegistersMode.h | 32 + Source/JavaScriptCore/jit/TagRegistersMode.cpp | 50 + Source/JavaScriptCore/jit/TagRegistersMode.h | 42 + Source/JavaScriptCore/jit/TempRegisterSet.cpp | 3 + Source/JavaScriptCore/jit/TempRegisterSet.h | 27 +- Source/JavaScriptCore/jit/ThunkGenerator.h | 8 +- Source/JavaScriptCore/jit/ThunkGenerators.cpp | 930 ++++---- Source/JavaScriptCore/jit/ThunkGenerators.h | 33 +- Source/JavaScriptCore/jit/UnusedPointer.h | 5 +- 129 files changed, 21589 insertions(+), 10611 
deletions(-) create mode 100644 Source/JavaScriptCore/jit/BinarySwitch.cpp create mode 100644 Source/JavaScriptCore/jit/BinarySwitch.h create mode 100644 Source/JavaScriptCore/jit/CCallHelpers.cpp create mode 100644 Source/JavaScriptCore/jit/CachedRecovery.cpp create mode 100644 Source/JavaScriptCore/jit/CachedRecovery.h create mode 100644 Source/JavaScriptCore/jit/CallFrameShuffleData.cpp create mode 100644 Source/JavaScriptCore/jit/CallFrameShuffleData.h create mode 100644 Source/JavaScriptCore/jit/CallFrameShuffler.cpp create mode 100644 Source/JavaScriptCore/jit/CallFrameShuffler.h create mode 100644 Source/JavaScriptCore/jit/CallFrameShuffler32_64.cpp create mode 100644 Source/JavaScriptCore/jit/CallFrameShuffler64.cpp delete mode 100644 Source/JavaScriptCore/jit/ClosureCallStubRoutine.cpp delete mode 100644 Source/JavaScriptCore/jit/ClosureCallStubRoutine.h create mode 100644 Source/JavaScriptCore/jit/ExecutableAllocationFuzz.cpp create mode 100644 Source/JavaScriptCore/jit/ExecutableAllocationFuzz.h delete mode 100644 Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp create mode 100644 Source/JavaScriptCore/jit/GPRInfo.cpp create mode 100644 Source/JavaScriptCore/jit/ICStats.cpp create mode 100644 Source/JavaScriptCore/jit/ICStats.h create mode 100644 Source/JavaScriptCore/jit/IntrinsicEmitter.cpp create mode 100644 Source/JavaScriptCore/jit/JITAddGenerator.cpp create mode 100644 Source/JavaScriptCore/jit/JITAddGenerator.h create mode 100644 Source/JavaScriptCore/jit/JITBitAndGenerator.cpp create mode 100644 Source/JavaScriptCore/jit/JITBitAndGenerator.h create mode 100644 Source/JavaScriptCore/jit/JITBitBinaryOpGenerator.h create mode 100644 Source/JavaScriptCore/jit/JITBitOrGenerator.cpp create mode 100644 Source/JavaScriptCore/jit/JITBitOrGenerator.h create mode 100644 Source/JavaScriptCore/jit/JITBitXorGenerator.cpp create mode 100644 Source/JavaScriptCore/jit/JITBitXorGenerator.h create mode 100644 Source/JavaScriptCore/jit/JITDivGenerator.cpp create mode 100644 Source/JavaScriptCore/jit/JITDivGenerator.h create mode 100644 Source/JavaScriptCore/jit/JITLeftShiftGenerator.cpp create mode 100644 Source/JavaScriptCore/jit/JITLeftShiftGenerator.h create mode 100644 Source/JavaScriptCore/jit/JITMathIC.h create mode 100644 Source/JavaScriptCore/jit/JITMathICForwards.h create mode 100644 Source/JavaScriptCore/jit/JITMathICInlineResult.h create mode 100644 Source/JavaScriptCore/jit/JITMulGenerator.cpp create mode 100644 Source/JavaScriptCore/jit/JITMulGenerator.h create mode 100644 Source/JavaScriptCore/jit/JITNegGenerator.cpp create mode 100644 Source/JavaScriptCore/jit/JITNegGenerator.h delete mode 100644 Source/JavaScriptCore/jit/JITOperationWrappers.h create mode 100644 Source/JavaScriptCore/jit/JITOperationsMSVC64.cpp create mode 100644 Source/JavaScriptCore/jit/JITRightShiftGenerator.cpp create mode 100644 Source/JavaScriptCore/jit/JITRightShiftGenerator.h delete mode 100644 Source/JavaScriptCore/jit/JITStubs.cpp delete mode 100644 Source/JavaScriptCore/jit/JITStubs.h delete mode 100644 Source/JavaScriptCore/jit/JITStubsARM.h delete mode 100644 Source/JavaScriptCore/jit/JITStubsARMv7.h create mode 100644 Source/JavaScriptCore/jit/JITStubsMSVC64.asm delete mode 100644 Source/JavaScriptCore/jit/JITStubsX86.h delete mode 100644 Source/JavaScriptCore/jit/JITStubsX86Common.h delete mode 100644 Source/JavaScriptCore/jit/JITStubsX86_64.h create mode 100644 Source/JavaScriptCore/jit/JITSubGenerator.cpp create mode 100644 Source/JavaScriptCore/jit/JITSubGenerator.h create mode 
100644 Source/JavaScriptCore/jit/JITWorklist.cpp create mode 100644 Source/JavaScriptCore/jit/JITWorklist.h delete mode 100644 Source/JavaScriptCore/jit/JITWriteBarrier.h create mode 100644 Source/JavaScriptCore/jit/PCToCodeOriginMap.cpp create mode 100644 Source/JavaScriptCore/jit/PCToCodeOriginMap.h create mode 100644 Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.cpp create mode 100644 Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.h create mode 100644 Source/JavaScriptCore/jit/Reg.cpp create mode 100644 Source/JavaScriptCore/jit/Reg.h create mode 100644 Source/JavaScriptCore/jit/RegisterAtOffset.cpp create mode 100644 Source/JavaScriptCore/jit/RegisterAtOffset.h create mode 100644 Source/JavaScriptCore/jit/RegisterAtOffsetList.cpp create mode 100644 Source/JavaScriptCore/jit/RegisterAtOffsetList.h create mode 100644 Source/JavaScriptCore/jit/RegisterMap.h create mode 100644 Source/JavaScriptCore/jit/ScratchRegisterAllocator.cpp create mode 100644 Source/JavaScriptCore/jit/SetupVarargsFrame.cpp create mode 100644 Source/JavaScriptCore/jit/SetupVarargsFrame.h create mode 100644 Source/JavaScriptCore/jit/SnippetOperand.h create mode 100644 Source/JavaScriptCore/jit/SpillRegistersMode.h create mode 100644 Source/JavaScriptCore/jit/TagRegistersMode.cpp create mode 100644 Source/JavaScriptCore/jit/TagRegistersMode.h (limited to 'Source/JavaScriptCore/jit') diff --git a/Source/JavaScriptCore/jit/AssemblyHelpers.cpp b/Source/JavaScriptCore/jit/AssemblyHelpers.cpp index ddf1d6359..783204792 100644 --- a/Source/JavaScriptCore/jit/AssemblyHelpers.cpp +++ b/Source/JavaScriptCore/jit/AssemblyHelpers.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2013-2016 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,6 +28,10 @@ #if ENABLE(JIT) +#include "JITOperations.h" +#include "JSCInlines.h" +#include "LinkBuffer.h" + namespace JSC { ExecutableBase* AssemblyHelpers::executableFor(const CodeOrigin& codeOrigin) @@ -35,7 +39,7 @@ ExecutableBase* AssemblyHelpers::executableFor(const CodeOrigin& codeOrigin) if (!codeOrigin.inlineCallFrame) return m_codeBlock->ownerExecutable(); - return codeOrigin.inlineCallFrame->executable.get(); + return codeOrigin.inlineCallFrame->baselineCodeBlock->ownerExecutable(); } Vector& AssemblyHelpers::decodedCodeMapFor(CodeBlock* codeBlock) @@ -52,6 +56,117 @@ Vector& AssemblyHelpers::decodedCodeMapFor(CodeBlock* return result.iterator->value; } +AssemblyHelpers::JumpList AssemblyHelpers::branchIfNotType( + JSValueRegs regs, GPRReg tempGPR, const InferredType::Descriptor& descriptor, TagRegistersMode mode) +{ + AssemblyHelpers::JumpList result; + + switch (descriptor.kind()) { + case InferredType::Bottom: + result.append(jump()); + break; + + case InferredType::Boolean: + result.append(branchIfNotBoolean(regs, tempGPR)); + break; + + case InferredType::Other: + result.append(branchIfNotOther(regs, tempGPR)); + break; + + case InferredType::Int32: + result.append(branchIfNotInt32(regs, mode)); + break; + + case InferredType::Number: + result.append(branchIfNotNumber(regs, tempGPR, mode)); + break; + + case InferredType::String: + result.append(branchIfNotCell(regs, mode)); + result.append(branchIfNotString(regs.payloadGPR())); + break; + + case InferredType::Symbol: + result.append(branchIfNotCell(regs, mode)); + result.append(branchIfNotSymbol(regs.payloadGPR())); + break; + + case InferredType::ObjectWithStructure: + result.append(branchIfNotCell(regs, mode)); + result.append( + branchStructure( + NotEqual, + Address(regs.payloadGPR(), JSCell::structureIDOffset()), + descriptor.structure())); + break; + + case InferredType::ObjectWithStructureOrOther: { + Jump ok = branchIfOther(regs, tempGPR); + result.append(branchIfNotCell(regs, mode)); + result.append( + branchStructure( + NotEqual, + Address(regs.payloadGPR(), JSCell::structureIDOffset()), + descriptor.structure())); + ok.link(this); + break; + } + + case InferredType::Object: + result.append(branchIfNotCell(regs, mode)); + result.append(branchIfNotObject(regs.payloadGPR())); + break; + + case InferredType::ObjectOrOther: { + Jump ok = branchIfOther(regs, tempGPR); + result.append(branchIfNotCell(regs, mode)); + result.append(branchIfNotObject(regs.payloadGPR())); + ok.link(this); + break; + } + + case InferredType::Top: + break; + } + + return result; +} + +AssemblyHelpers::Jump AssemblyHelpers::branchIfFastTypedArray(GPRReg baseGPR) +{ + return branch32( + Equal, + Address(baseGPR, JSArrayBufferView::offsetOfMode()), + TrustedImm32(FastTypedArray)); +} + +AssemblyHelpers::Jump AssemblyHelpers::branchIfNotFastTypedArray(GPRReg baseGPR) +{ + return branch32( + NotEqual, + Address(baseGPR, JSArrayBufferView::offsetOfMode()), + TrustedImm32(FastTypedArray)); +} + +void AssemblyHelpers::incrementSuperSamplerCount() +{ + add32(TrustedImm32(1), AbsoluteAddress(bitwise_cast(&g_superSamplerCount))); +} + +void AssemblyHelpers::decrementSuperSamplerCount() +{ + sub32(TrustedImm32(1), AbsoluteAddress(bitwise_cast(&g_superSamplerCount))); +} + +void AssemblyHelpers::purifyNaN(FPRReg fpr) +{ + MacroAssembler::Jump notNaN = branchDouble(DoubleEqual, fpr, fpr); + static const double 
NaN = PNaN; + loadDouble(TrustedImmPtr(&NaN), fpr); + notNaN.link(this); +} + #if ENABLE(SAMPLING_FLAGS) void AssemblyHelpers::setSamplingFlag(int32_t flag) { @@ -74,7 +189,7 @@ void AssemblyHelpers::jitAssertIsInt32(GPRReg gpr) { #if CPU(X86_64) Jump checkInt32 = branch64(BelowOrEqual, gpr, TrustedImm64(static_cast(0xFFFFFFFFu))); - breakpoint(); + abortWithReason(AHIsNotInt32); checkInt32.link(this); #else UNUSED_PARAM(gpr); @@ -84,14 +199,14 @@ void AssemblyHelpers::jitAssertIsInt32(GPRReg gpr) void AssemblyHelpers::jitAssertIsJSInt32(GPRReg gpr) { Jump checkJSInt32 = branch64(AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister); - breakpoint(); + abortWithReason(AHIsNotJSInt32); checkJSInt32.link(this); } void AssemblyHelpers::jitAssertIsJSNumber(GPRReg gpr) { Jump checkJSNumber = branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister); - breakpoint(); + abortWithReason(AHIsNotJSNumber); checkJSNumber.link(this); } @@ -100,16 +215,28 @@ void AssemblyHelpers::jitAssertIsJSDouble(GPRReg gpr) Jump checkJSInt32 = branch64(AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister); Jump checkJSNumber = branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister); checkJSInt32.link(this); - breakpoint(); + abortWithReason(AHIsNotJSDouble); checkJSNumber.link(this); } void AssemblyHelpers::jitAssertIsCell(GPRReg gpr) { Jump checkCell = branchTest64(MacroAssembler::Zero, gpr, GPRInfo::tagMaskRegister); - breakpoint(); + abortWithReason(AHIsNotCell); checkCell.link(this); } + +void AssemblyHelpers::jitAssertTagsInPlace() +{ + Jump ok = branch64(Equal, GPRInfo::tagTypeNumberRegister, TrustedImm64(TagTypeNumber)); + abortWithReason(AHTagTypeNumberNotInPlace); + breakpoint(); + ok.link(this); + + ok = branch64(Equal, GPRInfo::tagMaskRegister, TrustedImm64(TagMask)); + abortWithReason(AHTagMaskNotInPlace); + ok.link(this); +} #elif USE(JSVALUE32_64) void AssemblyHelpers::jitAssertIsInt32(GPRReg gpr) { @@ -119,7 +246,7 @@ void AssemblyHelpers::jitAssertIsInt32(GPRReg gpr) void AssemblyHelpers::jitAssertIsJSInt32(GPRReg gpr) { Jump checkJSInt32 = branch32(Equal, gpr, TrustedImm32(JSValue::Int32Tag)); - breakpoint(); + abortWithReason(AHIsNotJSInt32); checkJSInt32.link(this); } @@ -127,7 +254,7 @@ void AssemblyHelpers::jitAssertIsJSNumber(GPRReg gpr) { Jump checkJSInt32 = branch32(Equal, gpr, TrustedImm32(JSValue::Int32Tag)); Jump checkJSDouble = branch32(Below, gpr, TrustedImm32(JSValue::LowestTag)); - breakpoint(); + abortWithReason(AHIsNotJSNumber); checkJSInt32.link(this); checkJSDouble.link(this); } @@ -135,33 +262,510 @@ void AssemblyHelpers::jitAssertIsJSNumber(GPRReg gpr) void AssemblyHelpers::jitAssertIsJSDouble(GPRReg gpr) { Jump checkJSDouble = branch32(Below, gpr, TrustedImm32(JSValue::LowestTag)); - breakpoint(); + abortWithReason(AHIsNotJSDouble); checkJSDouble.link(this); } void AssemblyHelpers::jitAssertIsCell(GPRReg gpr) { Jump checkCell = branch32(Equal, gpr, TrustedImm32(JSValue::CellTag)); - breakpoint(); + abortWithReason(AHIsNotCell); checkCell.link(this); } + +void AssemblyHelpers::jitAssertTagsInPlace() +{ +} #endif // USE(JSVALUE32_64) void AssemblyHelpers::jitAssertHasValidCallFrame() { Jump checkCFR = branchTestPtr(Zero, GPRInfo::callFrameRegister, TrustedImm32(7)); - breakpoint(); + abortWithReason(AHCallFrameMisaligned); checkCFR.link(this); } void AssemblyHelpers::jitAssertIsNull(GPRReg gpr) { Jump checkNull = branchTestPtr(Zero, gpr); - breakpoint(); + abortWithReason(AHIsNotNull); checkNull.link(this); } + +void 
AssemblyHelpers::jitAssertArgumentCountSane() +{ + Jump ok = branch32(Below, payloadFor(CallFrameSlot::argumentCount), TrustedImm32(10000000)); + abortWithReason(AHInsaneArgumentCount); + ok.link(this); +} + #endif // !ASSERT_DISABLED +void AssemblyHelpers::jitReleaseAssertNoException() +{ + Jump noException; +#if USE(JSVALUE64) + noException = branchTest64(Zero, AbsoluteAddress(vm()->addressOfException())); +#elif USE(JSVALUE32_64) + noException = branch32(Equal, AbsoluteAddress(vm()->addressOfException()), TrustedImm32(0)); +#endif + abortWithReason(JITUncoughtExceptionAfterCall); + noException.link(this); +} + +void AssemblyHelpers::callExceptionFuzz() +{ + if (!Options::useExceptionFuzz()) + return; + + EncodedJSValue* buffer = vm()->exceptionFuzzingBuffer(sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters)); + + for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) { +#if USE(JSVALUE64) + store64(GPRInfo::toRegister(i), buffer + i); +#else + store32(GPRInfo::toRegister(i), buffer + i); +#endif + } + for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) { + move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0); + storeDouble(FPRInfo::toRegister(i), Address(GPRInfo::regT0)); + } + + // Set up one argument. +#if CPU(X86) + poke(GPRInfo::callFrameRegister, 0); +#else + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); +#endif + move(TrustedImmPtr(bitwise_cast(operationExceptionFuzz)), GPRInfo::nonPreservedNonReturnGPR); + call(GPRInfo::nonPreservedNonReturnGPR); + + for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) { + move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0); + loadDouble(Address(GPRInfo::regT0), FPRInfo::toRegister(i)); + } + for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) { +#if USE(JSVALUE64) + load64(buffer + i, GPRInfo::toRegister(i)); +#else + load32(buffer + i, GPRInfo::toRegister(i)); +#endif + } +} + +AssemblyHelpers::Jump AssemblyHelpers::emitJumpIfException() +{ + return emitExceptionCheck(NormalExceptionCheck); +} + +AssemblyHelpers::Jump AssemblyHelpers::emitExceptionCheck(ExceptionCheckKind kind, ExceptionJumpWidth width) +{ + callExceptionFuzz(); + + if (width == FarJumpWidth) + kind = (kind == NormalExceptionCheck ? InvertedExceptionCheck : NormalExceptionCheck); + + Jump result; +#if USE(JSVALUE64) + result = branchTest64(kind == NormalExceptionCheck ? NonZero : Zero, AbsoluteAddress(vm()->addressOfException())); +#elif USE(JSVALUE32_64) + result = branch32(kind == NormalExceptionCheck ? 
NotEqual : Equal, AbsoluteAddress(vm()->addressOfException()), TrustedImm32(0)); +#endif + + if (width == NormalJumpWidth) + return result; + + PatchableJump realJump = patchableJump(); + result.link(this); + + return realJump.m_jump; +} + +AssemblyHelpers::Jump AssemblyHelpers::emitNonPatchableExceptionCheck() +{ + callExceptionFuzz(); + + Jump result; +#if USE(JSVALUE64) + result = branchTest64(NonZero, AbsoluteAddress(vm()->addressOfException())); +#elif USE(JSVALUE32_64) + result = branch32(NotEqual, AbsoluteAddress(vm()->addressOfException()), TrustedImm32(0)); +#endif + + return result; +} + +void AssemblyHelpers::emitStoreStructureWithTypeInfo(AssemblyHelpers& jit, TrustedImmPtr structure, RegisterID dest) +{ + const Structure* structurePtr = static_cast(structure.m_value); +#if USE(JSVALUE64) + jit.store64(TrustedImm64(structurePtr->idBlob()), MacroAssembler::Address(dest, JSCell::structureIDOffset())); + if (!ASSERT_DISABLED) { + Jump correctStructure = jit.branch32(Equal, MacroAssembler::Address(dest, JSCell::structureIDOffset()), TrustedImm32(structurePtr->id())); + jit.abortWithReason(AHStructureIDIsValid); + correctStructure.link(&jit); + + Jump correctIndexingType = jit.branch8(Equal, MacroAssembler::Address(dest, JSCell::indexingTypeAndMiscOffset()), TrustedImm32(structurePtr->indexingTypeIncludingHistory())); + jit.abortWithReason(AHIndexingTypeIsValid); + correctIndexingType.link(&jit); + + Jump correctType = jit.branch8(Equal, MacroAssembler::Address(dest, JSCell::typeInfoTypeOffset()), TrustedImm32(structurePtr->typeInfo().type())); + jit.abortWithReason(AHTypeInfoIsValid); + correctType.link(&jit); + + Jump correctFlags = jit.branch8(Equal, MacroAssembler::Address(dest, JSCell::typeInfoFlagsOffset()), TrustedImm32(structurePtr->typeInfo().inlineTypeFlags())); + jit.abortWithReason(AHTypeInfoInlineTypeFlagsAreValid); + correctFlags.link(&jit); + } +#else + // Do a 32-bit wide store to initialize the cell's fields. 
+ jit.store32(TrustedImm32(structurePtr->objectInitializationBlob()), MacroAssembler::Address(dest, JSCell::indexingTypeAndMiscOffset())); + jit.storePtr(structure, MacroAssembler::Address(dest, JSCell::structureIDOffset())); +#endif +} + +void AssemblyHelpers::loadProperty(GPRReg object, GPRReg offset, JSValueRegs result) +{ + Jump isInline = branch32(LessThan, offset, TrustedImm32(firstOutOfLineOffset)); + + loadPtr(Address(object, JSObject::butterflyOffset()), result.payloadGPR()); + neg32(offset); + signExtend32ToPtr(offset, offset); + Jump ready = jump(); + + isInline.link(this); + addPtr( + TrustedImm32( + static_cast(sizeof(JSObject)) - + (static_cast(firstOutOfLineOffset) - 2) * static_cast(sizeof(EncodedJSValue))), + object, result.payloadGPR()); + + ready.link(this); + + loadValue( + BaseIndex( + result.payloadGPR(), offset, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), + result); +} + +void AssemblyHelpers::emitLoadStructure(RegisterID source, RegisterID dest, RegisterID scratch) +{ +#if USE(JSVALUE64) + ASSERT(dest != scratch); + load32(MacroAssembler::Address(source, JSCell::structureIDOffset()), dest); + loadPtr(vm()->heap.structureIDTable().base(), scratch); + loadPtr(MacroAssembler::BaseIndex(scratch, dest, MacroAssembler::TimesEight), dest); +#else + UNUSED_PARAM(scratch); + loadPtr(MacroAssembler::Address(source, JSCell::structureIDOffset()), dest); +#endif +} + +void AssemblyHelpers::makeSpaceOnStackForCCall() +{ + unsigned stackOffset = WTF::roundUpToMultipleOf(stackAlignmentBytes(), maxFrameExtentForSlowPathCall); + if (stackOffset) + subPtr(TrustedImm32(stackOffset), stackPointerRegister); +} + +void AssemblyHelpers::reclaimSpaceOnStackForCCall() +{ + unsigned stackOffset = WTF::roundUpToMultipleOf(stackAlignmentBytes(), maxFrameExtentForSlowPathCall); + if (stackOffset) + addPtr(TrustedImm32(stackOffset), stackPointerRegister); +} + +#if USE(JSVALUE64) +template +void emitRandomThunkImpl(AssemblyHelpers& jit, GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, FPRReg result, const LoadFromHigh& loadFromHigh, const StoreToHigh& storeToHigh, const LoadFromLow& loadFromLow, const StoreToLow& storeToLow) +{ + // Inlined WeakRandom::advance(). + // uint64_t x = m_low; + loadFromLow(scratch0); + // uint64_t y = m_high; + loadFromHigh(scratch1); + // m_low = y; + storeToLow(scratch1); + + // x ^= x << 23; + jit.move(scratch0, scratch2); + jit.lshift64(AssemblyHelpers::TrustedImm32(23), scratch2); + jit.xor64(scratch2, scratch0); + + // x ^= x >> 17; + jit.move(scratch0, scratch2); + jit.rshift64(AssemblyHelpers::TrustedImm32(17), scratch2); + jit.xor64(scratch2, scratch0); + + // x ^= y ^ (y >> 26); + jit.move(scratch1, scratch2); + jit.rshift64(AssemblyHelpers::TrustedImm32(26), scratch2); + jit.xor64(scratch1, scratch2); + jit.xor64(scratch2, scratch0); + + // m_high = x; + storeToHigh(scratch0); + + // return x + y; + jit.add64(scratch1, scratch0); + + // Extract random 53bit. [0, 53] bit is safe integer number ranges in double representation. + jit.move(AssemblyHelpers::TrustedImm64((1ULL << 53) - 1), scratch1); + jit.and64(scratch1, scratch0); + // Now, scratch0 is always in range of int64_t. Safe to convert it to double with cvtsi2sdq. + jit.convertInt64ToDouble(scratch0, result); + + // Convert `(53bit double integer value) / (1 << 53)` to `(53bit double integer value) * (1.0 / (1 << 53))`. + // In latter case, `1.0 / (1 << 53)` will become a double value represented as (mantissa = 0 & exp = 970, it means 1e-(2**54)). 
+ static const double scale = 1.0 / (1ULL << 53); + + // Multiplying 1e-(2**54) with the double integer does not change anything of the mantissa part of the double integer. + // It just reduces the exp part of the given 53bit double integer. + // (Except for 0.0. This is specially handled and in this case, exp just becomes 0.) + // Now we get 53bit precision random double value in [0, 1). + jit.move(AssemblyHelpers::TrustedImmPtr(&scale), scratch1); + jit.mulDouble(AssemblyHelpers::Address(scratch1), result); +} + +void AssemblyHelpers::emitRandomThunk(JSGlobalObject* globalObject, GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, FPRReg result) +{ + void* lowAddress = reinterpret_cast(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::lowOffset(); + void* highAddress = reinterpret_cast(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::highOffset(); + + auto loadFromHigh = [&](GPRReg high) { + load64(highAddress, high); + }; + auto storeToHigh = [&](GPRReg high) { + store64(high, highAddress); + }; + auto loadFromLow = [&](GPRReg low) { + load64(lowAddress, low); + }; + auto storeToLow = [&](GPRReg low) { + store64(low, lowAddress); + }; + + emitRandomThunkImpl(*this, scratch0, scratch1, scratch2, result, loadFromHigh, storeToHigh, loadFromLow, storeToLow); +} + +void AssemblyHelpers::emitRandomThunk(GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, GPRReg scratch3, FPRReg result) +{ + emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, scratch3); + emitLoadStructure(scratch3, scratch3, scratch0); + loadPtr(Address(scratch3, Structure::globalObjectOffset()), scratch3); + // Now, scratch3 holds JSGlobalObject*. + + auto loadFromHigh = [&](GPRReg high) { + load64(Address(scratch3, JSGlobalObject::weakRandomOffset() + WeakRandom::highOffset()), high); + }; + auto storeToHigh = [&](GPRReg high) { + store64(high, Address(scratch3, JSGlobalObject::weakRandomOffset() + WeakRandom::highOffset())); + }; + auto loadFromLow = [&](GPRReg low) { + load64(Address(scratch3, JSGlobalObject::weakRandomOffset() + WeakRandom::lowOffset()), low); + }; + auto storeToLow = [&](GPRReg low) { + store64(low, Address(scratch3, JSGlobalObject::weakRandomOffset() + WeakRandom::lowOffset())); + }; + + emitRandomThunkImpl(*this, scratch0, scratch1, scratch2, result, loadFromHigh, storeToHigh, loadFromLow, storeToLow); +} +#endif + +void AssemblyHelpers::restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer() +{ +#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0 + RegisterAtOffsetList* allCalleeSaves = m_vm->getAllCalleeSaveRegisterOffsets(); + RegisterSet dontRestoreRegisters = RegisterSet::stackRegisters(); + unsigned registerCount = allCalleeSaves->size(); + + GPRReg scratch = InvalidGPRReg; + unsigned scratchGPREntryIndex = 0; + + // Use the first GPR entry's register as our scratch. + for (unsigned i = 0; i < registerCount; i++) { + RegisterAtOffset entry = allCalleeSaves->at(i); + if (dontRestoreRegisters.get(entry.reg())) + continue; + if (entry.reg().isGPR()) { + scratchGPREntryIndex = i; + scratch = entry.reg().gpr(); + break; + } + } + ASSERT(scratch != InvalidGPRReg); + + loadPtr(&m_vm->topVMEntryFrame, scratch); + addPtr(TrustedImm32(VMEntryFrame::calleeSaveRegistersBufferOffset()), scratch); + + // Restore all callee saves except for the scratch. 
+ for (unsigned i = 0; i < registerCount; i++) { + RegisterAtOffset entry = allCalleeSaves->at(i); + if (dontRestoreRegisters.get(entry.reg())) + continue; + if (entry.reg().isGPR()) { + if (i != scratchGPREntryIndex) + loadPtr(Address(scratch, entry.offset()), entry.reg().gpr()); + } else + loadDouble(Address(scratch, entry.offset()), entry.reg().fpr()); + } + + // Restore the callee save value of the scratch. + RegisterAtOffset entry = allCalleeSaves->at(scratchGPREntryIndex); + ASSERT(!dontRestoreRegisters.get(entry.reg())); + ASSERT(entry.reg().isGPR()); + ASSERT(scratch == entry.reg().gpr()); + loadPtr(Address(scratch, entry.offset()), scratch); +#endif +} + +void AssemblyHelpers::emitDumbVirtualCall(CallLinkInfo* info) +{ + move(TrustedImmPtr(info), GPRInfo::regT2); + Call call = nearCall(); + addLinkTask( + [=] (LinkBuffer& linkBuffer) { + MacroAssemblerCodeRef virtualThunk = virtualThunkFor(&linkBuffer.vm(), *info); + info->setSlowStub(createJITStubRoutine(virtualThunk, linkBuffer.vm(), nullptr, true)); + linkBuffer.link(call, CodeLocationLabel(virtualThunk.code())); + }); +} + +#if USE(JSVALUE64) +void AssemblyHelpers::wangsInt64Hash(GPRReg inputAndResult, GPRReg scratch) +{ + GPRReg input = inputAndResult; + // key += ~(key << 32); + move(input, scratch); + lshift64(TrustedImm32(32), scratch); + not64(scratch); + add64(scratch, input); + // key ^= (key >> 22); + move(input, scratch); + urshift64(TrustedImm32(22), scratch); + xor64(scratch, input); + // key += ~(key << 13); + move(input, scratch); + lshift64(TrustedImm32(13), scratch); + not64(scratch); + add64(scratch, input); + // key ^= (key >> 8); + move(input, scratch); + urshift64(TrustedImm32(8), scratch); + xor64(scratch, input); + // key += (key << 3); + move(input, scratch); + lshift64(TrustedImm32(3), scratch); + add64(scratch, input); + // key ^= (key >> 15); + move(input, scratch); + urshift64(TrustedImm32(15), scratch); + xor64(scratch, input); + // key += ~(key << 27); + move(input, scratch); + lshift64(TrustedImm32(27), scratch); + not64(scratch); + add64(scratch, input); + // key ^= (key >> 31); + move(input, scratch); + urshift64(TrustedImm32(31), scratch); + xor64(scratch, input); + + // return static_cast(result) + void* mask = bitwise_cast(static_cast(UINT_MAX)); + and64(TrustedImmPtr(mask), inputAndResult); +} +#endif // USE(JSVALUE64) + +void AssemblyHelpers::emitConvertValueToBoolean(JSValueRegs value, GPRReg result, GPRReg scratch, FPRReg valueAsFPR, FPRReg tempFPR, bool shouldCheckMasqueradesAsUndefined, JSGlobalObject* globalObject, bool negateResult) +{ + // Implements the following control flow structure: + // if (value is boolean) { + // result = value === true + // } else if (value is integer) { + // result = value !== 0 + // } else if (value is double) { + // result = value !== 0.0 && !isNaN(value); + // } else if (value is cell) { + // if (value is string) { + // result = value.length() !== 0; + // } else { + // do crazy things for masquerades as undefined + // } + // } else { + // result = false; + // } + // + // if (negateResult) + // result = !result; + + JumpList done; + auto notBoolean = branchIfNotBoolean(value, result); +#if USE(JSVALUE64) + compare32(negateResult ? NotEqual : Equal, value.gpr(), TrustedImm32(ValueTrue), result); +#else + compare32(negateResult ? 
Equal : NotEqual, value.payloadGPR(), TrustedImm32(0), result); +#endif + done.append(jump()); + + notBoolean.link(this); +#if USE(JSVALUE64) + auto isNotNumber = branchIfNotNumber(value.gpr()); +#else + ASSERT(scratch != InvalidGPRReg); + auto isNotNumber = branchIfNotNumber(value, scratch); +#endif + auto isDouble = branchIfNotInt32(value); + + // It's an int32. + compare32(negateResult ? Equal : NotEqual, value.payloadGPR(), TrustedImm32(0), result); + done.append(jump()); + + isDouble.link(this); +#if USE(JSVALUE64) + unboxDouble(value.gpr(), result, valueAsFPR); +#else + unboxDouble(value, valueAsFPR, tempFPR); +#endif + auto isZeroOrNaN = branchDoubleZeroOrNaN(valueAsFPR, tempFPR); + move(negateResult ? TrustedImm32(0) : TrustedImm32(1), result); + done.append(jump()); + isZeroOrNaN.link(this); + move(negateResult ? TrustedImm32(1) : TrustedImm32(0), result); + done.append(jump()); + + isNotNumber.link(this); + auto isNotCellAndIsNotNumberAndIsNotBoolean = branchIfNotCell(value); + auto isCellButNotString = branch8(NotEqual, + Address(value.payloadGPR(), JSCell::typeInfoTypeOffset()), TrustedImm32(StringType)); + load32(Address(value.payloadGPR(), JSString::offsetOfLength()), result); + compare32(negateResult ? Equal : NotEqual, result, TrustedImm32(0), result); + done.append(jump()); + + isCellButNotString.link(this); + if (shouldCheckMasqueradesAsUndefined) { + ASSERT(scratch != InvalidGPRReg); + JumpList isNotMasqueradesAsUndefined; + isNotMasqueradesAsUndefined.append(branchTest8(Zero, Address(value.payloadGPR(), JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined))); + emitLoadStructure(value.payloadGPR(), result, scratch); + move(TrustedImmPtr(globalObject), scratch); + isNotMasqueradesAsUndefined.append(branchPtr(NotEqual, Address(result, Structure::globalObjectOffset()), scratch)); + // We act like we are "undefined" here. + move(negateResult ? TrustedImm32(1) : TrustedImm32(0), result); + done.append(jump()); + isNotMasqueradesAsUndefined.link(this); + } + move(negateResult ? TrustedImm32(0) : TrustedImm32(1), result); + done.append(jump()); + + // null or undefined. + isNotCellAndIsNotNumberAndIsNotBoolean.link(this); + move(negateResult ? TrustedImm32(1) : TrustedImm32(0), result); + + done.link(this); +} + } // namespace JSC #endif // ENABLE(JIT) diff --git a/Source/JavaScriptCore/jit/AssemblyHelpers.h b/Source/JavaScriptCore/jit/AssemblyHelpers.h index 36d583139..49c7f9c87 100644 --- a/Source/JavaScriptCore/jit/AssemblyHelpers.h +++ b/Source/JavaScriptCore/jit/AssemblyHelpers.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2011-2017 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -23,18 +23,23 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifndef AssemblyHelpers_h -#define AssemblyHelpers_h - -#include +#pragma once #if ENABLE(JIT) #include "CodeBlock.h" #include "FPRInfo.h" #include "GPRInfo.h" +#include "Heap.h" +#include "InlineCallFrame.h" #include "JITCode.h" #include "MacroAssembler.h" +#include "MarkedSpace.h" +#include "MaxFrameExtentForSlowPathCall.h" +#include "RegisterAtOffsetList.h" +#include "RegisterSet.h" +#include "SuperSampler.h" +#include "TypeofType.h" #include "VM.h" namespace JSC { @@ -54,12 +59,395 @@ public: ASSERT(m_baselineCodeBlock->jitType() == JITCode::None || JITCode::isBaselineCode(m_baselineCodeBlock->jitType())); } } - + CodeBlock* codeBlock() { return m_codeBlock; } VM* vm() { return m_vm; } AssemblerType_T& assembler() { return m_assembler; } + + void checkStackPointerAlignment() + { + // This check is both unneeded and harder to write correctly for ARM64 +#if !defined(NDEBUG) && !CPU(ARM64) + Jump stackPointerAligned = branchTestPtr(Zero, stackPointerRegister, TrustedImm32(0xf)); + abortWithReason(AHStackPointerMisaligned); + stackPointerAligned.link(this); +#endif + } + + template + void storeCell(T cell, Address address) + { +#if USE(JSVALUE64) + store64(cell, address); +#else + store32(cell, address.withOffset(PayloadOffset)); + store32(TrustedImm32(JSValue::CellTag), address.withOffset(TagOffset)); +#endif + } + + void loadCell(Address address, GPRReg gpr) + { +#if USE(JSVALUE64) + load64(address, gpr); +#else + load32(address.withOffset(PayloadOffset), gpr); +#endif + } + + void storeValue(JSValueRegs regs, Address address) + { +#if USE(JSVALUE64) + store64(regs.gpr(), address); +#else + store32(regs.payloadGPR(), address.withOffset(PayloadOffset)); + store32(regs.tagGPR(), address.withOffset(TagOffset)); +#endif + } + + void storeValue(JSValueRegs regs, BaseIndex address) + { +#if USE(JSVALUE64) + store64(regs.gpr(), address); +#else + store32(regs.payloadGPR(), address.withOffset(PayloadOffset)); + store32(regs.tagGPR(), address.withOffset(TagOffset)); +#endif + } + + void storeValue(JSValueRegs regs, void* address) + { +#if USE(JSVALUE64) + store64(regs.gpr(), address); +#else + store32(regs.payloadGPR(), bitwise_cast(bitwise_cast(address) + PayloadOffset)); + store32(regs.tagGPR(), bitwise_cast(bitwise_cast(address) + TagOffset)); +#endif + } + + void loadValue(Address address, JSValueRegs regs) + { +#if USE(JSVALUE64) + load64(address, regs.gpr()); +#else + if (address.base == regs.payloadGPR()) { + load32(address.withOffset(TagOffset), regs.tagGPR()); + load32(address.withOffset(PayloadOffset), regs.payloadGPR()); + } else { + load32(address.withOffset(PayloadOffset), regs.payloadGPR()); + load32(address.withOffset(TagOffset), regs.tagGPR()); + } +#endif + } + + void loadValue(BaseIndex address, JSValueRegs regs) + { +#if USE(JSVALUE64) + load64(address, regs.gpr()); +#else + if (address.base == regs.payloadGPR() || address.index == regs.payloadGPR()) { + // We actually could handle the case where the registers are aliased to both + // tag and payload, but we don't for now. 
+ RELEASE_ASSERT(address.base != regs.tagGPR()); + RELEASE_ASSERT(address.index != regs.tagGPR()); + + load32(address.withOffset(TagOffset), regs.tagGPR()); + load32(address.withOffset(PayloadOffset), regs.payloadGPR()); + } else { + load32(address.withOffset(PayloadOffset), regs.payloadGPR()); + load32(address.withOffset(TagOffset), regs.tagGPR()); + } +#endif + } + + void loadValue(void* address, JSValueRegs regs) + { +#if USE(JSVALUE64) + load64(address, regs.gpr()); +#else + load32(bitwise_cast(bitwise_cast(address) + PayloadOffset), regs.payloadGPR()); + load32(bitwise_cast(bitwise_cast(address) + TagOffset), regs.tagGPR()); +#endif + } + // Note that this clobbers offset. + void loadProperty(GPRReg object, GPRReg offset, JSValueRegs result); + + void moveValueRegs(JSValueRegs srcRegs, JSValueRegs destRegs) + { +#if USE(JSVALUE32_64) + if (destRegs.tagGPR() == srcRegs.payloadGPR()) { + if (destRegs.payloadGPR() == srcRegs.tagGPR()) { + swap(srcRegs.payloadGPR(), srcRegs.tagGPR()); + return; + } + move(srcRegs.payloadGPR(), destRegs.payloadGPR()); + move(srcRegs.tagGPR(), destRegs.tagGPR()); + return; + } + move(srcRegs.tagGPR(), destRegs.tagGPR()); + move(srcRegs.payloadGPR(), destRegs.payloadGPR()); +#else + move(srcRegs.gpr(), destRegs.gpr()); +#endif + } + + void moveValue(JSValue value, JSValueRegs regs) + { +#if USE(JSVALUE64) + move(Imm64(JSValue::encode(value)), regs.gpr()); +#else + move(Imm32(value.tag()), regs.tagGPR()); + move(Imm32(value.payload()), regs.payloadGPR()); +#endif + } + + void moveTrustedValue(JSValue value, JSValueRegs regs) + { +#if USE(JSVALUE64) + move(TrustedImm64(JSValue::encode(value)), regs.gpr()); +#else + move(TrustedImm32(value.tag()), regs.tagGPR()); + move(TrustedImm32(value.payload()), regs.payloadGPR()); +#endif + } + + void storeTrustedValue(JSValue value, Address address) + { +#if USE(JSVALUE64) + store64(TrustedImm64(JSValue::encode(value)), address); +#else + store32(TrustedImm32(value.tag()), address.withOffset(TagOffset)); + store32(TrustedImm32(value.payload()), address.withOffset(PayloadOffset)); +#endif + } + + void storeTrustedValue(JSValue value, BaseIndex address) + { +#if USE(JSVALUE64) + store64(TrustedImm64(JSValue::encode(value)), address); +#else + store32(TrustedImm32(value.tag()), address.withOffset(TagOffset)); + store32(TrustedImm32(value.payload()), address.withOffset(PayloadOffset)); +#endif + } + + void emitSaveCalleeSavesFor(CodeBlock* codeBlock) + { + ASSERT(codeBlock); + + RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters(); + RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs()); + unsigned registerCount = calleeSaves->size(); + + for (unsigned i = 0; i < registerCount; i++) { + RegisterAtOffset entry = calleeSaves->at(i); + if (dontSaveRegisters.get(entry.reg())) + continue; + storePtr(entry.reg().gpr(), Address(framePointerRegister, entry.offset())); + } + } + + enum RestoreTagRegisterMode { UseExistingTagRegisterContents, CopyBaselineCalleeSavedRegistersFromBaseFrame }; + + void emitSaveOrCopyCalleeSavesFor(CodeBlock* codeBlock, VirtualRegister offsetVirtualRegister, RestoreTagRegisterMode tagRegisterMode, GPRReg temp) + { + ASSERT(codeBlock); + + RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters(); + RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs()); + unsigned registerCount = calleeSaves->size(); + +#if USE(JSVALUE64) + RegisterSet baselineCalleeSaves = 
RegisterSet::llintBaselineCalleeSaveRegisters(); +#endif + + for (unsigned i = 0; i < registerCount; i++) { + RegisterAtOffset entry = calleeSaves->at(i); + if (dontSaveRegisters.get(entry.reg())) + continue; + + GPRReg registerToWrite; + +#if USE(JSVALUE32_64) + UNUSED_PARAM(tagRegisterMode); + UNUSED_PARAM(temp); +#else + if (tagRegisterMode == CopyBaselineCalleeSavedRegistersFromBaseFrame && baselineCalleeSaves.get(entry.reg())) { + registerToWrite = temp; + loadPtr(AssemblyHelpers::Address(GPRInfo::callFrameRegister, entry.offset()), registerToWrite); + } else +#endif + registerToWrite = entry.reg().gpr(); + + storePtr(registerToWrite, Address(framePointerRegister, offsetVirtualRegister.offsetInBytes() + entry.offset())); + } + } + + void emitRestoreCalleeSavesFor(CodeBlock* codeBlock) + { + ASSERT(codeBlock); + + RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters(); + RegisterSet dontRestoreRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs()); + unsigned registerCount = calleeSaves->size(); + + for (unsigned i = 0; i < registerCount; i++) { + RegisterAtOffset entry = calleeSaves->at(i); + if (dontRestoreRegisters.get(entry.reg())) + continue; + loadPtr(Address(framePointerRegister, entry.offset()), entry.reg().gpr()); + } + } + + void emitSaveCalleeSaves() + { + emitSaveCalleeSavesFor(codeBlock()); + } + + void emitSaveThenMaterializeTagRegisters() + { +#if USE(JSVALUE64) +#if CPU(ARM64) + pushPair(GPRInfo::tagTypeNumberRegister, GPRInfo::tagMaskRegister); +#else + push(GPRInfo::tagTypeNumberRegister); + push(GPRInfo::tagMaskRegister); +#endif + emitMaterializeTagCheckRegisters(); +#endif + } + + void emitRestoreCalleeSaves() + { + emitRestoreCalleeSavesFor(codeBlock()); + } + + void emitRestoreSavedTagRegisters() + { +#if USE(JSVALUE64) +#if CPU(ARM64) + popPair(GPRInfo::tagTypeNumberRegister, GPRInfo::tagMaskRegister); +#else + pop(GPRInfo::tagMaskRegister); + pop(GPRInfo::tagTypeNumberRegister); +#endif +#endif + } + + void copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(const TempRegisterSet& usedRegisters = { RegisterSet::stubUnavailableRegisters() }) + { +#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0 + GPRReg temp1 = usedRegisters.getFreeGPR(0); + + loadPtr(&m_vm->topVMEntryFrame, temp1); + addPtr(TrustedImm32(VMEntryFrame::calleeSaveRegistersBufferOffset()), temp1); + + RegisterAtOffsetList* allCalleeSaves = m_vm->getAllCalleeSaveRegisterOffsets(); + RegisterSet dontCopyRegisters = RegisterSet::stackRegisters(); + unsigned registerCount = allCalleeSaves->size(); + + for (unsigned i = 0; i < registerCount; i++) { + RegisterAtOffset entry = allCalleeSaves->at(i); + if (dontCopyRegisters.get(entry.reg())) + continue; + if (entry.reg().isGPR()) + storePtr(entry.reg().gpr(), Address(temp1, entry.offset())); + else + storeDouble(entry.reg().fpr(), Address(temp1, entry.offset())); + } +#else + UNUSED_PARAM(usedRegisters); +#endif + } + + void restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer(); + + void copyCalleeSavesFromFrameOrRegisterToVMEntryFrameCalleeSavesBuffer(const TempRegisterSet& usedRegisters = { RegisterSet::stubUnavailableRegisters() }) + { +#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0 + GPRReg temp1 = usedRegisters.getFreeGPR(0); + GPRReg temp2 = usedRegisters.getFreeGPR(1); + FPRReg fpTemp = usedRegisters.getFreeFPR(); + ASSERT(temp2 != InvalidGPRReg); + + ASSERT(codeBlock()); + + // Copy saved calleeSaves on stack or unsaved calleeSaves in register to vm calleeSave buffer + loadPtr(&m_vm->topVMEntryFrame, temp1); + 
addPtr(TrustedImm32(VMEntryFrame::calleeSaveRegistersBufferOffset()), temp1); + + RegisterAtOffsetList* allCalleeSaves = m_vm->getAllCalleeSaveRegisterOffsets(); + RegisterAtOffsetList* currentCalleeSaves = codeBlock()->calleeSaveRegisters(); + RegisterSet dontCopyRegisters = RegisterSet::stackRegisters(); + unsigned registerCount = allCalleeSaves->size(); + + for (unsigned i = 0; i < registerCount; i++) { + RegisterAtOffset vmEntry = allCalleeSaves->at(i); + if (dontCopyRegisters.get(vmEntry.reg())) + continue; + RegisterAtOffset* currentFrameEntry = currentCalleeSaves->find(vmEntry.reg()); + + if (vmEntry.reg().isGPR()) { + GPRReg regToStore; + if (currentFrameEntry) { + // Load calleeSave from stack into temp register + regToStore = temp2; + loadPtr(Address(framePointerRegister, currentFrameEntry->offset()), regToStore); + } else + // Just store callee save directly + regToStore = vmEntry.reg().gpr(); + + storePtr(regToStore, Address(temp1, vmEntry.offset())); + } else { + FPRReg fpRegToStore; + if (currentFrameEntry) { + // Load calleeSave from stack into temp register + fpRegToStore = fpTemp; + loadDouble(Address(framePointerRegister, currentFrameEntry->offset()), fpRegToStore); + } else + // Just store callee save directly + fpRegToStore = vmEntry.reg().fpr(); + + storeDouble(fpRegToStore, Address(temp1, vmEntry.offset())); + } + } +#else + UNUSED_PARAM(usedRegisters); +#endif + } + + void emitMaterializeTagCheckRegisters() + { +#if USE(JSVALUE64) + move(MacroAssembler::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister); + orPtr(MacroAssembler::TrustedImm32(TagBitTypeOther), GPRInfo::tagTypeNumberRegister, GPRInfo::tagMaskRegister); +#endif + } + #if CPU(X86_64) || CPU(X86) + static size_t prologueStackPointerDelta() + { + // Prologue only saves the framePointerRegister + return sizeof(void*); + } + + void emitFunctionPrologue() + { + push(framePointerRegister); + move(stackPointerRegister, framePointerRegister); + } + + void emitFunctionEpilogueWithEmptyFrame() + { + pop(framePointerRegister); + } + + void emitFunctionEpilogue() + { + move(framePointerRegister, stackPointerRegister); + pop(framePointerRegister); + } + void preserveReturnAddressAfterCall(GPRReg reg) { pop(reg); @@ -77,6 +465,29 @@ public: #endif // CPU(X86_64) || CPU(X86) #if CPU(ARM) || CPU(ARM64) + static size_t prologueStackPointerDelta() + { + // Prologue saves the framePointerRegister and linkRegister + return 2 * sizeof(void*); + } + + void emitFunctionPrologue() + { + pushPair(framePointerRegister, linkRegister); + move(stackPointerRegister, framePointerRegister); + } + + void emitFunctionEpilogueWithEmptyFrame() + { + popPair(framePointerRegister, linkRegister); + } + + void emitFunctionEpilogue() + { + move(framePointerRegister, stackPointerRegister); + emitFunctionEpilogueWithEmptyFrame(); + } + ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg) { move(linkRegister, reg); @@ -94,6 +505,29 @@ public: #endif #if CPU(MIPS) + static size_t prologueStackPointerDelta() + { + // Prologue saves the framePointerRegister and returnAddressRegister + return 2 * sizeof(void*); + } + + void emitFunctionPrologue() + { + pushPair(framePointerRegister, returnAddressRegister); + move(stackPointerRegister, framePointerRegister); + } + + void emitFunctionEpilogueWithEmptyFrame() + { + popPair(framePointerRegister, returnAddressRegister); + } + + void emitFunctionEpilogue() + { + move(framePointerRegister, stackPointerRegister); + emitFunctionEpilogueWithEmptyFrame(); + } + ALWAYS_INLINE void 
preserveReturnAddressAfterCall(RegisterID reg) { move(returnAddressRegister, reg); @@ -104,74 +538,339 @@ public: move(reg, returnAddressRegister); } - ALWAYS_INLINE void restoreReturnAddressBeforeReturn(Address address) + ALWAYS_INLINE void restoreReturnAddressBeforeReturn(Address address) + { + loadPtr(address, returnAddressRegister); + } +#endif + + void emitGetFromCallFrameHeaderPtr(int entry, GPRReg to, GPRReg from = GPRInfo::callFrameRegister) + { + loadPtr(Address(from, entry * sizeof(Register)), to); + } + void emitGetFromCallFrameHeader32(int entry, GPRReg to, GPRReg from = GPRInfo::callFrameRegister) + { + load32(Address(from, entry * sizeof(Register)), to); + } +#if USE(JSVALUE64) + void emitGetFromCallFrameHeader64(int entry, GPRReg to, GPRReg from = GPRInfo::callFrameRegister) + { + load64(Address(from, entry * sizeof(Register)), to); + } +#endif // USE(JSVALUE64) + void emitPutToCallFrameHeader(GPRReg from, int entry) + { + storePtr(from, Address(GPRInfo::callFrameRegister, entry * sizeof(Register))); + } + + void emitPutToCallFrameHeader(void* value, int entry) + { + storePtr(TrustedImmPtr(value), Address(GPRInfo::callFrameRegister, entry * sizeof(Register))); + } + + void emitGetCallerFrameFromCallFrameHeaderPtr(RegisterID to) + { + loadPtr(Address(GPRInfo::callFrameRegister, CallFrame::callerFrameOffset()), to); + } + void emitPutCallerFrameToCallFrameHeader(RegisterID from) + { + storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::callerFrameOffset())); + } + + void emitPutReturnPCToCallFrameHeader(RegisterID from) + { + storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset())); + } + void emitPutReturnPCToCallFrameHeader(TrustedImmPtr from) + { + storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset())); + } + + // emitPutToCallFrameHeaderBeforePrologue() and related are used to access callee frame header + // fields before the code from emitFunctionPrologue() has executed. + // First, the access is via the stack pointer. Second, the address calculation must also take + // into account that the stack pointer may not have been adjusted down for the return PC and/or + // caller's frame pointer. On some platforms, the callee is responsible for pushing the + // "link register" containing the return address in the function prologue. +#if USE(JSVALUE64) + void emitPutToCallFrameHeaderBeforePrologue(GPRReg from, int entry) + { + storePtr(from, Address(stackPointerRegister, entry * static_cast(sizeof(Register)) - prologueStackPointerDelta())); + } +#else + void emitPutPayloadToCallFrameHeaderBeforePrologue(GPRReg from, int entry) + { + storePtr(from, Address(stackPointerRegister, entry * static_cast(sizeof(Register)) - prologueStackPointerDelta() + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload))); + } + + void emitPutTagToCallFrameHeaderBeforePrologue(TrustedImm32 tag, int entry) + { + storePtr(tag, Address(stackPointerRegister, entry * static_cast(sizeof(Register)) - prologueStackPointerDelta() + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag))); + } +#endif + + JumpList branchIfNotEqual(JSValueRegs regs, JSValue value) + { +#if USE(JSVALUE64) + return branch64(NotEqual, regs.gpr(), TrustedImm64(JSValue::encode(value))); +#else + JumpList result; + result.append(branch32(NotEqual, regs.tagGPR(), TrustedImm32(value.tag()))); + if (value.isEmpty() || value.isUndefinedOrNull()) + return result; // These don't have anything interesting in the payload. 
+ result.append(branch32(NotEqual, regs.payloadGPR(), TrustedImm32(value.payload()))); + return result; +#endif + } + + Jump branchIfEqual(JSValueRegs regs, JSValue value) + { +#if USE(JSVALUE64) + return branch64(Equal, regs.gpr(), TrustedImm64(JSValue::encode(value))); +#else + Jump notEqual; + // These don't have anything interesting in the payload. + if (!value.isEmpty() && !value.isUndefinedOrNull()) + notEqual = branch32(NotEqual, regs.payloadGPR(), TrustedImm32(value.payload())); + Jump result = branch32(Equal, regs.tagGPR(), TrustedImm32(value.tag())); + if (notEqual.isSet()) + notEqual.link(this); + return result; +#endif + } + + Jump branchIfNotCell(GPRReg reg, TagRegistersMode mode = HaveTagRegisters) + { +#if USE(JSVALUE64) + if (mode == HaveTagRegisters) + return branchTest64(NonZero, reg, GPRInfo::tagMaskRegister); + return branchTest64(NonZero, reg, TrustedImm64(TagMask)); +#else + UNUSED_PARAM(mode); + return branch32(MacroAssembler::NotEqual, reg, TrustedImm32(JSValue::CellTag)); +#endif + } + Jump branchIfNotCell(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters) + { +#if USE(JSVALUE64) + return branchIfNotCell(regs.gpr(), mode); +#else + return branchIfNotCell(regs.tagGPR(), mode); +#endif + } + + Jump branchIfCell(GPRReg reg, TagRegistersMode mode = HaveTagRegisters) + { +#if USE(JSVALUE64) + if (mode == HaveTagRegisters) + return branchTest64(Zero, reg, GPRInfo::tagMaskRegister); + return branchTest64(Zero, reg, TrustedImm64(TagMask)); +#else + UNUSED_PARAM(mode); + return branch32(MacroAssembler::Equal, reg, TrustedImm32(JSValue::CellTag)); +#endif + } + Jump branchIfCell(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters) + { +#if USE(JSVALUE64) + return branchIfCell(regs.gpr(), mode); +#else + return branchIfCell(regs.tagGPR(), mode); +#endif + } + + Jump branchIfOther(JSValueRegs regs, GPRReg tempGPR) + { +#if USE(JSVALUE64) + move(regs.gpr(), tempGPR); + and64(TrustedImm32(~TagBitUndefined), tempGPR); + return branch64(Equal, tempGPR, TrustedImm64(ValueNull)); +#else + or32(TrustedImm32(1), regs.tagGPR(), tempGPR); + return branch32(Equal, tempGPR, TrustedImm32(JSValue::NullTag)); +#endif + } + + Jump branchIfNotOther(JSValueRegs regs, GPRReg tempGPR) + { +#if USE(JSVALUE64) + move(regs.gpr(), tempGPR); + and64(TrustedImm32(~TagBitUndefined), tempGPR); + return branch64(NotEqual, tempGPR, TrustedImm64(ValueNull)); +#else + or32(TrustedImm32(1), regs.tagGPR(), tempGPR); + return branch32(NotEqual, tempGPR, TrustedImm32(JSValue::NullTag)); +#endif + } + + Jump branchIfInt32(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters) + { +#if USE(JSVALUE64) + if (mode == HaveTagRegisters) + return branch64(AboveOrEqual, regs.gpr(), GPRInfo::tagTypeNumberRegister); + return branch64(AboveOrEqual, regs.gpr(), TrustedImm64(TagTypeNumber)); +#else + UNUSED_PARAM(mode); + return branch32(Equal, regs.tagGPR(), TrustedImm32(JSValue::Int32Tag)); +#endif + } + +#if USE(JSVALUE64) + Jump branchIfNotInt32(GPRReg gpr, TagRegistersMode mode = HaveTagRegisters) { - loadPtr(address, returnAddressRegister); + if (mode == HaveTagRegisters) + return branch64(Below, gpr, GPRInfo::tagTypeNumberRegister); + return branch64(Below, gpr, TrustedImm64(TagTypeNumber)); } #endif -#if CPU(SH4) - ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg) + Jump branchIfNotInt32(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters) { - m_assembler.stspr(reg); +#if USE(JSVALUE64) + return branchIfNotInt32(regs.gpr(), mode); +#else + UNUSED_PARAM(mode); + return 
branch32(NotEqual, regs.tagGPR(), TrustedImm32(JSValue::Int32Tag)); +#endif } - ALWAYS_INLINE void restoreReturnAddressBeforeReturn(RegisterID reg) + // Note that the tempGPR is not used in 64-bit mode. + Jump branchIfNumber(JSValueRegs regs, GPRReg tempGPR, TagRegistersMode mode = HaveTagRegisters) { - m_assembler.ldspr(reg); +#if USE(JSVALUE64) + UNUSED_PARAM(tempGPR); + return branchIfNumber(regs.gpr(), mode); +#else + UNUSED_PARAM(mode); + add32(TrustedImm32(1), regs.tagGPR(), tempGPR); + return branch32(Below, tempGPR, TrustedImm32(JSValue::LowestTag + 1)); +#endif } - ALWAYS_INLINE void restoreReturnAddressBeforeReturn(Address address) +#if USE(JSVALUE64) + Jump branchIfNumber(GPRReg reg, TagRegistersMode mode = HaveTagRegisters) { - loadPtrLinkReg(address); + if (mode == HaveTagRegisters) + return branchTest64(NonZero, reg, GPRInfo::tagTypeNumberRegister); + return branchTest64(NonZero, reg, TrustedImm64(TagTypeNumber)); } #endif - - void emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry entry, GPRReg to) + + // Note that the tempGPR is not used in 64-bit mode. + Jump branchIfNotNumber(JSValueRegs regs, GPRReg tempGPR, TagRegistersMode mode = HaveTagRegisters) { - loadPtr(Address(GPRInfo::callFrameRegister, entry * sizeof(Register)), to); +#if USE(JSVALUE64) + UNUSED_PARAM(tempGPR); + return branchIfNotNumber(regs.gpr(), mode); +#else + UNUSED_PARAM(mode); + add32(TrustedImm32(1), regs.tagGPR(), tempGPR); + return branch32(AboveOrEqual, tempGPR, TrustedImm32(JSValue::LowestTag + 1)); +#endif } - void emitPutToCallFrameHeader(GPRReg from, JSStack::CallFrameHeaderEntry entry) + +#if USE(JSVALUE64) + Jump branchIfNotNumber(GPRReg reg, TagRegistersMode mode = HaveTagRegisters) { - storePtr(from, Address(GPRInfo::callFrameRegister, entry * sizeof(Register))); + if (mode == HaveTagRegisters) + return branchTest64(Zero, reg, GPRInfo::tagTypeNumberRegister); + return branchTest64(Zero, reg, TrustedImm64(TagTypeNumber)); } +#endif - void emitPutImmediateToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry entry) + Jump branchIfNotDoubleKnownNotInt32(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters) { - storePtr(TrustedImmPtr(value), Address(GPRInfo::callFrameRegister, entry * sizeof(Register))); +#if USE(JSVALUE64) + if (mode == HaveTagRegisters) + return branchTest64(Zero, regs.gpr(), GPRInfo::tagTypeNumberRegister); + return branchTest64(Zero, regs.gpr(), TrustedImm64(TagTypeNumber)); +#else + UNUSED_PARAM(mode); + return branch32(AboveOrEqual, regs.tagGPR(), TrustedImm32(JSValue::LowestTag)); +#endif } - void emitGetCallerFrameFromCallFrameHeaderPtr(RegisterID to) + // Note that the tempGPR is not used in 32-bit mode. + Jump branchIfBoolean(JSValueRegs regs, GPRReg tempGPR) { - loadPtr(Address(GPRInfo::callFrameRegister, CallFrame::callerFrameOffset()), to); +#if USE(JSVALUE64) + move(regs.gpr(), tempGPR); + xor64(TrustedImm32(static_cast(ValueFalse)), tempGPR); + return branchTest64(Zero, tempGPR, TrustedImm32(static_cast(~1))); +#else + UNUSED_PARAM(tempGPR); + return branch32(Equal, regs.tagGPR(), TrustedImm32(JSValue::BooleanTag)); +#endif } - void emitPutCallerFrameToCallFrameHeader(RegisterID from) + + // Note that the tempGPR is not used in 32-bit mode. 
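+    // On 64-bit, ValueFalse and ValueTrue differ only in bit 0, so xor-ing with
+    // ValueFalse and testing every bit except bit 0 identifies a boolean; on 32-bit
+    // it is enough to check the BooleanTag.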
+ Jump branchIfNotBoolean(JSValueRegs regs, GPRReg tempGPR) { - storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::callerFrameOffset())); +#if USE(JSVALUE64) + move(regs.gpr(), tempGPR); + xor64(TrustedImm32(static_cast(ValueFalse)), tempGPR); + return branchTest64(NonZero, tempGPR, TrustedImm32(static_cast(~1))); +#else + UNUSED_PARAM(tempGPR); + return branch32(NotEqual, regs.tagGPR(), TrustedImm32(JSValue::BooleanTag)); +#endif } - - void emitGetReturnPCFromCallFrameHeaderPtr(RegisterID to) + + Jump branchIfObject(GPRReg cellGPR) { - loadPtr(Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()), to); + return branch8( + AboveOrEqual, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType)); } - void emitPutReturnPCToCallFrameHeader(RegisterID from) + + Jump branchIfNotObject(GPRReg cellGPR) { - storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset())); + return branch8( + Below, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType)); } - void emitPutReturnPCToCallFrameHeader(TrustedImmPtr from) + + Jump branchIfType(GPRReg cellGPR, JSType type) { - storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset())); + return branch8(Equal, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(type)); + } + + Jump branchIfNotType(GPRReg cellGPR, JSType type) + { + return branch8(NotEqual, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(type)); } + + Jump branchIfString(GPRReg cellGPR) { return branchIfType(cellGPR, StringType); } + Jump branchIfNotString(GPRReg cellGPR) { return branchIfNotType(cellGPR, StringType); } + Jump branchIfSymbol(GPRReg cellGPR) { return branchIfType(cellGPR, SymbolType); } + Jump branchIfNotSymbol(GPRReg cellGPR) { return branchIfNotType(cellGPR, SymbolType); } + Jump branchIfFunction(GPRReg cellGPR) { return branchIfType(cellGPR, JSFunctionType); } + Jump branchIfNotFunction(GPRReg cellGPR) { return branchIfNotType(cellGPR, JSFunctionType); } + + Jump branchIfEmpty(JSValueRegs regs) + { +#if USE(JSVALUE64) + return branchTest64(Zero, regs.gpr()); +#else + return branch32(Equal, regs.tagGPR(), TrustedImm32(JSValue::EmptyValueTag)); +#endif + } + + JumpList branchIfNotType( + JSValueRegs, GPRReg tempGPR, const InferredType::Descriptor&, + TagRegistersMode = HaveTagRegisters); - Jump branchIfNotCell(GPRReg reg) + template + Jump branchStructure(RelationalCondition condition, T leftHandSide, Structure* structure) { #if USE(JSVALUE64) - return branchTest64(MacroAssembler::NonZero, reg, GPRInfo::tagMaskRegister); + return branch32(condition, leftHandSide, TrustedImm32(structure->id())); #else - return branch32(MacroAssembler::NotEqual, reg, TrustedImm32(JSValue::CellTag)); + return branchPtr(condition, leftHandSide, TrustedImmPtr(structure)); #endif } - + + Jump branchIfFastTypedArray(GPRReg baseGPR); + Jump branchIfNotFastTypedArray(GPRReg baseGPR); + static Address addressForByteOffset(ptrdiff_t byteOffset) { return Address(GPRInfo::callFrameRegister, byteOffset); @@ -183,6 +882,10 @@ public: } static Address addressFor(VirtualRegister virtualRegister) { + // NB. It's tempting on some architectures to sometimes use an offset from the stack + // register because for some offsets that will encode to a smaller instruction. But we + // cannot do this. We use this in places where the stack pointer has been moved to some + // unpredictable location. 
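+        // Callers therefore rely on addressFor() staying frame-register relative even
+        // while the stack pointer is temporarily repositioned (e.g. around an outgoing call).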
ASSERT(virtualRegister.isValid()); return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register)); } @@ -194,7 +897,7 @@ public: static Address tagFor(VirtualRegister virtualRegister) { ASSERT(virtualRegister.isValid()); - return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)); + return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register) + TagOffset); } static Address tagFor(int operand) { @@ -204,33 +907,69 @@ public: static Address payloadFor(VirtualRegister virtualRegister) { ASSERT(virtualRegister.isValid()); - return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)); + return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register) + PayloadOffset); } static Address payloadFor(int operand) { return payloadFor(static_cast(operand)); } - Jump branchIfNotObject(GPRReg structureReg) + // Access to our fixed callee CallFrame. + static Address calleeFrameSlot(int slot) + { + ASSERT(slot >= CallerFrameAndPC::sizeInRegisters); + return Address(stackPointerRegister, sizeof(Register) * (slot - CallerFrameAndPC::sizeInRegisters)); + } + + // Access to our fixed callee CallFrame. + static Address calleeArgumentSlot(int argument) + { + return calleeFrameSlot(virtualRegisterForArgument(argument).offset()); + } + + static Address calleeFrameTagSlot(int slot) + { + return calleeFrameSlot(slot).withOffset(TagOffset); + } + + static Address calleeFramePayloadSlot(int slot) + { + return calleeFrameSlot(slot).withOffset(PayloadOffset); + } + + static Address calleeArgumentTagSlot(int argument) { - return branch8(Below, Address(structureReg, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType)); + return calleeArgumentSlot(argument).withOffset(TagOffset); } - static GPRReg selectScratchGPR(GPRReg preserve1 = InvalidGPRReg, GPRReg preserve2 = InvalidGPRReg, GPRReg preserve3 = InvalidGPRReg, GPRReg preserve4 = InvalidGPRReg) + static Address calleeArgumentPayloadSlot(int argument) { - if (preserve1 != GPRInfo::regT0 && preserve2 != GPRInfo::regT0 && preserve3 != GPRInfo::regT0 && preserve4 != GPRInfo::regT0) + return calleeArgumentSlot(argument).withOffset(PayloadOffset); + } + + static Address calleeFrameCallerFrame() + { + return calleeFrameSlot(0).withOffset(CallFrame::callerFrameOffset()); + } + + static GPRReg selectScratchGPR(GPRReg preserve1 = InvalidGPRReg, GPRReg preserve2 = InvalidGPRReg, GPRReg preserve3 = InvalidGPRReg, GPRReg preserve4 = InvalidGPRReg, GPRReg preserve5 = InvalidGPRReg) + { + if (preserve1 != GPRInfo::regT0 && preserve2 != GPRInfo::regT0 && preserve3 != GPRInfo::regT0 && preserve4 != GPRInfo::regT0 && preserve5 != GPRInfo::regT0) return GPRInfo::regT0; - if (preserve1 != GPRInfo::regT1 && preserve2 != GPRInfo::regT1 && preserve3 != GPRInfo::regT1 && preserve4 != GPRInfo::regT1) + if (preserve1 != GPRInfo::regT1 && preserve2 != GPRInfo::regT1 && preserve3 != GPRInfo::regT1 && preserve4 != GPRInfo::regT1 && preserve5 != GPRInfo::regT1) return GPRInfo::regT1; - if (preserve1 != GPRInfo::regT2 && preserve2 != GPRInfo::regT2 && preserve3 != GPRInfo::regT2 && preserve4 != GPRInfo::regT2) + if (preserve1 != GPRInfo::regT2 && preserve2 != GPRInfo::regT2 && preserve3 != GPRInfo::regT2 && preserve4 != GPRInfo::regT2 && preserve5 != GPRInfo::regT2) return GPRInfo::regT2; - if (preserve1 != GPRInfo::regT3 && preserve2 != GPRInfo::regT3 
&& preserve3 != GPRInfo::regT3 && preserve4 != GPRInfo::regT3) + if (preserve1 != GPRInfo::regT3 && preserve2 != GPRInfo::regT3 && preserve3 != GPRInfo::regT3 && preserve4 != GPRInfo::regT3 && preserve5 != GPRInfo::regT3) return GPRInfo::regT3; - return GPRInfo::regT4; + if (preserve1 != GPRInfo::regT4 && preserve2 != GPRInfo::regT4 && preserve3 != GPRInfo::regT4 && preserve4 != GPRInfo::regT4 && preserve5 != GPRInfo::regT4) + return GPRInfo::regT4; + + return GPRInfo::regT5; } // Add a debug call. This call has no effect on JIT code execution state. @@ -257,7 +996,7 @@ public: move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT0); storePtr(TrustedImmPtr(scratchSize), GPRInfo::regT0); -#if CPU(X86_64) || CPU(ARM) || CPU(ARM64) || CPU(MIPS) || CPU(SH4) +#if CPU(X86_64) || CPU(ARM) || CPU(ARM64) || CPU(MIPS) move(TrustedImmPtr(buffer), GPRInfo::argumentGPR2); move(TrustedImmPtr(argument), GPRInfo::argumentGPR1); move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); @@ -298,6 +1037,8 @@ public: void jitAssertIsCell(GPRReg); void jitAssertHasValidCallFrame(); void jitAssertIsNull(GPRReg); + void jitAssertTagsInPlace(); + void jitAssertArgumentCountSane(); #else void jitAssertIsInt32(GPRReg) { } void jitAssertIsJSInt32(GPRReg) { } @@ -306,8 +1047,17 @@ public: void jitAssertIsCell(GPRReg) { } void jitAssertHasValidCallFrame() { } void jitAssertIsNull(GPRReg) { } + void jitAssertTagsInPlace() { } + void jitAssertArgumentCountSane() { } #endif + void jitReleaseAssertNoException(); + + void incrementSuperSamplerCount(); + void decrementSuperSamplerCount(); + + void purifyNaN(FPRReg); + // These methods convert between doubles, and doubles boxed and JSValues. #if USE(JSVALUE64) GPRReg boxDouble(FPRReg fpr, GPRReg gpr) @@ -317,14 +1067,28 @@ public: jitAssertIsJSDouble(gpr); return gpr; } - FPRReg unboxDouble(GPRReg gpr, FPRReg fpr) + FPRReg unboxDoubleWithoutAssertions(GPRReg gpr, GPRReg resultGPR, FPRReg fpr) { - jitAssertIsJSDouble(gpr); - add64(GPRInfo::tagTypeNumberRegister, gpr); - move64ToDouble(gpr, fpr); + add64(GPRInfo::tagTypeNumberRegister, gpr, resultGPR); + move64ToDouble(resultGPR, fpr); return fpr; } + FPRReg unboxDouble(GPRReg gpr, GPRReg resultGPR, FPRReg fpr) + { + jitAssertIsJSDouble(gpr); + return unboxDoubleWithoutAssertions(gpr, resultGPR, fpr); + } + void boxDouble(FPRReg fpr, JSValueRegs regs) + { + boxDouble(fpr, regs.gpr()); + } + + void unboxDoubleNonDestructive(JSValueRegs regs, FPRReg destFPR, GPRReg resultGPR, FPRReg) + { + unboxDouble(regs.payloadGPR(), resultGPR, destFPR); + } + // Here are possible arrangements of source, target, scratch: // - source, target, scratch can all be separate registers. // - source and target can be the same but scratch is separate. 
@@ -357,17 +1121,81 @@ public: { moveIntsToDouble(payloadGPR, tagGPR, fpr, scratchFPR); } + + void boxDouble(FPRReg fpr, JSValueRegs regs) + { + boxDouble(fpr, regs.tagGPR(), regs.payloadGPR()); + } + void unboxDouble(JSValueRegs regs, FPRReg fpr, FPRReg scratchFPR) + { + unboxDouble(regs.tagGPR(), regs.payloadGPR(), fpr, scratchFPR); + } + + void unboxDoubleNonDestructive(const JSValueRegs regs, FPRReg destFPR, GPRReg, FPRReg scratchFPR) + { + unboxDouble(regs, destFPR, scratchFPR); + } #endif - enum ExceptionCheckKind { NormalExceptionCheck, InvertedExceptionCheck }; - Jump emitExceptionCheck(ExceptionCheckKind kind = NormalExceptionCheck) + void boxBooleanPayload(GPRReg boolGPR, GPRReg payloadGPR) + { +#if USE(JSVALUE64) + add32(TrustedImm32(ValueFalse), boolGPR, payloadGPR); +#else + move(boolGPR, payloadGPR); +#endif + } + + void boxBooleanPayload(bool value, GPRReg payloadGPR) + { +#if USE(JSVALUE64) + move(TrustedImm32(ValueFalse + value), payloadGPR); +#else + move(TrustedImm32(value), payloadGPR); +#endif + } + + void boxBoolean(GPRReg boolGPR, JSValueRegs boxedRegs) + { + boxBooleanPayload(boolGPR, boxedRegs.payloadGPR()); +#if USE(JSVALUE32_64) + move(TrustedImm32(JSValue::BooleanTag), boxedRegs.tagGPR()); +#endif + } + + void boxInt32(GPRReg intGPR, JSValueRegs boxedRegs, TagRegistersMode mode = HaveTagRegisters) + { +#if USE(JSVALUE64) + if (mode == DoNotHaveTagRegisters) { + move(intGPR, boxedRegs.gpr()); + or64(TrustedImm64(TagTypeNumber), boxedRegs.gpr()); + } else + or64(GPRInfo::tagTypeNumberRegister, intGPR, boxedRegs.gpr()); +#else + UNUSED_PARAM(mode); + move(intGPR, boxedRegs.payloadGPR()); + move(TrustedImm32(JSValue::Int32Tag), boxedRegs.tagGPR()); +#endif + } + + void boxCell(GPRReg cellGPR, JSValueRegs boxedRegs) { #if USE(JSVALUE64) - return branchTest64(kind == NormalExceptionCheck ? NonZero : Zero, AbsoluteAddress(vm()->addressOfException())); -#elif USE(JSVALUE32_64) - return branch32(kind == NormalExceptionCheck ? 
NotEqual : Equal, AbsoluteAddress(reinterpret_cast(vm()->addressOfException()) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag)); + move(cellGPR, boxedRegs.gpr()); +#else + move(cellGPR, boxedRegs.payloadGPR()); + move(TrustedImm32(JSValue::CellTag), boxedRegs.tagGPR()); #endif } + + void callExceptionFuzz(); + + enum ExceptionCheckKind { NormalExceptionCheck, InvertedExceptionCheck }; + enum ExceptionJumpWidth { NormalJumpWidth, FarJumpWidth }; + JS_EXPORT_PRIVATE Jump emitExceptionCheck( + ExceptionCheckKind = NormalExceptionCheck, ExceptionJumpWidth = NormalJumpWidth); + JS_EXPORT_PRIVATE Jump emitNonPatchableExceptionCheck(); + Jump emitJumpIfException(); #if ENABLE(SAMPLING_COUNTERS) static void emitCount(MacroAssembler& jit, AbstractSamplingCounter& counter, int32_t increment = 1) @@ -394,7 +1222,7 @@ public: { if (!codeOrigin.inlineCallFrame) return codeBlock()->isStrictMode(); - return jsCast(codeOrigin.inlineCallFrame->executable.get())->isStrictMode(); + return codeOrigin.inlineCallFrame->isStrictMode(); } ECMAMode ecmaModeFor(CodeOrigin codeOrigin) @@ -421,64 +1249,391 @@ public: return m_baselineCodeBlock; } - VirtualRegister baselineArgumentsRegisterFor(InlineCallFrame* inlineCallFrame) + static VirtualRegister argumentsStart(InlineCallFrame* inlineCallFrame) { if (!inlineCallFrame) - return baselineCodeBlock()->argumentsRegister(); - - return VirtualRegister(baselineCodeBlockForInlineCallFrame( - inlineCallFrame)->argumentsRegister().offset() + inlineCallFrame->stackOffset); + return VirtualRegister(CallFrame::argumentOffset(0)); + if (inlineCallFrame->arguments.size() <= 1) + return virtualRegisterForLocal(0); + ValueRecovery recovery = inlineCallFrame->arguments[1]; + RELEASE_ASSERT(recovery.technique() == DisplacedInJSStack); + return recovery.virtualRegister(); } - VirtualRegister baselineArgumentsRegisterFor(const CodeOrigin& codeOrigin) + static VirtualRegister argumentsStart(const CodeOrigin& codeOrigin) + { + return argumentsStart(codeOrigin.inlineCallFrame); + } + + static VirtualRegister argumentCount(InlineCallFrame* inlineCallFrame) + { + ASSERT(!inlineCallFrame || inlineCallFrame->isVarargs()); + if (!inlineCallFrame) + return VirtualRegister(CallFrameSlot::argumentCount); + return inlineCallFrame->argumentCountRegister; + } + + static VirtualRegister argumentCount(const CodeOrigin& codeOrigin) { - return baselineArgumentsRegisterFor(codeOrigin.inlineCallFrame); + return argumentCount(codeOrigin.inlineCallFrame); } - SymbolTable* symbolTableFor(const CodeOrigin& codeOrigin) + void emitLoadStructure(RegisterID source, RegisterID dest, RegisterID scratch); + + void emitStoreStructureWithTypeInfo(TrustedImmPtr structure, RegisterID dest, RegisterID) { - return baselineCodeBlockFor(codeOrigin)->symbolTable(); + emitStoreStructureWithTypeInfo(*this, structure, dest); } - int offsetOfLocals(const CodeOrigin& codeOrigin) + void emitStoreStructureWithTypeInfo(RegisterID structure, RegisterID dest, RegisterID scratch) { - if (!codeOrigin.inlineCallFrame) - return 0; - return codeOrigin.inlineCallFrame->stackOffset * sizeof(Register); +#if USE(JSVALUE64) + load64(MacroAssembler::Address(structure, Structure::structureIDOffset()), scratch); + store64(scratch, MacroAssembler::Address(dest, JSCell::structureIDOffset())); +#else + // Store all the info flags using a single 32-bit wide load and store. 
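+        // The indexing type, JSType, type-info flags, and cell state share one 32-bit
+        // word of the cell header, so copying that word from the Structure initializes
+        // all of them at once; the StructureID itself is stored separately below.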
+ load32(MacroAssembler::Address(structure, Structure::indexingTypeIncludingHistoryOffset()), scratch); + store32(scratch, MacroAssembler::Address(dest, JSCell::indexingTypeAndMiscOffset())); + + // Store the StructureID + storePtr(structure, MacroAssembler::Address(dest, JSCell::structureIDOffset())); +#endif } - int offsetOfArgumentsIncludingThis(InlineCallFrame* inlineCallFrame) + static void emitStoreStructureWithTypeInfo(AssemblyHelpers& jit, TrustedImmPtr structure, RegisterID dest); + + Jump barrierBranchWithoutFence(GPRReg cell) { - if (!inlineCallFrame) - return CallFrame::argumentOffsetIncludingThis(0) * sizeof(Register); - if (inlineCallFrame->arguments.size() <= 1) - return 0; - ValueRecovery recovery = inlineCallFrame->arguments[1]; - RELEASE_ASSERT(recovery.technique() == DisplacedInJSStack); - return (recovery.virtualRegister().offset() - 1) * sizeof(Register); + return branch8(Above, Address(cell, JSCell::cellStateOffset()), TrustedImm32(blackThreshold)); + } + + Jump barrierBranchWithoutFence(JSCell* cell) + { + uint8_t* address = reinterpret_cast(cell) + JSCell::cellStateOffset(); + return branch8(Above, AbsoluteAddress(address), TrustedImm32(blackThreshold)); } - int offsetOfArgumentsIncludingThis(const CodeOrigin& codeOrigin) + Jump barrierBranch(GPRReg cell, GPRReg scratchGPR) { - return offsetOfArgumentsIncludingThis(codeOrigin.inlineCallFrame); + load8(Address(cell, JSCell::cellStateOffset()), scratchGPR); + return branch32(Above, scratchGPR, AbsoluteAddress(vm()->heap.addressOfBarrierThreshold())); } - void writeBarrier(GPRReg owner, GPRReg scratch1, GPRReg scratch2, WriteBarrierUseKind useKind) + Jump barrierBranch(JSCell* cell, GPRReg scratchGPR) + { + uint8_t* address = reinterpret_cast(cell) + JSCell::cellStateOffset(); + load8(address, scratchGPR); + return branch32(Above, scratchGPR, AbsoluteAddress(vm()->heap.addressOfBarrierThreshold())); + } + + void barrierStoreLoadFence() + { + if (!Options::useConcurrentBarriers()) + return; + Jump ok = jumpIfMutatorFenceNotNeeded(); + memoryFence(); + ok.link(this); + } + + void mutatorFence() + { + if (isX86()) + return; + Jump ok = jumpIfMutatorFenceNotNeeded(); + storeFence(); + ok.link(this); + } + + void storeButterfly(GPRReg butterfly, GPRReg object) { - UNUSED_PARAM(owner); - UNUSED_PARAM(scratch1); - UNUSED_PARAM(scratch2); - UNUSED_PARAM(useKind); - ASSERT(owner != scratch1); - ASSERT(owner != scratch2); - ASSERT(scratch1 != scratch2); + if (isX86()) { + storePtr(butterfly, Address(object, JSObject::butterflyOffset())); + return; + } -#if ENABLE(WRITE_BARRIER_PROFILING) - emitCount(WriteBarrierCounters::jitCounterFor(useKind)); -#endif + Jump ok = jumpIfMutatorFenceNotNeeded(); + storeFence(); + storePtr(butterfly, Address(object, JSObject::butterflyOffset())); + storeFence(); + Jump done = jump(); + ok.link(this); + storePtr(butterfly, Address(object, JSObject::butterflyOffset())); + done.link(this); + } + + void nukeStructureAndStoreButterfly(GPRReg butterfly, GPRReg object) + { + if (isX86()) { + or32(TrustedImm32(bitwise_cast(nukedStructureIDBit())), Address(object, JSCell::structureIDOffset())); + storePtr(butterfly, Address(object, JSObject::butterflyOffset())); + return; + } + + Jump ok = jumpIfMutatorFenceNotNeeded(); + or32(TrustedImm32(bitwise_cast(nukedStructureIDBit())), Address(object, JSCell::structureIDOffset())); + storeFence(); + storePtr(butterfly, Address(object, JSObject::butterflyOffset())); + storeFence(); + Jump done = jump(); + ok.link(this); + storePtr(butterfly, Address(object, 
JSObject::butterflyOffset())); + done.link(this); + } + + Jump jumpIfMutatorFenceNotNeeded() + { + return branchTest8(Zero, AbsoluteAddress(vm()->heap.addressOfMutatorShouldBeFenced())); } + + // Emits the branch structure for typeof. The code emitted by this doesn't fall through. The + // functor is called at those points where we have pinpointed a type. One way to use this is to + // have the functor emit the code to put the type string into an appropriate register and then + // jump out. A secondary functor is used for the call trap and masquerades-as-undefined slow + // case. It is passed the unlinked jump to the slow case. + template + void emitTypeOf( + JSValueRegs regs, GPRReg tempGPR, const Functor& functor, + const SlowPathFunctor& slowPathFunctor) + { + // Implements the following branching structure: + // + // if (is cell) { + // if (is object) { + // if (is function) { + // return function; + // } else if (doesn't have call trap and doesn't masquerade as undefined) { + // return object + // } else { + // return slowPath(); + // } + // } else if (is string) { + // return string + // } else { + // return symbol + // } + // } else if (is number) { + // return number + // } else if (is null) { + // return object + // } else if (is boolean) { + // return boolean + // } else { + // return undefined + // } + + Jump notCell = branchIfNotCell(regs); + + GPRReg cellGPR = regs.payloadGPR(); + Jump notObject = branchIfNotObject(cellGPR); + + Jump notFunction = branchIfNotFunction(cellGPR); + functor(TypeofType::Function, false); + + notFunction.link(this); + slowPathFunctor( + branchTest8( + NonZero, + Address(cellGPR, JSCell::typeInfoFlagsOffset()), + TrustedImm32(MasqueradesAsUndefined | TypeOfShouldCallGetCallData))); + functor(TypeofType::Object, false); + + notObject.link(this); + + Jump notString = branchIfNotString(cellGPR); + functor(TypeofType::String, false); + notString.link(this); + functor(TypeofType::Symbol, false); + + notCell.link(this); + Jump notNumber = branchIfNotNumber(regs, tempGPR); + functor(TypeofType::Number, false); + notNumber.link(this); + + JumpList notNull = branchIfNotEqual(regs, jsNull()); + functor(TypeofType::Object, false); + notNull.link(this); + + Jump notBoolean = branchIfNotBoolean(regs, tempGPR); + functor(TypeofType::Boolean, false); + notBoolean.link(this); + + functor(TypeofType::Undefined, true); + } + + void emitDumbVirtualCall(CallLinkInfo*); + Vector& decodedCodeMapFor(CodeBlock*); + + void makeSpaceOnStackForCCall(); + void reclaimSpaceOnStackForCCall(); + +#if USE(JSVALUE64) + void emitRandomThunk(JSGlobalObject*, GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, FPRReg result); + void emitRandomThunk(GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, GPRReg scratch3, FPRReg result); +#endif + + // Call this if you know that the value held in allocatorGPR is non-null. This DOES NOT mean + // that allocator is non-null; allocator can be null as a signal that we don't know what the + // value of allocatorGPR is. + void emitAllocateWithNonNullAllocator(GPRReg resultGPR, MarkedAllocator* allocator, GPRReg allocatorGPR, GPRReg scratchGPR, JumpList& slowPath) + { + // NOTE: This is carefully written so that we can call it while we disallow scratch + // register usage. 
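+        // Fast path: bump-allocate by decrementing the free list's "remaining" byte
+        // count; when that count is exhausted, fall back to popping the head of the
+        // free list, and only if the head is also null do we take the caller's slow path.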
+ + if (Options::forceGCSlowPaths()) { + slowPath.append(jump()); + return; + } + + Jump popPath; + Jump done; + + load32(Address(allocatorGPR, MarkedAllocator::offsetOfFreeList() + OBJECT_OFFSETOF(FreeList, remaining)), resultGPR); + popPath = branchTest32(Zero, resultGPR); + if (allocator) + add32(TrustedImm32(-allocator->cellSize()), resultGPR, scratchGPR); + else { + if (isX86()) { + move(resultGPR, scratchGPR); + sub32(Address(allocatorGPR, MarkedAllocator::offsetOfCellSize()), scratchGPR); + } else { + load32(Address(allocatorGPR, MarkedAllocator::offsetOfCellSize()), scratchGPR); + sub32(resultGPR, scratchGPR, scratchGPR); + } + } + negPtr(resultGPR); + store32(scratchGPR, Address(allocatorGPR, MarkedAllocator::offsetOfFreeList() + OBJECT_OFFSETOF(FreeList, remaining))); + Address payloadEndAddr = Address(allocatorGPR, MarkedAllocator::offsetOfFreeList() + OBJECT_OFFSETOF(FreeList, payloadEnd)); + if (isX86()) + addPtr(payloadEndAddr, resultGPR); + else { + loadPtr(payloadEndAddr, scratchGPR); + addPtr(scratchGPR, resultGPR); + } + + done = jump(); + + popPath.link(this); + + loadPtr(Address(allocatorGPR, MarkedAllocator::offsetOfFreeList() + OBJECT_OFFSETOF(FreeList, head)), resultGPR); + slowPath.append(branchTestPtr(Zero, resultGPR)); + + // The object is half-allocated: we have what we know is a fresh object, but + // it's still on the GC's free list. + loadPtr(Address(resultGPR), scratchGPR); + storePtr(scratchGPR, Address(allocatorGPR, MarkedAllocator::offsetOfFreeList() + OBJECT_OFFSETOF(FreeList, head))); + + done.link(this); + } + + void emitAllocate(GPRReg resultGPR, MarkedAllocator* allocator, GPRReg allocatorGPR, GPRReg scratchGPR, JumpList& slowPath) + { + if (!allocator) + slowPath.append(branchTestPtr(Zero, allocatorGPR)); + emitAllocateWithNonNullAllocator(resultGPR, allocator, allocatorGPR, scratchGPR, slowPath); + } + + template + void emitAllocateJSCell(GPRReg resultGPR, MarkedAllocator* allocator, GPRReg allocatorGPR, StructureType structure, GPRReg scratchGPR, JumpList& slowPath) + { + emitAllocate(resultGPR, allocator, allocatorGPR, scratchGPR, slowPath); + emitStoreStructureWithTypeInfo(structure, resultGPR, scratchGPR); + } + + template + void emitAllocateJSObject(GPRReg resultGPR, MarkedAllocator* allocator, GPRReg allocatorGPR, StructureType structure, StorageType storage, GPRReg scratchGPR, JumpList& slowPath) + { + emitAllocateJSCell(resultGPR, allocator, allocatorGPR, structure, scratchGPR, slowPath); + storePtr(storage, Address(resultGPR, JSObject::butterflyOffset())); + } + + template + void emitAllocateJSObjectWithKnownSize( + GPRReg resultGPR, StructureType structure, StorageType storage, GPRReg scratchGPR1, + GPRReg scratchGPR2, JumpList& slowPath, size_t size) + { + MarkedAllocator* allocator = subspaceFor(*vm())->allocatorFor(size); + if (!allocator) { + slowPath.append(jump()); + return; + } + move(TrustedImmPtr(allocator), scratchGPR1); + emitAllocateJSObject(resultGPR, allocator, scratchGPR1, structure, storage, scratchGPR2, slowPath); + } + + template + void emitAllocateJSObject(GPRReg resultGPR, StructureType structure, StorageType storage, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath) + { + emitAllocateJSObjectWithKnownSize(resultGPR, structure, storage, scratchGPR1, scratchGPR2, slowPath, ClassType::allocationSize(0)); + } + + // allocationSize can be aliased with any of the other input GPRs. If it's not aliased then it + // won't be clobbered. 
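+    // The requested size is rounded up to a multiple of MarkedSpace::sizeStep and used
+    // to index the subspace's per-size-step allocator table; anything above
+    // MarkedSpace::largeCutoff is sent to the slow path instead.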
+ void emitAllocateVariableSized(GPRReg resultGPR, Subspace& subspace, GPRReg allocationSize, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath) + { + static_assert(!(MarkedSpace::sizeStep & (MarkedSpace::sizeStep - 1)), "MarkedSpace::sizeStep must be a power of two."); + + unsigned stepShift = getLSBSet(MarkedSpace::sizeStep); + + add32(TrustedImm32(MarkedSpace::sizeStep - 1), allocationSize, scratchGPR1); + urshift32(TrustedImm32(stepShift), scratchGPR1); + slowPath.append(branch32(Above, scratchGPR1, TrustedImm32(MarkedSpace::largeCutoff >> stepShift))); + move(TrustedImmPtr(subspace.allocatorForSizeStep() - 1), scratchGPR2); + loadPtr(BaseIndex(scratchGPR2, scratchGPR1, timesPtr()), scratchGPR1); + + emitAllocate(resultGPR, nullptr, scratchGPR1, scratchGPR2, slowPath); + } + + template + void emitAllocateVariableSizedCell(GPRReg resultGPR, StructureType structure, GPRReg allocationSize, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath) + { + Subspace& subspace = *subspaceFor(*vm()); + emitAllocateVariableSized(resultGPR, subspace, allocationSize, scratchGPR1, scratchGPR2, slowPath); + emitStoreStructureWithTypeInfo(structure, resultGPR, scratchGPR2); + } + + template + void emitAllocateVariableSizedJSObject(GPRReg resultGPR, StructureType structure, GPRReg allocationSize, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath) + { + emitAllocateVariableSizedCell(resultGPR, structure, allocationSize, scratchGPR1, scratchGPR2, slowPath); + storePtr(TrustedImmPtr(0), Address(resultGPR, JSObject::butterflyOffset())); + } + + void emitConvertValueToBoolean(JSValueRegs value, GPRReg result, GPRReg scratchIfShouldCheckMasqueradesAsUndefined, FPRReg, FPRReg, bool shouldCheckMasqueradesAsUndefined, JSGlobalObject*, bool negateResult = false); + + template + void emitAllocateDestructibleObject(GPRReg resultGPR, Structure* structure, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath) + { + emitAllocateJSObject(resultGPR, TrustedImmPtr(structure), TrustedImmPtr(0), scratchGPR1, scratchGPR2, slowPath); + storePtr(TrustedImmPtr(structure->classInfo()), Address(resultGPR, JSDestructibleObject::classInfoOffset())); + } + + void emitInitializeInlineStorage(GPRReg baseGPR, unsigned inlineCapacity) + { + for (unsigned i = 0; i < inlineCapacity; ++i) + storeTrustedValue(JSValue(), Address(baseGPR, JSObject::offsetOfInlineStorage() + i * sizeof(EncodedJSValue))); + } + + void emitInitializeInlineStorage(GPRReg baseGPR, GPRReg inlineCapacity) + { + Jump empty = branchTest32(Zero, inlineCapacity); + Label loop = label(); + sub32(TrustedImm32(1), inlineCapacity); + storeTrustedValue(JSValue(), BaseIndex(baseGPR, inlineCapacity, TimesEight, JSObject::offsetOfInlineStorage())); + branchTest32(NonZero, inlineCapacity).linkTo(loop, this); + empty.link(this); + } + + void emitInitializeOutOfLineStorage(GPRReg butterflyGPR, unsigned outOfLineCapacity) + { + for (unsigned i = 0; i < outOfLineCapacity; ++i) + storeTrustedValue(JSValue(), Address(butterflyGPR, -sizeof(IndexingHeader) - (i + 1) * sizeof(EncodedJSValue))); + } + +#if USE(JSVALUE64) + void wangsInt64Hash(GPRReg inputAndResult, GPRReg scratch); +#endif protected: VM* m_vm; @@ -491,6 +1646,3 @@ protected: } // namespace JSC #endif // ENABLE(JIT) - -#endif // AssemblyHelpers_h - diff --git a/Source/JavaScriptCore/jit/BinarySwitch.cpp b/Source/JavaScriptCore/jit/BinarySwitch.cpp new file mode 100644 index 000000000..f3ddcfca9 --- /dev/null +++ b/Source/JavaScriptCore/jit/BinarySwitch.cpp @@ -0,0 +1,391 @@ +/* + * Copyright (C) 
2013, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "BinarySwitch.h" + +#if ENABLE(JIT) + +#include "JSCInlines.h" +#include + +namespace JSC { + +static const bool verbose = false; + +static unsigned globalCounter; // We use a different seed every time we are invoked. + +BinarySwitch::BinarySwitch(GPRReg value, const Vector& cases, Type type) + : m_value(value) + , m_weakRandom(globalCounter++) + , m_index(0) + , m_caseIndex(UINT_MAX) + , m_type(type) +{ + if (cases.isEmpty()) + return; + + if (verbose) + dataLog("Original cases: ", listDump(cases), "\n"); + + for (unsigned i = 0; i < cases.size(); ++i) + m_cases.append(Case(cases[i], i)); + + std::sort(m_cases.begin(), m_cases.end()); + + if (verbose) + dataLog("Sorted cases: ", listDump(m_cases), "\n"); + + for (unsigned i = 1; i < m_cases.size(); ++i) + RELEASE_ASSERT(m_cases[i - 1] < m_cases[i]); + + build(0, false, m_cases.size()); +} + +BinarySwitch::~BinarySwitch() +{ +} + +bool BinarySwitch::advance(MacroAssembler& jit) +{ + if (m_cases.isEmpty()) { + m_fallThrough.append(jit.jump()); + return false; + } + + if (m_index == m_branches.size()) { + RELEASE_ASSERT(m_jumpStack.isEmpty()); + return false; + } + + for (;;) { + const BranchCode& code = m_branches[m_index++]; + switch (code.kind) { + case NotEqualToFallThrough: + switch (m_type) { + case Int32: + m_fallThrough.append(jit.branch32( + MacroAssembler::NotEqual, m_value, + MacroAssembler::Imm32(static_cast(m_cases[code.index].value)))); + break; + case IntPtr: + m_fallThrough.append(jit.branchPtr( + MacroAssembler::NotEqual, m_value, + MacroAssembler::ImmPtr(bitwise_cast(static_cast(m_cases[code.index].value))))); + break; + } + break; + case NotEqualToPush: + switch (m_type) { + case Int32: + m_jumpStack.append(jit.branch32( + MacroAssembler::NotEqual, m_value, + MacroAssembler::Imm32(static_cast(m_cases[code.index].value)))); + break; + case IntPtr: + m_jumpStack.append(jit.branchPtr( + MacroAssembler::NotEqual, m_value, + MacroAssembler::ImmPtr(bitwise_cast(static_cast(m_cases[code.index].value))))); + break; + } + break; + case LessThanToPush: + switch (m_type) { + case Int32: + m_jumpStack.append(jit.branch32( + MacroAssembler::LessThan, m_value, + 
MacroAssembler::Imm32(static_cast(m_cases[code.index].value)))); + break; + case IntPtr: + m_jumpStack.append(jit.branchPtr( + MacroAssembler::LessThan, m_value, + MacroAssembler::ImmPtr(bitwise_cast(static_cast(m_cases[code.index].value))))); + break; + } + break; + case Pop: + m_jumpStack.takeLast().link(&jit); + break; + case ExecuteCase: + m_caseIndex = code.index; + return true; + } + } +} + +void BinarySwitch::build(unsigned start, bool hardStart, unsigned end) +{ + if (verbose) + dataLog("Building with start = ", start, ", hardStart = ", hardStart, ", end = ", end, "\n"); + + auto append = [&] (const BranchCode& code) { + if (verbose) + dataLog("==> ", code, "\n"); + m_branches.append(code); + }; + + unsigned size = end - start; + + RELEASE_ASSERT(size); + + // This code uses some random numbers to keep things balanced. It's important to keep in mind + // that this does not improve average-case throughput under the assumption that all cases fire + // with equal probability. It just ensures that there will not be some switch structure that + // when combined with some input will always produce pathologically good or pathologically bad + // performance. + + const unsigned leafThreshold = 3; + + if (size <= leafThreshold) { + if (verbose) + dataLog("It's a leaf.\n"); + + // It turns out that for exactly three cases or less, it's better to just compare each + // case individually. This saves 1/6 of a branch on average, and up to 1/3 of a branch in + // extreme cases where the divide-and-conquer bottoms out in a lot of 3-case subswitches. + // + // This assumes that we care about the cost of hitting some case more than we care about + // bottoming out in a default case. I believe that in most places where we use switch + // statements, we are more likely to hit one of the cases than we are to fall through to + // default. Intuitively, if we wanted to improve the performance of default, we would + // reduce the value of leafThreshold to 2 or even to 1. See below for a deeper discussion. + + bool allConsecutive = false; + + if ((hardStart || (start && m_cases[start - 1].value == m_cases[start].value - 1)) + && start + size < m_cases.size() + && m_cases[start + size - 1].value == m_cases[start + size].value - 1) { + allConsecutive = true; + for (unsigned i = 0; i < size - 1; ++i) { + if (m_cases[start + i].value + 1 != m_cases[start + i + 1].value) { + allConsecutive = false; + break; + } + } + } + + if (verbose) + dataLog("allConsecutive = ", allConsecutive, "\n"); + + Vector localCaseIndices; + for (unsigned i = 0; i < size; ++i) + localCaseIndices.append(start + i); + + std::random_shuffle( + localCaseIndices.begin(), localCaseIndices.end(), + [this] (unsigned n) { + // We use modulo to get a random number in the range we want fully knowing that + // this introduces a tiny amount of bias, but we're fine with such tiny bias. + return m_weakRandom.getUint32() % n; + }); + + for (unsigned i = 0; i < size - 1; ++i) { + append(BranchCode(NotEqualToPush, localCaseIndices[i])); + append(BranchCode(ExecuteCase, localCaseIndices[i])); + append(BranchCode(Pop)); + } + + if (!allConsecutive) + append(BranchCode(NotEqualToFallThrough, localCaseIndices.last())); + + append(BranchCode(ExecuteCase, localCaseIndices.last())); + return; + } + + if (verbose) + dataLog("It's not a leaf.\n"); + + // There are two different strategies we could consider here: + // + // Isolate median and split: pick a median and check if the comparison value is equal to it; + // if so, execute the median case. 
Otherwise check if the value is less than the median, and + // recurse left or right based on this. This has two subvariants: we could either first test + // equality for the median and then do the less-than, or we could first do the less-than and + // then check equality on the not-less-than path. + // + // Ignore median and split: do a less-than comparison on a value that splits the cases in two + // equal-sized halves. Recurse left or right based on the comparison. Do not test for equality + // against the median (or anything else); let the recursion handle those equality comparisons + // once we bottom out in a list that case 3 cases or less (see above). + // + // I'll refer to these strategies as Isolate and Ignore. I initially believed that Isolate + // would be faster since it leads to less branching for some lucky cases. It turns out that + // Isolate is almost a total fail in the average, assuming all cases are equally likely. How + // bad Isolate is depends on whether you believe that doing two consecutive branches based on + // the same comparison is cheaper than doing the compare/branches separately. This is + // difficult to evaluate. For small immediates that aren't blinded, we just care about + // avoiding a second compare instruction. For large immediates or when blinding is in play, we + // also care about the instructions used to materialize the immediate a second time. Isolate + // can help with both costs since it involves first doing a < compare+branch on some value, + // followed by a == compare+branch on the same exact value (or vice-versa). Ignore will do a < + // compare+branch on some value, and then the == compare+branch on that same value will happen + // much later. + // + // To evaluate these costs, I wrote the recurrence relation for Isolate and Ignore, assuming + // that ComparisonCost is the cost of a compare+branch and ChainedComparisonCost is the cost + // of a compare+branch on some value that you've just done another compare+branch for. These + // recurrence relations compute the total cost incurred if you executed the switch statement + // on each matching value. So the average cost of hitting some case can be computed as + // Isolate[n]/n or Ignore[n]/n, respectively for the two relations. + // + // Isolate[1] = ComparisonCost + // Isolate[2] = (2 + 1) * ComparisonCost + // Isolate[3] = (3 + 2 + 1) * ComparisonCost + // Isolate[n_] := With[ + // {medianIndex = Floor[n/2] + If[EvenQ[n], RandomInteger[], 1]}, + // ComparisonCost + ChainedComparisonCost + + // (ComparisonCost * (medianIndex - 1) + Isolate[medianIndex - 1]) + + // (2 * ComparisonCost * (n - medianIndex) + Isolate[n - medianIndex])] + // + // Ignore[1] = ComparisonCost + // Ignore[2] = (2 + 1) * ComparisonCost + // Ignore[3] = (3 + 2 + 1) * ComparisonCost + // Ignore[n_] := With[ + // {medianIndex = If[EvenQ[n], n/2, Floor[n/2] + RandomInteger[]]}, + // (medianIndex * ComparisonCost + Ignore[medianIndex]) + + // ((n - medianIndex) * ComparisonCost + Ignore[n - medianIndex])] + // + // This does not account for the average cost of hitting the default case. See further below + // for a discussion of that. + // + // It turns out that for ComparisonCost = 1 and ChainedComparisonCost = 1, Ignore is always + // better than Isolate. If we assume that ChainedComparisonCost = 0, then Isolate wins for + // switch statements that have 20 cases or fewer, though the margin of victory is never large + // - it might sometimes save an average of 0.3 ComparisonCost. 
For larger switch statements, + // we see divergence between the two with Ignore winning. This is of course rather + // unrealistic since the chained comparison is never free. For ChainedComparisonCost = 0.5, we + // see Isolate winning for 10 cases or fewer, by maybe 0.2 ComparisonCost. Again we see + // divergence for large switches with Ignore winning, for example if a switch statement has + // 100 cases then Ignore saves one branch on average. + // + // Our current JIT backends don't provide for optimization for chained comparisons, except for + // reducing the code for materializing the immediate if the immediates are large or blinding + // comes into play. Probably our JIT backends live somewhere north of + // ChainedComparisonCost = 0.5. + // + // This implies that using the Ignore strategy is likely better. If we wanted to incorporate + // the Isolate strategy, we'd want to determine the switch size threshold at which the two + // cross over and then use Isolate for switches that are smaller than that size. + // + // The average cost of hitting the default case is similar, but involves a different cost for + // the base cases: you have to assume that you will always fail each branch. For the Ignore + // strategy we would get this recurrence relation; the same kind of thing happens to the + // Isolate strategy: + // + // Ignore[1] = ComparisonCost + // Ignore[2] = (2 + 2) * ComparisonCost + // Ignore[3] = (3 + 3 + 3) * ComparisonCost + // Ignore[n_] := With[ + // {medianIndex = If[EvenQ[n], n/2, Floor[n/2] + RandomInteger[]]}, + // (medianIndex * ComparisonCost + Ignore[medianIndex]) + + // ((n - medianIndex) * ComparisonCost + Ignore[n - medianIndex])] + // + // This means that if we cared about the default case more, we would likely reduce + // leafThreshold. Reducing it to 2 would reduce the average cost of the default case by 1/3 + // in the most extreme cases (num switch cases = 3, 6, 12, 24, ...). But it would also + // increase the average cost of taking one of the non-default cases by 1/3. Typically the + // difference is 1/6 in either direction. This makes it a very simple trade-off: if we believe + // that the default case is more important then we would want leafThreshold to be 2, and the + // default case would become 1/6 faster on average. But we believe that most switch statements + // are more likely to take one of the cases than the default, so we use leafThreshold = 3 + // and get a 1/6 speed-up on average for taking an explicit case. + + unsigned medianIndex = (start + end) / 2; + + if (verbose) + dataLog("medianIndex = ", medianIndex, "\n"); + + // We want medianIndex to point to the thing we will do a less-than compare against. We want + // this less-than compare to split the current sublist into equal-sized sublists, or + // nearly-equal-sized with some randomness if we're in the odd case. With the above + // calculation, in the odd case we will have medianIndex pointing at either the element we + // want or the element to the left of the one we want. Consider the case of five elements: + // + // 0 1 2 3 4 + // + // start will be 0, end will be 5. The average is 2.5, which rounds down to 2. If we do + // value < 2, then we will split the list into 2 elements on the left and three on the right. + // That's pretty good, but in this odd case we'd like to at random choose 3 instead to ensure + // that we don't become unbalanced on the right. 
This does not improve throughput since one + // side will always get shafted, and that side might still be odd, in which case it will also + // have two sides and one of them will get shafted - and so on. We just want to avoid + // deterministic pathologies. + // + // In the even case, we will always end up pointing at the element we want: + // + // 0 1 2 3 + // + // start will be 0, end will be 4. So, the average is 2, which is what we'd like. + if (size & 1) { + RELEASE_ASSERT(medianIndex - start + 1 == end - medianIndex); + medianIndex += m_weakRandom.getUint32() & 1; + } else + RELEASE_ASSERT(medianIndex - start == end - medianIndex); + + RELEASE_ASSERT(medianIndex > start); + RELEASE_ASSERT(medianIndex + 1 < end); + + if (verbose) + dataLog("fixed medianIndex = ", medianIndex, "\n"); + + append(BranchCode(LessThanToPush, medianIndex)); + build(medianIndex, true, end); + append(BranchCode(Pop)); + build(start, hardStart, medianIndex); +} + +void BinarySwitch::Case::dump(PrintStream& out) const +{ + out.print(""); +} + +void BinarySwitch::BranchCode::dump(PrintStream& out) const +{ + switch (kind) { + case NotEqualToFallThrough: + out.print("NotEqualToFallThrough"); + break; + case NotEqualToPush: + out.print("NotEqualToPush"); + break; + case LessThanToPush: + out.print("LessThanToPush"); + break; + case Pop: + out.print("Pop"); + break; + case ExecuteCase: + out.print("ExecuteCase"); + break; + } + + if (index != UINT_MAX) + out.print("(", index, ")"); +} + +} // namespace JSC + +#endif // ENABLE(JIT) + diff --git a/Source/JavaScriptCore/jit/BinarySwitch.h b/Source/JavaScriptCore/jit/BinarySwitch.h new file mode 100644 index 000000000..c2569d335 --- /dev/null +++ b/Source/JavaScriptCore/jit/BinarySwitch.h @@ -0,0 +1,143 @@ +/* + * Copyright (C) 2013, 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#pragma once + +#if ENABLE(JIT) + +#include "GPRInfo.h" +#include "MacroAssembler.h" +#include + +namespace JSC { + +// The BinarySwitch class makes it easy to emit a switch statement over either +// 32-bit integers or pointers, where the switch uses a tree of branches +// rather than a jump table. 
This makes it particularly useful if the case +// values are too far apart to make a jump table practical, or if there are +// sufficiently few cases that the total cost of log(numCases) branches is +// less than the cost of an indirected jump. +// +// In an effort to simplify the logic of emitting code for each case, this +// uses an iterator style, rather than a functor callback style. This makes +// sense because even the iterator implementation found herein is relatively +// simple, whereas the code it's used from is usually quite complex - one +// example being the trie-of-trees string switch implementation, where the +// code emitted for each case involves recursing to emit code for a sub-trie. +// +// Use this like so: +// +// BinarySwitch switch(valueReg, casesVector, BinarySwitch::Int32); +// while (switch.advance(jit)) { +// int value = switch.caseValue(); +// unsigned index = switch.caseIndex(); // index into casesVector, above +// ... // generate code for this case +// ... = jit.jump(); // you have to jump out yourself; falling through causes undefined behavior +// } +// switch.fallThrough().link(&jit); + +class BinarySwitch { +public: + enum Type { + Int32, + IntPtr + }; + + BinarySwitch(GPRReg value, const Vector& cases, Type); + ~BinarySwitch(); + + unsigned caseIndex() const { return m_cases[m_caseIndex].index; } + int64_t caseValue() const { return m_cases[m_caseIndex].value; } + + bool advance(MacroAssembler&); + + MacroAssembler::JumpList& fallThrough() { return m_fallThrough; } + +private: + void build(unsigned start, bool hardStart, unsigned end); + + GPRReg m_value; + + struct Case { + Case() { } + + Case(int64_t value, unsigned index) + : value(value) + , index(index) + { + } + + bool operator<(const Case& other) const + { + return value < other.value; + } + + void dump(PrintStream& out) const; + + int64_t value; + unsigned index; + }; + + Vector m_cases; + + enum BranchKind { + NotEqualToFallThrough, + NotEqualToPush, + LessThanToPush, + Pop, + ExecuteCase + }; + + struct BranchCode { + BranchCode() { } + + BranchCode(BranchKind kind, unsigned index = UINT_MAX) + : kind(kind) + , index(index) + { + } + + void dump(PrintStream& out) const; + + BranchKind kind; + unsigned index; + }; + + WeakRandom m_weakRandom; + + Vector m_branches; + + unsigned m_index; + unsigned m_caseIndex; + Vector m_jumpStack; + + MacroAssembler::JumpList m_fallThrough; + + Type m_type; +}; + +} // namespace JSC + +#endif // ENABLE(JIT) diff --git a/Source/JavaScriptCore/jit/CCallHelpers.cpp b/Source/JavaScriptCore/jit/CCallHelpers.cpp new file mode 100644 index 000000000..3c3df618f --- /dev/null +++ b/Source/JavaScriptCore/jit/CCallHelpers.cpp @@ -0,0 +1,73 @@ +/* + * Copyright (C) 2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "CCallHelpers.h" + +#if ENABLE(JIT) + +#include "ShadowChicken.h" + +namespace JSC { + +void CCallHelpers::logShadowChickenProloguePacket(GPRReg shadowPacket, GPRReg scratch1, GPRReg scope) +{ + storePtr(GPRInfo::callFrameRegister, Address(shadowPacket, OBJECT_OFFSETOF(ShadowChicken::Packet, frame))); + loadPtr(Address(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(CallerFrameAndPC, callerFrame)), scratch1); + storePtr(scratch1, Address(shadowPacket, OBJECT_OFFSETOF(ShadowChicken::Packet, callerFrame))); + loadPtr(addressFor(CallFrameSlot::callee), scratch1); + storePtr(scratch1, Address(shadowPacket, OBJECT_OFFSETOF(ShadowChicken::Packet, callee))); + storePtr(scope, Address(shadowPacket, OBJECT_OFFSETOF(ShadowChicken::Packet, scope))); +} + +void CCallHelpers::logShadowChickenTailPacket(GPRReg shadowPacket, JSValueRegs thisRegs, GPRReg scope, CodeBlock* codeBlock, CallSiteIndex callSiteIndex) +{ + storePtr(GPRInfo::callFrameRegister, Address(shadowPacket, OBJECT_OFFSETOF(ShadowChicken::Packet, frame))); + storePtr(TrustedImmPtr(ShadowChicken::Packet::tailMarker()), Address(shadowPacket, OBJECT_OFFSETOF(ShadowChicken::Packet, callee))); + storeValue(thisRegs, Address(shadowPacket, OBJECT_OFFSETOF(ShadowChicken::Packet, thisValue))); + storePtr(scope, Address(shadowPacket, OBJECT_OFFSETOF(ShadowChicken::Packet, scope))); + storePtr(TrustedImmPtr(codeBlock), Address(shadowPacket, OBJECT_OFFSETOF(ShadowChicken::Packet, codeBlock))); + store32(TrustedImm32(callSiteIndex.bits()), Address(shadowPacket, OBJECT_OFFSETOF(ShadowChicken::Packet, callSiteIndex))); +} + +void CCallHelpers::ensureShadowChickenPacket(GPRReg shadowPacket, GPRReg scratch1NonArgGPR, GPRReg scratch2) +{ + ASSERT(!RegisterSet::argumentGPRS().get(scratch1NonArgGPR)); + move(TrustedImmPtr(vm()->shadowChicken().addressOfLogCursor()), scratch1NonArgGPR); + loadPtr(Address(scratch1NonArgGPR), shadowPacket); + Jump ok = branchPtr(Below, shadowPacket, TrustedImmPtr(vm()->shadowChicken().logEnd())); + setupArgumentsExecState(); + move(TrustedImmPtr(bitwise_cast(operationProcessShadowChickenLog)), scratch1NonArgGPR); + call(scratch1NonArgGPR); + move(TrustedImmPtr(vm()->shadowChicken().addressOfLogCursor()), scratch1NonArgGPR); + loadPtr(Address(scratch1NonArgGPR), shadowPacket); + ok.link(this); + addPtr(TrustedImm32(sizeof(ShadowChicken::Packet)), shadowPacket, scratch2); + storePtr(scratch2, Address(scratch1NonArgGPR)); +} + +} // namespace JSC + +#endif // ENABLE(JIT) diff --git a/Source/JavaScriptCore/jit/CCallHelpers.h b/Source/JavaScriptCore/jit/CCallHelpers.h index afcccd1ca..8a3c90de6 100644 --- a/Source/JavaScriptCore/jit/CCallHelpers.h +++ b/Source/JavaScriptCore/jit/CCallHelpers.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2015-2016 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -23,24 +23,55 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef CCallHelpers_h -#define CCallHelpers_h - -#include +#pragma once #if ENABLE(JIT) #include "AssemblyHelpers.h" #include "GPRInfo.h" +#include "RegisterMap.h" +#include "StackAlignment.h" namespace JSC { +#if CPU(MIPS) || (OS(WINDOWS) && CPU(X86_64)) +#define POKE_ARGUMENT_OFFSET 4 +#else +#define POKE_ARGUMENT_OFFSET 0 +#endif + +// EncodedJSValue in JSVALUE32_64 is a 64-bit integer. When being compiled in ARM EABI, it must be aligned even-numbered register (r0, r2 or [sp]). +// To avoid assemblies from using wrong registers, let's occupy r1 or r3 with a dummy argument when necessary. +#if (COMPILER_SUPPORTS(EABI) && CPU(ARM)) || CPU(MIPS) +#define EABI_32BIT_DUMMY_ARG CCallHelpers::TrustedImm32(0), +#else +#define EABI_32BIT_DUMMY_ARG +#endif + class CCallHelpers : public AssemblyHelpers { public: CCallHelpers(VM* vm, CodeBlock* codeBlock = 0) : AssemblyHelpers(vm, codeBlock) { } + + // The most general helper for setting arguments that fit in a GPR, if you can compute each + // argument without using any argument registers. You usually want one of the setupArguments*() + // methods below instead of this. This thing is most useful if you have *a lot* of arguments. + template + void setupArgument(unsigned argumentIndex, const Functor& functor) + { + unsigned numberOfRegs = GPRInfo::numberOfArgumentRegisters; // Disguise the constant from clang's tautological compare warning. + if (argumentIndex < numberOfRegs) { + functor(GPRInfo::toArgumentRegister(argumentIndex)); + return; + } + + functor(GPRInfo::nonArgGPR0); + poke(GPRInfo::nonArgGPR0, POKE_ARGUMENT_OFFSET + argumentIndex - GPRInfo::numberOfArgumentRegisters); + } + + void setupArgumentsWithExecState() { setupArgumentsExecState(); } // These methods used to sort arguments into the correct registers. 
// On X86 we use cdecl calling conventions, which pass all arguments on the @@ -95,6 +126,13 @@ public: addCallArgument(arg2); } + ALWAYS_INLINE void setupArguments(TrustedImmPtr arg1, GPRReg arg2) + { + resetCallArguments(); + addCallArgument(arg1); + addCallArgument(arg2); + } + ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2, GPRReg arg3) { resetCallArguments(); @@ -182,6 +220,15 @@ public: addCallArgument(arg2); } + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + } + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2) { resetCallArguments(); @@ -239,6 +286,15 @@ public: addCallArgument(arg3); } + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImm32 arg2, GPRReg arg3) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + } + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3) { resetCallArguments(); @@ -275,6 +331,36 @@ public: addCallArgument(arg3); } + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, TrustedImm32 arg4) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImmPtr arg4) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + } + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, TrustedImmPtr arg3) { resetCallArguments(); @@ -284,6 +370,58 @@ public: addCallArgument(arg3); } + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, TrustedImm32 arg3) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, TrustedImm32 arg5) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + addCallArgument(arg5); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4, TrustedImm32 arg5) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + addCallArgument(arg5); + } + + ALWAYS_INLINE void 
setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, GPRReg arg5, TrustedImm32 arg6) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + addCallArgument(arg5); + addCallArgument(arg6); + } + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3) { resetCallArguments(); @@ -347,6 +485,57 @@ public: addCallArgument(arg3); addCallArgument(arg4); } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImm32 arg2, GPRReg arg3, TrustedImmPtr arg4) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImmPtr arg4) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + addCallArgument(arg5); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4, TrustedImm32 arg5) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + addCallArgument(arg5); + } ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5) { @@ -359,6 +548,27 @@ public: addCallArgument(arg5); } + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4, TrustedImm32 arg5) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + addCallArgument(arg5); + } + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4) { resetCallArguments(); @@ -379,6 +589,17 @@ public: addCallArgument(arg4); } + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, GPRReg arg5) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + addCallArgument(arg5); + } + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3) { resetCallArguments(); @@ -438,6 +659,17 @@ public: 
addCallArgument(arg4); } + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + addCallArgument(arg5); + } + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImmPtr arg5) { resetCallArguments(); @@ -493,6 +725,16 @@ public: addCallArgument(arg4); } + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, TrustedImmPtr arg3, GPRReg arg4) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + } + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4) { resetCallArguments(); @@ -503,6 +745,16 @@ public: addCallArgument(arg4); } + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, GPRReg arg3, GPRReg arg4) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + } + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5) { resetCallArguments(); @@ -557,7 +809,7 @@ public: addCallArgument(arg4); addCallArgument(arg5); } - + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6) { resetCallArguments(); @@ -569,38 +821,99 @@ public: addCallArgument(arg5); addCallArgument(arg6); } + + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, TrustedImm32 arg6) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + addCallArgument(arg5); + addCallArgument(arg6); + } - ALWAYS_INLINE void setupArgumentsWithExecState(FPRReg arg1, GPRReg arg2) + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, TrustedImmPtr arg5, TrustedImmPtr arg6) { resetCallArguments(); addCallArgument(GPRInfo::callFrameRegister); addCallArgument(arg1); addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + addCallArgument(arg5); + addCallArgument(arg6); } - ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, FPRReg arg3) + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6, TrustedImmPtr arg7) { resetCallArguments(); addCallArgument(GPRInfo::callFrameRegister); addCallArgument(arg1); addCallArgument(arg2); addCallArgument(arg3); + addCallArgument(arg4); + addCallArgument(arg5); + addCallArgument(arg6); + addCallArgument(arg7); } -#endif // !NUMBER_OF_ARGUMENT_REGISTERS - // These methods are suitable for any calling convention that provides for - // at least 4 argument registers, e.g. X86_64, ARMv7. 
-#if NUMBER_OF_ARGUMENT_REGISTERS >= 4 - template - void setupTwoStubArgsGPR(GPRReg srcA, GPRReg srcB) + + ALWAYS_INLINE void setupArgumentsWithExecState(FPRReg arg1, GPRReg arg2) { - // Assuming that srcA != srcB, there are 7 interesting states the registers may be in: - // (1) both are already in arg regs, the right way around. - // (2) both are already in arg regs, the wrong way around. - // (3) neither are currently in arg registers. - // (4) srcA in in its correct reg. - // (5) srcA in in the incorrect reg. - // (6) srcB in in its correct reg. - // (7) srcB in in the incorrect reg. + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(FPRReg arg1, TrustedImm32 arg2) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, FPRReg arg3) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6, GPRReg arg7, GPRReg arg8) + { + resetCallArguments(); + addCallArgument(GPRInfo::callFrameRegister); + addCallArgument(arg1); + addCallArgument(arg2); + addCallArgument(arg3); + addCallArgument(arg4); + addCallArgument(arg5); + addCallArgument(arg6); + addCallArgument(arg7); + addCallArgument(arg8); + } + +#endif // !NUMBER_OF_ARGUMENT_REGISTERS + // These methods are suitable for any calling convention that provides for + // at least 4 argument registers, e.g. X86_64, ARMv7. +#if NUMBER_OF_ARGUMENT_REGISTERS >= 4 + template + void setupTwoStubArgsGPR(GPRReg srcA, GPRReg srcB) + { + // Assuming that srcA != srcB, there are 7 interesting states the registers may be in: + // (1) both are already in arg regs, the right way around. + // (2) both are already in arg regs, the wrong way around. + // (3) neither are currently in arg registers. + // (4) srcA in in its correct reg. + // (5) srcA in in the incorrect reg. + // (6) srcB in in its correct reg. + // (7) srcB in in the incorrect reg. // // The trivial approach is to simply emit two moves, to put srcA in place then srcB in // place (the MacroAssembler will omit redundant moves). 
This apporach will be safe in @@ -672,6 +985,15 @@ public: swap(destB, destC); } + void setupFourStubArgsGPR(GPRReg destA, GPRReg destB, GPRReg destC, GPRReg destD, GPRReg srcA, GPRReg srcB, GPRReg srcC, GPRReg srcD) + { + setupStubArgsGPR<4>({ { destA, destB, destC, destD } }, { { srcA, srcB, srcC, srcD } }); + } + void setupFiveStubArgsGPR(GPRReg destA, GPRReg destB, GPRReg destC, GPRReg destD, GPRReg destE, GPRReg srcA, GPRReg srcB, GPRReg srcC, GPRReg srcD, GPRReg srcE) + { + setupStubArgsGPR<5>({ { destA, destB, destC, destD, destE } }, { { srcA, srcB, srcC, srcD, srcE } }); + } + #if CPU(X86_64) || CPU(ARM64) template void setupTwoStubArgsFPR(FPRReg srcA, FPRReg srcB) @@ -730,12 +1052,6 @@ public: setupThreeStubArgsGPR(arg1, arg2, arg3); } -#if CPU(MIPS) || (OS(WINDOWS) && CPU(X86_64)) -#define POKE_ARGUMENT_OFFSET 4 -#else -#define POKE_ARGUMENT_OFFSET 0 -#endif - #if CPU(X86_64) || CPU(ARM64) ALWAYS_INLINE void setupArguments(FPRReg arg1) { @@ -749,14 +1065,41 @@ public: ALWAYS_INLINE void setupArgumentsWithExecState(FPRReg arg1, GPRReg arg2) { +#if OS(WINDOWS) && CPU(X86_64) + // On Windows, arguments map to designated registers based on the argument positions, even when there are interlaced scalar and floating point arguments. + // See http://msdn.microsoft.com/en-us/library/zthk2dkh.aspx + moveDouble(arg1, FPRInfo::argumentFPR1); + move(arg2, GPRInfo::argumentGPR2); +#else + moveDouble(arg1, FPRInfo::argumentFPR0); + move(arg2, GPRInfo::argumentGPR1); +#endif + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(FPRReg arg1, TrustedImm32 arg2) + { +#if OS(WINDOWS) && CPU(X86_64) + // On Windows, arguments map to designated registers based on the argument positions, even when there are interlaced scalar and floating point arguments. + // See http://msdn.microsoft.com/en-us/library/zthk2dkh.aspx + moveDouble(arg1, FPRInfo::argumentFPR1); + move(arg2, GPRInfo::argumentGPR2); +#else moveDouble(arg1, FPRInfo::argumentFPR0); move(arg2, GPRInfo::argumentGPR1); +#endif move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); } ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, FPRReg arg3) { +#if OS(WINDOWS) && CPU(X86_64) + // On Windows, arguments map to designated registers based on the argument positions, even when there are interlaced scalar and floating point arguments. 
+ // See http://msdn.microsoft.com/en-us/library/zthk2dkh.aspx + moveDouble(arg3, FPRInfo::argumentFPR3); +#else moveDouble(arg3, FPRInfo::argumentFPR0); +#endif setupStubArguments(arg1, arg2); move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); } @@ -790,6 +1133,13 @@ public: move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); } + ALWAYS_INLINE void setupArgumentsWithExecState(FPRReg arg1, TrustedImm32 arg2) + { + moveDouble(arg1, FPRInfo::argumentFPR0); + move(arg2, GPRInfo::argumentGPR1); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, FPRReg arg3) { moveDouble(arg3, FPRInfo::argumentFPR0); @@ -830,6 +1180,13 @@ public: move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); } + ALWAYS_INLINE void setupArgumentsWithExecState(FPRReg arg1, TrustedImm32 arg2) + { + move(arg2, GPRInfo::argumentGPR3); + assembler().vmov(GPRInfo::argumentGPR1, GPRInfo::argumentGPR2, arg1); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, FPRReg arg3) { setupStubArguments(arg1, arg2); @@ -883,6 +1240,13 @@ public: poke(arg2, 4); } + ALWAYS_INLINE void setupArgumentsWithExecState(FPRReg arg1, TrustedImm32 arg2) + { + assembler().vmov(GPRInfo::argumentGPR2, GPRInfo::argumentGPR3, arg1); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + poke(arg2, 4); + } + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, FPRReg arg3) { setupStubArguments(arg1, arg2); @@ -890,46 +1254,15 @@ public: poke(arg3, 4); } - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, FPRReg arg2, GPRReg arg3) + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32, FPRReg arg2, GPRReg arg3) { setupArgumentsWithExecState(arg2, arg3); } - ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, FPRReg arg4) + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32, FPRReg arg4) { setupArgumentsWithExecState(arg1, arg2, arg4); } -#elif CPU(SH4) - ALWAYS_INLINE void setupArguments(FPRReg arg1) - { - moveDouble(arg1, FPRInfo::argumentFPR0); - } - - ALWAYS_INLINE void setupArguments(FPRReg arg1, FPRReg arg2) - { - if (arg2 != FPRInfo::argumentFPR0) { - moveDouble(arg1, FPRInfo::argumentFPR0); - moveDouble(arg2, FPRInfo::argumentFPR1); - } else if (arg1 != FPRInfo::argumentFPR1) { - moveDouble(arg2, FPRInfo::argumentFPR1); - moveDouble(arg1, FPRInfo::argumentFPR0); - } else - swapDouble(FPRInfo::argumentFPR0, FPRInfo::argumentFPR1); - } - - ALWAYS_INLINE void setupArgumentsWithExecState(FPRReg arg1, GPRReg arg2) - { - moveDouble(arg1, FPRInfo::argumentFPR0); - move(arg2, GPRInfo::argumentGPR1); - move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); - } - - ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, FPRReg arg3) - { - moveDouble(arg3, FPRInfo::argumentFPR0); - setupStubArguments(arg1, arg2); - move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); - } #else #error "JIT not supported on this platform." 
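The OS(WINDOWS) && CPU(X86_64) branches above exist because, as the comment notes, the Microsoft x64 convention assigns parameter slots by position even when scalar and floating-point arguments are interleaved, while the System V convention counts GPR and FPR arguments separately. An illustration (not JSC code) of where a mixed signature lands under each convention:

    // Illustration of why the Windows branch targets argumentFPR1/argumentGPR2
    // while the other branch targets argumentFPR0/argumentGPR1.
    #include <cstdint>

    struct ExecState;

    // A host operation with interleaved scalar and floating-point parameters:
    void exampleOperation(ExecState* exec, double value, int32_t flags);

    // Microsoft x64: parameter slots are positional; integer and FP share the count.
    //   exec  (1st) -> RCX
    //   value (2nd) -> XMM1   (RDX is left unused)
    //   flags (3rd) -> R8D
    //
    // System V AMD64 (Linux, macOS): integer and FP registers are counted separately.
    //   exec  (1st integer) -> RDI
    //   value (1st FP)      -> XMM0
    //   flags (2nd integer) -> ESI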
#endif @@ -939,6 +1272,12 @@ public: move(arg1, GPRInfo::argumentGPR0); } + ALWAYS_INLINE void setupArguments(TrustedImmPtr arg1, GPRReg arg2) + { + move(arg2, GPRInfo::argumentGPR1); + move(arg1, GPRInfo::argumentGPR0); + } + ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2) { setupTwoStubArgsGPR(arg1, arg2); @@ -956,6 +1295,12 @@ public: move(arg4, GPRInfo::argumentGPR3); } + ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4) + { + setupThreeStubArgsGPR(arg1, arg2, arg3); + move(arg4, GPRInfo::argumentGPR3); + } + ALWAYS_INLINE void setupArguments(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, TrustedImmPtr arg4) { setupTwoStubArgsGPR(arg1, arg3); @@ -1000,6 +1345,14 @@ public: move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); } +#if OS(WINDOWS) && CPU(X86_64) + ALWAYS_INLINE void setupArgumentsWithExecStateForCallWithSlowPathReturnType(TrustedImm32 arg1) + { + move(arg1, GPRInfo::argumentGPR2); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1); + } +#endif + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2) { setupStubArguments(arg1, arg2); @@ -1083,6 +1436,14 @@ public: move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); } + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImm32 arg2, GPRReg arg3) + { + move(arg3, GPRInfo::argumentGPR3); + move(arg1, GPRInfo::argumentGPR1); + move(arg2, GPRInfo::argumentGPR2); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImm32 arg2, TrustedImm32 arg3) { move(arg1, GPRInfo::argumentGPR1); @@ -1133,6 +1494,14 @@ public: move(arg3, GPRInfo::argumentGPR3); move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, TrustedImm32 arg3) + { + move(arg1, GPRInfo::argumentGPR1); + move(arg2, GPRInfo::argumentGPR2); + move(arg3, GPRInfo::argumentGPR3); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImmPtr arg3) { @@ -1157,6 +1526,14 @@ public: move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); } + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImmPtr arg2, GPRReg arg3) + { + move(arg3, GPRInfo::argumentGPR3); + move(arg1, GPRInfo::argumentGPR1); + move(arg2, GPRInfo::argumentGPR2); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3) { move(arg3, GPRInfo::argumentGPR3); @@ -1165,6 +1542,14 @@ public: move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); } + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, TrustedImm32 arg3) + { + move(arg3, GPRInfo::argumentGPR3); + move(arg1, GPRInfo::argumentGPR1); + move(arg2, GPRInfo::argumentGPR2); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, TrustedImm32 arg3) { move(arg2, GPRInfo::argumentGPR2); @@ -1238,12 +1623,40 @@ public: setupArgumentsWithExecState(arg1, arg2, arg3); } + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4) + { + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, 
GPRReg arg3, TrustedImm32 arg4) { poke(arg4, POKE_ARGUMENT_OFFSET); setupArgumentsWithExecState(arg1, arg2, arg3); } + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, GPRReg arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, TrustedImmPtr arg5, TrustedImmPtr arg6) + { + poke(arg6, POKE_ARGUMENT_OFFSET + 2); + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, TrustedImmPtr arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4) { poke(arg4, POKE_ARGUMENT_OFFSET); @@ -1268,12 +1681,55 @@ public: setupArgumentsWithExecState(arg1, arg2, arg3); } + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImm32 arg4) + { + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4, TrustedImm32 arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + +#if CPU(X86_64) + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm64 arg4) + { + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } +#endif + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImmPtr arg4) { poke(arg4, POKE_ARGUMENT_OFFSET); setupArgumentsWithExecState(arg1, arg2, arg3); } + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5, TrustedImm32 arg6) + { + poke(arg6, POKE_ARGUMENT_OFFSET + 2); + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImmPtr arg5) { poke(arg5, POKE_ARGUMENT_OFFSET + 1); @@ -1294,6 +1750,12 @@ public: setupArgumentsWithExecState(arg1, arg2, arg3); } + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, TrustedImmPtr arg3, GPRReg arg4) + { + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImm32 arg4, 
TrustedImm32 arg5) { poke(arg5, POKE_ARGUMENT_OFFSET + 1); @@ -1356,73 +1818,177 @@ public: poke(arg4, POKE_ARGUMENT_OFFSET); setupArgumentsWithExecState(arg1, arg2, arg3); } - - ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5) + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImmPtr arg4) { - poke(arg5, POKE_ARGUMENT_OFFSET + 1); poke(arg4, POKE_ARGUMENT_OFFSET); setupArgumentsWithExecState(arg1, arg2, arg3); } - ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, TrustedImm32 arg5) + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImmPtr arg4) { - poke(arg5, POKE_ARGUMENT_OFFSET + 1); poke(arg4, POKE_ARGUMENT_OFFSET); setupArgumentsWithExecState(arg1, arg2, arg3); } - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, TrustedImmPtr arg5) + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4) { - poke(arg5, POKE_ARGUMENT_OFFSET + 1); poke(arg4, POKE_ARGUMENT_OFFSET); setupArgumentsWithExecState(arg1, arg2, arg3); } - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5) + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, GPRReg arg3, GPRReg arg4) { - poke(arg5, POKE_ARGUMENT_OFFSET + 1); poke(arg4, POKE_ARGUMENT_OFFSET); setupArgumentsWithExecState(arg1, arg2, arg3); } - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5) + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5) { poke(arg5, POKE_ARGUMENT_OFFSET + 1); poke(arg4, POKE_ARGUMENT_OFFSET); setupArgumentsWithExecState(arg1, arg2, arg3); } - ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5) + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, TrustedImm32 arg5) { poke(arg5, POKE_ARGUMENT_OFFSET + 1); poke(arg4, POKE_ARGUMENT_OFFSET); setupArgumentsWithExecState(arg1, arg2, arg3); } - ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, GPRReg arg4, TrustedImm32 arg5) + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, TrustedImmPtr arg5) { poke(arg5, POKE_ARGUMENT_OFFSET + 1); poke(arg4, POKE_ARGUMENT_OFFSET); setupArgumentsWithExecState(arg1, arg2, arg3); } - ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6) + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5) { - poke(arg6, POKE_ARGUMENT_OFFSET + 2); poke(arg5, POKE_ARGUMENT_OFFSET + 1); poke(arg4, POKE_ARGUMENT_OFFSET); setupArgumentsWithExecState(arg1, arg2, arg3); } - ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5, TrustedImm32 arg6) + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5) { - poke(arg6, POKE_ARGUMENT_OFFSET + 2); poke(arg5, POKE_ARGUMENT_OFFSET + 1); poke(arg4, POKE_ARGUMENT_OFFSET); 
setupArgumentsWithExecState(arg1, arg2, arg3); } - ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, GPRReg arg4, GPRReg arg5) + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4, TrustedImm32 arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4) + { + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4, TrustedImm32 arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImmPtr arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4, TrustedImmPtr arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, GPRReg arg4, TrustedImm32 arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6) + { + poke(arg6, POKE_ARGUMENT_OFFSET + 2); + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, TrustedImm32 arg6) + { + poke(arg6, POKE_ARGUMENT_OFFSET + 2); + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5, TrustedImm32 arg6) + { + poke(arg6, POKE_ARGUMENT_OFFSET + 
2); + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6) + { + poke(arg6, POKE_ARGUMENT_OFFSET + 2); + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6, TrustedImmPtr arg7) + { + poke(arg7, POKE_ARGUMENT_OFFSET + 3); + poke(arg6, POKE_ARGUMENT_OFFSET + 2); + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, GPRReg arg4, GPRReg arg5) { poke(arg5, POKE_ARGUMENT_OFFSET + 1); poke(arg4, POKE_ARGUMENT_OFFSET); @@ -1469,6 +2035,22 @@ public: setupArgumentsWithExecState(arg1, arg2, arg3); } + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, GPRReg arg5, TrustedImm32 arg6) + { + poke(arg6, POKE_ARGUMENT_OFFSET + 2); + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, TrustedImmPtr arg6) + { + poke(arg6, POKE_ARGUMENT_OFFSET + 2); + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6, GPRReg arg7) { poke(arg7, POKE_ARGUMENT_OFFSET + 3); @@ -1487,6 +2069,46 @@ public: setupArgumentsWithExecState(arg1, arg2, arg3); } + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, TrustedImmPtr arg6, TrustedImmPtr arg7) + { + poke(arg7, POKE_ARGUMENT_OFFSET + 3); + poke(arg6, POKE_ARGUMENT_OFFSET + 2); + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6, GPRReg arg7, TrustedImmPtr arg8) + { + poke(arg8, POKE_ARGUMENT_OFFSET + 4); + poke(arg7, POKE_ARGUMENT_OFFSET + 3); + poke(arg6, POKE_ARGUMENT_OFFSET + 2); + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6, GPRReg arg7, GPRReg arg8) + { + poke(arg8, POKE_ARGUMENT_OFFSET + 4); + poke(arg7, POKE_ARGUMENT_OFFSET + 3); + poke(arg6, POKE_ARGUMENT_OFFSET + 2); + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6, GPRReg arg7, GPRReg arg8, GPRReg arg9) + { + poke(arg9, POKE_ARGUMENT_OFFSET + 5); + poke(arg8, POKE_ARGUMENT_OFFSET + 4); + poke(arg7, POKE_ARGUMENT_OFFSET + 3); + poke(arg6, POKE_ARGUMENT_OFFSET + 2); + poke(arg5, POKE_ARGUMENT_OFFSET + 
1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5, GPRReg arg6, GPRReg arg7) { poke(arg7, POKE_ARGUMENT_OFFSET + 3); @@ -1496,6 +2118,15 @@ public: setupArgumentsWithExecState(arg1, arg2, arg3); } + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, TrustedImm32 arg5, GPRReg arg6, GPRReg arg7) + { + poke(arg7, POKE_ARGUMENT_OFFSET + 3); + poke(arg6, POKE_ARGUMENT_OFFSET + 2); + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4, GPRReg arg5) { poke(arg5, POKE_ARGUMENT_OFFSET); @@ -1503,14 +2134,41 @@ public: move(arg3, GPRInfo::argumentGPR2); move(arg4, GPRInfo::argumentGPR3); } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, TrustedImm32 arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImmPtr arg4, GPRReg arg5) + { + poke(arg5, POKE_ARGUMENT_OFFSET + 1); + poke(arg4, POKE_ARGUMENT_OFFSET); + setupArgumentsWithExecState(arg1, arg2, arg3); + } #endif // NUMBER_OF_ARGUMENT_REGISTERS == 4 #if NUMBER_OF_ARGUMENT_REGISTERS >= 5 + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4) + { + setupFourStubArgsGPR(GPRInfo::argumentGPR1, GPRInfo::argumentGPR2, GPRInfo::argumentGPR3, GPRInfo::argumentGPR4, arg1, arg2, arg3, arg4); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + void setupStubArguments134(GPRReg arg1, GPRReg arg3, GPRReg arg4) { setupThreeStubArgsGPR(arg1, arg3, arg4); } + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4) + { + setupThreeStubArgsGPR(arg1, arg2, arg3); + move(arg4, GPRInfo::argumentGPR4); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4) { setupTwoStubArgsGPR(arg1, arg4); @@ -1519,6 +2177,36 @@ public: move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); } + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4) + { + setupTwoStubArgsGPR(arg1, arg2); + move(arg3, GPRInfo::argumentGPR3); + move(arg4, GPRInfo::argumentGPR4); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImmPtr arg4) + { + setupTwoStubArgsGPR(arg1, arg2); + move(arg3, GPRInfo::argumentGPR3); + move(arg4, GPRInfo::argumentGPR4); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5) + { + setupThreeStubArgsGPR(arg1, arg4, arg5); + move(arg2, GPRInfo::argumentGPR2); + move(arg3, GPRInfo::argumentGPR3); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5) + { + 
setupFiveStubArgsGPR(GPRInfo::argumentGPR1, GPRInfo::argumentGPR2, GPRInfo::argumentGPR3, GPRInfo::argumentGPR4, GPRInfo::argumentGPR5, arg1, arg2, arg3, arg4, arg5); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4) { setupStubArguments134(arg1, arg3, arg4); @@ -1534,6 +2222,76 @@ public: move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); } + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImm32 arg4) + { + move(arg2, GPRInfo::argumentGPR2); // In case arg2 is argumentGPR1. + move(arg1, GPRInfo::argumentGPR1); + move(arg3, GPRInfo::argumentGPR3); + move(arg4, GPRInfo::argumentGPR4); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm64 arg4) + { + move(arg2, GPRInfo::argumentGPR2); // In case arg2 is argumentGPR1. + move(arg1, GPRInfo::argumentGPR1); + move(arg3, GPRInfo::argumentGPR3); + move(arg4, GPRInfo::argumentGPR4); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4, TrustedImm32 arg5) + { + move(arg2, GPRInfo::argumentGPR2); // In case arg2 is argumentGPR1. + move(arg1, GPRInfo::argumentGPR1); + move(arg3, GPRInfo::argumentGPR3); + move(arg4, GPRInfo::argumentGPR4); + move(arg5, GPRInfo::argumentGPR5); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5) + { + move(arg3, GPRInfo::argumentGPR3); + move(arg1, GPRInfo::argumentGPR1); + move(arg2, GPRInfo::argumentGPR2); + move(arg4, GPRInfo::argumentGPR4); + move(arg5, GPRInfo::argumentGPR5); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, TrustedImm32 arg5) + { + setupTwoStubArgsGPR(arg2, arg4); + move(arg1, GPRInfo::argumentGPR1); + move(arg3, GPRInfo::argumentGPR3); + move(arg5, GPRInfo::argumentGPR5); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5) + { + setupTwoStubArgsGPR(arg2, arg3); + move(arg1, GPRInfo::argumentGPR1); + move(arg4, GPRInfo::argumentGPR4); + move(arg5, GPRInfo::argumentGPR5); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4) + { + setupThreeStubArgsGPR(arg1, arg2, arg3); + move(arg4, GPRInfo::argumentGPR4); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + + ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4) + { + setupThreeStubArgsGPR(arg1, arg2, arg4); + move(arg3, GPRInfo::argumentGPR3); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); + } + ALWAYS_INLINE void setupArguments(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4, TrustedImmPtr arg5) { setupThreeStubArgsGPR(arg1, arg3, arg4); @@ -1549,6 +2307,69 @@ public: } #endif + void setupArgumentsWithExecState(JSValueRegs arg) + { +#if USE(JSVALUE64) + setupArgumentsWithExecState(arg.gpr()); 
+#else + setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg.payloadGPR(), arg.tagGPR()); +#endif + } + + void setupArgumentsWithExecState(JSValueRegs arg1, JSValueRegs arg2) + { +#if USE(JSVALUE64) + setupArgumentsWithExecState(arg1.gpr(), arg2.gpr()); +#else + setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1.payloadGPR(), arg1.tagGPR(), arg2.payloadGPR(), arg2.tagGPR()); +#endif + } + + void setupArgumentsWithExecState(JSValueRegs arg1, TrustedImmPtr arg2) + { +#if USE(JSVALUE64) + setupArgumentsWithExecState(arg1.gpr(), arg2); +#else + setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1.payloadGPR(), arg1.tagGPR(), arg2); +#endif + } + + void setupArgumentsWithExecState(JSValueRegs arg1, JSValueRegs arg2, TrustedImmPtr arg3) + { +#if USE(JSVALUE64) + setupArgumentsWithExecState(arg1.gpr(), arg2.gpr(), arg3); +#else + setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1.payloadGPR(), arg1.tagGPR(), arg2.payloadGPR(), arg2.tagGPR(), arg3); +#endif + } + + void setupArgumentsWithExecState(JSValueRegs arg1, JSValueRegs arg2, TrustedImmPtr arg3, TrustedImmPtr arg4) + { +#if USE(JSVALUE64) + setupArgumentsWithExecState(arg1.gpr(), arg2.gpr(), arg3, arg4); +#else + setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1.payloadGPR(), arg1.tagGPR(), arg2.payloadGPR(), arg2.tagGPR(), arg3, arg4); +#endif + } + + void setupArgumentsWithExecState(JSValueRegs arg1, TrustedImmPtr arg2, TrustedImmPtr arg3) + { +#if USE(JSVALUE64) + setupArgumentsWithExecState(arg1.gpr(), arg2, arg3); +#else + setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1.payloadGPR(), arg1.tagGPR(), arg2, arg3); +#endif + } + + void setupArguments(JSValueRegs arg1) + { +#if USE(JSVALUE64) + setupArguments(arg1.gpr()); +#else + setupArguments(arg1.payloadGPR(), arg1.tagGPR()); +#endif + } + void setupResults(GPRReg destA, GPRReg destB) { GPRReg srcA = GPRInfo::returnValueGPR; @@ -1570,18 +2391,218 @@ public: swap(destA, destB); } + void setupResults(JSValueRegs regs) + { +#if USE(JSVALUE64) + move(GPRInfo::returnValueGPR, regs.gpr()); +#else + setupResults(regs.payloadGPR(), regs.tagGPR()); +#endif + } + void jumpToExceptionHandler() { - // genericUnwind() leaves the handler CallFrame* in vm->callFrameForThrow, + // genericUnwind() leaves the handler CallFrame* in vm->callFrameForCatch, // and the address of the handler in vm->targetMachinePCForThrow. loadPtr(&vm()->targetMachinePCForThrow, GPRInfo::regT1); jump(GPRInfo::regT1); } + + void prepareForTailCallSlow(GPRReg calleeGPR = InvalidGPRReg) + { + GPRReg temp1 = calleeGPR == GPRInfo::regT0 ? GPRInfo::regT3 : GPRInfo::regT0; + GPRReg temp2 = calleeGPR == GPRInfo::regT1 ? GPRInfo::regT3 : GPRInfo::regT1; + GPRReg temp3 = calleeGPR == GPRInfo::regT2 ? 
GPRInfo::regT3 : GPRInfo::regT2; + + GPRReg newFramePointer = temp1; + GPRReg newFrameSizeGPR = temp2; + { + // The old frame size is its number of arguments (or number of + // parameters in case of arity fixup), plus the frame header size, + // aligned + GPRReg oldFrameSizeGPR = temp2; + { + GPRReg argCountGPR = oldFrameSizeGPR; + load32(Address(framePointerRegister, CallFrameSlot::argumentCount * static_cast(sizeof(Register)) + PayloadOffset), argCountGPR); + + { + GPRReg numParametersGPR = temp1; + { + GPRReg codeBlockGPR = numParametersGPR; + loadPtr(Address(framePointerRegister, CallFrameSlot::codeBlock * static_cast(sizeof(Register))), codeBlockGPR); + load32(Address(codeBlockGPR, CodeBlock::offsetOfNumParameters()), numParametersGPR); + } + + ASSERT(numParametersGPR != argCountGPR); + Jump argumentCountWasNotFixedUp = branch32(BelowOrEqual, numParametersGPR, argCountGPR); + move(numParametersGPR, argCountGPR); + argumentCountWasNotFixedUp.link(this); + } + + add32(TrustedImm32(stackAlignmentRegisters() + CallFrame::headerSizeInRegisters - 1), argCountGPR, oldFrameSizeGPR); + and32(TrustedImm32(-stackAlignmentRegisters()), oldFrameSizeGPR); + // We assume < 2^28 arguments + mul32(TrustedImm32(sizeof(Register)), oldFrameSizeGPR, oldFrameSizeGPR); + } + + // The new frame pointer is at framePointer + oldFrameSize - newFrameSize + ASSERT(newFramePointer != oldFrameSizeGPR); + addPtr(framePointerRegister, oldFrameSizeGPR, newFramePointer); + + // The new frame size is just the number of arguments plus the + // frame header size, aligned + ASSERT(newFrameSizeGPR != newFramePointer); + load32(Address(stackPointerRegister, CallFrameSlot::argumentCount * static_cast(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)), + newFrameSizeGPR); + add32(TrustedImm32(stackAlignmentRegisters() + CallFrame::headerSizeInRegisters - 1), newFrameSizeGPR); + and32(TrustedImm32(-stackAlignmentRegisters()), newFrameSizeGPR); + // We assume < 2^28 arguments + mul32(TrustedImm32(sizeof(Register)), newFrameSizeGPR, newFrameSizeGPR); + } + + GPRReg tempGPR = temp3; + ASSERT(tempGPR != newFramePointer && tempGPR != newFrameSizeGPR); + + // We don't need the current frame beyond this point. Masquerade as our + // caller. +#if CPU(ARM) || CPU(ARM64) + loadPtr(Address(framePointerRegister, sizeof(void*)), linkRegister); + subPtr(TrustedImm32(2 * sizeof(void*)), newFrameSizeGPR); +#elif CPU(MIPS) + loadPtr(Address(framePointerRegister, sizeof(void*)), returnAddressRegister); + subPtr(TrustedImm32(2 * sizeof(void*)), newFrameSizeGPR); +#elif CPU(X86) || CPU(X86_64) + loadPtr(Address(framePointerRegister, sizeof(void*)), tempGPR); + push(tempGPR); + subPtr(TrustedImm32(sizeof(void*)), newFrameSizeGPR); +#else + UNREACHABLE_FOR_PLATFORM(); +#endif + subPtr(newFrameSizeGPR, newFramePointer); + loadPtr(Address(framePointerRegister), framePointerRegister); + + + // We need to move the newFrameSizeGPR slots above the stack pointer by + // newFramePointer registers. We use pointer-sized chunks. + MacroAssembler::Label copyLoop(label()); + + subPtr(TrustedImm32(sizeof(void*)), newFrameSizeGPR); + loadPtr(BaseIndex(stackPointerRegister, newFrameSizeGPR, TimesOne), tempGPR); + storePtr(tempGPR, BaseIndex(newFramePointer, newFrameSizeGPR, TimesOne)); + + branchTest32(MacroAssembler::NonZero, newFrameSizeGPR).linkTo(copyLoop, this); + + // Ready for a jump! 
+ move(newFramePointer, stackPointerRegister); + } + +#if NUMBER_OF_ARGUMENT_REGISTERS >= 4 + template + void setupStubArgsGPR(std::array destinations, std::array sources) + { + if (!ASSERT_DISABLED) { + RegisterSet set; + for (GPRReg dest : destinations) + set.set(dest); + ASSERT_WITH_MESSAGE(set.numberOfSetGPRs() == NumberOfRegisters, "Destinations should not be aliased."); + } + + typedef std::pair RegPair; + Vector pairs; + + for (unsigned i = 0; i < NumberOfRegisters; ++i) { + if (sources[i] != destinations[i]) + pairs.append(std::make_pair(sources[i], destinations[i])); + } + +#if !ASSERT_DISABLED + auto numUniqueSources = [&] () -> unsigned { + RegisterSet set; + for (auto& pair : pairs) { + GPRReg source = pair.first; + set.set(source); + } + return set.numberOfSetGPRs(); + }; + + auto numUniqueDests = [&] () -> unsigned { + RegisterSet set; + for (auto& pair : pairs) { + GPRReg dest = pair.second; + set.set(dest); + } + return set.numberOfSetGPRs(); + }; +#endif + + while (pairs.size()) { + RegisterSet freeDestinations; + for (auto& pair : pairs) { + GPRReg dest = pair.second; + freeDestinations.set(dest); + } + for (auto& pair : pairs) { + GPRReg source = pair.first; + freeDestinations.clear(source); + } + + if (freeDestinations.numberOfSetGPRs()) { + bool madeMove = false; + for (unsigned i = 0; i < pairs.size(); i++) { + auto& pair = pairs[i]; + GPRReg source = pair.first; + GPRReg dest = pair.second; + if (freeDestinations.get(dest)) { + move(source, dest); + pairs.remove(i); + madeMove = true; + break; + } + } + ASSERT_UNUSED(madeMove, madeMove); + continue; + } + + ASSERT(numUniqueDests() == numUniqueSources()); + ASSERT(numUniqueDests() == pairs.size()); + // The set of source and destination registers are equivalent sets. This means we don't have + // any free destination registers that won't also clobber a source. We get around this by + // exchanging registers. + + GPRReg source = pairs[0].first; + GPRReg dest = pairs[0].second; + swap(source, dest); + pairs.remove(0); + + GPRReg newSource = source; + for (auto& pair : pairs) { + GPRReg source = pair.first; + if (source == dest) { + pair.first = newSource; + break; + } + } + + // We may have introduced pairs that have the same source and destination. Remove those now. + for (unsigned i = 0; i < pairs.size(); i++) { + auto& pair = pairs[i]; + if (pair.first == pair.second) { + pairs.remove(i); + i--; + } + } + } + } +#endif // NUMBER_OF_ARGUMENT_REGISTERS >= 4 + + // These operations clobber all volatile registers. They assume that there is room on the top of + // stack to marshall call arguments. + void logShadowChickenProloguePacket(GPRReg shadowPacket, GPRReg scratch1, GPRReg scope); + void logShadowChickenTailPacket(GPRReg shadowPacket, JSValueRegs thisRegs, GPRReg scope, CodeBlock*, CallSiteIndex); + // Leaves behind a pointer to the Packet we should write to in shadowPacket. + void ensureShadowChickenPacket(GPRReg shadowPacket, GPRReg scratch1NonArgGPR, GPRReg scratch2); }; } // namespace JSC #endif // ENABLE(JIT) - -#endif // CCallHelpers_h - diff --git a/Source/JavaScriptCore/jit/CachedRecovery.cpp b/Source/JavaScriptCore/jit/CachedRecovery.cpp new file mode 100644 index 000000000..f4aacc6c8 --- /dev/null +++ b/Source/JavaScriptCore/jit/CachedRecovery.cpp @@ -0,0 +1,71 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "CachedRecovery.h" + +#if ENABLE(JIT) + +namespace JSC { + +// We prefer loading doubles and undetermined JSValues into FPRs +// because it would otherwise use up GPRs. Two in JSVALUE32_64. +bool CachedRecovery::loadsIntoFPR() const +{ + switch (recovery().technique()) { + case DoubleDisplacedInJSStack: + case DisplacedInJSStack: +#if USE(JSVALUE64) + case CellDisplacedInJSStack: +#endif + return true; + + default: + return false; + } +} + +// Integers, booleans and cells can be loaded into GPRs +bool CachedRecovery::loadsIntoGPR() const +{ + switch (recovery().technique()) { + case Int32DisplacedInJSStack: +#if USE(JSVALUE64) + case Int52DisplacedInJSStack: + case StrictInt52DisplacedInJSStack: + case DisplacedInJSStack: +#endif + case BooleanDisplacedInJSStack: + case CellDisplacedInJSStack: + return true; + + default: + return false; + } +} + +} // namespace JSC + +#endif // ENABLE(JIT) diff --git a/Source/JavaScriptCore/jit/CachedRecovery.h b/Source/JavaScriptCore/jit/CachedRecovery.h new file mode 100644 index 000000000..f627ac901 --- /dev/null +++ b/Source/JavaScriptCore/jit/CachedRecovery.h @@ -0,0 +1,134 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#pragma once + +#if ENABLE(JIT) + +#include "ValueRecovery.h" +#include "VirtualRegister.h" +#include + +namespace JSC { + +// A CachedRecovery is a wrapper around a ValueRecovery that records where said +// value should go on the stack and/or in registers. Whenever we perform an +// operation changing the ValueRecovery, we update the CachedRecovery's member +// in place. +class CachedRecovery { +public: + CachedRecovery(ValueRecovery recovery) + : m_recovery { recovery } + { + } + + CachedRecovery(CachedRecovery&) = delete; + CachedRecovery(CachedRecovery&&) = delete; + CachedRecovery& operator=(CachedRecovery&) = delete; + CachedRecovery& operator=(CachedRecovery&&) = delete; + + const Vector& targets() const { return m_targets; } + + void addTarget(VirtualRegister reg) + { + ASSERT(m_targets.isEmpty() || m_targets.last() < reg); + m_targets.append(reg); + } + + void removeTarget(VirtualRegister reg) + { + ASSERT_UNUSED(reg, m_targets.last() == reg); + m_targets.shrink(m_targets.size() - 1); + } + + void clearTargets() + { + m_targets.clear(); + } + + void setWantedJSValueRegs(JSValueRegs jsValueRegs) + { + ASSERT(m_wantedFPR == InvalidFPRReg); + m_wantedJSValueRegs = jsValueRegs; + } + + void setWantedFPR(FPRReg fpr) + { + ASSERT(!m_wantedJSValueRegs); + m_wantedFPR = fpr; + } + + // Determine whether converting this recovery into a JSValue will + // require additional GPRs and/or FPRs. + // This is guaranteed to only depend on the DataFormat, and the + // result of these calls will stay valid after loads and/or stores. + bool boxingRequiresGPR() const + { +#if USE(JSVALUE64) + return recovery().dataFormat() == DataFormatDouble; +#else + return false; +#endif + } + bool boxingRequiresFPR() const + { +#if USE(JSVALUE64) + switch (recovery().dataFormat()) { + case DataFormatInt52: + case DataFormatStrictInt52: + return true; + + default: + return false; + } +#else + return false; +#endif + } + + // This is used to determine what kind of register we need to be + // able to load a recovery. We only use it when a direct load is + // currently impossible, to determine whether we should spill a + // GPR or an FPR for loading this value. + bool loadsIntoGPR() const; + bool loadsIntoFPR() const; + + ValueRecovery recovery() const { return m_recovery; } + + void setRecovery(ValueRecovery recovery) { m_recovery = recovery; } + + JSValueRegs wantedJSValueRegs() const { return m_wantedJSValueRegs; } + + FPRReg wantedFPR() const { return m_wantedFPR; } +private: + ValueRecovery m_recovery; + JSValueRegs m_wantedJSValueRegs; + FPRReg m_wantedFPR { InvalidFPRReg }; + Vector m_targets; +}; + +} // namespace JSC + +#endif // ENABLE(JIT) diff --git a/Source/JavaScriptCore/jit/CallFrameShuffleData.cpp b/Source/JavaScriptCore/jit/CallFrameShuffleData.cpp new file mode 100644 index 000000000..567202c15 --- /dev/null +++ b/Source/JavaScriptCore/jit/CallFrameShuffleData.cpp @@ -0,0 +1,68 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. 
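As the class comment above says, a CachedRecovery ties one ValueRecovery (where a value currently lives) to every place it still has to go: an ordered list of virtual-register targets plus, optionally, a wanted register. A simplified stand-alone model of that record is sketched below; plain ints stand in for VirtualRegister and the register types, so it is illustrative only, not the JSC API.

    // Simplified stand-alone model of what a CachedRecovery records: one source
    // fanned out to several stack targets and at most one wanted register.
    #include <cassert>
    #include <vector>

    struct Recovery { /* where the value currently lives: register, stack slot, constant... */ };

    class CachedRecoveryModel {
    public:
        explicit CachedRecoveryModel(Recovery recovery) : m_recovery(recovery) { }

        // Targets are kept sorted so a shuffler can walk them in frame order.
        void addTarget(int virtualRegister)
        {
            assert(m_targets.empty() || m_targets.back() < virtualRegister);
            m_targets.push_back(virtualRegister);
        }

        void setWantedRegister(int reg) { m_wantedRegister = reg; }

        const std::vector<int>& targets() const { return m_targets; }
        Recovery recovery() const { return m_recovery; }

    private:
        Recovery m_recovery;
        std::vector<int> m_targets;
        int m_wantedRegister { -1 }; // -1 ~ "no register wanted yet"
    };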
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "CallFrameShuffleData.h" + +#if ENABLE(JIT) + +#include "CCallHelpers.h" +#include "CodeBlock.h" + +namespace JSC { + +#if USE(JSVALUE64) + +void CallFrameShuffleData::setupCalleeSaveRegisters(CodeBlock* codeBlock) +{ + RegisterSet calleeSaveRegisters { RegisterSet::vmCalleeSaveRegisters() }; + RegisterAtOffsetList* registerSaveLocations = codeBlock->calleeSaveRegisters(); + + for (size_t i = 0; i < registerSaveLocations->size(); ++i) { + RegisterAtOffset entry { registerSaveLocations->at(i) }; + if (!calleeSaveRegisters.get(entry.reg())) + continue; + + VirtualRegister saveSlot { entry.offsetAsIndex() }; + registers[entry.reg()] + = ValueRecovery::displacedInJSStack(saveSlot, DataFormatJS); + } + + for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) { + if (!calleeSaveRegisters.get(reg)) + continue; + + if (registers[reg]) + continue; + + registers[reg] = ValueRecovery::inRegister(reg, DataFormatJS); + } +} + +#endif // USE(JSVALUE64) + +} // namespace JSC + +#endif // ENABLE(JIT) diff --git a/Source/JavaScriptCore/jit/CallFrameShuffleData.h b/Source/JavaScriptCore/jit/CallFrameShuffleData.h new file mode 100644 index 000000000..7e3ad5f52 --- /dev/null +++ b/Source/JavaScriptCore/jit/CallFrameShuffleData.h @@ -0,0 +1,52 @@ +/* + * Copyright (C) 2015-2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#pragma once + +#if ENABLE(JIT) + +#include "RegisterMap.h" +#include "ValueRecovery.h" + +namespace JSC { + +struct CallFrameShuffleData { + WTF_MAKE_FAST_ALLOCATED; +public: + unsigned numLocals { UINT_MAX }; + ValueRecovery callee; + Vector args; + unsigned numPassedArgs { UINT_MAX }; +#if USE(JSVALUE64) + RegisterMap registers; + GPRReg tagTypeNumber { InvalidGPRReg }; + + void setupCalleeSaveRegisters(CodeBlock*); +#endif +}; + +} // namespace JSC + +#endif // ENABLE(JIT) diff --git a/Source/JavaScriptCore/jit/CallFrameShuffler.cpp b/Source/JavaScriptCore/jit/CallFrameShuffler.cpp new file mode 100644 index 000000000..ffbc7e6b0 --- /dev/null +++ b/Source/JavaScriptCore/jit/CallFrameShuffler.cpp @@ -0,0 +1,776 @@ +/* + * Copyright (C) 2015-2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "CallFrameShuffler.h" + +#if ENABLE(JIT) + +#include "CachedRecovery.h" +#include "CCallHelpers.h" +#include "CodeBlock.h" + +namespace JSC { + +CallFrameShuffler::CallFrameShuffler(CCallHelpers& jit, const CallFrameShuffleData& data) + : m_jit(jit) + , m_oldFrame(data.numLocals + CallerFrameAndPC::sizeInRegisters, nullptr) + , m_newFrame(data.args.size() + CallFrame::headerSizeInRegisters, nullptr) + , m_alignedOldFrameSize(CallFrame::headerSizeInRegisters + + roundArgumentCountToAlignFrame(jit.codeBlock()->numParameters())) + , m_alignedNewFrameSize(CallFrame::headerSizeInRegisters + + roundArgumentCountToAlignFrame(data.args.size())) + , m_frameDelta(m_alignedNewFrameSize - m_alignedOldFrameSize) + , m_lockedRegisters(RegisterSet::allRegisters()) + , m_numPassedArgs(data.numPassedArgs) +{ + // We are allowed all the usual registers... 
+ for (unsigned i = GPRInfo::numberOfRegisters; i--; ) + m_lockedRegisters.clear(GPRInfo::toRegister(i)); + for (unsigned i = FPRInfo::numberOfRegisters; i--; ) + m_lockedRegisters.clear(FPRInfo::toRegister(i)); + // ... as well as the runtime registers. + m_lockedRegisters.exclude(RegisterSet::vmCalleeSaveRegisters()); + + ASSERT(!data.callee.isInJSStack() || data.callee.virtualRegister().isLocal()); + addNew(VirtualRegister(CallFrameSlot::callee), data.callee); + + for (size_t i = 0; i < data.args.size(); ++i) { + ASSERT(!data.args[i].isInJSStack() || data.args[i].virtualRegister().isLocal()); + addNew(virtualRegisterForArgument(i), data.args[i]); + } + +#if USE(JSVALUE64) + for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) { + if (!data.registers[reg].isSet()) + continue; + + if (reg.isGPR()) + addNew(JSValueRegs(reg.gpr()), data.registers[reg]); + else + addNew(reg.fpr(), data.registers[reg]); + } + + m_tagTypeNumber = data.tagTypeNumber; + if (m_tagTypeNumber != InvalidGPRReg) + lockGPR(m_tagTypeNumber); +#endif +} + +void CallFrameShuffler::dump(PrintStream& out) const +{ + static const char* delimiter = " +-------------------------------+ "; + static const char* dangerDelimiter = " X-------------------------------X "; + static const char* dangerBoundsDelimiter = " XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX "; + static const char* emptySpace = " "; + out.print(" "); + out.print(" Old frame "); + out.print(" New frame "); + out.print("\n"); + int totalSize = m_alignedOldFrameSize + std::max(numLocals(), m_alignedNewFrameSize) + 3; + for (int i = 0; i < totalSize; ++i) { + VirtualRegister old { m_alignedOldFrameSize - i - 1 }; + VirtualRegister newReg { old + m_frameDelta }; + + if (!isValidOld(old) && old != firstOld() - 1 + && !isValidNew(newReg) && newReg != firstNew() - 1) + continue; + + out.print(" "); + if (dangerFrontier() >= firstNew() + && (newReg == dangerFrontier() || newReg == firstNew() - 1)) + out.print(dangerBoundsDelimiter); + else if (isValidOld(old)) + out.print(isValidNew(newReg) && isDangerNew(newReg) ? dangerDelimiter : delimiter); + else if (old == firstOld() - 1) + out.print(delimiter); + else + out.print(emptySpace); + if (dangerFrontier() >= firstNew() + && (newReg == dangerFrontier() || newReg == firstNew() - 1)) + out.print(dangerBoundsDelimiter); + else if (isValidNew(newReg) || newReg == firstNew() - 1) + out.print(isDangerNew(newReg) ? dangerDelimiter : delimiter); + else + out.print(emptySpace); + out.print("\n"); + if (old == firstOld()) + out.print(" sp --> "); + else if (!old.offset()) + out.print(" fp --> "); + else + out.print(" "); + if (isValidOld(old)) { + if (getOld(old)) { + auto str = toCString(old); + if (isValidNew(newReg) && isDangerNew(newReg)) + out.printf(" X %18s X ", str.data()); + else + out.printf(" | %18s | ", str.data()); + } else if (isValidNew(newReg) && isDangerNew(newReg)) + out.printf(" X%30s X ", ""); + else + out.printf(" |%30s | ", ""); + } else + out.print(emptySpace); + if (isValidNew(newReg)) { + const char d = isDangerNew(newReg) ? 
'X' : '|'; + auto str = toCString(newReg); + if (getNew(newReg)) { + if (getNew(newReg)->recovery().isConstant()) + out.printf(" %c%8s <- constant %c ", d, str.data(), d); + else { + auto recoveryStr = toCString(getNew(newReg)->recovery()); + out.printf(" %c%8s <- %18s %c ", d, str.data(), + recoveryStr.data(), d); + } + } else if (newReg == VirtualRegister { CallFrameSlot::argumentCount }) + out.printf(" %c%8s <- %18zu %c ", d, str.data(), argCount(), d); + else + out.printf(" %c%30s %c ", d, "", d); + } else + out.print(emptySpace); + if (newReg == firstNew() - m_newFrameOffset && !isSlowPath()) + out.print(" <-- new sp before jump (current ", m_newFrameBase, ") "); + if (newReg == firstNew()) + out.print(" <-- new fp after prologue"); + out.print("\n"); + } + out.print(" "); + out.print(" Live registers "); + out.print(" Wanted registers "); + out.print("\n"); + for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) { + CachedRecovery* oldCachedRecovery { m_registers[reg] }; + CachedRecovery* newCachedRecovery { m_newRegisters[reg] }; + if (!oldCachedRecovery && !newCachedRecovery) + continue; + out.print(" "); + if (oldCachedRecovery) { + auto str = toCString(reg); + out.printf(" %8s ", str.data()); + } else + out.print(emptySpace); +#if USE(JSVALUE32_64) + if (newCachedRecovery) { + JSValueRegs wantedJSValueRegs { newCachedRecovery->wantedJSValueRegs() }; + if (reg.isFPR()) + out.print(reg, " <- ", newCachedRecovery->recovery()); + else { + if (reg.gpr() == wantedJSValueRegs.tagGPR()) + out.print(reg.gpr(), " <- tag(", newCachedRecovery->recovery(), ")"); + else + out.print(reg.gpr(), " <- payload(", newCachedRecovery->recovery(), ")"); + } + } +#else + if (newCachedRecovery) + out.print(" ", reg, " <- ", newCachedRecovery->recovery()); +#endif + out.print("\n"); + } + out.print(" Locked registers: "); + bool firstLocked { true }; + for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) { + if (m_lockedRegisters.get(reg)) { + out.print(firstLocked ? 
"" : ", ", reg); + firstLocked = false; + } + } + out.print("\n"); + + if (isSlowPath()) + out.print(" Using fp-relative addressing for slow path call\n"); + else + out.print(" Using sp-relative addressing for jump (using ", m_newFrameBase, " as new sp)\n"); + if (m_oldFrameOffset) + out.print(" Old frame offset is ", m_oldFrameOffset, "\n"); + if (m_newFrameOffset) + out.print(" New frame offset is ", m_newFrameOffset, "\n"); +#if USE(JSVALUE64) + if (m_tagTypeNumber != InvalidGPRReg) + out.print(" TagTypeNumber is currently in ", m_tagTypeNumber, "\n"); +#endif +} + +CachedRecovery* CallFrameShuffler::getCachedRecovery(ValueRecovery recovery) +{ + ASSERT(!recovery.isConstant()); + if (recovery.isInGPR()) + return m_registers[recovery.gpr()]; + if (recovery.isInFPR()) + return m_registers[recovery.fpr()]; +#if USE(JSVALUE32_64) + if (recovery.technique() == InPair) { + ASSERT(m_registers[recovery.tagGPR()] == m_registers[recovery.payloadGPR()]); + return m_registers[recovery.payloadGPR()]; + } +#endif + ASSERT(recovery.isInJSStack()); + return getOld(recovery.virtualRegister()); +} + +CachedRecovery* CallFrameShuffler::setCachedRecovery(ValueRecovery recovery, CachedRecovery* cachedRecovery) +{ + ASSERT(!recovery.isConstant()); + if (recovery.isInGPR()) + return m_registers[recovery.gpr()] = cachedRecovery; + if (recovery.isInFPR()) + return m_registers[recovery.fpr()] = cachedRecovery; +#if USE(JSVALUE32_64) + if (recovery.technique() == InPair) { + m_registers[recovery.tagGPR()] = cachedRecovery; + return m_registers[recovery.payloadGPR()] = cachedRecovery; + } +#endif + ASSERT(recovery.isInJSStack()); + setOld(recovery.virtualRegister(), cachedRecovery); + return cachedRecovery; +} + +void CallFrameShuffler::spill(CachedRecovery& cachedRecovery) +{ + ASSERT(!isSlowPath()); + ASSERT(cachedRecovery.recovery().isInRegisters()); + + VirtualRegister spillSlot { 0 }; + for (VirtualRegister slot = firstOld(); slot <= lastOld(); slot += 1) { + if (slot >= newAsOld(firstNew())) + break; + + if (getOld(slot)) + continue; + + spillSlot = slot; + break; + } + // We must have enough slots to be able to fit the whole callee's + // frame for the slow path - unless we are in the FTL. In that + // case, we are allowed to extend the frame *once*, since we are + // guaranteed to have enough available space for that. 
+ if (spillSlot >= newAsOld(firstNew()) || !spillSlot.isLocal()) { + RELEASE_ASSERT(!m_didExtendFrame); + extendFrameIfNeeded(); + spill(cachedRecovery); + return; + } + + if (verbose) + dataLog(" * Spilling ", cachedRecovery.recovery(), " into ", spillSlot, "\n"); + auto format = emitStore(cachedRecovery, addressForOld(spillSlot)); + ASSERT(format != DataFormatNone); + updateRecovery(cachedRecovery, ValueRecovery::displacedInJSStack(spillSlot, format)); +} + +void CallFrameShuffler::emitDeltaCheck() +{ + if (ASSERT_DISABLED) + return; + + GPRReg scratchGPR { getFreeGPR() }; + if (scratchGPR != InvalidGPRReg) { + if (verbose) + dataLog(" Using ", scratchGPR, " for the fp-sp delta check\n"); + m_jit.move(MacroAssembler::stackPointerRegister, scratchGPR); + m_jit.subPtr(GPRInfo::callFrameRegister, scratchGPR); + MacroAssembler::Jump ok = m_jit.branch32( + MacroAssembler::Equal, scratchGPR, + MacroAssembler::TrustedImm32(-numLocals() * sizeof(Register))); + m_jit.abortWithReason(JITUnexpectedCallFrameSize); + ok.link(&m_jit); + } else if (verbose) + dataLog(" Skipping the fp-sp delta check since there is too much pressure"); +} + +void CallFrameShuffler::extendFrameIfNeeded() +{ + ASSERT(!m_didExtendFrame); + + VirtualRegister firstRead { firstOld() }; + for (; firstRead <= virtualRegisterForLocal(0); firstRead += 1) { + if (getOld(firstRead)) + break; + } + size_t availableSize = static_cast(firstRead.offset() - firstOld().offset()); + size_t wantedSize = m_newFrame.size() + m_newFrameOffset; + + if (availableSize < wantedSize) { + size_t delta = WTF::roundUpToMultipleOf(stackAlignmentRegisters(), wantedSize - availableSize); + m_oldFrame.grow(m_oldFrame.size() + delta); + for (size_t i = 0; i < delta; ++i) + m_oldFrame[m_oldFrame.size() - i - 1] = nullptr; + m_jit.subPtr(MacroAssembler::TrustedImm32(delta * sizeof(Register)), MacroAssembler::stackPointerRegister); + + if (isSlowPath()) + m_frameDelta = numLocals() + CallerFrameAndPC::sizeInRegisters; + else + m_oldFrameOffset = numLocals(); + + if (verbose) + dataLogF(" Not enough space - extending the old frame %zu slot\n", delta); + } + + m_didExtendFrame = true; +} + +void CallFrameShuffler::prepareForSlowPath() +{ + ASSERT(isUndecided()); + emitDeltaCheck(); + + m_frameDelta = numLocals() + CallerFrameAndPC::sizeInRegisters; + m_newFrameBase = MacroAssembler::stackPointerRegister; + m_newFrameOffset = -CallerFrameAndPC::sizeInRegisters; + + if (verbose) + dataLog("\n\nPreparing frame for slow path call:\n"); + + // When coming from the FTL, we need to extend the frame. In other + // cases, we may end up extending the frame if we previously + // spilled things (e.g. in polymorphic cache). + extendFrameIfNeeded(); + + if (verbose) + dataLog(*this); + + prepareAny(); + + if (verbose) + dataLog("Ready for slow path call!\n"); +} + +void CallFrameShuffler::prepareForTailCall() +{ + ASSERT(isUndecided()); + emitDeltaCheck(); + + // We'll use sp-based indexing so that we can load the + // caller's frame pointer into the fpr immediately + m_oldFrameBase = MacroAssembler::stackPointerRegister; + m_oldFrameOffset = numLocals(); + m_newFrameBase = acquireGPR(); +#if CPU(X86) + // We load the frame pointer manually, but we need to ask the + // algorithm to move the return PC for us (it'd probably + // require a write to the danger zone). Since it'd be awkward + // to ask for half a value move, we ask that the whole thing + // be moved for us. 
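+    // (Assumed rationale, for exposition only: on 32-bit x86 the caller frame
+    // pointer and the return PC presumably share the single 8-byte Register
+    // slot at VirtualRegister 0, so the addNew() below re-targets that whole
+    // slot rather than just the PC half of it.)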
+ addNew(VirtualRegister { 0 }, + ValueRecovery::displacedInJSStack(VirtualRegister(0), DataFormatJS)); + + // sp will point to head0 and we will move it up half a slot + // manually + m_newFrameOffset = 0; +#elif CPU(ARM) || CPU(MIPS) + // We load the the frame pointer and link register + // manually. We could ask the algorithm to load them for us, + // and it would allow us to use the link register as an extra + // temporary - but it'd mean that the frame pointer can also + // be used as an extra temporary, so we keep the link register + // locked instead. + + // sp will point to head1 since the callee's prologue pushes + // the call frame and link register. + m_newFrameOffset = -1; +#elif CPU(ARM64) + // We load the frame pointer and link register manually. We + // could ask the algorithm to load the link register for us + // (which would allow for its use as an extra temporary), but + // since its not in GPRInfo, we can't do it. + + // sp will point to head2 since the callee's prologue pushes the + // call frame and link register + m_newFrameOffset = -2; +#elif CPU(X86_64) + // We load the frame pointer manually, but we ask the + // algorithm to move the return PC for us (it'd probably + // require a write in the danger zone) + addNew(VirtualRegister { 1 }, + ValueRecovery::displacedInJSStack(VirtualRegister(1), DataFormatJS)); + + // sp will point to head1 since the callee's prologue pushes + // the call frame register + m_newFrameOffset = -1; +#else + UNREACHABLE_FOR_PLATFORM(); +#endif + + if (verbose) + dataLog(" Emitting code for computing the new frame base\n"); + + // We compute the new frame base by first computing the top of the + // old frame (taking into account an argument count higher than + // the number of parameters), then substracting to it the aligned + // new frame size (adjusted). + m_jit.load32(MacroAssembler::Address(GPRInfo::callFrameRegister, CallFrameSlot::argumentCount * static_cast(sizeof(Register)) + PayloadOffset), m_newFrameBase); + MacroAssembler::Jump argumentCountOK = + m_jit.branch32(MacroAssembler::BelowOrEqual, m_newFrameBase, + MacroAssembler::TrustedImm32(m_jit.codeBlock()->numParameters())); + m_jit.add32(MacroAssembler::TrustedImm32(stackAlignmentRegisters() - 1 + CallFrame::headerSizeInRegisters), m_newFrameBase); + m_jit.and32(MacroAssembler::TrustedImm32(-stackAlignmentRegisters()), m_newFrameBase); + m_jit.mul32(MacroAssembler::TrustedImm32(sizeof(Register)), m_newFrameBase, m_newFrameBase); + MacroAssembler::Jump done = m_jit.jump(); + argumentCountOK.link(&m_jit); + m_jit.move( + MacroAssembler::TrustedImm32(m_alignedOldFrameSize * sizeof(Register)), + m_newFrameBase); + done.link(&m_jit); + + m_jit.addPtr(GPRInfo::callFrameRegister, m_newFrameBase); + m_jit.subPtr( + MacroAssembler::TrustedImm32( + (m_alignedNewFrameSize + m_newFrameOffset) * sizeof(Register)), + m_newFrameBase); + + // We load the link register manually for architectures that have one +#if CPU(ARM) || CPU(ARM64) + m_jit.loadPtr(MacroAssembler::Address(MacroAssembler::framePointerRegister, sizeof(void*)), + MacroAssembler::linkRegister); +#elif CPU(MIPS) + m_jit.loadPtr(MacroAssembler::Address(MacroAssembler::framePointerRegister, sizeof(void*)), + MacroAssembler::returnAddressRegister); +#endif + + // We want the frame pointer to always point to a valid frame, and + // we are going to trash the current one. Let's make it point to + // our caller's frame, since that's what we want to end up with. 
+ m_jit.loadPtr(MacroAssembler::Address(MacroAssembler::framePointerRegister), + MacroAssembler::framePointerRegister); + + if (verbose) + dataLog("Preparing frame for tail call:\n", *this); + + prepareAny(); + +#if CPU(X86) + if (verbose) + dataLog(" Simulating pop of the call frame register\n"); + m_jit.addPtr(MacroAssembler::TrustedImm32(sizeof(void*)), MacroAssembler::stackPointerRegister); +#endif + + if (verbose) + dataLog("Ready for tail call!\n"); +} + +bool CallFrameShuffler::tryWrites(CachedRecovery& cachedRecovery) +{ + ASSERT(m_newFrameBase != InvalidGPRReg); + + // If the value is already set up correctly, we don't have + // anything to do. + if (isSlowPath() && cachedRecovery.recovery().isInJSStack() + && cachedRecovery.targets().size() == 1 + && newAsOld(cachedRecovery.targets()[0]) == cachedRecovery.recovery().virtualRegister()) { + cachedRecovery.clearTargets(); + if (!cachedRecovery.wantedJSValueRegs() && cachedRecovery.wantedFPR() == InvalidFPRReg) + clearCachedRecovery(cachedRecovery.recovery()); + return true; + } + + if (!canLoadAndBox(cachedRecovery)) + return false; + + emitLoad(cachedRecovery); + emitBox(cachedRecovery); + ASSERT(cachedRecovery.recovery().isInRegisters() + || cachedRecovery.recovery().isConstant()); + + if (verbose) + dataLog(" * Storing ", cachedRecovery.recovery()); + for (size_t i = 0; i < cachedRecovery.targets().size(); ++i) { + VirtualRegister target { cachedRecovery.targets()[i] }; + ASSERT(!isDangerNew(target)); + if (verbose) + dataLog(!i ? " into " : ", and ", "NEW ", target); + emitStore(cachedRecovery, addressForNew(target)); + setNew(target, nullptr); + } + if (verbose) + dataLog("\n"); + cachedRecovery.clearTargets(); + if (!cachedRecovery.wantedJSValueRegs() && cachedRecovery.wantedFPR() == InvalidFPRReg) + clearCachedRecovery(cachedRecovery.recovery()); + + return true; +} + +bool CallFrameShuffler::performSafeWrites() +{ + VirtualRegister firstSafe; + VirtualRegister end { lastNew() + 1 }; + Vector failures; + + // For all cachedRecoveries that writes to the safe zone, if it + // doesn't also write to the danger zone, we try to perform + // the writes. This may free up danger slots, so we iterate + // again until it doesn't happen anymore. + // + // Note that even though we have a while block, we look at + // each slot of the new call frame at most once since in each + // iteration beyond the first, we only load up the portion of + // the new call frame that was dangerous and became safe due + // to the previous iteration. 
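+    // Worked example of one iteration (hypothetical layout): suppose a
+    // recovery whose only target lies above the danger frontier is itself
+    // read from the OLD slot at the frontier. Writing it is a safe write, and
+    // loading it frees that OLD slot, so the frontier drops and the next pass
+    // only has to revisit the slots that just became safe.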
+ do { + firstSafe = dangerFrontier() + 1; + if (verbose) + dataLog(" Trying safe writes (between NEW ", firstSafe, " and NEW ", end - 1, ")\n"); + bool didProgress = false; + for (VirtualRegister reg = firstSafe; reg < end; reg += 1) { + CachedRecovery* cachedRecovery = getNew(reg); + if (!cachedRecovery) { + if (verbose) + dataLog(" + ", reg, " is OK.\n"); + continue; + } + if (!hasOnlySafeWrites(*cachedRecovery)) { + if (verbose) { + dataLog(" - ", cachedRecovery->recovery(), " writes to NEW ", reg, + " but also has dangerous writes.\n"); + } + continue; + } + if (cachedRecovery->wantedJSValueRegs()) { + if (verbose) { + dataLog(" - ", cachedRecovery->recovery(), " writes to NEW ", reg, + " but is also needed in registers.\n"); + } + continue; + } + if (cachedRecovery->wantedFPR() != InvalidFPRReg) { + if (verbose) { + dataLog(" - ", cachedRecovery->recovery(), " writes to NEW ", reg, + " but is also needed in an FPR.\n"); + } + continue; + } + if (!tryWrites(*cachedRecovery)) { + if (verbose) + dataLog(" - Unable to write to NEW ", reg, " from ", cachedRecovery->recovery(), "\n"); + failures.append(reg); + } + didProgress = true; + } + end = firstSafe; + + // If we have cachedRecoveries that failed to write, it is + // because they are on the stack and we didn't have enough + // registers available at the time to load them into. If + // we have a free register, we should try again because it + // could free up some danger slots. + if (didProgress && hasFreeRegister()) { + Vector stillFailing; + for (VirtualRegister failed : failures) { + CachedRecovery* cachedRecovery = getNew(failed); + // It could have been handled later if it had + // several targets + if (!cachedRecovery) + continue; + + ASSERT(hasOnlySafeWrites(*cachedRecovery) + && !cachedRecovery->wantedJSValueRegs() + && cachedRecovery->wantedFPR() == InvalidFPRReg); + if (!tryWrites(*cachedRecovery)) + stillFailing.append(failed); + } + failures = WTFMove(stillFailing); + } + if (verbose && firstSafe != dangerFrontier() + 1) + dataLog(" We freed up danger slots!\n"); + } while (firstSafe != dangerFrontier() + 1); + + return failures.isEmpty(); +} + +void CallFrameShuffler::prepareAny() +{ + ASSERT(!isUndecided()); + + updateDangerFrontier(); + + // First, we try to store any value that goes above the danger + // frontier. This will never use more registers since we are only + // loading+storing if we ensure that any register used for the load + // will be freed up after the stores (i.e., all stores are above + // the danger frontier, and there is no wanted register). + performSafeWrites(); + + // At this point, we couldn't have more available registers than + // we have withouth spilling: all values currently in registers + // either require a write to the danger zone, or have a wanted + // register, which means that in any case they will have to go + // through registers again. + + // We now slowly free up the danger zone by first loading the old + // value on the danger frontier, spilling as many registers as + // needed to do so and ensuring that the corresponding slot in the + // new frame is now ready to be written. Then, we store the old + // value to its target location if possible (we could have failed + // to load it previously due to high pressure). Finally, we write + // to any of the newly safe slots that we can, which could free up + // registers (hence why we do it eagerly). 
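+    // In outline, the loop below walks the danger zone from the frontier down
+    // to the first new slot: at the frontier it loads and boxes the old value
+    // about to be clobbered, then eagerly performs whichever of the pending
+    // writes have become safe.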
+ for (VirtualRegister reg = dangerFrontier(); reg >= firstNew(); reg -= 1) { + if (reg == dangerFrontier()) { + if (verbose) + dataLog(" Next slot (NEW ", reg, ") is the danger frontier\n"); + CachedRecovery* cachedRecovery { getOld(newAsOld(dangerFrontier())) }; + ASSERT(cachedRecovery); + ensureLoad(*cachedRecovery); + emitLoad(*cachedRecovery); + ensureBox(*cachedRecovery); + emitBox(*cachedRecovery); + if (hasOnlySafeWrites(*cachedRecovery)) + tryWrites(*cachedRecovery); + } else if (verbose) + dataLog(" Next slot is NEW ", reg, "\n"); + + ASSERT(!isDangerNew(reg)); + CachedRecovery* cachedRecovery = getNew(reg); + // This could be one of the header slots we don't care about. + if (!cachedRecovery) { + if (verbose) + dataLog(" + ", reg, " is OK\n"); + continue; + } + + if (canLoadAndBox(*cachedRecovery) && hasOnlySafeWrites(*cachedRecovery) + && !cachedRecovery->wantedJSValueRegs() + && cachedRecovery->wantedFPR() == InvalidFPRReg) { + emitLoad(*cachedRecovery); + emitBox(*cachedRecovery); + bool writesOK = tryWrites(*cachedRecovery); + ASSERT_UNUSED(writesOK, writesOK); + } else if (verbose) + dataLog(" - ", cachedRecovery->recovery(), " can't be handled just yet.\n"); + } + ASSERT(dangerFrontier() < firstNew()); + + // Now, the danger zone is empty, but we still have a couple of + // things to do: + // + // 1) There could be remaining safe writes that failed earlier due + // to high register pressure and had nothing to do with the + // danger zone whatsoever. + // + // 2) Some wanted registers could have to be loaded (this could + // happen either when making a call to a new function with a + // lower number of arguments - since above here, we only load + // wanted registers when they are at the danger frontier -, or + // if a wanted register got spilled). + // + // 3) Some wanted registers could have been loaded in the wrong + // registers + // + // 4) We have to take care of some bookkeeping - namely, storing + // the argument count and updating the stack pointer. + + // At this point, we must have enough registers available for + // handling 1). None of the loads can fail because we have been + // eagerly freeing up registers in all the previous phases - so + // the only values that are in registers at this point must have + // wanted registers. + if (verbose) + dataLog(" Danger zone is clear, performing remaining writes.\n"); + for (VirtualRegister reg = firstNew(); reg <= lastNew(); reg += 1) { + CachedRecovery* cachedRecovery { getNew(reg) }; + if (!cachedRecovery) + continue; + + emitLoad(*cachedRecovery); + emitBox(*cachedRecovery); + bool writesOK = tryWrites(*cachedRecovery); + ASSERT_UNUSED(writesOK, writesOK); + } + +#if USE(JSVALUE64) + if (m_tagTypeNumber != InvalidGPRReg && m_newRegisters[m_tagTypeNumber]) + releaseGPR(m_tagTypeNumber); +#endif + + // Handle 2) by loading all registers. We don't have to do any + // writes, since they have been taken care of above. + if (verbose) + dataLog(" Loading wanted registers into registers\n"); + for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) { + CachedRecovery* cachedRecovery { m_newRegisters[reg] }; + if (!cachedRecovery) + continue; + + emitLoad(*cachedRecovery); + emitBox(*cachedRecovery); + ASSERT(cachedRecovery->targets().isEmpty()); + } + +#if USE(JSVALUE64) + if (m_tagTypeNumber != InvalidGPRReg) + releaseGPR(m_tagTypeNumber); +#endif + + // At this point, we have read everything we cared about from the + // stack, and written everything we had to to the stack. 
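+    // The !ASSERT_DISABLED block below is a debug-only sanity check: every
+    // slot of the new frame must have been written, and no remaining recovery
+    // may still have pending targets or live in the JS stack.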
+ if (verbose) + dataLog(" Callee frame is fully set up\n"); + if (!ASSERT_DISABLED) { + for (VirtualRegister reg = firstNew(); reg <= lastNew(); reg += 1) + ASSERT_UNUSED(reg, !getNew(reg)); + + for (CachedRecovery* cachedRecovery : m_cachedRecoveries) { + ASSERT_UNUSED(cachedRecovery, cachedRecovery->targets().isEmpty()); + ASSERT(!cachedRecovery->recovery().isInJSStack()); + } + } + + // We need to handle 4) first because it implies releasing + // m_newFrameBase, which could be a wanted register. + if (verbose) + dataLog(" * Storing the argument count into ", VirtualRegister { CallFrameSlot::argumentCount }, "\n"); + m_jit.store32(MacroAssembler::TrustedImm32(0), + addressForNew(VirtualRegister { CallFrameSlot::argumentCount }).withOffset(TagOffset)); + RELEASE_ASSERT(m_numPassedArgs != UINT_MAX); + m_jit.store32(MacroAssembler::TrustedImm32(m_numPassedArgs), + addressForNew(VirtualRegister { CallFrameSlot::argumentCount }).withOffset(PayloadOffset)); + + if (!isSlowPath()) { + ASSERT(m_newFrameBase != MacroAssembler::stackPointerRegister); + if (verbose) + dataLog(" Releasing the new frame base pointer\n"); + m_jit.move(m_newFrameBase, MacroAssembler::stackPointerRegister); + releaseGPR(m_newFrameBase); + } + + // Finally we handle 3) + if (verbose) + dataLog(" Ensuring wanted registers are in the right register\n"); + for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) { + CachedRecovery* cachedRecovery { m_newRegisters[reg] }; + if (!cachedRecovery) + continue; + + emitDisplace(*cachedRecovery); + } +} + +} // namespace JSC + +#endif // ENABLE(JIT) diff --git a/Source/JavaScriptCore/jit/CallFrameShuffler.h b/Source/JavaScriptCore/jit/CallFrameShuffler.h new file mode 100644 index 000000000..6c0ea33f0 --- /dev/null +++ b/Source/JavaScriptCore/jit/CallFrameShuffler.h @@ -0,0 +1,804 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#pragma once + +#if ENABLE(JIT) + +#include "CachedRecovery.h" +#include "CallFrameShuffleData.h" +#include "MacroAssembler.h" +#include "RegisterSet.h" +#include "StackAlignment.h" +#include + +namespace JSC { + +class CallFrameShuffler { + WTF_MAKE_FAST_ALLOCATED; +public: + CallFrameShuffler(CCallHelpers&, const CallFrameShuffleData&); + + void dump(PrintStream&) const; + + // Any register that has been locked or acquired must be released + // before calling prepareForTailCall() or prepareForSlowPath(). + void lockGPR(GPRReg gpr) + { + ASSERT(!m_lockedRegisters.get(gpr)); + m_lockedRegisters.set(gpr); + if (verbose) + dataLog(" * Locking ", gpr, "\n"); + } + + GPRReg acquireGPR() + { + ensureGPR(); + GPRReg gpr { getFreeGPR() }; + ASSERT(!m_registers[gpr]); + lockGPR(gpr); + return gpr; + } + + void releaseGPR(GPRReg gpr) + { + if (verbose) { + if (m_lockedRegisters.get(gpr)) + dataLog(" * Releasing ", gpr, "\n"); + else + dataLog(" * ", gpr, " was not locked\n"); + } + m_lockedRegisters.clear(gpr); + } + + void restoreGPR(GPRReg gpr) + { + if (!m_newRegisters[gpr]) + return; + + ensureGPR(); +#if USE(JSVALUE32_64) + GPRReg tempGPR { getFreeGPR() }; + lockGPR(tempGPR); + ensureGPR(); + releaseGPR(tempGPR); +#endif + emitDisplace(*m_newRegisters[gpr]); + } + + // You can only take a snapshot if the recovery has not started + // yet. The only operations that are valid before taking a + // snapshot are lockGPR(), acquireGPR() and releaseGPR(). + // + // Locking status is *NOT* preserved by the snapshot: it only + // contains information about where the + // arguments/callee/callee-save registers are by taking into + // account any spilling that acquireGPR() could have done. + CallFrameShuffleData snapshot() const + { + ASSERT(isUndecided()); + + CallFrameShuffleData data; + data.numLocals = numLocals(); + data.numPassedArgs = m_numPassedArgs; + data.callee = getNew(VirtualRegister { CallFrameSlot::callee })->recovery(); + data.args.resize(argCount()); + for (size_t i = 0; i < argCount(); ++i) + data.args[i] = getNew(virtualRegisterForArgument(i))->recovery(); + for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) { + CachedRecovery* cachedRecovery { m_newRegisters[reg] }; + if (!cachedRecovery) + continue; + +#if USE(JSVALUE64) + data.registers[reg] = cachedRecovery->recovery(); +#else + RELEASE_ASSERT_NOT_REACHED(); +#endif + } + return data; + } + + // Ask the shuffler to put the callee into some registers once the + // shuffling is done. You should call this before any of the + // prepare() methods, and must not take a snapshot afterwards, as + // this would crash 32bits platforms. + void setCalleeJSValueRegs(JSValueRegs jsValueRegs) + { + ASSERT(isUndecided()); + ASSERT(!getNew(jsValueRegs)); + CachedRecovery* cachedRecovery { getNew(VirtualRegister(CallFrameSlot::callee)) }; + ASSERT(cachedRecovery); + addNew(jsValueRegs, cachedRecovery->recovery()); + } + + // Ask the suhffler to assume the callee has already be checked to + // be a cell. This is a no-op on 64bit platforms, but allows to + // free up a GPR on 32bit platforms. + // You obviously must have ensured that this is the case before + // running any of the prepare methods. 
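+    // Illustrative call-site sketch (hypothetical names; the surrounding
+    // caller is assumed to have already emitted its own cell check):
+    //
+    //     CallFrameShuffler shuffler(jit, shuffleData);
+    //     shuffler.assumeCalleeIsCell();
+    //     shuffler.prepareForTailCall();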
+ void assumeCalleeIsCell() + { +#if USE(JSVALUE32_64) + CachedRecovery& calleeCachedRecovery = *getNew(VirtualRegister(CallFrameSlot::callee)); + switch (calleeCachedRecovery.recovery().technique()) { + case InPair: + updateRecovery( + calleeCachedRecovery, + ValueRecovery::inGPR( + calleeCachedRecovery.recovery().payloadGPR(), + DataFormatCell)); + break; + case DisplacedInJSStack: + updateRecovery( + calleeCachedRecovery, + ValueRecovery::displacedInJSStack( + calleeCachedRecovery.recovery().virtualRegister(), + DataFormatCell)); + break; + case InFPR: + case UnboxedCellInGPR: + case CellDisplacedInJSStack: + break; + case Constant: + ASSERT(calleeCachedRecovery.recovery().constant().isCell()); + break; + default: + RELEASE_ASSERT_NOT_REACHED(); + break; + } +#endif + } + + // This will emit code to build the new frame over the old one. + void prepareForTailCall(); + + // This will emit code to build the new frame as if performing a + // regular call. However, the callee save registers will be + // restored, and any locals (not the header or arguments) of the + // current frame can be overwritten. + // + // A frame built using prepareForSlowPath() should be used either + // to throw an exception in, or destroyed using + // CCallHelpers::prepareForTailCallSlow() followed by a tail call. + void prepareForSlowPath(); + +private: + static const bool verbose = false; + + CCallHelpers& m_jit; + + void prepareAny(); + + void spill(CachedRecovery&); + + // "box" is arguably a bad name here. The meaning is that after + // calling emitBox(), your ensure that subsequently calling + // emitStore() will be able to store the value without additional + // transformation. In particular, this is a no-op for constants, + // and is a complete no-op on 32bits since any unboxed value can + // still be stored by storing the payload and a statically known + // tag. 
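+    // For example, as encoded by CachedRecovery::boxingRequiresGPR/FPR in
+    // this patch: on 64-bit, boxing a DataFormatDouble value needs a spare
+    // GPR and boxing an Int52 needs a spare FPR, while other formats (and all
+    // of 32-bit) can be boxed without an extra register.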
+ void emitBox(CachedRecovery&); + + bool canBox(CachedRecovery& cachedRecovery) + { + if (cachedRecovery.boxingRequiresGPR() && getFreeGPR() == InvalidGPRReg) + return false; + + if (cachedRecovery.boxingRequiresFPR() && getFreeFPR() == InvalidFPRReg) + return false; + + return true; + } + + void ensureBox(CachedRecovery& cachedRecovery) + { + if (canBox(cachedRecovery)) + return; + + if (cachedRecovery.boxingRequiresGPR()) + ensureGPR(); + + if (cachedRecovery.boxingRequiresFPR()) + ensureFPR(); + } + + void emitLoad(CachedRecovery&); + + bool canLoad(CachedRecovery&); + + void ensureLoad(CachedRecovery& cachedRecovery) + { + if (canLoad(cachedRecovery)) + return; + + ASSERT(cachedRecovery.loadsIntoGPR() || cachedRecovery.loadsIntoFPR()); + + if (cachedRecovery.loadsIntoFPR()) { + if (cachedRecovery.loadsIntoGPR()) + ensureRegister(); + else + ensureFPR(); + } else + ensureGPR(); + } + + bool canLoadAndBox(CachedRecovery& cachedRecovery) + { + // We don't have interfering loads & boxes + ASSERT(!cachedRecovery.loadsIntoFPR() || !cachedRecovery.boxingRequiresFPR()); + ASSERT(!cachedRecovery.loadsIntoGPR() || !cachedRecovery.boxingRequiresGPR()); + + return canLoad(cachedRecovery) && canBox(cachedRecovery); + } + + DataFormat emitStore(CachedRecovery&, MacroAssembler::Address); + + void emitDisplace(CachedRecovery&); + + void emitDeltaCheck(); + + Bag m_cachedRecoveries; + + void updateRecovery(CachedRecovery& cachedRecovery, ValueRecovery recovery) + { + clearCachedRecovery(cachedRecovery.recovery()); + cachedRecovery.setRecovery(recovery); + setCachedRecovery(recovery, &cachedRecovery); + } + + CachedRecovery* getCachedRecovery(ValueRecovery); + + CachedRecovery* setCachedRecovery(ValueRecovery, CachedRecovery*); + + void clearCachedRecovery(ValueRecovery recovery) + { + if (!recovery.isConstant()) + setCachedRecovery(recovery, nullptr); + } + + CachedRecovery* addCachedRecovery(ValueRecovery recovery) + { + if (recovery.isConstant()) + return m_cachedRecoveries.add(recovery); + CachedRecovery* cachedRecovery = getCachedRecovery(recovery); + if (!cachedRecovery) + return setCachedRecovery(recovery, m_cachedRecoveries.add(recovery)); + return cachedRecovery; + } + + // This is the current recoveries present in the old frame's + // slots. A null CachedRecovery means we can trash the current + // value as we don't care about it. + Vector m_oldFrame; + + int numLocals() const + { + return m_oldFrame.size() - CallerFrameAndPC::sizeInRegisters; + } + + CachedRecovery* getOld(VirtualRegister reg) const + { + return m_oldFrame[CallerFrameAndPC::sizeInRegisters - reg.offset() - 1]; + } + + void setOld(VirtualRegister reg, CachedRecovery* cachedRecovery) + { + m_oldFrame[CallerFrameAndPC::sizeInRegisters - reg.offset() - 1] = cachedRecovery; + } + + VirtualRegister firstOld() const + { + return VirtualRegister { static_cast(-numLocals()) }; + } + + VirtualRegister lastOld() const + { + return VirtualRegister { CallerFrameAndPC::sizeInRegisters - 1 }; + } + + bool isValidOld(VirtualRegister reg) const + { + return reg >= firstOld() && reg <= lastOld(); + } + + bool m_didExtendFrame { false }; + + void extendFrameIfNeeded(); + + // This stores, for each slot in the new frame, information about + // the recovery for the value that should eventually go into that + // slot. + // + // Once the slot has been written, the corresponding entry in + // m_newFrame will be empty. 
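+    // Indexing note: new-frame slots run from firstNew() (VirtualRegister 0)
+    // up to lastNew(), and getNew()/setNew() below index m_newFrame directly
+    // by VirtualRegister::offset().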
+ Vector m_newFrame; + + size_t argCount() const + { + return m_newFrame.size() - CallFrame::headerSizeInRegisters; + } + + CachedRecovery* getNew(VirtualRegister newRegister) const + { + return m_newFrame[newRegister.offset()]; + } + + void setNew(VirtualRegister newRegister, CachedRecovery* cachedRecovery) + { + m_newFrame[newRegister.offset()] = cachedRecovery; + } + + void addNew(VirtualRegister newRegister, ValueRecovery recovery) + { + CachedRecovery* cachedRecovery = addCachedRecovery(recovery); + cachedRecovery->addTarget(newRegister); + setNew(newRegister, cachedRecovery); + } + + VirtualRegister firstNew() const + { + return VirtualRegister { 0 }; + } + + VirtualRegister lastNew() const + { + return VirtualRegister { static_cast(m_newFrame.size()) - 1 }; + } + + bool isValidNew(VirtualRegister reg) const + { + return reg >= firstNew() && reg <= lastNew(); + } + + + int m_alignedOldFrameSize; + int m_alignedNewFrameSize; + + // This is the distance, in slots, between the base of the new + // frame and the base of the old frame. It could be negative when + // preparing for a tail call to a function with smaller argument + // count. + // + // We will overwrite this appropriately for slow path calls, but + // we initialize it as if doing a fast path for the spills we + // could do while undecided (typically while calling acquireGPR() + // for a polymorphic call). + int m_frameDelta; + + VirtualRegister newAsOld(VirtualRegister reg) const + { + return reg - m_frameDelta; + } + + // This stores the set of locked registers, i.e. registers for + // which we have an implicit requirement that they are not changed. + // + // This will usually contains the link register on architectures + // that have one, any scratch register used by the macro assembler + // (e.g. r11 on X86_64), as well as any register that we use for + // addressing (see m_oldFrameBase and m_newFrameBase). + // + // We also use this to lock registers temporarily, for instance to + // ensure that we have at least 2 available registers for loading + // a pair on 32bits. + mutable RegisterSet m_lockedRegisters; + + // This stores the current recoveries present in registers. A null + // CachedRecovery means we can trash the current value as we don't + // care about it. + RegisterMap m_registers; + +#if USE(JSVALUE64) + mutable GPRReg m_tagTypeNumber; + + bool tryAcquireTagTypeNumber(); +#endif + + // This stores, for each register, information about the recovery + // for the value that should eventually go into that register. The + // only registers that have a target recovery will be callee-save + // registers, as well as possibly one JSValueRegs for holding the + // callee. + // + // Once the correct value has been put into the registers, and + // contrary to what we do with m_newFrame, we keep the entry in + // m_newRegisters to simplify spilling. 
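+    // In other words, m_registers above answers "what is in this register
+    // right now?" while m_newRegisters answers "what should end up in it?";
+    // per the comment above, only callee saves and possibly the callee's
+    // JSValueRegs ever get a wanted register.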
+ RegisterMap m_newRegisters; + + template + Reg getFreeRegister(const CheckFunctor& check) const + { + Reg nonTemp { }; + for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) { + if (m_lockedRegisters.get(reg)) + continue; + + if (!check(reg)) + continue; + + if (!m_registers[reg]) { + if (!m_newRegisters[reg]) + return reg; + if (!nonTemp) + nonTemp = reg; + } + } + +#if USE(JSVALUE64) + if (!nonTemp && m_tagTypeNumber != InvalidGPRReg && check(Reg { m_tagTypeNumber })) { + ASSERT(m_lockedRegisters.get(m_tagTypeNumber)); + m_lockedRegisters.clear(m_tagTypeNumber); + nonTemp = Reg { m_tagTypeNumber }; + m_tagTypeNumber = InvalidGPRReg; + } +#endif + return nonTemp; + } + + GPRReg getFreeTempGPR() const + { + Reg freeTempGPR { getFreeRegister([this] (Reg reg) { return reg.isGPR() && !m_newRegisters[reg]; }) }; + if (!freeTempGPR) + return InvalidGPRReg; + return freeTempGPR.gpr(); + } + + GPRReg getFreeGPR() const + { + Reg freeGPR { getFreeRegister([] (Reg reg) { return reg.isGPR(); }) }; + if (!freeGPR) + return InvalidGPRReg; + return freeGPR.gpr(); + } + + FPRReg getFreeFPR() const + { + Reg freeFPR { getFreeRegister([] (Reg reg) { return reg.isFPR(); }) }; + if (!freeFPR) + return InvalidFPRReg; + return freeFPR.fpr(); + } + + bool hasFreeRegister() const + { + return static_cast(getFreeRegister([] (Reg) { return true; })); + } + + // This frees up a register satisfying the check functor (this + // functor could theoretically have any kind of logic, but it must + // ensure that it will only return true for registers - spill + // assumes and asserts that it is passed a cachedRecovery stored in a + // register). + template + void ensureRegister(const CheckFunctor& check) + { + // If we can spill a callee-save, that's best, because it will + // free up a register that would otherwise been taken for the + // longest amount of time. + // + // We could try to bias towards those that are not in their + // target registers yet, but the gain is probably super + // small. Unless you have a huge number of argument (at least + // around twice the number of available registers on your + // architecture), no spilling is going to take place anyways. + for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) { + if (m_lockedRegisters.get(reg)) + continue; + + CachedRecovery* cachedRecovery { m_newRegisters[reg] }; + if (!cachedRecovery) + continue; + + if (check(*cachedRecovery)) { + if (verbose) + dataLog(" ", cachedRecovery->recovery(), " looks like a good spill candidate\n"); + spill(*cachedRecovery); + return; + } + } + + // We use the cachedRecovery associated with the first new slot we + // can, because that is the one for which a write will be + // possible the latest, i.e. that is the one that we would + // have had to retain in registers for the longest. 
+ for (VirtualRegister reg = firstNew(); reg <= lastNew(); reg += 1) { + CachedRecovery* cachedRecovery { getNew(reg) }; + if (!cachedRecovery) + continue; + + if (check(*cachedRecovery)) { + spill(*cachedRecovery); + return; + } + } + + RELEASE_ASSERT_NOT_REACHED(); + } + + void ensureRegister() + { + if (hasFreeRegister()) + return; + + if (verbose) + dataLog(" Finding a register to spill\n"); + ensureRegister( + [this] (const CachedRecovery& cachedRecovery) { + if (cachedRecovery.recovery().isInGPR()) + return !m_lockedRegisters.get(cachedRecovery.recovery().gpr()); + if (cachedRecovery.recovery().isInFPR()) + return !m_lockedRegisters.get(cachedRecovery.recovery().fpr()); +#if USE(JSVALUE32_64) + if (cachedRecovery.recovery().technique() == InPair) { + return !m_lockedRegisters.get(cachedRecovery.recovery().tagGPR()) + && !m_lockedRegisters.get(cachedRecovery.recovery().payloadGPR()); + } +#endif + return false; + }); + } + + void ensureTempGPR() + { + if (getFreeTempGPR() != InvalidGPRReg) + return; + + if (verbose) + dataLog(" Finding a temp GPR to spill\n"); + ensureRegister( + [this] (const CachedRecovery& cachedRecovery) { + if (cachedRecovery.recovery().isInGPR()) { + return !m_lockedRegisters.get(cachedRecovery.recovery().gpr()) + && !m_newRegisters[cachedRecovery.recovery().gpr()]; + } +#if USE(JSVALUE32_64) + if (cachedRecovery.recovery().technique() == InPair) { + return !m_lockedRegisters.get(cachedRecovery.recovery().tagGPR()) + && !m_lockedRegisters.get(cachedRecovery.recovery().payloadGPR()) + && !m_newRegisters[cachedRecovery.recovery().tagGPR()] + && !m_newRegisters[cachedRecovery.recovery().payloadGPR()]; + } +#endif + return false; + }); + } + + void ensureGPR() + { + if (getFreeGPR() != InvalidGPRReg) + return; + + if (verbose) + dataLog(" Finding a GPR to spill\n"); + ensureRegister( + [this] (const CachedRecovery& cachedRecovery) { + if (cachedRecovery.recovery().isInGPR()) + return !m_lockedRegisters.get(cachedRecovery.recovery().gpr()); +#if USE(JSVALUE32_64) + if (cachedRecovery.recovery().technique() == InPair) { + return !m_lockedRegisters.get(cachedRecovery.recovery().tagGPR()) + && !m_lockedRegisters.get(cachedRecovery.recovery().payloadGPR()); + } +#endif + return false; + }); + } + + void ensureFPR() + { + if (getFreeFPR() != InvalidFPRReg) + return; + + if (verbose) + dataLog(" Finding an FPR to spill\n"); + ensureRegister( + [this] (const CachedRecovery& cachedRecovery) { + if (cachedRecovery.recovery().isInFPR()) + return !m_lockedRegisters.get(cachedRecovery.recovery().fpr()); + return false; + }); + } + + CachedRecovery* getNew(JSValueRegs jsValueRegs) const + { +#if USE(JSVALUE64) + return m_newRegisters[jsValueRegs.gpr()]; +#else + ASSERT( + jsValueRegs.tagGPR() == InvalidGPRReg || jsValueRegs.payloadGPR() == InvalidGPRReg + || m_newRegisters[jsValueRegs.payloadGPR()] == m_newRegisters[jsValueRegs.tagGPR()]); + if (jsValueRegs.payloadGPR() == InvalidGPRReg) + return m_newRegisters[jsValueRegs.tagGPR()]; + return m_newRegisters[jsValueRegs.payloadGPR()]; +#endif + } + + void addNew(JSValueRegs jsValueRegs, ValueRecovery recovery) + { + ASSERT(jsValueRegs && !getNew(jsValueRegs)); + CachedRecovery* cachedRecovery = addCachedRecovery(recovery); +#if USE(JSVALUE64) + if (cachedRecovery->wantedJSValueRegs()) + m_newRegisters[cachedRecovery->wantedJSValueRegs().gpr()] = nullptr; + m_newRegisters[jsValueRegs.gpr()] = cachedRecovery; +#else + if (JSValueRegs oldRegs { cachedRecovery->wantedJSValueRegs() }) { + if (oldRegs.payloadGPR()) + 
m_newRegisters[oldRegs.payloadGPR()] = nullptr; + if (oldRegs.tagGPR()) + m_newRegisters[oldRegs.tagGPR()] = nullptr; + } + if (jsValueRegs.payloadGPR() != InvalidGPRReg) + m_newRegisters[jsValueRegs.payloadGPR()] = cachedRecovery; + if (jsValueRegs.tagGPR() != InvalidGPRReg) + m_newRegisters[jsValueRegs.tagGPR()] = cachedRecovery; +#endif + ASSERT(!cachedRecovery->wantedJSValueRegs()); + cachedRecovery->setWantedJSValueRegs(jsValueRegs); + } + + void addNew(FPRReg fpr, ValueRecovery recovery) + { + ASSERT(fpr != InvalidFPRReg && !m_newRegisters[fpr]); + CachedRecovery* cachedRecovery = addCachedRecovery(recovery); + m_newRegisters[fpr] = cachedRecovery; + ASSERT(cachedRecovery->wantedFPR() == InvalidFPRReg); + cachedRecovery->setWantedFPR(fpr); + } + + // m_oldFrameBase is the register relative to which we access + // slots in the old call frame, with an additional offset of + // m_oldFrameOffset. + // + // - For an actual tail call, m_oldFrameBase is the stack + // pointer, and m_oldFrameOffset is the number of locals of the + // tail caller's frame. We use such stack pointer-based + // addressing because it allows us to load the tail caller's + // caller's frame pointer in the frame pointer register + // immediately instead of awkwardly keeping it around on the + // stack. + // + // - For a slow path call, m_oldFrameBase is just the frame + // pointer, and m_oldFrameOffset is 0. + GPRReg m_oldFrameBase { MacroAssembler::framePointerRegister }; + int m_oldFrameOffset { 0 }; + + MacroAssembler::Address addressForOld(VirtualRegister reg) const + { + return MacroAssembler::Address(m_oldFrameBase, + (m_oldFrameOffset + reg.offset()) * sizeof(Register)); + } + + // m_newFrameBase is the register relative to which we access + // slots in the new call frame, and we always make it point to + // wherever the stack pointer will be right before making the + // actual call/jump. The actual base of the new frame is at offset + // m_newFrameOffset relative to m_newFrameBase. + // + // - For an actual tail call, m_newFrameBase is computed + // dynamically, and m_newFrameOffset varies between 0 and -2 + // depending on the architecture's calling convention (see + // prepareForTailCall). + // + // - For a slow path call, m_newFrameBase is the actual stack + // pointer, and m_newFrameOffset is - CallerFrameAndPCSize, + // following the convention for a regular call. + GPRReg m_newFrameBase { InvalidGPRReg }; + int m_newFrameOffset { 0}; + + bool isUndecided() const + { + return m_newFrameBase == InvalidGPRReg; + } + + bool isSlowPath() const + { + return m_newFrameBase == MacroAssembler::stackPointerRegister; + } + + MacroAssembler::Address addressForNew(VirtualRegister reg) const + { + return MacroAssembler::Address(m_newFrameBase, + (m_newFrameOffset + reg.offset()) * sizeof(Register)); + } + + // We use a concept of "danger zone". The danger zone consists of + // all the writes in the new frame that could overlap with reads + // in the old frame. + // + // Because we could have a higher actual number of arguments than + // parameters, when preparing a tail call, we need to assume that + // writing to a slot on the new frame could overlap not only with + // the corresponding slot in the old frame, but also with any slot + // above it. Thus, the danger zone consists of all writes between + // the first write and what I call the "danger frontier": the + // highest slot in the old frame we still care about. 
Thus, the + // danger zone contains all the slots between the first slot of + // the new frame and the danger frontier. Because the danger + // frontier is related to the new frame, it is stored as a virtual + // register *in the new frame*. + VirtualRegister m_dangerFrontier; + + VirtualRegister dangerFrontier() const + { + ASSERT(!isUndecided()); + + return m_dangerFrontier; + } + + bool isDangerNew(VirtualRegister reg) const + { + ASSERT(!isUndecided() && isValidNew(reg)); + return reg <= dangerFrontier(); + } + + void updateDangerFrontier() + { + ASSERT(!isUndecided()); + + m_dangerFrontier = firstNew() - 1; + for (VirtualRegister reg = lastNew(); reg >= firstNew(); reg -= 1) { + if (!getNew(reg) || !isValidOld(newAsOld(reg)) || !getOld(newAsOld(reg))) + continue; + + m_dangerFrontier = reg; + if (verbose) + dataLog(" Danger frontier now at NEW ", m_dangerFrontier, "\n"); + break; + } + if (verbose) + dataLog(" All clear! Danger zone is empty.\n"); + } + + // A safe write is a write that never writes into the danger zone. + bool hasOnlySafeWrites(CachedRecovery& cachedRecovery) const + { + for (VirtualRegister target : cachedRecovery.targets()) { + if (isDangerNew(target)) + return false; + } + return true; + } + + // You must ensure that there is no dangerous writes before + // calling this function. + bool tryWrites(CachedRecovery&); + + // This function tries to ensure that there is no longer any + // possible safe write, i.e. all remaining writes are either to + // the danger zone or callee save restorations. + // + // It returns false if it was unable to perform some safe writes + // due to high register pressure. + bool performSafeWrites(); + + unsigned m_numPassedArgs { UINT_MAX }; +}; + +} // namespace JSC + +#endif // ENABLE(JIT) diff --git a/Source/JavaScriptCore/jit/CallFrameShuffler32_64.cpp b/Source/JavaScriptCore/jit/CallFrameShuffler32_64.cpp new file mode 100644 index 000000000..5dfe96e81 --- /dev/null +++ b/Source/JavaScriptCore/jit/CallFrameShuffler32_64.cpp @@ -0,0 +1,305 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "CallFrameShuffler.h" + +#if ENABLE(JIT) && USE(JSVALUE32_64) + +#include "CCallHelpers.h" +#include "DataFormat.h" +#include "JSCInlines.h" + +namespace JSC { + +DataFormat CallFrameShuffler::emitStore(CachedRecovery& location, MacroAssembler::Address address) +{ + ASSERT(!location.recovery().isInJSStack()); + + switch (location.recovery().technique()) { + case UnboxedInt32InGPR: + m_jit.store32(MacroAssembler::TrustedImm32(JSValue::Int32Tag), + address.withOffset(TagOffset)); + m_jit.store32(location.recovery().gpr(), address.withOffset(PayloadOffset)); + return DataFormatInt32; + case UnboxedCellInGPR: + m_jit.store32(MacroAssembler::TrustedImm32(JSValue::CellTag), + address.withOffset(TagOffset)); + m_jit.store32(location.recovery().gpr(), address.withOffset(PayloadOffset)); + return DataFormatCell; + case Constant: + m_jit.storeTrustedValue(location.recovery().constant(), address); + return DataFormatJS; + case InPair: + m_jit.storeValue(location.recovery().jsValueRegs(), address); + return DataFormatJS; + case UnboxedBooleanInGPR: + m_jit.store32(MacroAssembler::TrustedImm32(JSValue::BooleanTag), + address.withOffset(TagOffset)); + m_jit.store32(location.recovery().gpr(), address.withOffset(PayloadOffset)); + return DataFormatBoolean; + case InFPR: + case UnboxedDoubleInFPR: + m_jit.storeDouble(location.recovery().fpr(), address); + return DataFormatJS; + default: + RELEASE_ASSERT_NOT_REACHED(); + } +} + +void CallFrameShuffler::emitBox(CachedRecovery& location) +{ + // Nothing to do, we're good! JSValues and doubles can be stored + // immediately, and other formats don't need any transformation - + // just storing a constant tag separately. + ASSERT_UNUSED(location, canBox(location)); +} + +void CallFrameShuffler::emitLoad(CachedRecovery& location) +{ + if (!location.recovery().isInJSStack()) + return; + + if (verbose) + dataLog(" * Loading ", location.recovery(), " into "); + VirtualRegister reg { location.recovery().virtualRegister() }; + MacroAssembler::Address address { addressForOld(reg) }; + + bool tryFPR { true }; + JSValueRegs wantedJSValueRegs { location.wantedJSValueRegs() }; + if (wantedJSValueRegs) { + if (wantedJSValueRegs.payloadGPR() != InvalidGPRReg + && !m_registers[wantedJSValueRegs.payloadGPR()] + && !m_lockedRegisters.get(wantedJSValueRegs.payloadGPR())) + tryFPR = false; + if (wantedJSValueRegs.tagGPR() != InvalidGPRReg + && !m_registers[wantedJSValueRegs.tagGPR()] + && !m_lockedRegisters.get(wantedJSValueRegs.tagGPR())) + tryFPR = false; + } + + if (tryFPR && location.loadsIntoFPR()) { + FPRReg resultFPR = location.wantedFPR(); + if (resultFPR == InvalidFPRReg || m_registers[resultFPR] || m_lockedRegisters.get(resultFPR)) + resultFPR = getFreeFPR(); + if (resultFPR != InvalidFPRReg) { + m_jit.loadDouble(address, resultFPR); + DataFormat dataFormat = DataFormatJS; + if (location.recovery().dataFormat() == DataFormatDouble) + dataFormat = DataFormatDouble; + updateRecovery(location, + ValueRecovery::inFPR(resultFPR, dataFormat)); + if (verbose) + dataLog(location.recovery(), "\n"); + if (reg == newAsOld(dangerFrontier())) + updateDangerFrontier(); + return; + } + } + + if (location.loadsIntoGPR()) { + GPRReg resultGPR { wantedJSValueRegs.payloadGPR() }; + if (resultGPR == InvalidGPRReg || m_registers[resultGPR] || m_lockedRegisters.get(resultGPR)) + resultGPR = getFreeGPR(); + ASSERT(resultGPR != InvalidGPRReg); + m_jit.loadPtr(address.withOffset(PayloadOffset), resultGPR); + updateRecovery(location, + 
ValueRecovery::inGPR(resultGPR, location.recovery().dataFormat())); + if (verbose) + dataLog(location.recovery(), "\n"); + if (reg == newAsOld(dangerFrontier())) + updateDangerFrontier(); + return; + } + + ASSERT(location.recovery().technique() == DisplacedInJSStack); + GPRReg payloadGPR { wantedJSValueRegs.payloadGPR() }; + GPRReg tagGPR { wantedJSValueRegs.tagGPR() }; + if (payloadGPR == InvalidGPRReg || m_registers[payloadGPR] || m_lockedRegisters.get(payloadGPR)) + payloadGPR = getFreeGPR(); + m_lockedRegisters.set(payloadGPR); + if (tagGPR == InvalidGPRReg || m_registers[tagGPR] || m_lockedRegisters.get(tagGPR)) + tagGPR = getFreeGPR(); + m_lockedRegisters.clear(payloadGPR); + ASSERT(payloadGPR != InvalidGPRReg && tagGPR != InvalidGPRReg && tagGPR != payloadGPR); + m_jit.loadPtr(address.withOffset(PayloadOffset), payloadGPR); + m_jit.loadPtr(address.withOffset(TagOffset), tagGPR); + updateRecovery(location, + ValueRecovery::inPair(tagGPR, payloadGPR)); + if (verbose) + dataLog(location.recovery(), "\n"); + if (reg == newAsOld(dangerFrontier())) + updateDangerFrontier(); +} + +bool CallFrameShuffler::canLoad(CachedRecovery& location) +{ + if (!location.recovery().isInJSStack()) + return true; + + if (location.loadsIntoFPR() && getFreeFPR() != InvalidFPRReg) + return true; + + if (location.loadsIntoGPR() && getFreeGPR() != InvalidGPRReg) + return true; + + if (location.recovery().technique() == DisplacedInJSStack) { + GPRReg payloadGPR { getFreeGPR() }; + if (payloadGPR == InvalidGPRReg) + return false; + m_lockedRegisters.set(payloadGPR); + GPRReg tagGPR { getFreeGPR() }; + m_lockedRegisters.clear(payloadGPR); + return tagGPR != InvalidGPRReg; + } + + return false; +} + +void CallFrameShuffler::emitDisplace(CachedRecovery& location) +{ + ASSERT(location.recovery().isInRegisters()); + JSValueRegs wantedJSValueRegs { location.wantedJSValueRegs() }; + ASSERT(wantedJSValueRegs); // We don't support wanted FPRs on 32bit platforms + + GPRReg wantedTagGPR { wantedJSValueRegs.tagGPR() }; + GPRReg wantedPayloadGPR { wantedJSValueRegs.payloadGPR() }; + + if (wantedTagGPR != InvalidGPRReg) { + ASSERT(!m_lockedRegisters.get(wantedTagGPR)); + if (CachedRecovery* currentTag { m_registers[wantedTagGPR] }) { + if (currentTag == &location) { + if (verbose) + dataLog(" + ", wantedTagGPR, " is OK\n"); + } else { + // This can never happen on 32bit platforms since we + // have at most one wanted JSValueRegs, for the + // callee, and no callee-save registers. 
+ RELEASE_ASSERT_NOT_REACHED(); + } + } + } + + if (wantedPayloadGPR != InvalidGPRReg) { + ASSERT(!m_lockedRegisters.get(wantedPayloadGPR)); + if (CachedRecovery* currentPayload { m_registers[wantedPayloadGPR] }) { + if (currentPayload == &location) { + if (verbose) + dataLog(" + ", wantedPayloadGPR, " is OK\n"); + } else { + // See above + RELEASE_ASSERT_NOT_REACHED(); + } + } + } + + if (location.recovery().technique() == InPair + || location.recovery().isInGPR()) { + GPRReg payloadGPR; + if (location.recovery().technique() == InPair) + payloadGPR = location.recovery().payloadGPR(); + else + payloadGPR = location.recovery().gpr(); + + if (wantedPayloadGPR == InvalidGPRReg) + wantedPayloadGPR = payloadGPR; + + if (payloadGPR != wantedPayloadGPR) { + if (location.recovery().technique() == InPair + && wantedPayloadGPR == location.recovery().tagGPR()) { + if (verbose) + dataLog(" * Swapping ", payloadGPR, " and ", wantedPayloadGPR, "\n"); + m_jit.swap(payloadGPR, wantedPayloadGPR); + updateRecovery(location, + ValueRecovery::inPair(payloadGPR, wantedPayloadGPR)); + } else { + if (verbose) + dataLog(" * Moving ", payloadGPR, " into ", wantedPayloadGPR, "\n"); + m_jit.move(payloadGPR, wantedPayloadGPR); + if (location.recovery().technique() == InPair) { + updateRecovery(location, + ValueRecovery::inPair(location.recovery().tagGPR(), + wantedPayloadGPR)); + } else { + updateRecovery(location, + ValueRecovery::inGPR(wantedPayloadGPR, location.recovery().dataFormat())); + } + } + } + + if (wantedTagGPR == InvalidGPRReg) + wantedTagGPR = getFreeGPR(); + switch (location.recovery().dataFormat()) { + case DataFormatInt32: + if (verbose) + dataLog(" * Moving int32 tag into ", wantedTagGPR, "\n"); + m_jit.move(MacroAssembler::TrustedImm32(JSValue::Int32Tag), + wantedTagGPR); + break; + case DataFormatCell: + if (verbose) + dataLog(" * Moving cell tag into ", wantedTagGPR, "\n"); + m_jit.move(MacroAssembler::TrustedImm32(JSValue::CellTag), + wantedTagGPR); + break; + case DataFormatBoolean: + if (verbose) + dataLog(" * Moving boolean tag into ", wantedTagGPR, "\n"); + m_jit.move(MacroAssembler::TrustedImm32(JSValue::BooleanTag), + wantedTagGPR); + break; + case DataFormatJS: + ASSERT(wantedTagGPR != location.recovery().payloadGPR()); + if (wantedTagGPR != location.recovery().tagGPR()) { + if (verbose) + dataLog(" * Moving ", location.recovery().tagGPR(), " into ", wantedTagGPR, "\n"); + m_jit.move(location.recovery().tagGPR(), wantedTagGPR); + } + break; + + default: + RELEASE_ASSERT_NOT_REACHED(); + } + } else { + ASSERT(location.recovery().isInFPR()); + if (wantedTagGPR == InvalidGPRReg) { + ASSERT(wantedPayloadGPR != InvalidGPRReg); + m_lockedRegisters.set(wantedPayloadGPR); + wantedTagGPR = getFreeGPR(); + m_lockedRegisters.clear(wantedPayloadGPR); + } + if (wantedPayloadGPR == InvalidGPRReg) { + m_lockedRegisters.set(wantedTagGPR); + wantedPayloadGPR = getFreeGPR(); + m_lockedRegisters.clear(wantedTagGPR); + } + m_jit.boxDouble(location.recovery().fpr(), wantedTagGPR, wantedPayloadGPR); + } + updateRecovery(location, ValueRecovery::inPair(wantedTagGPR, wantedPayloadGPR)); +} + +} // namespace JSC + +#endif // ENABLE(JIT) && USE(JSVALUE32_64) diff --git a/Source/JavaScriptCore/jit/CallFrameShuffler64.cpp b/Source/JavaScriptCore/jit/CallFrameShuffler64.cpp new file mode 100644 index 000000000..2ef6ed111 --- /dev/null +++ b/Source/JavaScriptCore/jit/CallFrameShuffler64.cpp @@ -0,0 +1,369 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. 
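The 32-bit shuffler just shown never treats a JSValue as a single word: emitStore() writes the tag and payload halves through address.withOffset(TagOffset) and address.withOffset(PayloadOffset), and emitDisplace() materializes the tag as an immediate whenever the data format is already known. A small self-contained sketch of that split-word layout; the tag constants and the struct layout are placeholders for illustration, not the engine's actual encoding:

#include <cstdint>
#include <cstdio>

// Placeholder tags; the real engine defines its own values on JSValue.
enum : uint32_t { DemoInt32Tag = 0xfffffffdu, DemoCellTag = 0xfffffffbu };

// A 32-bit "JSValue" slot: one payload word plus one tag word.
struct SplitSlot {
    uint32_t payload;
    uint32_t tag;
};

// Analogue of store32(TrustedImm32(Int32Tag), addr.withOffset(TagOffset)) followed by
// store32(gpr, addr.withOffset(PayloadOffset)): two independent word writes.
static void storeInt32(SplitSlot& slot, int32_t value)
{
    slot.tag = DemoInt32Tag;
    slot.payload = static_cast<uint32_t>(value);
}

int main()
{
    SplitSlot slot {};
    storeInt32(slot, 42);
    std::printf("tag=0x%08x payload=%u\n", slot.tag, slot.payload);
}

Because each half is written independently, a recovered double on this path needs both words at once, which is why emitDisplace() falls back to boxDouble() into a tag/payload register pair rather than a single GPR.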
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "CallFrameShuffler.h" + +#if ENABLE(JIT) && USE(JSVALUE64) + +#include "CCallHelpers.h" +#include "DataFormat.h" +#include "JSCInlines.h" + +namespace JSC { + +DataFormat CallFrameShuffler::emitStore( + CachedRecovery& cachedRecovery, MacroAssembler::Address address) +{ + ASSERT(!cachedRecovery.recovery().isInJSStack()); + + switch (cachedRecovery.recovery().technique()) { + case InGPR: + m_jit.storePtr(cachedRecovery.recovery().gpr(), address); + return DataFormatJS; + case UnboxedInt32InGPR: + m_jit.store32(cachedRecovery.recovery().gpr(), address.withOffset(PayloadOffset)); + return DataFormatInt32; + case UnboxedInt52InGPR: + m_jit.rshift64(MacroAssembler::TrustedImm32(JSValue::int52ShiftAmount), + cachedRecovery.recovery().gpr()); + FALLTHROUGH; + case UnboxedStrictInt52InGPR: + m_jit.storePtr(cachedRecovery.recovery().gpr(), address); + return DataFormatStrictInt52; + case UnboxedBooleanInGPR: + m_jit.storePtr(cachedRecovery.recovery().gpr(), address); + return DataFormatBoolean; + case UnboxedCellInGPR: + m_jit.storePtr(cachedRecovery.recovery().gpr(), address); + return DataFormatCell; + case UnboxedDoubleInFPR: + m_jit.storeDouble(cachedRecovery.recovery().fpr(), address); + return DataFormatDouble; + case InFPR: + m_jit.storeDouble(cachedRecovery.recovery().fpr(), address); + return DataFormatJS; + case Constant: + m_jit.storeTrustedValue(cachedRecovery.recovery().constant(), address); + return DataFormatJS; + default: + RELEASE_ASSERT_NOT_REACHED(); + } +} + +void CallFrameShuffler::emitBox(CachedRecovery& cachedRecovery) +{ + ASSERT(canBox(cachedRecovery)); + if (cachedRecovery.recovery().isConstant()) + return; + + if (cachedRecovery.recovery().isInGPR()) { + switch (cachedRecovery.recovery().dataFormat()) { + case DataFormatInt32: + if (verbose) + dataLog(" * Boxing ", cachedRecovery.recovery()); + m_jit.zeroExtend32ToPtr( + cachedRecovery.recovery().gpr(), + cachedRecovery.recovery().gpr()); + m_lockedRegisters.set(cachedRecovery.recovery().gpr()); + if (tryAcquireTagTypeNumber()) + m_jit.or64(m_tagTypeNumber, cachedRecovery.recovery().gpr()); + else { + // We have to do this the hard way + m_jit.or64(MacroAssembler::TrustedImm64(TagTypeNumber), + cachedRecovery.recovery().gpr()); 
+ } + m_lockedRegisters.clear(cachedRecovery.recovery().gpr()); + cachedRecovery.setRecovery( + ValueRecovery::inGPR(cachedRecovery.recovery().gpr(), DataFormatJS)); + if (verbose) + dataLog(" into ", cachedRecovery.recovery(), "\n"); + return; + case DataFormatInt52: + if (verbose) + dataLog(" * Boxing ", cachedRecovery.recovery()); + m_jit.rshift64(MacroAssembler::TrustedImm32(JSValue::int52ShiftAmount), + cachedRecovery.recovery().gpr()); + cachedRecovery.setRecovery( + ValueRecovery::inGPR(cachedRecovery.recovery().gpr(), DataFormatStrictInt52)); + if (verbose) + dataLog(" into ", cachedRecovery.recovery(), "\n"); + FALLTHROUGH; + case DataFormatStrictInt52: { + if (verbose) + dataLog(" * Boxing ", cachedRecovery.recovery()); + FPRReg resultFPR = getFreeFPR(); + ASSERT(resultFPR != InvalidFPRReg); + m_jit.convertInt64ToDouble(cachedRecovery.recovery().gpr(), resultFPR); + updateRecovery(cachedRecovery, ValueRecovery::inFPR(resultFPR, DataFormatDouble)); + if (verbose) + dataLog(" into ", cachedRecovery.recovery(), "\n"); + break; + } + case DataFormatBoolean: + if (verbose) + dataLog(" * Boxing ", cachedRecovery.recovery()); + m_jit.add32(MacroAssembler::TrustedImm32(ValueFalse), + cachedRecovery.recovery().gpr()); + cachedRecovery.setRecovery( + ValueRecovery::inGPR(cachedRecovery.recovery().gpr(), DataFormatJS)); + if (verbose) + dataLog(" into ", cachedRecovery.recovery(), "\n"); + return; + default: + return; + } + } + + if (cachedRecovery.recovery().isInFPR()) { + if (cachedRecovery.recovery().dataFormat() == DataFormatDouble) { + if (verbose) + dataLog(" * Boxing ", cachedRecovery.recovery()); + GPRReg resultGPR = cachedRecovery.wantedJSValueRegs().gpr(); + if (resultGPR == InvalidGPRReg || m_registers[resultGPR]) + resultGPR = getFreeGPR(); + ASSERT(resultGPR != InvalidGPRReg); + m_jit.purifyNaN(cachedRecovery.recovery().fpr()); + m_jit.moveDoubleTo64(cachedRecovery.recovery().fpr(), resultGPR); + m_lockedRegisters.set(resultGPR); + if (tryAcquireTagTypeNumber()) + m_jit.sub64(m_tagTypeNumber, resultGPR); + else + m_jit.sub64(MacroAssembler::TrustedImm64(TagTypeNumber), resultGPR); + m_lockedRegisters.clear(resultGPR); + updateRecovery(cachedRecovery, ValueRecovery::inGPR(resultGPR, DataFormatJS)); + if (verbose) + dataLog(" into ", cachedRecovery.recovery(), "\n"); + return; + } + ASSERT(cachedRecovery.recovery().dataFormat() == DataFormatJS); + return; + } + + RELEASE_ASSERT_NOT_REACHED(); +} + +void CallFrameShuffler::emitLoad(CachedRecovery& cachedRecovery) +{ + if (!cachedRecovery.recovery().isInJSStack()) + return; + + if (verbose) + dataLog(" * Loading ", cachedRecovery.recovery(), " into "); + + VirtualRegister reg = cachedRecovery.recovery().virtualRegister(); + MacroAssembler::Address address { addressForOld(reg) }; + bool tryFPR { true }; + GPRReg resultGPR { cachedRecovery.wantedJSValueRegs().gpr() }; + + // If we want a GPR and it's available, that's better than loading + // into an FPR. 
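// The emitBox() cases above all reduce to integer arithmetic on the value's bit
// pattern: an int32 is zero-extended and OR-ed with the tag constant, while a
// double (after purifyNaN) has the same constant subtracted from its raw bits,
// which modulo 2^64 is the same as adding 2^48. The standalone snippet below is
// an illustrative model of that arithmetic; the constant value is an assumption
// made for the sketch, not quoted from the patch.
#include <cstdint>
#include <cstdio>
#include <cstring>

static const uint64_t kDemoNumberTag = 0xffff000000000000ull; // assumed tag constant

static uint64_t boxInt32(int32_t value)
{
    // zeroExtend32ToPtr + or64(TagTypeNumber, gpr)
    return kDemoNumberTag | static_cast<uint32_t>(value);
}

static uint64_t boxDouble(double value)
{
    // moveDoubleTo64 + sub64(TagTypeNumber, gpr): subtracting the tag mod 2^64
    // adds 2^48, shifting every real double out of the pointer/int32 tag ranges.
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof(bits));
    return bits - kDemoNumberTag;
}

int main()
{
    std::printf("boxInt32(7)    = 0x%016llx\n", (unsigned long long)boxInt32(7));
    std::printf("boxDouble(1.5) = 0x%016llx\n", (unsigned long long)boxDouble(1.5));
    return 0;
}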
+ if (resultGPR != InvalidGPRReg && !m_registers[resultGPR] + && !m_lockedRegisters.get(resultGPR) && cachedRecovery.loadsIntoGPR()) + tryFPR = false; + + // Otherwise, we prefer loading into FPRs if possible + if (tryFPR && cachedRecovery.loadsIntoFPR()) { + FPRReg resultFPR { cachedRecovery.wantedFPR() }; + if (resultFPR == InvalidFPRReg || m_registers[resultFPR] || m_lockedRegisters.get(resultFPR)) + resultFPR = getFreeFPR(); + if (resultFPR != InvalidFPRReg) { + m_jit.loadDouble(address, resultFPR); + DataFormat dataFormat = DataFormatJS; + // We could be transforming a DataFormatCell into a + // DataFormatJS here - but that's OK. + if (cachedRecovery.recovery().dataFormat() == DataFormatDouble) + dataFormat = DataFormatDouble; + updateRecovery(cachedRecovery, + ValueRecovery::inFPR(resultFPR, dataFormat)); + if (verbose) + dataLog(cachedRecovery.recovery(), "\n"); + if (reg == newAsOld(dangerFrontier())) + updateDangerFrontier(); + return; + } + } + + ASSERT(cachedRecovery.loadsIntoGPR()); + if (resultGPR == InvalidGPRReg || m_registers[resultGPR] || m_lockedRegisters.get(resultGPR)) + resultGPR = getFreeGPR(); + ASSERT(resultGPR != InvalidGPRReg); + m_jit.loadPtr(address, resultGPR); + updateRecovery(cachedRecovery, + ValueRecovery::inGPR(resultGPR, cachedRecovery.recovery().dataFormat())); + if (verbose) + dataLog(cachedRecovery.recovery(), "\n"); + if (reg == newAsOld(dangerFrontier())) + updateDangerFrontier(); +} + +bool CallFrameShuffler::canLoad(CachedRecovery& cachedRecovery) +{ + if (!cachedRecovery.recovery().isInJSStack()) + return true; + + ASSERT(cachedRecovery.loadsIntoFPR() || cachedRecovery.loadsIntoGPR()); + + if (cachedRecovery.loadsIntoFPR() && getFreeFPR() != InvalidFPRReg) + return true; + + if (cachedRecovery.loadsIntoGPR() && getFreeGPR() != InvalidGPRReg) + return true; + + return false; +} + +void CallFrameShuffler::emitDisplace(CachedRecovery& cachedRecovery) +{ + Reg wantedReg; + if (!(wantedReg = Reg { cachedRecovery.wantedJSValueRegs().gpr() })) + wantedReg = Reg { cachedRecovery.wantedFPR() }; + ASSERT(wantedReg); + ASSERT(!m_lockedRegisters.get(wantedReg)); + + if (CachedRecovery* current = m_registers[wantedReg]) { + if (current == &cachedRecovery) { + if (verbose) + dataLog(" + ", wantedReg, " is OK\n"); + return; + } + // We could do a more complex thing by finding cycles + // etc. in that case. + // However, ending up in this situation will be super + // rare, and should actually be outright impossible for + // non-FTL tiers, since: + // (a) All doubles have been converted into JSValues with + // ValueRep nodes, so FPRs are initially free + // + // (b) The only recoveries with wanted registers are the + // callee (which always starts out in a register) and + // the callee-save registers + // + // (c) The callee-save registers are the first things we + // load (after the return PC), and they are loaded as JSValues + // + // (d) We prefer loading JSValues into FPRs if their + // wanted GPR is not available + // + // (e) If we end up spilling some registers with a + // target, we won't load them again before the very + // end of the algorithm + // + // Combined, this means that we will never load a recovery + // with a wanted GPR into any GPR other than its wanted + // GPR. The callee could however have been initially in + // one of the callee-save registers - but since the wanted + // GPR for the callee is always regT0, it will be the + // first one to be displaced, and we won't see it when + // handling any of the callee-save registers. 
+ // + // Thus, the only way we could ever reach this path is in + // the FTL, when there is so much pressure that we + // absolutely need to load the callee-save registers into + // different GPRs initially but not enough pressure to + // then have to spill all of them. And even in that case, + // depending on the order in which B3 saves the + // callee-saves, we will probably still be safe. Anyway, + // the couple extra move instructions compared to an + // efficient cycle-based algorithm are not going to hurt + // us. + if (wantedReg.isFPR()) { + FPRReg tempFPR = getFreeFPR(); + if (verbose) + dataLog(" * Moving ", wantedReg, " into ", tempFPR, "\n"); + m_jit.moveDouble(wantedReg.fpr(), tempFPR); + updateRecovery(*current, + ValueRecovery::inFPR(tempFPR, current->recovery().dataFormat())); + } else { + GPRReg tempGPR = getFreeGPR(); + if (verbose) + dataLog(" * Moving ", wantedReg.gpr(), " into ", tempGPR, "\n"); + m_jit.move(wantedReg.gpr(), tempGPR); + updateRecovery(*current, + ValueRecovery::inGPR(tempGPR, current->recovery().dataFormat())); + } + } + ASSERT(!m_registers[wantedReg]); + + if (cachedRecovery.recovery().isConstant()) { + // We only care about callee saves for wanted FPRs, and those are never constants + ASSERT(wantedReg.isGPR()); + if (verbose) + dataLog(" * Loading ", cachedRecovery.recovery().constant(), " into ", wantedReg, "\n"); + m_jit.moveTrustedValue(cachedRecovery.recovery().constant(), JSValueRegs { wantedReg.gpr() }); + updateRecovery( + cachedRecovery, + ValueRecovery::inRegister(wantedReg, DataFormatJS)); + } else if (cachedRecovery.recovery().isInGPR()) { + if (verbose) + dataLog(" * Moving ", cachedRecovery.recovery(), " into ", wantedReg, "\n"); + if (wantedReg.isGPR()) + m_jit.move(cachedRecovery.recovery().gpr(), wantedReg.gpr()); + else + m_jit.move64ToDouble(cachedRecovery.recovery().gpr(), wantedReg.fpr()); + RELEASE_ASSERT(cachedRecovery.recovery().dataFormat() == DataFormatJS); + updateRecovery(cachedRecovery, + ValueRecovery::inRegister(wantedReg, DataFormatJS)); + } else { + ASSERT(cachedRecovery.recovery().isInFPR()); + if (cachedRecovery.recovery().dataFormat() == DataFormatDouble) { + // We only care about callee saves for wanted FPRs, and those are always DataFormatJS + ASSERT(wantedReg.isGPR()); + // This will automatically pick the wanted GPR + emitBox(cachedRecovery); + } else { + if (verbose) + dataLog(" * Moving ", cachedRecovery.recovery().fpr(), " into ", wantedReg, "\n"); + if (wantedReg.isGPR()) + m_jit.moveDoubleTo64(cachedRecovery.recovery().fpr(), wantedReg.gpr()); + else + m_jit.moveDouble(cachedRecovery.recovery().fpr(), wantedReg.fpr()); + RELEASE_ASSERT(cachedRecovery.recovery().dataFormat() == DataFormatJS); + updateRecovery(cachedRecovery, + ValueRecovery::inRegister(wantedReg, DataFormatJS)); + } + } + + ASSERT(m_registers[wantedReg] == &cachedRecovery); +} + +bool CallFrameShuffler::tryAcquireTagTypeNumber() +{ + if (m_tagTypeNumber != InvalidGPRReg) + return true; + + m_tagTypeNumber = getFreeGPR(); + + if (m_tagTypeNumber == InvalidGPRReg) + return false; + + m_lockedRegisters.set(m_tagTypeNumber); + m_jit.move(MacroAssembler::TrustedImm64(TagTypeNumber), m_tagTypeNumber); + return true; +} + +} // namespace JSC + +#endif // ENABLE(JIT) && USE(JSVALUE64) diff --git a/Source/JavaScriptCore/jit/ClosureCallStubRoutine.cpp b/Source/JavaScriptCore/jit/ClosureCallStubRoutine.cpp deleted file mode 100644 index 1588f7fea..000000000 --- a/Source/JavaScriptCore/jit/ClosureCallStubRoutine.cpp +++ /dev/null @@ -1,63 +0,0 @@ -/* - 
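As the long comment above argues, emitDisplace() never needs cycle breaking: when the wanted register is occupied by a different recovery, the occupant is simply parked in any free register before the displaced value is moved or boxed into place. A toy model of that eviction step over named registers (the data structure and register names are invented purely for the illustration):

#include <cstdio>
#include <map>
#include <string>

// Toy register file: each register holds the name of the value living in it,
// or an empty string when free. Not the shuffler's real data structures.
using RegisterFile = std::map<std::string, std::string>;

static std::string findFreeRegister(const RegisterFile& regs)
{
    for (const auto& entry : regs) {
        if (entry.second.empty())
            return entry.first;
    }
    return {};
}

// Put the value currently in `from` into its wanted register, evicting any occupant.
static void displace(RegisterFile& regs, const std::string& from, const std::string& wanted)
{
    if (!regs[wanted].empty() && regs[wanted] != regs[from]) {
        std::string temp = findFreeRegister(regs);
        regs[temp] = regs[wanted]; // move the occupant out of the way first
        std::printf("moved %s into %s\n", regs[temp].c_str(), temp.c_str());
    }
    regs[wanted] = regs[from];
    if (from != wanted)
        regs[from].clear();
    std::printf("moved %s into %s\n", regs[wanted].c_str(), wanted.c_str());
}

int main()
{
    RegisterFile regs { { "x0", "callee" }, { "x1", "argument" }, { "x2", "" } };
    displace(regs, "x1", "x0"); // evicts "callee" into x2, then puts "argument" in x0
}

Running it prints the two moves, matching the verbose dataLog lines in the real implementation.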
* Copyright (C) 2012 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#include "config.h" -#include "ClosureCallStubRoutine.h" - -#if ENABLE(JIT) - -#include "Executable.h" -#include "Heap.h" -#include "VM.h" -#include "Operations.h" -#include "SlotVisitor.h" -#include "Structure.h" - -namespace JSC { - -ClosureCallStubRoutine::ClosureCallStubRoutine( - const MacroAssemblerCodeRef& code, VM& vm, const JSCell* owner, - Structure* structure, ExecutableBase* executable, const CodeOrigin& codeOrigin) - : GCAwareJITStubRoutine(code, vm, true) - , m_structure(vm, owner, structure) - , m_executable(vm, owner, executable) - , m_codeOrigin(codeOrigin) -{ -} - -ClosureCallStubRoutine::~ClosureCallStubRoutine() -{ -} - -void ClosureCallStubRoutine::markRequiredObjectsInternal(SlotVisitor& visitor) -{ - visitor.append(&m_structure); - visitor.append(&m_executable); -} - -} // namespace JSC - -#endif // ENABLE(JIT) - diff --git a/Source/JavaScriptCore/jit/ClosureCallStubRoutine.h b/Source/JavaScriptCore/jit/ClosureCallStubRoutine.h deleted file mode 100644 index ad61ed514..000000000 --- a/Source/JavaScriptCore/jit/ClosureCallStubRoutine.h +++ /dev/null @@ -1,66 +0,0 @@ -/* - * Copyright (C) 2012 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - */ - -#ifndef ClosureCallStubRoutine_h -#define ClosureCallStubRoutine_h - -#include - -#if ENABLE(JIT) - -#include "CodeOrigin.h" -#include "GCAwareJITStubRoutine.h" - -namespace JSC { - -class ClosureCallStubRoutine : public GCAwareJITStubRoutine { -public: - ClosureCallStubRoutine( - const MacroAssemblerCodeRef&, VM&, const JSCell* owner, - Structure*, ExecutableBase*, const CodeOrigin&); - - virtual ~ClosureCallStubRoutine(); - - Structure* structure() const { return m_structure.get(); } - ExecutableBase* executable() const { return m_executable.get(); } - const CodeOrigin& codeOrigin() const { return m_codeOrigin; } - -protected: - virtual void markRequiredObjectsInternal(SlotVisitor&) override; - -private: - WriteBarrier m_structure; - WriteBarrier m_executable; - // This allows us to figure out who a call is linked to by searching through - // stub routines. - CodeOrigin m_codeOrigin; -}; - -} // namespace JSC - -#endif // ENABLE(JIT) - -#endif // ClosureCallStubRoutine_h - diff --git a/Source/JavaScriptCore/jit/CompactJITCodeMap.h b/Source/JavaScriptCore/jit/CompactJITCodeMap.h index b09f2f6cd..01f8faf24 100644 --- a/Source/JavaScriptCore/jit/CompactJITCodeMap.h +++ b/Source/JavaScriptCore/jit/CompactJITCodeMap.h @@ -10,7 +10,7 @@ * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. - * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of + * 3. Neither the name of Apple Inc. ("Apple") nor the names of * its contributors may be used to endorse or promote products derived * from this software without specific prior written permission. * @@ -26,14 +26,11 @@ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ -#ifndef CompactJITCodeMap_h -#define CompactJITCodeMap_h +#pragma once #include #include #include -#include -#include #include namespace JSC { @@ -47,7 +44,7 @@ namespace JSC { // CompactJITCodeMap::Encoder encoder(map); // encoder.append(a, b); // encoder.append(c, d); // preconditions: c >= a, d >= b -// OwnPtr map = encoder.finish(); +// auto map = encoder.finish(); // // At some later time: // @@ -80,6 +77,16 @@ struct BytecodeAndMachineOffset { class CompactJITCodeMap { WTF_MAKE_FAST_ALLOCATED; public: + CompactJITCodeMap(uint8_t* buffer, unsigned size, unsigned numberOfEntries) + : m_buffer(buffer) +#if !ASSERT_DISABLED + , m_size(size) +#endif + , m_numberOfEntries(numberOfEntries) + { + UNUSED_PARAM(size); + } + ~CompactJITCodeMap() { if (m_buffer) @@ -94,16 +101,6 @@ public: void decode(Vector& result) const; private: - CompactJITCodeMap(uint8_t* buffer, unsigned size, unsigned numberOfEntries) - : m_buffer(buffer) -#if !ASSERT_DISABLED - , m_size(size) -#endif - , m_numberOfEntries(numberOfEntries) - { - UNUSED_PARAM(size); - } - uint8_t at(unsigned index) const { ASSERT(index < m_size); @@ -138,8 +135,8 @@ public: void ensureCapacityFor(unsigned numberOfEntriesToAdd); void append(unsigned bytecodeIndex, unsigned machineCodeOffset); - PassOwnPtr finish(); - + std::unique_ptr finish(); + private: void appendByte(uint8_t value); void encodeNumber(uint32_t value); @@ -212,18 +209,18 @@ inline void CompactJITCodeMap::Encoder::append(unsigned bytecodeIndex, unsigned m_numberOfEntries++; } -inline PassOwnPtr CompactJITCodeMap::Encoder::finish() +inline std::unique_ptr CompactJITCodeMap::Encoder::finish() { m_capacity = m_size; m_buffer = static_cast(fastRealloc(m_buffer, m_capacity)); - OwnPtr result = adoptPtr(new CompactJITCodeMap(m_buffer, m_size, m_numberOfEntries)); + auto result = std::make_unique(m_buffer, m_size, m_numberOfEntries); m_buffer = 0; m_size = 0; m_capacity = 0; m_numberOfEntries = 0; m_previousBytecodeIndex = 0; m_previousMachineCodeOffset = 0; - return result.release(); + return result; } inline void CompactJITCodeMap::Encoder::appendByte(uint8_t value) @@ -293,5 +290,3 @@ inline void CompactJITCodeMap::Decoder::read(unsigned& bytecodeIndex, unsigned& } } // namespace JSC - -#endif // CompactJITCodeMap_h diff --git a/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.cpp b/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.cpp new file mode 100644 index 000000000..b4f56650b --- /dev/null +++ b/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.cpp @@ -0,0 +1,73 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "ExecutableAllocationFuzz.h" + +#include "TestRunnerUtils.h" +#include +#include + +namespace JSC { + +static Atomic s_numberOfExecutableAllocationFuzzChecks; +unsigned numberOfExecutableAllocationFuzzChecks() +{ + return s_numberOfExecutableAllocationFuzzChecks.load(); +} + +ExecutableAllocationFuzzResult doExecutableAllocationFuzzing() +{ + ASSERT(Options::useExecutableAllocationFuzz()); + + unsigned oldValue; + unsigned newValue; + do { + oldValue = s_numberOfExecutableAllocationFuzzChecks.load(); + newValue = oldValue + 1; + } while (!s_numberOfExecutableAllocationFuzzChecks.compareExchangeWeak(oldValue, newValue)); + + if (newValue == Options::fireExecutableAllocationFuzzAt()) { + if (Options::verboseExecutableAllocationFuzz()) { + dataLog("Will pretend to fail executable allocation.\n"); + WTFReportBacktrace(); + } + return PretendToFailExecutableAllocation; + } + + if (Options::fireExecutableAllocationFuzzAtOrAfter() + && newValue >= Options::fireExecutableAllocationFuzzAtOrAfter()) { + if (Options::verboseExecutableAllocationFuzz()) { + dataLog("Will pretend to fail executable allocation.\n"); + WTFReportBacktrace(); + } + return PretendToFailExecutableAllocation; + } + + return AllowNormalExecutableAllocation; +} + +} // namespace JSC + diff --git a/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.h b/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.h new file mode 100644 index 000000000..176e1727a --- /dev/null +++ b/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.h @@ -0,0 +1,47 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#pragma once + +#include "Options.h" + +namespace JSC { + +enum ExecutableAllocationFuzzResult { + AllowNormalExecutableAllocation, + PretendToFailExecutableAllocation +}; + +ExecutableAllocationFuzzResult doExecutableAllocationFuzzing(); + +inline ExecutableAllocationFuzzResult doExecutableAllocationFuzzingIfEnabled() +{ + if (LIKELY(!Options::useExecutableAllocationFuzz())) + return AllowNormalExecutableAllocation; + + return doExecutableAllocationFuzzing(); +} + +} // namespace JSC diff --git a/Source/JavaScriptCore/jit/ExecutableAllocator.cpp b/Source/JavaScriptCore/jit/ExecutableAllocator.cpp index 5ac6cc412..44f8fbae4 100644 --- a/Source/JavaScriptCore/jit/ExecutableAllocator.cpp +++ b/Source/JavaScriptCore/jit/ExecutableAllocator.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008 Apple Inc. All rights reserved. + * Copyright (C) 2008-2009, 2015, 2017 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -20,245 +20,400 @@ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include "config.h" - #include "ExecutableAllocator.h" -#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND) +#if ENABLE(ASSEMBLER) + #include "CodeProfiling.h" -#include +#include "ExecutableAllocationFuzz.h" +#include "JSCInlines.h" #include #include -#if ENABLE(ASSEMBLER_WX_EXCLUSIVE) -#include + +#if OS(DARWIN) +#include +#endif + +#include "LinkBuffer.h" +#include "MacroAssembler.h" + +#if PLATFORM(MAC) || (PLATFORM(IOS) && __IPHONE_OS_VERSION_MIN_REQUIRED >= 100000) +#define HAVE_REMAP_JIT 1 +#endif + +#if HAVE(REMAP_JIT) +#if CPU(ARM64) && PLATFORM(IOS) && __IPHONE_OS_VERSION_MIN_REQUIRED >= 100000 +#define USE_EXECUTE_ONLY_JIT_WRITE_FUNCTION 1 #endif -#include -#include #endif -// Uncomment to create an artificial executable memory usage limit. This limit -// is imperfect and is primarily useful for testing the VM's ability to handle -// out-of-executable-memory situations. -// #define EXECUTABLE_MEMORY_LIMIT 1000000 +#if OS(DARWIN) +#include +extern "C" { + /* Routine mach_vm_remap */ +#ifdef mig_external + mig_external +#else + extern +#endif /* mig_external */ + kern_return_t mach_vm_remap + ( + vm_map_t target_task, + mach_vm_address_t *target_address, + mach_vm_size_t size, + mach_vm_offset_t mask, + int flags, + vm_map_t src_task, + mach_vm_address_t src_address, + boolean_t copy, + vm_prot_t *cur_protection, + vm_prot_t *max_protection, + vm_inherit_t inheritance + ); +} -#if ENABLE(ASSEMBLER) +#endif using namespace WTF; namespace JSC { -#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND) +JS_EXPORTDATA uintptr_t startOfFixedExecutableMemoryPool; +JS_EXPORTDATA uintptr_t endOfFixedExecutableMemoryPool; + +JS_EXPORTDATA JITWriteFunction jitWriteFunction; + +#if !USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION) && HAVE(REMAP_JIT) +static uintptr_t startOfFixedWritableMemoryPool; +#endif -class DemandExecutableAllocator : public MetaAllocator { +class FixedVMPoolExecutableAllocator : public MetaAllocator { + WTF_MAKE_FAST_ALLOCATED; public: - DemandExecutableAllocator() - : MetaAllocator(jitAllocationGranule) - { - MutexLocker lock(allocatorsMutex()); - allocators().add(this); - // Don't preallocate any memory here. 
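The fuzzing hook above keeps a process-wide atomic count of executable-allocation checks and starts pretending that allocation fails once the count reaches a configured threshold, so out-of-executable-memory paths can be exercised deterministically in tests. A minimal standalone version of the same counter pattern (the Options plumbing is replaced by a plain variable for the sketch):

#include <atomic>
#include <cstdio>

enum AllocationFuzzResult { AllowAllocation, PretendToFail };

static std::atomic<unsigned> s_checks { 0 };
static unsigned s_failAt = 3; // stand-in for the "fire at N" option

static AllocationFuzzResult doAllocationFuzzing()
{
    // Same shape as the patch's compareExchangeWeak loop: bump the counter
    // atomically and act on the value this thread personally installed.
    unsigned oldValue, newValue;
    do {
        oldValue = s_checks.load();
        newValue = oldValue + 1;
    } while (!s_checks.compare_exchange_weak(oldValue, newValue));

    return newValue == s_failAt ? PretendToFail : AllowAllocation;
}

int main()
{
    for (int i = 1; i <= 5; ++i) {
        if (doAllocationFuzzing() == PretendToFail)
            std::printf("check %d: pretending executable allocation failed\n", i);
        else
            std::printf("check %d: allocation allowed\n", i);
    }
}

Acting on the value installed by the successful compare-exchange, rather than re-reading the counter, is what makes exactly one check fire even when several threads race through the hook.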
- } - - virtual ~DemandExecutableAllocator() + FixedVMPoolExecutableAllocator() + : MetaAllocator(jitAllocationGranule) // round up all allocations to 32 bytes { - { - MutexLocker lock(allocatorsMutex()); - allocators().remove(this); + size_t reservationSize; + if (Options::jitMemoryReservationSize()) + reservationSize = Options::jitMemoryReservationSize(); + else + reservationSize = fixedExecutableMemoryPoolSize; + reservationSize = roundUpToMultipleOf(pageSize(), reservationSize); + m_reservation = PageReservation::reserveWithGuardPages(reservationSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true); + if (m_reservation) { + ASSERT(m_reservation.size() == reservationSize); + void* reservationBase = m_reservation.base(); + + if (Options::useSeparatedWXHeap()) { + // First page of our JIT allocation is reserved. + ASSERT(reservationSize >= pageSize() * 2); + reservationBase = (void*)((uintptr_t)reservationBase + pageSize()); + reservationSize -= pageSize(); + initializeSeparatedWXHeaps(m_reservation.base(), pageSize(), reservationBase, reservationSize); + } + + addFreshFreeSpace(reservationBase, reservationSize); + + startOfFixedExecutableMemoryPool = reinterpret_cast(reservationBase); + endOfFixedExecutableMemoryPool = startOfFixedExecutableMemoryPool + reservationSize; } - for (unsigned i = 0; i < reservations.size(); ++i) - reservations.at(i).deallocate(); } - static size_t bytesAllocatedByAllAllocators() + virtual ~FixedVMPoolExecutableAllocator(); + +protected: + void* allocateNewSpace(size_t&) override { - size_t total = 0; - MutexLocker lock(allocatorsMutex()); - for (HashSet::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator) - total += (*allocator)->bytesAllocated(); - return total; + // We're operating in a fixed pool, so new allocation is always prohibited. + return 0; } - static size_t bytesCommittedByAllocactors() + void notifyNeedPage(void* page) override { - size_t total = 0; - MutexLocker lock(allocatorsMutex()); - for (HashSet::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator) - total += (*allocator)->bytesCommitted(); - return total; +#if USE(MADV_FREE_FOR_JIT_MEMORY) + UNUSED_PARAM(page); +#else + m_reservation.commit(page, pageSize()); +#endif } -#if ENABLE(META_ALLOCATOR_PROFILE) - static void dumpProfileFromAllAllocators() + void notifyPageIsFree(void* page) override { - MutexLocker lock(allocatorsMutex()); - for (HashSet::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator) - (*allocator)->dumpProfile(); - } +#if USE(MADV_FREE_FOR_JIT_MEMORY) + for (;;) { + int result = madvise(page, pageSize(), MADV_FREE); + if (!result) + return; + ASSERT(result == -1); + if (errno != EAGAIN) { + RELEASE_ASSERT_NOT_REACHED(); // In debug mode, this should be a hard failure. + break; // In release mode, we should just ignore the error - not returning memory to the OS is better than crashing, especially since we _will_ be able to reuse the memory internally anyway. 
+ } + } +#else + m_reservation.decommit(page, pageSize()); #endif + } -protected: - virtual void* allocateNewSpace(size_t& numPages) +private: +#if OS(DARWIN) && HAVE(REMAP_JIT) + void initializeSeparatedWXHeaps(void* stubBase, size_t stubSize, void* jitBase, size_t jitSize) { - size_t newNumPages = (((numPages * pageSize() + JIT_ALLOCATOR_LARGE_ALLOC_SIZE - 1) / JIT_ALLOCATOR_LARGE_ALLOC_SIZE * JIT_ALLOCATOR_LARGE_ALLOC_SIZE) + pageSize() - 1) / pageSize(); - - ASSERT(newNumPages >= numPages); - - numPages = newNumPages; - -#ifdef EXECUTABLE_MEMORY_LIMIT - if (bytesAllocatedByAllAllocators() >= EXECUTABLE_MEMORY_LIMIT) - return 0; + mach_vm_address_t writableAddr = 0; + + // Create a second mapping of the JIT region at a random address. + vm_prot_t cur, max; + int remapFlags = VM_FLAGS_ANYWHERE; +#if defined(VM_FLAGS_RANDOM_ADDR) + remapFlags |= VM_FLAGS_RANDOM_ADDR; +#endif + kern_return_t ret = mach_vm_remap(mach_task_self(), &writableAddr, jitSize, 0, + remapFlags, + mach_task_self(), (mach_vm_address_t)jitBase, FALSE, + &cur, &max, VM_INHERIT_DEFAULT); + + bool remapSucceeded = (ret == KERN_SUCCESS); + if (!remapSucceeded) + return; + + // Assemble a thunk that will serve as the means for writing into the JIT region. + MacroAssemblerCodeRef writeThunk = jitWriteThunkGenerator(reinterpret_cast(writableAddr), stubBase, stubSize); + + int result = 0; + +#if USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION) + // Prevent reading the write thunk code. + result = mprotect(stubBase, stubSize, VM_PROT_EXECUTE_ONLY); + RELEASE_ASSERT(!result); #endif - - PageReservation reservation = PageReservation::reserve(numPages * pageSize(), OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true); - RELEASE_ASSERT(reservation); - - reservations.append(reservation); - - return reservation.base(); + + // Prevent writing into the executable JIT mapping. + result = mprotect(jitBase, jitSize, VM_PROT_READ | VM_PROT_EXECUTE); + RELEASE_ASSERT(!result); + + // Prevent execution in the writable JIT mapping. + result = mprotect((void*)writableAddr, jitSize, VM_PROT_READ | VM_PROT_WRITE); + RELEASE_ASSERT(!result); + + // Zero out writableAddr to avoid leaking the address of the writable mapping. 
+ memset_s(&writableAddr, sizeof(writableAddr), 0, sizeof(writableAddr)); + + jitWriteFunction = reinterpret_cast(writeThunk.code().executableAddress()); } - - virtual void notifyNeedPage(void* page) + +#if CPU(ARM64) && USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION) + MacroAssemblerCodeRef jitWriteThunkGenerator(void* writableAddr, void* stubBase, size_t stubSize) { - OSAllocator::commit(page, pageSize(), EXECUTABLE_POOL_WRITABLE, true); + using namespace ARM64Registers; + using TrustedImm32 = MacroAssembler::TrustedImm32; + + MacroAssembler jit; + + jit.move(MacroAssembler::TrustedImmPtr(writableAddr), x7); + jit.addPtr(x7, x0); + + jit.move(x0, x3); + MacroAssembler::Jump smallCopy = jit.branch64(MacroAssembler::Below, x2, MacroAssembler::TrustedImm64(64)); + + jit.add64(TrustedImm32(32), x3); + jit.and64(TrustedImm32(-32), x3); + jit.loadPair64(x1, x12, x13); + jit.loadPair64(x1, TrustedImm32(16), x14, x15); + jit.sub64(x3, x0, x5); + jit.addPtr(x5, x1); + + jit.loadPair64(x1, x8, x9); + jit.loadPair64(x1, TrustedImm32(16), x10, x11); + jit.add64(TrustedImm32(32), x1); + jit.sub64(x5, x2); + jit.storePair64(x12, x13, x0); + jit.storePair64(x14, x15, x0, TrustedImm32(16)); + MacroAssembler::Jump cleanup = jit.branchSub64(MacroAssembler::BelowOrEqual, TrustedImm32(64), x2); + + MacroAssembler::Label copyLoop = jit.label(); + jit.storePair64WithNonTemporalAccess(x8, x9, x3); + jit.storePair64WithNonTemporalAccess(x10, x11, x3, TrustedImm32(16)); + jit.add64(TrustedImm32(32), x3); + jit.loadPair64WithNonTemporalAccess(x1, x8, x9); + jit.loadPair64WithNonTemporalAccess(x1, TrustedImm32(16), x10, x11); + jit.add64(TrustedImm32(32), x1); + jit.branchSub64(MacroAssembler::Above, TrustedImm32(32), x2).linkTo(copyLoop, &jit); + + cleanup.link(&jit); + jit.add64(x2, x1); + jit.loadPair64(x1, x12, x13); + jit.loadPair64(x1, TrustedImm32(16), x14, x15); + jit.storePair64(x8, x9, x3); + jit.storePair64(x10, x11, x3, TrustedImm32(16)); + jit.addPtr(x2, x3); + jit.storePair64(x12, x13, x3, TrustedImm32(32)); + jit.storePair64(x14, x15, x3, TrustedImm32(48)); + jit.ret(); + + MacroAssembler::Label local0 = jit.label(); + jit.load64(x1, PostIndex(8), x6); + jit.store64(x6, x3, PostIndex(8)); + smallCopy.link(&jit); + jit.branchSub64(MacroAssembler::AboveOrEqual, TrustedImm32(8), x2).linkTo(local0, &jit); + MacroAssembler::Jump local2 = jit.branchAdd64(MacroAssembler::Equal, TrustedImm32(8), x2); + MacroAssembler::Label local1 = jit.label(); + jit.load8(x1, PostIndex(1), x6); + jit.store8(x6, x3, PostIndex(1)); + jit.branchSub64(MacroAssembler::NotEqual, TrustedImm32(1), x2).linkTo(local1, &jit); + local2.link(&jit); + jit.ret(); + + LinkBuffer linkBuffer(jit, stubBase, stubSize); + // We don't use FINALIZE_CODE() for two reasons. + // The first is that we don't want the writeable address, as disassembled instructions, + // to appear in the console or anywhere in memory, via the PrintStream buffer. + // The second is we can't guarantee that the code is readable when using the + // asyncDisassembly option as our caller will set our pages execute only. 
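// initializeSeparatedWXHeaps() above gives the JIT region two views: the original
// mapping stays read+execute, a second mapping of the same pages (obtained with
// mach_vm_remap at a randomized address) stays read+write, and all code writes go
// through a thunk that only knows the writable alias. The standalone POSIX sketch
// below shows the same double-mapping idea with shm_open/mmap; it is a rough
// illustration only (the patch really uses Darwin's mach_vm_remap, and some
// systems disallow PROT_EXEC on shared memory objects).
#include <cstdio>
#include <cstring>
#include <fcntl.h>
#include <sys/mman.h>
#include <unistd.h>

int main()
{
    const size_t size = 4096;
    int fd = shm_open("/wx_demo", O_CREAT | O_RDWR, 0600);
    if (fd < 0)
        return 1;
    shm_unlink("/wx_demo"); // keep the backing object anonymous
    if (ftruncate(fd, size) < 0)
        return 1;

    // One view that may execute but never be written, one that may be written but
    // never execute; both alias the same physical pages.
    void* execView = mmap(nullptr, size, PROT_READ | PROT_EXEC, MAP_SHARED, fd, 0);
    void* writeView = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
    close(fd);
    if (execView == MAP_FAILED || writeView == MAP_FAILED)
        return 1;

    // A write through the RW alias is immediately visible through the RX alias.
    std::memcpy(writeView, "\xc3", 1); // example byte (x86 'ret')
    std::printf("byte seen through exec view: 0x%02x\n",
        static_cast<unsigned char*>(execView)[0]);
    return 0;
}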
+ return linkBuffer.finalizeCodeWithoutDisassembly(); } - - virtual void notifyPageIsFree(void* page) +#else // CPU(ARM64) && USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION) + static void genericWriteToJITRegion(off_t offset, const void* data, size_t dataSize) { - OSAllocator::decommit(page, pageSize()); + memcpy((void*)(startOfFixedWritableMemoryPool + offset), data, dataSize); } -private: - Vector reservations; - static HashSet& allocators() + MacroAssemblerCodeRef jitWriteThunkGenerator(void* address, void*, size_t) { - DEFINE_STATIC_LOCAL(HashSet, sAllocators, ()); - return sAllocators; + startOfFixedWritableMemoryPool = reinterpret_cast(address); + uintptr_t function = (uintptr_t)((void*)&genericWriteToJITRegion); +#if CPU(ARM_THUMB2) + // Handle thumb offset + function -= 1; +#endif + return MacroAssemblerCodeRef::createSelfManagedCodeRef(MacroAssemblerCodePtr((void*)function)); } - static Mutex& allocatorsMutex() +#endif + +#else // OS(DARWIN) && HAVE(REMAP_JIT) + void initializeSeparatedWXHeaps(void*, size_t, void*, size_t) { - DEFINE_STATIC_LOCAL(Mutex, mutex, ()); - return mutex; } -}; +#endif -#if ENABLE(ASSEMBLER_WX_EXCLUSIVE) -void ExecutableAllocator::initializeAllocator() -{ -} -#else -static DemandExecutableAllocator* gAllocator; +private: + PageReservation m_reservation; +}; -namespace { -static inline DemandExecutableAllocator* allocator() -{ - return gAllocator; -} -} +static FixedVMPoolExecutableAllocator* allocator; void ExecutableAllocator::initializeAllocator() { - ASSERT(!gAllocator); - gAllocator = new DemandExecutableAllocator(); - CodeProfiling::notifyAllocator(gAllocator); + ASSERT(!allocator); + allocator = new FixedVMPoolExecutableAllocator(); + CodeProfiling::notifyAllocator(allocator); } -#endif ExecutableAllocator::ExecutableAllocator(VM&) -#if ENABLE(ASSEMBLER_WX_EXCLUSIVE) - : m_allocator(adoptPtr(new DemandExecutableAllocator())) -#endif { - ASSERT(allocator()); + ASSERT(allocator); } ExecutableAllocator::~ExecutableAllocator() { } +FixedVMPoolExecutableAllocator::~FixedVMPoolExecutableAllocator() +{ + m_reservation.deallocate(); +} + bool ExecutableAllocator::isValid() const { - return true; + return !!allocator->bytesReserved(); } bool ExecutableAllocator::underMemoryPressure() { -#ifdef EXECUTABLE_MEMORY_LIMIT - return DemandExecutableAllocator::bytesAllocatedByAllAllocators() > EXECUTABLE_MEMORY_LIMIT / 2; -#else - return false; -#endif + MetaAllocator::Statistics statistics = allocator->currentStatistics(); + return statistics.bytesAllocated > statistics.bytesReserved / 2; } double ExecutableAllocator::memoryPressureMultiplier(size_t addedMemoryUsage) { - double result; -#ifdef EXECUTABLE_MEMORY_LIMIT - size_t bytesAllocated = DemandExecutableAllocator::bytesAllocatedByAllAllocators() + addedMemoryUsage; - if (bytesAllocated >= EXECUTABLE_MEMORY_LIMIT) - bytesAllocated = EXECUTABLE_MEMORY_LIMIT; - result = static_cast(EXECUTABLE_MEMORY_LIMIT) / - (EXECUTABLE_MEMORY_LIMIT - bytesAllocated); -#else - UNUSED_PARAM(addedMemoryUsage); - result = 1.0; -#endif + MetaAllocator::Statistics statistics = allocator->currentStatistics(); + ASSERT(statistics.bytesAllocated <= statistics.bytesReserved); + size_t bytesAllocated = statistics.bytesAllocated + addedMemoryUsage; + size_t bytesAvailable = static_cast( + statistics.bytesReserved * (1 - executablePoolReservationFraction)); + if (bytesAllocated >= bytesAvailable) + bytesAllocated = bytesAvailable; + double result = 1.0; + size_t divisor = bytesAvailable - bytesAllocated; + if (divisor) + result = 
static_cast(bytesAvailable) / divisor; if (result < 1.0) result = 1.0; return result; - } -PassRefPtr ExecutableAllocator::allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort) +RefPtr ExecutableAllocator::allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort) { - RefPtr result = allocator()->allocate(sizeInBytes, ownerUID); - RELEASE_ASSERT(result || effort != JITCompilationMustSucceed); - return result.release(); + if (Options::logExecutableAllocation()) { + MetaAllocator::Statistics stats = allocator->currentStatistics(); + dataLog("Allocating ", sizeInBytes, " bytes of executable memory with ", stats.bytesAllocated, " bytes allocated, ", stats.bytesReserved, " bytes reserved, and ", stats.bytesCommitted, " committed.\n"); + } + + if (effort != JITCompilationCanFail && Options::reportMustSucceedExecutableAllocations()) { + dataLog("Allocating ", sizeInBytes, " bytes of executable memory with JITCompilationMustSucceed.\n"); + WTFReportBacktrace(); + } + + if (effort == JITCompilationCanFail + && doExecutableAllocationFuzzingIfEnabled() == PretendToFailExecutableAllocation) + return nullptr; + + if (effort == JITCompilationCanFail) { + // Don't allow allocations if we are down to reserve. + MetaAllocator::Statistics statistics = allocator->currentStatistics(); + size_t bytesAllocated = statistics.bytesAllocated + sizeInBytes; + size_t bytesAvailable = static_cast( + statistics.bytesReserved * (1 - executablePoolReservationFraction)); + if (bytesAllocated > bytesAvailable) + return nullptr; + } + + RefPtr result = allocator->allocate(sizeInBytes, ownerUID); + if (!result) { + if (effort != JITCompilationCanFail) { + dataLog("Ran out of executable memory while allocating ", sizeInBytes, " bytes.\n"); + CRASH(); + } + return nullptr; + } + return result; } -size_t ExecutableAllocator::committedByteCount() +bool ExecutableAllocator::isValidExecutableMemory(const LockHolder& locker, void* address) { - return DemandExecutableAllocator::bytesCommittedByAllocactors(); + return allocator->isInAllocatedMemory(locker, address); } -#if ENABLE(META_ALLOCATOR_PROFILE) -void ExecutableAllocator::dumpProfile() +Lock& ExecutableAllocator::getLock() const { - DemandExecutableAllocator::dumpProfileFromAllAllocators(); + return allocator->getLock(); } -#endif - -#endif // ENABLE(EXECUTABLE_ALLOCATOR_DEMAND) - -#if ENABLE(ASSEMBLER_WX_EXCLUSIVE) -#if OS(WINDOWS) -#error "ASSEMBLER_WX_EXCLUSIVE not yet suported on this platform." -#endif - -void ExecutableAllocator::reprotectRegion(void* start, size_t size, ProtectionSetting setting) +size_t ExecutableAllocator::committedByteCount() { - size_t pageSize = WTF::pageSize(); - - // Calculate the start of the page containing this region, - // and account for this extra memory within size. - intptr_t startPtr = reinterpret_cast(start); - intptr_t pageStartPtr = startPtr & ~(pageSize - 1); - void* pageStart = reinterpret_cast(pageStartPtr); - size += (startPtr - pageStartPtr); - - // Round size up - size += (pageSize - 1); - size &= ~(pageSize - 1); - - mprotect(pageStart, size, (setting == Writable) ? 
PROTECTION_FLAGS_RW : PROTECTION_FLAGS_RX); + return allocator->bytesCommitted(); } +#if ENABLE(META_ALLOCATOR_PROFILE) +void ExecutableAllocator::dumpProfile() +{ + allocator->dumpProfile(); +} #endif - + } -#endif // HAVE(ASSEMBLER) +#endif // ENABLE(ASSEMBLER) diff --git a/Source/JavaScriptCore/jit/ExecutableAllocator.h b/Source/JavaScriptCore/jit/ExecutableAllocator.h index 01be7c1aa..a686e7217 100644 --- a/Source/JavaScriptCore/jit/ExecutableAllocator.h +++ b/Source/JavaScriptCore/jit/ExecutableAllocator.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2017 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -23,18 +23,16 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef ExecutableAllocator_h -#define ExecutableAllocator_h +#pragma once + #include "JITCompilationEffort.h" #include // for ptrdiff_t #include #include +#include #include #include #include -#include -#include -#include #if OS(IOS) #include @@ -48,71 +46,57 @@ #include #endif -#if CPU(SH4) && OS(LINUX) -#include -#include -#include -#include -#endif - -#if OS(WINCE) -// From pkfuncs.h (private header file from the Platform Builder) -#define CACHE_SYNC_ALL 0x07F -extern "C" __declspec(dllimport) void CacheRangeFlush(LPVOID pAddr, DWORD dwLength, DWORD dwFlags); -#endif - #define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (pageSize() * 4) -#if ENABLE(ASSEMBLER_WX_EXCLUSIVE) -#define PROTECTION_FLAGS_RW (PROT_READ | PROT_WRITE) -#define PROTECTION_FLAGS_RX (PROT_READ | PROT_EXEC) -#define EXECUTABLE_POOL_WRITABLE false -#else #define EXECUTABLE_POOL_WRITABLE true -#endif namespace JSC { class VM; -void releaseExecutableMemory(VM&); static const unsigned jitAllocationGranule = 32; -inline size_t roundUpAllocationSize(size_t request, size_t granularity) -{ - RELEASE_ASSERT((std::numeric_limits::max() - granularity) > request); - - // Round up to next page boundary - size_t size = request + (granularity - 1); - size = size & ~(granularity - 1); - ASSERT(size >= request); - return size; -} - -} - -namespace JSC { - typedef WTF::MetaAllocatorHandle ExecutableMemoryHandle; #if ENABLE(ASSEMBLER) -#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND) -class DemandExecutableAllocator; -#endif - -#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED) -#if CPU(ARM) || CPU(ARM64) +#if defined(FIXED_EXECUTABLE_MEMORY_POOL_SIZE_IN_MB) && FIXED_EXECUTABLE_MEMORY_POOL_SIZE_IN_MB > 0 +static const size_t fixedExecutableMemoryPoolSize = FIXED_EXECUTABLE_MEMORY_POOL_SIZE_IN_MB * 1024 * 1024; +#elif CPU(ARM) static const size_t fixedExecutableMemoryPoolSize = 16 * 1024 * 1024; +#elif CPU(ARM64) +static const size_t fixedExecutableMemoryPoolSize = 32 * 1024 * 1024; #elif CPU(X86_64) static const size_t fixedExecutableMemoryPoolSize = 1024 * 1024 * 1024; #else static const size_t fixedExecutableMemoryPoolSize = 32 * 1024 * 1024; #endif - -extern uintptr_t startOfFixedExecutableMemoryPool; +#if CPU(ARM) +static const double executablePoolReservationFraction = 0.15; +#else +static const double executablePoolReservationFraction = 0.25; #endif +extern JS_EXPORTDATA uintptr_t startOfFixedExecutableMemoryPool; +extern JS_EXPORTDATA uintptr_t endOfFixedExecutableMemoryPool; + +typedef void (*JITWriteFunction)(off_t, const void*, size_t); +extern JS_EXPORTDATA JITWriteFunction jitWriteFunction; + +static inline void* performJITMemcpy(void *dst, const void *src, size_t n) +{ + // Use 
execute-only write thunk for writes inside the JIT region. This is a variant of + // memcpy that takes an offset into the JIT region as its destination (first) parameter. + if (jitWriteFunction && (uintptr_t)dst >= startOfFixedExecutableMemoryPool && (uintptr_t)dst <= endOfFixedExecutableMemoryPool) { + off_t offset = (off_t)((uintptr_t)dst - startOfFixedExecutableMemoryPool); + jitWriteFunction(offset, src, n); + return dst; + } + + // Use regular memcpy for writes outside the JIT region. + return memcpy(dst, src, n); +} + class ExecutableAllocator { enum ProtectionSetting { Writable, Executable }; @@ -134,40 +118,15 @@ public: static void dumpProfile() { } #endif - PassRefPtr allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort); - -#if ENABLE(ASSEMBLER_WX_EXCLUSIVE) - static void makeWritable(void* start, size_t size) - { - reprotectRegion(start, size, Writable); - } + RefPtr allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort); - static void makeExecutable(void* start, size_t size) - { - reprotectRegion(start, size, Executable); - } -#else - static void makeWritable(void*, size_t) {} - static void makeExecutable(void*, size_t) {} -#endif + bool isValidExecutableMemory(const LockHolder&, void* address); static size_t committedByteCount(); -private: - -#if ENABLE(ASSEMBLER_WX_EXCLUSIVE) - static void reprotectRegion(void*, size_t, ProtectionSetting); -#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND) - // We create a MetaAllocator for each JS global object. - OwnPtr m_allocator; - DemandExecutableAllocator* allocator() { return m_allocator.get(); } -#endif -#endif - + Lock& getLock() const; }; #endif // ENABLE(JIT) && ENABLE(ASSEMBLER) } // namespace JSC - -#endif // !defined(ExecutableAllocator) diff --git a/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp b/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp deleted file mode 100644 index 8e0b77cfc..000000000 --- a/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp +++ /dev/null @@ -1,194 +0,0 @@ -/* - * Copyright (C) 2009 Apple Inc. All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * 1. Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * 2. Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in the - * documentation and/or other materials provided with the distribution. - * - * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY - * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE - * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR - * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR - * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, - * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, - * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR - * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY - * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
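The replacement allocate() above only refuses a JITCompilationCanFail request once the pool is down to its reserve: the request fails when the post-allocation total would exceed bytesReserved * (1 - executablePoolReservationFraction). A minimal stand-alone sketch of that headroom test, using plain integers instead of the real MetaAllocator::Statistics type (the 0.25/0.15 fractions and the 32MB ARM64 pool size are the constants declared in ExecutableAllocator.h above):

// Illustrative sketch only: models the reserve-headroom test from
// ExecutableAllocator::allocate() with plain types, not the real JSC classes.
#include <cstddef>
#include <cstdio>

static const double poolReservationFraction = 0.25; // 0.15 on ARM per the header above

static bool hasHeadroom(size_t bytesAllocated, size_t bytesReserved, size_t requestSize)
{
    // A JITCompilationCanFail request is rejected once the pool is into its reserve.
    size_t wouldBeAllocated = bytesAllocated + requestSize;
    size_t available = static_cast<size_t>(bytesReserved * (1 - poolReservationFraction));
    return wouldBeAllocated <= available;
}

int main()
{
    size_t reserved = 32 * 1024 * 1024; // e.g. the 32MB ARM64 fixed pool
    printf("%d\n", hasHeadroom(20 * 1024 * 1024, reserved, 1024 * 1024)); // 1: still headroom
    printf("%d\n", hasHeadroom(24 * 1024 * 1024, reserved, 1024 * 1024)); // 0: would dip into the 25% reserve
    return 0;
}

Requests made with JITCompilationMustSucceed skip this test entirely and crash if the underlying allocation fails, as the hunk above shows.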
- */ - -#include "config.h" - -#include "ExecutableAllocator.h" - -#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED) - -#include "CodeProfiling.h" -#include -#include -#include -#include -#include - -#if OS(DARWIN) -#include -#endif - -#if OS(LINUX) -#include -#endif - -#if !PLATFORM(IOS) && PLATFORM(MAC) && __MAC_OS_X_VERSION_MIN_REQUIRED < 1090 -// MADV_FREE_REUSABLE does not work for JIT memory on older OSes so use MADV_FREE in that case. -#define WTF_USE_MADV_FREE_FOR_JIT_MEMORY 1 -#endif - -using namespace WTF; - -namespace JSC { - -uintptr_t startOfFixedExecutableMemoryPool; - -class FixedVMPoolExecutableAllocator : public MetaAllocator { - WTF_MAKE_FAST_ALLOCATED; -public: - FixedVMPoolExecutableAllocator() - : MetaAllocator(jitAllocationGranule) // round up all allocations to 32 bytes - { - m_reservation = PageReservation::reserveWithGuardPages(fixedExecutableMemoryPoolSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true); -#if !ENABLE(LLINT) - RELEASE_ASSERT(m_reservation); -#endif - if (m_reservation) { - ASSERT(m_reservation.size() == fixedExecutableMemoryPoolSize); - addFreshFreeSpace(m_reservation.base(), m_reservation.size()); - - startOfFixedExecutableMemoryPool = reinterpret_cast(m_reservation.base()); - } - } - - virtual ~FixedVMPoolExecutableAllocator(); - -protected: - virtual void* allocateNewSpace(size_t&) override - { - // We're operating in a fixed pool, so new allocation is always prohibited. - return 0; - } - - virtual void notifyNeedPage(void* page) override - { -#if USE(MADV_FREE_FOR_JIT_MEMORY) - UNUSED_PARAM(page); -#else - m_reservation.commit(page, pageSize()); -#endif - } - - virtual void notifyPageIsFree(void* page) override - { -#if USE(MADV_FREE_FOR_JIT_MEMORY) - for (;;) { - int result = madvise(page, pageSize(), MADV_FREE); - if (!result) - return; - ASSERT(result == -1); - if (errno != EAGAIN) { - RELEASE_ASSERT_NOT_REACHED(); // In debug mode, this should be a hard failure. - break; // In release mode, we should just ignore the error - not returning memory to the OS is better than crashing, especially since we _will_ be able to reuse the memory internally anyway. 
- } - } -#else - m_reservation.decommit(page, pageSize()); -#endif - } - -private: - PageReservation m_reservation; -}; - -static FixedVMPoolExecutableAllocator* allocator; - -void ExecutableAllocator::initializeAllocator() -{ - ASSERT(!allocator); - allocator = new FixedVMPoolExecutableAllocator(); - CodeProfiling::notifyAllocator(allocator); -} - -ExecutableAllocator::ExecutableAllocator(VM&) -{ - ASSERT(allocator); -} - -ExecutableAllocator::~ExecutableAllocator() -{ -} - -FixedVMPoolExecutableAllocator::~FixedVMPoolExecutableAllocator() -{ - m_reservation.deallocate(); -} - -bool ExecutableAllocator::isValid() const -{ - return !!allocator->bytesReserved(); -} - -bool ExecutableAllocator::underMemoryPressure() -{ - MetaAllocator::Statistics statistics = allocator->currentStatistics(); - return statistics.bytesAllocated > statistics.bytesReserved / 2; -} - -double ExecutableAllocator::memoryPressureMultiplier(size_t addedMemoryUsage) -{ - MetaAllocator::Statistics statistics = allocator->currentStatistics(); - ASSERT(statistics.bytesAllocated <= statistics.bytesReserved); - size_t bytesAllocated = statistics.bytesAllocated + addedMemoryUsage; - if (bytesAllocated >= statistics.bytesReserved) - bytesAllocated = statistics.bytesReserved; - double result = 1.0; - size_t divisor = statistics.bytesReserved - bytesAllocated; - if (divisor) - result = static_cast(statistics.bytesReserved) / divisor; - if (result < 1.0) - result = 1.0; - return result; -} - -PassRefPtr ExecutableAllocator::allocate(VM& vm, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort) -{ - RefPtr result = allocator->allocate(sizeInBytes, ownerUID); - if (!result) { - if (effort == JITCompilationCanFail) - return result; - releaseExecutableMemory(vm); - result = allocator->allocate(sizeInBytes, ownerUID); - RELEASE_ASSERT(result); - } - return result.release(); -} - -size_t ExecutableAllocator::committedByteCount() -{ - return allocator->bytesCommitted(); -} - -#if ENABLE(META_ALLOCATOR_PROFILE) -void ExecutableAllocator::dumpProfile() -{ - allocator->dumpProfile(); -} -#endif - -} - - -#endif // ENABLE(EXECUTABLE_ALLOCATOR_FIXED) diff --git a/Source/JavaScriptCore/jit/FPRInfo.h b/Source/JavaScriptCore/jit/FPRInfo.h index 5bb0e16cc..ec0ab125a 100644 --- a/Source/JavaScriptCore/jit/FPRInfo.h +++ b/Source/JavaScriptCore/jit/FPRInfo.h @@ -23,8 +23,7 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef FPRInfo_h -#define FPRInfo_h +#pragma once #include "MacroAssembler.h" #include @@ -42,6 +41,7 @@ class FPRInfo { public: typedef FPRReg RegisterType; static const unsigned numberOfRegisters = 6; + static const unsigned numberOfArgumentRegisters = 8; // Temporary registers. static const FPRReg fpRegT0 = X86Registers::xmm0; @@ -56,6 +56,10 @@ public: static const FPRReg argumentFPR1 = X86Registers::xmm1; // fpRegT1 static const FPRReg argumentFPR2 = X86Registers::xmm2; // fpRegT2 static const FPRReg argumentFPR3 = X86Registers::xmm3; // fpRegT3 + static const FPRReg argumentFPR4 = X86Registers::xmm4; // fpRegT4 + static const FPRReg argumentFPR5 = X86Registers::xmm5; // fpRegT5 + static const FPRReg argumentFPR6 = X86Registers::xmm6; + static const FPRReg argumentFPR7 = X86Registers::xmm7; #endif // On X86 the return will actually be on the x87 stack, // so we'll copy to xmm0 for sanity! @@ -182,6 +186,7 @@ class FPRInfo { public: typedef FPRReg RegisterType; static const unsigned numberOfRegisters = 23; + static const unsigned numberOfArgumentRegisters = 8; // Temporary registers. 
// q8-q15 are callee saved, q31 is use by the MacroAssembler as fpTempRegister. @@ -208,6 +213,14 @@ public: static const FPRReg fpRegT20 = ARM64Registers::q28; static const FPRReg fpRegT21 = ARM64Registers::q29; static const FPRReg fpRegT22 = ARM64Registers::q30; + static const FPRReg fpRegCS0 = ARM64Registers::q8; + static const FPRReg fpRegCS1 = ARM64Registers::q9; + static const FPRReg fpRegCS2 = ARM64Registers::q10; + static const FPRReg fpRegCS3 = ARM64Registers::q11; + static const FPRReg fpRegCS4 = ARM64Registers::q12; + static const FPRReg fpRegCS5 = ARM64Registers::q13; + static const FPRReg fpRegCS6 = ARM64Registers::q14; + static const FPRReg fpRegCS7 = ARM64Registers::q15; static const FPRReg argumentFPR0 = ARM64Registers::q0; // fpRegT0 static const FPRReg argumentFPR1 = ARM64Registers::q1; // fpRegT1 @@ -242,10 +255,15 @@ public: 16, 17, 18, 19, 20, 21, 22, InvalidIndex }; unsigned result = indexForRegister[reg]; - ASSERT(result != InvalidIndex); return result; } + static FPRReg toArgumentRegister(unsigned index) + { + ASSERT(index < 8); + return static_cast(index); + } + static const char* debugName(FPRReg reg) { ASSERT(reg != InvalidFPRReg); @@ -269,15 +287,16 @@ public: class FPRInfo { public: typedef FPRReg RegisterType; - static const unsigned numberOfRegisters = 6; + static const unsigned numberOfRegisters = 7; // Temporary registers. static const FPRReg fpRegT0 = MIPSRegisters::f0; - static const FPRReg fpRegT1 = MIPSRegisters::f4; - static const FPRReg fpRegT2 = MIPSRegisters::f6; - static const FPRReg fpRegT3 = MIPSRegisters::f8; - static const FPRReg fpRegT4 = MIPSRegisters::f10; - static const FPRReg fpRegT5 = MIPSRegisters::f18; + static const FPRReg fpRegT1 = MIPSRegisters::f2; + static const FPRReg fpRegT2 = MIPSRegisters::f4; + static const FPRReg fpRegT3 = MIPSRegisters::f6; + static const FPRReg fpRegT4 = MIPSRegisters::f8; + static const FPRReg fpRegT5 = MIPSRegisters::f10; + static const FPRReg fpRegT6 = MIPSRegisters::f18; static const FPRReg returnValueFPR = MIPSRegisters::f0; @@ -287,7 +306,7 @@ public: static FPRReg toRegister(unsigned index) { static const FPRReg registerForIndex[numberOfRegisters] = { - fpRegT0, fpRegT1, fpRegT2, fpRegT3, fpRegT4, fpRegT5 }; + fpRegT0, fpRegT1, fpRegT2, fpRegT3, fpRegT4, fpRegT5, fpRegT6 }; ASSERT(index < numberOfRegisters); return registerForIndex[index]; @@ -298,14 +317,13 @@ public: ASSERT(reg != InvalidFPRReg); ASSERT(reg < 20); static const unsigned indexForRegister[20] = { - 0, InvalidIndex, InvalidIndex, InvalidIndex, - 1, InvalidIndex, 2, InvalidIndex, - 3, InvalidIndex, 4, InvalidIndex, + 0, InvalidIndex, 1, InvalidIndex, + 2, InvalidIndex, 3, InvalidIndex, + 4, InvalidIndex, 5, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, - InvalidIndex, InvalidIndex, 5, InvalidIndex, + InvalidIndex, InvalidIndex, 6, InvalidIndex, }; unsigned result = indexForRegister[reg]; - ASSERT(result != InvalidIndex); return result; } @@ -331,68 +349,6 @@ public: #endif // CPU(MIPS) -#if CPU(SH4) - -class FPRInfo { -public: - typedef FPRReg RegisterType; - static const unsigned numberOfRegisters = 6; - - // Temporary registers. 
- static const FPRReg fpRegT0 = SH4Registers::dr0; - static const FPRReg fpRegT1 = SH4Registers::dr2; - static const FPRReg fpRegT2 = SH4Registers::dr4; - static const FPRReg fpRegT3 = SH4Registers::dr6; - static const FPRReg fpRegT4 = SH4Registers::dr8; - static const FPRReg fpRegT5 = SH4Registers::dr10; - - static const FPRReg returnValueFPR = SH4Registers::dr0; - - static const FPRReg argumentFPR0 = SH4Registers::dr4; - static const FPRReg argumentFPR1 = SH4Registers::dr6; - - static FPRReg toRegister(unsigned index) - { - static const FPRReg registerForIndex[numberOfRegisters] = { - fpRegT0, fpRegT1, fpRegT2, fpRegT3, fpRegT4, fpRegT5 }; - - ASSERT(index < numberOfRegisters); - return registerForIndex[index]; - } - - static unsigned toIndex(FPRReg reg) - { - ASSERT(reg != InvalidFPRReg); - ASSERT(reg < 16); - static const unsigned indexForRegister[16] = { - 0, InvalidIndex, 1, InvalidIndex, - 2, InvalidIndex, 3, InvalidIndex, - 4, InvalidIndex, 5, InvalidIndex, - InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex - }; - unsigned result = indexForRegister[reg]; - ASSERT(result != InvalidIndex); - return result; - } - - static const char* debugName(FPRReg reg) - { - ASSERT(reg != InvalidFPRReg); - ASSERT(reg < 16); - static const char* nameForRegister[16] = { - "dr0", "fr1", "dr2", "fr3", - "dr4", "fr5", "dr6", "fr7", - "dr8", "fr9", "dr10", "fr11", - "dr12", "fr13", "dr14", "fr15" - }; - return nameForRegister[reg]; - } - - static const unsigned InvalidIndex = 0xffffffff; -}; - -#endif // CPU(SH4) - #endif // ENABLE(JIT) } // namespace JSC @@ -409,5 +365,3 @@ inline void printInternal(PrintStream& out, JSC::FPRReg reg) } } // namespace WTF - -#endif diff --git a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp index f681dd847..bab6de13b 100644 --- a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp +++ b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2016 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -28,24 +28,27 @@ #if ENABLE(JIT) +#include "CodeBlock.h" +#include "DFGCommonData.h" #include "Heap.h" #include "VM.h" -#include "Operations.h" +#include "JITStubRoutineSet.h" +#include "JSCInlines.h" #include "SlotVisitor.h" #include "Structure.h" +#include namespace JSC { GCAwareJITStubRoutine::GCAwareJITStubRoutine( - const MacroAssemblerCodeRef& code, VM& vm, bool isClosureCall) + const MacroAssemblerCodeRef& code, VM& vm) : JITStubRoutine(code) , m_mayBeExecuting(false) , m_isJettisoned(false) - , m_isClosureCall(isClosureCall) { - vm.heap.m_jitStubRoutines.add(this); + vm.heap.m_jitStubRoutines->add(this); } - + GCAwareJITStubRoutine::~GCAwareJITStubRoutine() { } void GCAwareJITStubRoutine::observeZeroRefCount() @@ -78,48 +81,78 @@ void GCAwareJITStubRoutine::markRequiredObjectsInternal(SlotVisitor&) { } -MarkingGCAwareJITStubRoutineWithOneObject::MarkingGCAwareJITStubRoutineWithOneObject( +MarkingGCAwareJITStubRoutine::MarkingGCAwareJITStubRoutine( const MacroAssemblerCodeRef& code, VM& vm, const JSCell* owner, - JSCell* object) + const Vector& cells) : GCAwareJITStubRoutine(code, vm) - , m_object(vm, owner, object) + , m_cells(cells.size()) { + for (unsigned i = cells.size(); i--;) + m_cells[i].set(vm, owner, cells[i]); } -MarkingGCAwareJITStubRoutineWithOneObject::~MarkingGCAwareJITStubRoutineWithOneObject() +MarkingGCAwareJITStubRoutine::~MarkingGCAwareJITStubRoutine() { } -void MarkingGCAwareJITStubRoutineWithOneObject::markRequiredObjectsInternal(SlotVisitor& visitor) +void MarkingGCAwareJITStubRoutine::markRequiredObjectsInternal(SlotVisitor& visitor) { - visitor.append(&m_object); + for (auto& entry : m_cells) + visitor.append(entry); } -PassRefPtr createJITStubRoutine( - const MacroAssemblerCodeRef& code, - VM& vm, - const JSCell*, - bool makesCalls) + +GCAwareJITStubRoutineWithExceptionHandler::GCAwareJITStubRoutineWithExceptionHandler( + const MacroAssemblerCodeRef& code, VM& vm, const JSCell* owner, const Vector& cells, + CodeBlock* codeBlockForExceptionHandlers, CallSiteIndex exceptionHandlerCallSiteIndex) + : MarkingGCAwareJITStubRoutine(code, vm, owner, cells) + , m_codeBlockWithExceptionHandler(codeBlockForExceptionHandlers) + , m_exceptionHandlerCallSiteIndex(exceptionHandlerCallSiteIndex) { - if (!makesCalls) - return adoptRef(new JITStubRoutine(code)); + RELEASE_ASSERT(m_codeBlockWithExceptionHandler); + ASSERT(!!m_codeBlockWithExceptionHandler->handlerForIndex(exceptionHandlerCallSiteIndex.bits())); +} - return static_pointer_cast( - adoptRef(new GCAwareJITStubRoutine(code, vm))); +void GCAwareJITStubRoutineWithExceptionHandler::aboutToDie() +{ + m_codeBlockWithExceptionHandler = nullptr; } -PassRefPtr createJITStubRoutine( +void GCAwareJITStubRoutineWithExceptionHandler::observeZeroRefCount() +{ +#if ENABLE(DFG_JIT) + if (m_codeBlockWithExceptionHandler) { + m_codeBlockWithExceptionHandler->jitCode()->dfgCommon()->removeCallSiteIndex(m_exceptionHandlerCallSiteIndex); + m_codeBlockWithExceptionHandler->removeExceptionHandlerForCallSite(m_exceptionHandlerCallSiteIndex); + m_codeBlockWithExceptionHandler = nullptr; + } +#endif + + Base::observeZeroRefCount(); +} + + +Ref createJITStubRoutine( const MacroAssemblerCodeRef& code, VM& vm, const JSCell* owner, bool makesCalls, - JSCell* object) + const Vector& cells, + CodeBlock* codeBlockForExceptionHandlers, + CallSiteIndex exceptionHandlerCallSiteIndex) { if (!makesCalls) - 
return adoptRef(new JITStubRoutine(code)); + return adoptRef(*new JITStubRoutine(code)); + + if (codeBlockForExceptionHandlers) { + RELEASE_ASSERT(JITCode::isOptimizingJIT(codeBlockForExceptionHandlers->jitType())); + return adoptRef(*new GCAwareJITStubRoutineWithExceptionHandler(code, vm, owner, cells, codeBlockForExceptionHandlers, exceptionHandlerCallSiteIndex)); + } + + if (cells.isEmpty()) + return adoptRef(*new GCAwareJITStubRoutine(code, vm)); - return static_pointer_cast( - adoptRef(new MarkingGCAwareJITStubRoutineWithOneObject(code, vm, owner, object))); + return adoptRef(*new MarkingGCAwareJITStubRoutine(code, vm, owner, cells)); } } // namespace JSC diff --git a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h index 03045c5d1..5ee36ca46 100644 --- a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h +++ b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2012 Apple Inc. All rights reserved. + * Copyright (C) 2012, 2014, 2016 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -23,10 +23,7 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef GCAwareJITStubRoutine_h -#define GCAwareJITStubRoutine_h - -#include +#pragma once #if ENABLE(JIT) @@ -34,7 +31,6 @@ #include "JSObject.h" #include "JSString.h" #include "WriteBarrier.h" -#include #include namespace JSC { @@ -54,7 +50,7 @@ class JITStubRoutineSet; // list which does not get reclaimed all at once). class GCAwareJITStubRoutine : public JITStubRoutine { public: - GCAwareJITStubRoutine(const MacroAssemblerCodeRef&, VM&, bool isClosureCall = false); + GCAwareJITStubRoutine(const MacroAssemblerCodeRef&, VM&); virtual ~GCAwareJITStubRoutine(); void markRequiredObjects(SlotVisitor& visitor) @@ -64,10 +60,8 @@ public: void deleteFromGC(); - bool isClosureCall() const { return m_isClosureCall; } - protected: - virtual void observeZeroRefCount() override; + void observeZeroRefCount() override; virtual void markRequiredObjectsInternal(SlotVisitor&); @@ -76,22 +70,39 @@ private: bool m_mayBeExecuting; bool m_isJettisoned; - bool m_isClosureCall; }; // Use this if you want to mark one additional object during GC if your stub // routine is known to be executing. -class MarkingGCAwareJITStubRoutineWithOneObject : public GCAwareJITStubRoutine { +class MarkingGCAwareJITStubRoutine : public GCAwareJITStubRoutine { public: - MarkingGCAwareJITStubRoutineWithOneObject( - const MacroAssemblerCodeRef&, VM&, const JSCell* owner, JSCell*); - virtual ~MarkingGCAwareJITStubRoutineWithOneObject(); + MarkingGCAwareJITStubRoutine( + const MacroAssemblerCodeRef&, VM&, const JSCell* owner, const Vector&); + virtual ~MarkingGCAwareJITStubRoutine(); protected: - virtual void markRequiredObjectsInternal(SlotVisitor&) override; + void markRequiredObjectsInternal(SlotVisitor&) override; + +private: + Vector> m_cells; +}; + + +// The stub has exception handlers in it. So it clears itself from exception +// handling table when it dies. It also frees space in CodeOrigin table +// for new exception handlers to use the same CallSiteIndex. 
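createJITStubRoutine() above picks the most specific routine class the call site needs: a routine with exception handlers must also unregister them when it dies, one that references cells must keep those cells marked, one that merely makes calls only needs executing-state tracking, and a routine that makes no calls can stay a bare JITStubRoutine. A compact sketch of that precedence with stand-in types (not the real JSC classes):

// Sketch with stand-in types: mirrors the precedence used by createJITStubRoutine().
#include <cstdio>

enum class RoutineKind { Plain, GCAware, Marking, MarkingWithExceptionHandler };

static RoutineKind chooseRoutineKind(bool makesCalls, bool hasExceptionHandlerCodeBlock, unsigned cellCount)
{
    if (!makesCalls)
        return RoutineKind::Plain;                       // no GC interaction needed
    if (hasExceptionHandlerCodeBlock)
        return RoutineKind::MarkingWithExceptionHandler; // must remove its handler on death
    if (!cellCount)
        return RoutineKind::GCAware;                     // executing-state tracking only
    return RoutineKind::Marking;                         // additionally keeps cellCount cells alive
}

int main()
{
    printf("%d\n", static_cast<int>(chooseRoutineKind(false, false, 0))); // Plain
    printf("%d\n", static_cast<int>(chooseRoutineKind(true, true, 2)));   // MarkingWithExceptionHandler
    printf("%d\n", static_cast<int>(chooseRoutineKind(true, false, 0)));  // GCAware
    printf("%d\n", static_cast<int>(chooseRoutineKind(true, false, 3)));  // Marking
    return 0;
}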
+class GCAwareJITStubRoutineWithExceptionHandler : public MarkingGCAwareJITStubRoutine { +public: + typedef GCAwareJITStubRoutine Base; + + GCAwareJITStubRoutineWithExceptionHandler(const MacroAssemblerCodeRef&, VM&, const JSCell* owner, const Vector&, CodeBlock*, CallSiteIndex); + + void aboutToDie() override; + void observeZeroRefCount() override; private: - WriteBarrier m_object; + CodeBlock* m_codeBlockWithExceptionHandler; + CallSiteIndex m_exceptionHandlerCallSiteIndex; }; // Helper for easily creating a GC-aware JIT stub routine. For the varargs, @@ -100,7 +111,7 @@ private: // appropriate. Generally you only need to pass pointers that will be used // after the first call to C++ or JS. // -// PassRefPtr createJITStubRoutine( +// Ref createJITStubRoutine( // const MacroAssemblerCodeRef& code, // VM& vm, // const JSCell* owner, @@ -113,15 +124,11 @@ private: // this function using varargs, I ended up with more code than this simple // way. -PassRefPtr createJITStubRoutine( - const MacroAssemblerCodeRef&, VM&, const JSCell* owner, bool makesCalls); -PassRefPtr createJITStubRoutine( +Ref createJITStubRoutine( const MacroAssemblerCodeRef&, VM&, const JSCell* owner, bool makesCalls, - JSCell*); + const Vector& = { }, + CodeBlock* codeBlockForExceptionHandlers = nullptr, CallSiteIndex exceptionHandlingCallSiteIndex = CallSiteIndex(std::numeric_limits::max())); } // namespace JSC #endif // ENABLE(JIT) - -#endif // GCAwareJITStubRoutine_h - diff --git a/Source/JavaScriptCore/jit/GPRInfo.cpp b/Source/JavaScriptCore/jit/GPRInfo.cpp new file mode 100644 index 000000000..5a8005f9b --- /dev/null +++ b/Source/JavaScriptCore/jit/GPRInfo.cpp @@ -0,0 +1,51 @@ +/* + * Copyright (C) 2015 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "GPRInfo.h" + +#if ENABLE(JIT) + +namespace JSC { + +void JSValueRegs::dump(PrintStream& out) const +{ +#if USE(JSVALUE64) + out.print(m_gpr); +#else + out.print("(tag:", tagGPR(), ", payload:", payloadGPR(), ")"); +#endif +} + +// This is in the .cpp file to work around clang issues. 
+#if CPU(X86_64) +const GPRReg GPRInfo::patchpointScratchRegister = MacroAssembler::s_scratchRegister; +#elif CPU(ARM64) +const GPRReg GPRInfo::patchpointScratchRegister = ARM64Registers::ip0; +#endif + +} // namespace JSC + +#endif // ENABLE(JIT) diff --git a/Source/JavaScriptCore/jit/GPRInfo.h b/Source/JavaScriptCore/jit/GPRInfo.h index 393a56b50..f7e4a6b2c 100644 --- a/Source/JavaScriptCore/jit/GPRInfo.h +++ b/Source/JavaScriptCore/jit/GPRInfo.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2013-2016 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -23,14 +23,21 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef GPRInfo_h -#define GPRInfo_h +#pragma once #include "MacroAssembler.h" +#include #include namespace JSC { +enum NoResultTag { NoResult }; + +// We use the same conventions in the basline JIT as in the LLint. If you +// change mappings in the GPRInfo, you should change them in the offlineasm +// compiler adequately. The register naming conventions are described at the +// top of the LowLevelInterpreter.asm file. + typedef MacroAssembler::RegisterID GPRReg; #define InvalidGPRReg ((::JSC::GPRReg)-1) @@ -54,12 +61,25 @@ public: return JSValueRegs(gpr); } + static JSValueRegs withTwoAvailableRegs(GPRReg gpr, GPRReg) + { + return JSValueRegs(gpr); + } + bool operator!() const { return m_gpr == InvalidGPRReg; } + explicit operator bool() const { return m_gpr != InvalidGPRReg; } + + bool operator==(JSValueRegs other) { return m_gpr == other.m_gpr; } + bool operator!=(JSValueRegs other) { return !(*this == other); } GPRReg gpr() const { return m_gpr; } GPRReg tagGPR() const { return InvalidGPRReg; } GPRReg payloadGPR() const { return m_gpr; } + bool uses(GPRReg gpr) const { return m_gpr == gpr; } + + void dump(PrintStream&) const; + private: GPRReg m_gpr; }; @@ -98,6 +118,7 @@ public: } bool operator!() const { return m_base == InvalidGPRReg; } + explicit operator bool() const { return m_base != InvalidGPRReg; } bool isAddress() const { return m_offset != notAddress(); } @@ -119,6 +140,11 @@ public: return m_base; } + JSValueRegs regs() const + { + return JSValueRegs(gpr()); + } + MacroAssembler::Address asAddress() const { return MacroAssembler::Address(base(), offset()); } private: @@ -144,16 +170,29 @@ public: { } + static JSValueRegs withTwoAvailableRegs(GPRReg gpr1, GPRReg gpr2) + { + return JSValueRegs(gpr1, gpr2); + } + static JSValueRegs payloadOnly(GPRReg gpr) { return JSValueRegs(InvalidGPRReg, gpr); } - bool operator!() const + bool operator!() const { return !static_cast(*this); } + explicit operator bool() const { - return static_cast(m_tagGPR) == InvalidGPRReg - && static_cast(m_payloadGPR) == InvalidGPRReg; + return static_cast(m_tagGPR) != InvalidGPRReg + || static_cast(m_payloadGPR) != InvalidGPRReg; } + + bool operator==(JSValueRegs other) const + { + return m_tagGPR == other.m_tagGPR + && m_payloadGPR == other.m_payloadGPR; + } + bool operator!=(JSValueRegs other) const { return !(*this == other); } GPRReg tagGPR() const { return static_cast(m_tagGPR); } GPRReg payloadGPR() const { return static_cast(m_payloadGPR); } @@ -169,6 +208,10 @@ public: return tagGPR(); } + bool uses(GPRReg gpr) const { return m_tagGPR == gpr || m_payloadGPR == gpr; } + + void dump(PrintStream&) const; + private: int8_t m_tagGPR; int8_t m_payloadGPR; @@ -219,11 +262,12 @@ 
public: result.m_tagType = static_cast(JSValue::CellTag); return result; } - - bool operator!() const + + bool operator!() const { return !static_cast(*this); } + explicit operator bool() const { - return static_cast(m_baseOrTag) == InvalidGPRReg - && static_cast(m_payload) == InvalidGPRReg; + return static_cast(m_baseOrTag) != InvalidGPRReg + || static_cast(m_payload) != InvalidGPRReg; } bool isAddress() const @@ -268,6 +312,11 @@ public: return static_cast(m_tagType); } + JSValueRegs regs() const + { + return JSValueRegs(tagGPR(), payloadGPR()); + } + MacroAssembler::Address asAddress(unsigned additionalOffset = 0) const { return MacroAssembler::Address(base(), offset() + additionalOffset); } private: @@ -280,10 +329,9 @@ private: }; #endif // USE(JSVALUE32_64) -// The baseline JIT requires that regT3 be callee-preserved. - #if CPU(X86) #define NUMBER_OF_ARGUMENT_REGISTERS 0u +#define NUMBER_OF_CALLEE_SAVES_REGISTERS 0u class GPRInfo { public: @@ -291,25 +339,20 @@ public: static const unsigned numberOfRegisters = 6; static const unsigned numberOfArgumentRegisters = NUMBER_OF_ARGUMENT_REGISTERS; - // Note: regT3 is required to be callee-preserved. - // Temporary registers. static const GPRReg regT0 = X86Registers::eax; static const GPRReg regT1 = X86Registers::edx; static const GPRReg regT2 = X86Registers::ecx; - static const GPRReg regT3 = X86Registers::ebx; - static const GPRReg regT4 = X86Registers::edi; - static const GPRReg regT5 = X86Registers::esi; - // These registers match the baseline JIT. - static const GPRReg cachedResultRegister = regT0; - static const GPRReg cachedResultRegister2 = regT1; + static const GPRReg regT3 = X86Registers::ebx; // Callee-save + static const GPRReg regT4 = X86Registers::esi; // Callee-save + static const GPRReg regT5 = X86Registers::edi; // Callee-save static const GPRReg callFrameRegister = X86Registers::ebp; // These constants provide the names for the general purpose argument & return value registers. 
static const GPRReg argumentGPR0 = X86Registers::ecx; // regT2 static const GPRReg argumentGPR1 = X86Registers::edx; // regT1 + static const GPRReg argumentGPR2 = X86Registers::eax; // regT0 + static const GPRReg argumentGPR3 = X86Registers::ebx; // regT3 static const GPRReg nonArgGPR0 = X86Registers::esi; // regT4 - static const GPRReg nonArgGPR1 = X86Registers::eax; // regT0 - static const GPRReg nonArgGPR2 = X86Registers::ebx; // regT3 static const GPRReg returnValueGPR = X86Registers::eax; // regT0 static const GPRReg returnValueGPR2 = X86Registers::edx; // regT1 static const GPRReg nonPreservedNonReturnGPR = X86Registers::ecx; @@ -321,13 +364,18 @@ public: return registerForIndex[index]; } + static GPRReg toArgumentRegister(unsigned) + { + UNREACHABLE_FOR_PLATFORM(); + return InvalidGPRReg; + } + static unsigned toIndex(GPRReg reg) { ASSERT(reg != InvalidGPRReg); ASSERT(static_cast(reg) < 8); - static const unsigned indexForRegister[8] = { 0, 2, 1, 3, InvalidIndex, InvalidIndex, 5, 4 }; + static const unsigned indexForRegister[8] = { 0, 2, 1, 3, InvalidIndex, InvalidIndex, 4, 5 }; unsigned result = indexForRegister[reg]; - ASSERT(result != InvalidIndex); return result; } @@ -350,8 +398,10 @@ public: #if CPU(X86_64) #if !OS(WINDOWS) #define NUMBER_OF_ARGUMENT_REGISTERS 6u +#define NUMBER_OF_CALLEE_SAVES_REGISTERS 5u #else #define NUMBER_OF_ARGUMENT_REGISTERS 4u +#define NUMBER_OF_CALLEE_SAVES_REGISTERS 7u #endif class GPRInfo { @@ -360,50 +410,78 @@ public: static const unsigned numberOfRegisters = 11; static const unsigned numberOfArgumentRegisters = NUMBER_OF_ARGUMENT_REGISTERS; - // Note: regT3 is required to be callee-preserved. - // These registers match the baseline JIT. - static const GPRReg cachedResultRegister = X86Registers::eax; static const GPRReg callFrameRegister = X86Registers::ebp; static const GPRReg tagTypeNumberRegister = X86Registers::r14; static const GPRReg tagMaskRegister = X86Registers::r15; + // Temporary registers. 
static const GPRReg regT0 = X86Registers::eax; - static const GPRReg regT1 = X86Registers::edx; - static const GPRReg regT2 = X86Registers::ecx; - static const GPRReg regT3 = X86Registers::ebx; - static const GPRReg regT4 = X86Registers::edi; - static const GPRReg regT5 = X86Registers::esi; - static const GPRReg regT6 = X86Registers::r8; +#if !OS(WINDOWS) + static const GPRReg regT1 = X86Registers::esi; + static const GPRReg regT2 = X86Registers::edx; + static const GPRReg regT3 = X86Registers::ecx; + static const GPRReg regT4 = X86Registers::r8; + static const GPRReg regT5 = X86Registers::r10; + static const GPRReg regT6 = X86Registers::edi; static const GPRReg regT7 = X86Registers::r9; - static const GPRReg regT8 = X86Registers::r10; - static const GPRReg regT9 = X86Registers::r12; - static const GPRReg regT10 = X86Registers::r13; +#else + static const GPRReg regT1 = X86Registers::edx; + static const GPRReg regT2 = X86Registers::r8; + static const GPRReg regT3 = X86Registers::r9; + static const GPRReg regT4 = X86Registers::r10; + static const GPRReg regT5 = X86Registers::ecx; +#endif + + static const GPRReg regCS0 = X86Registers::ebx; + +#if !OS(WINDOWS) + static const GPRReg regCS1 = X86Registers::r12; + static const GPRReg regCS2 = X86Registers::r13; + static const GPRReg regCS3 = X86Registers::r14; + static const GPRReg regCS4 = X86Registers::r15; +#else + static const GPRReg regCS1 = X86Registers::esi; + static const GPRReg regCS2 = X86Registers::edi; + static const GPRReg regCS3 = X86Registers::r12; + static const GPRReg regCS4 = X86Registers::r13; + static const GPRReg regCS5 = X86Registers::r14; + static const GPRReg regCS6 = X86Registers::r15; +#endif + // These constants provide the names for the general purpose argument & return value registers. 
#if !OS(WINDOWS) - static const GPRReg argumentGPR0 = X86Registers::edi; // regT4 - static const GPRReg argumentGPR1 = X86Registers::esi; // regT5 - static const GPRReg argumentGPR2 = X86Registers::edx; // regT1 - static const GPRReg argumentGPR3 = X86Registers::ecx; // regT2 - static const GPRReg argumentGPR4 = X86Registers::r8; // regT6 - static const GPRReg argumentGPR5 = X86Registers::r9; // regT7 + static const GPRReg argumentGPR0 = X86Registers::edi; // regT6 + static const GPRReg argumentGPR1 = X86Registers::esi; // regT1 + static const GPRReg argumentGPR2 = X86Registers::edx; // regT2 + static const GPRReg argumentGPR3 = X86Registers::ecx; // regT3 + static const GPRReg argumentGPR4 = X86Registers::r8; // regT4 + static const GPRReg argumentGPR5 = X86Registers::r9; // regT7 #else - static const GPRReg argumentGPR0 = X86Registers::ecx; - static const GPRReg argumentGPR1 = X86Registers::edx; - static const GPRReg argumentGPR2 = X86Registers::r8; // regT6 - static const GPRReg argumentGPR3 = X86Registers::r9; // regT7 + static const GPRReg argumentGPR0 = X86Registers::ecx; // regT5 + static const GPRReg argumentGPR1 = X86Registers::edx; // regT1 + static const GPRReg argumentGPR2 = X86Registers::r8; // regT2 + static const GPRReg argumentGPR3 = X86Registers::r9; // regT3 #endif - static const GPRReg nonArgGPR0 = X86Registers::r10; // regT8 - static const GPRReg nonArgGPR1 = X86Registers::ebx; // regT3 - static const GPRReg nonArgGPR2 = X86Registers::r12; // regT9 + static const GPRReg nonArgGPR0 = X86Registers::r10; // regT5 (regT4 on Windows) static const GPRReg returnValueGPR = X86Registers::eax; // regT0 - static const GPRReg returnValueGPR2 = X86Registers::edx; // regT1 - static const GPRReg nonPreservedNonReturnGPR = X86Registers::esi; + static const GPRReg returnValueGPR2 = X86Registers::edx; // regT1 or regT2 + static const GPRReg nonPreservedNonReturnGPR = X86Registers::r10; // regT5 (regT4 on Windows) + static const GPRReg nonPreservedNonArgumentGPR = X86Registers::r10; // regT5 (regT4 on Windows) + + // FIXME: I believe that all uses of this are dead in the sense that it just causes the scratch + // register allocator to select a different register and potentially spill things. It would be better + // if we instead had a more explicit way of saying that we don't have a scratch register. 
+ static const GPRReg patchpointScratchRegister; static GPRReg toRegister(unsigned index) { ASSERT(index < numberOfRegisters); - static const GPRReg registerForIndex[numberOfRegisters] = { regT0, regT1, regT2, regT3, regT4, regT5, regT6, regT7, regT8, regT9, regT10 }; +#if !OS(WINDOWS) + static const GPRReg registerForIndex[numberOfRegisters] = { regT0, regT1, regT2, regT3, regT4, regT5, regT6, regT7, regCS0, regCS1, regCS2 }; +#else + static const GPRReg registerForIndex[numberOfRegisters] = { regT0, regT1, regT2, regT3, regT4, regT5, regCS0, regCS1, regCS2, regCS3, regCS4 }; +#endif return registerForIndex[index]; } @@ -422,7 +500,11 @@ public: { ASSERT(reg != InvalidGPRReg); ASSERT(static_cast(reg) < 16); - static const unsigned indexForRegister[16] = { 0, 2, 1, 3, InvalidIndex, InvalidIndex, 5, 4, 6, 7, 8, InvalidIndex, 9, 10, InvalidIndex, InvalidIndex }; +#if !OS(WINDOWS) + static const unsigned indexForRegister[16] = { 0, 3, 2, 8, InvalidIndex, InvalidIndex, 1, 6, 4, 7, 5, InvalidIndex, 9, 10, InvalidIndex, InvalidIndex }; +#else + static const unsigned indexForRegister[16] = { 0, 5, 1, 6, InvalidIndex, InvalidIndex, 7, 8, 2, 3, 4, InvalidIndex, 9, 10, InvalidIndex, InvalidIndex }; +#endif return indexForRegister[reg]; } @@ -439,6 +521,16 @@ public: return nameForRegister[reg]; } + static const std::array& reservedRegisters() + { + static const std::array reservedRegisters { { + MacroAssembler::s_scratchRegister, + tagTypeNumberRegister, + tagMaskRegister, + } }; + return reservedRegisters; + } + static const unsigned InvalidIndex = 0xffffffff; }; @@ -446,6 +538,7 @@ public: #if CPU(ARM) #define NUMBER_OF_ARGUMENT_REGISTERS 4u +#define NUMBER_OF_CALLEE_SAVES_REGISTERS 0u class GPRInfo { public: @@ -453,13 +546,11 @@ public: static const unsigned numberOfRegisters = 9; static const unsigned numberOfArgumentRegisters = NUMBER_OF_ARGUMENT_REGISTERS; - // Note: regT3 is required to be callee-preserved. - // Temporary registers. static const GPRReg regT0 = ARMRegisters::r0; static const GPRReg regT1 = ARMRegisters::r1; static const GPRReg regT2 = ARMRegisters::r2; - static const GPRReg regT3 = ARMRegisters::r4; + static const GPRReg regT3 = ARMRegisters::r3; static const GPRReg regT4 = ARMRegisters::r8; static const GPRReg regT5 = ARMRegisters::r9; static const GPRReg regT6 = ARMRegisters::r10; @@ -468,22 +559,19 @@ public: #else static const GPRReg regT7 = ARMRegisters::r7; #endif - static const GPRReg regT8 = ARMRegisters::r3; + static const GPRReg regT8 = ARMRegisters::r4; // These registers match the baseline JIT. - static const GPRReg cachedResultRegister = regT0; - static const GPRReg cachedResultRegister2 = regT1; static const GPRReg callFrameRegister = ARMRegisters::fp; // These constants provide the names for the general purpose argument & return value registers. 
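The per-platform registerForIndex and indexForRegister tables earlier in this header have to stay mutual inverses, otherwise toIndex(toRegister(i)) breaks. A quick stand-alone consistency check over the non-Windows x86-64 tables, with plain machine encodings (eax = 0 through r15 = 15) standing in for GPRReg:

// Standalone consistency check (illustrative): the toRegister()/toIndex() tables
// must be mutual inverses. Values below are the non-Windows x86-64 tables above,
// written as raw machine encodings rather than GPRReg enumerators.
#include <cassert>
#include <cstdio>

int main()
{
    const unsigned Invalid = 0xffffffff;
    const unsigned registerForIndex[11] = { 0, 6, 2, 1, 8, 10, 7, 9, 3, 12, 13 };
    const unsigned indexForRegister[16] = { 0, 3, 2, 8, Invalid, Invalid, 1, 6,
                                            4, 7, 5, Invalid, 9, 10, Invalid, Invalid };

    for (unsigned index = 0; index < 11; ++index)
        assert(indexForRegister[registerForIndex[index]] == index);
    for (unsigned reg = 0; reg < 16; ++reg) {
        if (indexForRegister[reg] != Invalid)
            assert(registerForIndex[indexForRegister[reg]] == reg);
    }
    printf("tables are mutual inverses\n");
    return 0;
}

The same invariant applies to the Windows tables and to the ARM, ARM64, and MIPS variants in this header, with InvalidIndex marking encodings that are not allocatable temporaries.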
static const GPRReg argumentGPR0 = ARMRegisters::r0; // regT0 static const GPRReg argumentGPR1 = ARMRegisters::r1; // regT1 static const GPRReg argumentGPR2 = ARMRegisters::r2; // regT2 - static const GPRReg argumentGPR3 = ARMRegisters::r3; // regT8 - static const GPRReg nonArgGPR0 = ARMRegisters::r4; // regT3 + static const GPRReg argumentGPR3 = ARMRegisters::r3; // regT3 + static const GPRReg nonArgGPR0 = ARMRegisters::r4; // regT8 static const GPRReg nonArgGPR1 = ARMRegisters::r8; // regT4 - static const GPRReg nonArgGPR2 = ARMRegisters::r9; // regT5 static const GPRReg returnValueGPR = ARMRegisters::r0; // regT0 static const GPRReg returnValueGPR2 = ARMRegisters::r1; // regT1 - static const GPRReg nonPreservedNonReturnGPR = ARMRegisters::r5; // regT7 + static const GPRReg nonPreservedNonReturnGPR = ARMRegisters::r5; static GPRReg toRegister(unsigned index) { @@ -492,18 +580,24 @@ public: return registerForIndex[index]; } + static GPRReg toArgumentRegister(unsigned index) + { + ASSERT(index < numberOfArgumentRegisters); + static const GPRReg registerForIndex[numberOfArgumentRegisters] = { argumentGPR0, argumentGPR1, argumentGPR2, argumentGPR3 }; + return registerForIndex[index]; + } + static unsigned toIndex(GPRReg reg) { ASSERT(reg != InvalidGPRReg); ASSERT(static_cast(reg) < 16); static const unsigned indexForRegister[16] = #if CPU(ARM_THUMB2) - { 0, 1, 2, 8, 3, 9, InvalidIndex, InvalidIndex, 4, 5, 6, 7, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex }; + { 0, 1, 2, 3, 8, InvalidIndex, InvalidIndex, InvalidIndex, 4, 5, 6, 7, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex }; #else - { 0, 1, 2, 8, 3, 9, InvalidIndex, 7, 4, 5, 6, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex }; + { 0, 1, 2, 3, 8, InvalidIndex, InvalidIndex, 7, 4, 5, 6, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex }; #endif unsigned result = indexForRegister[reg]; - ASSERT(result != InvalidIndex); return result; } @@ -527,26 +621,27 @@ public: #if CPU(ARM64) #define NUMBER_OF_ARGUMENT_REGISTERS 8u +// Callee Saves includes x19..x28 and FP registers q8..q15 +#define NUMBER_OF_CALLEE_SAVES_REGISTERS 18u class GPRInfo { public: typedef GPRReg RegisterType; static const unsigned numberOfRegisters = 16; - - // Note: regT3 is required to be callee-preserved. + static const unsigned numberOfArgumentRegisters = 8; // These registers match the baseline JIT. - static const GPRReg cachedResultRegister = ARM64Registers::x0; - static const GPRReg timeoutCheckRegister = ARM64Registers::x26; static const GPRReg callFrameRegister = ARM64Registers::fp; static const GPRReg tagTypeNumberRegister = ARM64Registers::x27; static const GPRReg tagMaskRegister = ARM64Registers::x28; + static const GPRReg dataTempRegister = MacroAssembler::dataTempRegister; + static const GPRReg memoryTempRegister = MacroAssembler::memoryTempRegister; // Temporary registers. 
static const GPRReg regT0 = ARM64Registers::x0; static const GPRReg regT1 = ARM64Registers::x1; static const GPRReg regT2 = ARM64Registers::x2; - static const GPRReg regT3 = ARM64Registers::x23; - static const GPRReg regT4 = ARM64Registers::x24; + static const GPRReg regT3 = ARM64Registers::x3; + static const GPRReg regT4 = ARM64Registers::x4; static const GPRReg regT5 = ARM64Registers::x5; static const GPRReg regT6 = ARM64Registers::x6; static const GPRReg regT7 = ARM64Registers::x7; @@ -558,6 +653,16 @@ public: static const GPRReg regT13 = ARM64Registers::x13; static const GPRReg regT14 = ARM64Registers::x14; static const GPRReg regT15 = ARM64Registers::x15; + static const GPRReg regCS0 = ARM64Registers::x19; // Used by FTL only + static const GPRReg regCS1 = ARM64Registers::x20; // Used by FTL only + static const GPRReg regCS2 = ARM64Registers::x21; // Used by FTL only + static const GPRReg regCS3 = ARM64Registers::x22; // Used by FTL only + static const GPRReg regCS4 = ARM64Registers::x23; // Used by FTL only + static const GPRReg regCS5 = ARM64Registers::x24; // Used by FTL only + static const GPRReg regCS6 = ARM64Registers::x25; // Used by FTL only + static const GPRReg regCS7 = ARM64Registers::x26; + static const GPRReg regCS8 = ARM64Registers::x27; // tagTypeNumber + static const GPRReg regCS9 = ARM64Registers::x28; // tagMask // These constants provide the names for the general purpose argument & return value registers. static const GPRReg argumentGPR0 = ARM64Registers::x0; // regT0 static const GPRReg argumentGPR1 = ARM64Registers::x1; // regT1 @@ -569,12 +674,13 @@ public: static const GPRReg argumentGPR7 = ARM64Registers::x7; // regT7 static const GPRReg nonArgGPR0 = ARM64Registers::x8; // regT8 static const GPRReg nonArgGPR1 = ARM64Registers::x9; // regT9 - static const GPRReg nonArgGPR2 = ARM64Registers::x10; // regT10 static const GPRReg returnValueGPR = ARM64Registers::x0; // regT0 static const GPRReg returnValueGPR2 = ARM64Registers::x1; // regT1 static const GPRReg nonPreservedNonReturnGPR = ARM64Registers::x2; + static const GPRReg nonPreservedNonArgumentGPR = ARM64Registers::x8; + static const GPRReg patchpointScratchRegister; - // GPRReg mapping is direct, the machine regsiter numbers can + // GPRReg mapping is direct, the machine register numbers can // be used directly as indices into the GPR RegisterBank. COMPILE_ASSERT(ARM64Registers::q0 == 0, q0_is_0); COMPILE_ASSERT(ARM64Registers::q1 == 1, q1_is_1); @@ -598,12 +704,20 @@ public: } static unsigned toIndex(GPRReg reg) { + if (reg > regT15) + return InvalidIndex; return (unsigned)reg; } + static GPRReg toArgumentRegister(unsigned index) + { + ASSERT(index < numberOfArgumentRegisters); + return toRegister(index); + } + static const char* debugName(GPRReg reg) { - ASSERT(static_cast(reg) != InvalidGPRReg); + ASSERT(reg != InvalidGPRReg); ASSERT(static_cast(reg) < 32); static const char* nameForRegister[32] = { "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", @@ -614,6 +728,17 @@ public: return nameForRegister[reg]; } + static const std::array& reservedRegisters() + { + static const std::array reservedRegisters { { + dataTempRegister, + memoryTempRegister, + tagTypeNumberRegister, + tagMaskRegister, + } }; + return reservedRegisters; + } + static const unsigned InvalidIndex = 0xffffffff; }; @@ -621,6 +746,7 @@ public: #if CPU(MIPS) #define NUMBER_OF_ARGUMENT_REGISTERS 4u +#define NUMBER_OF_CALLEE_SAVES_REGISTERS 0u class GPRInfo { public: @@ -630,31 +756,26 @@ public: // regT0 must be v0 for returning a 32-bit value. 
// regT1 must be v1 for returning a pair of 32-bit value. - // regT3 must be saved in the callee, so use an S register. // Temporary registers. static const GPRReg regT0 = MIPSRegisters::v0; static const GPRReg regT1 = MIPSRegisters::v1; - static const GPRReg regT2 = MIPSRegisters::t4; - static const GPRReg regT3 = MIPSRegisters::s2; - static const GPRReg regT4 = MIPSRegisters::t5; - static const GPRReg regT5 = MIPSRegisters::t6; - static const GPRReg regT6 = MIPSRegisters::s0; + static const GPRReg regT2 = MIPSRegisters::t2; + static const GPRReg regT3 = MIPSRegisters::t3; + static const GPRReg regT4 = MIPSRegisters::t4; + static const GPRReg regT5 = MIPSRegisters::t5; + static const GPRReg regT6 = MIPSRegisters::t6; // These registers match the baseline JIT. - static const GPRReg cachedResultRegister = regT0; - static const GPRReg cachedResultRegister2 = regT1; static const GPRReg callFrameRegister = MIPSRegisters::fp; // These constants provide the names for the general purpose argument & return value registers. static const GPRReg argumentGPR0 = MIPSRegisters::a0; static const GPRReg argumentGPR1 = MIPSRegisters::a1; static const GPRReg argumentGPR2 = MIPSRegisters::a2; static const GPRReg argumentGPR3 = MIPSRegisters::a3; - static const GPRReg nonArgGPR0 = regT2; - static const GPRReg nonArgGPR1 = regT3; - static const GPRReg nonArgGPR2 = regT4; + static const GPRReg nonArgGPR0 = regT4; static const GPRReg returnValueGPR = regT0; static const GPRReg returnValueGPR2 = regT1; - static const GPRReg nonPreservedNonReturnGPR = regT5; + static const GPRReg nonPreservedNonReturnGPR = regT2; static GPRReg toRegister(unsigned index) { @@ -663,17 +784,24 @@ public: return registerForIndex[index]; } + static GPRReg toArgumentRegister(unsigned index) + { + ASSERT(index < numberOfArgumentRegisters); + static const GPRReg registerForIndex[numberOfArgumentRegisters] = { argumentGPR0, argumentGPR1, argumentGPR2, argumentGPR3 }; + return registerForIndex[index]; + } + static unsigned toIndex(GPRReg reg) { ASSERT(reg != InvalidGPRReg); - ASSERT(reg < 24); - static const unsigned indexForRegister[24] = { + ASSERT(reg < 32); + static const unsigned indexForRegister[32] = { InvalidIndex, InvalidIndex, 0, 1, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, - InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, 2, 4, 5, InvalidIndex, - 6, InvalidIndex, 3, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex + InvalidIndex, InvalidIndex, 2, 3, 4, 5, 6, InvalidIndex, + InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, + InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex }; unsigned result = indexForRegister[reg]; - ASSERT(result != InvalidIndex); return result; } @@ -695,76 +823,6 @@ public: #endif // CPU(MIPS) -#if CPU(SH4) -#define NUMBER_OF_ARGUMENT_REGISTERS 4u - -class GPRInfo { -public: - typedef GPRReg RegisterType; - static const unsigned numberOfRegisters = 10; - - // Note: regT3 is required to be callee-preserved. - - // Temporary registers. 
- static const GPRReg regT0 = SH4Registers::r0; - static const GPRReg regT1 = SH4Registers::r1; - static const GPRReg regT2 = SH4Registers::r2; - static const GPRReg regT3 = SH4Registers::r10; - static const GPRReg regT4 = SH4Registers::r4; - static const GPRReg regT5 = SH4Registers::r5; - static const GPRReg regT6 = SH4Registers::r6; - static const GPRReg regT7 = SH4Registers::r7; - static const GPRReg regT8 = SH4Registers::r8; - static const GPRReg regT9 = SH4Registers::r9; - // These registers match the baseline JIT. - static const GPRReg cachedResultRegister = regT0; - static const GPRReg cachedResultRegister2 = regT1; - static const GPRReg callFrameRegister = SH4Registers::fp; - // These constants provide the names for the general purpose argument & return value registers. - static const GPRReg argumentGPR0 = regT4; - static const GPRReg argumentGPR1 = regT5; - static const GPRReg argumentGPR2 = regT6; - static const GPRReg argumentGPR3 = regT7; - static const GPRReg nonArgGPR0 = regT3; - static const GPRReg nonArgGPR1 = regT8; - static const GPRReg nonArgGPR2 = regT9; - static const GPRReg returnValueGPR = regT0; - static const GPRReg returnValueGPR2 = regT1; - static const GPRReg nonPreservedNonReturnGPR = regT2; - - static GPRReg toRegister(unsigned index) - { - ASSERT(index < numberOfRegisters); - static const GPRReg registerForIndex[numberOfRegisters] = { regT0, regT1, regT2, regT3, regT4, regT5, regT6, regT7, regT8, regT9 }; - return registerForIndex[index]; - } - - static unsigned toIndex(GPRReg reg) - { - ASSERT(reg != InvalidGPRReg); - ASSERT(reg < 14); - static const unsigned indexForRegister[14] = { 0, 1, 2, InvalidIndex, 4, 5, 6, 7, 8, 9, 3, InvalidIndex, InvalidIndex, InvalidIndex }; - unsigned result = indexForRegister[reg]; - ASSERT(result != InvalidIndex); - return result; - } - - static const char* debugName(GPRReg reg) - { - ASSERT(reg != InvalidGPRReg); - ASSERT(reg < 16); - static const char* nameForRegister[16] = { - "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7", - "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15" - }; - return nameForRegister[reg]; - } - - static const unsigned InvalidIndex = 0xffffffff; -}; - -#endif // CPU(SH4) - // The baseline JIT uses "accumulator" style execution with regT0 (for 64-bit) // and regT0 + regT1 (for 32-bit) serving as the accumulator register(s) for // passing results of one opcode to the next. 
Hence: @@ -773,6 +831,14 @@ COMPILE_ASSERT(GPRInfo::regT0 == GPRInfo::returnValueGPR, regT0_must_equal_retur COMPILE_ASSERT(GPRInfo::regT1 == GPRInfo::returnValueGPR2, regT1_must_equal_returnValueGPR2); #endif +inline GPRReg extractResult(GPRReg result) { return result; } +#if USE(JSVALUE64) +inline GPRReg extractResult(JSValueRegs result) { return result.gpr(); } +#else +inline JSValueRegs extractResult(JSValueRegs result) { return result; } +#endif +inline NoResultTag extractResult(NoResultTag) { return NoResult; } + #endif // ENABLE(JIT) } // namespace JSC @@ -789,5 +855,3 @@ inline void printInternal(PrintStream& out, JSC::GPRReg reg) } } // namespace WTF - -#endif diff --git a/Source/JavaScriptCore/jit/HostCallReturnValue.cpp b/Source/JavaScriptCore/jit/HostCallReturnValue.cpp index 528fb2bc4..e8d01916b 100644 --- a/Source/JavaScriptCore/jit/HostCallReturnValue.cpp +++ b/Source/JavaScriptCore/jit/HostCallReturnValue.cpp @@ -29,6 +29,7 @@ #include "CallFrame.h" #include "JSCJSValueInlines.h" #include "JSObject.h" +#include "JSCInlines.h" #include diff --git a/Source/JavaScriptCore/jit/HostCallReturnValue.h b/Source/JavaScriptCore/jit/HostCallReturnValue.h index f4c8bc703..eb6116106 100644 --- a/Source/JavaScriptCore/jit/HostCallReturnValue.h +++ b/Source/JavaScriptCore/jit/HostCallReturnValue.h @@ -23,12 +23,10 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef HostCallReturnValue_h -#define HostCallReturnValue_h +#pragma once #include "JSCJSValue.h" #include "MacroAssemblerCodeRef.h" -#include #if ENABLE(JIT) @@ -42,7 +40,7 @@ namespace JSC { extern "C" EncodedJSValue HOST_CALL_RETURN_VALUE_OPTION getHostCallReturnValue() REFERENCED_FROM_ASM WTF_INTERNAL; -#if COMPILER(GCC) +#if COMPILER(GCC_OR_CLANG) // This is a public declaration only to convince CLANG not to elide it. extern "C" EncodedJSValue HOST_CALL_RETURN_VALUE_OPTION getHostCallReturnValueWithExecState(ExecState*) REFERENCED_FROM_ASM WTF_INTERNAL; @@ -52,14 +50,12 @@ inline void initializeHostCallReturnValue() getHostCallReturnValueWithExecState(0); } -#else // COMPILER(GCC) +#else // COMPILER(GCC_OR_CLANG) inline void initializeHostCallReturnValue() { } -#endif // COMPILER(GCC) +#endif // COMPILER(GCC_OR_CLANG) } // namespace JSC #endif // ENABLE(JIT) - -#endif // HostCallReturnValue_h diff --git a/Source/JavaScriptCore/jit/ICStats.cpp b/Source/JavaScriptCore/jit/ICStats.cpp new file mode 100644 index 000000000..2080cd934 --- /dev/null +++ b/Source/JavaScriptCore/jit/ICStats.cpp @@ -0,0 +1,128 @@ +/* + * Copyright (C) 2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
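The extractResult() overloads added above appear intended to let templated helpers accept whichever result representation a snippet produces, a bare GPR, a tag/payload pair, or no result at all, through a single call. An illustration of the same overload trick with stand-in types (Reg and RegPair below are hypothetical, not the real GPRReg/JSValueRegs):

// Illustrative only: stand-in types, not the real JSC register classes.
#include <cstdio>

enum NoResultTag { NoResult };
struct Reg { int id; };
struct RegPair { Reg payload; Reg tag; };

inline Reg extractResult(Reg r) { return r; }
inline Reg extractResult(RegPair p) { return p.payload; } // 64-bit style: one register carries the value
inline NoResultTag extractResult(NoResultTag) { return NoResult; }

// A helper can be written once against extractResult() and accept any of the three.
template<typename Result>
void finish(Result r)
{
    (void)extractResult(r); // same call site regardless of representation
    printf("finished\n");
}

int main()
{
    finish(Reg { 0 });
    finish(RegPair { Reg { 0 }, Reg { 1 } });
    finish(NoResult);
    return 0;
}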
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "ICStats.h" + +namespace JSC { + +bool ICEvent::operator<(const ICEvent& other) const +{ + if (m_classInfo != other.m_classInfo) { + if (!m_classInfo) + return true; + if (!other.m_classInfo) + return false; + return strcmp(m_classInfo->className, other.m_classInfo->className) < 0; + } + + if (m_propertyName != other.m_propertyName) + return codePointCompare(m_propertyName.string(), other.m_propertyName.string()) < 0; + + return m_kind < other.m_kind; +} + +void ICEvent::dump(PrintStream& out) const +{ + out.print(m_kind, "(", m_classInfo ? m_classInfo->className : "", ", ", m_propertyName, ")"); +} + +void ICEvent::log() const +{ + ICStats::instance().add(*this); +} + +Atomic ICStats::s_instance; + +ICStats::ICStats() +{ + m_thread = createThread( + "JSC ICStats", + [this] () { + LockHolder locker(m_lock); + for (;;) { + m_condition.waitFor( + m_lock, Seconds(1), [this] () -> bool { return m_shouldStop; }); + if (m_shouldStop) + break; + + dataLog("ICStats:\n"); + auto list = m_spectrum.buildList(); + for (unsigned i = list.size(); i--;) + dataLog(" ", list[i].key, ": ", list[i].count, "\n"); + } + }); +} + +ICStats::~ICStats() +{ + { + LockHolder locker(m_lock); + m_shouldStop = true; + m_condition.notifyAll(); + } + + waitForThreadCompletion(m_thread); +} + +void ICStats::add(const ICEvent& event) +{ + m_spectrum.add(event); +} + +ICStats& ICStats::instance() +{ + for (;;) { + ICStats* result = s_instance.load(); + if (result) + return *result; + + ICStats* newStats = new ICStats(); + if (s_instance.compareExchangeWeak(nullptr, newStats)) + return *newStats; + + delete newStats; + } +} + +} // namespace JSC + +namespace WTF { + +using namespace JSC; + +void printInternal(PrintStream& out, ICEvent::Kind kind) +{ + switch (kind) { +#define ICEVENT_KIND_DUMP(name) case ICEvent::name: out.print(#name); return; + FOR_EACH_ICEVENT_KIND(ICEVENT_KIND_DUMP); +#undef ICEVENT_KIND_DUMP + } + RELEASE_ASSERT_NOT_REACHED(); +} + +} // namespace WTF + + diff --git a/Source/JavaScriptCore/jit/ICStats.h b/Source/JavaScriptCore/jit/ICStats.h new file mode 100644 index 000000000..9499c915a --- /dev/null +++ b/Source/JavaScriptCore/jit/ICStats.h @@ -0,0 +1,194 @@ +/* + * Copyright (C) 2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
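ICStats::instance() above uses an allocate-then-publish singleton: every racing thread may construct an ICStats, but only the thread that wins the compare-exchange keeps its copy; losers delete theirs and re-read the winner's pointer. A minimal model of the same shape using std::atomic (Stats is a stand-in for the real class, which uses WTF::Atomic):

// Minimal model of the allocate-then-publish singleton used by ICStats::instance().
#include <atomic>
#include <cstdio>

struct Stats { int counter { 0 }; };

static std::atomic<Stats*> s_instance { nullptr };

Stats& instance()
{
    for (;;) {
        Stats* existing = s_instance.load();
        if (existing)
            return *existing;

        Stats* fresh = new Stats();
        Stats* expected = nullptr;
        // Publish our copy only if nobody else has published one in the meantime.
        if (s_instance.compare_exchange_weak(expected, fresh))
            return *fresh;

        delete fresh; // lost the race; loop and pick up the winner's instance
    }
}

int main()
{
    instance().counter++;
    printf("%d\n", instance().counter); // 1
    return 0;
}

compare_exchange_weak can fail spuriously, which is why both the code above and this sketch loop instead of asserting that the first exchange succeeds.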
IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#pragma once + +#include "ClassInfo.h" +#include "Identifier.h" +#include +#include +#include +#include +#include +#include +#include + +namespace JSC { + +#define FOR_EACH_ICEVENT_KIND(macro) \ + macro(InvalidKind) \ + macro(GetByIdAddAccessCase) \ + macro(GetByIdReplaceWithJump) \ + macro(GetByIdSelfPatch) \ + macro(InAddAccessCase) \ + macro(InReplaceWithJump) \ + macro(OperationGetById) \ + macro(OperationGetByIdGeneric) \ + macro(OperationGetByIdBuildList) \ + macro(OperationGetByIdOptimize) \ + macro(OperationInOptimize) \ + macro(OperationIn) \ + macro(OperationGenericIn) \ + macro(OperationPutByIdStrict) \ + macro(OperationPutByIdNonStrict) \ + macro(OperationPutByIdDirectStrict) \ + macro(OperationPutByIdDirectNonStrict) \ + macro(OperationPutByIdStrictOptimize) \ + macro(OperationPutByIdNonStrictOptimize) \ + macro(OperationPutByIdDirectStrictOptimize) \ + macro(OperationPutByIdDirectNonStrictOptimize) \ + macro(OperationPutByIdStrictBuildList) \ + macro(OperationPutByIdNonStrictBuildList) \ + macro(OperationPutByIdDirectStrictBuildList) \ + macro(OperationPutByIdDirectNonStrictBuildList) \ + macro(PutByIdAddAccessCase) \ + macro(PutByIdReplaceWithJump) \ + macro(PutByIdSelfPatch) + +class ICEvent { +public: + enum Kind { +#define ICEVENT_KIND_DECLARATION(name) name, + FOR_EACH_ICEVENT_KIND(ICEVENT_KIND_DECLARATION) +#undef ICEVENT_KIND_DECLARATION + }; + + ICEvent() + { + } + + ICEvent(Kind kind, const ClassInfo* classInfo, const Identifier propertyName) + : m_kind(kind) + , m_classInfo(classInfo) + , m_propertyName(propertyName) + { + } + + ICEvent(WTF::HashTableDeletedValueType) + : m_kind(OperationGetById) + { + } + + bool operator==(const ICEvent& other) const + { + return m_kind == other.m_kind + && m_classInfo == other.m_classInfo + && m_propertyName == other.m_propertyName; + } + + bool operator!=(const ICEvent& other) const + { + return !(*this == other); + } + + bool operator<(const ICEvent& other) const; + bool operator>(const ICEvent& other) const { return other < *this; } + bool operator<=(const ICEvent& other) const { return !(*this > other); } + bool operator>=(const ICEvent& other) const { return !(*this < other); } + + explicit operator bool() const + { + return *this != ICEvent(); + } + + Kind kind() const { return m_kind; } + const ClassInfo* classInfo() const { return m_classInfo; } + const Identifier& propertyName() const { return m_propertyName; } + + unsigned hash() const + { + return m_kind + WTF::PtrHash::hash(m_classInfo) + StringHash::hash(m_propertyName.string()); + } + + bool isHashTableDeletedValue() const + { + return *this == ICEvent(WTF::HashTableDeletedValue); + } + + void dump(PrintStream&) const; + + void log() const; + +private: + + Kind m_kind { InvalidKind }; + const ClassInfo* m_classInfo { nullptr }; + Identifier m_propertyName; +}; + +struct ICEventHash { + static unsigned hash(const ICEvent& key) { return key.hash(); } + static bool equal(const ICEvent& a, const ICEvent& b) { return a == b; } + static const 
bool safeToCompareToEmptyOrDeleted = true; +}; + +} // namespace JSC + +namespace WTF { + +void printInternal(PrintStream&, JSC::ICEvent::Kind); + +template struct DefaultHash; +template<> struct DefaultHash { + typedef JSC::ICEventHash Hash; +}; + +template struct HashTraits; +template<> struct HashTraits : SimpleClassHashTraits { + static const bool emptyValueIsZero = false; +}; + +} // namespace WTF + +namespace JSC { + +class ICStats { + WTF_MAKE_NONCOPYABLE(ICStats); + WTF_MAKE_FAST_ALLOCATED; +public: + ICStats(); + ~ICStats(); + + void add(const ICEvent& event); + + static ICStats& instance(); + +private: + + Spectrum m_spectrum; + ThreadIdentifier m_thread; + Lock m_lock; + Condition m_condition; + bool m_shouldStop { false }; + + static Atomic s_instance; +}; + +#define LOG_IC(arguments) do { \ + if (Options::useICStats()) \ + (ICEvent arguments).log(); \ + } while (false) + +} // namespace JSC diff --git a/Source/JavaScriptCore/jit/IntrinsicEmitter.cpp b/Source/JavaScriptCore/jit/IntrinsicEmitter.cpp new file mode 100644 index 000000000..4fb7a5909 --- /dev/null +++ b/Source/JavaScriptCore/jit/IntrinsicEmitter.cpp @@ -0,0 +1,136 @@ +/* + * Copyright (C) 2015-2016 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * 3. Neither the name of Apple Inc. ("Apple") nor the names of + * its contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND + * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF + * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
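// Illustrative sketch (not part of the upstream patch): the ICStats class above counts
// ICEvents in a WTF::Spectrum keyed by the event itself, which is why ICEvent provides
// operator==, hash(), and the DefaultHash/HashTraits specializations, and why LOG_IC only
// constructs and logs an event when Options::useICStats() is on. A rough stand-alone
// equivalent of the counting side using std::unordered_map; Event, EventHash and addEvent
// are placeholders, not JSC API.
#include <cstddef>
#include <cstdint>
#include <functional>
#include <string>
#include <unordered_map>

struct Event {
    int kind = 0;
    const void* classInfo = nullptr;
    std::string propertyName;

    bool operator==(const Event& other) const
    {
        return kind == other.kind && classInfo == other.classInfo && propertyName == other.propertyName;
    }
};

struct EventHash {
    std::size_t operator()(const Event& event) const
    {
        // Combine the three components, mirroring the shape of ICEvent::hash() above.
        return std::hash<int>()(event.kind)
            + std::hash<const void*>()(event.classInfo)
            + std::hash<std::string>()(event.propertyName);
    }
};

using EventSpectrum = std::unordered_map<Event, uint64_t, EventHash>;

inline void addEvent(EventSpectrum& spectrum, const Event& event)
{
    ++spectrum[event]; // each add() bumps the per-event count that the logging thread dumps
}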
+ */ + +#include "config.h" + +#if ENABLE(JIT) + +#include "CCallHelpers.h" +#include "CallFrame.h" +#include "CodeBlock.h" +#include "IntrinsicGetterAccessCase.h" +#include "JSArrayBufferView.h" +#include "JSCJSValueInlines.h" +#include "JSCellInlines.h" +#include "PolymorphicAccess.h" +#include "StructureStubInfo.h" + +namespace JSC { + +typedef CCallHelpers::TrustedImm32 TrustedImm32; +typedef CCallHelpers::Imm32 Imm32; +typedef CCallHelpers::TrustedImmPtr TrustedImmPtr; +typedef CCallHelpers::ImmPtr ImmPtr; +typedef CCallHelpers::TrustedImm64 TrustedImm64; +typedef CCallHelpers::Imm64 Imm64; + +bool IntrinsicGetterAccessCase::canEmitIntrinsicGetter(JSFunction* getter, Structure* structure) +{ + + switch (getter->intrinsic()) { + case TypedArrayByteOffsetIntrinsic: + case TypedArrayByteLengthIntrinsic: + case TypedArrayLengthIntrinsic: { + TypedArrayType type = structure->classInfo()->typedArrayStorageType; + + if (!isTypedView(type)) + return false; + + return true; + } + default: + return false; + } + RELEASE_ASSERT_NOT_REACHED(); +} + +void IntrinsicGetterAccessCase::emitIntrinsicGetter(AccessGenerationState& state) +{ + CCallHelpers& jit = *state.jit; + JSValueRegs valueRegs = state.valueRegs; + GPRReg baseGPR = state.baseGPR; + GPRReg valueGPR = valueRegs.payloadGPR(); + + switch (intrinsic()) { + case TypedArrayLengthIntrinsic: { + jit.load32(MacroAssembler::Address(state.baseGPR, JSArrayBufferView::offsetOfLength()), valueGPR); + jit.boxInt32(valueGPR, valueRegs); + state.succeed(); + return; + } + + case TypedArrayByteLengthIntrinsic: { + TypedArrayType type = structure()->classInfo()->typedArrayStorageType; + + jit.load32(MacroAssembler::Address(state.baseGPR, JSArrayBufferView::offsetOfLength()), valueGPR); + + if (elementSize(type) > 1) { + // We can use a bitshift here since we TypedArrays cannot have byteLength that overflows an int32. + jit.lshift32(valueGPR, Imm32(logElementSize(type)), valueGPR); + } + + jit.boxInt32(valueGPR, valueRegs); + state.succeed(); + return; + } + + case TypedArrayByteOffsetIntrinsic: { + GPRReg scratchGPR = state.scratchGPR; + + CCallHelpers::Jump emptyByteOffset = jit.branch32( + MacroAssembler::NotEqual, + MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfMode()), + TrustedImm32(WastefulTypedArray)); + + jit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR); + jit.loadPtr(MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfVector()), valueGPR); + jit.loadPtr(MacroAssembler::Address(scratchGPR, Butterfly::offsetOfArrayBuffer()), scratchGPR); + jit.loadPtr(MacroAssembler::Address(scratchGPR, ArrayBuffer::offsetOfData()), scratchGPR); + jit.subPtr(scratchGPR, valueGPR); + + CCallHelpers::Jump done = jit.jump(); + + emptyByteOffset.link(&jit); + jit.move(TrustedImmPtr(0), valueGPR); + + done.link(&jit); + + jit.boxInt32(valueGPR, valueRegs); + state.succeed(); + return; + } + + default: + break; + } + RELEASE_ASSERT_NOT_REACHED(); +} + +} // namespace JSC + +#endif // ENABLE(JIT) diff --git a/Source/JavaScriptCore/jit/JIT.cpp b/Source/JavaScriptCore/jit/JIT.cpp index c3508b01d..e74219b62 100644 --- a/Source/JavaScriptCore/jit/JIT.cpp +++ b/Source/JavaScriptCore/jit/JIT.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008, 2009, 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2009, 2012-2016 Apple Inc. All rights reserved. 
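// Illustrative sketch (not part of the upstream patch): emitIntrinsicGetter above inlines
// typed-array byteLength as length << logElementSize (safe because a typed array's
// byteLength always fits in an int32) and byteOffset as "view's vector pointer minus the
// backing ArrayBuffer's data pointer", with 0 for views that have no separate buffer
// (the WastefulTypedArray check). The same arithmetic in plain C++ on a toy struct that
// only stands in for JSArrayBufferView's real layout:
#include <cstdint>

struct ToyTypedArrayView {
    uint32_t length;          // element count
    unsigned logElementSize;  // e.g. 2 for an Int32Array
    const char* vector;       // start of this view's data
    const char* bufferData;   // start of the backing ArrayBuffer's data, or nullptr
};

inline uint32_t byteLengthOf(const ToyTypedArrayView& view)
{
    // A shift is enough: the byteLength of a typed array cannot overflow an int32.
    return view.length << view.logElementSize;
}

inline uint32_t byteOffsetOf(const ToyTypedArrayView& view)
{
    // Views without a separate ArrayBuffer report offset 0, matching the
    // emptyByteOffset branch in the emitted code.
    if (!view.bufferData)
        return 0;
    return static_cast<uint32_t>(view.vector - view.bufferData);
}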
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,63 +26,75 @@ #include "config.h" #if ENABLE(JIT) -#include "JIT.h" -// This probably does not belong here; adding here for now as a quick Windows build fix. -#if ENABLE(ASSEMBLER) && CPU(X86) && !OS(MAC_OS_X) -#include "MacroAssembler.h" -JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2; -#endif +#include "JIT.h" +#include "BytecodeGraph.h" #include "CodeBlock.h" +#include "CodeBlockWithJITType.h" #include "DFGCapabilities.h" #include "Interpreter.h" #include "JITInlines.h" #include "JITOperations.h" #include "JSArray.h" +#include "JSCInlines.h" #include "JSFunction.h" #include "LinkBuffer.h" -#include "Operations.h" -#include "RepatchBuffer.h" +#include "MaxFrameExtentForSlowPathCall.h" +#include "PCToCodeOriginMap.h" +#include "ProfilerDatabase.h" +#include "ProgramCodeBlock.h" #include "ResultType.h" -#include "SamplingTool.h" #include "SlowPathCall.h" +#include "StackAlignment.h" +#include "TypeProfilerLog.h" #include +#include +#include using namespace std; namespace JSC { -void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction) -{ - RepatchBuffer repatchBuffer(codeblock); - repatchBuffer.relinkNearCallerToTrampoline(returnAddress, newCalleeFunction); -} +double totalBaselineCompileTime; +double totalDFGCompileTime; +double totalFTLCompileTime; +double totalFTLDFGCompileTime; +double totalFTLB3CompileTime; -void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction) +void ctiPatchCallByReturnAddress(ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction) { - RepatchBuffer repatchBuffer(codeblock); - repatchBuffer.relinkCallerToTrampoline(returnAddress, newCalleeFunction); + MacroAssembler::repatchCall( + CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), + newCalleeFunction); } -void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction) +JIT::CodeRef JIT::compileCTINativeCall(VM* vm, NativeFunction func) { - RepatchBuffer repatchBuffer(codeblock); - repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction); + if (!vm->canUseJIT()) + return CodeRef::createLLIntCodeRef(llint_native_call_trampoline); + JIT jit(vm, 0); + return jit.privateCompileCTINativeCall(vm, func); } -JIT::JIT(VM* vm, CodeBlock* codeBlock) +JIT::JIT(VM* vm, CodeBlock* codeBlock, unsigned loopOSREntryBytecodeOffset) : JSInterfaceJIT(vm, codeBlock) , m_interpreter(vm->interpreter) , m_labels(codeBlock ? 
codeBlock->numberOfInstructions() : 0) - , m_bytecodeOffset((unsigned)-1) + , m_bytecodeOffset(std::numeric_limits::max()) , m_getByIdIndex(UINT_MAX) , m_putByIdIndex(UINT_MAX) , m_byValInstructionIndex(UINT_MAX) , m_callLinkInfoIndex(UINT_MAX) , m_randomGenerator(cryptographicallyRandomNumber()) + , m_pcToCodeOriginMapBuilder(*vm) , m_canBeOptimized(false) , m_shouldEmitProfiling(false) + , m_loopOSREntryBytecodeOffset(loopOSREntryBytecodeOffset) +{ +} + +JIT::~JIT() { } @@ -96,27 +108,61 @@ void JIT::emitEnterOptimizationCheck() skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter()))); ASSERT(!m_bytecodeOffset); + + copyCalleeSavesFromFrameOrRegisterToVMEntryFrameCalleeSavesBuffer(); + callOperation(operationOptimize, m_bytecodeOffset); skipOptimize.append(branchTestPtr(Zero, returnValueGPR)); + move(returnValueGPR2, stackPointerRegister); jump(returnValueGPR); skipOptimize.link(this); } #endif +void JIT::emitNotifyWrite(WatchpointSet* set) +{ + if (!set || set->state() == IsInvalidated) { + addSlowCase(Jump()); + return; + } + + addSlowCase(branch8(NotEqual, AbsoluteAddress(set->addressOfState()), TrustedImm32(IsInvalidated))); +} + +void JIT::emitNotifyWrite(GPRReg pointerToSet) +{ + addSlowCase(branch8(NotEqual, Address(pointerToSet, WatchpointSet::offsetOfState()), TrustedImm32(IsInvalidated))); +} + +void JIT::assertStackPointerOffset() +{ + if (ASSERT_DISABLED) + return; + + addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT0); + Jump ok = branchPtr(Equal, regT0, stackPointerRegister); + breakpoint(); + ok.link(this); +} + #define NEXT_OPCODE(name) \ m_bytecodeOffset += OPCODE_LENGTH(name); \ break; #define DEFINE_SLOW_OP(name) \ case op_##name: { \ - JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_##name); \ - slowPathCall.call(); \ + if (m_bytecodeOffset >= startBytecodeOffset) { \ + JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_##name); \ + slowPathCall.call(); \ + } \ NEXT_OPCODE(op_##name); \ } #define DEFINE_OP(name) \ case name: { \ - emit_##name(currentInstruction); \ + if (m_bytecodeOffset >= startBytecodeOffset) { \ + emit_##name(currentInstruction); \ + } \ NEXT_OPCODE(name); \ } @@ -128,17 +174,60 @@ void JIT::emitEnterOptimizationCheck() void JIT::privateCompileMainPass() { + if (false) + dataLog("Compiling ", *m_codeBlock, "\n"); + + jitAssertTagsInPlace(); + jitAssertArgumentCountSane(); + Instruction* instructionsBegin = m_codeBlock->instructions().begin(); - unsigned instructionCount = m_codeBlock->instructions().size(); + unsigned instructionCount = m_instructions.size(); m_callLinkInfoIndex = 0; + unsigned startBytecodeOffset = 0; + if (m_loopOSREntryBytecodeOffset && m_codeBlock->inherits(*m_codeBlock->vm(), ProgramCodeBlock::info())) { + // We can only do this optimization because we execute ProgramCodeBlock's exactly once. + // This optimization would be invalid otherwise. When the LLInt determines it wants to + // do OSR entry into the baseline JIT in a loop, it will pass in the bytecode offset it + // was executing at when it kicked off our compilation. We only need to compile code for + // anything reachable from that bytecode offset. + + // We only bother building the bytecode graph if it could save time and executable + // memory. We pick an arbitrary offset where we deem this is profitable. 
+ if (m_loopOSREntryBytecodeOffset >= 200) { + // As a simplification, we don't find all bytecode ranges that are unreachable. + // Instead, we just find the minimum bytecode offset that is reachable, and + // compile code from that bytecode offset onwards. + + BytecodeGraph graph(m_codeBlock, m_instructions); + BytecodeBasicBlock* block = graph.findBasicBlockForBytecodeOffset(m_loopOSREntryBytecodeOffset); + RELEASE_ASSERT(block); + + GraphNodeWorklist worklist; + startBytecodeOffset = UINT_MAX; + worklist.push(block); + while (BytecodeBasicBlock* block = worklist.pop()) { + startBytecodeOffset = std::min(startBytecodeOffset, block->leaderOffset()); + worklist.pushAll(block->successors()); + } + } + } + for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) { + if (m_bytecodeOffset == startBytecodeOffset && startBytecodeOffset > 0) { + // We've proven all bytecode instructions up until here are unreachable. + // Let's ensure that by crashing if it's ever hit. + breakpoint(); + } + if (m_disassembler) m_disassembler->setForBytecodeMainPath(m_bytecodeOffset, label()); Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset; ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset); + m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeOffset)); + #if ENABLE(OPCODE_SAMPLING) if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice. sampleInstruction(currentInstruction); @@ -158,55 +247,72 @@ void JIT::privateCompileMainPass() AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin( m_compilation->bytecodes(), m_bytecodeOffset)))->address())); } + + if (Options::eagerlyUpdateTopCallFrame()) + updateTopCallFrame(); + + unsigned bytecodeOffset = m_bytecodeOffset; switch (opcodeID) { - DEFINE_SLOW_OP(del_by_val) DEFINE_SLOW_OP(in) DEFINE_SLOW_OP(less) DEFINE_SLOW_OP(lesseq) DEFINE_SLOW_OP(greater) DEFINE_SLOW_OP(greatereq) DEFINE_SLOW_OP(is_function) - DEFINE_SLOW_OP(is_object) + DEFINE_SLOW_OP(is_object_or_null) DEFINE_SLOW_OP(typeof) - DEFINE_OP(op_touch_entry) DEFINE_OP(op_add) DEFINE_OP(op_bitand) DEFINE_OP(op_bitor) DEFINE_OP(op_bitxor) DEFINE_OP(op_call) + DEFINE_OP(op_tail_call) DEFINE_OP(op_call_eval) DEFINE_OP(op_call_varargs) + DEFINE_OP(op_tail_call_varargs) + DEFINE_OP(op_tail_call_forward_arguments) + DEFINE_OP(op_construct_varargs) DEFINE_OP(op_catch) DEFINE_OP(op_construct) - DEFINE_OP(op_get_callee) DEFINE_OP(op_create_this) DEFINE_OP(op_to_this) - DEFINE_OP(op_init_lazy_reg) - DEFINE_OP(op_create_arguments) + DEFINE_OP(op_create_direct_arguments) + DEFINE_OP(op_create_scoped_arguments) + DEFINE_OP(op_create_cloned_arguments) + DEFINE_OP(op_get_argument) + DEFINE_OP(op_argument_count) + DEFINE_OP(op_create_rest) + DEFINE_OP(op_get_rest_length) + DEFINE_OP(op_check_tdz) + DEFINE_OP(op_assert) DEFINE_OP(op_debug) DEFINE_OP(op_del_by_id) + DEFINE_OP(op_del_by_val) DEFINE_OP(op_div) DEFINE_OP(op_end) DEFINE_OP(op_enter) - DEFINE_OP(op_create_activation) + DEFINE_OP(op_get_scope) DEFINE_OP(op_eq) DEFINE_OP(op_eq_null) - case op_get_by_id_out_of_line: + DEFINE_OP(op_try_get_by_id) case op_get_array_length: + case op_get_by_id_proto_load: + case op_get_by_id_unset: DEFINE_OP(op_get_by_id) - DEFINE_OP(op_get_arguments_length) + DEFINE_OP(op_get_by_id_with_this) DEFINE_OP(op_get_by_val) - DEFINE_OP(op_get_argument_by_val) - DEFINE_OP(op_get_by_pname) - DEFINE_OP(op_get_pnames) - DEFINE_OP(op_check_has_instance) + 
DEFINE_OP(op_get_by_val_with_this) + DEFINE_OP(op_overrides_has_instance) DEFINE_OP(op_instanceof) + DEFINE_OP(op_instanceof_custom) + DEFINE_OP(op_is_empty) DEFINE_OP(op_is_undefined) DEFINE_OP(op_is_boolean) DEFINE_OP(op_is_number) - DEFINE_OP(op_is_string) + DEFINE_OP(op_is_object) + DEFINE_OP(op_is_cell_with_type) DEFINE_OP(op_jeq_null) DEFINE_OP(op_jfalse) DEFINE_OP(op_jmp) @@ -222,9 +328,9 @@ void JIT::privateCompileMainPass() DEFINE_OP(op_jngreatereq) DEFINE_OP(op_jtrue) DEFINE_OP(op_loop_hint) + DEFINE_OP(op_watchdog) DEFINE_OP(op_lshift) DEFINE_OP(op_mod) - DEFINE_OP(op_captured_mov) DEFINE_OP(op_mov) DEFINE_OP(op_mul) DEFINE_OP(op_negate) @@ -233,80 +339,88 @@ void JIT::privateCompileMainPass() DEFINE_OP(op_new_array) DEFINE_OP(op_new_array_with_size) DEFINE_OP(op_new_array_buffer) + DEFINE_OP(op_new_array_with_spread) + DEFINE_OP(op_spread) DEFINE_OP(op_new_func) - DEFINE_OP(op_new_captured_func) DEFINE_OP(op_new_func_exp) + DEFINE_OP(op_new_generator_func) + DEFINE_OP(op_new_generator_func_exp) + DEFINE_OP(op_new_async_func) + DEFINE_OP(op_new_async_func_exp) DEFINE_OP(op_new_object) DEFINE_OP(op_new_regexp) - DEFINE_OP(op_next_pname) DEFINE_OP(op_not) DEFINE_OP(op_nstricteq) - DEFINE_OP(op_pop_scope) DEFINE_OP(op_dec) DEFINE_OP(op_inc) - DEFINE_OP(op_profile_did_call) - DEFINE_OP(op_profile_will_call) - DEFINE_OP(op_push_name_scope) + DEFINE_OP(op_pow) + DEFINE_OP(op_profile_type) + DEFINE_OP(op_profile_control_flow) DEFINE_OP(op_push_with_scope) - case op_put_by_id_out_of_line: - case op_put_by_id_transition_direct: - case op_put_by_id_transition_normal: - case op_put_by_id_transition_direct_out_of_line: - case op_put_by_id_transition_normal_out_of_line: + DEFINE_OP(op_create_lexical_environment) + DEFINE_OP(op_get_parent_scope) DEFINE_OP(op_put_by_id) + DEFINE_OP(op_put_by_id_with_this) DEFINE_OP(op_put_by_index) case op_put_by_val_direct: DEFINE_OP(op_put_by_val) - DEFINE_OP(op_put_getter_setter) - case op_init_global_const_nop: - NEXT_OPCODE(op_init_global_const_nop); - DEFINE_OP(op_init_global_const) + DEFINE_OP(op_put_by_val_with_this) + DEFINE_OP(op_put_getter_by_id) + DEFINE_OP(op_put_setter_by_id) + DEFINE_OP(op_put_getter_setter_by_id) + DEFINE_OP(op_put_getter_by_val) + DEFINE_OP(op_put_setter_by_val) + DEFINE_OP(op_define_data_property) + DEFINE_OP(op_define_accessor_property) DEFINE_OP(op_ret) - DEFINE_OP(op_ret_object_or_this) DEFINE_OP(op_rshift) DEFINE_OP(op_unsigned) DEFINE_OP(op_urshift) + DEFINE_OP(op_set_function_name) DEFINE_OP(op_strcat) DEFINE_OP(op_stricteq) DEFINE_OP(op_sub) DEFINE_OP(op_switch_char) DEFINE_OP(op_switch_imm) DEFINE_OP(op_switch_string) - DEFINE_OP(op_tear_off_activation) - DEFINE_OP(op_tear_off_arguments) DEFINE_OP(op_throw) DEFINE_OP(op_throw_static_error) DEFINE_OP(op_to_number) + DEFINE_OP(op_to_string) DEFINE_OP(op_to_primitive) DEFINE_OP(op_resolve_scope) DEFINE_OP(op_get_from_scope) DEFINE_OP(op_put_to_scope) - - case op_get_by_id_chain: - case op_get_by_id_generic: - case op_get_by_id_proto: - case op_get_by_id_self: - case op_get_by_id_getter_chain: - case op_get_by_id_getter_proto: - case op_get_by_id_getter_self: - case op_get_by_id_custom_chain: - case op_get_by_id_custom_proto: - case op_get_by_id_custom_self: - case op_get_string_length: - case op_put_by_id_generic: - case op_put_by_id_replace: - case op_put_by_id_transition: + DEFINE_OP(op_get_from_arguments) + DEFINE_OP(op_put_to_arguments) + + DEFINE_OP(op_get_enumerable_length) + DEFINE_OP(op_has_generic_property) + DEFINE_OP(op_has_structure_property) + 
DEFINE_OP(op_has_indexed_property) + DEFINE_OP(op_get_direct_pname) + DEFINE_OP(op_get_property_enumerator) + DEFINE_OP(op_enumerator_structure_pname) + DEFINE_OP(op_enumerator_generic_pname) + DEFINE_OP(op_to_index_string) + + DEFINE_OP(op_log_shadow_chicken_prologue) + DEFINE_OP(op_log_shadow_chicken_tail) + default: RELEASE_ASSERT_NOT_REACHED(); } + + if (false) + dataLog("At ", bytecodeOffset, ": ", m_slowCases.size(), "\n"); } - RELEASE_ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size()); + RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size()); #ifndef NDEBUG // Reset this, in order to guard its use with ASSERTs. - m_bytecodeOffset = (unsigned)-1; + m_bytecodeOffset = std::numeric_limits::max(); #endif } @@ -338,6 +452,8 @@ void JIT::privateCompileSlowCases() for (Vector::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) { m_bytecodeOffset = iter->to; + m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeOffset)); + unsigned firstTo = m_bytecodeOffset; Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset; @@ -359,25 +475,26 @@ void JIT::privateCompileSlowCases() DEFINE_SLOWCASE_OP(op_bitor) DEFINE_SLOWCASE_OP(op_bitxor) DEFINE_SLOWCASE_OP(op_call) + DEFINE_SLOWCASE_OP(op_tail_call) DEFINE_SLOWCASE_OP(op_call_eval) DEFINE_SLOWCASE_OP(op_call_varargs) + DEFINE_SLOWCASE_OP(op_tail_call_varargs) + DEFINE_SLOWCASE_OP(op_tail_call_forward_arguments) + DEFINE_SLOWCASE_OP(op_construct_varargs) DEFINE_SLOWCASE_OP(op_construct) DEFINE_SLOWCASE_OP(op_to_this) + DEFINE_SLOWCASE_OP(op_check_tdz) DEFINE_SLOWCASE_OP(op_create_this) - DEFINE_SLOWCASE_OP(op_captured_mov) DEFINE_SLOWCASE_OP(op_div) DEFINE_SLOWCASE_OP(op_eq) - DEFINE_SLOWCASE_OP(op_get_callee) - case op_get_by_id_out_of_line: + DEFINE_SLOWCASE_OP(op_try_get_by_id) case op_get_array_length: + case op_get_by_id_proto_load: + case op_get_by_id_unset: DEFINE_SLOWCASE_OP(op_get_by_id) - DEFINE_SLOWCASE_OP(op_get_arguments_length) DEFINE_SLOWCASE_OP(op_get_by_val) - DEFINE_SLOWCASE_OP(op_get_argument_by_val) - DEFINE_SLOWCASE_OP(op_get_by_pname) - DEFINE_SLOWCASE_OP(op_check_has_instance) DEFINE_SLOWCASE_OP(op_instanceof) - DEFINE_SLOWCASE_OP(op_jfalse) + DEFINE_SLOWCASE_OP(op_instanceof_custom) DEFINE_SLOWCASE_OP(op_jless) DEFINE_SLOWCASE_OP(op_jlesseq) DEFINE_SLOWCASE_OP(op_jgreater) @@ -386,8 +503,8 @@ void JIT::privateCompileSlowCases() DEFINE_SLOWCASE_OP(op_jnlesseq) DEFINE_SLOWCASE_OP(op_jngreater) DEFINE_SLOWCASE_OP(op_jngreatereq) - DEFINE_SLOWCASE_OP(op_jtrue) DEFINE_SLOWCASE_OP(op_loop_hint) + DEFINE_SLOWCASE_OP(op_watchdog) DEFINE_SLOWCASE_OP(op_lshift) DEFINE_SLOWCASE_OP(op_mod) DEFINE_SLOWCASE_OP(op_mul) @@ -398,11 +515,6 @@ void JIT::privateCompileSlowCases() DEFINE_SLOWCASE_OP(op_nstricteq) DEFINE_SLOWCASE_OP(op_dec) DEFINE_SLOWCASE_OP(op_inc) - case op_put_by_id_out_of_line: - case op_put_by_id_transition_direct: - case op_put_by_id_transition_normal: - case op_put_by_id_transition_direct_out_of_line: - case op_put_by_id_transition_normal_out_of_line: DEFINE_SLOWCASE_OP(op_put_by_id) case op_put_by_val_direct: DEFINE_SLOWCASE_OP(op_put_by_val) @@ -412,7 +524,11 @@ void JIT::privateCompileSlowCases() DEFINE_SLOWCASE_OP(op_stricteq) DEFINE_SLOWCASE_OP(op_sub) DEFINE_SLOWCASE_OP(op_to_number) + DEFINE_SLOWCASE_OP(op_to_string) DEFINE_SLOWCASE_OP(op_to_primitive) + DEFINE_SLOWCASE_OP(op_has_indexed_property) + DEFINE_SLOWCASE_OP(op_has_structure_property) + DEFINE_SLOWCASE_OP(op_get_direct_pname) DEFINE_SLOWCASE_OP(op_resolve_scope) 
DEFINE_SLOWCASE_OP(op_get_from_scope) @@ -422,6 +538,9 @@ void JIT::privateCompileSlowCases() RELEASE_ASSERT_NOT_REACHED(); } + if (false) + dataLog("At ", firstTo, " slow: ", iter - m_slowCases.begin(), "\n"); + RELEASE_ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen."); RELEASE_ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen."); @@ -433,17 +552,26 @@ void JIT::privateCompileSlowCases() RELEASE_ASSERT(m_getByIdIndex == m_getByIds.size()); RELEASE_ASSERT(m_putByIdIndex == m_putByIds.size()); - RELEASE_ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size()); + RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size()); RELEASE_ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles()); #ifndef NDEBUG // Reset this, in order to guard its use with ASSERTs. - m_bytecodeOffset = (unsigned)-1; + m_bytecodeOffset = std::numeric_limits::max(); #endif } -CompilationResult JIT::privateCompile(JITCompilationEffort effort) +void JIT::compileWithoutLinking(JITCompilationEffort effort) { + double before = 0; + if (UNLIKELY(computeCompileTimes())) + before = monotonicallyIncreasingTimeMS(); + + { + ConcurrentJSLocker locker(m_codeBlock->m_lock); + m_instructions = m_codeBlock->instructions().clone(); + } + DFG::CapabilityLevel level = m_codeBlock->capabilityLevel(); switch (level) { case DFG::CannotCompile: @@ -451,11 +579,6 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort) m_canBeOptimizedOrInlined = false; m_shouldEmitProfiling = false; break; - case DFG::CanInline: - m_canBeOptimized = false; - m_canBeOptimizedOrInlined = true; - m_shouldEmitProfiling = true; - break; case DFG::CanCompile: case DFG::CanCompileAndInline: m_canBeOptimized = true; @@ -469,6 +592,7 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort) switch (m_codeBlock->codeType()) { case GlobalCode: + case ModuleCode: case EvalCode: m_codeBlock->m_shouldAlwaysBeInlined = false; break; @@ -478,9 +602,9 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort) m_codeBlock->m_shouldAlwaysBeInlined &= canInline(level) && DFG::mightInlineFunction(m_codeBlock); break; } - - if (Options::showDisassembly() || m_vm->m_perBytecodeProfiler) - m_disassembler = adoptPtr(new JITDisassembler(m_codeBlock)); + + if (Options::dumpDisassembly() || (m_vm->m_perBytecodeProfiler && Options::disassembleBaselineForProfiler())) + m_disassembler = std::make_unique(m_codeBlock); if (m_vm->m_perBytecodeProfiler) { m_compilation = adoptRef( new Profiler::Compilation( @@ -489,6 +613,8 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort) m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock); } + m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(0, nullptr)); + if (m_disassembler) m_disassembler->setStartOfCode(label()); @@ -496,9 +622,8 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort) if (m_randomGenerator.getUint32() & 1) nop(); - preserveReturnAddressAfterCall(regT2); - emitPutReturnPCToCallFrameHeader(regT2); - emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock); + emitFunctionPrologue(); + emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock); Label beginLabel(this); @@ -507,9 +632,8 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort) sampleInstruction(m_codeBlock->instructions().begin()); #endif - Jump stackCheck; if (m_codeBlock->codeType() == 
FunctionCode) { - ASSERT(m_bytecodeOffset == (unsigned)-1); + ASSERT(m_bytecodeOffset == std::numeric_limits::max()); if (shouldEmitProfiling()) { for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) { // If this is a constructor, then we want to put in a dummy profiling site (to @@ -526,49 +650,55 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort) emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument)); } } - - addPtr(TrustedImm32(virtualRegisterForLocal(frameRegisterCountFor(m_codeBlock)).offset() * sizeof(Register)), callFrameRegister, regT1); - stackCheck = branchPtr(Above, AbsoluteAddress(m_vm->addressOfJSStackLimit()), regT1); } - Label functionBody = label(); + addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT1); + Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfSoftStackLimit()), regT1); + + move(regT1, stackPointerRegister); + checkStackPointerAlignment(); + + emitSaveCalleeSaves(); + emitMaterializeTagCheckRegisters(); + RELEASE_ASSERT(!JITCode::isJIT(m_codeBlock->jitType())); + privateCompileMainPass(); privateCompileLinkPass(); privateCompileSlowCases(); if (m_disassembler) m_disassembler->setEndOfSlowPath(label()); + m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin()); - Label arityCheck; - if (m_codeBlock->codeType() == FunctionCode) { - stackCheck.link(this); - m_bytecodeOffset = 0; - callOperationWithCallFrameRollbackOnException(operationStackCheck, m_codeBlock); -#ifndef NDEBUG - m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs. -#endif - jump(functionBody); + stackOverflow.link(this); + m_bytecodeOffset = 0; + if (maxFrameExtentForSlowPathCall) + addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister); + callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock); - arityCheck = label(); + if (m_codeBlock->codeType() == FunctionCode) { + m_arityCheck = label(); store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined); - preserveReturnAddressAfterCall(regT2); - emitPutReturnPCToCallFrameHeader(regT2); - emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock); + emitFunctionPrologue(); + emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock); - load32(payloadFor(JSStack::ArgumentCount), regT1); + load32(payloadFor(CallFrameSlot::argumentCount), regT1); branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this); m_bytecodeOffset = 0; + if (maxFrameExtentForSlowPathCall) + addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister); callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck); - if (returnValueGPR != regT0) - move(returnValueGPR, regT0); - branchTest32(Zero, regT0).linkTo(beginLabel, this); - emitNakedCall(m_vm->getCTIStub(arityFixup).code()); + if (maxFrameExtentForSlowPathCall) + addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister); + branchTest32(Zero, returnValueGPR).linkTo(beginLabel, this); + move(returnValueGPR, GPRInfo::argumentGPR0); + emitNakedCall(m_vm->getCTIStub(arityFixupGenerator).code()); #if !ASSERT_DISABLED - m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs. + m_bytecodeOffset = std::numeric_limits::max(); // Reset this, in order to guard its use with ASSERTs. 
#endif jump(beginLabel); @@ -580,14 +710,33 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort) if (m_disassembler) m_disassembler->setEndOfCode(label()); + m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin()); + + m_linkBuffer = std::unique_ptr(new LinkBuffer(*m_vm, *this, m_codeBlock, effort)); - LinkBuffer patchBuffer(*m_vm, this, m_codeBlock, effort); + double after = 0; + if (UNLIKELY(computeCompileTimes())) { + after = monotonicallyIncreasingTimeMS(); + + if (Options::reportTotalCompileTimes()) + totalBaselineCompileTime += after - before; + } + if (UNLIKELY(reportCompileTimes())) { + CString codeBlockName = toCString(*m_codeBlock); + + dataLog("Optimized ", codeBlockName, " with Baseline JIT into ", m_linkBuffer->size(), " bytes in ", after - before, " ms.\n"); + } +} + +CompilationResult JIT::link() +{ + LinkBuffer& patchBuffer = *m_linkBuffer; + if (patchBuffer.didFailToAllocate()) return CompilationFailed; // Translate vPC offsets into addresses in JIT generated code, for switch tables. - for (unsigned i = 0; i < m_switches.size(); ++i) { - SwitchRecord record = m_switches[i]; + for (auto& record : m_switches) { unsigned bytecodeOffset = record.bytecodeOffset; if (record.type != SwitchRecord::String) { @@ -605,10 +754,9 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort) record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]); - StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end(); - for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) { - unsigned offset = it->value.branchOffset; - it->value.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault; + for (auto& location : record.jumpTable.stringJumpTable->offsetTable.values()) { + unsigned offset = location.branchOffset; + location.ctiOffset = offset ? 
patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault; } } } @@ -618,9 +766,9 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort) handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]); } - for (Vector::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) { - if (iter->to) - patchBuffer.link(iter->from, FunctionPtr(iter->to)); + for (auto& record : m_calls) { + if (record.to) + patchBuffer.link(record.from, FunctionPtr(record.to)); } for (unsigned i = m_getByIds.size(); i--;) @@ -628,68 +776,77 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort) for (unsigned i = m_putByIds.size(); i--;) m_putByIds[i].finalize(patchBuffer); - m_codeBlock->setNumberOfByValInfos(m_byValCompilationInfo.size()); - for (unsigned i = 0; i < m_byValCompilationInfo.size(); ++i) { - CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(m_byValCompilationInfo[i].badTypeJump)); - CodeLocationLabel doneTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].doneTarget); - CodeLocationLabel slowPathTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].slowPathTarget); - CodeLocationCall returnAddress = patchBuffer.locationOf(m_byValCompilationInfo[i].returnAddress); - - m_codeBlock->byValInfo(i) = ByValInfo( - m_byValCompilationInfo[i].bytecodeIndex, - badTypeJump, - m_byValCompilationInfo[i].arrayMode, - differenceBetweenCodePtr(badTypeJump, doneTarget), - differenceBetweenCodePtr(returnAddress, slowPathTarget)); + if (m_byValCompilationInfo.size()) { + CodeLocationLabel exceptionHandler = patchBuffer.locationOf(m_exceptionHandler); + + for (const auto& byValCompilationInfo : m_byValCompilationInfo) { + PatchableJump patchableNotIndexJump = byValCompilationInfo.notIndexJump; + CodeLocationJump notIndexJump = CodeLocationJump(); + if (Jump(patchableNotIndexJump).isSet()) + notIndexJump = CodeLocationJump(patchBuffer.locationOf(patchableNotIndexJump)); + CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(byValCompilationInfo.badTypeJump)); + CodeLocationLabel doneTarget = patchBuffer.locationOf(byValCompilationInfo.doneTarget); + CodeLocationLabel nextHotPathTarget = patchBuffer.locationOf(byValCompilationInfo.nextHotPathTarget); + CodeLocationLabel slowPathTarget = patchBuffer.locationOf(byValCompilationInfo.slowPathTarget); + CodeLocationCall returnAddress = patchBuffer.locationOf(byValCompilationInfo.returnAddress); + + *byValCompilationInfo.byValInfo = ByValInfo( + byValCompilationInfo.bytecodeIndex, + notIndexJump, + badTypeJump, + exceptionHandler, + byValCompilationInfo.arrayMode, + byValCompilationInfo.arrayProfile, + differenceBetweenCodePtr(badTypeJump, doneTarget), + differenceBetweenCodePtr(badTypeJump, nextHotPathTarget), + differenceBetweenCodePtr(returnAddress, slowPathTarget)); + } } - m_codeBlock->setNumberOfCallLinkInfos(m_callStructureStubCompilationInfo.size()); - for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) { - CallLinkInfo& info = m_codeBlock->callLinkInfo(i); - info.callType = m_callStructureStubCompilationInfo[i].callType; - info.codeOrigin = CodeOrigin(m_callStructureStubCompilationInfo[i].bytecodeIndex); - info.callReturnLocation = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation); - info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin); - info.hotPathOther = 
patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther); - info.calleeGPR = regT0; + + for (auto& compilationInfo : m_callCompilationInfo) { + CallLinkInfo& info = *compilationInfo.callLinkInfo; + info.setCallLocations( + CodeLocationLabel(patchBuffer.locationOfNearCall(compilationInfo.callReturnLocation)), + CodeLocationLabel(patchBuffer.locationOf(compilationInfo.hotPathBegin)), + patchBuffer.locationOfNearCall(compilationInfo.hotPathOther)); } -#if ENABLE(DFG_JIT) || ENABLE(LLINT) - if (canBeOptimizedOrInlined() -#if ENABLE(LLINT) - || true -#endif - ) { - CompactJITCodeMap::Encoder jitCodeMapEncoder; - for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) { - if (m_labels[bytecodeOffset].isSet()) - jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset])); - } - m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish()); + CompactJITCodeMap::Encoder jitCodeMapEncoder; + for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) { + if (m_labels[bytecodeOffset].isSet()) + jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset])); } -#endif + m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish()); MacroAssemblerCodePtr withArityCheck; if (m_codeBlock->codeType() == FunctionCode) - withArityCheck = patchBuffer.locationOf(arityCheck); + withArityCheck = patchBuffer.locationOf(m_arityCheck); - if (Options::showDisassembly()) + if (Options::dumpDisassembly()) { m_disassembler->dump(patchBuffer); + patchBuffer.didAlreadyDisassemble(); + } if (m_compilation) { - m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer); - m_vm->m_perBytecodeProfiler->addCompilation(m_compilation); + if (Options::disassembleBaselineForProfiler()) + m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer); + m_vm->m_perBytecodeProfiler->addCompilation(m_codeBlock, *m_compilation); } + + if (m_pcToCodeOriginMapBuilder.didBuildMapping()) + m_codeBlock->setPCToCodeOriginMap(std::make_unique(WTFMove(m_pcToCodeOriginMapBuilder), patchBuffer)); - CodeRef result = patchBuffer.finalizeCodeWithoutDisassembly(); + CodeRef result = FINALIZE_CODE( + patchBuffer, + ("Baseline JIT code for %s", toCString(CodeBlockWithJITType(m_codeBlock, JITCode::BaselineJIT)).data())); - m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.add( + m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT->add( static_cast(result.size()) / - static_cast(m_codeBlock->instructions().size())); - + static_cast(m_instructions.size())); + m_codeBlock->shrinkToFit(CodeBlock::LateShrink); m_codeBlock->setJITCode( - adoptRef(new DirectJITCode(result, JITCode::BaselineJIT)), - withArityCheck); - + adoptRef(*new DirectJITCode(result, withArityCheck, JITCode::BaselineJIT))); + #if ENABLE(JIT_VERBOSE) dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end()); #endif @@ -697,72 +854,100 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort) return CompilationSuccessful; } -void JIT::linkFor(ExecState* exec, JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, VM* vm, CodeSpecializationKind kind) +CompilationResult JIT::privateCompile(JITCompilationEffort effort) { - RepatchBuffer repatchBuffer(callerCodeBlock); - - ASSERT(!callLinkInfo->isLinked()); - callLinkInfo->callee.set(*vm, callLinkInfo->hotPathBegin, callerCodeBlock->ownerExecutable(), 
callee); - callLinkInfo->lastSeenCallee.set(*vm, callerCodeBlock->ownerExecutable(), callee); - repatchBuffer.relink(callLinkInfo->hotPathOther, code); - - if (calleeCodeBlock) - calleeCodeBlock->linkIncomingCall(exec, callLinkInfo); - - // Patch the slow patch so we do not continue to try to link. - if (kind == CodeForCall) { - ASSERT(callLinkInfo->callType == CallLinkInfo::Call - || callLinkInfo->callType == CallLinkInfo::CallVarargs); - if (callLinkInfo->callType == CallLinkInfo::Call) { - repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(linkClosureCallThunkGenerator).code()); - return; - } - - repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(virtualCallThunkGenerator).code()); - return; - } - - ASSERT(kind == CodeForConstruct); - repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(virtualConstructThunkGenerator).code()); + doMainThreadPreparationBeforeCompile(); + compileWithoutLinking(effort); + return link(); } -void JIT::linkSlowCall(CodeBlock* callerCodeBlock, CallLinkInfo* callLinkInfo) +void JIT::privateCompileExceptionHandlers() { - RepatchBuffer repatchBuffer(callerCodeBlock); + if (!m_exceptionChecksWithCallFrameRollback.empty()) { + m_exceptionChecksWithCallFrameRollback.link(this); - repatchBuffer.relink(callLinkInfo->callReturnLocation, callerCodeBlock->vm()->getCTIStub(virtualCallThunkGenerator).code()); -} + copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(); -void JIT::privateCompileExceptionHandlers() -{ - if (m_exceptionChecks.empty() && m_exceptionChecksWithCallFrameRollback.empty()) - return; + // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*). - Jump doLookup; + move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1); - if (!m_exceptionChecksWithCallFrameRollback.empty()) { - m_exceptionChecksWithCallFrameRollback.link(this); - emitGetCallerFrameFromCallFrameHeaderPtr(GPRInfo::argumentGPR0); - doLookup = jump(); +#if CPU(X86) + // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer! + poke(GPRInfo::argumentGPR0); + poke(GPRInfo::argumentGPR1, 1); +#endif + m_calls.append(CallRecord(call(), std::numeric_limits::max(), FunctionPtr(lookupExceptionHandlerFromCallerFrame).value())); + jumpToExceptionHandler(); } - if (!m_exceptionChecks.empty()) + if (!m_exceptionChecks.empty() || m_byValCompilationInfo.size()) { + m_exceptionHandler = label(); m_exceptionChecks.link(this); - - // lookupExceptionHandler is passed one argument, the exec (the CallFrame*). - move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); - if (doLookup.isSet()) - doLookup.link(this); + copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(); + + // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*). + move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0); + move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1); #if CPU(X86) - // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer! - poke(GPRInfo::argumentGPR0); + // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer! 
+ poke(GPRInfo::argumentGPR0); + poke(GPRInfo::argumentGPR1, 1); #endif - m_calls.append(CallRecord(call(), (unsigned)-1, FunctionPtr(lookupExceptionHandler).value())); - jumpToExceptionHandler(); + m_calls.append(CallRecord(call(), std::numeric_limits::max(), FunctionPtr(lookupExceptionHandler).value())); + jumpToExceptionHandler(); + } +} + +void JIT::doMainThreadPreparationBeforeCompile() +{ + // This ensures that we have the most up to date type information when performing typecheck optimizations for op_profile_type. + if (m_vm->typeProfiler()) + m_vm->typeProfilerLog()->processLogEntries(ASCIILiteral("Preparing for JIT compilation.")); +} + +unsigned JIT::frameRegisterCountFor(CodeBlock* codeBlock) +{ + ASSERT(static_cast(codeBlock->m_numCalleeLocals) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast(codeBlock->m_numCalleeLocals))); + + return roundLocalRegisterCountForFramePointerOffset(codeBlock->m_numCalleeLocals + maxFrameExtentForSlowPathCallInRegisters); +} + +int JIT::stackPointerOffsetFor(CodeBlock* codeBlock) +{ + return virtualRegisterForLocal(frameRegisterCountFor(codeBlock) - 1).offset(); +} + +bool JIT::reportCompileTimes() +{ + return Options::reportCompileTimes() || Options::reportBaselineCompileTimes(); } +bool JIT::computeCompileTimes() +{ + return reportCompileTimes() || Options::reportTotalCompileTimes(); +} + +HashMap JIT::compileTimeStats() +{ + HashMap result; + if (Options::reportTotalCompileTimes()) { + result.add("Total Compile Time", totalBaselineCompileTime + totalDFGCompileTime + totalFTLCompileTime); + result.add("Baseline Compile Time", totalBaselineCompileTime); +#if ENABLE(DFG_JIT) + result.add("DFG Compile Time", totalDFGCompileTime); +#if ENABLE(FTL_JIT) + result.add("FTL Compile Time", totalFTLCompileTime); + result.add("FTL (DFG) Compile Time", totalFTLDFGCompileTime); + result.add("FTL (B3) Compile Time", totalFTLB3CompileTime); +#endif // ENABLE(FTL_JIT) +#endif // ENABLE(DFG_JIT) + } + return result; +} } // namespace JSC diff --git a/Source/JavaScriptCore/jit/JIT.h b/Source/JavaScriptCore/jit/JIT.h index 298075706..d8e74d45a 100644 --- a/Source/JavaScriptCore/jit/JIT.h +++ b/Source/JavaScriptCore/jit/JIT.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008, 2012, 2013 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2012-2016 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -23,14 +23,13 @@ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ -#ifndef JIT_h -#define JIT_h +#pragma once #if ENABLE(JIT) // We've run into some problems where changing the size of the class JIT leads to // performance fluctuations. Try forcing alignment in an attempt to stabalize this. 
-#if COMPILER(GCC) +#if COMPILER(GCC_OR_CLANG) #define JIT_CLASS_ALIGNMENT __attribute__ ((aligned (32))) #else #define JIT_CLASS_ALIGNMENT @@ -40,38 +39,33 @@ #include "CodeBlock.h" #include "CompactJITCodeMap.h" -#include "Interpreter.h" #include "JITDisassembler.h" #include "JITInlineCacheGenerator.h" +#include "JITMathIC.h" #include "JSInterfaceJIT.h" -#include "LegacyProfiler.h" -#include "Opcode.h" -#include "ResultType.h" -#include "SamplingTool.h" +#include "PCToCodeOriginMap.h" #include "UnusedPointer.h" namespace JSC { + enum OpcodeID : unsigned; + class ArrayAllocationProfile; + class CallLinkInfo; class CodeBlock; class FunctionExecutable; class JIT; - class JSPropertyNameIterator; class Identifier; class Interpreter; - class JSScope; - class JSStack; class MarkedAllocator; class Register; class StructureChain; + class StructureStubInfo; - struct CallLinkInfo; struct Instruction; struct OperandTypes; - struct PolymorphicAccessStructureList; struct SimpleJumpTable; struct StringJumpTable; - struct StructureStubInfo; struct CallRecord { MacroAssembler::Call from; @@ -104,12 +98,10 @@ namespace JSC { struct SlowCaseEntry { MacroAssembler::Jump from; unsigned to; - unsigned hint; - SlowCaseEntry(MacroAssembler::Jump f, unsigned t, unsigned h = 0) + SlowCaseEntry(MacroAssembler::Jump f, unsigned t) : from(f) , to(t) - , hint(h) { } }; @@ -151,34 +143,38 @@ namespace JSC { struct ByValCompilationInfo { ByValCompilationInfo() { } - ByValCompilationInfo(unsigned bytecodeIndex, MacroAssembler::PatchableJump badTypeJump, JITArrayMode arrayMode, MacroAssembler::Label doneTarget) - : bytecodeIndex(bytecodeIndex) + ByValCompilationInfo(ByValInfo* byValInfo, unsigned bytecodeIndex, MacroAssembler::PatchableJump notIndexJump, MacroAssembler::PatchableJump badTypeJump, JITArrayMode arrayMode, ArrayProfile* arrayProfile, MacroAssembler::Label doneTarget, MacroAssembler::Label nextHotPathTarget) + : byValInfo(byValInfo) + , bytecodeIndex(bytecodeIndex) + , notIndexJump(notIndexJump) , badTypeJump(badTypeJump) , arrayMode(arrayMode) + , arrayProfile(arrayProfile) , doneTarget(doneTarget) + , nextHotPathTarget(nextHotPathTarget) { } - + + ByValInfo* byValInfo; unsigned bytecodeIndex; + MacroAssembler::PatchableJump notIndexJump; MacroAssembler::PatchableJump badTypeJump; JITArrayMode arrayMode; + ArrayProfile* arrayProfile; MacroAssembler::Label doneTarget; + MacroAssembler::Label nextHotPathTarget; MacroAssembler::Label slowPathTarget; MacroAssembler::Call returnAddress; }; - struct StructureStubCompilationInfo { + struct CallCompilationInfo { MacroAssembler::DataLabelPtr hotPathBegin; MacroAssembler::Call hotPathOther; MacroAssembler::Call callReturnLocation; - CallLinkInfo::CallType callType; - unsigned bytecodeIndex; + CallLinkInfo* callLinkInfo; }; - // Near calls can only be patched to other JIT code, regular calls can be patched to JIT code or relinked to stub functions. 
- void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction); - void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction); - void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction); + void ctiPatchCallByReturnAddress(ReturnAddressPtr, FunctionPtr newCalleeFunction); class JIT : private JSInterfaceJIT { friend class JITSlowPathCall; @@ -195,23 +191,31 @@ namespace JSC { static const int patchPutByIdDefaultOffset = 256; public: - static CompilationResult compile(VM* vm, CodeBlock* codeBlock, JITCompilationEffort effort) + JIT(VM*, CodeBlock* = 0, unsigned loopOSREntryBytecodeOffset = 0); + ~JIT(); + + void compileWithoutLinking(JITCompilationEffort); + CompilationResult link(); + + void doMainThreadPreparationBeforeCompile(); + + static CompilationResult compile(VM* vm, CodeBlock* codeBlock, JITCompilationEffort effort, unsigned bytecodeOffset = 0) { - return JIT(vm, codeBlock).privateCompile(effort); + return JIT(vm, codeBlock, bytecodeOffset).privateCompile(effort); } - static void compileClosureCall(VM* vm, CallLinkInfo* callLinkInfo, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, Structure* expectedStructure, ExecutableBase* expectedExecutable, MacroAssemblerCodePtr codePtr) + static void compileGetByVal(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode) { - JIT jit(vm, callerCodeBlock); - jit.m_bytecodeOffset = callLinkInfo->codeOrigin.bytecodeIndex; - jit.privateCompileClosureCall(callLinkInfo, calleeCodeBlock, expectedStructure, expectedExecutable, codePtr); + JIT jit(vm, codeBlock); + jit.m_bytecodeOffset = byValInfo->bytecodeIndex; + jit.privateCompileGetByVal(byValInfo, returnAddress, arrayMode); } - static void compileGetByVal(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode) + static void compileGetByValWithCachedId(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, const Identifier& propertyName) { JIT jit(vm, codeBlock); jit.m_bytecodeOffset = byValInfo->bytecodeIndex; - jit.privateCompileGetByVal(byValInfo, returnAddress, arrayMode); + jit.privateCompileGetByValWithCachedId(byValInfo, returnAddress, propertyName); } static void compilePutByVal(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode) @@ -228,39 +232,39 @@ namespace JSC { jit.privateCompilePutByVal(byValInfo, returnAddress, arrayMode); } - static CodeRef compileCTINativeCall(VM* vm, NativeFunction func) + static void compilePutByValWithCachedId(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, PutKind putKind, const Identifier& propertyName) { - if (!vm->canUseJIT()) { -#if ENABLE(LLINT) - return CodeRef::createLLIntCodeRef(llint_native_call_trampoline); -#else - return CodeRef(); -#endif - } - JIT jit(vm, 0); - return jit.privateCompileCTINativeCall(vm, func); + JIT jit(vm, codeBlock); + jit.m_bytecodeOffset = byValInfo->bytecodeIndex; + jit.privateCompilePutByValWithCachedId(byValInfo, returnAddress, putKind, propertyName); } - static void linkFor(ExecState*, JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, CodePtr, CallLinkInfo*, VM*, CodeSpecializationKind); - static void linkSlowCall(CodeBlock* callerCodeBlock, CallLinkInfo*); - - static 
unsigned frameRegisterCountFor(CodeBlock* codeBlock) + static void compileHasIndexedProperty(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode) { - return codeBlock->m_numCalleeRegisters; + JIT jit(vm, codeBlock); + jit.m_bytecodeOffset = byValInfo->bytecodeIndex; + jit.privateCompileHasIndexedProperty(byValInfo, returnAddress, arrayMode); } - private: - JIT(VM*, CodeBlock* = 0); + static CodeRef compileCTINativeCall(VM*, NativeFunction); + + static unsigned frameRegisterCountFor(CodeBlock*); + static int stackPointerOffsetFor(CodeBlock*); + JS_EXPORT_PRIVATE static HashMap compileTimeStats(); + + private: void privateCompileMainPass(); void privateCompileLinkPass(); void privateCompileSlowCases(); CompilationResult privateCompile(JITCompilationEffort); - void privateCompileClosureCall(CallLinkInfo*, CodeBlock* calleeCodeBlock, Structure*, ExecutableBase*, MacroAssemblerCodePtr); - void privateCompileGetByVal(ByValInfo*, ReturnAddressPtr, JITArrayMode); + void privateCompileGetByValWithCachedId(ByValInfo*, ReturnAddressPtr, const Identifier&); void privateCompilePutByVal(ByValInfo*, ReturnAddressPtr, JITArrayMode); + void privateCompilePutByValWithCachedId(ByValInfo*, ReturnAddressPtr, PutKind, const Identifier&); + + void privateCompileHasIndexedProperty(ByValInfo*, ReturnAddressPtr, JITArrayMode); Label privateCompileCTINativeCall(VM*, bool isConstruct = false); CodeRef privateCompileCTINativeCall(VM*, NativeFunction); @@ -274,6 +278,15 @@ namespace JSC { return functionCall; } +#if OS(WINDOWS) && CPU(X86_64) + Call appendCallWithSlowPathReturnType(const FunctionPtr& function) + { + Call functionCall = callWithSlowPathReturnType(); + m_calls.append(CallRecord(functionCall, m_bytecodeOffset, function.value())); + return functionCall; + } +#endif + void exceptionCheck(Jump jumpToHandler) { m_exceptionChecks.append(jumpToHandler); @@ -292,52 +305,41 @@ namespace JSC { void privateCompileExceptionHandlers(); void addSlowCase(Jump); - void addSlowCase(JumpList); + void addSlowCase(const JumpList&); void addSlowCase(); void addJump(Jump, int); void emitJumpSlowToHot(Jump, int); void compileOpCall(OpcodeID, Instruction*, unsigned callLinkInfoIndex); void compileOpCallSlowCase(OpcodeID, Instruction*, Vector::iterator&, unsigned callLinkInfoIndex); - void compileLoadVarargs(Instruction*); + void compileSetupVarargsFrame(OpcodeID, Instruction*, CallLinkInfo*); void compileCallEval(Instruction*); void compileCallEvalSlowCase(Instruction*, Vector::iterator&); void emitPutCallResult(Instruction*); enum CompileOpStrictEqType { OpStrictEq, OpNStrictEq }; void compileOpStrictEq(Instruction* instruction, CompileOpStrictEqType type); - bool isOperandConstantImmediateDouble(int src); + bool isOperandConstantDouble(int src); void emitLoadDouble(int index, FPRegisterID value); void emitLoadInt32ToDouble(int index, FPRegisterID value); - Jump emitJumpIfNotObject(RegisterID structureReg); + Jump emitJumpIfCellObject(RegisterID cellReg); + Jump emitJumpIfCellNotObject(RegisterID cellReg); - Jump addStructureTransitionCheck(JSCell*, Structure*, StructureStubInfo*, RegisterID scratch); - void addStructureTransitionCheck(JSCell*, Structure*, StructureStubInfo*, JumpList& failureCases, RegisterID scratch); - void testPrototype(JSValue, JumpList& failureCases, StructureStubInfo*); - - enum WriteBarrierMode { UnconditionalWriteBarrier, ShouldFilterValue, ShouldFilterBaseAndValue }; + enum WriteBarrierMode { UnconditionalWriteBarrier, ShouldFilterBase, 
ShouldFilterValue, ShouldFilterBaseAndValue }; // value register in write barrier is used before any scratch registers // so may safely be the same as either of the scratch registers. - Jump checkMarkWord(RegisterID owner, RegisterID scratch1, RegisterID scratch2); - Jump checkMarkWord(JSCell* owner); void emitWriteBarrier(unsigned owner, unsigned value, WriteBarrierMode); void emitWriteBarrier(JSCell* owner, unsigned value, WriteBarrierMode); -/* - void emitWriteBarrier(RegisterID owner, RegisterID valueTag, RegisterID scratch1, RegisterID scratch2, WriteBarrierMode); - void emitWriteBarrier(JSCell* owner, RegisterID value, WriteBarrierMode); -*/ + void emitWriteBarrier(JSCell* owner); - template // StructureType can be RegisterID or ImmPtr. - void emitAllocateJSObject(RegisterID allocator, StructureType, RegisterID result, RegisterID scratch); - // This assumes that the value to profile is in regT0 and that regT3 is available for // scratch. void emitValueProfilingSite(ValueProfile*); void emitValueProfilingSite(unsigned bytecodeOffset); void emitValueProfilingSite(); - void emitArrayProfilingSite(RegisterID structureAndIndexingType, RegisterID scratch, ArrayProfile*); - void emitArrayProfilingSiteForBytecodeIndex(RegisterID structureAndIndexingType, RegisterID scratch, unsigned bytecodeIndex); + void emitArrayProfilingSiteWithCell(RegisterID cell, RegisterID indexingType, ArrayProfile*); + void emitArrayProfilingSiteForBytecodeIndexWithCell(RegisterID cell, RegisterID indexingType, unsigned bytecodeIndex); void emitArrayProfileStoreToHoleSpecialCase(ArrayProfile*); void emitArrayProfileOutOfBoundsSpecialCase(ArrayProfile*); @@ -347,14 +349,22 @@ namespace JSC { // Property is int-checked and zero extended. Base is cell checked. // Structure is already profiled. Returns the slow cases. Fall-through // case contains result in regT0, and it is not yet profiled. + JumpList emitInt32Load(Instruction* instruction, PatchableJump& badType) { return emitContiguousLoad(instruction, badType, Int32Shape); } + JumpList emitDoubleLoad(Instruction*, PatchableJump& badType); + JumpList emitContiguousLoad(Instruction*, PatchableJump& badType, IndexingType expectedShape = ContiguousShape); + JumpList emitArrayStorageLoad(Instruction*, PatchableJump& badType); + JumpList emitLoadForArrayMode(Instruction*, JITArrayMode, PatchableJump& badType); + JumpList emitInt32GetByVal(Instruction* instruction, PatchableJump& badType) { return emitContiguousGetByVal(instruction, badType, Int32Shape); } JumpList emitDoubleGetByVal(Instruction*, PatchableJump& badType); JumpList emitContiguousGetByVal(Instruction*, PatchableJump& badType, IndexingType expectedShape = ContiguousShape); JumpList emitArrayStorageGetByVal(Instruction*, PatchableJump& badType); + JumpList emitDirectArgumentsGetByVal(Instruction*, PatchableJump& badType); + JumpList emitScopedArgumentsGetByVal(Instruction*, PatchableJump& badType); JumpList emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badType, TypedArrayType); JumpList emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badType, TypedArrayType); - // Property is in regT0, base is in regT0. regT2 contains indecing type. + // Property is in regT1, base is in regT0. regT2 contains indecing type. // The value to store is not yet loaded. Property is int-checked and // zero-extended. Base is cell checked. Structure is already profiled. // returns the slow cases. 
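The by-val emitters declared above (emitContiguousLoad, emitInt32GetByVal, emitDoubleGetByVal, emitArrayStorageGetByVal and the typed-array variants) share the contract spelled out in the comments: the property index is already int-checked and zero-extended, the base is cell-checked, and the generated fast path either produces the result in place or defers to the returned JumpList of slow cases. As a rough illustration of that fast-path/slow-case split, here is a plain C++ sketch. It is a conceptual model only, not MacroAssembler code; MockArray, SlowCases and the hole-as-zero convention are made-up stand-ins rather than JSC types.

    // Conceptual model of the fast-path/slow-case contract used by the
    // emit*GetByVal helpers: the fast path either returns a value or records
    // why it gave up, leaving the rest to a slow path.
    #include <cstdint>
    #include <iostream>
    #include <optional>
    #include <vector>

    enum class IndexingShape { Int32Shape, DoubleShape, ContiguousShape, ArrayStorageShape };

    struct MockArray {
        IndexingShape shape;
        std::vector<int64_t> butterfly;   // stand-in for the contiguous storage
        size_t publicLength() const { return butterfly.size(); }
    };

    struct SlowCases {
        std::vector<const char*> reasons; // in the JIT these are jumps, not strings
        void append(const char* why) { reasons.push_back(why); }
    };

    // Mirrors the shape of emitContiguousGetByVal: check the indexing shape
    // ("badType"), bounds-check against the public length, load the slot, and
    // treat a hole (modelled here as 0) as another slow case.
    std::optional<int64_t> contiguousGetByVal(const MockArray& base, uint32_t property,
                                              IndexingShape expectedShape, SlowCases& slowCases)
    {
        if (base.shape != expectedShape) {
            slowCases.append("badType: indexing shape mismatch");
            return std::nullopt;
        }
        if (property >= base.publicLength()) {
            slowCases.append("out-of-bounds index");
            return std::nullopt;
        }
        int64_t value = base.butterfly[property];
        if (!value) {                     // hole: fall back to the generic path
            slowCases.append("hole load");
            return std::nullopt;
        }
        return value;                     // "fall-through case contains result"
    }

    int main()
    {
        MockArray array { IndexingShape::ContiguousShape, { 10, 0, 30 } };
        SlowCases slowCases;
        for (uint32_t i = 0; i < 4; ++i) {
            if (auto value = contiguousGetByVal(array, i, IndexingShape::ContiguousShape, slowCases))
                std::cout << "fast path: [" << i << "] = " << *value << '\n';
            else
                std::cout << "deferred to slow path at [" << i << "]\n";
        }
        std::cout << slowCases.reasons.size() << " slow cases recorded\n";
    }

In the generated code the deferred cases are linked to an out-of-line call into the runtime rather than recorded as strings, but the shape of the contract is the same.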
@@ -374,11 +384,23 @@ namespace JSC { JumpList emitArrayStoragePutByVal(Instruction*, PatchableJump& badType); JumpList emitIntTypedArrayPutByVal(Instruction*, PatchableJump& badType, TypedArrayType); JumpList emitFloatTypedArrayPutByVal(Instruction*, PatchableJump& badType, TypedArrayType); - + + // Identifier check helper for GetByVal and PutByVal. + void emitByValIdentifierCheck(ByValInfo*, RegisterID cell, RegisterID scratch, const Identifier&, JumpList& slowCases); + + JITGetByIdGenerator emitGetByValWithCachedId(ByValInfo*, Instruction*, const Identifier&, Jump& fastDoneCase, Jump& slowDoneCase, JumpList& slowCases); + JITPutByIdGenerator emitPutByValWithCachedId(ByValInfo*, Instruction*, PutKind, const Identifier&, JumpList& doneCases, JumpList& slowCases); + enum FinalObjectMode { MayBeFinal, KnownNotFinal }; + void emitGetVirtualRegister(int src, JSValueRegs dst); + void emitPutVirtualRegister(int dst, JSValueRegs src); + + int32_t getOperandConstantInt(int src); + double getOperandConstantDouble(int src); + #if USE(JSVALUE32_64) - bool getOperandConstantImmediateInt(int op1, int op2, int& op, int32_t& constant); + bool getOperandConstantInt(int op1, int op2, int& op, int32_t& constant); void emitLoadTag(int index, RegisterID tag); void emitLoadPayload(int index, RegisterID payload); @@ -399,14 +421,8 @@ namespace JSC { void emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex, RegisterID tag); void compileGetByIdHotPath(const Identifier*); - void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, PropertyOffset cachedOffset); - void compileGetDirectOffset(JSObject* base, RegisterID resultTag, RegisterID resultPayload, PropertyOffset cachedOffset); - void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID offset, FinalObjectMode = MayBeFinal); - void compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, PropertyOffset cachedOffset); // Arithmetic opcode helpers - void emitAdd32Constant(int dst, int op, int32_t constant, ResultType opType); - void emitSub32Constant(int dst, int op, int32_t constant, ResultType opType); void emitBinaryDoubleOp(OpcodeID, int dst, int op1, int op2, OperandTypes, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters = true, bool op2IsInRegisters = true); #else // USE(JSVALUE32_64) @@ -425,72 +441,80 @@ namespace JSC { emitPutVirtualRegister(dst, payload); } - int32_t getConstantOperandImmediateInt(int src); - Jump emitJumpIfJSCell(RegisterID); Jump emitJumpIfBothJSCells(RegisterID, RegisterID, RegisterID); void emitJumpSlowCaseIfJSCell(RegisterID); void emitJumpSlowCaseIfNotJSCell(RegisterID); void emitJumpSlowCaseIfNotJSCell(RegisterID, int VReg); - Jump emitJumpIfImmediateInteger(RegisterID); - Jump emitJumpIfNotImmediateInteger(RegisterID); - Jump emitJumpIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID); - void emitJumpSlowCaseIfNotImmediateInteger(RegisterID); - void emitJumpSlowCaseIfNotImmediateNumber(RegisterID); - void emitJumpSlowCaseIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID); - - void emitFastArithReTagImmediate(RegisterID src, RegisterID dest); + Jump emitJumpIfInt(RegisterID); + Jump emitJumpIfNotInt(RegisterID); + Jump emitJumpIfNotInt(RegisterID, RegisterID, RegisterID scratch); + PatchableJump emitPatchableJumpIfNotInt(RegisterID); + void emitJumpSlowCaseIfNotInt(RegisterID); + void emitJumpSlowCaseIfNotNumber(RegisterID); + void emitJumpSlowCaseIfNotInt(RegisterID, RegisterID, 
RegisterID scratch); - void emitTagAsBoolImmediate(RegisterID reg); - void compileBinaryArithOp(OpcodeID, int dst, int src1, int src2, OperandTypes opi); - void compileBinaryArithOpSlowCase(Instruction*, OpcodeID, Vector::iterator&, int dst, int src1, int src2, OperandTypes, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase); + void emitTagBool(RegisterID); void compileGetByIdHotPath(int baseVReg, const Identifier*); - void compileGetDirectOffset(RegisterID base, RegisterID result, PropertyOffset cachedOffset); - void compileGetDirectOffset(JSObject* base, RegisterID result, PropertyOffset cachedOffset); - void compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch, FinalObjectMode = MayBeFinal); - void compilePutDirectOffset(RegisterID base, RegisterID value, PropertyOffset cachedOffset); #endif // USE(JSVALUE32_64) void emit_compareAndJump(OpcodeID, int op1, int op2, unsigned target, RelationalCondition); void emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondition, size_t (JIT_OPERATION *operation)(ExecState*, EncodedJSValue, EncodedJSValue), bool invert, Vector::iterator&); + + void assertStackPointerOffset(); - void emit_op_touch_entry(Instruction*); void emit_op_add(Instruction*); void emit_op_bitand(Instruction*); void emit_op_bitor(Instruction*); void emit_op_bitxor(Instruction*); void emit_op_call(Instruction*); + void emit_op_tail_call(Instruction*); void emit_op_call_eval(Instruction*); void emit_op_call_varargs(Instruction*); - void emit_op_captured_mov(Instruction*); + void emit_op_tail_call_varargs(Instruction*); + void emit_op_tail_call_forward_arguments(Instruction*); + void emit_op_construct_varargs(Instruction*); void emit_op_catch(Instruction*); void emit_op_construct(Instruction*); - void emit_op_get_callee(Instruction*); void emit_op_create_this(Instruction*); void emit_op_to_this(Instruction*); - void emit_op_create_arguments(Instruction*); + void emit_op_create_direct_arguments(Instruction*); + void emit_op_create_scoped_arguments(Instruction*); + void emit_op_create_cloned_arguments(Instruction*); + void emit_op_get_argument(Instruction*); + void emit_op_argument_count(Instruction*); + void emit_op_create_rest(Instruction*); + void emit_op_get_rest_length(Instruction*); + void emit_op_check_tdz(Instruction*); + void emit_op_assert(Instruction*); void emit_op_debug(Instruction*); void emit_op_del_by_id(Instruction*); + void emit_op_del_by_val(Instruction*); void emit_op_div(Instruction*); void emit_op_end(Instruction*); void emit_op_enter(Instruction*); - void emit_op_create_activation(Instruction*); + void emit_op_get_scope(Instruction*); void emit_op_eq(Instruction*); void emit_op_eq_null(Instruction*); + void emit_op_try_get_by_id(Instruction*); void emit_op_get_by_id(Instruction*); + void emit_op_get_by_id_with_this(Instruction*); + void emit_op_get_by_val_with_this(Instruction*); void emit_op_get_arguments_length(Instruction*); void emit_op_get_by_val(Instruction*); void emit_op_get_argument_by_val(Instruction*); - void emit_op_get_by_pname(Instruction*); void emit_op_init_lazy_reg(Instruction*); - void emit_op_check_has_instance(Instruction*); + void emit_op_overrides_has_instance(Instruction*); void emit_op_instanceof(Instruction*); + void emit_op_instanceof_custom(Instruction*); + void emit_op_is_empty(Instruction*); void emit_op_is_undefined(Instruction*); void emit_op_is_boolean(Instruction*); void emit_op_is_number(Instruction*); - void emit_op_is_string(Instruction*); + void 
emit_op_is_object(Instruction*); + void emit_op_is_cell_with_type(Instruction*); void emit_op_jeq_null(Instruction*); void emit_op_jfalse(Instruction*); void emit_op_jmp(Instruction*); @@ -506,6 +530,7 @@ namespace JSC { void emit_op_jngreatereq(Instruction*); void emit_op_jtrue(Instruction*); void emit_op_loop_hint(Instruction*); + void emit_op_watchdog(Instruction*); void emit_op_lshift(Instruction*); void emit_op_mod(Instruction*); void emit_op_mov(Instruction*); @@ -516,68 +541,93 @@ namespace JSC { void emit_op_new_array(Instruction*); void emit_op_new_array_with_size(Instruction*); void emit_op_new_array_buffer(Instruction*); + void emit_op_new_array_with_spread(Instruction*); + void emit_op_spread(Instruction*); void emit_op_new_func(Instruction*); - void emit_op_new_captured_func(Instruction*); void emit_op_new_func_exp(Instruction*); + void emit_op_new_generator_func(Instruction*); + void emit_op_new_generator_func_exp(Instruction*); + void emit_op_new_async_func(Instruction*); + void emit_op_new_async_func_exp(Instruction*); void emit_op_new_object(Instruction*); void emit_op_new_regexp(Instruction*); - void emit_op_get_pnames(Instruction*); - void emit_op_next_pname(Instruction*); void emit_op_not(Instruction*); void emit_op_nstricteq(Instruction*); - void emit_op_pop_scope(Instruction*); void emit_op_dec(Instruction*); void emit_op_inc(Instruction*); - void emit_op_profile_did_call(Instruction*); - void emit_op_profile_will_call(Instruction*); - void emit_op_push_name_scope(Instruction*); + void emit_op_pow(Instruction*); + void emit_op_profile_type(Instruction*); + void emit_op_profile_control_flow(Instruction*); void emit_op_push_with_scope(Instruction*); + void emit_op_create_lexical_environment(Instruction*); + void emit_op_get_parent_scope(Instruction*); void emit_op_put_by_id(Instruction*); + void emit_op_put_by_id_with_this(Instruction*); void emit_op_put_by_index(Instruction*); void emit_op_put_by_val(Instruction*); - void emit_op_put_getter_setter(Instruction*); - void emit_op_init_global_const(Instruction*); + void emit_op_put_by_val_with_this(Instruction*); + void emit_op_put_getter_by_id(Instruction*); + void emit_op_put_setter_by_id(Instruction*); + void emit_op_put_getter_setter_by_id(Instruction*); + void emit_op_put_getter_by_val(Instruction*); + void emit_op_put_setter_by_val(Instruction*); + void emit_op_define_data_property(Instruction*); + void emit_op_define_accessor_property(Instruction*); void emit_op_ret(Instruction*); - void emit_op_ret_object_or_this(Instruction*); void emit_op_rshift(Instruction*); + void emit_op_set_function_name(Instruction*); void emit_op_strcat(Instruction*); void emit_op_stricteq(Instruction*); void emit_op_sub(Instruction*); void emit_op_switch_char(Instruction*); void emit_op_switch_imm(Instruction*); void emit_op_switch_string(Instruction*); - void emit_op_tear_off_activation(Instruction*); void emit_op_tear_off_arguments(Instruction*); void emit_op_throw(Instruction*); void emit_op_throw_static_error(Instruction*); void emit_op_to_number(Instruction*); + void emit_op_to_string(Instruction*); void emit_op_to_primitive(Instruction*); void emit_op_unexpected_load(Instruction*); void emit_op_unsigned(Instruction*); void emit_op_urshift(Instruction*); + void emit_op_get_enumerable_length(Instruction*); + void emit_op_has_generic_property(Instruction*); + void emit_op_has_structure_property(Instruction*); + void emit_op_has_indexed_property(Instruction*); + void emit_op_get_direct_pname(Instruction*); + void 
emit_op_get_property_enumerator(Instruction*); + void emit_op_enumerator_structure_pname(Instruction*); + void emit_op_enumerator_generic_pname(Instruction*); + void emit_op_to_index_string(Instruction*); + void emit_op_log_shadow_chicken_prologue(Instruction*); + void emit_op_log_shadow_chicken_tail(Instruction*); void emitSlow_op_add(Instruction*, Vector::iterator&); void emitSlow_op_bitand(Instruction*, Vector::iterator&); void emitSlow_op_bitor(Instruction*, Vector::iterator&); void emitSlow_op_bitxor(Instruction*, Vector::iterator&); void emitSlow_op_call(Instruction*, Vector::iterator&); + void emitSlow_op_tail_call(Instruction*, Vector::iterator&); void emitSlow_op_call_eval(Instruction*, Vector::iterator&); void emitSlow_op_call_varargs(Instruction*, Vector::iterator&); - void emitSlow_op_captured_mov(Instruction*, Vector::iterator&); + void emitSlow_op_tail_call_varargs(Instruction*, Vector::iterator&); + void emitSlow_op_tail_call_forward_arguments(Instruction*, Vector::iterator&); + void emitSlow_op_construct_varargs(Instruction*, Vector::iterator&); void emitSlow_op_construct(Instruction*, Vector::iterator&); void emitSlow_op_to_this(Instruction*, Vector::iterator&); void emitSlow_op_create_this(Instruction*, Vector::iterator&); + void emitSlow_op_check_tdz(Instruction*, Vector::iterator&); void emitSlow_op_div(Instruction*, Vector::iterator&); void emitSlow_op_eq(Instruction*, Vector::iterator&); void emitSlow_op_get_callee(Instruction*, Vector::iterator&); + void emitSlow_op_try_get_by_id(Instruction*, Vector::iterator&); void emitSlow_op_get_by_id(Instruction*, Vector::iterator&); void emitSlow_op_get_arguments_length(Instruction*, Vector::iterator&); void emitSlow_op_get_by_val(Instruction*, Vector::iterator&); void emitSlow_op_get_argument_by_val(Instruction*, Vector::iterator&); - void emitSlow_op_get_by_pname(Instruction*, Vector::iterator&); - void emitSlow_op_check_has_instance(Instruction*, Vector::iterator&); void emitSlow_op_instanceof(Instruction*, Vector::iterator&); - void emitSlow_op_jfalse(Instruction*, Vector::iterator&); + void emitSlow_op_instanceof_custom(Instruction*, Vector::iterator&); void emitSlow_op_jless(Instruction*, Vector::iterator&); void emitSlow_op_jlesseq(Instruction*, Vector::iterator&); void emitSlow_op_jgreater(Instruction*, Vector::iterator&); @@ -588,6 +638,7 @@ namespace JSC { void emitSlow_op_jngreatereq(Instruction*, Vector::iterator&); void emitSlow_op_jtrue(Instruction*, Vector::iterator&); void emitSlow_op_loop_hint(Instruction*, Vector::iterator&); + void emitSlow_op_watchdog(Instruction*, Vector::iterator&); void emitSlow_op_lshift(Instruction*, Vector::iterator&); void emitSlow_op_mod(Instruction*, Vector::iterator&); void emitSlow_op_mul(Instruction*, Vector::iterator&); @@ -604,13 +655,19 @@ namespace JSC { void emitSlow_op_stricteq(Instruction*, Vector::iterator&); void emitSlow_op_sub(Instruction*, Vector::iterator&); void emitSlow_op_to_number(Instruction*, Vector::iterator&); + void emitSlow_op_to_string(Instruction*, Vector::iterator&); void emitSlow_op_to_primitive(Instruction*, Vector::iterator&); void emitSlow_op_unsigned(Instruction*, Vector::iterator&); void emitSlow_op_urshift(Instruction*, Vector::iterator&); + void emitSlow_op_has_indexed_property(Instruction*, Vector::iterator&); + void emitSlow_op_has_structure_property(Instruction*, Vector::iterator&); + void emitSlow_op_get_direct_pname(Instruction*, Vector::iterator&); void emit_op_resolve_scope(Instruction*); void emit_op_get_from_scope(Instruction*); void 
emit_op_put_to_scope(Instruction*); + void emit_op_get_from_arguments(Instruction*); + void emit_op_put_to_arguments(Instruction*); void emitSlow_op_resolve_scope(Instruction*, Vector::iterator&); void emitSlow_op_get_from_scope(Instruction*, Vector::iterator&); void emitSlow_op_put_to_scope(Instruction*, Vector::iterator&); @@ -618,33 +675,42 @@ namespace JSC { void emitRightShift(Instruction*, bool isUnsigned); void emitRightShiftSlowCase(Instruction*, Vector::iterator&, bool isUnsigned); + void emitNewFuncCommon(Instruction*); + void emitNewFuncExprCommon(Instruction*); void emitVarInjectionCheck(bool needsVarInjectionChecks); - void emitResolveClosure(int dst, bool needsVarInjectionChecks, unsigned depth); + void emitResolveClosure(int dst, int scope, bool needsVarInjectionChecks, unsigned depth); void emitLoadWithStructureCheck(int scope, Structure** structureSlot); - void emitGetGlobalProperty(uintptr_t* operandSlot); - void emitGetGlobalVar(uintptr_t operand); - void emitGetClosureVar(int scope, uintptr_t operand); - void emitPutGlobalProperty(uintptr_t* operandSlot, int value); #if USE(JSVALUE64) - void emitNotifyWrite(RegisterID value, RegisterID scratch, VariableWatchpointSet*); + void emitGetVarFromPointer(JSValue* operand, GPRReg); + void emitGetVarFromIndirectPointer(JSValue** operand, GPRReg); #else - void emitNotifyWrite(RegisterID tag, RegisterID payload, RegisterID scratch, VariableWatchpointSet*); + void emitGetVarFromIndirectPointer(JSValue** operand, GPRReg tag, GPRReg payload); + void emitGetVarFromPointer(JSValue* operand, GPRReg tag, GPRReg payload); #endif - void emitPutGlobalVar(uintptr_t operand, int value, VariableWatchpointSet*); - void emitPutClosureVar(int scope, uintptr_t operand, int value); + void emitGetClosureVar(int scope, uintptr_t operand); + void emitNotifyWrite(WatchpointSet*); + void emitNotifyWrite(GPRReg pointerToSet); + void emitPutGlobalVariable(JSValue* operand, int value, WatchpointSet*); + void emitPutGlobalVariableIndirect(JSValue** addressOfOperand, int value, WatchpointSet**); + void emitPutClosureVar(int scope, uintptr_t operand, int value, WatchpointSet*); void emitInitRegister(int dst); - void emitPutIntToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry); - void emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister); - void emitGetFromCallFrameHeader32(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister); -#if USE(JSVALUE64) - void emitGetFromCallFrameHeader64(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister); -#endif + void emitPutIntToCallFrameHeader(RegisterID from, int entry); JSValue getConstantOperand(int src); - bool isOperandConstantImmediateInt(int src); - bool isOperandConstantImmediateChar(int src); + bool isOperandConstantInt(int src); + bool isOperandConstantChar(int src); + + template + void emitMathICFast(JITUnaryMathIC*, Instruction*, ProfiledFunction, NonProfiledFunction); + template + void emitMathICFast(JITBinaryMathIC*, Instruction*, ProfiledFunction, NonProfiledFunction); + + template + void emitMathICSlow(JITBinaryMathIC*, Instruction*, ProfiledRepatchFunction, ProfiledFunction, RepatchFunction); + template + void emitMathICSlow(JITUnaryMathIC*, Instruction*, ProfiledRepatchFunction, ProfiledFunction, RepatchFunction); Jump getSlowCase(Vector::iterator& iter) { @@ -652,7 +718,8 @@ namespace JSC { } void linkSlowCase(Vector::iterator& iter) { - iter->from.link(this); + if 
(iter->from.isSet()) + iter->from.link(this); ++iter; } void linkDummySlowCase(Vector::iterator& iter) @@ -661,8 +728,13 @@ namespace JSC { ++iter; } void linkSlowCaseIfNotJSCell(Vector::iterator&, int virtualRegisterIndex); + void linkAllSlowCasesForBytecodeOffset(Vector& slowCases, + Vector::iterator&, unsigned bytecodeOffset); MacroAssembler::Call appendCallWithExceptionCheck(const FunctionPtr&); +#if OS(WINDOWS) && CPU(X86_64) + MacroAssembler::Call appendCallWithExceptionCheckAndSlowPathReturnType(const FunctionPtr&); +#endif MacroAssembler::Call appendCallWithCallFrameRollbackOnException(const FunctionPtr&); MacroAssembler::Call appendCallWithExceptionCheckSetJSValueResult(const FunctionPtr&, int); MacroAssembler::Call appendCallWithExceptionCheckSetJSValueResultWithProfile(const FunctionPtr&, int); @@ -671,9 +743,11 @@ namespace JSC { MacroAssembler::Call callOperation(C_JITOperation_E); MacroAssembler::Call callOperation(C_JITOperation_EO, GPRReg); + MacroAssembler::Call callOperation(C_JITOperation_EL, GPRReg); + MacroAssembler::Call callOperation(C_JITOperation_EL, TrustedImmPtr); MacroAssembler::Call callOperation(C_JITOperation_ESt, Structure*); MacroAssembler::Call callOperation(C_JITOperation_EZ, int32_t); - MacroAssembler::Call callOperation(F_JITOperation_EJZ, GPRReg, int32_t); + MacroAssembler::Call callOperation(Z_JITOperation_EJZZ, GPRReg, int32_t, int32_t); MacroAssembler::Call callOperation(J_JITOperation_E, int); MacroAssembler::Call callOperation(J_JITOperation_EAapJ, int, ArrayAllocationProfile*, GPRReg); MacroAssembler::Call callOperation(J_JITOperation_EAapJcpZ, int, ArrayAllocationProfile*, GPRReg, int32_t); @@ -681,13 +755,29 @@ namespace JSC { MacroAssembler::Call callOperation(J_JITOperation_EC, int, JSCell*); MacroAssembler::Call callOperation(V_JITOperation_EC, JSCell*); MacroAssembler::Call callOperation(J_JITOperation_EJ, int, GPRReg); + MacroAssembler::Call callOperation(J_JITOperation_EJ, JSValueRegs, JSValueRegs); #if USE(JSVALUE64) - MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_ESsiJI, int, StructureStubInfo*, GPRReg, StringImpl*); + MacroAssembler::Call callOperation(J_JITOperation_ESsiJI, int, StructureStubInfo*, GPRReg, UniquedStringImpl*); + MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_ESsiJI, int, StructureStubInfo*, GPRReg, UniquedStringImpl*); #else - MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_ESsiJI, int, StructureStubInfo*, GPRReg, GPRReg, StringImpl*); + MacroAssembler::Call callOperation(J_JITOperation_ESsiJI, int, StructureStubInfo*, GPRReg, GPRReg, UniquedStringImpl*); + MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_ESsiJI, int, StructureStubInfo*, GPRReg, GPRReg, UniquedStringImpl*); #endif - MacroAssembler::Call callOperation(J_JITOperation_EJIdc, int, GPRReg, const Identifier*); + MacroAssembler::Call callOperation(J_JITOperation_EJI, int, GPRReg, UniquedStringImpl*); MacroAssembler::Call callOperation(J_JITOperation_EJJ, int, GPRReg, GPRReg); + MacroAssembler::Call callOperation(J_JITOperation_EJArp, JSValueRegs, JSValueRegs, ArithProfile*); + MacroAssembler::Call callOperation(J_JITOperation_EJJArp, JSValueRegs, JSValueRegs, JSValueRegs, ArithProfile*); + MacroAssembler::Call callOperation(J_JITOperation_EJJ, JSValueRegs, JSValueRegs, JSValueRegs); + MacroAssembler::Call callOperation(J_JITOperation_EJMic, JSValueRegs, JSValueRegs, TrustedImmPtr); + MacroAssembler::Call callOperation(J_JITOperation_EJJMic, JSValueRegs, JSValueRegs, JSValueRegs, 
TrustedImmPtr); + MacroAssembler::Call callOperation(J_JITOperation_EJJAp, int, GPRReg, GPRReg, ArrayProfile*); + MacroAssembler::Call callOperation(J_JITOperation_EJJBy, int, GPRReg, GPRReg, ByValInfo*); + MacroAssembler::Call callOperation(Z_JITOperation_EJOJ, GPRReg, GPRReg, GPRReg); + MacroAssembler::Call callOperation(C_JITOperation_EJsc, GPRReg); + MacroAssembler::Call callOperation(J_JITOperation_EJscC, int, GPRReg, JSCell*); + MacroAssembler::Call callOperation(J_JITOperation_EJscCJ, int, GPRReg, JSCell*, GPRReg); + MacroAssembler::Call callOperation(C_JITOperation_EJscZ, GPRReg, int32_t); + MacroAssembler::Call callOperation(C_JITOperation_EJscZ, int, GPRReg, int32_t); #if USE(JSVALUE64) MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_EJJ, int, GPRReg, GPRReg); #else @@ -695,61 +785,93 @@ namespace JSC { #endif MacroAssembler::Call callOperation(J_JITOperation_EP, int, void*); MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_EPc, int, Instruction*); + MacroAssembler::Call callOperation(J_JITOperation_EPc, int, Instruction*); MacroAssembler::Call callOperation(J_JITOperation_EZ, int, int32_t); + MacroAssembler::Call callOperation(J_JITOperation_EZZ, int, int32_t, int32_t); + MacroAssembler::Call callOperation(P_JITOperation_E); MacroAssembler::Call callOperation(P_JITOperation_EJS, GPRReg, size_t); - MacroAssembler::Call callOperation(P_JITOperation_EZ, int32_t); MacroAssembler::Call callOperation(S_JITOperation_ECC, RegisterID, RegisterID); MacroAssembler::Call callOperation(S_JITOperation_EJ, RegisterID); + MacroAssembler::Call callOperation(S_JITOperation_EJI, GPRReg, UniquedStringImpl*); MacroAssembler::Call callOperation(S_JITOperation_EJJ, RegisterID, RegisterID); MacroAssembler::Call callOperation(S_JITOperation_EOJss, RegisterID, RegisterID); + MacroAssembler::Call callOperation(Sprt_JITOperation_EZ, int32_t); MacroAssembler::Call callOperation(V_JITOperation_E); MacroAssembler::Call callOperation(V_JITOperation_EC, RegisterID); MacroAssembler::Call callOperation(V_JITOperation_ECC, RegisterID, RegisterID); - MacroAssembler::Call callOperation(V_JITOperation_ECICC, RegisterID, const Identifier*, RegisterID, RegisterID); - MacroAssembler::Call callOperation(V_JITOperation_EIdJZ, const Identifier*, RegisterID, int32_t); + MacroAssembler::Call callOperation(V_JITOperation_ECIZC, RegisterID, UniquedStringImpl*, int32_t, RegisterID); + MacroAssembler::Call callOperation(V_JITOperation_ECIZCC, RegisterID, UniquedStringImpl*, int32_t, RegisterID, RegisterID); +#if USE(JSVALUE64) + MacroAssembler::Call callOperation(V_JITOperation_ECJZC, RegisterID, RegisterID, int32_t, RegisterID); +#else + MacroAssembler::Call callOperation(V_JITOperation_ECJZC, RegisterID, RegisterID, RegisterID, int32_t, RegisterID); +#endif + MacroAssembler::Call callOperation(J_JITOperation_EE, RegisterID); + MacroAssembler::Call callOperation(V_JITOperation_EZSymtabJ, int, SymbolTable*, RegisterID); + MacroAssembler::Call callOperation(J_JITOperation_EZSymtabJ, int, SymbolTable*, RegisterID); MacroAssembler::Call callOperation(V_JITOperation_EJ, RegisterID); + MacroAssembler::Call callOperationNoExceptionCheck(Z_JITOperation_E); #if USE(JSVALUE64) MacroAssembler::Call callOperationNoExceptionCheck(V_JITOperation_EJ, RegisterID); #else MacroAssembler::Call callOperationNoExceptionCheck(V_JITOperation_EJ, RegisterID, RegisterID); #endif - MacroAssembler::Call callOperation(V_JITOperation_EJIdJJ, RegisterID, const Identifier*, RegisterID, RegisterID); #if USE(JSVALUE64) - 
MacroAssembler::Call callOperation(F_JITOperation_EFJJ, RegisterID, RegisterID, RegisterID); - MacroAssembler::Call callOperation(V_JITOperation_ESsiJJI, StructureStubInfo*, RegisterID, RegisterID, StringImpl*); + MacroAssembler::Call callOperation(F_JITOperation_EFJZZ, RegisterID, RegisterID, int32_t, RegisterID); + MacroAssembler::Call callOperation(V_JITOperation_ESsiJJI, StructureStubInfo*, RegisterID, RegisterID, UniquedStringImpl*); + MacroAssembler::Call callOperation(V_JITOperation_ECIZJJ, RegisterID, UniquedStringImpl*, int32_t, RegisterID, RegisterID); + MacroAssembler::Call callOperation(V_JITOperation_ECJ, RegisterID, RegisterID); #else - MacroAssembler::Call callOperation(V_JITOperation_ESsiJJI, StructureStubInfo*, RegisterID, RegisterID, RegisterID, RegisterID, StringImpl*); + MacroAssembler::Call callOperation(V_JITOperation_ESsiJJI, StructureStubInfo*, RegisterID, RegisterID, RegisterID, RegisterID, UniquedStringImpl*); + MacroAssembler::Call callOperation(V_JITOperation_ECJ, RegisterID, RegisterID, RegisterID); #endif MacroAssembler::Call callOperation(V_JITOperation_EJJJ, RegisterID, RegisterID, RegisterID); + MacroAssembler::Call callOperation(V_JITOperation_EJJJAp, RegisterID, RegisterID, RegisterID, ArrayProfile*); + MacroAssembler::Call callOperation(V_JITOperation_EJJJBy, RegisterID, RegisterID, RegisterID, ByValInfo*); MacroAssembler::Call callOperation(V_JITOperation_EJZJ, RegisterID, int32_t, RegisterID); MacroAssembler::Call callOperation(V_JITOperation_EJZ, RegisterID, int32_t); MacroAssembler::Call callOperation(V_JITOperation_EPc, Instruction*); MacroAssembler::Call callOperation(V_JITOperation_EZ, int32_t); + MacroAssembler::Call callOperation(V_JITOperation_EZJ, int, GPRReg); MacroAssembler::Call callOperationWithCallFrameRollbackOnException(J_JITOperation_E); MacroAssembler::Call callOperationWithCallFrameRollbackOnException(V_JITOperation_ECb, CodeBlock*); MacroAssembler::Call callOperationWithCallFrameRollbackOnException(Z_JITOperation_E); #if USE(JSVALUE32_64) - MacroAssembler::Call callOperation(F_JITOperation_EFJJ, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID); - MacroAssembler::Call callOperation(F_JITOperation_EJZ, GPRReg, GPRReg, int32_t); + MacroAssembler::Call callOperation(F_JITOperation_EFJZZ, RegisterID, RegisterID, RegisterID, int32_t, RegisterID); + MacroAssembler::Call callOperation(Z_JITOperation_EJZZ, GPRReg, GPRReg, int32_t, int32_t); MacroAssembler::Call callOperation(J_JITOperation_EAapJ, int, ArrayAllocationProfile*, GPRReg, GPRReg); MacroAssembler::Call callOperation(J_JITOperation_EJ, int, GPRReg, GPRReg); - MacroAssembler::Call callOperation(J_JITOperation_EJIdc, int, GPRReg, GPRReg, const Identifier*); + MacroAssembler::Call callOperation(J_JITOperation_EJI, int, GPRReg, GPRReg, UniquedStringImpl*); MacroAssembler::Call callOperation(J_JITOperation_EJJ, int, GPRReg, GPRReg, GPRReg, GPRReg); + MacroAssembler::Call callOperation(Z_JITOperation_EJOJ, GPRReg, GPRReg, GPRReg, GPRReg, GPRReg); + MacroAssembler::Call callOperation(J_JITOperation_EJJAp, int, GPRReg, GPRReg, GPRReg, GPRReg, ArrayProfile*); + MacroAssembler::Call callOperation(J_JITOperation_EJJBy, int, GPRReg, GPRReg, GPRReg, GPRReg, ByValInfo*); MacroAssembler::Call callOperation(P_JITOperation_EJS, GPRReg, GPRReg, size_t); MacroAssembler::Call callOperation(S_JITOperation_EJ, RegisterID, RegisterID); + MacroAssembler::Call callOperation(S_JITOperation_EJI, GPRReg, GPRReg, UniquedStringImpl*); MacroAssembler::Call callOperation(S_JITOperation_EJJ, RegisterID, 
RegisterID, RegisterID, RegisterID); - MacroAssembler::Call callOperation(V_JITOperation_EIdJZ, const Identifier*, RegisterID, RegisterID, int32_t); + MacroAssembler::Call callOperation(V_JITOperation_EZSymtabJ, int, SymbolTable*, RegisterID, RegisterID); MacroAssembler::Call callOperation(V_JITOperation_EJ, RegisterID, RegisterID); MacroAssembler::Call callOperation(V_JITOperation_EJJJ, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID); + MacroAssembler::Call callOperation(V_JITOperation_EJJJAp, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID, ArrayProfile*); + MacroAssembler::Call callOperation(V_JITOperation_EJJJBy, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID, ByValInfo*); MacroAssembler::Call callOperation(V_JITOperation_EJZ, RegisterID, RegisterID, int32_t); MacroAssembler::Call callOperation(V_JITOperation_EJZJ, RegisterID, RegisterID, int32_t, RegisterID, RegisterID); + MacroAssembler::Call callOperation(V_JITOperation_EZJ, int32_t, RegisterID, RegisterID); + MacroAssembler::Call callOperation(J_JITOperation_EJscCJ, int, GPRReg, JSCell*, GPRReg, GPRReg); #endif + template + void emitBitBinaryOpFastPath(Instruction* currentInstruction); + + void emitRightShiftFastPath(Instruction* currentInstruction, OpcodeID); + Jump checkStructure(RegisterID reg, Structure* structure); void updateTopCallFrame(); Call emitNakedCall(CodePtr function = CodePtr()); + Call emitNakedTailCall(CodePtr function = CodePtr()); // Loads the character value of a single character string into dst. void emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures); @@ -795,14 +917,24 @@ namespace JSC { bool shouldEmitProfiling() { return false; } #endif + static bool reportCompileTimes(); + static bool computeCompileTimes(); + + // If you need to check the value of an instruction multiple times and the instruction is + // part of a LLInt inline cache, then you want to use this. It will give you the value of + // the instruction at the start of JITing. + Instruction* copiedInstruction(Instruction*); + Interpreter* m_interpreter; + + RefCountedArray m_instructions; Vector m_calls; Vector
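reportCompileTimes(), computeCompileTimes(), copiedInstruction() and the ref-counted copy of the instruction stream kept next to the Interpreter pointer are the compile-time bookkeeping side of the class. The comment above explains the key point: copiedInstruction() returns the value an instruction had at the start of JITing, which matters when the live bytecode can be rewritten (for example by LLInt inline caches) while it is being compiled. The snippet below is a minimal, self-contained sketch of that snapshot-then-compile pattern; Instruction and BaselineCompiler here are simplified stand-ins, not the real JSC classes.

    // Snapshot-then-compile: copy the bytecode stream once up front and
    // resolve every operand against the copy, so concurrent patching of the
    // live stream cannot change a value between two reads.
    #include <cstddef>
    #include <iostream>
    #include <vector>

    struct Instruction { int opcode; int operand; };

    class BaselineCompiler {
    public:
        explicit BaselineCompiler(const std::vector<Instruction>& liveStream)
            : m_live(liveStream)
            , m_copy(liveStream) // taken once, at the start of "JITing"
        {
        }

        // Same spirit as JIT::copiedInstruction(Instruction*): map a position in
        // the live stream to the matching slot of the snapshot.
        const Instruction& copiedInstruction(size_t indexInLiveStream) const
        {
            return m_copy[indexInLiveStream];
        }

        void compile() const
        {
            for (size_t i = 0; i < m_copy.size(); ++i) {
                const Instruction& snapshot = copiedInstruction(i);
                // The live stream may have been patched since construction; the
                // snapshot is what the compiler consistently reads.
                std::cout << "op " << snapshot.opcode
                          << ": live operand = " << m_live[i].operand
                          << ", snapshot operand = " << snapshot.operand << '\n';
            }
        }

    private:
        const std::vector<Instruction>& m_live;
        std::vector<Instruction> m_copy;
    };

    int main()
    {
        std::vector<Instruction> stream { { 1, 42 }, { 2, 7 } };
        BaselineCompiler jit(stream);
        stream[0].operand = 99; // simulates a concurrent inline-cache rewrite
        jit.compile();          // still sees operand 42 for the first instruction
    }

Here the snapshot is a plain std::vector copy; in the class above the same role appears to be played by the RefCountedArray member that holds the copied instructions.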