author     Konstantin Tokarev <annulen@yandex.ru>  2016-08-25 19:20:41 +0300
committer  Konstantin Tokarev <annulen@yandex.ru>  2017-02-02 12:30:55 +0000
commit     6882a04fb36642862b11efe514251d32070c3d65 (patch)
tree       b7959826000b061fd5ccc7512035c7478742f7b0 /Source/JavaScriptCore/jit
parent     ab6df191029eeeb0b0f16f127d553265659f739e (diff)
download   qtwebkit-6882a04fb36642862b11efe514251d32070c3d65.tar.gz

Imported QtWebKit TP3 (git b57bc6801f1876c3220d5a4bfea33d620d477443)

Change-Id: I3b1d8a2808782c9f34d50240000e20cb38d3680f
Reviewed-by: Konstantin Tokarev <annulen@yandex.ru>
Diffstat (limited to 'Source/JavaScriptCore/jit')
-rw-r--r--  Source/JavaScriptCore/jit/AssemblyHelpers.cpp | 544
-rw-r--r--  Source/JavaScriptCore/jit/AssemblyHelpers.h | 1394
-rw-r--r--  Source/JavaScriptCore/jit/BinarySwitch.cpp | 391
-rw-r--r--  Source/JavaScriptCore/jit/BinarySwitch.h | 147
-rw-r--r--  Source/JavaScriptCore/jit/CCallHelpers.h | 2215
-rw-r--r--  Source/JavaScriptCore/jit/CachedRecovery.cpp (renamed from Source/JavaScriptCore/jit/JumpReplacementWatchpoint.cpp) | 52
-rw-r--r--  Source/JavaScriptCore/jit/CachedRecovery.h | 137
-rw-r--r--  Source/JavaScriptCore/jit/CallFrameShuffleData.cpp | 68
-rw-r--r--  Source/JavaScriptCore/jit/CallFrameShuffleData.h | 54
-rw-r--r--  Source/JavaScriptCore/jit/CallFrameShuffler.cpp | 774
-rw-r--r--  Source/JavaScriptCore/jit/CallFrameShuffler.h | 804
-rw-r--r--  Source/JavaScriptCore/jit/CallFrameShuffler32_64.cpp | 305
-rw-r--r--  Source/JavaScriptCore/jit/CallFrameShuffler64.cpp | 369
-rw-r--r--  Source/JavaScriptCore/jit/CompactJITCodeMap.h | 38
-rw-r--r--  Source/JavaScriptCore/jit/ExecutableAllocationFuzz.cpp | 73
-rw-r--r--  Source/JavaScriptCore/jit/ExecutableAllocationFuzz.h | 51
-rw-r--r--  Source/JavaScriptCore/jit/ExecutableAllocator.cpp | 45
-rw-r--r--  Source/JavaScriptCore/jit/ExecutableAllocator.h | 72
-rw-r--r--  Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp | 84
-rw-r--r--  Source/JavaScriptCore/jit/FPRInfo.h | 431
-rw-r--r--  Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp | 61
-rw-r--r--  Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h | 41
-rw-r--r--  Source/JavaScriptCore/jit/GPRInfo.cpp | 42
-rw-r--r--  Source/JavaScriptCore/jit/GPRInfo.h | 918
-rw-r--r--  Source/JavaScriptCore/jit/HostCallReturnValue.cpp | 1
-rw-r--r--  Source/JavaScriptCore/jit/HostCallReturnValue.h | 7
-rw-r--r--  Source/JavaScriptCore/jit/IntrinsicEmitter.cpp | 134
-rw-r--r--  Source/JavaScriptCore/jit/JIT.cpp | 705
-rw-r--r--  Source/JavaScriptCore/jit/JIT.h | 761
-rw-r--r--  Source/JavaScriptCore/jit/JITAddGenerator.cpp | 136
-rw-r--r--  Source/JavaScriptCore/jit/JITAddGenerator.h | 80
-rw-r--r--  Source/JavaScriptCore/jit/JITArithmetic.cpp | 1256
-rw-r--r--  Source/JavaScriptCore/jit/JITArithmetic32_64.cpp | 918
-rw-r--r--  Source/JavaScriptCore/jit/JITBitAndGenerator.cpp | 85
-rw-r--r--  Source/JavaScriptCore/jit/JITBitAndGenerator.h | 49
-rw-r--r--  Source/JavaScriptCore/jit/JITBitBinaryOpGenerator.h | 71
-rw-r--r--  Source/JavaScriptCore/jit/JITBitOrGenerator.cpp | 74
-rw-r--r--  Source/JavaScriptCore/jit/JITBitOrGenerator.h | 49
-rw-r--r--  Source/JavaScriptCore/jit/JITBitXorGenerator.cpp | 73
-rw-r--r--  Source/JavaScriptCore/jit/JITBitXorGenerator.h | 49
-rw-r--r--  Source/JavaScriptCore/jit/JITCall.cpp | 361
-rw-r--r--  Source/JavaScriptCore/jit/JITCall32_64.cpp | 339
-rw-r--r--  Source/JavaScriptCore/jit/JITCode.cpp | 220
-rw-r--r--  Source/JavaScriptCore/jit/JITCode.h | 333
-rw-r--r--  Source/JavaScriptCore/jit/JITCompilationEffort.h | 2
-rw-r--r--  Source/JavaScriptCore/jit/JITDisassembler.cpp | 6
-rw-r--r--  Source/JavaScriptCore/jit/JITDisassembler.h | 10
-rw-r--r--  Source/JavaScriptCore/jit/JITDivGenerator.cpp | 116
-rw-r--r--  Source/JavaScriptCore/jit/JITDivGenerator.h | 85
-rw-r--r--  Source/JavaScriptCore/jit/JITDriver.h | 123
-rw-r--r--  Source/JavaScriptCore/jit/JITExceptions.cpp | 57
-rw-r--r--  Source/JavaScriptCore/jit/JITExceptions.h | 18
-rw-r--r--  Source/JavaScriptCore/jit/JITInlineCacheGenerator.cpp | 175
-rw-r--r--  Source/JavaScriptCore/jit/JITInlineCacheGenerator.h | 126
-rw-r--r--  Source/JavaScriptCore/jit/JITInlines.h | 1112
-rw-r--r--  Source/JavaScriptCore/jit/JITLeftShiftGenerator.cpp | 84
-rw-r--r--  Source/JavaScriptCore/jit/JITLeftShiftGenerator.h | 49
-rw-r--r--  Source/JavaScriptCore/jit/JITMulGenerator.cpp | 198
-rw-r--r--  Source/JavaScriptCore/jit/JITMulGenerator.h | 83
-rw-r--r--  Source/JavaScriptCore/jit/JITNegGenerator.cpp | 72
-rw-r--r--  Source/JavaScriptCore/jit/JITNegGenerator.h (renamed from Source/JavaScriptCore/jit/ClosureCallStubRoutine.h) | 52
-rw-r--r--  Source/JavaScriptCore/jit/JITOpcodes.cpp | 1768
-rw-r--r--  Source/JavaScriptCore/jit/JITOpcodes32_64.cpp | 1238
-rw-r--r--  Source/JavaScriptCore/jit/JITOperations.cpp | 2237
-rw-r--r--  Source/JavaScriptCore/jit/JITOperations.h | 393
-rw-r--r--  Source/JavaScriptCore/jit/JITOperationsMSVC64.cpp | 46
-rw-r--r--  Source/JavaScriptCore/jit/JITPropertyAccess.cpp | 1964
-rw-r--r--  Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp | 1553
-rw-r--r--  Source/JavaScriptCore/jit/JITRightShiftGenerator.cpp | 140
-rw-r--r--  Source/JavaScriptCore/jit/JITRightShiftGenerator.h | 63
-rw-r--r--  Source/JavaScriptCore/jit/JITStubCall.h | 303
-rw-r--r--  Source/JavaScriptCore/jit/JITStubRoutine.cpp | 9
-rw-r--r--  Source/JavaScriptCore/jit/JITStubRoutine.h | 21
-rw-r--r--  Source/JavaScriptCore/jit/JITStubs.cpp | 3576
-rw-r--r--  Source/JavaScriptCore/jit/JITStubs.h | 460
-rw-r--r--  Source/JavaScriptCore/jit/JITStubsMSVC64.asm | 66
-rw-r--r--  Source/JavaScriptCore/jit/JITSubGenerator.cpp | 91
-rw-r--r--  Source/JavaScriptCore/jit/JITSubGenerator.h | 78
-rw-r--r--  Source/JavaScriptCore/jit/JITThunks.cpp | 67
-rw-r--r--  Source/JavaScriptCore/jit/JITThunks.h | 24
-rw-r--r--  Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.cpp | 76
-rw-r--r--  Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.h | 56
-rw-r--r--  Source/JavaScriptCore/jit/JITWriteBarrier.h | 10
-rw-r--r--  Source/JavaScriptCore/jit/JSInterfaceJIT.h | 263
-rw-r--r--  Source/JavaScriptCore/jit/PCToCodeOriginMap.cpp | 301
-rw-r--r--  Source/JavaScriptCore/jit/PCToCodeOriginMap.h | 104
-rw-r--r--  Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.cpp | 137
-rw-r--r--  Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.h | 115
-rw-r--r--  Source/JavaScriptCore/jit/Reg.cpp (renamed from Source/JavaScriptCore/jit/ClosureCallStubRoutine.cpp) | 36
-rw-r--r--  Source/JavaScriptCore/jit/Reg.h | 250
-rw-r--r--  Source/JavaScriptCore/jit/RegisterAtOffset.cpp | 45
-rw-r--r--  Source/JavaScriptCore/jit/RegisterAtOffset.h (renamed from Source/JavaScriptCore/jit/JumpReplacementWatchpoint.h) | 57
-rw-r--r--  Source/JavaScriptCore/jit/RegisterAtOffsetList.cpp | 80
-rw-r--r--  Source/JavaScriptCore/jit/RegisterAtOffsetList.h | 82
-rw-r--r--  Source/JavaScriptCore/jit/RegisterMap.h | 113
-rw-r--r--  Source/JavaScriptCore/jit/RegisterSet.cpp | 404
-rw-r--r--  Source/JavaScriptCore/jit/RegisterSet.h | 175
-rw-r--r--  Source/JavaScriptCore/jit/Repatch.cpp | 939
-rw-r--r--  Source/JavaScriptCore/jit/Repatch.h | 56
-rw-r--r--  Source/JavaScriptCore/jit/ScratchRegisterAllocator.cpp | 302
-rw-r--r--  Source/JavaScriptCore/jit/ScratchRegisterAllocator.h | 112
-rw-r--r--  Source/JavaScriptCore/jit/SetupVarargsFrame.cpp | 143
-rw-r--r--  Source/JavaScriptCore/jit/SetupVarargsFrame.h | 53
-rw-r--r--  Source/JavaScriptCore/jit/SlowPathCall.h | 94
-rw-r--r--  Source/JavaScriptCore/jit/SnippetOperand.h | 101
-rw-r--r--  Source/JavaScriptCore/jit/SpecializedThunkJIT.h | 90
-rw-r--r--  Source/JavaScriptCore/jit/SpillRegistersMode.h | 35
-rw-r--r--  Source/JavaScriptCore/jit/TempRegisterSet.cpp | 54
-rw-r--r--  Source/JavaScriptCore/jit/TempRegisterSet.h | 223
-rw-r--r--  Source/JavaScriptCore/jit/ThunkGenerator.h | 2
-rw-r--r--  Source/JavaScriptCore/jit/ThunkGenerators.cpp | 856
-rw-r--r--  Source/JavaScriptCore/jit/ThunkGenerators.h | 23
112 files changed, 24449 insertions, 12214 deletions
diff --git a/Source/JavaScriptCore/jit/AssemblyHelpers.cpp b/Source/JavaScriptCore/jit/AssemblyHelpers.cpp
new file mode 100644
index 000000000..c1be5932c
--- /dev/null
+++ b/Source/JavaScriptCore/jit/AssemblyHelpers.cpp
@@ -0,0 +1,544 @@
+/*
+ * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "AssemblyHelpers.h"
+
+#if ENABLE(JIT)
+
+#include "JITOperations.h"
+#include "JSCInlines.h"
+
+namespace JSC {
+
+ExecutableBase* AssemblyHelpers::executableFor(const CodeOrigin& codeOrigin)
+{
+ if (!codeOrigin.inlineCallFrame)
+ return m_codeBlock->ownerExecutable();
+
+ return codeOrigin.inlineCallFrame->baselineCodeBlock->ownerExecutable();
+}
+
+Vector<BytecodeAndMachineOffset>& AssemblyHelpers::decodedCodeMapFor(CodeBlock* codeBlock)
+{
+ ASSERT(codeBlock == codeBlock->baselineVersion());
+ ASSERT(codeBlock->jitType() == JITCode::BaselineJIT);
+ ASSERT(codeBlock->jitCodeMap());
+
+ HashMap<CodeBlock*, Vector<BytecodeAndMachineOffset>>::AddResult result = m_decodedCodeMaps.add(codeBlock, Vector<BytecodeAndMachineOffset>());
+
+ if (result.isNewEntry)
+ codeBlock->jitCodeMap()->decode(result.iterator->value);
+
+ return result.iterator->value;
+}
+
+AssemblyHelpers::JumpList AssemblyHelpers::branchIfNotType(
+ JSValueRegs regs, GPRReg tempGPR, const InferredType::Descriptor& descriptor, TagRegistersMode mode)
+{
+ AssemblyHelpers::JumpList result;
+
+ switch (descriptor.kind()) {
+ case InferredType::Bottom:
+ result.append(jump());
+ break;
+
+ case InferredType::Boolean:
+ result.append(branchIfNotBoolean(regs, tempGPR));
+ break;
+
+ case InferredType::Other:
+ result.append(branchIfNotOther(regs, tempGPR));
+ break;
+
+ case InferredType::Int32:
+ result.append(branchIfNotInt32(regs, mode));
+ break;
+
+ case InferredType::Number:
+ result.append(branchIfNotNumber(regs, tempGPR, mode));
+ break;
+
+ case InferredType::String:
+ result.append(branchIfNotCell(regs, mode));
+ result.append(branchIfNotString(regs.payloadGPR()));
+ break;
+
+ case InferredType::Symbol:
+ result.append(branchIfNotCell(regs, mode));
+ result.append(branchIfNotSymbol(regs.payloadGPR()));
+ break;
+
+ case InferredType::ObjectWithStructure:
+ result.append(branchIfNotCell(regs, mode));
+ result.append(
+ branchStructure(
+ NotEqual,
+ Address(regs.payloadGPR(), JSCell::structureIDOffset()),
+ descriptor.structure()));
+ break;
+
+ case InferredType::ObjectWithStructureOrOther: {
+ Jump ok = branchIfOther(regs, tempGPR);
+ result.append(branchIfNotCell(regs, mode));
+ result.append(
+ branchStructure(
+ NotEqual,
+ Address(regs.payloadGPR(), JSCell::structureIDOffset()),
+ descriptor.structure()));
+ ok.link(this);
+ break;
+ }
+
+ case InferredType::Object:
+ result.append(branchIfNotCell(regs, mode));
+ result.append(branchIfNotObject(regs.payloadGPR()));
+ break;
+
+ case InferredType::ObjectOrOther: {
+ Jump ok = branchIfOther(regs, tempGPR);
+ result.append(branchIfNotCell(regs, mode));
+ result.append(branchIfNotObject(regs.payloadGPR()));
+ ok.link(this);
+ break;
+ }
+
+ case InferredType::Top:
+ break;
+ }
+
+ return result;
+}
+
+AssemblyHelpers::Jump AssemblyHelpers::branchIfFastTypedArray(GPRReg baseGPR)
+{
+ return branch32(
+ Equal,
+ Address(baseGPR, JSArrayBufferView::offsetOfMode()),
+ TrustedImm32(FastTypedArray));
+}
+
+AssemblyHelpers::Jump AssemblyHelpers::branchIfNotFastTypedArray(GPRReg baseGPR)
+{
+ return branch32(
+ NotEqual,
+ Address(baseGPR, JSArrayBufferView::offsetOfMode()),
+ TrustedImm32(FastTypedArray));
+}
+
+AssemblyHelpers::Jump AssemblyHelpers::loadTypedArrayVector(GPRReg baseGPR, GPRReg resultGPR)
+{
+ RELEASE_ASSERT(baseGPR != resultGPR);
+
+ loadPtr(Address(baseGPR, JSArrayBufferView::offsetOfVector()), resultGPR);
+ Jump ok = branchIfToSpace(resultGPR);
+ Jump result = branchIfFastTypedArray(baseGPR);
+ ok.link(this);
+ return result;
+}
+
+void AssemblyHelpers::purifyNaN(FPRReg fpr)
+{
+ MacroAssembler::Jump notNaN = branchDouble(DoubleEqual, fpr, fpr);
+ static const double NaN = PNaN;
+ loadDouble(TrustedImmPtr(&NaN), fpr);
+ notNaN.link(this);
+}
+
+#if ENABLE(SAMPLING_FLAGS)
+void AssemblyHelpers::setSamplingFlag(int32_t flag)
+{
+ ASSERT(flag >= 1);
+ ASSERT(flag <= 32);
+ or32(TrustedImm32(1u << (flag - 1)), AbsoluteAddress(SamplingFlags::addressOfFlags()));
+}
+
+void AssemblyHelpers::clearSamplingFlag(int32_t flag)
+{
+ ASSERT(flag >= 1);
+ ASSERT(flag <= 32);
+ and32(TrustedImm32(~(1u << (flag - 1))), AbsoluteAddress(SamplingFlags::addressOfFlags()));
+}
+#endif
+
+#if !ASSERT_DISABLED
+#if USE(JSVALUE64)
+void AssemblyHelpers::jitAssertIsInt32(GPRReg gpr)
+{
+#if CPU(X86_64)
+ Jump checkInt32 = branch64(BelowOrEqual, gpr, TrustedImm64(static_cast<uintptr_t>(0xFFFFFFFFu)));
+ abortWithReason(AHIsNotInt32);
+ checkInt32.link(this);
+#else
+ UNUSED_PARAM(gpr);
+#endif
+}
+
+void AssemblyHelpers::jitAssertIsJSInt32(GPRReg gpr)
+{
+ Jump checkJSInt32 = branch64(AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
+ abortWithReason(AHIsNotJSInt32);
+ checkJSInt32.link(this);
+}
+
+void AssemblyHelpers::jitAssertIsJSNumber(GPRReg gpr)
+{
+ Jump checkJSNumber = branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
+ abortWithReason(AHIsNotJSNumber);
+ checkJSNumber.link(this);
+}
+
+void AssemblyHelpers::jitAssertIsJSDouble(GPRReg gpr)
+{
+ Jump checkJSInt32 = branch64(AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
+ Jump checkJSNumber = branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
+ checkJSInt32.link(this);
+ abortWithReason(AHIsNotJSDouble);
+ checkJSNumber.link(this);
+}
+
+void AssemblyHelpers::jitAssertIsCell(GPRReg gpr)
+{
+ Jump checkCell = branchTest64(MacroAssembler::Zero, gpr, GPRInfo::tagMaskRegister);
+ abortWithReason(AHIsNotCell);
+ checkCell.link(this);
+}
+
+void AssemblyHelpers::jitAssertTagsInPlace()
+{
+ Jump ok = branch64(Equal, GPRInfo::tagTypeNumberRegister, TrustedImm64(TagTypeNumber));
+ abortWithReason(AHTagTypeNumberNotInPlace);
+ breakpoint();
+ ok.link(this);
+
+ ok = branch64(Equal, GPRInfo::tagMaskRegister, TrustedImm64(TagMask));
+ abortWithReason(AHTagMaskNotInPlace);
+ ok.link(this);
+}
+#elif USE(JSVALUE32_64)
+void AssemblyHelpers::jitAssertIsInt32(GPRReg gpr)
+{
+ UNUSED_PARAM(gpr);
+}
+
+void AssemblyHelpers::jitAssertIsJSInt32(GPRReg gpr)
+{
+ Jump checkJSInt32 = branch32(Equal, gpr, TrustedImm32(JSValue::Int32Tag));
+ abortWithReason(AHIsNotJSInt32);
+ checkJSInt32.link(this);
+}
+
+void AssemblyHelpers::jitAssertIsJSNumber(GPRReg gpr)
+{
+ Jump checkJSInt32 = branch32(Equal, gpr, TrustedImm32(JSValue::Int32Tag));
+ Jump checkJSDouble = branch32(Below, gpr, TrustedImm32(JSValue::LowestTag));
+ abortWithReason(AHIsNotJSNumber);
+ checkJSInt32.link(this);
+ checkJSDouble.link(this);
+}
+
+void AssemblyHelpers::jitAssertIsJSDouble(GPRReg gpr)
+{
+ Jump checkJSDouble = branch32(Below, gpr, TrustedImm32(JSValue::LowestTag));
+ abortWithReason(AHIsNotJSDouble);
+ checkJSDouble.link(this);
+}
+
+void AssemblyHelpers::jitAssertIsCell(GPRReg gpr)
+{
+ Jump checkCell = branch32(Equal, gpr, TrustedImm32(JSValue::CellTag));
+ abortWithReason(AHIsNotCell);
+ checkCell.link(this);
+}
+
+void AssemblyHelpers::jitAssertTagsInPlace()
+{
+}
+#endif // USE(JSVALUE32_64)
+
+void AssemblyHelpers::jitAssertHasValidCallFrame()
+{
+ Jump checkCFR = branchTestPtr(Zero, GPRInfo::callFrameRegister, TrustedImm32(7));
+ abortWithReason(AHCallFrameMisaligned);
+ checkCFR.link(this);
+}
+
+void AssemblyHelpers::jitAssertIsNull(GPRReg gpr)
+{
+ Jump checkNull = branchTestPtr(Zero, gpr);
+ abortWithReason(AHIsNotNull);
+ checkNull.link(this);
+}
+
+void AssemblyHelpers::jitAssertArgumentCountSane()
+{
+ Jump ok = branch32(Below, payloadFor(JSStack::ArgumentCount), TrustedImm32(10000000));
+ abortWithReason(AHInsaneArgumentCount);
+ ok.link(this);
+}
+
+#endif // !ASSERT_DISABLED
+
+void AssemblyHelpers::jitReleaseAssertNoException()
+{
+ Jump noException;
+#if USE(JSVALUE64)
+ noException = branchTest64(Zero, AbsoluteAddress(vm()->addressOfException()));
+#elif USE(JSVALUE32_64)
+ noException = branch32(Equal, AbsoluteAddress(vm()->addressOfException()), TrustedImm32(0));
+#endif
+ abortWithReason(JITUncoughtExceptionAfterCall);
+ noException.link(this);
+}
+
+void AssemblyHelpers::callExceptionFuzz()
+{
+ if (!Options::useExceptionFuzz())
+ return;
+
+ EncodedJSValue* buffer = vm()->exceptionFuzzingBuffer(sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters));
+
+ for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
+#if USE(JSVALUE64)
+ store64(GPRInfo::toRegister(i), buffer + i);
+#else
+ store32(GPRInfo::toRegister(i), buffer + i);
+#endif
+ }
+ for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
+ move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
+ storeDouble(FPRInfo::toRegister(i), Address(GPRInfo::regT0));
+ }
+
+ // Set up one argument.
+#if CPU(X86)
+ poke(GPRInfo::callFrameRegister, 0);
+#else
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+#endif
+ move(TrustedImmPtr(bitwise_cast<void*>(operationExceptionFuzz)), GPRInfo::nonPreservedNonReturnGPR);
+ call(GPRInfo::nonPreservedNonReturnGPR);
+
+ for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
+ move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
+ loadDouble(Address(GPRInfo::regT0), FPRInfo::toRegister(i));
+ }
+ for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
+#if USE(JSVALUE64)
+ load64(buffer + i, GPRInfo::toRegister(i));
+#else
+ load32(buffer + i, GPRInfo::toRegister(i));
+#endif
+ }
+}
+
+AssemblyHelpers::Jump AssemblyHelpers::emitExceptionCheck(ExceptionCheckKind kind, ExceptionJumpWidth width)
+{
+ callExceptionFuzz();
+
+ if (width == FarJumpWidth)
+ kind = (kind == NormalExceptionCheck ? InvertedExceptionCheck : NormalExceptionCheck);
+
+ Jump result;
+#if USE(JSVALUE64)
+ result = branchTest64(kind == NormalExceptionCheck ? NonZero : Zero, AbsoluteAddress(vm()->addressOfException()));
+#elif USE(JSVALUE32_64)
+ result = branch32(kind == NormalExceptionCheck ? NotEqual : Equal, AbsoluteAddress(vm()->addressOfException()), TrustedImm32(0));
+#endif
+
+ if (width == NormalJumpWidth)
+ return result;
+
+ PatchableJump realJump = patchableJump();
+ result.link(this);
+
+ return realJump.m_jump;
+}
+
+AssemblyHelpers::Jump AssemblyHelpers::emitNonPatchableExceptionCheck()
+{
+ callExceptionFuzz();
+
+ Jump result;
+#if USE(JSVALUE64)
+ result = branchTest64(NonZero, AbsoluteAddress(vm()->addressOfException()));
+#elif USE(JSVALUE32_64)
+ result = branch32(NotEqual, AbsoluteAddress(vm()->addressOfException()), TrustedImm32(0));
+#endif
+
+ return result;
+}
+
+void AssemblyHelpers::emitStoreStructureWithTypeInfo(AssemblyHelpers& jit, TrustedImmPtr structure, RegisterID dest)
+{
+ const Structure* structurePtr = static_cast<const Structure*>(structure.m_value);
+#if USE(JSVALUE64)
+ jit.store64(TrustedImm64(structurePtr->idBlob()), MacroAssembler::Address(dest, JSCell::structureIDOffset()));
+ if (!ASSERT_DISABLED) {
+ Jump correctStructure = jit.branch32(Equal, MacroAssembler::Address(dest, JSCell::structureIDOffset()), TrustedImm32(structurePtr->id()));
+ jit.abortWithReason(AHStructureIDIsValid);
+ correctStructure.link(&jit);
+
+ Jump correctIndexingType = jit.branch8(Equal, MacroAssembler::Address(dest, JSCell::indexingTypeOffset()), TrustedImm32(structurePtr->indexingType()));
+ jit.abortWithReason(AHIndexingTypeIsValid);
+ correctIndexingType.link(&jit);
+
+ Jump correctType = jit.branch8(Equal, MacroAssembler::Address(dest, JSCell::typeInfoTypeOffset()), TrustedImm32(structurePtr->typeInfo().type()));
+ jit.abortWithReason(AHTypeInfoIsValid);
+ correctType.link(&jit);
+
+ Jump correctFlags = jit.branch8(Equal, MacroAssembler::Address(dest, JSCell::typeInfoFlagsOffset()), TrustedImm32(structurePtr->typeInfo().inlineTypeFlags()));
+ jit.abortWithReason(AHTypeInfoInlineTypeFlagsAreValid);
+ correctFlags.link(&jit);
+ }
+#else
+ // Do a 32-bit wide store to initialize the cell's fields.
+ jit.store32(TrustedImm32(structurePtr->objectInitializationBlob()), MacroAssembler::Address(dest, JSCell::indexingTypeOffset()));
+ jit.storePtr(structure, MacroAssembler::Address(dest, JSCell::structureIDOffset()));
+#endif
+}
+
+#if USE(JSVALUE64)
+template<typename LoadFromHigh, typename StoreToHigh, typename LoadFromLow, typename StoreToLow>
+void emitRandomThunkImpl(AssemblyHelpers& jit, GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, FPRReg result, const LoadFromHigh& loadFromHigh, const StoreToHigh& storeToHigh, const LoadFromLow& loadFromLow, const StoreToLow& storeToLow)
+{
+ // Inlined WeakRandom::advance().
+ // uint64_t x = m_low;
+ loadFromLow(scratch0);
+ // uint64_t y = m_high;
+ loadFromHigh(scratch1);
+ // m_low = y;
+ storeToLow(scratch1);
+
+ // x ^= x << 23;
+ jit.move(scratch0, scratch2);
+ jit.lshift64(AssemblyHelpers::TrustedImm32(23), scratch2);
+ jit.xor64(scratch2, scratch0);
+
+ // x ^= x >> 17;
+ jit.move(scratch0, scratch2);
+ jit.rshift64(AssemblyHelpers::TrustedImm32(17), scratch2);
+ jit.xor64(scratch2, scratch0);
+
+ // x ^= y ^ (y >> 26);
+ jit.move(scratch1, scratch2);
+ jit.rshift64(AssemblyHelpers::TrustedImm32(26), scratch2);
+ jit.xor64(scratch1, scratch2);
+ jit.xor64(scratch2, scratch0);
+
+ // m_high = x;
+ storeToHigh(scratch0);
+
+ // return x + y;
+ jit.add64(scratch1, scratch0);
+
+ // Extract random 53bit. [0, 53] bit is safe integer number ranges in double representation.
+ jit.move(AssemblyHelpers::TrustedImm64((1ULL << 53) - 1), scratch1);
+ jit.and64(scratch1, scratch0);
+ // Now, scratch0 is always in range of int64_t. Safe to convert it to double with cvtsi2sdq.
+ jit.convertInt64ToDouble(scratch0, result);
+
+ // Convert `(53bit double integer value) / (1 << 53)` to `(53bit double integer value) * (1.0 / (1 << 53))`.
+ // In latter case, `1.0 / (1 << 53)` will become a double value represented as (mantissa = 0 & exp = 970, it means 1e-(2**54)).
+ static const double scale = 1.0 / (1ULL << 53);
+
+ // Multiplying 1e-(2**54) with the double integer does not change anything of the mantissa part of the double integer.
+ // It just reduces the exp part of the given 53bit double integer.
+ // (Except for 0.0. This is specially handled and in this case, exp just becomes 0.)
+ // Now we get 53bit precision random double value in [0, 1).
+ jit.move(AssemblyHelpers::TrustedImmPtr(&scale), scratch1);
+ jit.mulDouble(AssemblyHelpers::Address(scratch1), result);
+}
+
+void AssemblyHelpers::emitRandomThunk(JSGlobalObject* globalObject, GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, FPRReg result)
+{
+ void* lowAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::lowOffset();
+ void* highAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::highOffset();
+
+ auto loadFromHigh = [&](GPRReg high) {
+ load64(highAddress, high);
+ };
+ auto storeToHigh = [&](GPRReg high) {
+ store64(high, highAddress);
+ };
+ auto loadFromLow = [&](GPRReg low) {
+ load64(lowAddress, low);
+ };
+ auto storeToLow = [&](GPRReg low) {
+ store64(low, lowAddress);
+ };
+
+ emitRandomThunkImpl(*this, scratch0, scratch1, scratch2, result, loadFromHigh, storeToHigh, loadFromLow, storeToLow);
+}
+
+void AssemblyHelpers::emitRandomThunk(GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, GPRReg scratch3, FPRReg result)
+{
+ emitGetFromCallFrameHeaderPtr(JSStack::Callee, scratch3);
+ emitLoadStructure(scratch3, scratch3, scratch0);
+ loadPtr(Address(scratch3, Structure::globalObjectOffset()), scratch3);
+ // Now, scratch3 holds JSGlobalObject*.
+
+ auto loadFromHigh = [&](GPRReg high) {
+ load64(Address(scratch3, JSGlobalObject::weakRandomOffset() + WeakRandom::highOffset()), high);
+ };
+ auto storeToHigh = [&](GPRReg high) {
+ store64(high, Address(scratch3, JSGlobalObject::weakRandomOffset() + WeakRandom::highOffset()));
+ };
+ auto loadFromLow = [&](GPRReg low) {
+ load64(Address(scratch3, JSGlobalObject::weakRandomOffset() + WeakRandom::lowOffset()), low);
+ };
+ auto storeToLow = [&](GPRReg low) {
+ store64(low, Address(scratch3, JSGlobalObject::weakRandomOffset() + WeakRandom::lowOffset()));
+ };
+
+ emitRandomThunkImpl(*this, scratch0, scratch1, scratch2, result, loadFromHigh, storeToHigh, loadFromLow, storeToLow);
+}
+#endif
+
+void AssemblyHelpers::restoreCalleeSavesFromVMCalleeSavesBuffer()
+{
+#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
+ char* sourceBuffer = bitwise_cast<char*>(m_vm->calleeSaveRegistersBuffer);
+
+ RegisterAtOffsetList* allCalleeSaves = m_vm->getAllCalleeSaveRegisterOffsets();
+ RegisterSet dontRestoreRegisters = RegisterSet::stackRegisters();
+ unsigned registerCount = allCalleeSaves->size();
+
+ for (unsigned i = 0; i < registerCount; i++) {
+ RegisterAtOffset entry = allCalleeSaves->at(i);
+ if (dontRestoreRegisters.get(entry.reg()))
+ continue;
+ if (entry.reg().isGPR())
+ loadPtr(static_cast<void*>(sourceBuffer + entry.offset()), entry.reg().gpr());
+ else
+ loadDouble(TrustedImmPtr(sourceBuffer + entry.offset()), entry.reg().fpr());
+ }
+#endif
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
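Aside: the comments inside emitRandomThunkImpl above describe an xorshift128+ advance followed by a 53-bit-to-double scaling. As a reading aid, here is a plain C++ sketch of that same arithmetic. It mirrors only the commented algorithm (the low/high state names are taken from the comments); it is an illustrative sketch, not part of the imported patch and not the JIT-emitted code.

#include <cstdint>

// Host-language rendering of the math spelled out in the comments of
// emitRandomThunkImpl: one xorshift128+ step, then keep the low 53 bits of
// the result and scale by 1.0 / 2^53 to obtain a double in [0, 1).
struct WeakRandomSketch {
    uint64_t low;
    uint64_t high;

    double nextDouble()
    {
        // Inlined WeakRandom::advance(), as in the comments.
        uint64_t x = low;
        uint64_t y = high;
        low = y;
        x ^= x << 23;
        x ^= x >> 17;
        x ^= y ^ (y >> 26);
        high = x;
        uint64_t advanced = x + y;

        // Integers in [0, 2^53) are exactly representable as doubles, so
        // masking to 53 bits keeps full precision through the conversion.
        uint64_t bits53 = advanced & ((1ULL << 53) - 1);

        // Multiplying by 1.0 / 2^53 only lowers the exponent; the mantissa of
        // the 53-bit integer is unchanged, giving a value in [0, 1).
        static const double scale = 1.0 / (1ULL << 53);
        return static_cast<double>(bits53) * scale;
    }
};

On JSVALUE64 the thunk performs this computation entirely in registers, with the loadFromLow/storeToHigh lambdas standing in for the reads and writes of the WeakRandom state held in the JSGlobalObject.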
diff --git a/Source/JavaScriptCore/jit/AssemblyHelpers.h b/Source/JavaScriptCore/jit/AssemblyHelpers.h
new file mode 100644
index 000000000..918af7dca
--- /dev/null
+++ b/Source/JavaScriptCore/jit/AssemblyHelpers.h
@@ -0,0 +1,1394 @@
+/*
+ * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef AssemblyHelpers_h
+#define AssemblyHelpers_h
+
+#if ENABLE(JIT)
+
+#include "CodeBlock.h"
+#include "CopyBarrier.h"
+#include "FPRInfo.h"
+#include "GPRInfo.h"
+#include "InlineCallFrame.h"
+#include "JITCode.h"
+#include "MacroAssembler.h"
+#include "MaxFrameExtentForSlowPathCall.h"
+#include "RegisterAtOffsetList.h"
+#include "RegisterSet.h"
+#include "TypeofType.h"
+#include "VM.h"
+
+namespace JSC {
+
+typedef void (*V_DebugOperation_EPP)(ExecState*, void*, void*);
+
+class AssemblyHelpers : public MacroAssembler {
+public:
+ AssemblyHelpers(VM* vm, CodeBlock* codeBlock)
+ : m_vm(vm)
+ , m_codeBlock(codeBlock)
+ , m_baselineCodeBlock(codeBlock ? codeBlock->baselineAlternative() : 0)
+ {
+ if (m_codeBlock) {
+ ASSERT(m_baselineCodeBlock);
+ ASSERT(!m_baselineCodeBlock->alternative());
+ ASSERT(m_baselineCodeBlock->jitType() == JITCode::None || JITCode::isBaselineCode(m_baselineCodeBlock->jitType()));
+ }
+ }
+
+ CodeBlock* codeBlock() { return m_codeBlock; }
+ VM* vm() { return m_vm; }
+ AssemblerType_T& assembler() { return m_assembler; }
+
+ void checkStackPointerAlignment()
+ {
+ // This check is both unneeded and harder to write correctly for ARM64
+#if !defined(NDEBUG) && !CPU(ARM64)
+ Jump stackPointerAligned = branchTestPtr(Zero, stackPointerRegister, TrustedImm32(0xf));
+ abortWithReason(AHStackPointerMisaligned);
+ stackPointerAligned.link(this);
+#endif
+ }
+
+ template<typename T>
+ void storeCell(T cell, Address address)
+ {
+#if USE(JSVALUE64)
+ store64(cell, address);
+#else
+ store32(cell, address.withOffset(PayloadOffset));
+ store32(TrustedImm32(JSValue::CellTag), address.withOffset(TagOffset));
+#endif
+ }
+
+ void storeValue(JSValueRegs regs, Address address)
+ {
+#if USE(JSVALUE64)
+ store64(regs.gpr(), address);
+#else
+ store32(regs.payloadGPR(), address.withOffset(PayloadOffset));
+ store32(regs.tagGPR(), address.withOffset(TagOffset));
+#endif
+ }
+
+ void storeValue(JSValueRegs regs, BaseIndex address)
+ {
+#if USE(JSVALUE64)
+ store64(regs.gpr(), address);
+#else
+ store32(regs.payloadGPR(), address.withOffset(PayloadOffset));
+ store32(regs.tagGPR(), address.withOffset(TagOffset));
+#endif
+ }
+
+ void storeValue(JSValueRegs regs, void* address)
+ {
+#if USE(JSVALUE64)
+ store64(regs.gpr(), address);
+#else
+ store32(regs.payloadGPR(), bitwise_cast<void*>(bitwise_cast<uintptr_t>(address) + PayloadOffset));
+ store32(regs.tagGPR(), bitwise_cast<void*>(bitwise_cast<uintptr_t>(address) + TagOffset));
+#endif
+ }
+
+ void loadValue(Address address, JSValueRegs regs)
+ {
+#if USE(JSVALUE64)
+ load64(address, regs.gpr());
+#else
+ if (address.base == regs.payloadGPR()) {
+ load32(address.withOffset(TagOffset), regs.tagGPR());
+ load32(address.withOffset(PayloadOffset), regs.payloadGPR());
+ } else {
+ load32(address.withOffset(PayloadOffset), regs.payloadGPR());
+ load32(address.withOffset(TagOffset), regs.tagGPR());
+ }
+#endif
+ }
+
+ void loadValue(BaseIndex address, JSValueRegs regs)
+ {
+#if USE(JSVALUE64)
+ load64(address, regs.gpr());
+#else
+ if (address.base == regs.payloadGPR() || address.index == regs.payloadGPR()) {
+ // We actually could handle the case where the registers are aliased to both
+ // tag and payload, but we don't for now.
+ RELEASE_ASSERT(address.base != regs.tagGPR());
+ RELEASE_ASSERT(address.index != regs.tagGPR());
+
+ load32(address.withOffset(TagOffset), regs.tagGPR());
+ load32(address.withOffset(PayloadOffset), regs.payloadGPR());
+ } else {
+ load32(address.withOffset(PayloadOffset), regs.payloadGPR());
+ load32(address.withOffset(TagOffset), regs.tagGPR());
+ }
+#endif
+ }
+
+ void moveValueRegs(JSValueRegs srcRegs, JSValueRegs destRegs)
+ {
+#if USE(JSVALUE32_64)
+ move(srcRegs.tagGPR(), destRegs.tagGPR());
+#endif
+ move(srcRegs.payloadGPR(), destRegs.payloadGPR());
+ }
+
+ void moveValue(JSValue value, JSValueRegs regs)
+ {
+#if USE(JSVALUE64)
+ move(Imm64(JSValue::encode(value)), regs.gpr());
+#else
+ move(Imm32(value.tag()), regs.tagGPR());
+ move(Imm32(value.payload()), regs.payloadGPR());
+#endif
+ }
+
+ void moveTrustedValue(JSValue value, JSValueRegs regs)
+ {
+#if USE(JSVALUE64)
+ move(TrustedImm64(JSValue::encode(value)), regs.gpr());
+#else
+ move(TrustedImm32(value.tag()), regs.tagGPR());
+ move(TrustedImm32(value.payload()), regs.payloadGPR());
+#endif
+ }
+
+ void storeTrustedValue(JSValue value, Address address)
+ {
+#if USE(JSVALUE64)
+ store64(TrustedImm64(JSValue::encode(value)), address);
+#else
+ store32(TrustedImm32(value.tag()), address.withOffset(TagOffset));
+ store32(TrustedImm32(value.payload()), address.withOffset(PayloadOffset));
+#endif
+ }
+
+ void storeTrustedValue(JSValue value, BaseIndex address)
+ {
+#if USE(JSVALUE64)
+ store64(TrustedImm64(JSValue::encode(value)), address);
+#else
+ store32(TrustedImm32(value.tag()), address.withOffset(TagOffset));
+ store32(TrustedImm32(value.payload()), address.withOffset(PayloadOffset));
+#endif
+ }
+
+ void emitSaveCalleeSavesFor(CodeBlock* codeBlock)
+ {
+ ASSERT(codeBlock);
+
+ RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters();
+ RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());
+ unsigned registerCount = calleeSaves->size();
+
+ for (unsigned i = 0; i < registerCount; i++) {
+ RegisterAtOffset entry = calleeSaves->at(i);
+ if (dontSaveRegisters.get(entry.reg()))
+ continue;
+ storePtr(entry.reg().gpr(), Address(framePointerRegister, entry.offset()));
+ }
+ }
+
+ enum RestoreTagRegisterMode { UseExistingTagRegisterContents, CopyBaselineCalleeSavedRegistersFromBaseFrame };
+
+ void emitSaveOrCopyCalleeSavesFor(CodeBlock* codeBlock, VirtualRegister offsetVirtualRegister, RestoreTagRegisterMode tagRegisterMode, GPRReg temp)
+ {
+ ASSERT(codeBlock);
+
+ RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters();
+ RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());
+ unsigned registerCount = calleeSaves->size();
+
+#if USE(JSVALUE64)
+ RegisterSet baselineCalleeSaves = RegisterSet::llintBaselineCalleeSaveRegisters();
+#endif
+
+ for (unsigned i = 0; i < registerCount; i++) {
+ RegisterAtOffset entry = calleeSaves->at(i);
+ if (dontSaveRegisters.get(entry.reg()))
+ continue;
+
+ GPRReg registerToWrite;
+
+#if USE(JSVALUE32_64)
+ UNUSED_PARAM(tagRegisterMode);
+ UNUSED_PARAM(temp);
+#else
+ if (tagRegisterMode == CopyBaselineCalleeSavedRegistersFromBaseFrame && baselineCalleeSaves.get(entry.reg())) {
+ registerToWrite = temp;
+ loadPtr(AssemblyHelpers::Address(GPRInfo::callFrameRegister, entry.offset()), registerToWrite);
+ } else
+#endif
+ registerToWrite = entry.reg().gpr();
+
+ storePtr(registerToWrite, Address(framePointerRegister, offsetVirtualRegister.offsetInBytes() + entry.offset()));
+ }
+ }
+
+ void emitRestoreCalleeSavesFor(CodeBlock* codeBlock)
+ {
+ ASSERT(codeBlock);
+
+ RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters();
+ RegisterSet dontRestoreRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());
+ unsigned registerCount = calleeSaves->size();
+
+ for (unsigned i = 0; i < registerCount; i++) {
+ RegisterAtOffset entry = calleeSaves->at(i);
+ if (dontRestoreRegisters.get(entry.reg()))
+ continue;
+ loadPtr(Address(framePointerRegister, entry.offset()), entry.reg().gpr());
+ }
+ }
+
+ void emitSaveCalleeSaves()
+ {
+ emitSaveCalleeSavesFor(codeBlock());
+ }
+
+ void emitRestoreCalleeSaves()
+ {
+ emitRestoreCalleeSavesFor(codeBlock());
+ }
+
+ void copyCalleeSavesToVMCalleeSavesBuffer(const TempRegisterSet& usedRegisters = { RegisterSet::stubUnavailableRegisters() })
+ {
+#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
+ GPRReg temp1 = usedRegisters.getFreeGPR(0);
+
+ move(TrustedImmPtr(m_vm->calleeSaveRegistersBuffer), temp1);
+
+ RegisterAtOffsetList* allCalleeSaves = m_vm->getAllCalleeSaveRegisterOffsets();
+ RegisterSet dontCopyRegisters = RegisterSet::stackRegisters();
+ unsigned registerCount = allCalleeSaves->size();
+
+ for (unsigned i = 0; i < registerCount; i++) {
+ RegisterAtOffset entry = allCalleeSaves->at(i);
+ if (dontCopyRegisters.get(entry.reg()))
+ continue;
+ if (entry.reg().isGPR())
+ storePtr(entry.reg().gpr(), Address(temp1, entry.offset()));
+ else
+ storeDouble(entry.reg().fpr(), Address(temp1, entry.offset()));
+ }
+#else
+ UNUSED_PARAM(usedRegisters);
+#endif
+ }
+
+ void restoreCalleeSavesFromVMCalleeSavesBuffer();
+
+ void copyCalleeSavesFromFrameOrRegisterToVMCalleeSavesBuffer(const TempRegisterSet& usedRegisters = { RegisterSet::stubUnavailableRegisters() })
+ {
+#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
+ GPRReg temp1 = usedRegisters.getFreeGPR(0);
+ GPRReg temp2 = usedRegisters.getFreeGPR(1);
+ FPRReg fpTemp = usedRegisters.getFreeFPR();
+ ASSERT(temp2 != InvalidGPRReg);
+
+ ASSERT(codeBlock());
+
+ // Copy saved calleeSaves on stack or unsaved calleeSaves in register to vm calleeSave buffer
+ move(TrustedImmPtr(m_vm->calleeSaveRegistersBuffer), temp1);
+
+ RegisterAtOffsetList* allCalleeSaves = m_vm->getAllCalleeSaveRegisterOffsets();
+ RegisterAtOffsetList* currentCalleeSaves = codeBlock()->calleeSaveRegisters();
+ RegisterSet dontCopyRegisters = RegisterSet::stackRegisters();
+ unsigned registerCount = allCalleeSaves->size();
+
+ for (unsigned i = 0; i < registerCount; i++) {
+ RegisterAtOffset vmEntry = allCalleeSaves->at(i);
+ if (dontCopyRegisters.get(vmEntry.reg()))
+ continue;
+ RegisterAtOffset* currentFrameEntry = currentCalleeSaves->find(vmEntry.reg());
+
+ if (vmEntry.reg().isGPR()) {
+ GPRReg regToStore;
+ if (currentFrameEntry) {
+ // Load calleeSave from stack into temp register
+ regToStore = temp2;
+ loadPtr(Address(framePointerRegister, currentFrameEntry->offset()), regToStore);
+ } else
+ // Just store callee save directly
+ regToStore = vmEntry.reg().gpr();
+
+ storePtr(regToStore, Address(temp1, vmEntry.offset()));
+ } else {
+ FPRReg fpRegToStore;
+ if (currentFrameEntry) {
+ // Load calleeSave from stack into temp register
+ fpRegToStore = fpTemp;
+ loadDouble(Address(framePointerRegister, currentFrameEntry->offset()), fpRegToStore);
+ } else
+ // Just store callee save directly
+ fpRegToStore = vmEntry.reg().fpr();
+
+ storeDouble(fpRegToStore, Address(temp1, vmEntry.offset()));
+ }
+ }
+#else
+ UNUSED_PARAM(usedRegisters);
+#endif
+ }
+
+ void emitMaterializeTagCheckRegisters()
+ {
+#if USE(JSVALUE64)
+ move(MacroAssembler::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister);
+ orPtr(MacroAssembler::TrustedImm32(TagBitTypeOther), GPRInfo::tagTypeNumberRegister, GPRInfo::tagMaskRegister);
+#endif
+ }
+
+#if CPU(X86_64) || CPU(X86)
+ static size_t prologueStackPointerDelta()
+ {
+ // Prologue only saves the framePointerRegister
+ return sizeof(void*);
+ }
+
+ void emitFunctionPrologue()
+ {
+ push(framePointerRegister);
+ move(stackPointerRegister, framePointerRegister);
+ }
+
+ void emitFunctionEpilogueWithEmptyFrame()
+ {
+ pop(framePointerRegister);
+ }
+
+ void emitFunctionEpilogue()
+ {
+ move(framePointerRegister, stackPointerRegister);
+ pop(framePointerRegister);
+ }
+
+ void preserveReturnAddressAfterCall(GPRReg reg)
+ {
+ pop(reg);
+ }
+
+ void restoreReturnAddressBeforeReturn(GPRReg reg)
+ {
+ push(reg);
+ }
+
+ void restoreReturnAddressBeforeReturn(Address address)
+ {
+ push(address);
+ }
+#endif // CPU(X86_64) || CPU(X86)
+
+#if CPU(ARM) || CPU(ARM64)
+ static size_t prologueStackPointerDelta()
+ {
+ // Prologue saves the framePointerRegister and linkRegister
+ return 2 * sizeof(void*);
+ }
+
+ void emitFunctionPrologue()
+ {
+ pushPair(framePointerRegister, linkRegister);
+ move(stackPointerRegister, framePointerRegister);
+ }
+
+ void emitFunctionEpilogueWithEmptyFrame()
+ {
+ popPair(framePointerRegister, linkRegister);
+ }
+
+ void emitFunctionEpilogue()
+ {
+ move(framePointerRegister, stackPointerRegister);
+ emitFunctionEpilogueWithEmptyFrame();
+ }
+
+ ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg)
+ {
+ move(linkRegister, reg);
+ }
+
+ ALWAYS_INLINE void restoreReturnAddressBeforeReturn(RegisterID reg)
+ {
+ move(reg, linkRegister);
+ }
+
+ ALWAYS_INLINE void restoreReturnAddressBeforeReturn(Address address)
+ {
+ loadPtr(address, linkRegister);
+ }
+#endif
+
+#if CPU(MIPS)
+ static size_t prologueStackPointerDelta()
+ {
+ // Prologue saves the framePointerRegister and returnAddressRegister
+ return 2 * sizeof(void*);
+ }
+
+ void emitFunctionPrologue()
+ {
+ pushPair(framePointerRegister, returnAddressRegister);
+ move(stackPointerRegister, framePointerRegister);
+ }
+
+ void emitFunctionEpilogueWithEmptyFrame()
+ {
+ popPair(framePointerRegister, returnAddressRegister);
+ }
+
+ void emitFunctionEpilogue()
+ {
+ move(framePointerRegister, stackPointerRegister);
+ emitFunctionEpilogueWithEmptyFrame();
+ }
+
+ ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg)
+ {
+ move(returnAddressRegister, reg);
+ }
+
+ ALWAYS_INLINE void restoreReturnAddressBeforeReturn(RegisterID reg)
+ {
+ move(reg, returnAddressRegister);
+ }
+
+ ALWAYS_INLINE void restoreReturnAddressBeforeReturn(Address address)
+ {
+ loadPtr(address, returnAddressRegister);
+ }
+#endif
+
+#if CPU(SH4)
+ static size_t prologueStackPointerDelta()
+ {
+ // Prologue saves the framePointerRegister and link register
+ return 2 * sizeof(void*);
+ }
+
+ void emitFunctionPrologue()
+ {
+ push(linkRegister);
+ push(framePointerRegister);
+ move(stackPointerRegister, framePointerRegister);
+ }
+
+ void emitFunctionEpilogue()
+ {
+ move(framePointerRegister, stackPointerRegister);
+ pop(framePointerRegister);
+ pop(linkRegister);
+ }
+
+ ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg)
+ {
+ m_assembler.stspr(reg);
+ }
+
+ ALWAYS_INLINE void restoreReturnAddressBeforeReturn(RegisterID reg)
+ {
+ m_assembler.ldspr(reg);
+ }
+
+ ALWAYS_INLINE void restoreReturnAddressBeforeReturn(Address address)
+ {
+ loadPtrLinkReg(address);
+ }
+#endif
+
+ void emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry entry, GPRReg to, GPRReg from = GPRInfo::callFrameRegister)
+ {
+ loadPtr(Address(from, entry * sizeof(Register)), to);
+ }
+ void emitGetFromCallFrameHeader32(JSStack::CallFrameHeaderEntry entry, GPRReg to, GPRReg from = GPRInfo::callFrameRegister)
+ {
+ load32(Address(from, entry * sizeof(Register)), to);
+ }
+#if USE(JSVALUE64)
+ void emitGetFromCallFrameHeader64(JSStack::CallFrameHeaderEntry entry, GPRReg to, GPRReg from = GPRInfo::callFrameRegister)
+ {
+ load64(Address(from, entry * sizeof(Register)), to);
+ }
+#endif // USE(JSVALUE64)
+ void emitPutToCallFrameHeader(GPRReg from, JSStack::CallFrameHeaderEntry entry)
+ {
+ storePtr(from, Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
+ }
+
+ void emitPutToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry entry)
+ {
+ storePtr(TrustedImmPtr(value), Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
+ }
+
+ void emitGetCallerFrameFromCallFrameHeaderPtr(RegisterID to)
+ {
+ loadPtr(Address(GPRInfo::callFrameRegister, CallFrame::callerFrameOffset()), to);
+ }
+ void emitPutCallerFrameToCallFrameHeader(RegisterID from)
+ {
+ storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::callerFrameOffset()));
+ }
+
+ void emitPutReturnPCToCallFrameHeader(RegisterID from)
+ {
+ storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()));
+ }
+ void emitPutReturnPCToCallFrameHeader(TrustedImmPtr from)
+ {
+ storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()));
+ }
+
+ // emitPutToCallFrameHeaderBeforePrologue() and related are used to access callee frame header
+ // fields before the code from emitFunctionPrologue() has executed.
+ // First, the access is via the stack pointer. Second, the address calculation must also take
+ // into account that the stack pointer may not have been adjusted down for the return PC and/or
+ // caller's frame pointer. On some platforms, the callee is responsible for pushing the
+ // "link register" containing the return address in the function prologue.
+#if USE(JSVALUE64)
+ void emitPutToCallFrameHeaderBeforePrologue(GPRReg from, JSStack::CallFrameHeaderEntry entry)
+ {
+ storePtr(from, Address(stackPointerRegister, entry * static_cast<ptrdiff_t>(sizeof(Register)) - prologueStackPointerDelta()));
+ }
+#else
+ void emitPutPayloadToCallFrameHeaderBeforePrologue(GPRReg from, JSStack::CallFrameHeaderEntry entry)
+ {
+ storePtr(from, Address(stackPointerRegister, entry * static_cast<ptrdiff_t>(sizeof(Register)) - prologueStackPointerDelta() + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ }
+
+ void emitPutTagToCallFrameHeaderBeforePrologue(TrustedImm32 tag, JSStack::CallFrameHeaderEntry entry)
+ {
+ storePtr(tag, Address(stackPointerRegister, entry * static_cast<ptrdiff_t>(sizeof(Register)) - prologueStackPointerDelta() + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ }
+#endif
+
+ JumpList branchIfNotEqual(JSValueRegs regs, JSValue value)
+ {
+#if USE(JSVALUE64)
+ return branch64(NotEqual, regs.gpr(), TrustedImm64(JSValue::encode(value)));
+#else
+ JumpList result;
+ result.append(branch32(NotEqual, regs.tagGPR(), TrustedImm32(value.tag())));
+ if (value.isEmpty() || value.isUndefinedOrNull())
+ return result; // These don't have anything interesting in the payload.
+ result.append(branch32(NotEqual, regs.payloadGPR(), TrustedImm32(value.payload())));
+ return result;
+#endif
+ }
+
+ Jump branchIfEqual(JSValueRegs regs, JSValue value)
+ {
+#if USE(JSVALUE64)
+ return branch64(Equal, regs.gpr(), TrustedImm64(JSValue::encode(value)));
+#else
+ Jump notEqual;
+ // These don't have anything interesting in the payload.
+ if (!value.isEmpty() && !value.isUndefinedOrNull())
+ notEqual = branch32(NotEqual, regs.payloadGPR(), TrustedImm32(value.payload()));
+ Jump result = branch32(Equal, regs.tagGPR(), TrustedImm32(value.tag()));
+ if (notEqual.isSet())
+ notEqual.link(this);
+ return result;
+#endif
+ }
+
+ enum TagRegistersMode {
+ DoNotHaveTagRegisters,
+ HaveTagRegisters
+ };
+
+ Jump branchIfNotCell(GPRReg reg, TagRegistersMode mode = HaveTagRegisters)
+ {
+#if USE(JSVALUE64)
+ if (mode == HaveTagRegisters)
+ return branchTest64(NonZero, reg, GPRInfo::tagMaskRegister);
+ return branchTest64(NonZero, reg, TrustedImm64(TagMask));
+#else
+ UNUSED_PARAM(mode);
+ return branch32(MacroAssembler::NotEqual, reg, TrustedImm32(JSValue::CellTag));
+#endif
+ }
+ Jump branchIfNotCell(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters)
+ {
+#if USE(JSVALUE64)
+ return branchIfNotCell(regs.gpr(), mode);
+#else
+ return branchIfNotCell(regs.tagGPR(), mode);
+#endif
+ }
+
+ Jump branchIfCell(GPRReg reg, TagRegistersMode mode = HaveTagRegisters)
+ {
+#if USE(JSVALUE64)
+ if (mode == HaveTagRegisters)
+ return branchTest64(Zero, reg, GPRInfo::tagMaskRegister);
+ return branchTest64(Zero, reg, TrustedImm64(TagMask));
+#else
+ UNUSED_PARAM(mode);
+ return branch32(MacroAssembler::Equal, reg, TrustedImm32(JSValue::CellTag));
+#endif
+ }
+ Jump branchIfCell(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters)
+ {
+#if USE(JSVALUE64)
+ return branchIfCell(regs.gpr(), mode);
+#else
+ return branchIfCell(regs.tagGPR(), mode);
+#endif
+ }
+
+ Jump branchIfOther(JSValueRegs regs, GPRReg tempGPR)
+ {
+#if USE(JSVALUE64)
+ move(regs.gpr(), tempGPR);
+ and64(TrustedImm32(~TagBitUndefined), tempGPR);
+ return branch64(Equal, tempGPR, TrustedImm64(ValueNull));
+#else
+ or32(TrustedImm32(1), regs.tagGPR(), tempGPR);
+ return branch32(Equal, tempGPR, TrustedImm32(JSValue::NullTag));
+#endif
+ }
+
+ Jump branchIfNotOther(JSValueRegs regs, GPRReg tempGPR)
+ {
+#if USE(JSVALUE64)
+ move(regs.gpr(), tempGPR);
+ and64(TrustedImm32(~TagBitUndefined), tempGPR);
+ return branch64(NotEqual, tempGPR, TrustedImm64(ValueNull));
+#else
+ or32(TrustedImm32(1), regs.tagGPR(), tempGPR);
+ return branch32(NotEqual, tempGPR, TrustedImm32(JSValue::NullTag));
+#endif
+ }
+
+ Jump branchIfInt32(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters)
+ {
+#if USE(JSVALUE64)
+ if (mode == HaveTagRegisters)
+ return branch64(AboveOrEqual, regs.gpr(), GPRInfo::tagTypeNumberRegister);
+ return branch64(AboveOrEqual, regs.gpr(), TrustedImm64(TagTypeNumber));
+#else
+ UNUSED_PARAM(mode);
+ return branch32(Equal, regs.tagGPR(), TrustedImm32(JSValue::Int32Tag));
+#endif
+ }
+
+#if USE(JSVALUE64)
+ Jump branchIfNotInt32(GPRReg gpr, TagRegistersMode mode = HaveTagRegisters)
+ {
+ if (mode == HaveTagRegisters)
+ return branch64(Below, gpr, GPRInfo::tagTypeNumberRegister);
+ return branch64(Below, gpr, TrustedImm64(TagTypeNumber));
+ }
+#endif
+
+ Jump branchIfNotInt32(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters)
+ {
+#if USE(JSVALUE64)
+ return branchIfNotInt32(regs.gpr(), mode);
+#else
+ UNUSED_PARAM(mode);
+ return branch32(NotEqual, regs.tagGPR(), TrustedImm32(JSValue::Int32Tag));
+#endif
+ }
+
+ // Note that the tempGPR is not used in 64-bit mode.
+ Jump branchIfNumber(JSValueRegs regs, GPRReg tempGPR, TagRegistersMode mode = HaveTagRegisters)
+ {
+#if USE(JSVALUE64)
+ UNUSED_PARAM(tempGPR);
+ if (mode == HaveTagRegisters)
+ return branchTest64(NonZero, regs.gpr(), GPRInfo::tagTypeNumberRegister);
+ return branchTest64(NonZero, regs.gpr(), TrustedImm64(TagTypeNumber));
+#else
+ UNUSED_PARAM(mode);
+ add32(TrustedImm32(1), regs.tagGPR(), tempGPR);
+ return branch32(Below, tempGPR, TrustedImm32(JSValue::LowestTag + 1));
+#endif
+ }
+
+ // Note that the tempGPR is not used in 64-bit mode.
+ Jump branchIfNotNumber(JSValueRegs regs, GPRReg tempGPR, TagRegistersMode mode = HaveTagRegisters)
+ {
+#if USE(JSVALUE64)
+ UNUSED_PARAM(tempGPR);
+ if (mode == HaveTagRegisters)
+ return branchTest64(Zero, regs.gpr(), GPRInfo::tagTypeNumberRegister);
+ return branchTest64(Zero, regs.gpr(), TrustedImm64(TagTypeNumber));
+#else
+ UNUSED_PARAM(mode);
+ add32(TrustedImm32(1), regs.tagGPR(), tempGPR);
+ return branch32(AboveOrEqual, tempGPR, TrustedImm32(JSValue::LowestTag + 1));
+#endif
+ }
+
+ // Note that the tempGPR is not used in 32-bit mode.
+ Jump branchIfBoolean(JSValueRegs regs, GPRReg tempGPR)
+ {
+#if USE(JSVALUE64)
+ move(regs.gpr(), tempGPR);
+ xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), tempGPR);
+ return branchTest64(Zero, tempGPR, TrustedImm32(static_cast<int32_t>(~1)));
+#else
+ UNUSED_PARAM(tempGPR);
+ return branch32(Equal, regs.tagGPR(), TrustedImm32(JSValue::BooleanTag));
+#endif
+ }
+
+ // Note that the tempGPR is not used in 32-bit mode.
+ Jump branchIfNotBoolean(JSValueRegs regs, GPRReg tempGPR)
+ {
+#if USE(JSVALUE64)
+ move(regs.gpr(), tempGPR);
+ xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), tempGPR);
+ return branchTest64(NonZero, tempGPR, TrustedImm32(static_cast<int32_t>(~1)));
+#else
+ UNUSED_PARAM(tempGPR);
+ return branch32(NotEqual, regs.tagGPR(), TrustedImm32(JSValue::BooleanTag));
+#endif
+ }
+
+ Jump branchIfObject(GPRReg cellGPR)
+ {
+ return branch8(
+ AboveOrEqual, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType));
+ }
+
+ Jump branchIfNotObject(GPRReg cellGPR)
+ {
+ return branch8(
+ Below, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType));
+ }
+
+ Jump branchIfType(GPRReg cellGPR, JSType type)
+ {
+ return branch8(Equal, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(type));
+ }
+
+ Jump branchIfNotType(GPRReg cellGPR, JSType type)
+ {
+ return branch8(NotEqual, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(type));
+ }
+
+ Jump branchIfString(GPRReg cellGPR) { return branchIfType(cellGPR, StringType); }
+ Jump branchIfNotString(GPRReg cellGPR) { return branchIfNotType(cellGPR, StringType); }
+ Jump branchIfSymbol(GPRReg cellGPR) { return branchIfType(cellGPR, SymbolType); }
+ Jump branchIfNotSymbol(GPRReg cellGPR) { return branchIfNotType(cellGPR, SymbolType); }
+ Jump branchIfFunction(GPRReg cellGPR) { return branchIfType(cellGPR, JSFunctionType); }
+ Jump branchIfNotFunction(GPRReg cellGPR) { return branchIfNotType(cellGPR, JSFunctionType); }
+
+ Jump branchIfEmpty(JSValueRegs regs)
+ {
+#if USE(JSVALUE64)
+ return branchTest64(Zero, regs.gpr());
+#else
+ return branch32(Equal, regs.tagGPR(), TrustedImm32(JSValue::EmptyValueTag));
+#endif
+ }
+
+ JumpList branchIfNotType(
+ JSValueRegs, GPRReg tempGPR, const InferredType::Descriptor&, TagRegistersMode);
+
+ template<typename T>
+ Jump branchStructure(RelationalCondition condition, T leftHandSide, Structure* structure)
+ {
+#if USE(JSVALUE64)
+ return branch32(condition, leftHandSide, TrustedImm32(structure->id()));
+#else
+ return branchPtr(condition, leftHandSide, TrustedImmPtr(structure));
+#endif
+ }
+
+ Jump branchIfToSpace(GPRReg storageGPR)
+ {
+ return branchTest32(Zero, storageGPR, TrustedImm32(CopyBarrierBase::spaceBits));
+ }
+
+ Jump branchIfNotToSpace(GPRReg storageGPR)
+ {
+ return branchTest32(NonZero, storageGPR, TrustedImm32(CopyBarrierBase::spaceBits));
+ }
+
+ void removeSpaceBits(GPRReg storageGPR)
+ {
+ andPtr(TrustedImmPtr(~static_cast<uintptr_t>(CopyBarrierBase::spaceBits)), storageGPR);
+ }
+
+ Jump branchIfFastTypedArray(GPRReg baseGPR);
+ Jump branchIfNotFastTypedArray(GPRReg baseGPR);
+
+ // Returns a jump to slow path for when we need to execute the barrier. Note that baseGPR and
+ // resultGPR must be different.
+ Jump loadTypedArrayVector(GPRReg baseGPR, GPRReg resultGPR);
+
+ static Address addressForByteOffset(ptrdiff_t byteOffset)
+ {
+ return Address(GPRInfo::callFrameRegister, byteOffset);
+ }
+ static Address addressFor(VirtualRegister virtualRegister, GPRReg baseReg)
+ {
+ ASSERT(virtualRegister.isValid());
+ return Address(baseReg, virtualRegister.offset() * sizeof(Register));
+ }
+ static Address addressFor(VirtualRegister virtualRegister)
+ {
+ // NB. It's tempting on some architectures to sometimes use an offset from the stack
+ // register because for some offsets that will encode to a smaller instruction. But we
+ // cannot do this. We use this in places where the stack pointer has been moved to some
+ // unpredictable location.
+ ASSERT(virtualRegister.isValid());
+ return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register));
+ }
+ static Address addressFor(int operand)
+ {
+ return addressFor(static_cast<VirtualRegister>(operand));
+ }
+
+ static Address tagFor(VirtualRegister virtualRegister)
+ {
+ ASSERT(virtualRegister.isValid());
+ return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register) + TagOffset);
+ }
+ static Address tagFor(int operand)
+ {
+ return tagFor(static_cast<VirtualRegister>(operand));
+ }
+
+ static Address payloadFor(VirtualRegister virtualRegister)
+ {
+ ASSERT(virtualRegister.isValid());
+ return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register) + PayloadOffset);
+ }
+ static Address payloadFor(int operand)
+ {
+ return payloadFor(static_cast<VirtualRegister>(operand));
+ }
+
+ // Access to our fixed callee CallFrame.
+ static Address calleeFrameSlot(int slot)
+ {
+ ASSERT(slot >= JSStack::CallerFrameAndPCSize);
+ return Address(stackPointerRegister, sizeof(Register) * (slot - JSStack::CallerFrameAndPCSize));
+ }
+
+ // Access to our fixed callee CallFrame.
+ static Address calleeArgumentSlot(int argument)
+ {
+ return calleeFrameSlot(virtualRegisterForArgument(argument).offset());
+ }
+
+ static Address calleeFrameTagSlot(int slot)
+ {
+ return calleeFrameSlot(slot).withOffset(TagOffset);
+ }
+
+ static Address calleeFramePayloadSlot(int slot)
+ {
+ return calleeFrameSlot(slot).withOffset(PayloadOffset);
+ }
+
+ static Address calleeArgumentTagSlot(int argument)
+ {
+ return calleeArgumentSlot(argument).withOffset(TagOffset);
+ }
+
+ static Address calleeArgumentPayloadSlot(int argument)
+ {
+ return calleeArgumentSlot(argument).withOffset(PayloadOffset);
+ }
+
+ static Address calleeFrameCallerFrame()
+ {
+ return calleeFrameSlot(0).withOffset(CallFrame::callerFrameOffset());
+ }
+
+ static GPRReg selectScratchGPR(GPRReg preserve1 = InvalidGPRReg, GPRReg preserve2 = InvalidGPRReg, GPRReg preserve3 = InvalidGPRReg, GPRReg preserve4 = InvalidGPRReg, GPRReg preserve5 = InvalidGPRReg)
+ {
+ if (preserve1 != GPRInfo::regT0 && preserve2 != GPRInfo::regT0 && preserve3 != GPRInfo::regT0 && preserve4 != GPRInfo::regT0 && preserve5 != GPRInfo::regT0)
+ return GPRInfo::regT0;
+
+ if (preserve1 != GPRInfo::regT1 && preserve2 != GPRInfo::regT1 && preserve3 != GPRInfo::regT1 && preserve4 != GPRInfo::regT1 && preserve5 != GPRInfo::regT1)
+ return GPRInfo::regT1;
+
+ if (preserve1 != GPRInfo::regT2 && preserve2 != GPRInfo::regT2 && preserve3 != GPRInfo::regT2 && preserve4 != GPRInfo::regT2 && preserve5 != GPRInfo::regT2)
+ return GPRInfo::regT2;
+
+ if (preserve1 != GPRInfo::regT3 && preserve2 != GPRInfo::regT3 && preserve3 != GPRInfo::regT3 && preserve4 != GPRInfo::regT3 && preserve5 != GPRInfo::regT3)
+ return GPRInfo::regT3;
+
+ if (preserve1 != GPRInfo::regT4 && preserve2 != GPRInfo::regT4 && preserve3 != GPRInfo::regT4 && preserve4 != GPRInfo::regT4 && preserve5 != GPRInfo::regT4)
+ return GPRInfo::regT4;
+
+ return GPRInfo::regT5;
+ }
+
+ // Add a debug call. This call has no effect on JIT code execution state.
+ void debugCall(V_DebugOperation_EPP function, void* argument)
+ {
+ size_t scratchSize = sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters);
+ ScratchBuffer* scratchBuffer = m_vm->scratchBufferForSize(scratchSize);
+ EncodedJSValue* buffer = static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer());
+
+ for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
+#if USE(JSVALUE64)
+ store64(GPRInfo::toRegister(i), buffer + i);
+#else
+ store32(GPRInfo::toRegister(i), buffer + i);
+#endif
+ }
+
+ for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
+ move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
+ storeDouble(FPRInfo::toRegister(i), GPRInfo::regT0);
+ }
+
+        // Tell the GC mark phase how much of the scratch buffer is active during the call.
+ move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT0);
+ storePtr(TrustedImmPtr(scratchSize), GPRInfo::regT0);
+
+#if CPU(X86_64) || CPU(ARM) || CPU(ARM64) || CPU(MIPS) || CPU(SH4)
+ move(TrustedImmPtr(buffer), GPRInfo::argumentGPR2);
+ move(TrustedImmPtr(argument), GPRInfo::argumentGPR1);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ GPRReg scratch = selectScratchGPR(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, GPRInfo::argumentGPR2);
+#elif CPU(X86)
+ poke(GPRInfo::callFrameRegister, 0);
+ poke(TrustedImmPtr(argument), 1);
+ poke(TrustedImmPtr(buffer), 2);
+ GPRReg scratch = GPRInfo::regT0;
+#else
+#error "JIT not supported on this platform."
+#endif
+ move(TrustedImmPtr(reinterpret_cast<void*>(function)), scratch);
+ call(scratch);
+
+ move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT0);
+ storePtr(TrustedImmPtr(0), GPRInfo::regT0);
+
+ for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
+ move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
+ loadDouble(GPRInfo::regT0, FPRInfo::toRegister(i));
+ }
+ for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
+#if USE(JSVALUE64)
+ load64(buffer + i, GPRInfo::toRegister(i));
+#else
+ load32(buffer + i, GPRInfo::toRegister(i));
+#endif
+ }
+ }
+
+    // These methods JIT-generate dynamic, debug-only checks, akin to ASSERTs.
+#if !ASSERT_DISABLED
+ void jitAssertIsInt32(GPRReg);
+ void jitAssertIsJSInt32(GPRReg);
+ void jitAssertIsJSNumber(GPRReg);
+ void jitAssertIsJSDouble(GPRReg);
+ void jitAssertIsCell(GPRReg);
+ void jitAssertHasValidCallFrame();
+ void jitAssertIsNull(GPRReg);
+ void jitAssertTagsInPlace();
+ void jitAssertArgumentCountSane();
+#else
+ void jitAssertIsInt32(GPRReg) { }
+ void jitAssertIsJSInt32(GPRReg) { }
+ void jitAssertIsJSNumber(GPRReg) { }
+ void jitAssertIsJSDouble(GPRReg) { }
+ void jitAssertIsCell(GPRReg) { }
+ void jitAssertHasValidCallFrame() { }
+ void jitAssertIsNull(GPRReg) { }
+ void jitAssertTagsInPlace() { }
+ void jitAssertArgumentCountSane() { }
+#endif
+
+ void jitReleaseAssertNoException();
+
+ void purifyNaN(FPRReg);
+
+    // These methods convert between raw doubles and doubles boxed as JSValues.
+#if USE(JSVALUE64)
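+    // On 64-bit, a double is boxed by offsetting its raw bit pattern: subtracting
+    // TagTypeNumber (0xffff000000000000) below is, modulo 2^64, the same as adding the
+    // double-encode offset 2^48, which keeps boxed doubles clear of the cell and
+    // integer tag ranges; unboxing adds TagTypeNumber back.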
+ GPRReg boxDouble(FPRReg fpr, GPRReg gpr)
+ {
+ moveDoubleTo64(fpr, gpr);
+ sub64(GPRInfo::tagTypeNumberRegister, gpr);
+ jitAssertIsJSDouble(gpr);
+ return gpr;
+ }
+ FPRReg unboxDoubleWithoutAssertions(GPRReg gpr, GPRReg resultGPR, FPRReg fpr)
+ {
+ add64(GPRInfo::tagTypeNumberRegister, gpr, resultGPR);
+ move64ToDouble(resultGPR, fpr);
+ return fpr;
+ }
+ FPRReg unboxDouble(GPRReg gpr, GPRReg resultGPR, FPRReg fpr)
+ {
+ jitAssertIsJSDouble(gpr);
+ return unboxDoubleWithoutAssertions(gpr, resultGPR, fpr);
+ }
+
+ void boxDouble(FPRReg fpr, JSValueRegs regs)
+ {
+ boxDouble(fpr, regs.gpr());
+ }
+
+ void unboxDoubleNonDestructive(JSValueRegs regs, FPRReg destFPR, GPRReg resultGPR, FPRReg)
+ {
+ unboxDouble(regs.payloadGPR(), resultGPR, destFPR);
+ }
+
+ // Here are possible arrangements of source, target, scratch:
+ // - source, target, scratch can all be separate registers.
+ // - source and target can be the same but scratch is separate.
+ // - target and scratch can be the same but source is separate.
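+    // Note that source and scratch must be distinct: the int32 check below compares
+    // source against its sign-extension placed in scratch, so aliasing them would make
+    // the check vacuously true and clobber source's upper bits.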
+ void boxInt52(GPRReg source, GPRReg target, GPRReg scratch, FPRReg fpScratch)
+ {
+ // Is it an int32?
+ signExtend32ToPtr(source, scratch);
+ Jump isInt32 = branch64(Equal, source, scratch);
+
+        // Nope, it's not, but the source register contains the int64 value.
+ convertInt64ToDouble(source, fpScratch);
+ boxDouble(fpScratch, target);
+ Jump done = jump();
+
+ isInt32.link(this);
+ zeroExtend32ToPtr(source, target);
+ or64(GPRInfo::tagTypeNumberRegister, target);
+
+ done.link(this);
+ }
+#endif
+
+#if USE(JSVALUE32_64)
+ void boxDouble(FPRReg fpr, GPRReg tagGPR, GPRReg payloadGPR)
+ {
+ moveDoubleToInts(fpr, payloadGPR, tagGPR);
+ }
+ void unboxDouble(GPRReg tagGPR, GPRReg payloadGPR, FPRReg fpr, FPRReg scratchFPR)
+ {
+ moveIntsToDouble(payloadGPR, tagGPR, fpr, scratchFPR);
+ }
+
+ void boxDouble(FPRReg fpr, JSValueRegs regs)
+ {
+ boxDouble(fpr, regs.tagGPR(), regs.payloadGPR());
+ }
+ void unboxDouble(JSValueRegs regs, FPRReg fpr, FPRReg scratchFPR)
+ {
+ unboxDouble(regs.tagGPR(), regs.payloadGPR(), fpr, scratchFPR);
+ }
+
+ void unboxDoubleNonDestructive(const JSValueRegs regs, FPRReg destFPR, GPRReg, FPRReg scratchFPR)
+ {
+ unboxDouble(regs, destFPR, scratchFPR);
+ }
+#endif
+
+ void boxBooleanPayload(GPRReg boolGPR, GPRReg payloadGPR)
+ {
+#if USE(JSVALUE64)
+ add32(TrustedImm32(ValueFalse), boolGPR, payloadGPR);
+#else
+ move(boolGPR, payloadGPR);
+#endif
+ }
+
+ void boxBooleanPayload(bool value, GPRReg payloadGPR)
+ {
+#if USE(JSVALUE64)
+ move(TrustedImm32(ValueFalse + value), payloadGPR);
+#else
+ move(TrustedImm32(value), payloadGPR);
+#endif
+ }
+
+ void boxBoolean(GPRReg boolGPR, JSValueRegs boxedRegs)
+ {
+ boxBooleanPayload(boolGPR, boxedRegs.payloadGPR());
+#if USE(JSVALUE32_64)
+ move(TrustedImm32(JSValue::BooleanTag), boxedRegs.tagGPR());
+#endif
+ }
+
+ void boxInt32(GPRReg intGPR, JSValueRegs boxedRegs, TagRegistersMode mode = HaveTagRegisters)
+ {
+#if USE(JSVALUE64)
+ if (mode == DoNotHaveTagRegisters) {
+ move(intGPR, boxedRegs.gpr());
+ or64(TrustedImm64(TagTypeNumber), boxedRegs.gpr());
+ } else
+ or64(GPRInfo::tagTypeNumberRegister, intGPR, boxedRegs.gpr());
+#else
+ UNUSED_PARAM(mode);
+ move(intGPR, boxedRegs.payloadGPR());
+ move(TrustedImm32(JSValue::Int32Tag), boxedRegs.tagGPR());
+#endif
+ }
+
+ void callExceptionFuzz();
+
+ enum ExceptionCheckKind { NormalExceptionCheck, InvertedExceptionCheck };
+ enum ExceptionJumpWidth { NormalJumpWidth, FarJumpWidth };
+ Jump emitExceptionCheck(
+ ExceptionCheckKind = NormalExceptionCheck, ExceptionJumpWidth = NormalJumpWidth);
+ Jump emitNonPatchableExceptionCheck();
+
+#if ENABLE(SAMPLING_COUNTERS)
+ static void emitCount(MacroAssembler& jit, AbstractSamplingCounter& counter, int32_t increment = 1)
+ {
+ jit.add64(TrustedImm32(increment), AbsoluteAddress(counter.addressOfCounter()));
+ }
+ void emitCount(AbstractSamplingCounter& counter, int32_t increment = 1)
+ {
+ add64(TrustedImm32(increment), AbsoluteAddress(counter.addressOfCounter()));
+ }
+#endif
+
+#if ENABLE(SAMPLING_FLAGS)
+ void setSamplingFlag(int32_t);
+ void clearSamplingFlag(int32_t flag);
+#endif
+
+ JSGlobalObject* globalObjectFor(CodeOrigin codeOrigin)
+ {
+ return codeBlock()->globalObjectFor(codeOrigin);
+ }
+
+ bool isStrictModeFor(CodeOrigin codeOrigin)
+ {
+ if (!codeOrigin.inlineCallFrame)
+ return codeBlock()->isStrictMode();
+ return codeOrigin.inlineCallFrame->isStrictMode();
+ }
+
+ ECMAMode ecmaModeFor(CodeOrigin codeOrigin)
+ {
+ return isStrictModeFor(codeOrigin) ? StrictMode : NotStrictMode;
+ }
+
+ ExecutableBase* executableFor(const CodeOrigin& codeOrigin);
+
+ CodeBlock* baselineCodeBlockFor(const CodeOrigin& codeOrigin)
+ {
+ return baselineCodeBlockForOriginAndBaselineCodeBlock(codeOrigin, baselineCodeBlock());
+ }
+
+ CodeBlock* baselineCodeBlockFor(InlineCallFrame* inlineCallFrame)
+ {
+ if (!inlineCallFrame)
+ return baselineCodeBlock();
+ return baselineCodeBlockForInlineCallFrame(inlineCallFrame);
+ }
+
+ CodeBlock* baselineCodeBlock()
+ {
+ return m_baselineCodeBlock;
+ }
+
+ static VirtualRegister argumentsStart(InlineCallFrame* inlineCallFrame)
+ {
+ if (!inlineCallFrame)
+ return VirtualRegister(CallFrame::argumentOffset(0));
+ if (inlineCallFrame->arguments.size() <= 1)
+ return virtualRegisterForLocal(0);
+ ValueRecovery recovery = inlineCallFrame->arguments[1];
+ RELEASE_ASSERT(recovery.technique() == DisplacedInJSStack);
+ return recovery.virtualRegister();
+ }
+
+ static VirtualRegister argumentsStart(const CodeOrigin& codeOrigin)
+ {
+ return argumentsStart(codeOrigin.inlineCallFrame);
+ }
+
+ void emitLoadStructure(RegisterID source, RegisterID dest, RegisterID scratch)
+ {
+#if USE(JSVALUE64)
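+        // On 64-bit, the cell holds a 32-bit StructureID rather than a pointer: load
+        // the ID and use it as an index (scaled by pointer size) into the VM's
+        // StructureIDTable to recover the Structure*.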
+ load32(MacroAssembler::Address(source, JSCell::structureIDOffset()), dest);
+ loadPtr(vm()->heap.structureIDTable().base(), scratch);
+ loadPtr(MacroAssembler::BaseIndex(scratch, dest, MacroAssembler::TimesEight), dest);
+#else
+ UNUSED_PARAM(scratch);
+ loadPtr(MacroAssembler::Address(source, JSCell::structureIDOffset()), dest);
+#endif
+ }
+
+ static void emitLoadStructure(AssemblyHelpers& jit, RegisterID base, RegisterID dest, RegisterID scratch)
+ {
+#if USE(JSVALUE64)
+ jit.load32(MacroAssembler::Address(base, JSCell::structureIDOffset()), dest);
+ jit.loadPtr(jit.vm()->heap.structureIDTable().base(), scratch);
+ jit.loadPtr(MacroAssembler::BaseIndex(scratch, dest, MacroAssembler::TimesEight), dest);
+#else
+ UNUSED_PARAM(scratch);
+ jit.loadPtr(MacroAssembler::Address(base, JSCell::structureIDOffset()), dest);
+#endif
+ }
+
+ void emitStoreStructureWithTypeInfo(TrustedImmPtr structure, RegisterID dest, RegisterID)
+ {
+ emitStoreStructureWithTypeInfo(*this, structure, dest);
+ }
+
+ void emitStoreStructureWithTypeInfo(RegisterID structure, RegisterID dest, RegisterID scratch)
+ {
+#if USE(JSVALUE64)
+ load64(MacroAssembler::Address(structure, Structure::structureIDOffset()), scratch);
+ store64(scratch, MacroAssembler::Address(dest, JSCell::structureIDOffset()));
+#else
+ // Store all the info flags using a single 32-bit wide load and store.
+ load32(MacroAssembler::Address(structure, Structure::indexingTypeOffset()), scratch);
+ store32(scratch, MacroAssembler::Address(dest, JSCell::indexingTypeOffset()));
+
+ // Store the StructureID
+ storePtr(structure, MacroAssembler::Address(dest, JSCell::structureIDOffset()));
+#endif
+ }
+
+ static void emitStoreStructureWithTypeInfo(AssemblyHelpers& jit, TrustedImmPtr structure, RegisterID dest);
+
+ Jump jumpIfIsRememberedOrInEden(GPRReg cell)
+ {
+ return branchTest8(MacroAssembler::NonZero, MacroAssembler::Address(cell, JSCell::cellStateOffset()));
+ }
+
+ Jump jumpIfIsRememberedOrInEden(JSCell* cell)
+ {
+ uint8_t* address = reinterpret_cast<uint8_t*>(cell) + JSCell::cellStateOffset();
+ return branchTest8(MacroAssembler::NonZero, MacroAssembler::AbsoluteAddress(address));
+ }
+
+ // Emits the branch structure for typeof. The code emitted by this doesn't fall through. The
+ // functor is called at those points where we have pinpointed a type. One way to use this is to
+ // have the functor emit the code to put the type string into an appropriate register and then
+ // jump out. A secondary functor is used for the call trap and masquerades-as-undefined slow
+ // case. It is passed the unlinked jump to the slow case.
+ template<typename Functor, typename SlowPathFunctor>
+ void emitTypeOf(
+ JSValueRegs regs, GPRReg tempGPR, const Functor& functor,
+ const SlowPathFunctor& slowPathFunctor)
+ {
+ // Implements the following branching structure:
+ //
+ // if (is cell) {
+ // if (is object) {
+ // if (is function) {
+ // return function;
+ // } else if (doesn't have call trap and doesn't masquerade as undefined) {
+ // return object
+ // } else {
+ // return slowPath();
+ // }
+ // } else if (is string) {
+ // return string
+ // } else {
+ // return symbol
+ // }
+ // } else if (is number) {
+ // return number
+ // } else if (is null) {
+ // return object
+ // } else if (is boolean) {
+ // return boolean
+ // } else {
+ // return undefined
+ // }
+
+ Jump notCell = branchIfNotCell(regs);
+
+ GPRReg cellGPR = regs.payloadGPR();
+ Jump notObject = branchIfNotObject(cellGPR);
+
+ Jump notFunction = branchIfNotFunction(cellGPR);
+ functor(TypeofType::Function, false);
+
+ notFunction.link(this);
+ slowPathFunctor(
+ branchTest8(
+ NonZero,
+ Address(cellGPR, JSCell::typeInfoFlagsOffset()),
+ TrustedImm32(MasqueradesAsUndefined | TypeOfShouldCallGetCallData)));
+ functor(TypeofType::Object, false);
+
+ notObject.link(this);
+
+ Jump notString = branchIfNotString(cellGPR);
+ functor(TypeofType::String, false);
+ notString.link(this);
+ functor(TypeofType::Symbol, false);
+
+ notCell.link(this);
+
+ Jump notNumber = branchIfNotNumber(regs, tempGPR);
+ functor(TypeofType::Number, false);
+ notNumber.link(this);
+
+ JumpList notNull = branchIfNotEqual(regs, jsNull());
+ functor(TypeofType::Object, false);
+ notNull.link(this);
+
+ Jump notBoolean = branchIfNotBoolean(regs, tempGPR);
+ functor(TypeofType::Boolean, false);
+ notBoolean.link(this);
+
+ functor(TypeofType::Undefined, true);
+ }
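+
+    // A possible usage sketch for emitTypeOf (illustrative only: resultGPR, valueRegs,
+    // tempGPR and the jump lists are hypothetical, and the typeString() lookup assumes
+    // the VM's SmallStrings table). The boolean passed to the functor is true only for
+    // the final case, which is allowed to fall through:
+    //
+    //     MacroAssembler::JumpList doneJumps, slowCases;
+    //     jit.emitTypeOf(
+    //         valueRegs, tempGPR,
+    //         [&] (TypeofType type, bool mayFallThrough) {
+    //             jit.move(MacroAssembler::TrustedImmPtr(jit.vm()->smallStrings.typeString(type)), resultGPR);
+    //             if (!mayFallThrough)
+    //                 doneJumps.append(jit.jump());
+    //         },
+    //         [&] (MacroAssembler::Jump slowCase) {
+    //             slowCases.append(slowCase);
+    //         });
+    //     doneJumps.link(&jit);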
+
+ Vector<BytecodeAndMachineOffset>& decodedCodeMapFor(CodeBlock*);
+
+ void makeSpaceOnStackForCCall()
+ {
+ unsigned stackOffset = WTF::roundUpToMultipleOf(stackAlignmentBytes(), maxFrameExtentForSlowPathCall);
+ if (stackOffset)
+ subPtr(TrustedImm32(stackOffset), stackPointerRegister);
+ }
+
+ void reclaimSpaceOnStackForCCall()
+ {
+ unsigned stackOffset = WTF::roundUpToMultipleOf(stackAlignmentBytes(), maxFrameExtentForSlowPathCall);
+ if (stackOffset)
+ addPtr(TrustedImm32(stackOffset), stackPointerRegister);
+ }
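+
+    // A possible call-site sketch (illustrative only: operationFoo, valueGPR and
+    // scratchGPR are hypothetical, and setupArgumentsWithExecState() is provided by
+    // CCallHelpers):
+    //
+    //     jit.makeSpaceOnStackForCCall();
+    //     jit.setupArgumentsWithExecState(valueGPR);
+    //     jit.move(MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(operationFoo)), scratchGPR);
+    //     jit.call(scratchGPR);
+    //     jit.reclaimSpaceOnStackForCCall();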
+
+#if USE(JSVALUE64)
+ void emitRandomThunk(JSGlobalObject*, GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, FPRReg result);
+ void emitRandomThunk(GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, GPRReg scratch3, FPRReg result);
+#endif
+
+protected:
+ VM* m_vm;
+ CodeBlock* m_codeBlock;
+ CodeBlock* m_baselineCodeBlock;
+
+ HashMap<CodeBlock*, Vector<BytecodeAndMachineOffset>> m_decodedCodeMaps;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // AssemblyHelpers_h
+
diff --git a/Source/JavaScriptCore/jit/BinarySwitch.cpp b/Source/JavaScriptCore/jit/BinarySwitch.cpp
new file mode 100644
index 000000000..f3ddcfca9
--- /dev/null
+++ b/Source/JavaScriptCore/jit/BinarySwitch.cpp
@@ -0,0 +1,391 @@
+/*
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "BinarySwitch.h"
+
+#if ENABLE(JIT)
+
+#include "JSCInlines.h"
+#include <wtf/ListDump.h>
+
+namespace JSC {
+
+static const bool verbose = false;
+
+static unsigned globalCounter; // We use a different seed every time we are invoked.
+
+BinarySwitch::BinarySwitch(GPRReg value, const Vector<int64_t>& cases, Type type)
+ : m_value(value)
+ , m_weakRandom(globalCounter++)
+ , m_index(0)
+ , m_caseIndex(UINT_MAX)
+ , m_type(type)
+{
+ if (cases.isEmpty())
+ return;
+
+ if (verbose)
+ dataLog("Original cases: ", listDump(cases), "\n");
+
+ for (unsigned i = 0; i < cases.size(); ++i)
+ m_cases.append(Case(cases[i], i));
+
+ std::sort(m_cases.begin(), m_cases.end());
+
+ if (verbose)
+ dataLog("Sorted cases: ", listDump(m_cases), "\n");
+
+ for (unsigned i = 1; i < m_cases.size(); ++i)
+ RELEASE_ASSERT(m_cases[i - 1] < m_cases[i]);
+
+ build(0, false, m_cases.size());
+}
+
+BinarySwitch::~BinarySwitch()
+{
+}
+
+bool BinarySwitch::advance(MacroAssembler& jit)
+{
+ if (m_cases.isEmpty()) {
+ m_fallThrough.append(jit.jump());
+ return false;
+ }
+
+ if (m_index == m_branches.size()) {
+ RELEASE_ASSERT(m_jumpStack.isEmpty());
+ return false;
+ }
+
+ for (;;) {
+ const BranchCode& code = m_branches[m_index++];
+ switch (code.kind) {
+ case NotEqualToFallThrough:
+ switch (m_type) {
+ case Int32:
+ m_fallThrough.append(jit.branch32(
+ MacroAssembler::NotEqual, m_value,
+ MacroAssembler::Imm32(static_cast<int32_t>(m_cases[code.index].value))));
+ break;
+ case IntPtr:
+ m_fallThrough.append(jit.branchPtr(
+ MacroAssembler::NotEqual, m_value,
+ MacroAssembler::ImmPtr(bitwise_cast<const void*>(static_cast<intptr_t>(m_cases[code.index].value)))));
+ break;
+ }
+ break;
+ case NotEqualToPush:
+ switch (m_type) {
+ case Int32:
+ m_jumpStack.append(jit.branch32(
+ MacroAssembler::NotEqual, m_value,
+ MacroAssembler::Imm32(static_cast<int32_t>(m_cases[code.index].value))));
+ break;
+ case IntPtr:
+ m_jumpStack.append(jit.branchPtr(
+ MacroAssembler::NotEqual, m_value,
+ MacroAssembler::ImmPtr(bitwise_cast<const void*>(static_cast<intptr_t>(m_cases[code.index].value)))));
+ break;
+ }
+ break;
+ case LessThanToPush:
+ switch (m_type) {
+ case Int32:
+ m_jumpStack.append(jit.branch32(
+ MacroAssembler::LessThan, m_value,
+ MacroAssembler::Imm32(static_cast<int32_t>(m_cases[code.index].value))));
+ break;
+ case IntPtr:
+ m_jumpStack.append(jit.branchPtr(
+ MacroAssembler::LessThan, m_value,
+ MacroAssembler::ImmPtr(bitwise_cast<const void*>(static_cast<intptr_t>(m_cases[code.index].value)))));
+ break;
+ }
+ break;
+ case Pop:
+ m_jumpStack.takeLast().link(&jit);
+ break;
+ case ExecuteCase:
+ m_caseIndex = code.index;
+ return true;
+ }
+ }
+}
+
+void BinarySwitch::build(unsigned start, bool hardStart, unsigned end)
+{
+ if (verbose)
+ dataLog("Building with start = ", start, ", hardStart = ", hardStart, ", end = ", end, "\n");
+
+ auto append = [&] (const BranchCode& code) {
+ if (verbose)
+ dataLog("==> ", code, "\n");
+ m_branches.append(code);
+ };
+
+ unsigned size = end - start;
+
+ RELEASE_ASSERT(size);
+
+ // This code uses some random numbers to keep things balanced. It's important to keep in mind
+ // that this does not improve average-case throughput under the assumption that all cases fire
+ // with equal probability. It just ensures that there will not be some switch structure that
+ // when combined with some input will always produce pathologically good or pathologically bad
+ // performance.
+
+ const unsigned leafThreshold = 3;
+
+ if (size <= leafThreshold) {
+ if (verbose)
+ dataLog("It's a leaf.\n");
+
+        // It turns out that for three cases or fewer, it's better to just compare each
+ // case individually. This saves 1/6 of a branch on average, and up to 1/3 of a branch in
+ // extreme cases where the divide-and-conquer bottoms out in a lot of 3-case subswitches.
+ //
+ // This assumes that we care about the cost of hitting some case more than we care about
+ // bottoming out in a default case. I believe that in most places where we use switch
+ // statements, we are more likely to hit one of the cases than we are to fall through to
+ // default. Intuitively, if we wanted to improve the performance of default, we would
+ // reduce the value of leafThreshold to 2 or even to 1. See below for a deeper discussion.
+
+ bool allConsecutive = false;
+
+ if ((hardStart || (start && m_cases[start - 1].value == m_cases[start].value - 1))
+ && start + size < m_cases.size()
+ && m_cases[start + size - 1].value == m_cases[start + size].value - 1) {
+ allConsecutive = true;
+ for (unsigned i = 0; i < size - 1; ++i) {
+ if (m_cases[start + i].value + 1 != m_cases[start + i + 1].value) {
+ allConsecutive = false;
+ break;
+ }
+ }
+ }
+
+ if (verbose)
+ dataLog("allConsecutive = ", allConsecutive, "\n");
+
+ Vector<unsigned, 3> localCaseIndices;
+ for (unsigned i = 0; i < size; ++i)
+ localCaseIndices.append(start + i);
+
+ std::random_shuffle(
+ localCaseIndices.begin(), localCaseIndices.end(),
+ [this] (unsigned n) {
+ // We use modulo to get a random number in the range we want fully knowing that
+ // this introduces a tiny amount of bias, but we're fine with such tiny bias.
+ return m_weakRandom.getUint32() % n;
+ });
+
+ for (unsigned i = 0; i < size - 1; ++i) {
+ append(BranchCode(NotEqualToPush, localCaseIndices[i]));
+ append(BranchCode(ExecuteCase, localCaseIndices[i]));
+ append(BranchCode(Pop));
+ }
+
+ if (!allConsecutive)
+ append(BranchCode(NotEqualToFallThrough, localCaseIndices.last()));
+
+ append(BranchCode(ExecuteCase, localCaseIndices.last()));
+ return;
+ }
+
+ if (verbose)
+ dataLog("It's not a leaf.\n");
+
+ // There are two different strategies we could consider here:
+ //
+ // Isolate median and split: pick a median and check if the comparison value is equal to it;
+ // if so, execute the median case. Otherwise check if the value is less than the median, and
+ // recurse left or right based on this. This has two subvariants: we could either first test
+ // equality for the median and then do the less-than, or we could first do the less-than and
+ // then check equality on the not-less-than path.
+ //
+ // Ignore median and split: do a less-than comparison on a value that splits the cases in two
+ // equal-sized halves. Recurse left or right based on the comparison. Do not test for equality
+ // against the median (or anything else); let the recursion handle those equality comparisons
+    // once we bottom out in a list that has 3 cases or fewer (see above).
+ //
+ // I'll refer to these strategies as Isolate and Ignore. I initially believed that Isolate
+ // would be faster since it leads to less branching for some lucky cases. It turns out that
+ // Isolate is almost a total fail in the average, assuming all cases are equally likely. How
+ // bad Isolate is depends on whether you believe that doing two consecutive branches based on
+ // the same comparison is cheaper than doing the compare/branches separately. This is
+ // difficult to evaluate. For small immediates that aren't blinded, we just care about
+ // avoiding a second compare instruction. For large immediates or when blinding is in play, we
+ // also care about the instructions used to materialize the immediate a second time. Isolate
+ // can help with both costs since it involves first doing a < compare+branch on some value,
+ // followed by a == compare+branch on the same exact value (or vice-versa). Ignore will do a <
+ // compare+branch on some value, and then the == compare+branch on that same value will happen
+ // much later.
+ //
+ // To evaluate these costs, I wrote the recurrence relation for Isolate and Ignore, assuming
+ // that ComparisonCost is the cost of a compare+branch and ChainedComparisonCost is the cost
+ // of a compare+branch on some value that you've just done another compare+branch for. These
+ // recurrence relations compute the total cost incurred if you executed the switch statement
+ // on each matching value. So the average cost of hitting some case can be computed as
+ // Isolate[n]/n or Ignore[n]/n, respectively for the two relations.
+ //
+ // Isolate[1] = ComparisonCost
+ // Isolate[2] = (2 + 1) * ComparisonCost
+ // Isolate[3] = (3 + 2 + 1) * ComparisonCost
+ // Isolate[n_] := With[
+ // {medianIndex = Floor[n/2] + If[EvenQ[n], RandomInteger[], 1]},
+ // ComparisonCost + ChainedComparisonCost +
+ // (ComparisonCost * (medianIndex - 1) + Isolate[medianIndex - 1]) +
+ // (2 * ComparisonCost * (n - medianIndex) + Isolate[n - medianIndex])]
+ //
+ // Ignore[1] = ComparisonCost
+ // Ignore[2] = (2 + 1) * ComparisonCost
+ // Ignore[3] = (3 + 2 + 1) * ComparisonCost
+ // Ignore[n_] := With[
+ // {medianIndex = If[EvenQ[n], n/2, Floor[n/2] + RandomInteger[]]},
+ // (medianIndex * ComparisonCost + Ignore[medianIndex]) +
+ // ((n - medianIndex) * ComparisonCost + Ignore[n - medianIndex])]
+ //
+ // This does not account for the average cost of hitting the default case. See further below
+ // for a discussion of that.
+ //
+ // It turns out that for ComparisonCost = 1 and ChainedComparisonCost = 1, Ignore is always
+ // better than Isolate. If we assume that ChainedComparisonCost = 0, then Isolate wins for
+ // switch statements that have 20 cases or fewer, though the margin of victory is never large
+ // - it might sometimes save an average of 0.3 ComparisonCost. For larger switch statements,
+ // we see divergence between the two with Ignore winning. This is of course rather
+ // unrealistic since the chained comparison is never free. For ChainedComparisonCost = 0.5, we
+ // see Isolate winning for 10 cases or fewer, by maybe 0.2 ComparisonCost. Again we see
+ // divergence for large switches with Ignore winning, for example if a switch statement has
+ // 100 cases then Ignore saves one branch on average.
+ //
+ // Our current JIT backends don't provide for optimization for chained comparisons, except for
+ // reducing the code for materializing the immediate if the immediates are large or blinding
+ // comes into play. Probably our JIT backends live somewhere north of
+ // ChainedComparisonCost = 0.5.
+ //
+ // This implies that using the Ignore strategy is likely better. If we wanted to incorporate
+ // the Isolate strategy, we'd want to determine the switch size threshold at which the two
+ // cross over and then use Isolate for switches that are smaller than that size.
+ //
+ // The average cost of hitting the default case is similar, but involves a different cost for
+ // the base cases: you have to assume that you will always fail each branch. For the Ignore
+ // strategy we would get this recurrence relation; the same kind of thing happens to the
+ // Isolate strategy:
+ //
+ // Ignore[1] = ComparisonCost
+ // Ignore[2] = (2 + 2) * ComparisonCost
+ // Ignore[3] = (3 + 3 + 3) * ComparisonCost
+ // Ignore[n_] := With[
+ // {medianIndex = If[EvenQ[n], n/2, Floor[n/2] + RandomInteger[]]},
+ // (medianIndex * ComparisonCost + Ignore[medianIndex]) +
+ // ((n - medianIndex) * ComparisonCost + Ignore[n - medianIndex])]
+ //
+ // This means that if we cared about the default case more, we would likely reduce
+ // leafThreshold. Reducing it to 2 would reduce the average cost of the default case by 1/3
+ // in the most extreme cases (num switch cases = 3, 6, 12, 24, ...). But it would also
+ // increase the average cost of taking one of the non-default cases by 1/3. Typically the
+ // difference is 1/6 in either direction. This makes it a very simple trade-off: if we believe
+ // that the default case is more important then we would want leafThreshold to be 2, and the
+ // default case would become 1/6 faster on average. But we believe that most switch statements
+ // are more likely to take one of the cases than the default, so we use leafThreshold = 3
+ // and get a 1/6 speed-up on average for taking an explicit case.
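+    //
+    // As a small worked instance of the Ignore recurrence (taking ComparisonCost = 1):
+    // Ignore[2] = 3, and for n = 4 the median splits the list evenly, so
+    // Ignore[4] = (2 + Ignore[2]) + (2 + Ignore[2]) = 10, i.e. an average of 2.5
+    // compare+branches per matching value.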
+
+ unsigned medianIndex = (start + end) / 2;
+
+ if (verbose)
+ dataLog("medianIndex = ", medianIndex, "\n");
+
+ // We want medianIndex to point to the thing we will do a less-than compare against. We want
+ // this less-than compare to split the current sublist into equal-sized sublists, or
+ // nearly-equal-sized with some randomness if we're in the odd case. With the above
+ // calculation, in the odd case we will have medianIndex pointing at either the element we
+ // want or the element to the left of the one we want. Consider the case of five elements:
+ //
+ // 0 1 2 3 4
+ //
+ // start will be 0, end will be 5. The average is 2.5, which rounds down to 2. If we do
+ // value < 2, then we will split the list into 2 elements on the left and three on the right.
+ // That's pretty good, but in this odd case we'd like to at random choose 3 instead to ensure
+ // that we don't become unbalanced on the right. This does not improve throughput since one
+ // side will always get shafted, and that side might still be odd, in which case it will also
+ // have two sides and one of them will get shafted - and so on. We just want to avoid
+ // deterministic pathologies.
+ //
+ // In the even case, we will always end up pointing at the element we want:
+ //
+ // 0 1 2 3
+ //
+ // start will be 0, end will be 4. So, the average is 2, which is what we'd like.
+ if (size & 1) {
+ RELEASE_ASSERT(medianIndex - start + 1 == end - medianIndex);
+ medianIndex += m_weakRandom.getUint32() & 1;
+ } else
+ RELEASE_ASSERT(medianIndex - start == end - medianIndex);
+
+ RELEASE_ASSERT(medianIndex > start);
+ RELEASE_ASSERT(medianIndex + 1 < end);
+
+ if (verbose)
+ dataLog("fixed medianIndex = ", medianIndex, "\n");
+
+ append(BranchCode(LessThanToPush, medianIndex));
+ build(medianIndex, true, end);
+ append(BranchCode(Pop));
+ build(start, hardStart, medianIndex);
+}
+
+void BinarySwitch::Case::dump(PrintStream& out) const
+{
+ out.print("<value: " , value, ", index: ", index, ">");
+}
+
+void BinarySwitch::BranchCode::dump(PrintStream& out) const
+{
+ switch (kind) {
+ case NotEqualToFallThrough:
+ out.print("NotEqualToFallThrough");
+ break;
+ case NotEqualToPush:
+ out.print("NotEqualToPush");
+ break;
+ case LessThanToPush:
+ out.print("LessThanToPush");
+ break;
+ case Pop:
+ out.print("Pop");
+ break;
+ case ExecuteCase:
+ out.print("ExecuteCase");
+ break;
+ }
+
+ if (index != UINT_MAX)
+ out.print("(", index, ")");
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
diff --git a/Source/JavaScriptCore/jit/BinarySwitch.h b/Source/JavaScriptCore/jit/BinarySwitch.h
new file mode 100644
index 000000000..3ac08b701
--- /dev/null
+++ b/Source/JavaScriptCore/jit/BinarySwitch.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef BinarySwitch_h
+#define BinarySwitch_h
+
+#if ENABLE(JIT)
+
+#include "GPRInfo.h"
+#include "MacroAssembler.h"
+#include <wtf/WeakRandom.h>
+
+namespace JSC {
+
+// The BinarySwitch class makes it easy to emit a switch statement over either
+// 32-bit integers or pointers, where the switch uses a tree of branches
+// rather than a jump table. This makes it particularly useful if the case
+// values are too far apart to make a jump table practical, or if there are
+// sufficiently few cases that the total cost of log(numCases) branches is
+// less than the cost of an indirected jump.
+//
+// In an effort to simplify the logic of emitting code for each case, this
+// uses an iterator style, rather than a functor callback style. This makes
+// sense because even the iterator implementation found herein is relatively
+// simple, whereas the code it's used from is usually quite complex - one
+// example being the trie-of-trees string switch implementation, where the
+// code emitted for each case involves recursing to emit code for a sub-trie.
+//
+// Use this like so:
+//
+// BinarySwitch binarySwitch(valueReg, casesVector, BinarySwitch::Int32);
+// while (binarySwitch.advance(jit)) {
+//     int64_t value = binarySwitch.caseValue();
+//     unsigned index = binarySwitch.caseIndex(); // index into casesVector, above
+//     ... // generate code for this case
+//     ... = jit.jump(); // you have to jump out yourself; falling through causes undefined behavior
+// }
+// binarySwitch.fallThrough().link(&jit);
+
+class BinarySwitch {
+public:
+ enum Type {
+ Int32,
+ IntPtr
+ };
+
+ BinarySwitch(GPRReg value, const Vector<int64_t>& cases, Type);
+ ~BinarySwitch();
+
+ unsigned caseIndex() const { return m_cases[m_caseIndex].index; }
+ int64_t caseValue() const { return m_cases[m_caseIndex].value; }
+
+ bool advance(MacroAssembler&);
+
+ MacroAssembler::JumpList& fallThrough() { return m_fallThrough; }
+
+private:
+ void build(unsigned start, bool hardStart, unsigned end);
+
+ GPRReg m_value;
+
+ struct Case {
+ Case() { }
+
+ Case(int64_t value, unsigned index)
+ : value(value)
+ , index(index)
+ {
+ }
+
+ bool operator<(const Case& other) const
+ {
+ return value < other.value;
+ }
+
+ void dump(PrintStream& out) const;
+
+ int64_t value;
+ unsigned index;
+ };
+
+ Vector<Case> m_cases;
+
+ enum BranchKind {
+ NotEqualToFallThrough,
+ NotEqualToPush,
+ LessThanToPush,
+ Pop,
+ ExecuteCase
+ };
+
+ struct BranchCode {
+ BranchCode() { }
+
+ BranchCode(BranchKind kind, unsigned index = UINT_MAX)
+ : kind(kind)
+ , index(index)
+ {
+ }
+
+ void dump(PrintStream& out) const;
+
+ BranchKind kind;
+ unsigned index;
+ };
+
+ WeakRandom m_weakRandom;
+
+ Vector<BranchCode> m_branches;
+
+ unsigned m_index;
+ unsigned m_caseIndex;
+ Vector<MacroAssembler::Jump> m_jumpStack;
+
+ MacroAssembler::JumpList m_fallThrough;
+
+ Type m_type;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // BinarySwitch_h
+
diff --git a/Source/JavaScriptCore/jit/CCallHelpers.h b/Source/JavaScriptCore/jit/CCallHelpers.h
new file mode 100644
index 000000000..e649d39e6
--- /dev/null
+++ b/Source/JavaScriptCore/jit/CCallHelpers.h
@@ -0,0 +1,2215 @@
+/*
+ * Copyright (C) 2011, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CCallHelpers_h
+#define CCallHelpers_h
+
+#if ENABLE(JIT)
+
+#include "AssemblyHelpers.h"
+#include "GPRInfo.h"
+#include "StackAlignment.h"
+
+namespace JSC {
+
+#if CPU(MIPS) || (OS(WINDOWS) && CPU(X86_64))
+#define POKE_ARGUMENT_OFFSET 4
+#else
+#define POKE_ARGUMENT_OFFSET 0
+#endif
+
+class CCallHelpers : public AssemblyHelpers {
+public:
+ CCallHelpers(VM* vm, CodeBlock* codeBlock = 0)
+ : AssemblyHelpers(vm, codeBlock)
+ {
+ }
+
+ // The most general helper for setting arguments that fit in a GPR, if you can compute each
+ // argument without using any argument registers. You usually want one of the setupArguments*()
+ // methods below instead of this. This thing is most useful if you have *a lot* of arguments.
+ template<typename Functor>
+ void setupArgument(unsigned argumentIndex, const Functor& functor)
+ {
+ unsigned numberOfRegs = GPRInfo::numberOfArgumentRegisters; // Disguise the constant from clang's tautological compare warning.
+ if (argumentIndex < numberOfRegs) {
+ functor(GPRInfo::toArgumentRegister(argumentIndex));
+ return;
+ }
+
+ functor(GPRInfo::nonArgGPR0);
+ poke(GPRInfo::nonArgGPR0, POKE_ARGUMENT_OFFSET + argumentIndex - GPRInfo::numberOfArgumentRegisters);
+ }
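+
+    // A possible usage sketch (illustrative only; globalObject is hypothetical):
+    //
+    //     jit.setupArgument(0, [&] (GPRReg reg) { jit.move(GPRInfo::callFrameRegister, reg); });
+    //     jit.setupArgument(1, [&] (GPRReg reg) { jit.move(MacroAssembler::TrustedImmPtr(globalObject), reg); });
+    //
+    // Each functor materializes one argument into the supplied register, or into
+    // nonArgGPR0, which is then poked to the stack for arguments beyond the register count.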
+
+ void setupArgumentsWithExecState() { setupArgumentsExecState(); }
+
+    // These methods are used to sort arguments into the correct registers.
+ // On X86 we use cdecl calling conventions, which pass all arguments on the
+ // stack. On other architectures we may need to sort values into the
+ // correct registers.
+#if !NUMBER_OF_ARGUMENT_REGISTERS
+ unsigned m_callArgumentOffset;
+ void resetCallArguments() { m_callArgumentOffset = 0; }
+
+    // These methods are used internally to implement the callOperation methods.
+ void addCallArgument(GPRReg value)
+ {
+ poke(value, m_callArgumentOffset++);
+ }
+ void addCallArgument(TrustedImm32 imm)
+ {
+ poke(imm, m_callArgumentOffset++);
+ }
+ void addCallArgument(TrustedImmPtr pointer)
+ {
+ poke(pointer, m_callArgumentOffset++);
+ }
+ void addCallArgument(FPRReg value)
+ {
+ storeDouble(value, Address(stackPointerRegister, m_callArgumentOffset * sizeof(void*)));
+ m_callArgumentOffset += sizeof(double) / sizeof(void*);
+ }
+
+ ALWAYS_INLINE void setupArguments(FPRReg arg1)
+ {
+ resetCallArguments();
+ addCallArgument(arg1);
+ }
+
+ ALWAYS_INLINE void setupArguments(FPRReg arg1, FPRReg arg2)
+ {
+ resetCallArguments();
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ }
+
+ ALWAYS_INLINE void setupArguments(GPRReg arg1)
+ {
+ resetCallArguments();
+ addCallArgument(arg1);
+ }
+
+ ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2)
+ {
+ resetCallArguments();
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ }
+
+ ALWAYS_INLINE void setupArguments(TrustedImmPtr arg1, GPRReg arg2)
+ {
+ resetCallArguments();
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ }
+
+ ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2, GPRReg arg3)
+ {
+ resetCallArguments();
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ }
+
+ ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImmPtr arg4)
+ {
+ resetCallArguments();
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ }
+
+ ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4, GPRReg arg5)
+ {
+ resetCallArguments();
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ }
+
+ ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4, GPRReg arg5, GPRReg arg6)
+ {
+ resetCallArguments();
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ addCallArgument(arg6);
+ }
+
+ ALWAYS_INLINE void setupArguments(TrustedImmPtr arg1)
+ {
+ resetCallArguments();
+ addCallArgument(arg1);
+ }
+
+ ALWAYS_INLINE void setupArgumentsExecState()
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImmPtr arg2)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImm32 arg2)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImm32 arg2, TrustedImm32 arg3)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, GPRReg arg3)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, TrustedImm32 arg4)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, TrustedImmPtr arg3)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, TrustedImm32 arg3)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, TrustedImm32 arg5)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4, TrustedImm32 arg5)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, GPRReg arg5, TrustedImm32 arg6)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ addCallArgument(arg6);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImmPtr arg3)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImmPtr arg2, TrustedImm32 arg3)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImmPtr arg2, TrustedImmPtr arg3)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImmPtr arg3)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImm32 arg2, GPRReg arg3, TrustedImmPtr arg4)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImmPtr arg4)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4, TrustedImm32 arg5)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, GPRReg arg5)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImm32 arg4)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2, GPRReg arg3, GPRReg arg4)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImmPtr arg5)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, TrustedImmPtr arg6)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ addCallArgument(arg6);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, TrustedImm32 arg5, TrustedImmPtr arg6)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ addCallArgument(arg6);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImmPtr arg4)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, GPRReg arg3, GPRReg arg4)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, TrustedImmPtr arg5)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImmPtr arg5)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, TrustedImm32 arg5)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ addCallArgument(arg6);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, TrustedImm32 arg6)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ addCallArgument(arg6);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6, TrustedImmPtr arg7)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ addCallArgument(arg6);
+ addCallArgument(arg7);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(FPRReg arg1, GPRReg arg2)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, FPRReg arg3)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ }
+#endif // !NUMBER_OF_ARGUMENT_REGISTERS
+ // These methods are suitable for any calling convention that provides for
+ // at least 4 argument registers, e.g. X86_64, ARMv7.
+#if NUMBER_OF_ARGUMENT_REGISTERS >= 4
+ template<GPRReg destA, GPRReg destB>
+ void setupTwoStubArgsGPR(GPRReg srcA, GPRReg srcB)
+ {
+        // Assuming that srcA != srcB, there are 7 interesting states the registers may be in:
+        // (1) both are already in arg regs, the right way around.
+        // (2) both are already in arg regs, the wrong way around.
+        // (3) neither is currently in an arg register.
+        // (4) srcA is in its correct reg.
+        // (5) srcA is in the incorrect reg.
+        // (6) srcB is in its correct reg.
+        // (7) srcB is in the incorrect reg.
+        //
+        // The trivial approach is to simply emit two moves, putting srcA in place and then srcB in
+        // place (the MacroAssembler will omit redundant moves). This approach is safe in
+        // cases 1, 3, 4, 5, 6, and in cases where srcA == srcB. The two problem cases are 2
+        // (which requires a swap) and 7 (where srcB must be moved first, to avoid trampling it).
+
+ if (srcB != destA) {
+ // Handle the easy cases - two simple moves.
+ move(srcA, destA);
+ move(srcB, destB);
+ } else if (srcA != destB) {
+ // Handle the non-swap case - just put srcB in place first.
+ move(srcB, destB);
+ move(srcA, destA);
+ } else
+ swap(destA, destB);
+ }
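+
+    // Illustrative note (not part of the imported patch): with destA/destB bound to
+    // argumentGPR1/argumentGPR2, only the "wrong way around" case (2) needs swap().
+    // For example:
+    //
+    //     // srcA already sits in destB and srcB in destA, so two plain moves
+    //     // would clobber one of the values; the swap branch handles it.
+    //     setupTwoStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2>(
+    //         GPRInfo::argumentGPR2, GPRInfo::argumentGPR1);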
+
+ template<GPRReg destA, GPRReg destB, GPRReg destC>
+ void setupThreeStubArgsGPR(GPRReg srcA, GPRReg srcB, GPRReg srcC)
+ {
+        // If neither of srcB/srcC is in our way, then we can move srcA into place.
+        // Then we can use setupTwoStubArgs to fix srcB/srcC.
+ if (srcB != destA && srcC != destA) {
+ move(srcA, destA);
+ setupTwoStubArgsGPR<destB, destC>(srcB, srcC);
+ return;
+ }
+
+        // If neither of srcA/srcC is in our way, then we can move srcB into place.
+        // Then we can use setupTwoStubArgs to fix srcA/srcC.
+ if (srcA != destB && srcC != destB) {
+ move(srcB, destB);
+ setupTwoStubArgsGPR<destA, destC>(srcA, srcC);
+ return;
+ }
+
+        // If neither of srcA/srcB is in our way, then we can move srcC into place.
+        // Then we can use setupTwoStubArgs to fix srcA/srcB.
+ if (srcA != destC && srcB != destC) {
+ move(srcC, destC);
+ setupTwoStubArgsGPR<destA, destB>(srcA, srcB);
+ return;
+ }
+
+        // If we get here, we haven't been able to move any of srcA/srcB/srcC.
+        // Since all three are blocked, all three must already be in argument registers.
+        // But are they in the right ones?
+
+ // First, ensure srcA is in place.
+ if (srcA != destA) {
+ swap(srcA, destA);
+
+            // If srcA wasn't already in destA, then one of srcB/srcC must have been.
+            ASSERT(srcB == destA || srcC == destA);
+            // If srcB was in destA it no longer is (due to the swap).
+            // Otherwise srcC must have been. Mark it as moved.
+ if (srcB == destA)
+ srcB = srcA;
+ else
+ srcC = srcA;
+ }
+
+ // Either srcB & srcC need swapping, or we're all done.
+ ASSERT((srcB == destB || srcC == destC)
+ || (srcB == destC || srcC == destB));
+
+ if (srcB != destB)
+ swap(destB, destC);
+ }
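+
+    // Illustrative note (not part of the imported patch): the worst case for the
+    // three-argument shuffle is a full rotation, where every source already sits in
+    // another argument's destination. For example:
+    //
+    //     // srcA is in destC, srcB in destA, srcC in destB: none can be moved
+    //     // directly, so the code above first swaps srcA into destA and then
+    //     // swaps destB/destC to finish.
+    //     setupThreeStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2, GPRInfo::argumentGPR3>(
+    //         GPRInfo::argumentGPR3, GPRInfo::argumentGPR1, GPRInfo::argumentGPR2);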
+
+#if CPU(X86_64) || CPU(ARM64)
+ template<FPRReg destA, FPRReg destB>
+ void setupTwoStubArgsFPR(FPRReg srcA, FPRReg srcB)
+ {
+        // Assuming that srcA != srcB, there are 7 interesting states the registers may be in:
+        // (1) both are already in arg regs, the right way around.
+        // (2) both are already in arg regs, the wrong way around.
+        // (3) neither is currently in an arg register.
+        // (4) srcA is in its correct reg.
+        // (5) srcA is in the incorrect reg.
+        // (6) srcB is in its correct reg.
+        // (7) srcB is in the incorrect reg.
+        //
+        // The trivial approach is to simply emit two moves, putting srcA in place and then srcB in
+        // place (the MacroAssembler will omit redundant moves). This approach is safe in
+        // cases 1, 3, 4, 5, 6, and in cases where srcA == srcB. The two problem cases are 2
+        // (which requires a swap) and 7 (where srcB must be moved first, to avoid trampling it).
+
+ if (srcB != destA) {
+ // Handle the easy cases - two simple moves.
+ moveDouble(srcA, destA);
+ moveDouble(srcB, destB);
+ return;
+ }
+
+ if (srcA != destB) {
+ // Handle the non-swap case - just put srcB in place first.
+ moveDouble(srcB, destB);
+ moveDouble(srcA, destA);
+ return;
+ }
+
+ ASSERT(srcB == destA && srcA == destB);
+        // Need to swap; pick a temporary register that is neither destA nor destB.
+        FPRReg temp;
+        if (destA != FPRInfo::argumentFPR3 && destB != FPRInfo::argumentFPR3)
+            temp = FPRInfo::argumentFPR3;
+        else if (destA != FPRInfo::argumentFPR2 && destB != FPRInfo::argumentFPR2)
+            temp = FPRInfo::argumentFPR2;
+        else {
+            ASSERT(destA != FPRInfo::argumentFPR1 && destB != FPRInfo::argumentFPR1);
+            temp = FPRInfo::argumentFPR1;
+ }
+ moveDouble(destA, temp);
+ moveDouble(destB, destA);
+ moveDouble(temp, destB);
+ }
+#endif
+ void setupStubArguments(GPRReg arg1, GPRReg arg2)
+ {
+ setupTwoStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2>(arg1, arg2);
+ }
+
+ void setupStubArguments(GPRReg arg1, GPRReg arg2, GPRReg arg3)
+ {
+ setupThreeStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2, GPRInfo::argumentGPR3>(arg1, arg2, arg3);
+ }
+
+#if CPU(X86_64) || CPU(ARM64)
+ ALWAYS_INLINE void setupArguments(FPRReg arg1)
+ {
+ moveDouble(arg1, FPRInfo::argumentFPR0);
+ }
+
+ ALWAYS_INLINE void setupArguments(FPRReg arg1, FPRReg arg2)
+ {
+ setupTwoStubArgsFPR<FPRInfo::argumentFPR0, FPRInfo::argumentFPR1>(arg1, arg2);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(FPRReg arg1, GPRReg arg2)
+ {
+#if OS(WINDOWS) && CPU(X86_64)
+        // On Windows, arguments map to designated registers based on their argument positions, even when there are interleaved scalar and floating point arguments.
+        // See http://msdn.microsoft.com/en-us/library/zthk2dkh.aspx
+ moveDouble(arg1, FPRInfo::argumentFPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+#else
+ moveDouble(arg1, FPRInfo::argumentFPR0);
+ move(arg2, GPRInfo::argumentGPR1);
+#endif
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
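+
+    // Illustrative note (not part of the imported patch): for a call shaped like
+    // operation(ExecState*, double, int32_t), the Win64 convention assigns registers
+    // strictly by position, so the ExecState* takes the first integer register, the
+    // double takes the *second* FP register, and the int32_t takes the *third*
+    // integer register; hence the FPR1/GPR2 choice above instead of the System V
+    // FPR0/GPR1 packing. A (hypothetical) caller would still just write:
+    //
+    //     setupArgumentsWithExecState(FPRInfo::fpRegT0, GPRInfo::regT1);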
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, FPRReg arg3)
+ {
+#if OS(WINDOWS) && CPU(X86_64)
+        // On Windows, arguments map to designated registers based on their argument positions, even when there are interleaved scalar and floating point arguments.
+        // See http://msdn.microsoft.com/en-us/library/zthk2dkh.aspx
+ moveDouble(arg3, FPRInfo::argumentFPR3);
+#else
+ moveDouble(arg3, FPRInfo::argumentFPR0);
+#endif
+ setupStubArguments(arg1, arg2);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+#elif CPU(ARM)
+#if CPU(ARM_HARDFP)
+ ALWAYS_INLINE void setupArguments(FPRReg arg1)
+ {
+ moveDouble(arg1, FPRInfo::argumentFPR0);
+ }
+
+ ALWAYS_INLINE void setupArguments(FPRReg arg1, FPRReg arg2)
+ {
+ if (arg2 != FPRInfo::argumentFPR0) {
+ moveDouble(arg1, FPRInfo::argumentFPR0);
+ moveDouble(arg2, FPRInfo::argumentFPR1);
+ } else if (arg1 != FPRInfo::argumentFPR1) {
+ moveDouble(arg2, FPRInfo::argumentFPR1);
+ moveDouble(arg1, FPRInfo::argumentFPR0);
+ } else {
+ // Swap arg1, arg2.
+ moveDouble(FPRInfo::argumentFPR0, ARMRegisters::d2);
+ moveDouble(FPRInfo::argumentFPR1, FPRInfo::argumentFPR0);
+ moveDouble(ARMRegisters::d2, FPRInfo::argumentFPR1);
+ }
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(FPRReg arg1, GPRReg arg2)
+ {
+ moveDouble(arg1, FPRInfo::argumentFPR0);
+ move(arg2, GPRInfo::argumentGPR1);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, FPRReg arg3)
+ {
+ moveDouble(arg3, FPRInfo::argumentFPR0);
+ setupStubArguments(arg1, arg2);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32, FPRReg arg2, GPRReg arg3)
+ {
+ moveDouble(arg2, FPRInfo::argumentFPR0);
+ move(arg3, GPRInfo::argumentGPR1);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32, FPRReg arg4)
+ {
+ moveDouble(arg4, FPRInfo::argumentFPR0);
+ setupStubArguments(arg1, arg2);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+#else
+ ALWAYS_INLINE void setupArguments(FPRReg arg1)
+ {
+ assembler().vmov(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, arg1);
+ }
+
+ ALWAYS_INLINE void setupArguments(FPRReg arg1, FPRReg arg2)
+ {
+ assembler().vmov(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, arg1);
+ assembler().vmov(GPRInfo::argumentGPR2, GPRInfo::argumentGPR3, arg2);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(FPRReg arg1, GPRReg arg2)
+ {
+ move(arg2, GPRInfo::argumentGPR3);
+ assembler().vmov(GPRInfo::argumentGPR1, GPRInfo::argumentGPR2, arg1);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, FPRReg arg3)
+ {
+ setupStubArguments(arg1, arg2);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ assembler().vmov(GPRInfo::argumentGPR3, GPRInfo::nonArgGPR0, arg3);
+ poke(GPRInfo::nonArgGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, FPRReg arg2, GPRReg arg3)
+ {
+ poke(arg3, POKE_ARGUMENT_OFFSET);
+ move(arg1, GPRInfo::argumentGPR1);
+ assembler().vmov(GPRInfo::argumentGPR2, GPRInfo::argumentGPR3, arg2);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, FPRReg arg4)
+ {
+ setupStubArguments(arg1, arg2);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ move(arg3, GPRInfo::argumentGPR3);
+ assembler().vmov(GPRInfo::nonArgGPR0, GPRInfo::nonArgGPR1, arg4);
+ poke(GPRInfo::nonArgGPR0, POKE_ARGUMENT_OFFSET);
+ poke(GPRInfo::nonArgGPR1, POKE_ARGUMENT_OFFSET + 1);
+ }
+#endif // CPU(ARM_HARDFP)
+#elif CPU(MIPS)
+ ALWAYS_INLINE void setupArguments(FPRReg arg1)
+ {
+ moveDouble(arg1, FPRInfo::argumentFPR0);
+ }
+
+ ALWAYS_INLINE void setupArguments(FPRReg arg1, FPRReg arg2)
+ {
+ if (arg2 != FPRInfo::argumentFPR0) {
+ moveDouble(arg1, FPRInfo::argumentFPR0);
+ moveDouble(arg2, FPRInfo::argumentFPR1);
+ } else if (arg1 != FPRInfo::argumentFPR1) {
+ moveDouble(arg2, FPRInfo::argumentFPR1);
+ moveDouble(arg1, FPRInfo::argumentFPR0);
+ } else {
+ // Swap arg1, arg2.
+ swapDouble(FPRInfo::argumentFPR0, FPRInfo::argumentFPR1);
+ }
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(FPRReg arg1, GPRReg arg2)
+ {
+ assembler().vmov(GPRInfo::argumentGPR2, GPRInfo::argumentGPR3, arg1);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ poke(arg2, 4);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, FPRReg arg3)
+ {
+ setupStubArguments(arg1, arg2);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ poke(arg3, 4);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32, FPRReg arg2, GPRReg arg3)
+ {
+ setupArgumentsWithExecState(arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32, FPRReg arg4)
+ {
+ setupArgumentsWithExecState(arg1, arg2, arg4);
+ }
+#elif CPU(SH4)
+ ALWAYS_INLINE void setupArguments(FPRReg arg1)
+ {
+ moveDouble(arg1, FPRInfo::argumentFPR0);
+ }
+
+ ALWAYS_INLINE void setupArguments(FPRReg arg1, FPRReg arg2)
+ {
+ if (arg2 != FPRInfo::argumentFPR0) {
+ moveDouble(arg1, FPRInfo::argumentFPR0);
+ moveDouble(arg2, FPRInfo::argumentFPR1);
+ } else if (arg1 != FPRInfo::argumentFPR1) {
+ moveDouble(arg2, FPRInfo::argumentFPR1);
+ moveDouble(arg1, FPRInfo::argumentFPR0);
+ } else
+ swapDouble(FPRInfo::argumentFPR0, FPRInfo::argumentFPR1);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(FPRReg arg1, GPRReg arg2)
+ {
+ moveDouble(arg1, FPRInfo::argumentFPR0);
+ move(arg2, GPRInfo::argumentGPR1);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, FPRReg arg3)
+ {
+ moveDouble(arg3, FPRInfo::argumentFPR0);
+ setupStubArguments(arg1, arg2);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+#else
+#error "JIT not supported on this platform."
+#endif
+
+ ALWAYS_INLINE void setupArguments(GPRReg arg1)
+ {
+ move(arg1, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArguments(TrustedImmPtr arg1, GPRReg arg2)
+ {
+ move(arg2, GPRInfo::argumentGPR1);
+ move(arg1, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2)
+ {
+ setupTwoStubArgsGPR<GPRInfo::argumentGPR0, GPRInfo::argumentGPR1>(arg1, arg2);
+ }
+
+ ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2, GPRReg arg3)
+ {
+ setupThreeStubArgsGPR<GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, GPRInfo::argumentGPR2>(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImmPtr arg4)
+ {
+ setupTwoStubArgsGPR<GPRInfo::argumentGPR0, GPRInfo::argumentGPR1>(arg1, arg2);
+ move(arg3, GPRInfo::argumentGPR2);
+ move(arg4, GPRInfo::argumentGPR3);
+ }
+
+ ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4)
+ {
+ setupThreeStubArgsGPR<GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, GPRInfo::argumentGPR2>(arg1, arg2, arg3);
+ move(arg4, GPRInfo::argumentGPR3);
+ }
+
+ ALWAYS_INLINE void setupArguments(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, TrustedImmPtr arg4)
+ {
+ setupTwoStubArgsGPR<GPRInfo::argumentGPR0, GPRInfo::argumentGPR2>(arg1, arg3);
+ move(arg2, GPRInfo::argumentGPR1);
+ move(arg4, GPRInfo::argumentGPR3);
+ }
+
+ ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4, GPRReg arg5, GPRReg arg6)
+ {
+ poke(arg6, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg5, POKE_ARGUMENT_OFFSET);
+ setupTwoStubArgsGPR<GPRInfo::argumentGPR0, GPRInfo::argumentGPR1>(arg1, arg2);
+ move(arg3, GPRInfo::argumentGPR2);
+ move(arg4, GPRInfo::argumentGPR3);
+ }
+
+ ALWAYS_INLINE void setupArguments(TrustedImmPtr arg1)
+ {
+ move(arg1, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsExecState()
+ {
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1)
+ {
+ move(arg1, GPRInfo::argumentGPR1);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1)
+ {
+ move(arg1, GPRInfo::argumentGPR1);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1)
+ {
+ move(arg1, GPRInfo::argumentGPR1);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+#if OS(WINDOWS) && CPU(X86_64)
+ ALWAYS_INLINE void setupArgumentsWithExecStateForCallWithSlowPathReturnType(TrustedImm32 arg1)
+ {
+ move(arg1, GPRInfo::argumentGPR2);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
+ }
+#endif
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2)
+ {
+ setupStubArguments(arg1, arg2);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2)
+ {
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+#if CPU(X86_64) || CPU(ARM64)
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm64 arg2)
+ {
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm64 arg1, GPRReg arg2)
+ {
+ move(arg2, GPRInfo::argumentGPR2); // Move this first, so setting arg1 does not trample!
+ move(arg1, GPRInfo::argumentGPR1);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+#endif
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2)
+ {
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, ImmPtr arg2)
+ {
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2)
+ {
+ move(arg2, GPRInfo::argumentGPR2); // Move this first, so setting arg1 does not trample!
+ move(arg1, GPRInfo::argumentGPR1);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2)
+ {
+ move(arg2, GPRInfo::argumentGPR2); // Move this first, so setting arg1 does not trample!
+ move(arg1, GPRInfo::argumentGPR1);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(ImmPtr arg1, GPRReg arg2)
+ {
+ move(arg2, GPRInfo::argumentGPR2); // Move this first, so setting arg1 does not trample!
+ move(arg1, GPRInfo::argumentGPR1);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2)
+ {
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImmPtr arg2)
+ {
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImm32 arg2)
+ {
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImm32 arg2, TrustedImm32 arg3)
+ {
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3)
+ {
+ setupStubArguments(arg1, arg2, arg3);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3)
+ {
+ setupStubArguments(arg1, arg2);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, GPRReg arg3)
+ {
+ setupTwoStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR3>(arg1, arg3);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3)
+ {
+ setupTwoStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR3>(arg1, arg3);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, TrustedImmPtr arg3)
+ {
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3)
+ {
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, TrustedImm32 arg3)
+ {
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImmPtr arg3)
+ {
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3)
+ {
+ setupStubArguments(arg1, arg2);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2, GPRReg arg3)
+ {
+ move(arg3, GPRInfo::argumentGPR3);
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImmPtr arg2, GPRReg arg3)
+ {
+ move(arg3, GPRInfo::argumentGPR3);
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3)
+ {
+ move(arg3, GPRInfo::argumentGPR3);
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, TrustedImm32 arg3)
+ {
+ move(arg3, GPRInfo::argumentGPR3);
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, TrustedImm32 arg3)
+ {
+ move(arg2, GPRInfo::argumentGPR2);
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3)
+ {
+ setupTwoStubArgsGPR<GPRInfo::argumentGPR2, GPRInfo::argumentGPR3>(arg2, arg3);
+ move(arg1, GPRInfo::argumentGPR1);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3)
+ {
+ setupTwoStubArgsGPR<GPRInfo::argumentGPR2, GPRInfo::argumentGPR3>(arg2, arg3);
+ move(arg1, GPRInfo::argumentGPR1);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3)
+ {
+ move(arg2, GPRInfo::argumentGPR2); // In case arg2 is argumentGPR1.
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImmPtr arg3)
+ {
+ move(arg2, GPRInfo::argumentGPR2); // In case arg2 is argumentGPR1.
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImmPtr arg2, TrustedImm32 arg3)
+ {
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImmPtr arg2, TrustedImmPtr arg3)
+ {
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2, TrustedImm32 arg3)
+ {
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+#endif // NUMBER_OF_ARGUMENT_REGISTERS >= 4
+ // These methods are suitable for any calling convention that provides for
+ // exactly 4 argument registers, e.g. ARMv7.
+#if NUMBER_OF_ARGUMENT_REGISTERS == 4
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4)
+ {
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
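+
+    // Illustrative note (not part of the imported patch): on a four-register
+    // convention the ExecState plus arg1..arg3 already consume every argument
+    // register, so arg4 and any later arguments are written to the outgoing
+    // stack slots with poke(). POKE_ARGUMENT_OFFSET (defined per platform
+    // earlier in this header) accounts for stack slots that some conventions
+    // reserve below the poked arguments.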
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4)
+ {
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, GPRReg arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, TrustedImmPtr arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4)
+ {
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4)
+ {
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4)
+ {
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4)
+ {
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImm32 arg4)
+ {
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4, TrustedImm32 arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+#if CPU(X86_64)
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm64 arg4)
+ {
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+#endif
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImmPtr arg4)
+ {
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5, TrustedImm32 arg6)
+ {
+ poke(arg6, POKE_ARGUMENT_OFFSET + 2);
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImmPtr arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImm32 arg4)
+ {
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImm32 arg4, TrustedImm32 arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2, GPRReg arg3, GPRReg arg4)
+ {
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4)
+ {
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, TrustedImm32 arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4)
+ {
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4)
+ {
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImmPtr arg4)
+ {
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4)
+ {
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4)
+ {
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImmPtr arg4)
+ {
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4)
+ {
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, GPRReg arg3, GPRReg arg4)
+ {
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, TrustedImm32 arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, TrustedImmPtr arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4)
+ {
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4, TrustedImm32 arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImmPtr arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, GPRReg arg4, TrustedImm32 arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6)
+ {
+ poke(arg6, POKE_ARGUMENT_OFFSET + 2);
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, TrustedImm32 arg6)
+ {
+ poke(arg6, POKE_ARGUMENT_OFFSET + 2);
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5, TrustedImm32 arg6)
+ {
+ poke(arg6, POKE_ARGUMENT_OFFSET + 2);
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6, TrustedImmPtr arg7)
+ {
+ poke(arg7, POKE_ARGUMENT_OFFSET + 3);
+ poke(arg6, POKE_ARGUMENT_OFFSET + 2);
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, GPRReg arg4, GPRReg arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, TrustedImm32 arg5, TrustedImmPtr arg6)
+ {
+ poke(arg6, POKE_ARGUMENT_OFFSET + 2);
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, TrustedImmPtr arg6)
+ {
+ poke(arg6, POKE_ARGUMENT_OFFSET + 2);
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, TrustedImm32 arg5, TrustedImmPtr arg6)
+ {
+ poke(arg6, POKE_ARGUMENT_OFFSET + 2);
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, TrustedImm32 arg6)
+ {
+ poke(arg6, POKE_ARGUMENT_OFFSET + 2);
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, GPRReg arg5, GPRReg arg6)
+ {
+ poke(arg6, POKE_ARGUMENT_OFFSET + 2);
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, GPRReg arg5, TrustedImm32 arg6)
+ {
+ poke(arg6, POKE_ARGUMENT_OFFSET + 2);
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, TrustedImmPtr arg6)
+ {
+ poke(arg6, POKE_ARGUMENT_OFFSET + 2);
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6, GPRReg arg7)
+ {
+ poke(arg7, POKE_ARGUMENT_OFFSET + 3);
+ poke(arg6, POKE_ARGUMENT_OFFSET + 2);
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6, GPRReg arg7)
+ {
+ poke(arg7, POKE_ARGUMENT_OFFSET + 3);
+ poke(arg6, POKE_ARGUMENT_OFFSET + 2);
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6, GPRReg arg7, TrustedImmPtr arg8)
+ {
+ poke(arg8, POKE_ARGUMENT_OFFSET + 4);
+ poke(arg7, POKE_ARGUMENT_OFFSET + 3);
+ poke(arg6, POKE_ARGUMENT_OFFSET + 2);
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5, GPRReg arg6, GPRReg arg7)
+ {
+ poke(arg7, POKE_ARGUMENT_OFFSET + 3);
+ poke(arg6, POKE_ARGUMENT_OFFSET + 2);
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, TrustedImm32 arg5, GPRReg arg6, GPRReg arg7)
+ {
+ poke(arg7, POKE_ARGUMENT_OFFSET + 3);
+ poke(arg6, POKE_ARGUMENT_OFFSET + 2);
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4, GPRReg arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET);
+ setupTwoStubArgsGPR<GPRInfo::argumentGPR0, GPRInfo::argumentGPR1>(arg1, arg2);
+ move(arg3, GPRInfo::argumentGPR2);
+ move(arg4, GPRInfo::argumentGPR3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, TrustedImm32 arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+#endif // NUMBER_OF_ARGUMENT_REGISTERS == 4
+
+#if NUMBER_OF_ARGUMENT_REGISTERS >= 5
+ void setupStubArguments134(GPRReg arg1, GPRReg arg3, GPRReg arg4)
+ {
+ setupThreeStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR3, GPRInfo::argumentGPR4>(arg1, arg3, arg4);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4)
+ {
+ setupThreeStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2, GPRInfo::argumentGPR3>(arg1, arg2, arg3);
+ move(arg4, GPRInfo::argumentGPR4);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4)
+ {
+ setupTwoStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR4>(arg1, arg4);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5)
+ {
+ setupThreeStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR4, GPRInfo::argumentGPR5>(arg1, arg4, arg5);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4)
+ {
+ setupStubArguments134(arg1, arg3, arg4);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4)
+ {
+ setupTwoStubArgsGPR<GPRInfo::argumentGPR2, GPRInfo::argumentGPR3>(arg2, arg3);
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg4, GPRInfo::argumentGPR4);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImm32 arg4)
+ {
+ move(arg2, GPRInfo::argumentGPR2); // In case arg2 is argumentGPR1.
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(arg4, GPRInfo::argumentGPR4);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm64 arg4)
+ {
+ move(arg2, GPRInfo::argumentGPR2); // In case arg2 is argumentGPR1.
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(arg4, GPRInfo::argumentGPR4);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4, TrustedImm32 arg5)
+ {
+ move(arg2, GPRInfo::argumentGPR2); // In case arg2 is argumentGPR1.
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(arg4, GPRInfo::argumentGPR4);
+ move(arg5, GPRInfo::argumentGPR5);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5)
+ {
+ move(arg3, GPRInfo::argumentGPR3);
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(arg4, GPRInfo::argumentGPR4);
+ move(arg5, GPRInfo::argumentGPR5);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, TrustedImm32 arg5)
+ {
+ setupTwoStubArgsGPR<GPRInfo::argumentGPR2, GPRInfo::argumentGPR4>(arg2, arg4);
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(arg5, GPRInfo::argumentGPR5);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5)
+ {
+ setupTwoStubArgsGPR<GPRInfo::argumentGPR2, GPRInfo::argumentGPR3>(arg2, arg3);
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg4, GPRInfo::argumentGPR4);
+ move(arg5, GPRInfo::argumentGPR5);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4)
+ {
+ setupThreeStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2, GPRInfo::argumentGPR3>(arg1, arg2, arg3);
+ move(arg4, GPRInfo::argumentGPR4);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4)
+ {
+ setupThreeStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2, GPRInfo::argumentGPR4>(arg1, arg2, arg4);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArguments(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4, TrustedImmPtr arg5)
+ {
+ setupThreeStubArgsGPR<GPRInfo::argumentGPR0, GPRInfo::argumentGPR2, GPRInfo::argumentGPR3>(arg1, arg3, arg4);
+ move(arg2, GPRInfo::argumentGPR1);
+ move(arg5, GPRInfo::argumentGPR4);
+ }
+
+ ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4, GPRReg arg5)
+ {
+ setupThreeStubArgsGPR<GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, GPRInfo::argumentGPR4>(arg1, arg2, arg5);
+ move(arg3, GPRInfo::argumentGPR2);
+ move(arg4, GPRInfo::argumentGPR3);
+ }
+#endif
+
+ void setupArguments(JSValueRegs arg1)
+ {
+#if USE(JSVALUE64)
+ setupArguments(arg1.gpr());
+#else
+ setupArguments(arg1.payloadGPR(), arg1.tagGPR());
+#endif
+ }
+
+ void setupResults(GPRReg destA, GPRReg destB)
+ {
+ GPRReg srcA = GPRInfo::returnValueGPR;
+ GPRReg srcB = GPRInfo::returnValueGPR2;
+
+ if (destA == InvalidGPRReg)
+ move(srcB, destB);
+ else if (destB == InvalidGPRReg)
+ move(srcA, destA);
+ else if (srcB != destA) {
+ // Handle the easy cases - two simple moves.
+ move(srcA, destA);
+ move(srcB, destB);
+ } else if (srcA != destB) {
+ // Handle the non-swap case - just put srcB in place first.
+ move(srcB, destB);
+ move(srcA, destA);
+ } else
+ swap(destA, destB);
+ }
+
+ void setupResults(JSValueRegs regs)
+ {
+#if USE(JSVALUE64)
+ move(GPRInfo::returnValueGPR, regs.gpr());
+#else
+ setupResults(regs.payloadGPR(), regs.tagGPR());
+#endif
+ }
+
+ void jumpToExceptionHandler()
+ {
+ // genericUnwind() leaves the handler CallFrame* in vm->callFrameForCatch,
+ // and the address of the handler in vm->targetMachinePCForThrow.
+ loadPtr(&vm()->targetMachinePCForThrow, GPRInfo::regT1);
+ jump(GPRInfo::regT1);
+ }
+
+ void prepareForTailCallSlow(GPRReg calleeGPR = InvalidGPRReg)
+ {
+ GPRReg temp1 = calleeGPR == GPRInfo::regT0 ? GPRInfo::regT3 : GPRInfo::regT0;
+ GPRReg temp2 = calleeGPR == GPRInfo::regT1 ? GPRInfo::regT3 : GPRInfo::regT1;
+ GPRReg temp3 = calleeGPR == GPRInfo::regT2 ? GPRInfo::regT3 : GPRInfo::regT2;
+
+ GPRReg newFramePointer = temp1;
+ GPRReg newFrameSizeGPR = temp2;
+ {
+            // The old frame size is its number of arguments (or its number of
+            // parameters, if arity fixup padded the frame), plus the frame
+            // header size, rounded up to the stack alignment.
+ GPRReg oldFrameSizeGPR = temp2;
+ {
+ GPRReg argCountGPR = oldFrameSizeGPR;
+ load32(Address(framePointerRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset), argCountGPR);
+
+ {
+ GPRReg numParametersGPR = temp1;
+ {
+ GPRReg codeBlockGPR = numParametersGPR;
+ loadPtr(Address(framePointerRegister, JSStack::CodeBlock * static_cast<int>(sizeof(Register))), codeBlockGPR);
+ load32(Address(codeBlockGPR, CodeBlock::offsetOfNumParameters()), numParametersGPR);
+ }
+
+ ASSERT(numParametersGPR != argCountGPR);
+ Jump argumentCountWasNotFixedUp = branch32(BelowOrEqual, numParametersGPR, argCountGPR);
+ move(numParametersGPR, argCountGPR);
+ argumentCountWasNotFixedUp.link(this);
+ }
+
+ add32(TrustedImm32(stackAlignmentRegisters() + JSStack::CallFrameHeaderSize - 1), argCountGPR, oldFrameSizeGPR);
+ and32(TrustedImm32(-stackAlignmentRegisters()), oldFrameSizeGPR);
+ // We assume < 2^28 arguments
+ mul32(TrustedImm32(sizeof(Register)), oldFrameSizeGPR, oldFrameSizeGPR);
+ }
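+
+            // Illustrative example (not part of the imported patch), assuming
+            // JSStack::CallFrameHeaderSize == 5 and stackAlignmentRegisters() == 2:
+            // with 3 arguments the computation above yields
+            //     (3 + 2 + 5 - 1) & -2 == 8 registers, i.e. 64 bytes on a 64-bit target.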
+
+ // The new frame pointer is at framePointer + oldFrameSize - newFrameSize
+ ASSERT(newFramePointer != oldFrameSizeGPR);
+ addPtr(framePointerRegister, oldFrameSizeGPR, newFramePointer);
+
+            // The new frame size is just the number of arguments plus the
+            // frame header size, rounded up to the stack alignment.
+ ASSERT(newFrameSizeGPR != newFramePointer);
+ load32(Address(stackPointerRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)),
+ newFrameSizeGPR);
+ add32(TrustedImm32(stackAlignmentRegisters() + JSStack::CallFrameHeaderSize - 1), newFrameSizeGPR);
+ and32(TrustedImm32(-stackAlignmentRegisters()), newFrameSizeGPR);
+ // We assume < 2^28 arguments
+ mul32(TrustedImm32(sizeof(Register)), newFrameSizeGPR, newFrameSizeGPR);
+ }
+
+ GPRReg tempGPR = temp3;
+ ASSERT(tempGPR != newFramePointer && tempGPR != newFrameSizeGPR);
+
+ // We don't need the current frame beyond this point. Masquerade as our
+ // caller.
+#if CPU(ARM) || CPU(SH4) || CPU(ARM64)
+ loadPtr(Address(framePointerRegister, sizeof(void*)), linkRegister);
+ subPtr(TrustedImm32(2 * sizeof(void*)), newFrameSizeGPR);
+#elif CPU(MIPS)
+ loadPtr(Address(framePointerRegister, sizeof(void*)), returnAddressRegister);
+ subPtr(TrustedImm32(2 * sizeof(void*)), newFrameSizeGPR);
+#elif CPU(X86) || CPU(X86_64)
+ loadPtr(Address(framePointerRegister, sizeof(void*)), tempGPR);
+ push(tempGPR);
+ subPtr(TrustedImm32(sizeof(void*)), newFrameSizeGPR);
+#else
+ UNREACHABLE_FOR_PLATFORM();
+#endif
+ subPtr(newFrameSizeGPR, newFramePointer);
+ loadPtr(Address(framePointerRegister), framePointerRegister);
+
+        // Copy the newFrameSizeGPR bytes currently sitting at the stack pointer
+        // (the header and arguments of the frame we are about to jump to) to
+        // their final location at newFramePointer. We copy in pointer-sized
+        // chunks, from the top of the region downwards.
+ MacroAssembler::Label copyLoop(label());
+
+ subPtr(TrustedImm32(sizeof(void*)), newFrameSizeGPR);
+ loadPtr(BaseIndex(stackPointerRegister, newFrameSizeGPR, TimesOne), tempGPR);
+ storePtr(tempGPR, BaseIndex(newFramePointer, newFrameSizeGPR, TimesOne));
+
+ branchTest32(MacroAssembler::NonZero, newFrameSizeGPR).linkTo(copyLoop, this);
+
+ // Ready for a jump!
+ move(newFramePointer, stackPointerRegister);
+ }
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // CCallHelpers_h
+
diff --git a/Source/JavaScriptCore/jit/JumpReplacementWatchpoint.cpp b/Source/JavaScriptCore/jit/CachedRecovery.cpp
index 13270d4d3..f4aacc6c8 100644
--- a/Source/JavaScriptCore/jit/JumpReplacementWatchpoint.cpp
+++ b/Source/JavaScriptCore/jit/CachedRecovery.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -24,36 +24,48 @@
*/
#include "config.h"
-#include "JumpReplacementWatchpoint.h"
+#include "CachedRecovery.h"
#if ENABLE(JIT)
-#include "LinkBuffer.h"
-#include "Options.h"
-
namespace JSC {
-void JumpReplacementWatchpoint::correctLabels(LinkBuffer& linkBuffer)
+// We prefer loading doubles and undetermined JSValues into FPRs,
+// because loading them into GPRs would use those up (two of them
+// under JSVALUE32_64).
+bool CachedRecovery::loadsIntoFPR() const
{
- MacroAssembler::Label label;
- label.m_label.m_offset = m_source;
- m_source = bitwise_cast<uintptr_t>(linkBuffer.locationOf(label).dataLocation());
- label.m_label.m_offset = m_destination;
- m_destination = bitwise_cast<uintptr_t>(linkBuffer.locationOf(label).dataLocation());
+ switch (recovery().technique()) {
+ case DoubleDisplacedInJSStack:
+ case DisplacedInJSStack:
+#if USE(JSVALUE64)
+ case CellDisplacedInJSStack:
+#endif
+ return true;
+
+ default:
+ return false;
+ }
}
-void JumpReplacementWatchpoint::fireInternal()
+// Integers, booleans and cells can be loaded into GPRs
+bool CachedRecovery::loadsIntoGPR() const
{
- void* source = bitwise_cast<void*>(m_source);
- void* destination = bitwise_cast<void*>(m_destination);
- if (Options::showDisassembly())
- dataLogF("Firing jump replacement watchpoint from %p, to %p.\n", source, destination);
- MacroAssembler::replaceWithJump(CodeLocationLabel(source), CodeLocationLabel(destination));
- if (isOnList())
- remove();
+ switch (recovery().technique()) {
+ case Int32DisplacedInJSStack:
+#if USE(JSVALUE64)
+ case Int52DisplacedInJSStack:
+ case StrictInt52DisplacedInJSStack:
+ case DisplacedInJSStack:
+#endif
+ case BooleanDisplacedInJSStack:
+ case CellDisplacedInJSStack:
+ return true;
+
+ default:
+ return false;
+ }
}
} // namespace JSC
#endif // ENABLE(JIT)
-
diff --git a/Source/JavaScriptCore/jit/CachedRecovery.h b/Source/JavaScriptCore/jit/CachedRecovery.h
new file mode 100644
index 000000000..5fe39dee7
--- /dev/null
+++ b/Source/JavaScriptCore/jit/CachedRecovery.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CachedRecovery_h
+#define CachedRecovery_h
+
+#if ENABLE(JIT)
+
+#include "ValueRecovery.h"
+#include "VirtualRegister.h"
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+// A CachedRecovery is a wrapper around a ValueRecovery that records where said
+// value should go on the stack and/or in registers. Whenever we perform an
+// operation changing the ValueRecovery, we update the CachedRecovery's member
+// in place.
+class CachedRecovery {
+public:
+ CachedRecovery(ValueRecovery recovery)
+ : m_recovery { recovery }
+ {
+ }
+
+ CachedRecovery(CachedRecovery&) = delete;
+ CachedRecovery(CachedRecovery&&) = delete;
+ CachedRecovery& operator=(CachedRecovery&) = delete;
+ CachedRecovery& operator=(CachedRecovery&&) = delete;
+
+ const Vector<VirtualRegister, 1>& targets() const { return m_targets; }
+
+ void addTarget(VirtualRegister reg)
+ {
+ ASSERT(m_targets.isEmpty() || m_targets.last() < reg);
+ m_targets.append(reg);
+ }
+
+ void removeTarget(VirtualRegister reg)
+ {
+ ASSERT_UNUSED(reg, m_targets.last() == reg);
+ m_targets.shrink(m_targets.size() - 1);
+ }
+
+ void clearTargets()
+ {
+ m_targets.clear();
+ }
+
+ void setWantedJSValueRegs(JSValueRegs jsValueRegs)
+ {
+ ASSERT(m_wantedFPR == InvalidFPRReg);
+ m_wantedJSValueRegs = jsValueRegs;
+ }
+
+ void setWantedFPR(FPRReg fpr)
+ {
+ ASSERT(!m_wantedJSValueRegs);
+ m_wantedFPR = fpr;
+ }
+
+ // Determine whether converting this recovery into a JSValue will
+ // require additional GPRs and/or FPRs.
+ // This is guaranteed to only depend on the DataFormat, and the
+ // result of these calls will stay valid after loads and/or stores.
+ bool boxingRequiresGPR() const
+ {
+#if USE(JSVALUE64)
+ return recovery().dataFormat() == DataFormatDouble;
+#else
+ return false;
+#endif
+ }
+ bool boxingRequiresFPR() const
+ {
+#if USE(JSVALUE64)
+ switch (recovery().dataFormat()) {
+ case DataFormatInt52:
+ case DataFormatStrictInt52:
+ return true;
+
+ default:
+ return false;
+ }
+#else
+ return false;
+#endif
+ }
+
+ // This is used to determine what kind of register we need to be
+ // able to load a recovery. We only use it when a direct load is
+ // currently impossible, to determine whether we should spill a
+ // GPR or an FPR for loading this value.
+ bool loadsIntoGPR() const;
+ bool loadsIntoFPR() const;
+
+ ValueRecovery recovery() const { return m_recovery; }
+
+ void setRecovery(ValueRecovery recovery) { m_recovery = recovery; }
+
+ JSValueRegs wantedJSValueRegs() const { return m_wantedJSValueRegs; }
+
+ FPRReg wantedFPR() const { return m_wantedFPR; }
+private:
+ ValueRecovery m_recovery;
+ JSValueRegs m_wantedJSValueRegs;
+ FPRReg m_wantedFPR { InvalidFPRReg };
+ Vector<VirtualRegister, 1> m_targets;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // CachedRecovery_h
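
The class above is mostly bookkeeping: one recovery can feed several slots of the new frame, and the asserts in addTarget()/removeTarget() keep the target list sorted so that removal is last-in, first-out. A tiny standalone analogue of that invariant, with simplified types rather than the JSC class:

    #include <cassert>
    #include <vector>

    // Illustrative stand-in for the target bookkeeping, not the JSC class.
    struct MiniCachedRecovery {
        std::vector<int> targets; // new-frame slots, kept in increasing order

        void addTarget(int slot)
        {
            assert(targets.empty() || targets.back() < slot);
            targets.push_back(slot);
        }

        void removeTarget(int slot)
        {
            assert(!targets.empty() && targets.back() == slot);
            targets.pop_back();
        }
    };

    int main()
    {
        MiniCachedRecovery r;
        r.addTarget(1);
        r.addTarget(4);      // the same value goes into two new-frame slots
        r.removeTarget(4);   // targets are consumed highest-first
        assert(r.targets.size() == 1);
    }
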
diff --git a/Source/JavaScriptCore/jit/CallFrameShuffleData.cpp b/Source/JavaScriptCore/jit/CallFrameShuffleData.cpp
new file mode 100644
index 000000000..567202c15
--- /dev/null
+++ b/Source/JavaScriptCore/jit/CallFrameShuffleData.cpp
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "CallFrameShuffleData.h"
+
+#if ENABLE(JIT)
+
+#include "CCallHelpers.h"
+#include "CodeBlock.h"
+
+namespace JSC {
+
+#if USE(JSVALUE64)
+
+void CallFrameShuffleData::setupCalleeSaveRegisters(CodeBlock* codeBlock)
+{
+ RegisterSet calleeSaveRegisters { RegisterSet::vmCalleeSaveRegisters() };
+ RegisterAtOffsetList* registerSaveLocations = codeBlock->calleeSaveRegisters();
+
+ for (size_t i = 0; i < registerSaveLocations->size(); ++i) {
+ RegisterAtOffset entry { registerSaveLocations->at(i) };
+ if (!calleeSaveRegisters.get(entry.reg()))
+ continue;
+
+ VirtualRegister saveSlot { entry.offsetAsIndex() };
+ registers[entry.reg()]
+ = ValueRecovery::displacedInJSStack(saveSlot, DataFormatJS);
+ }
+
+ for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
+ if (!calleeSaveRegisters.get(reg))
+ continue;
+
+ if (registers[reg])
+ continue;
+
+ registers[reg] = ValueRecovery::inRegister(reg, DataFormatJS);
+ }
+}
+
+#endif // USE(JSVALUE64)
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
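
setupCalleeSaveRegisters() above runs two passes: callee-save registers that the code block spilled recover from their stack slot, and every other VM callee-save recovers from itself. Here is a hedged standalone sketch of the same two-pass shape; it uses invented integer register ids and a string-tagged recovery instead of the JSC Reg/RegisterAtOffsetList/ValueRecovery types.

    #include <algorithm>
    #include <map>
    #include <string>
    #include <utility>
    #include <vector>

    // Invented representation: a recovery is either a stack slot or the register itself.
    struct MiniRecovery { std::string kind; int where; };

    std::map<int, MiniRecovery> buildCalleeSaveRecoveries(
        const std::vector<int>& vmCalleeSaves,         // register ids the VM must preserve
        const std::vector<std::pair<int, int>>& saved) // (register id, stack slot) spilled by the code block
    {
        std::map<int, MiniRecovery> recoveries;
        // Pass 1: registers the code block saved recover from their spill slot.
        for (auto [reg, slot] : saved) {
            if (std::find(vmCalleeSaves.begin(), vmCalleeSaves.end(), reg) == vmCalleeSaves.end())
                continue; // not a VM callee-save; skipped, like the RegisterSet::get() check
            recoveries[reg] = { "stack", slot };
        }
        // Pass 2: any remaining VM callee-save still holds its own value.
        for (int reg : vmCalleeSaves) {
            if (!recoveries.count(reg))
                recoveries[reg] = { "register", reg };
        }
        return recoveries;
    }

    int main()
    {
        auto r = buildCalleeSaveRecoveries({ 7, 8, 9 }, { { 7, -2 }, { 8, -3 } });
        // r[7] and r[8] recover from the stack; r[9] recovers from itself.
        return r.size() == 3 ? 0 : 1;
    }
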
diff --git a/Source/JavaScriptCore/jit/CallFrameShuffleData.h b/Source/JavaScriptCore/jit/CallFrameShuffleData.h
new file mode 100644
index 000000000..d85e55b3e
--- /dev/null
+++ b/Source/JavaScriptCore/jit/CallFrameShuffleData.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CallFrameShuffleData_h
+#define CallFrameShuffleData_h
+
+#if ENABLE(JIT)
+
+#include "RegisterMap.h"
+#include "ValueRecovery.h"
+
+namespace JSC {
+
+struct CallFrameShuffleData {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ unsigned numLocals;
+ ValueRecovery callee;
+ Vector<ValueRecovery> args;
+#if USE(JSVALUE64)
+ RegisterMap<ValueRecovery> registers;
+ GPRReg tagTypeNumber { InvalidGPRReg };
+
+ void setupCalleeSaveRegisters(CodeBlock*);
+#endif
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // CallFrameShuffleData_h
diff --git a/Source/JavaScriptCore/jit/CallFrameShuffler.cpp b/Source/JavaScriptCore/jit/CallFrameShuffler.cpp
new file mode 100644
index 000000000..45af55dd6
--- /dev/null
+++ b/Source/JavaScriptCore/jit/CallFrameShuffler.cpp
@@ -0,0 +1,774 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "CallFrameShuffler.h"
+
+#if ENABLE(JIT)
+
+#include "CachedRecovery.h"
+#include "CCallHelpers.h"
+#include "CodeBlock.h"
+
+namespace JSC {
+
+CallFrameShuffler::CallFrameShuffler(CCallHelpers& jit, const CallFrameShuffleData& data)
+ : m_jit(jit)
+ , m_oldFrame(data.numLocals + JSStack::CallerFrameAndPCSize, nullptr)
+ , m_newFrame(data.args.size() + JSStack::CallFrameHeaderSize, nullptr)
+ , m_alignedOldFrameSize(JSStack::CallFrameHeaderSize
+ + roundArgumentCountToAlignFrame(jit.codeBlock()->numParameters()))
+ , m_alignedNewFrameSize(JSStack::CallFrameHeaderSize
+ + roundArgumentCountToAlignFrame(data.args.size()))
+ , m_frameDelta(m_alignedNewFrameSize - m_alignedOldFrameSize)
+ , m_lockedRegisters(RegisterSet::allRegisters())
+{
+ // We are allowed all the usual registers...
+ for (unsigned i = GPRInfo::numberOfRegisters; i--; )
+ m_lockedRegisters.clear(GPRInfo::toRegister(i));
+ for (unsigned i = FPRInfo::numberOfRegisters; i--; )
+ m_lockedRegisters.clear(FPRInfo::toRegister(i));
+ // ... as well as the runtime registers.
+ m_lockedRegisters.exclude(RegisterSet::vmCalleeSaveRegisters());
+
+ ASSERT(!data.callee.isInJSStack() || data.callee.virtualRegister().isLocal());
+ addNew(VirtualRegister(JSStack::Callee), data.callee);
+
+ for (size_t i = 0; i < data.args.size(); ++i) {
+ ASSERT(!data.args[i].isInJSStack() || data.args[i].virtualRegister().isLocal());
+ addNew(virtualRegisterForArgument(i), data.args[i]);
+ }
+
+#if USE(JSVALUE64)
+ for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
+ if (!data.registers[reg].isSet())
+ continue;
+
+ if (reg.isGPR())
+ addNew(JSValueRegs(reg.gpr()), data.registers[reg]);
+ else
+ addNew(reg.fpr(), data.registers[reg]);
+ }
+
+ m_tagTypeNumber = data.tagTypeNumber;
+ if (m_tagTypeNumber != InvalidGPRReg)
+ lockGPR(m_tagTypeNumber);
+#endif
+}
+
+void CallFrameShuffler::dump(PrintStream& out) const
+{
+ static const char* delimiter = " +-------------------------------+ ";
+ static const char* dangerDelimiter = " X-------------------------------X ";
+ static const char* dangerBoundsDelimiter = " XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX ";
+ static const char* emptySpace = " ";
+ out.print(" ");
+ out.print(" Old frame ");
+ out.print(" New frame ");
+ out.print("\n");
+ int totalSize = m_alignedOldFrameSize + std::max(numLocals(), m_alignedNewFrameSize) + 3;
+ for (int i = 0; i < totalSize; ++i) {
+ VirtualRegister old { m_alignedOldFrameSize - i - 1 };
+ VirtualRegister newReg { old + m_frameDelta };
+
+ if (!isValidOld(old) && old != firstOld() - 1
+ && !isValidNew(newReg) && newReg != firstNew() - 1)
+ continue;
+
+ out.print(" ");
+ if (dangerFrontier() >= firstNew()
+ && (newReg == dangerFrontier() || newReg == firstNew() - 1))
+ out.print(dangerBoundsDelimiter);
+ else if (isValidOld(old))
+ out.print(isValidNew(newReg) && isDangerNew(newReg) ? dangerDelimiter : delimiter);
+ else if (old == firstOld() - 1)
+ out.print(delimiter);
+ else
+ out.print(emptySpace);
+ if (dangerFrontier() >= firstNew()
+ && (newReg == dangerFrontier() || newReg == firstNew() - 1))
+ out.print(dangerBoundsDelimiter);
+ else if (isValidNew(newReg) || newReg == firstNew() - 1)
+ out.print(isDangerNew(newReg) ? dangerDelimiter : delimiter);
+ else
+ out.print(emptySpace);
+ out.print("\n");
+ if (old == firstOld())
+ out.print(" sp --> ");
+ else if (!old.offset())
+ out.print(" fp --> ");
+ else
+ out.print(" ");
+ if (isValidOld(old)) {
+ if (getOld(old)) {
+ auto str = toCString(old);
+ if (isValidNew(newReg) && isDangerNew(newReg))
+ out.printf(" X %18s X ", str.data());
+ else
+ out.printf(" | %18s | ", str.data());
+ } else if (isValidNew(newReg) && isDangerNew(newReg))
+ out.printf(" X%30s X ", "");
+ else
+ out.printf(" |%30s | ", "");
+ } else
+ out.print(emptySpace);
+ if (isValidNew(newReg)) {
+ const char d = isDangerNew(newReg) ? 'X' : '|';
+ auto str = toCString(newReg);
+ if (getNew(newReg)) {
+ if (getNew(newReg)->recovery().isConstant())
+ out.printf(" %c%8s <- constant %c ", d, str.data(), d);
+ else {
+ auto recoveryStr = toCString(getNew(newReg)->recovery());
+ out.printf(" %c%8s <- %18s %c ", d, str.data(),
+ recoveryStr.data(), d);
+ }
+ } else if (newReg == VirtualRegister { JSStack::ArgumentCount })
+ out.printf(" %c%8s <- %18zu %c ", d, str.data(), argCount(), d);
+ else
+ out.printf(" %c%30s %c ", d, "", d);
+ } else
+ out.print(emptySpace);
+ if (newReg == firstNew() - m_newFrameOffset && !isSlowPath())
+ out.print(" <-- new sp before jump (current ", m_newFrameBase, ") ");
+ if (newReg == firstNew())
+ out.print(" <-- new fp after prologue");
+ out.print("\n");
+ }
+ out.print(" ");
+ out.print(" Live registers ");
+ out.print(" Wanted registers ");
+ out.print("\n");
+ for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
+ CachedRecovery* oldCachedRecovery { m_registers[reg] };
+ CachedRecovery* newCachedRecovery { m_newRegisters[reg] };
+ if (!oldCachedRecovery && !newCachedRecovery)
+ continue;
+ out.print(" ");
+ if (oldCachedRecovery) {
+ auto str = toCString(reg);
+ out.printf(" %8s ", str.data());
+ } else
+ out.print(emptySpace);
+#if USE(JSVALUE32_64)
+ if (newCachedRecovery) {
+ JSValueRegs wantedJSValueRegs { newCachedRecovery->wantedJSValueRegs() };
+ if (reg.isFPR())
+ out.print(reg, " <- ", newCachedRecovery->recovery());
+ else {
+ if (reg.gpr() == wantedJSValueRegs.tagGPR())
+ out.print(reg.gpr(), " <- tag(", newCachedRecovery->recovery(), ")");
+ else
+ out.print(reg.gpr(), " <- payload(", newCachedRecovery->recovery(), ")");
+ }
+ }
+#else
+ if (newCachedRecovery)
+ out.print(" ", reg, " <- ", newCachedRecovery->recovery());
+#endif
+ out.print("\n");
+ }
+ out.print(" Locked registers: ");
+ bool firstLocked { true };
+ for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
+ if (m_lockedRegisters.get(reg)) {
+ out.print(firstLocked ? "" : ", ", reg);
+ firstLocked = false;
+ }
+ }
+ out.print("\n");
+
+ if (isSlowPath())
+ out.print(" Using fp-relative addressing for slow path call\n");
+ else
+ out.print(" Using sp-relative addressing for jump (using ", m_newFrameBase, " as new sp)\n");
+ if (m_oldFrameOffset)
+ out.print(" Old frame offset is ", m_oldFrameOffset, "\n");
+ if (m_newFrameOffset)
+ out.print(" New frame offset is ", m_newFrameOffset, "\n");
+#if USE(JSVALUE64)
+ if (m_tagTypeNumber != InvalidGPRReg)
+ out.print(" TagTypeNumber is currently in ", m_tagTypeNumber, "\n");
+#endif
+}
+
+CachedRecovery* CallFrameShuffler::getCachedRecovery(ValueRecovery recovery)
+{
+ ASSERT(!recovery.isConstant());
+ if (recovery.isInGPR())
+ return m_registers[recovery.gpr()];
+ if (recovery.isInFPR())
+ return m_registers[recovery.fpr()];
+#if USE(JSVALUE32_64)
+ if (recovery.technique() == InPair) {
+ ASSERT(m_registers[recovery.tagGPR()] == m_registers[recovery.payloadGPR()]);
+ return m_registers[recovery.payloadGPR()];
+ }
+#endif
+ ASSERT(recovery.isInJSStack());
+ return getOld(recovery.virtualRegister());
+}
+
+CachedRecovery* CallFrameShuffler::setCachedRecovery(ValueRecovery recovery, CachedRecovery* cachedRecovery)
+{
+ ASSERT(!recovery.isConstant());
+ if (recovery.isInGPR())
+ return m_registers[recovery.gpr()] = cachedRecovery;
+ if (recovery.isInFPR())
+ return m_registers[recovery.fpr()] = cachedRecovery;
+#if USE(JSVALUE32_64)
+ if (recovery.technique() == InPair) {
+ m_registers[recovery.tagGPR()] = cachedRecovery;
+ return m_registers[recovery.payloadGPR()] = cachedRecovery;
+ }
+#endif
+ ASSERT(recovery.isInJSStack());
+ setOld(recovery.virtualRegister(), cachedRecovery);
+ return cachedRecovery;
+}
+
+void CallFrameShuffler::spill(CachedRecovery& cachedRecovery)
+{
+ ASSERT(!isSlowPath());
+ ASSERT(cachedRecovery.recovery().isInRegisters());
+
+ VirtualRegister spillSlot { 0 };
+ for (VirtualRegister slot = firstOld(); slot <= lastOld(); slot += 1) {
+ if (slot >= newAsOld(firstNew()))
+ break;
+
+ if (getOld(slot))
+ continue;
+
+ spillSlot = slot;
+ break;
+ }
+ // We must have enough slots to be able to fit the whole callee's
+ // frame for the slow path - unless we are in the FTL. In that
+ // case, we are allowed to extend the frame *once*, since we are
+ // guaranteed to have enough available space for that.
+ if (spillSlot >= newAsOld(firstNew()) || !spillSlot.isLocal()) {
+ RELEASE_ASSERT(!m_didExtendFrame);
+ extendFrameIfNeeded();
+ spill(cachedRecovery);
+ return;
+ }
+
+ if (verbose)
+ dataLog(" * Spilling ", cachedRecovery.recovery(), " into ", spillSlot, "\n");
+ auto format = emitStore(cachedRecovery, addressForOld(spillSlot));
+ ASSERT(format != DataFormatNone);
+ updateRecovery(cachedRecovery, ValueRecovery::displacedInJSStack(spillSlot, format));
+}
+
+void CallFrameShuffler::emitDeltaCheck()
+{
+ if (ASSERT_DISABLED)
+ return;
+
+ GPRReg scratchGPR { getFreeGPR() };
+ if (scratchGPR != InvalidGPRReg) {
+ if (verbose)
+ dataLog(" Using ", scratchGPR, " for the fp-sp delta check\n");
+ m_jit.move(MacroAssembler::stackPointerRegister, scratchGPR);
+ m_jit.subPtr(GPRInfo::callFrameRegister, scratchGPR);
+ MacroAssembler::Jump ok = m_jit.branch32(
+ MacroAssembler::Equal, scratchGPR,
+ MacroAssembler::TrustedImm32(-numLocals() * sizeof(Register)));
+ m_jit.abortWithReason(JITUnexpectedCallFrameSize);
+ ok.link(&m_jit);
+ } else if (verbose)
+ dataLog(" Skipping the fp-sp delta check since there is too much pressure");
+}
+
+void CallFrameShuffler::extendFrameIfNeeded()
+{
+ ASSERT(!m_didExtendFrame);
+
+ VirtualRegister firstRead { firstOld() };
+ for (; firstRead <= virtualRegisterForLocal(0); firstRead += 1) {
+ if (getOld(firstRead))
+ break;
+ }
+ size_t availableSize = static_cast<size_t>(firstRead.offset() - firstOld().offset());
+ size_t wantedSize = m_newFrame.size() + m_newFrameOffset;
+
+ if (availableSize < wantedSize) {
+ size_t delta = WTF::roundUpToMultipleOf(stackAlignmentRegisters(), wantedSize - availableSize);
+ m_oldFrame.grow(m_oldFrame.size() + delta);
+ for (size_t i = 0; i < delta; ++i)
+ m_oldFrame[m_oldFrame.size() - i - 1] = nullptr;
+ m_jit.subPtr(MacroAssembler::TrustedImm32(delta * sizeof(Register)), MacroAssembler::stackPointerRegister);
+
+ if (isSlowPath())
+ m_frameDelta = numLocals() + JSStack::CallerFrameAndPCSize;
+ else
+ m_oldFrameOffset = numLocals();
+
+ if (verbose)
+ dataLogF(" Not enough space - extending the old frame by %zu slots\n", delta);
+ }
+
+ m_didExtendFrame = true;
+}
+
+void CallFrameShuffler::prepareForSlowPath()
+{
+ ASSERT(isUndecided());
+ emitDeltaCheck();
+
+ m_frameDelta = numLocals() + JSStack::CallerFrameAndPCSize;
+ m_newFrameBase = MacroAssembler::stackPointerRegister;
+ m_newFrameOffset = -JSStack::CallerFrameAndPCSize;
+
+ if (verbose)
+ dataLog("\n\nPreparing frame for slow path call:\n");
+
+ // When coming from the FTL, we need to extend the frame. In other
+ // cases, we may end up extending the frame if we previously
+ // spilled things (e.g. in polymorphic cache).
+ extendFrameIfNeeded();
+
+ if (verbose)
+ dataLog(*this);
+
+ prepareAny();
+
+ if (verbose)
+ dataLog("Ready for slow path call!\n");
+}
+
+void CallFrameShuffler::prepareForTailCall()
+{
+ ASSERT(isUndecided());
+ emitDeltaCheck();
+
+ // We'll use sp-based indexing so that we can load the
+ // caller's frame pointer into the frame pointer register
+ // immediately.
+ m_oldFrameBase = MacroAssembler::stackPointerRegister;
+ m_oldFrameOffset = numLocals();
+ m_newFrameBase = acquireGPR();
+#if CPU(X86)
+ // We load the frame pointer manually, but we need to ask the
+ // algorithm to move the return PC for us (it'd probably
+ // require a write to the danger zone). Since it'd be awkward
+ // to ask for half a value move, we ask that the whole thing
+ // be moved for us.
+ addNew(VirtualRegister { 0 },
+ ValueRecovery::displacedInJSStack(VirtualRegister(0), DataFormatJS));
+
+ // sp will point to head0 and we will move it up half a slot
+ // manually
+ m_newFrameOffset = 0;
+#elif CPU(ARM) || CPU(SH4) || CPU(MIPS)
+ // We load the frame pointer and link register
+ // manually. We could ask the algorithm to load them for us,
+ // and it would allow us to use the link register as an extra
+ // temporary - but it'd mean that the frame pointer can also
+ // be used as an extra temporary, so we keep the link register
+ // locked instead.
+
+ // sp will point to head1 since the callee's prologue pushes
+ // the call frame and link register.
+ m_newFrameOffset = -1;
+#elif CPU(ARM64)
+ // We load the frame pointer and link register manually. We
+ // could ask the algorithm to load the link register for us
+ // (which would allow for its use as an extra temporary), but
+ // since it's not in GPRInfo, we can't do it.
+
+ // sp will point to head2 since the callee's prologue pushes the
+ // call frame and link register
+ m_newFrameOffset = -2;
+#elif CPU(X86_64)
+ // We load the frame pointer manually, but we ask the
+ // algorithm to move the return PC for us (it'd probably
+ // require a write in the danger zone)
+ addNew(VirtualRegister { 1 },
+ ValueRecovery::displacedInJSStack(VirtualRegister(1), DataFormatJS));
+
+ // sp will point to head1 since the callee's prologue pushes
+ // the call frame register
+ m_newFrameOffset = -1;
+#else
+ UNREACHABLE_FOR_PLATFORM();
+#endif
+
+ if (verbose)
+ dataLog(" Emitting code for computing the new frame base\n");
+
+ // We compute the new frame base by first computing the top of the
+ // old frame (taking into account an argument count higher than
+ // the number of parameters), then subtracting from it the aligned
+ // new frame size (adjusted).
+ m_jit.load32(MacroAssembler::Address(GPRInfo::callFrameRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset), m_newFrameBase);
+ MacroAssembler::Jump argumentCountOK =
+ m_jit.branch32(MacroAssembler::BelowOrEqual, m_newFrameBase,
+ MacroAssembler::TrustedImm32(m_jit.codeBlock()->numParameters()));
+ m_jit.add32(MacroAssembler::TrustedImm32(stackAlignmentRegisters() - 1 + JSStack::CallFrameHeaderSize), m_newFrameBase);
+ m_jit.and32(MacroAssembler::TrustedImm32(-stackAlignmentRegisters()), m_newFrameBase);
+ m_jit.mul32(MacroAssembler::TrustedImm32(sizeof(Register)), m_newFrameBase, m_newFrameBase);
+ MacroAssembler::Jump done = m_jit.jump();
+ argumentCountOK.link(&m_jit);
+ m_jit.move(
+ MacroAssembler::TrustedImm32(m_alignedOldFrameSize * sizeof(Register)),
+ m_newFrameBase);
+ done.link(&m_jit);
+
+ m_jit.addPtr(GPRInfo::callFrameRegister, m_newFrameBase);
+ m_jit.subPtr(
+ MacroAssembler::TrustedImm32(
+ (m_alignedNewFrameSize + m_newFrameOffset) * sizeof(Register)),
+ m_newFrameBase);
+
+ // We load the link register manually for architectures that have one
+#if CPU(ARM) || CPU(SH4) || CPU(ARM64)
+ m_jit.loadPtr(MacroAssembler::Address(MacroAssembler::framePointerRegister, sizeof(void*)),
+ MacroAssembler::linkRegister);
+#elif CPU(MIPS)
+ m_jit.loadPtr(MacroAssembler::Address(MacroAssembler::framePointerRegister, sizeof(void*)),
+ MacroAssembler::returnAddressRegister);
+#endif
+
+ // We want the frame pointer to always point to a valid frame, and
+ // we are going to trash the current one. Let's make it point to
+ // our caller's frame, since that's what we want to end up with.
+ m_jit.loadPtr(MacroAssembler::Address(MacroAssembler::framePointerRegister),
+ MacroAssembler::framePointerRegister);
+
+ if (verbose)
+ dataLog("Preparing frame for tail call:\n", *this);
+
+ prepareAny();
+
+#if CPU(X86)
+ if (verbose)
+ dataLog(" Simulating pop of the call frame register\n");
+ m_jit.addPtr(MacroAssembler::TrustedImm32(sizeof(void*)), MacroAssembler::stackPointerRegister);
+#endif
+
+ if (verbose)
+ dataLog("Ready for tail call!\n");
+}
+
+bool CallFrameShuffler::tryWrites(CachedRecovery& cachedRecovery)
+{
+ ASSERT(m_newFrameBase != InvalidGPRReg);
+
+ // If the value is already set up correctly, we don't have
+ // anything to do.
+ if (isSlowPath() && cachedRecovery.recovery().isInJSStack()
+ && cachedRecovery.targets().size() == 1
+ && newAsOld(cachedRecovery.targets()[0]) == cachedRecovery.recovery().virtualRegister()) {
+ cachedRecovery.clearTargets();
+ if (!cachedRecovery.wantedJSValueRegs() && cachedRecovery.wantedFPR() == InvalidFPRReg)
+ clearCachedRecovery(cachedRecovery.recovery());
+ return true;
+ }
+
+ if (!canLoadAndBox(cachedRecovery))
+ return false;
+
+ emitLoad(cachedRecovery);
+ emitBox(cachedRecovery);
+ ASSERT(cachedRecovery.recovery().isInRegisters()
+ || cachedRecovery.recovery().isConstant());
+
+ if (verbose)
+ dataLog(" * Storing ", cachedRecovery.recovery());
+ for (size_t i = 0; i < cachedRecovery.targets().size(); ++i) {
+ VirtualRegister target { cachedRecovery.targets()[i] };
+ ASSERT(!isDangerNew(target));
+ if (verbose)
+ dataLog(!i ? " into " : ", and ", "NEW ", target);
+ emitStore(cachedRecovery, addressForNew(target));
+ setNew(target, nullptr);
+ }
+ if (verbose)
+ dataLog("\n");
+ cachedRecovery.clearTargets();
+ if (!cachedRecovery.wantedJSValueRegs() && cachedRecovery.wantedFPR() == InvalidFPRReg)
+ clearCachedRecovery(cachedRecovery.recovery());
+
+ return true;
+}
+
+bool CallFrameShuffler::performSafeWrites()
+{
+ VirtualRegister firstSafe;
+ VirtualRegister end { lastNew() + 1 };
+ Vector<VirtualRegister> failures;
+
+ // For each cachedRecovery that writes to the safe zone and does
+ // not also write to the danger zone, we try to perform the
+ // writes. This may free up danger slots, so we iterate again
+ // until that no longer happens.
+ //
+ // Note that even though we have a while block, we look at
+ // each slot of the new call frame at most once since in each
+ // iteration beyond the first, we only load up the portion of
+ // the new call frame that was dangerous and became safe due
+ // to the previous iteration.
+ do {
+ firstSafe = dangerFrontier() + 1;
+ if (verbose)
+ dataLog(" Trying safe writes (between NEW ", firstSafe, " and NEW ", end - 1, ")\n");
+ bool didProgress = false;
+ for (VirtualRegister reg = firstSafe; reg < end; reg += 1) {
+ CachedRecovery* cachedRecovery = getNew(reg);
+ if (!cachedRecovery) {
+ if (verbose)
+ dataLog(" + ", reg, " is OK.\n");
+ continue;
+ }
+ if (!hasOnlySafeWrites(*cachedRecovery)) {
+ if (verbose) {
+ dataLog(" - ", cachedRecovery->recovery(), " writes to NEW ", reg,
+ " but also has dangerous writes.\n");
+ }
+ continue;
+ }
+ if (cachedRecovery->wantedJSValueRegs()) {
+ if (verbose) {
+ dataLog(" - ", cachedRecovery->recovery(), " writes to NEW ", reg,
+ " but is also needed in registers.\n");
+ }
+ continue;
+ }
+ if (cachedRecovery->wantedFPR() != InvalidFPRReg) {
+ if (verbose) {
+ dataLog(" - ", cachedRecovery->recovery(), " writes to NEW ", reg,
+ " but is also needed in an FPR.\n");
+ }
+ continue;
+ }
+ if (!tryWrites(*cachedRecovery)) {
+ if (verbose)
+ dataLog(" - Unable to write to NEW ", reg, " from ", cachedRecovery->recovery(), "\n");
+ failures.append(reg);
+ }
+ didProgress = true;
+ }
+ end = firstSafe;
+
+ // If we have cachedRecoveries that failed to write, it is
+ // because they are on the stack and we didn't have enough
+ // registers available at the time to load them into. If
+ // we have a free register, we should try again because it
+ // could free up some danger slots.
+ if (didProgress && hasFreeRegister()) {
+ Vector<VirtualRegister> stillFailing;
+ for (VirtualRegister failed : failures) {
+ CachedRecovery* cachedRecovery = getNew(failed);
+ // It could have been handled later if it had
+ // several targets
+ if (!cachedRecovery)
+ continue;
+
+ ASSERT(hasOnlySafeWrites(*cachedRecovery)
+ && !cachedRecovery->wantedJSValueRegs()
+ && cachedRecovery->wantedFPR() == InvalidFPRReg);
+ if (!tryWrites(*cachedRecovery))
+ stillFailing.append(failed);
+ }
+ failures = WTFMove(stillFailing);
+ }
+ if (verbose && firstSafe != dangerFrontier() + 1)
+ dataLog(" We freed up danger slots!\n");
+ } while (firstSafe != dangerFrontier() + 1);
+
+ return failures.isEmpty();
+}
+
+void CallFrameShuffler::prepareAny()
+{
+ ASSERT(!isUndecided());
+
+ updateDangerFrontier();
+
+ // First, we try to store any value that goes above the danger
+ // frontier. This never increases register pressure, since we only
+ // load and store when any register used for the load will be
+ // freed again once the stores are done (i.e., all stores are above
+ // the danger frontier, and there is no wanted register).
+ performSafeWrites();
+
+ // At this point, we couldn't have more available registers than
+ // we have without spilling: all values currently in registers
+ // either require a write to the danger zone, or have a wanted
+ // register, which means that in any case they will have to go
+ // through registers again.
+
+ // We now slowly free up the danger zone by first loading the old
+ // value on the danger frontier, spilling as many registers as
+ // needed to do so and ensuring that the corresponding slot in the
+ // new frame is now ready to be written. Then, we store the old
+ // value to its target location if possible (we could have failed
+ // to load it previously due to high pressure). Finally, we write
+ // to any of the newly safe slots that we can, which could free up
+ // registers (hence why we do it eagerly).
+ for (VirtualRegister reg = dangerFrontier(); reg >= firstNew(); reg -= 1) {
+ if (reg == dangerFrontier()) {
+ if (verbose)
+ dataLog(" Next slot (NEW ", reg, ") is the danger frontier\n");
+ CachedRecovery* cachedRecovery { getOld(newAsOld(dangerFrontier())) };
+ ASSERT(cachedRecovery);
+ ensureLoad(*cachedRecovery);
+ emitLoad(*cachedRecovery);
+ ensureBox(*cachedRecovery);
+ emitBox(*cachedRecovery);
+ if (hasOnlySafeWrites(*cachedRecovery))
+ tryWrites(*cachedRecovery);
+ } else if (verbose)
+ dataLog(" Next slot is NEW ", reg, "\n");
+
+ ASSERT(!isDangerNew(reg));
+ CachedRecovery* cachedRecovery = getNew(reg);
+ // This could be one of the header slots we don't care about.
+ if (!cachedRecovery) {
+ if (verbose)
+ dataLog(" + ", reg, " is OK\n");
+ continue;
+ }
+
+ if (canLoadAndBox(*cachedRecovery) && hasOnlySafeWrites(*cachedRecovery)
+ && !cachedRecovery->wantedJSValueRegs()
+ && cachedRecovery->wantedFPR() == InvalidFPRReg) {
+ emitLoad(*cachedRecovery);
+ emitBox(*cachedRecovery);
+ bool writesOK = tryWrites(*cachedRecovery);
+ ASSERT_UNUSED(writesOK, writesOK);
+ } else if (verbose)
+ dataLog(" - ", cachedRecovery->recovery(), " can't be handled just yet.\n");
+ }
+ ASSERT(dangerFrontier() < firstNew());
+
+ // Now, the danger zone is empty, but we still have a couple of
+ // things to do:
+ //
+ // 1) There could be remaining safe writes that failed earlier due
+ // to high register pressure and had nothing to do with the
+ // danger zone whatsoever.
+ //
+ // 2) Some wanted registers could have to be loaded (this could
+ // happen either when making a call to a new function with a
+ // lower number of arguments - since above here, we only load
+ // wanted registers when they are at the danger frontier -, or
+ // wanted registers when they are at the danger frontier - or
+ //
+ // 3) Some wanted registers could have been loaded in the wrong
+ // registers
+ //
+ // 4) We have to take care of some bookkeeping - namely, storing
+ // the argument count and updating the stack pointer.
+
+ // At this point, we must have enough registers available for
+ // handling 1). None of the loads can fail because we have been
+ // eagerly freeing up registers in all the previous phases - so
+ // the only values that are in registers at this point must have
+ // wanted registers.
+ if (verbose)
+ dataLog(" Danger zone is clear, performing remaining writes.\n");
+ for (VirtualRegister reg = firstNew(); reg <= lastNew(); reg += 1) {
+ CachedRecovery* cachedRecovery { getNew(reg) };
+ if (!cachedRecovery)
+ continue;
+
+ emitLoad(*cachedRecovery);
+ emitBox(*cachedRecovery);
+ bool writesOK = tryWrites(*cachedRecovery);
+ ASSERT_UNUSED(writesOK, writesOK);
+ }
+
+#if USE(JSVALUE64)
+ if (m_tagTypeNumber != InvalidGPRReg && m_newRegisters[m_tagTypeNumber])
+ releaseGPR(m_tagTypeNumber);
+#endif
+
+ // Handle 2) by loading all registers. We don't have to do any
+ // writes, since they have been taken care of above.
+ if (verbose)
+ dataLog(" Loading wanted registers into registers\n");
+ for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
+ CachedRecovery* cachedRecovery { m_newRegisters[reg] };
+ if (!cachedRecovery)
+ continue;
+
+ emitLoad(*cachedRecovery);
+ emitBox(*cachedRecovery);
+ ASSERT(cachedRecovery->targets().isEmpty());
+ }
+
+#if USE(JSVALUE64)
+ if (m_tagTypeNumber != InvalidGPRReg)
+ releaseGPR(m_tagTypeNumber);
+#endif
+
+ // At this point, we have read everything we cared about from the
+ // stack, and written everything we had to to the stack.
+ if (verbose)
+ dataLog(" Callee frame is fully set up\n");
+ if (!ASSERT_DISABLED) {
+ for (VirtualRegister reg = firstNew(); reg <= lastNew(); reg += 1)
+ ASSERT_UNUSED(reg, !getNew(reg));
+
+ for (CachedRecovery* cachedRecovery : m_cachedRecoveries) {
+ ASSERT_UNUSED(cachedRecovery, cachedRecovery->targets().isEmpty());
+ ASSERT(!cachedRecovery->recovery().isInJSStack());
+ }
+ }
+
+ // We need to handle 4) first because it implies releasing
+ // m_newFrameBase, which could be a wanted register.
+ if (verbose)
+ dataLog(" * Storing the argument count into ", VirtualRegister { JSStack::ArgumentCount }, "\n");
+ m_jit.store32(MacroAssembler::TrustedImm32(0),
+ addressForNew(VirtualRegister { JSStack::ArgumentCount }).withOffset(TagOffset));
+ m_jit.store32(MacroAssembler::TrustedImm32(argCount()),
+ addressForNew(VirtualRegister { JSStack::ArgumentCount }).withOffset(PayloadOffset));
+
+ if (!isSlowPath()) {
+ ASSERT(m_newFrameBase != MacroAssembler::stackPointerRegister);
+ if (verbose)
+ dataLog(" Releasing the new frame base pointer\n");
+ m_jit.move(m_newFrameBase, MacroAssembler::stackPointerRegister);
+ releaseGPR(m_newFrameBase);
+ }
+
+ // Finally we handle 3)
+ if (verbose)
+ dataLog(" Ensuring wanted registers are in the right register\n");
+ for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
+ CachedRecovery* cachedRecovery { m_newRegisters[reg] };
+ if (!cachedRecovery)
+ continue;
+
+ emitDisplace(*cachedRecovery);
+ }
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
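
prepareAny() above is essentially a memory-safe overlapping copy: the new frame is written over the old one, so a location must not be overwritten while some not-yet-written slot still needs to read it. The standalone toy below (all indices and the overlap are invented, and plain arrays and a std::map stand in for the JS stack and registers) shows the same discipline: walk the new frame from the top down and stash an old value in a "register" just before the write that would clobber it, which is what loading the recovery at the danger frontier does in the real shuffler.

    #include <cassert>
    #include <map>
    #include <vector>

    int main()
    {
        std::vector<int> memory { 10, 20, 30 };   // old frame at addresses 0..2
        const int newBase = 1;                    // new frame will occupy addresses 1..3
        // New slot j must end up holding the value originally at old address src[j].
        const std::vector<int> src { 2, 0, 1 };

        std::vector<int> wanted;
        for (int s : src)
            wanted.push_back(memory[s]);          // values as they were before any write

        memory.resize(newBase + src.size(), 0);
        std::map<int, int> regs;                  // "registers" holding stashed old values
        std::vector<bool> written(src.size(), false);

        // Walk the new frame from the top down (the danger frontier retreats as we go).
        for (int j = static_cast<int>(src.size()) - 1; j >= 0; --j) {
            int dest = newBase + j;
            // If an unwritten slot still reads from 'dest', stash that old value first,
            // like loading the recovery at the danger frontier into a register.
            for (size_t k = 0; k < src.size(); ++k) {
                if (!written[k] && src[k] == dest && !regs.count(dest))
                    regs[dest] = memory[dest];
            }
            int value = regs.count(src[j]) ? regs[src[j]] : memory[src[j]];
            memory[dest] = value;
            written[j] = true;
        }

        for (size_t j = 0; j < src.size(); ++j)
            assert(memory[newBase + j] == wanted[j]);
        return 0;
    }
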
diff --git a/Source/JavaScriptCore/jit/CallFrameShuffler.h b/Source/JavaScriptCore/jit/CallFrameShuffler.h
new file mode 100644
index 000000000..d5e6f4253
--- /dev/null
+++ b/Source/JavaScriptCore/jit/CallFrameShuffler.h
@@ -0,0 +1,804 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef CallFrameShuffler_h
+#define CallFrameShuffler_h
+
+#if ENABLE(JIT)
+
+#include "CachedRecovery.h"
+#include "CallFrameShuffleData.h"
+#include "MacroAssembler.h"
+#include "RegisterSet.h"
+#include "StackAlignment.h"
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+class CallFrameShuffler {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ CallFrameShuffler(CCallHelpers&, const CallFrameShuffleData&);
+
+ void dump(PrintStream&) const;
+
+ // Any register that has been locked or acquired must be released
+ // before calling prepareForTailCall() or prepareForSlowPath().
+ void lockGPR(GPRReg gpr)
+ {
+ ASSERT(!m_lockedRegisters.get(gpr));
+ m_lockedRegisters.set(gpr);
+ if (verbose)
+ dataLog(" * Locking ", gpr, "\n");
+ }
+
+ GPRReg acquireGPR()
+ {
+ ensureGPR();
+ GPRReg gpr { getFreeGPR() };
+ ASSERT(!m_registers[gpr]);
+ lockGPR(gpr);
+ return gpr;
+ }
+
+ void releaseGPR(GPRReg gpr)
+ {
+ if (verbose) {
+ if (m_lockedRegisters.get(gpr))
+ dataLog(" * Releasing ", gpr, "\n");
+ else
+ dataLog(" * ", gpr, " was not locked\n");
+ }
+ m_lockedRegisters.clear(gpr);
+ }
+
+ void restoreGPR(GPRReg gpr)
+ {
+ if (!m_newRegisters[gpr])
+ return;
+
+ ensureGPR();
+#if USE(JSVALUE32_64)
+ GPRReg tempGPR { getFreeGPR() };
+ lockGPR(tempGPR);
+ ensureGPR();
+ releaseGPR(tempGPR);
+#endif
+ emitDisplace(*m_newRegisters[gpr]);
+ }
+
+ // You can only take a snapshot if the recovery has not started
+ // yet. The only operations that are valid before taking a
+ // snapshot are lockGPR(), acquireGPR() and releaseGPR().
+ //
+ // Locking status is *NOT* preserved by the snapshot: it only
+ // records where the arguments/callee/callee-save registers are,
+ // taking into account any spilling that acquireGPR() could have
+ // done.
+ CallFrameShuffleData snapshot() const
+ {
+ ASSERT(isUndecided());
+
+ CallFrameShuffleData data;
+ data.numLocals = numLocals();
+ data.callee = getNew(VirtualRegister { JSStack::Callee })->recovery();
+ data.args.resize(argCount());
+ for (size_t i = 0; i < argCount(); ++i)
+ data.args[i] = getNew(virtualRegisterForArgument(i))->recovery();
+ for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
+ CachedRecovery* cachedRecovery { m_newRegisters[reg] };
+ if (!cachedRecovery)
+ continue;
+
+#if USE(JSVALUE64)
+ data.registers[reg] = cachedRecovery->recovery();
+#else
+ RELEASE_ASSERT_NOT_REACHED();
+#endif
+ }
+ return data;
+ }
+
+ // Ask the shuffler to put the callee into some registers once the
+ // shuffling is done. You should call this before any of the
+ // prepare() methods, and must not take a snapshot afterwards, as
+ // this would crash on 32-bit platforms.
+ void setCalleeJSValueRegs(JSValueRegs jsValueRegs)
+ {
+ ASSERT(isUndecided());
+ ASSERT(!getNew(jsValueRegs));
+ CachedRecovery* cachedRecovery { getNew(VirtualRegister(JSStack::Callee)) };
+ ASSERT(cachedRecovery);
+ addNew(jsValueRegs, cachedRecovery->recovery());
+ }
+
+ // Ask the shuffler to assume the callee has already been checked
+ // to be a cell. This is a no-op on 64-bit platforms, but it frees
+ // up a GPR on 32-bit platforms.
+ // You obviously must have ensured that this is the case before
+ // running any of the prepare methods.
+ void assumeCalleeIsCell()
+ {
+#if USE(JSVALUE32_64)
+ CachedRecovery& calleeCachedRecovery = *getNew(VirtualRegister(JSStack::Callee));
+ switch (calleeCachedRecovery.recovery().technique()) {
+ case InPair:
+ updateRecovery(
+ calleeCachedRecovery,
+ ValueRecovery::inGPR(
+ calleeCachedRecovery.recovery().payloadGPR(),
+ DataFormatCell));
+ break;
+ case DisplacedInJSStack:
+ updateRecovery(
+ calleeCachedRecovery,
+ ValueRecovery::displacedInJSStack(
+ calleeCachedRecovery.recovery().virtualRegister(),
+ DataFormatCell));
+ break;
+ case InFPR:
+ case UnboxedCellInGPR:
+ case CellDisplacedInJSStack:
+ break;
+ case Constant:
+ ASSERT(calleeCachedRecovery.recovery().constant().isCell());
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+#endif
+ }
+
+ // This will emit code to build the new frame over the old one.
+ void prepareForTailCall();
+
+ // This will emit code to build the new frame as if performing a
+ // regular call. However, the callee save registers will be
+ // restored, and any locals (not the header or arguments) of the
+ // current frame can be overwritten.
+ //
+ // A frame built using prepareForSlowPath() should be used either
+ // to throw an exception in, or destroyed using
+ // CCallHelpers::prepareForTailCallSlow() followed by a tail call.
+ void prepareForSlowPath();
+
+private:
+ static const bool verbose = false;
+
+ CCallHelpers& m_jit;
+
+ void prepareAny();
+
+ void spill(CachedRecovery&);
+
+ // "box" is arguably a bad name here. The meaning is that after
+ // calling emitBox(), you ensure that subsequently calling
+ // emitStore() will be able to store the value without additional
+ // transformation. In particular, this is a no-op for constants,
+ // and is a complete no-op on 32-bit platforms since any unboxed
+ // value can still be stored by storing the payload and a
+ // statically known tag.
+ void emitBox(CachedRecovery&);
+
+ bool canBox(CachedRecovery& cachedRecovery)
+ {
+ if (cachedRecovery.boxingRequiresGPR() && getFreeGPR() == InvalidGPRReg)
+ return false;
+
+ if (cachedRecovery.boxingRequiresFPR() && getFreeFPR() == InvalidFPRReg)
+ return false;
+
+ return true;
+ }
+
+ void ensureBox(CachedRecovery& cachedRecovery)
+ {
+ if (canBox(cachedRecovery))
+ return;
+
+ if (cachedRecovery.boxingRequiresGPR())
+ ensureGPR();
+
+ if (cachedRecovery.boxingRequiresFPR())
+ ensureFPR();
+ }
+
+ void emitLoad(CachedRecovery&);
+
+ bool canLoad(CachedRecovery&);
+
+ void ensureLoad(CachedRecovery& cachedRecovery)
+ {
+ if (canLoad(cachedRecovery))
+ return;
+
+ ASSERT(cachedRecovery.loadsIntoGPR() || cachedRecovery.loadsIntoFPR());
+
+ if (cachedRecovery.loadsIntoFPR()) {
+ if (cachedRecovery.loadsIntoGPR())
+ ensureRegister();
+ else
+ ensureFPR();
+ } else
+ ensureGPR();
+ }
+
+ bool canLoadAndBox(CachedRecovery& cachedRecovery)
+ {
+ // We don't have interfering loads & boxes
+ ASSERT(!cachedRecovery.loadsIntoFPR() || !cachedRecovery.boxingRequiresFPR());
+ ASSERT(!cachedRecovery.loadsIntoGPR() || !cachedRecovery.boxingRequiresGPR());
+
+ return canLoad(cachedRecovery) && canBox(cachedRecovery);
+ }
+
+ DataFormat emitStore(CachedRecovery&, MacroAssembler::Address);
+
+ void emitDisplace(CachedRecovery&);
+
+ void emitDeltaCheck();
+
+ Bag<CachedRecovery> m_cachedRecoveries;
+
+ void updateRecovery(CachedRecovery& cachedRecovery, ValueRecovery recovery)
+ {
+ clearCachedRecovery(cachedRecovery.recovery());
+ cachedRecovery.setRecovery(recovery);
+ setCachedRecovery(recovery, &cachedRecovery);
+ }
+
+ CachedRecovery* getCachedRecovery(ValueRecovery);
+
+ CachedRecovery* setCachedRecovery(ValueRecovery, CachedRecovery*);
+
+ void clearCachedRecovery(ValueRecovery recovery)
+ {
+ if (!recovery.isConstant())
+ setCachedRecovery(recovery, nullptr);
+ }
+
+ CachedRecovery* addCachedRecovery(ValueRecovery recovery)
+ {
+ if (recovery.isConstant())
+ return m_cachedRecoveries.add(recovery);
+ CachedRecovery* cachedRecovery = getCachedRecovery(recovery);
+ if (!cachedRecovery)
+ return setCachedRecovery(recovery, m_cachedRecoveries.add(recovery));
+ return cachedRecovery;
+ }
+
+ // These are the recoveries currently present in the old frame's
+ // slots. A null CachedRecovery means we can trash the current
+ // value as we don't care about it.
+ Vector<CachedRecovery*> m_oldFrame;
+
+ int numLocals() const
+ {
+ return m_oldFrame.size() - JSStack::CallerFrameAndPCSize;
+ }
+
+ CachedRecovery* getOld(VirtualRegister reg) const
+ {
+ return m_oldFrame[JSStack::CallerFrameAndPCSize - reg.offset() - 1];
+ }
+
+ void setOld(VirtualRegister reg, CachedRecovery* cachedRecovery)
+ {
+ m_oldFrame[JSStack::CallerFrameAndPCSize - reg.offset() - 1] = cachedRecovery;
+ }
+
+ VirtualRegister firstOld() const
+ {
+ return VirtualRegister { static_cast<int>(-numLocals()) };
+ }
+
+ VirtualRegister lastOld() const
+ {
+ return VirtualRegister { JSStack::CallerFrameAndPCSize - 1 };
+ }
+
+ bool isValidOld(VirtualRegister reg) const
+ {
+ return reg >= firstOld() && reg <= lastOld();
+ }
+
+ bool m_didExtendFrame { false };
+
+ void extendFrameIfNeeded();
+
+ // This stores, for each slot in the new frame, information about
+ // the recovery for the value that should eventually go into that
+ // slot.
+ //
+ // Once the slot has been written, the corresponding entry in
+ // m_newFrame will be empty.
+ Vector<CachedRecovery*> m_newFrame;
+
+ size_t argCount() const
+ {
+ return m_newFrame.size() - JSStack::CallFrameHeaderSize;
+ }
+
+ CachedRecovery* getNew(VirtualRegister newRegister) const
+ {
+ return m_newFrame[newRegister.offset()];
+ }
+
+ void setNew(VirtualRegister newRegister, CachedRecovery* cachedRecovery)
+ {
+ m_newFrame[newRegister.offset()] = cachedRecovery;
+ }
+
+ void addNew(VirtualRegister newRegister, ValueRecovery recovery)
+ {
+ CachedRecovery* cachedRecovery = addCachedRecovery(recovery);
+ cachedRecovery->addTarget(newRegister);
+ setNew(newRegister, cachedRecovery);
+ }
+
+ VirtualRegister firstNew() const
+ {
+ return VirtualRegister { 0 };
+ }
+
+ VirtualRegister lastNew() const
+ {
+ return VirtualRegister { static_cast<int>(m_newFrame.size()) - 1 };
+ }
+
+ bool isValidNew(VirtualRegister reg) const
+ {
+ return reg >= firstNew() && reg <= lastNew();
+ }
+
+
+ int m_alignedOldFrameSize;
+ int m_alignedNewFrameSize;
+
+ // This is the distance, in slots, between the base of the new
+ // frame and the base of the old frame. It could be negative when
+ // preparing for a tail call to a function with smaller argument
+ // count.
+ //
+ // We will overwrite this appropriately for slow path calls, but
+ // we initialize it as if doing a fast path for the spills we
+ // could do while undecided (typically while calling acquireGPR()
+ // for a polymorphic call).
+ int m_frameDelta;
+
+ VirtualRegister newAsOld(VirtualRegister reg) const
+ {
+ return reg - m_frameDelta;
+ }
+
+ // This stores the set of locked registers, i.e. registers for
+ // which we have an implicit requirement that they are not changed.
+ //
+ // This will usually contain the link register on architectures
+ // that have one, any scratch register used by the macro assembler
+ // (e.g. r11 on X86_64), as well as any register that we use for
+ // addressing (see m_oldFrameBase and m_newFrameBase).
+ //
+ // We also use this to lock registers temporarily, for instance to
+ // ensure that we have at least 2 available registers for loading
+ // a pair on 32-bit platforms.
+ mutable RegisterSet m_lockedRegisters;
+
+ // This stores the current recoveries present in registers. A null
+ // CachedRecovery means we can trash the current value as we don't
+ // care about it.
+ RegisterMap<CachedRecovery*> m_registers;
+
+#if USE(JSVALUE64)
+ mutable GPRReg m_tagTypeNumber;
+
+ bool tryAcquireTagTypeNumber();
+#endif
+
+ // This stores, for each register, information about the recovery
+ // for the value that should eventually go into that register. The
+ // only registers that have a target recovery will be callee-save
+ // registers, as well as possibly one JSValueRegs for holding the
+ // callee.
+ //
+ // Once the correct value has been put into the registers, and
+ // contrary to what we do with m_newFrame, we keep the entry in
+ // m_newRegisters to simplify spilling.
+ RegisterMap<CachedRecovery*> m_newRegisters;
+
+ template<typename CheckFunctor>
+ Reg getFreeRegister(const CheckFunctor& check) const
+ {
+ Reg nonTemp { };
+ for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
+ if (m_lockedRegisters.get(reg))
+ continue;
+
+ if (!check(reg))
+ continue;
+
+ if (!m_registers[reg]) {
+ if (!m_newRegisters[reg])
+ return reg;
+ if (!nonTemp)
+ nonTemp = reg;
+ }
+ }
+
+#if USE(JSVALUE64)
+ if (!nonTemp && m_tagTypeNumber != InvalidGPRReg && check(Reg { m_tagTypeNumber })) {
+ ASSERT(m_lockedRegisters.get(m_tagTypeNumber));
+ m_lockedRegisters.clear(m_tagTypeNumber);
+ nonTemp = Reg { m_tagTypeNumber };
+ m_tagTypeNumber = InvalidGPRReg;
+ }
+#endif
+ return nonTemp;
+ }
+
+ GPRReg getFreeTempGPR() const
+ {
+ Reg freeTempGPR { getFreeRegister([this] (Reg reg) { return reg.isGPR() && !m_newRegisters[reg]; }) };
+ if (!freeTempGPR)
+ return InvalidGPRReg;
+ return freeTempGPR.gpr();
+ }
+
+ GPRReg getFreeGPR() const
+ {
+ Reg freeGPR { getFreeRegister([] (Reg reg) { return reg.isGPR(); }) };
+ if (!freeGPR)
+ return InvalidGPRReg;
+ return freeGPR.gpr();
+ }
+
+ FPRReg getFreeFPR() const
+ {
+ Reg freeFPR { getFreeRegister([] (Reg reg) { return reg.isFPR(); }) };
+ if (!freeFPR)
+ return InvalidFPRReg;
+ return freeFPR.fpr();
+ }
+
+ bool hasFreeRegister() const
+ {
+ return static_cast<bool>(getFreeRegister([] (Reg) { return true; }));
+ }
+
+ // This frees up a register satisfying the check functor (this
+ // functor could theoretically have any kind of logic, but it must
+ // ensure that it only returns true for recoveries held in registers - spill
+ // assumes and asserts that it is passed a cachedRecovery stored in a
+ // register).
+ template<typename CheckFunctor>
+ void ensureRegister(const CheckFunctor& check)
+ {
+ // If we can spill a callee-save, that's best, because it will
+ // free up a register that would otherwise have been taken for the
+ // longest amount of time.
+ //
+ // We could try to bias towards those that are not in their
+ // target registers yet, but the gain is probably super
+ // small. Unless you have a huge number of arguments (at least
+ // around twice the number of available registers on your
+ // architecture), no spilling is going to take place anyway.
+ for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
+ if (m_lockedRegisters.get(reg))
+ continue;
+
+ CachedRecovery* cachedRecovery { m_newRegisters[reg] };
+ if (!cachedRecovery)
+ continue;
+
+ if (check(*cachedRecovery)) {
+ if (verbose)
+ dataLog(" ", cachedRecovery->recovery(), " looks like a good spill candidate\n");
+ spill(*cachedRecovery);
+ return;
+ }
+ }
+
+ // We use the cachedRecovery associated with the first new slot we
+ // can, because that is the one whose write will become possible
+ // last, i.e. the one we would otherwise have had to retain in
+ // registers for the longest.
+ for (VirtualRegister reg = firstNew(); reg <= lastNew(); reg += 1) {
+ CachedRecovery* cachedRecovery { getNew(reg) };
+ if (!cachedRecovery)
+ continue;
+
+ if (check(*cachedRecovery)) {
+ spill(*cachedRecovery);
+ return;
+ }
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ void ensureRegister()
+ {
+ if (hasFreeRegister())
+ return;
+
+ if (verbose)
+ dataLog(" Finding a register to spill\n");
+ ensureRegister(
+ [this] (const CachedRecovery& cachedRecovery) {
+ if (cachedRecovery.recovery().isInGPR())
+ return !m_lockedRegisters.get(cachedRecovery.recovery().gpr());
+ if (cachedRecovery.recovery().isInFPR())
+ return !m_lockedRegisters.get(cachedRecovery.recovery().fpr());
+#if USE(JSVALUE32_64)
+ if (cachedRecovery.recovery().technique() == InPair) {
+ return !m_lockedRegisters.get(cachedRecovery.recovery().tagGPR())
+ && !m_lockedRegisters.get(cachedRecovery.recovery().payloadGPR());
+ }
+#endif
+ return false;
+ });
+ }
+
+ void ensureTempGPR()
+ {
+ if (getFreeTempGPR() != InvalidGPRReg)
+ return;
+
+ if (verbose)
+ dataLog(" Finding a temp GPR to spill\n");
+ ensureRegister(
+ [this] (const CachedRecovery& cachedRecovery) {
+ if (cachedRecovery.recovery().isInGPR()) {
+ return !m_lockedRegisters.get(cachedRecovery.recovery().gpr())
+ && !m_newRegisters[cachedRecovery.recovery().gpr()];
+ }
+#if USE(JSVALUE32_64)
+ if (cachedRecovery.recovery().technique() == InPair) {
+ return !m_lockedRegisters.get(cachedRecovery.recovery().tagGPR())
+ && !m_lockedRegisters.get(cachedRecovery.recovery().payloadGPR())
+ && !m_newRegisters[cachedRecovery.recovery().tagGPR()]
+ && !m_newRegisters[cachedRecovery.recovery().payloadGPR()];
+ }
+#endif
+ return false;
+ });
+ }
+
+ void ensureGPR()
+ {
+ if (getFreeGPR() != InvalidGPRReg)
+ return;
+
+ if (verbose)
+ dataLog(" Finding a GPR to spill\n");
+ ensureRegister(
+ [this] (const CachedRecovery& cachedRecovery) {
+ if (cachedRecovery.recovery().isInGPR())
+ return !m_lockedRegisters.get(cachedRecovery.recovery().gpr());
+#if USE(JSVALUE32_64)
+ if (cachedRecovery.recovery().technique() == InPair) {
+ return !m_lockedRegisters.get(cachedRecovery.recovery().tagGPR())
+ && !m_lockedRegisters.get(cachedRecovery.recovery().payloadGPR());
+ }
+#endif
+ return false;
+ });
+ }
+
+ void ensureFPR()
+ {
+ if (getFreeFPR() != InvalidFPRReg)
+ return;
+
+ if (verbose)
+ dataLog(" Finding an FPR to spill\n");
+ ensureRegister(
+ [this] (const CachedRecovery& cachedRecovery) {
+ if (cachedRecovery.recovery().isInFPR())
+ return !m_lockedRegisters.get(cachedRecovery.recovery().fpr());
+ return false;
+ });
+ }
+
+ CachedRecovery* getNew(JSValueRegs jsValueRegs) const
+ {
+#if USE(JSVALUE64)
+ return m_newRegisters[jsValueRegs.gpr()];
+#else
+ ASSERT(
+ jsValueRegs.tagGPR() == InvalidGPRReg || jsValueRegs.payloadGPR() == InvalidGPRReg
+ || m_newRegisters[jsValueRegs.payloadGPR()] == m_newRegisters[jsValueRegs.tagGPR()]);
+ if (jsValueRegs.payloadGPR() == InvalidGPRReg)
+ return m_newRegisters[jsValueRegs.tagGPR()];
+ return m_newRegisters[jsValueRegs.payloadGPR()];
+#endif
+ }
+
+ void addNew(JSValueRegs jsValueRegs, ValueRecovery recovery)
+ {
+ ASSERT(jsValueRegs && !getNew(jsValueRegs));
+ CachedRecovery* cachedRecovery = addCachedRecovery(recovery);
+#if USE(JSVALUE64)
+ if (cachedRecovery->wantedJSValueRegs())
+ m_newRegisters[cachedRecovery->wantedJSValueRegs().gpr()] = nullptr;
+ m_newRegisters[jsValueRegs.gpr()] = cachedRecovery;
+#else
+ if (JSValueRegs oldRegs { cachedRecovery->wantedJSValueRegs() }) {
+ if (oldRegs.payloadGPR())
+ m_newRegisters[oldRegs.payloadGPR()] = nullptr;
+ if (oldRegs.tagGPR())
+ m_newRegisters[oldRegs.tagGPR()] = nullptr;
+ }
+ if (jsValueRegs.payloadGPR() != InvalidGPRReg)
+ m_newRegisters[jsValueRegs.payloadGPR()] = cachedRecovery;
+ if (jsValueRegs.tagGPR() != InvalidGPRReg)
+ m_newRegisters[jsValueRegs.tagGPR()] = cachedRecovery;
+#endif
+ ASSERT(!cachedRecovery->wantedJSValueRegs());
+ cachedRecovery->setWantedJSValueRegs(jsValueRegs);
+ }
+
+ void addNew(FPRReg fpr, ValueRecovery recovery)
+ {
+ ASSERT(fpr != InvalidFPRReg && !m_newRegisters[fpr]);
+ CachedRecovery* cachedRecovery = addCachedRecovery(recovery);
+ m_newRegisters[fpr] = cachedRecovery;
+ ASSERT(cachedRecovery->wantedFPR() == InvalidFPRReg);
+ cachedRecovery->setWantedFPR(fpr);
+ }
+
+ // m_oldFrameBase is the register relative to which we access
+ // slots in the old call frame, with an additional offset of
+ // m_oldFrameOffset.
+ //
+ // - For an actual tail call, m_oldFrameBase is the stack
+ // pointer, and m_oldFrameOffset is the number of locals of the
+ // tail caller's frame. We use such stack pointer-based
+ // addressing because it allows us to load the tail caller's
+ // caller's frame pointer in the frame pointer register
+ // immediately instead of awkwardly keeping it around on the
+ // stack.
+ //
+ // - For a slow path call, m_oldFrameBase is just the frame
+ // pointer, and m_oldFrameOffset is 0.
+ GPRReg m_oldFrameBase { MacroAssembler::framePointerRegister };
+ int m_oldFrameOffset { 0 };
+
+ MacroAssembler::Address addressForOld(VirtualRegister reg) const
+ {
+ return MacroAssembler::Address(m_oldFrameBase,
+ (m_oldFrameOffset + reg.offset()) * sizeof(Register));
+ }
+
+ // m_newFrameBase is the register relative to which we access
+ // slots in the new call frame, and we always make it point to
+ // wherever the stack pointer will be right before making the
+ // actual call/jump. The actual base of the new frame is at offset
+ // m_newFrameOffset relative to m_newFrameBase.
+ //
+ // - For an actual tail call, m_newFrameBase is computed
+ // dynamically, and m_newFrameOffset varies between 0 and -2
+ // depending on the architecture's calling convention (see
+ // prepareForTailCall).
+ //
+ // - For a slow path call, m_newFrameBase is the actual stack
+ // pointer, and m_newFrameOffset is - CallerFrameAndPCSize,
+ // following the convention for a regular call.
+ GPRReg m_newFrameBase { InvalidGPRReg };
+ int m_newFrameOffset { 0 };
+
+ bool isUndecided() const
+ {
+ return m_newFrameBase == InvalidGPRReg;
+ }
+
+ bool isSlowPath() const
+ {
+ return m_newFrameBase == MacroAssembler::stackPointerRegister;
+ }
+
+ MacroAssembler::Address addressForNew(VirtualRegister reg) const
+ {
+ return MacroAssembler::Address(m_newFrameBase,
+ (m_newFrameOffset + reg.offset()) * sizeof(Register));
+ }
+
+ // We use a concept of "danger zone". The danger zone consists of
+ // all the writes in the new frame that could overlap with reads
+ // in the old frame.
+ //
+    // Because the actual argument count can exceed the parameter count,
+    // when preparing a tail call we must assume that writing to a slot of
+    // the new frame could overlap not only with the corresponding slot of
+    // the old frame, but also with any slot above it. The danger zone is
+    // therefore bounded by the "danger frontier": the highest new-frame
+    // slot whose write could still clobber an old-frame slot we care
+    // about. In other words, the danger zone contains all the slots
+    // between the first slot of the new frame and the danger frontier.
+    // Since the danger frontier is expressed relative to the new frame,
+    // it is stored as a virtual register *in the new frame*.
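+    //
+    // A sketch with made-up numbers: if the old frame was entered with 8
+    // arguments but the new (tail call) frame only needs 2, writing the new
+    // frame's slots could clobber old-frame slots whose recoveries we have
+    // not read yet; those writes fall in the danger zone and are deferred
+    // until the old-frame values they could clobber have been loaded.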
+ VirtualRegister m_dangerFrontier;
+
+ VirtualRegister dangerFrontier() const
+ {
+ ASSERT(!isUndecided());
+
+ return m_dangerFrontier;
+ }
+
+ bool isDangerNew(VirtualRegister reg) const
+ {
+ ASSERT(!isUndecided() && isValidNew(reg));
+ return reg <= dangerFrontier();
+ }
+
+ void updateDangerFrontier()
+ {
+ ASSERT(!isUndecided());
+
+ m_dangerFrontier = firstNew() - 1;
+ for (VirtualRegister reg = lastNew(); reg >= firstNew(); reg -= 1) {
+ if (!getNew(reg) || !isValidOld(newAsOld(reg)) || !getOld(newAsOld(reg)))
+ continue;
+
+ m_dangerFrontier = reg;
+ if (verbose)
+ dataLog(" Danger frontier now at NEW ", m_dangerFrontier, "\n");
+            return;
+ }
+ if (verbose)
+ dataLog(" All clear! Danger zone is empty.\n");
+ }
+
+ // A safe write is a write that never writes into the danger zone.
+ bool hasOnlySafeWrites(CachedRecovery& cachedRecovery) const
+ {
+ for (VirtualRegister target : cachedRecovery.targets()) {
+ if (isDangerNew(target))
+ return false;
+ }
+ return true;
+ }
+
+    // You must ensure that there are no dangerous writes left before
+    // calling this function.
+ bool tryWrites(CachedRecovery&);
+
+ // This function tries to ensure that there is no longer any
+ // possible safe write, i.e. all remaining writes are either to
+ // the danger zone or callee save restorations.
+ //
+ // It returns false if it was unable to perform some safe writes
+ // due to high register pressure.
+ bool performSafeWrites();
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // CallFrameShuffler_h
diff --git a/Source/JavaScriptCore/jit/CallFrameShuffler32_64.cpp b/Source/JavaScriptCore/jit/CallFrameShuffler32_64.cpp
new file mode 100644
index 000000000..5dfe96e81
--- /dev/null
+++ b/Source/JavaScriptCore/jit/CallFrameShuffler32_64.cpp
@@ -0,0 +1,305 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "CallFrameShuffler.h"
+
+#if ENABLE(JIT) && USE(JSVALUE32_64)
+
+#include "CCallHelpers.h"
+#include "DataFormat.h"
+#include "JSCInlines.h"
+
+namespace JSC {
+
+DataFormat CallFrameShuffler::emitStore(CachedRecovery& location, MacroAssembler::Address address)
+{
+ ASSERT(!location.recovery().isInJSStack());
+
+ switch (location.recovery().technique()) {
+ case UnboxedInt32InGPR:
+ m_jit.store32(MacroAssembler::TrustedImm32(JSValue::Int32Tag),
+ address.withOffset(TagOffset));
+ m_jit.store32(location.recovery().gpr(), address.withOffset(PayloadOffset));
+ return DataFormatInt32;
+ case UnboxedCellInGPR:
+ m_jit.store32(MacroAssembler::TrustedImm32(JSValue::CellTag),
+ address.withOffset(TagOffset));
+ m_jit.store32(location.recovery().gpr(), address.withOffset(PayloadOffset));
+ return DataFormatCell;
+ case Constant:
+ m_jit.storeTrustedValue(location.recovery().constant(), address);
+ return DataFormatJS;
+ case InPair:
+ m_jit.storeValue(location.recovery().jsValueRegs(), address);
+ return DataFormatJS;
+ case UnboxedBooleanInGPR:
+ m_jit.store32(MacroAssembler::TrustedImm32(JSValue::BooleanTag),
+ address.withOffset(TagOffset));
+ m_jit.store32(location.recovery().gpr(), address.withOffset(PayloadOffset));
+ return DataFormatBoolean;
+ case InFPR:
+ case UnboxedDoubleInFPR:
+ m_jit.storeDouble(location.recovery().fpr(), address);
+ return DataFormatJS;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+}
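+
+// Note on the cases above: in the 32-bit value representation a JSValue
+// occupies two adjacent 32-bit words, so unboxed values are materialized by
+// storing the payload at address + PayloadOffset and a constant type tag
+// (Int32Tag, CellTag or BooleanTag) at address + TagOffset, while full
+// JSValues and doubles are stored in one piece.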
+
+void CallFrameShuffler::emitBox(CachedRecovery& location)
+{
+    // Nothing to do, we're good! JSValues and doubles can be stored
+    // as-is, and the other formats need no transformation beyond storing
+    // a constant tag alongside the payload.
+ ASSERT_UNUSED(location, canBox(location));
+}
+
+void CallFrameShuffler::emitLoad(CachedRecovery& location)
+{
+ if (!location.recovery().isInJSStack())
+ return;
+
+ if (verbose)
+ dataLog(" * Loading ", location.recovery(), " into ");
+ VirtualRegister reg { location.recovery().virtualRegister() };
+ MacroAssembler::Address address { addressForOld(reg) };
+
+ bool tryFPR { true };
+ JSValueRegs wantedJSValueRegs { location.wantedJSValueRegs() };
+ if (wantedJSValueRegs) {
+ if (wantedJSValueRegs.payloadGPR() != InvalidGPRReg
+ && !m_registers[wantedJSValueRegs.payloadGPR()]
+ && !m_lockedRegisters.get(wantedJSValueRegs.payloadGPR()))
+ tryFPR = false;
+ if (wantedJSValueRegs.tagGPR() != InvalidGPRReg
+ && !m_registers[wantedJSValueRegs.tagGPR()]
+ && !m_lockedRegisters.get(wantedJSValueRegs.tagGPR()))
+ tryFPR = false;
+ }
+
+ if (tryFPR && location.loadsIntoFPR()) {
+ FPRReg resultFPR = location.wantedFPR();
+ if (resultFPR == InvalidFPRReg || m_registers[resultFPR] || m_lockedRegisters.get(resultFPR))
+ resultFPR = getFreeFPR();
+ if (resultFPR != InvalidFPRReg) {
+ m_jit.loadDouble(address, resultFPR);
+ DataFormat dataFormat = DataFormatJS;
+ if (location.recovery().dataFormat() == DataFormatDouble)
+ dataFormat = DataFormatDouble;
+ updateRecovery(location,
+ ValueRecovery::inFPR(resultFPR, dataFormat));
+ if (verbose)
+ dataLog(location.recovery(), "\n");
+ if (reg == newAsOld(dangerFrontier()))
+ updateDangerFrontier();
+ return;
+ }
+ }
+
+ if (location.loadsIntoGPR()) {
+ GPRReg resultGPR { wantedJSValueRegs.payloadGPR() };
+ if (resultGPR == InvalidGPRReg || m_registers[resultGPR] || m_lockedRegisters.get(resultGPR))
+ resultGPR = getFreeGPR();
+ ASSERT(resultGPR != InvalidGPRReg);
+ m_jit.loadPtr(address.withOffset(PayloadOffset), resultGPR);
+ updateRecovery(location,
+ ValueRecovery::inGPR(resultGPR, location.recovery().dataFormat()));
+ if (verbose)
+ dataLog(location.recovery(), "\n");
+ if (reg == newAsOld(dangerFrontier()))
+ updateDangerFrontier();
+ return;
+ }
+
+ ASSERT(location.recovery().technique() == DisplacedInJSStack);
+ GPRReg payloadGPR { wantedJSValueRegs.payloadGPR() };
+ GPRReg tagGPR { wantedJSValueRegs.tagGPR() };
+ if (payloadGPR == InvalidGPRReg || m_registers[payloadGPR] || m_lockedRegisters.get(payloadGPR))
+ payloadGPR = getFreeGPR();
+ m_lockedRegisters.set(payloadGPR);
+ if (tagGPR == InvalidGPRReg || m_registers[tagGPR] || m_lockedRegisters.get(tagGPR))
+ tagGPR = getFreeGPR();
+ m_lockedRegisters.clear(payloadGPR);
+ ASSERT(payloadGPR != InvalidGPRReg && tagGPR != InvalidGPRReg && tagGPR != payloadGPR);
+ m_jit.loadPtr(address.withOffset(PayloadOffset), payloadGPR);
+ m_jit.loadPtr(address.withOffset(TagOffset), tagGPR);
+ updateRecovery(location,
+ ValueRecovery::inPair(tagGPR, payloadGPR));
+ if (verbose)
+ dataLog(location.recovery(), "\n");
+ if (reg == newAsOld(dangerFrontier()))
+ updateDangerFrontier();
+}
+
+bool CallFrameShuffler::canLoad(CachedRecovery& location)
+{
+ if (!location.recovery().isInJSStack())
+ return true;
+
+ if (location.loadsIntoFPR() && getFreeFPR() != InvalidFPRReg)
+ return true;
+
+ if (location.loadsIntoGPR() && getFreeGPR() != InvalidGPRReg)
+ return true;
+
+ if (location.recovery().technique() == DisplacedInJSStack) {
+ GPRReg payloadGPR { getFreeGPR() };
+ if (payloadGPR == InvalidGPRReg)
+ return false;
+ m_lockedRegisters.set(payloadGPR);
+ GPRReg tagGPR { getFreeGPR() };
+ m_lockedRegisters.clear(payloadGPR);
+ return tagGPR != InvalidGPRReg;
+ }
+
+ return false;
+}
+
+void CallFrameShuffler::emitDisplace(CachedRecovery& location)
+{
+ ASSERT(location.recovery().isInRegisters());
+ JSValueRegs wantedJSValueRegs { location.wantedJSValueRegs() };
+ ASSERT(wantedJSValueRegs); // We don't support wanted FPRs on 32bit platforms
+
+ GPRReg wantedTagGPR { wantedJSValueRegs.tagGPR() };
+ GPRReg wantedPayloadGPR { wantedJSValueRegs.payloadGPR() };
+
+ if (wantedTagGPR != InvalidGPRReg) {
+ ASSERT(!m_lockedRegisters.get(wantedTagGPR));
+ if (CachedRecovery* currentTag { m_registers[wantedTagGPR] }) {
+ if (currentTag == &location) {
+ if (verbose)
+ dataLog(" + ", wantedTagGPR, " is OK\n");
+ } else {
+ // This can never happen on 32bit platforms since we
+ // have at most one wanted JSValueRegs, for the
+ // callee, and no callee-save registers.
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+ }
+
+ if (wantedPayloadGPR != InvalidGPRReg) {
+ ASSERT(!m_lockedRegisters.get(wantedPayloadGPR));
+ if (CachedRecovery* currentPayload { m_registers[wantedPayloadGPR] }) {
+ if (currentPayload == &location) {
+ if (verbose)
+ dataLog(" + ", wantedPayloadGPR, " is OK\n");
+ } else {
+ // See above
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+ }
+
+ if (location.recovery().technique() == InPair
+ || location.recovery().isInGPR()) {
+ GPRReg payloadGPR;
+ if (location.recovery().technique() == InPair)
+ payloadGPR = location.recovery().payloadGPR();
+ else
+ payloadGPR = location.recovery().gpr();
+
+ if (wantedPayloadGPR == InvalidGPRReg)
+ wantedPayloadGPR = payloadGPR;
+
+ if (payloadGPR != wantedPayloadGPR) {
+ if (location.recovery().technique() == InPair
+ && wantedPayloadGPR == location.recovery().tagGPR()) {
+ if (verbose)
+ dataLog(" * Swapping ", payloadGPR, " and ", wantedPayloadGPR, "\n");
+ m_jit.swap(payloadGPR, wantedPayloadGPR);
+ updateRecovery(location,
+ ValueRecovery::inPair(payloadGPR, wantedPayloadGPR));
+ } else {
+ if (verbose)
+ dataLog(" * Moving ", payloadGPR, " into ", wantedPayloadGPR, "\n");
+ m_jit.move(payloadGPR, wantedPayloadGPR);
+ if (location.recovery().technique() == InPair) {
+ updateRecovery(location,
+ ValueRecovery::inPair(location.recovery().tagGPR(),
+ wantedPayloadGPR));
+ } else {
+ updateRecovery(location,
+ ValueRecovery::inGPR(wantedPayloadGPR, location.recovery().dataFormat()));
+ }
+ }
+ }
+
+ if (wantedTagGPR == InvalidGPRReg)
+ wantedTagGPR = getFreeGPR();
+ switch (location.recovery().dataFormat()) {
+ case DataFormatInt32:
+ if (verbose)
+ dataLog(" * Moving int32 tag into ", wantedTagGPR, "\n");
+ m_jit.move(MacroAssembler::TrustedImm32(JSValue::Int32Tag),
+ wantedTagGPR);
+ break;
+ case DataFormatCell:
+ if (verbose)
+ dataLog(" * Moving cell tag into ", wantedTagGPR, "\n");
+ m_jit.move(MacroAssembler::TrustedImm32(JSValue::CellTag),
+ wantedTagGPR);
+ break;
+ case DataFormatBoolean:
+ if (verbose)
+ dataLog(" * Moving boolean tag into ", wantedTagGPR, "\n");
+ m_jit.move(MacroAssembler::TrustedImm32(JSValue::BooleanTag),
+ wantedTagGPR);
+ break;
+ case DataFormatJS:
+ ASSERT(wantedTagGPR != location.recovery().payloadGPR());
+ if (wantedTagGPR != location.recovery().tagGPR()) {
+ if (verbose)
+ dataLog(" * Moving ", location.recovery().tagGPR(), " into ", wantedTagGPR, "\n");
+ m_jit.move(location.recovery().tagGPR(), wantedTagGPR);
+ }
+ break;
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ } else {
+ ASSERT(location.recovery().isInFPR());
+ if (wantedTagGPR == InvalidGPRReg) {
+ ASSERT(wantedPayloadGPR != InvalidGPRReg);
+ m_lockedRegisters.set(wantedPayloadGPR);
+ wantedTagGPR = getFreeGPR();
+ m_lockedRegisters.clear(wantedPayloadGPR);
+ }
+ if (wantedPayloadGPR == InvalidGPRReg) {
+ m_lockedRegisters.set(wantedTagGPR);
+ wantedPayloadGPR = getFreeGPR();
+ m_lockedRegisters.clear(wantedTagGPR);
+ }
+ m_jit.boxDouble(location.recovery().fpr(), wantedTagGPR, wantedPayloadGPR);
+ }
+ updateRecovery(location, ValueRecovery::inPair(wantedTagGPR, wantedPayloadGPR));
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT) && USE(JSVALUE32_64)
diff --git a/Source/JavaScriptCore/jit/CallFrameShuffler64.cpp b/Source/JavaScriptCore/jit/CallFrameShuffler64.cpp
new file mode 100644
index 000000000..2ef6ed111
--- /dev/null
+++ b/Source/JavaScriptCore/jit/CallFrameShuffler64.cpp
@@ -0,0 +1,369 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "CallFrameShuffler.h"
+
+#if ENABLE(JIT) && USE(JSVALUE64)
+
+#include "CCallHelpers.h"
+#include "DataFormat.h"
+#include "JSCInlines.h"
+
+namespace JSC {
+
+DataFormat CallFrameShuffler::emitStore(
+ CachedRecovery& cachedRecovery, MacroAssembler::Address address)
+{
+ ASSERT(!cachedRecovery.recovery().isInJSStack());
+
+ switch (cachedRecovery.recovery().technique()) {
+ case InGPR:
+ m_jit.storePtr(cachedRecovery.recovery().gpr(), address);
+ return DataFormatJS;
+ case UnboxedInt32InGPR:
+ m_jit.store32(cachedRecovery.recovery().gpr(), address.withOffset(PayloadOffset));
+ return DataFormatInt32;
+ case UnboxedInt52InGPR:
+ m_jit.rshift64(MacroAssembler::TrustedImm32(JSValue::int52ShiftAmount),
+ cachedRecovery.recovery().gpr());
+ FALLTHROUGH;
+ case UnboxedStrictInt52InGPR:
+ m_jit.storePtr(cachedRecovery.recovery().gpr(), address);
+ return DataFormatStrictInt52;
+ case UnboxedBooleanInGPR:
+ m_jit.storePtr(cachedRecovery.recovery().gpr(), address);
+ return DataFormatBoolean;
+ case UnboxedCellInGPR:
+ m_jit.storePtr(cachedRecovery.recovery().gpr(), address);
+ return DataFormatCell;
+ case UnboxedDoubleInFPR:
+ m_jit.storeDouble(cachedRecovery.recovery().fpr(), address);
+ return DataFormatDouble;
+ case InFPR:
+ m_jit.storeDouble(cachedRecovery.recovery().fpr(), address);
+ return DataFormatJS;
+ case Constant:
+ m_jit.storeTrustedValue(cachedRecovery.recovery().constant(), address);
+ return DataFormatJS;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+}
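+
+// Note on the Int52 cases above: an UnboxedInt52InGPR value is held
+// left-shifted by JSValue::int52ShiftAmount, so emitStore shifts it right
+// into strict (plain integer) form first and then falls through to the
+// UnboxedStrictInt52InGPR store.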
+
+void CallFrameShuffler::emitBox(CachedRecovery& cachedRecovery)
+{
+ ASSERT(canBox(cachedRecovery));
+ if (cachedRecovery.recovery().isConstant())
+ return;
+
+ if (cachedRecovery.recovery().isInGPR()) {
+ switch (cachedRecovery.recovery().dataFormat()) {
+ case DataFormatInt32:
+ if (verbose)
+ dataLog(" * Boxing ", cachedRecovery.recovery());
+ m_jit.zeroExtend32ToPtr(
+ cachedRecovery.recovery().gpr(),
+ cachedRecovery.recovery().gpr());
+ m_lockedRegisters.set(cachedRecovery.recovery().gpr());
+ if (tryAcquireTagTypeNumber())
+ m_jit.or64(m_tagTypeNumber, cachedRecovery.recovery().gpr());
+ else {
+ // We have to do this the hard way
+ m_jit.or64(MacroAssembler::TrustedImm64(TagTypeNumber),
+ cachedRecovery.recovery().gpr());
+ }
+ m_lockedRegisters.clear(cachedRecovery.recovery().gpr());
+ cachedRecovery.setRecovery(
+ ValueRecovery::inGPR(cachedRecovery.recovery().gpr(), DataFormatJS));
+ if (verbose)
+ dataLog(" into ", cachedRecovery.recovery(), "\n");
+ return;
+ case DataFormatInt52:
+ if (verbose)
+ dataLog(" * Boxing ", cachedRecovery.recovery());
+ m_jit.rshift64(MacroAssembler::TrustedImm32(JSValue::int52ShiftAmount),
+ cachedRecovery.recovery().gpr());
+ cachedRecovery.setRecovery(
+ ValueRecovery::inGPR(cachedRecovery.recovery().gpr(), DataFormatStrictInt52));
+ if (verbose)
+ dataLog(" into ", cachedRecovery.recovery(), "\n");
+ FALLTHROUGH;
+ case DataFormatStrictInt52: {
+ if (verbose)
+ dataLog(" * Boxing ", cachedRecovery.recovery());
+ FPRReg resultFPR = getFreeFPR();
+ ASSERT(resultFPR != InvalidFPRReg);
+ m_jit.convertInt64ToDouble(cachedRecovery.recovery().gpr(), resultFPR);
+ updateRecovery(cachedRecovery, ValueRecovery::inFPR(resultFPR, DataFormatDouble));
+ if (verbose)
+ dataLog(" into ", cachedRecovery.recovery(), "\n");
+ break;
+ }
+ case DataFormatBoolean:
+ if (verbose)
+ dataLog(" * Boxing ", cachedRecovery.recovery());
+ m_jit.add32(MacroAssembler::TrustedImm32(ValueFalse),
+ cachedRecovery.recovery().gpr());
+ cachedRecovery.setRecovery(
+ ValueRecovery::inGPR(cachedRecovery.recovery().gpr(), DataFormatJS));
+ if (verbose)
+ dataLog(" into ", cachedRecovery.recovery(), "\n");
+ return;
+ default:
+ return;
+ }
+ }
+
+ if (cachedRecovery.recovery().isInFPR()) {
+ if (cachedRecovery.recovery().dataFormat() == DataFormatDouble) {
+ if (verbose)
+ dataLog(" * Boxing ", cachedRecovery.recovery());
+ GPRReg resultGPR = cachedRecovery.wantedJSValueRegs().gpr();
+ if (resultGPR == InvalidGPRReg || m_registers[resultGPR])
+ resultGPR = getFreeGPR();
+ ASSERT(resultGPR != InvalidGPRReg);
+ m_jit.purifyNaN(cachedRecovery.recovery().fpr());
+ m_jit.moveDoubleTo64(cachedRecovery.recovery().fpr(), resultGPR);
+ m_lockedRegisters.set(resultGPR);
+ if (tryAcquireTagTypeNumber())
+ m_jit.sub64(m_tagTypeNumber, resultGPR);
+ else
+ m_jit.sub64(MacroAssembler::TrustedImm64(TagTypeNumber), resultGPR);
+ m_lockedRegisters.clear(resultGPR);
+ updateRecovery(cachedRecovery, ValueRecovery::inGPR(resultGPR, DataFormatJS));
+ if (verbose)
+ dataLog(" into ", cachedRecovery.recovery(), "\n");
+ return;
+ }
+ ASSERT(cachedRecovery.recovery().dataFormat() == DataFormatJS);
+ return;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+void CallFrameShuffler::emitLoad(CachedRecovery& cachedRecovery)
+{
+ if (!cachedRecovery.recovery().isInJSStack())
+ return;
+
+ if (verbose)
+ dataLog(" * Loading ", cachedRecovery.recovery(), " into ");
+
+ VirtualRegister reg = cachedRecovery.recovery().virtualRegister();
+ MacroAssembler::Address address { addressForOld(reg) };
+ bool tryFPR { true };
+ GPRReg resultGPR { cachedRecovery.wantedJSValueRegs().gpr() };
+
+ // If we want a GPR and it's available, that's better than loading
+ // into an FPR.
+ if (resultGPR != InvalidGPRReg && !m_registers[resultGPR]
+ && !m_lockedRegisters.get(resultGPR) && cachedRecovery.loadsIntoGPR())
+ tryFPR = false;
+
+ // Otherwise, we prefer loading into FPRs if possible
+ if (tryFPR && cachedRecovery.loadsIntoFPR()) {
+ FPRReg resultFPR { cachedRecovery.wantedFPR() };
+ if (resultFPR == InvalidFPRReg || m_registers[resultFPR] || m_lockedRegisters.get(resultFPR))
+ resultFPR = getFreeFPR();
+ if (resultFPR != InvalidFPRReg) {
+ m_jit.loadDouble(address, resultFPR);
+ DataFormat dataFormat = DataFormatJS;
+ // We could be transforming a DataFormatCell into a
+ // DataFormatJS here - but that's OK.
+ if (cachedRecovery.recovery().dataFormat() == DataFormatDouble)
+ dataFormat = DataFormatDouble;
+ updateRecovery(cachedRecovery,
+ ValueRecovery::inFPR(resultFPR, dataFormat));
+ if (verbose)
+ dataLog(cachedRecovery.recovery(), "\n");
+ if (reg == newAsOld(dangerFrontier()))
+ updateDangerFrontier();
+ return;
+ }
+ }
+
+ ASSERT(cachedRecovery.loadsIntoGPR());
+ if (resultGPR == InvalidGPRReg || m_registers[resultGPR] || m_lockedRegisters.get(resultGPR))
+ resultGPR = getFreeGPR();
+ ASSERT(resultGPR != InvalidGPRReg);
+ m_jit.loadPtr(address, resultGPR);
+ updateRecovery(cachedRecovery,
+ ValueRecovery::inGPR(resultGPR, cachedRecovery.recovery().dataFormat()));
+ if (verbose)
+ dataLog(cachedRecovery.recovery(), "\n");
+ if (reg == newAsOld(dangerFrontier()))
+ updateDangerFrontier();
+}
+
+bool CallFrameShuffler::canLoad(CachedRecovery& cachedRecovery)
+{
+ if (!cachedRecovery.recovery().isInJSStack())
+ return true;
+
+ ASSERT(cachedRecovery.loadsIntoFPR() || cachedRecovery.loadsIntoGPR());
+
+ if (cachedRecovery.loadsIntoFPR() && getFreeFPR() != InvalidFPRReg)
+ return true;
+
+ if (cachedRecovery.loadsIntoGPR() && getFreeGPR() != InvalidGPRReg)
+ return true;
+
+ return false;
+}
+
+void CallFrameShuffler::emitDisplace(CachedRecovery& cachedRecovery)
+{
+ Reg wantedReg;
+ if (!(wantedReg = Reg { cachedRecovery.wantedJSValueRegs().gpr() }))
+ wantedReg = Reg { cachedRecovery.wantedFPR() };
+ ASSERT(wantedReg);
+ ASSERT(!m_lockedRegisters.get(wantedReg));
+
+ if (CachedRecovery* current = m_registers[wantedReg]) {
+ if (current == &cachedRecovery) {
+ if (verbose)
+ dataLog(" + ", wantedReg, " is OK\n");
+ return;
+ }
+        // We could handle this case with something more elaborate, e.g.
+        // by detecting and breaking cycles.
+ // However, ending up in this situation will be super
+ // rare, and should actually be outright impossible for
+ // non-FTL tiers, since:
+ // (a) All doubles have been converted into JSValues with
+ // ValueRep nodes, so FPRs are initially free
+ //
+ // (b) The only recoveries with wanted registers are the
+ // callee (which always starts out in a register) and
+ // the callee-save registers
+ //
+ // (c) The callee-save registers are the first things we
+ // load (after the return PC), and they are loaded as JSValues
+ //
+ // (d) We prefer loading JSValues into FPRs if their
+ // wanted GPR is not available
+ //
+ // (e) If we end up spilling some registers with a
+ // target, we won't load them again before the very
+ // end of the algorithm
+ //
+ // Combined, this means that we will never load a recovery
+ // with a wanted GPR into any GPR other than its wanted
+ // GPR. The callee could however have been initially in
+ // one of the callee-save registers - but since the wanted
+ // GPR for the callee is always regT0, it will be the
+ // first one to be displaced, and we won't see it when
+ // handling any of the callee-save registers.
+ //
+ // Thus, the only way we could ever reach this path is in
+ // the FTL, when there is so much pressure that we
+ // absolutely need to load the callee-save registers into
+ // different GPRs initially but not enough pressure to
+ // then have to spill all of them. And even in that case,
+ // depending on the order in which B3 saves the
+ // callee-saves, we will probably still be safe. Anyway,
+ // the couple extra move instructions compared to an
+ // efficient cycle-based algorithm are not going to hurt
+ // us.
+ if (wantedReg.isFPR()) {
+ FPRReg tempFPR = getFreeFPR();
+ if (verbose)
+ dataLog(" * Moving ", wantedReg, " into ", tempFPR, "\n");
+ m_jit.moveDouble(wantedReg.fpr(), tempFPR);
+ updateRecovery(*current,
+ ValueRecovery::inFPR(tempFPR, current->recovery().dataFormat()));
+ } else {
+ GPRReg tempGPR = getFreeGPR();
+ if (verbose)
+ dataLog(" * Moving ", wantedReg.gpr(), " into ", tempGPR, "\n");
+ m_jit.move(wantedReg.gpr(), tempGPR);
+ updateRecovery(*current,
+ ValueRecovery::inGPR(tempGPR, current->recovery().dataFormat()));
+ }
+ }
+ ASSERT(!m_registers[wantedReg]);
+
+ if (cachedRecovery.recovery().isConstant()) {
+ // We only care about callee saves for wanted FPRs, and those are never constants
+ ASSERT(wantedReg.isGPR());
+ if (verbose)
+ dataLog(" * Loading ", cachedRecovery.recovery().constant(), " into ", wantedReg, "\n");
+ m_jit.moveTrustedValue(cachedRecovery.recovery().constant(), JSValueRegs { wantedReg.gpr() });
+ updateRecovery(
+ cachedRecovery,
+ ValueRecovery::inRegister(wantedReg, DataFormatJS));
+ } else if (cachedRecovery.recovery().isInGPR()) {
+ if (verbose)
+ dataLog(" * Moving ", cachedRecovery.recovery(), " into ", wantedReg, "\n");
+ if (wantedReg.isGPR())
+ m_jit.move(cachedRecovery.recovery().gpr(), wantedReg.gpr());
+ else
+ m_jit.move64ToDouble(cachedRecovery.recovery().gpr(), wantedReg.fpr());
+ RELEASE_ASSERT(cachedRecovery.recovery().dataFormat() == DataFormatJS);
+ updateRecovery(cachedRecovery,
+ ValueRecovery::inRegister(wantedReg, DataFormatJS));
+ } else {
+ ASSERT(cachedRecovery.recovery().isInFPR());
+ if (cachedRecovery.recovery().dataFormat() == DataFormatDouble) {
+ // We only care about callee saves for wanted FPRs, and those are always DataFormatJS
+ ASSERT(wantedReg.isGPR());
+ // This will automatically pick the wanted GPR
+ emitBox(cachedRecovery);
+ } else {
+ if (verbose)
+ dataLog(" * Moving ", cachedRecovery.recovery().fpr(), " into ", wantedReg, "\n");
+ if (wantedReg.isGPR())
+ m_jit.moveDoubleTo64(cachedRecovery.recovery().fpr(), wantedReg.gpr());
+ else
+ m_jit.moveDouble(cachedRecovery.recovery().fpr(), wantedReg.fpr());
+ RELEASE_ASSERT(cachedRecovery.recovery().dataFormat() == DataFormatJS);
+ updateRecovery(cachedRecovery,
+ ValueRecovery::inRegister(wantedReg, DataFormatJS));
+ }
+ }
+
+ ASSERT(m_registers[wantedReg] == &cachedRecovery);
+}
+
+bool CallFrameShuffler::tryAcquireTagTypeNumber()
+{
+ if (m_tagTypeNumber != InvalidGPRReg)
+ return true;
+
+ m_tagTypeNumber = getFreeGPR();
+
+ if (m_tagTypeNumber == InvalidGPRReg)
+ return false;
+
+ m_lockedRegisters.set(m_tagTypeNumber);
+ m_jit.move(MacroAssembler::TrustedImm64(TagTypeNumber), m_tagTypeNumber);
+ return true;
+}
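+
+// Background note on the boxing arithmetic in emitBox above: in this 64-bit
+// value encoding, TagTypeNumber has its top 16 bits set, so an int32 is boxed
+// by zero-extending it and OR-ing in TagTypeNumber, while a double is boxed
+// by subtracting TagTypeNumber from its raw bit pattern (equivalent, modulo
+// 2^64, to adding the double encode offset). Caching the constant in a spare
+// GPR merely avoids rematerializing the 64-bit immediate for every boxed
+// value.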
+
+} // namespace JSC
+
+#endif // ENABLE(JIT) && USE(JSVALUE64)
diff --git a/Source/JavaScriptCore/jit/CompactJITCodeMap.h b/Source/JavaScriptCore/jit/CompactJITCodeMap.h
index 45ab175ec..d5eaa4072 100644
--- a/Source/JavaScriptCore/jit/CompactJITCodeMap.h
+++ b/Source/JavaScriptCore/jit/CompactJITCodeMap.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -30,10 +30,8 @@
#define CompactJITCodeMap_h
#include <wtf/Assertions.h>
-#include <wtf/FastAllocBase.h>
#include <wtf/FastMalloc.h>
-#include <wtf/OwnPtr.h>
-#include <wtf/PassOwnPtr.h>
+#include <wtf/FastMalloc.h>
#include <wtf/Vector.h>
namespace JSC {
@@ -47,7 +45,7 @@ namespace JSC {
// CompactJITCodeMap::Encoder encoder(map);
// encoder.append(a, b);
// encoder.append(c, d); // preconditions: c >= a, d >= b
-// OwnPtr<CompactJITCodeMap> map = encoder.finish();
+// auto map = encoder.finish();
//
// At some later time:
//
@@ -80,6 +78,16 @@ struct BytecodeAndMachineOffset {
class CompactJITCodeMap {
WTF_MAKE_FAST_ALLOCATED;
public:
+ CompactJITCodeMap(uint8_t* buffer, unsigned size, unsigned numberOfEntries)
+ : m_buffer(buffer)
+#if !ASSERT_DISABLED
+ , m_size(size)
+#endif
+ , m_numberOfEntries(numberOfEntries)
+ {
+ UNUSED_PARAM(size);
+ }
+
~CompactJITCodeMap()
{
if (m_buffer)
@@ -94,16 +102,6 @@ public:
void decode(Vector<BytecodeAndMachineOffset>& result) const;
private:
- CompactJITCodeMap(uint8_t* buffer, unsigned size, unsigned numberOfEntries)
- : m_buffer(buffer)
-#if !ASSERT_DISABLED
- , m_size(size)
-#endif
- , m_numberOfEntries(numberOfEntries)
- {
- UNUSED_PARAM(size);
- }
-
uint8_t at(unsigned index) const
{
ASSERT(index < m_size);
@@ -138,8 +136,8 @@ public:
void ensureCapacityFor(unsigned numberOfEntriesToAdd);
void append(unsigned bytecodeIndex, unsigned machineCodeOffset);
- PassOwnPtr<CompactJITCodeMap> finish();
-
+ std::unique_ptr<CompactJITCodeMap> finish();
+
private:
void appendByte(uint8_t value);
void encodeNumber(uint32_t value);
@@ -212,18 +210,18 @@ inline void CompactJITCodeMap::Encoder::append(unsigned bytecodeIndex, unsigned
m_numberOfEntries++;
}
-inline PassOwnPtr<CompactJITCodeMap> CompactJITCodeMap::Encoder::finish()
+inline std::unique_ptr<CompactJITCodeMap> CompactJITCodeMap::Encoder::finish()
{
m_capacity = m_size;
m_buffer = static_cast<uint8_t*>(fastRealloc(m_buffer, m_capacity));
- OwnPtr<CompactJITCodeMap> result = adoptPtr(new CompactJITCodeMap(m_buffer, m_size, m_numberOfEntries));
+ auto result = std::make_unique<CompactJITCodeMap>(m_buffer, m_size, m_numberOfEntries);
m_buffer = 0;
m_size = 0;
m_capacity = 0;
m_numberOfEntries = 0;
m_previousBytecodeIndex = 0;
m_previousMachineCodeOffset = 0;
- return result.release();
+ return result;
}
inline void CompactJITCodeMap::Encoder::appendByte(uint8_t value)
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.cpp b/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.cpp
new file mode 100644
index 000000000..b4f56650b
--- /dev/null
+++ b/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.cpp
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ExecutableAllocationFuzz.h"
+
+#include "TestRunnerUtils.h"
+#include <wtf/Atomics.h>
+#include <wtf/DataLog.h>
+
+namespace JSC {
+
+static Atomic<unsigned> s_numberOfExecutableAllocationFuzzChecks;
+unsigned numberOfExecutableAllocationFuzzChecks()
+{
+ return s_numberOfExecutableAllocationFuzzChecks.load();
+}
+
+ExecutableAllocationFuzzResult doExecutableAllocationFuzzing()
+{
+ ASSERT(Options::useExecutableAllocationFuzz());
+
+ unsigned oldValue;
+ unsigned newValue;
+ do {
+ oldValue = s_numberOfExecutableAllocationFuzzChecks.load();
+ newValue = oldValue + 1;
+ } while (!s_numberOfExecutableAllocationFuzzChecks.compareExchangeWeak(oldValue, newValue));
+
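+    // The loop above is an atomic increment: newValue counts the fuzz checks
+    // performed so far (including this one) and each value is claimed by
+    // exactly one call, so fireExecutableAllocationFuzzAt matches exactly one
+    // check and fireExecutableAllocationFuzzAtOrAfter matches every check
+    // from that point on.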
+ if (newValue == Options::fireExecutableAllocationFuzzAt()) {
+ if (Options::verboseExecutableAllocationFuzz()) {
+ dataLog("Will pretend to fail executable allocation.\n");
+ WTFReportBacktrace();
+ }
+ return PretendToFailExecutableAllocation;
+ }
+
+ if (Options::fireExecutableAllocationFuzzAtOrAfter()
+ && newValue >= Options::fireExecutableAllocationFuzzAtOrAfter()) {
+ if (Options::verboseExecutableAllocationFuzz()) {
+ dataLog("Will pretend to fail executable allocation.\n");
+ WTFReportBacktrace();
+ }
+ return PretendToFailExecutableAllocation;
+ }
+
+ return AllowNormalExecutableAllocation;
+}
+
+} // namespace JSC
+
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.h b/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.h
new file mode 100644
index 000000000..b15cdef44
--- /dev/null
+++ b/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ExecutableAllocationFuzz_h
+#define ExecutableAllocationFuzz_h
+
+#include "Options.h"
+
+namespace JSC {
+
+enum ExecutableAllocationFuzzResult {
+ AllowNormalExecutableAllocation,
+ PretendToFailExecutableAllocation
+};
+
+ExecutableAllocationFuzzResult doExecutableAllocationFuzzing();
+
+inline ExecutableAllocationFuzzResult doExecutableAllocationFuzzingIfEnabled()
+{
+ if (LIKELY(!Options::useExecutableAllocationFuzz()))
+ return AllowNormalExecutableAllocation;
+
+ return doExecutableAllocationFuzzing();
+}
+
+} // namespace JSC
+
+#endif // ExecutableAllocationFuzz_h
+
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocator.cpp b/Source/JavaScriptCore/jit/ExecutableAllocator.cpp
index 5ac6cc412..4ede23531 100644
--- a/Source/JavaScriptCore/jit/ExecutableAllocator.cpp
+++ b/Source/JavaScriptCore/jit/ExecutableAllocator.cpp
@@ -24,18 +24,17 @@
*/
#include "config.h"
-
#include "ExecutableAllocator.h"
+#include "JSCInlines.h"
+
#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
#include "CodeProfiling.h"
#include <wtf/HashSet.h>
+#include <wtf/Lock.h>
#include <wtf/MetaAllocator.h>
+#include <wtf/NeverDestroyed.h>
#include <wtf/PageReservation.h>
-#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
-#include <wtf/PassOwnPtr.h>
-#endif
-#include <wtf/ThreadingPrimitives.h>
#include <wtf/VMTags.h>
#endif
@@ -57,7 +56,7 @@ public:
DemandExecutableAllocator()
: MetaAllocator(jitAllocationGranule)
{
- MutexLocker lock(allocatorsMutex());
+ std::lock_guard<StaticLock> lock(allocatorsMutex());
allocators().add(this);
// Don't preallocate any memory here.
}
@@ -65,7 +64,7 @@ public:
virtual ~DemandExecutableAllocator()
{
{
- MutexLocker lock(allocatorsMutex());
+ std::lock_guard<StaticLock> lock(allocatorsMutex());
allocators().remove(this);
}
for (unsigned i = 0; i < reservations.size(); ++i)
@@ -75,7 +74,7 @@ public:
static size_t bytesAllocatedByAllAllocators()
{
size_t total = 0;
- MutexLocker lock(allocatorsMutex());
+ std::lock_guard<StaticLock> lock(allocatorsMutex());
for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator)
total += (*allocator)->bytesAllocated();
return total;
@@ -84,7 +83,7 @@ public:
static size_t bytesCommittedByAllocactors()
{
size_t total = 0;
- MutexLocker lock(allocatorsMutex());
+ std::lock_guard<StaticLock> lock(allocatorsMutex());
for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator)
total += (*allocator)->bytesCommitted();
return total;
@@ -93,7 +92,7 @@ public:
#if ENABLE(META_ALLOCATOR_PROFILE)
static void dumpProfileFromAllAllocators()
{
- MutexLocker lock(allocatorsMutex());
+ std::lock_guard<StaticLock> lock(allocatorsMutex());
for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator)
(*allocator)->dumpProfile();
}
@@ -135,12 +134,14 @@ private:
Vector<PageReservation, 16> reservations;
static HashSet<DemandExecutableAllocator*>& allocators()
{
- DEFINE_STATIC_LOCAL(HashSet<DemandExecutableAllocator*>, sAllocators, ());
- return sAllocators;
+ static NeverDestroyed<HashSet<DemandExecutableAllocator*>> set;
+ return set;
}
- static Mutex& allocatorsMutex()
+
+ static StaticLock& allocatorsMutex()
{
- DEFINE_STATIC_LOCAL(Mutex, mutex, ());
+ static StaticLock mutex;
+
return mutex;
}
};
@@ -169,7 +170,7 @@ void ExecutableAllocator::initializeAllocator()
ExecutableAllocator::ExecutableAllocator(VM&)
#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
- : m_allocator(adoptPtr(new DemandExecutableAllocator()))
+ : m_allocator(std::make_unique<DemandExecutableAllocator>())
#endif
{
ASSERT(allocator());
@@ -212,11 +213,11 @@ double ExecutableAllocator::memoryPressureMultiplier(size_t addedMemoryUsage)
}
-PassRefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
+RefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
{
RefPtr<ExecutableMemoryHandle> result = allocator()->allocate(sizeInBytes, ownerUID);
RELEASE_ASSERT(result || effort != JITCompilationMustSucceed);
- return result.release();
+ return result;
}
size_t ExecutableAllocator::committedByteCount()
@@ -231,6 +232,16 @@ void ExecutableAllocator::dumpProfile()
}
#endif
+Lock& ExecutableAllocator::getLock() const
+{
+ return gAllocator->getLock();
+}
+
+bool ExecutableAllocator::isValidExecutableMemory(const LockHolder& locker, void* address)
+{
+ return gAllocator->isInAllocatedMemory(locker, address);
+}
+
#endif // ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocator.h b/Source/JavaScriptCore/jit/ExecutableAllocator.h
index 42e1f9594..09b768bed 100644
--- a/Source/JavaScriptCore/jit/ExecutableAllocator.h
+++ b/Source/JavaScriptCore/jit/ExecutableAllocator.h
@@ -29,10 +29,10 @@
#include <stddef.h> // for ptrdiff_t
#include <limits>
#include <wtf/Assertions.h>
+#include <wtf/Lock.h>
#include <wtf/MetaAllocatorHandle.h>
#include <wtf/MetaAllocator.h>
#include <wtf/PageAllocation.h>
-#include <wtf/PassRefPtr.h>
#include <wtf/RefCounted.h>
#include <wtf/Vector.h>
@@ -40,7 +40,7 @@
#include <libkern/OSCacheControl.h>
#endif
-#if OS(IOS) || OS(QNX)
+#if OS(IOS)
#include <sys/mman.h>
#endif
@@ -55,44 +55,16 @@
#include <unistd.h>
#endif
-#if OS(WINCE)
-// From pkfuncs.h (private header file from the Platform Builder)
-#define CACHE_SYNC_ALL 0x07F
-extern "C" __declspec(dllimport) void CacheRangeFlush(LPVOID pAddr, DWORD dwLength, DWORD dwFlags);
-#endif
-
#define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (pageSize() * 4)
-#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
-#define PROTECTION_FLAGS_RW (PROT_READ | PROT_WRITE)
-#define PROTECTION_FLAGS_RX (PROT_READ | PROT_EXEC)
-#define EXECUTABLE_POOL_WRITABLE false
-#else
#define EXECUTABLE_POOL_WRITABLE true
-#endif
namespace JSC {
class VM;
-void releaseExecutableMemory(VM&);
static const unsigned jitAllocationGranule = 32;
-inline size_t roundUpAllocationSize(size_t request, size_t granularity)
-{
- RELEASE_ASSERT((std::numeric_limits<size_t>::max() - granularity) > request);
-
- // Round up to next page boundary
- size_t size = request + (granularity - 1);
- size = size & ~(granularity - 1);
- ASSERT(size >= request);
- return size;
-}
-
-}
-
-namespace JSC {
-
typedef WTF::MetaAllocatorHandle ExecutableMemoryHandle;
#if ENABLE(ASSEMBLER)
@@ -102,13 +74,20 @@ class DemandExecutableAllocator;
#endif
#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED)
-#if CPU(ARM) || CPU(MIPS)
+#if CPU(ARM)
static const size_t fixedExecutableMemoryPoolSize = 16 * 1024 * 1024;
-#elif CPU(X86_64) && !CPU(X32)
+#elif CPU(ARM64)
+static const size_t fixedExecutableMemoryPoolSize = 32 * 1024 * 1024;
+#elif CPU(X86_64)
static const size_t fixedExecutableMemoryPoolSize = 1024 * 1024 * 1024;
#else
static const size_t fixedExecutableMemoryPoolSize = 32 * 1024 * 1024;
#endif
+#if CPU(ARM)
+static const double executablePoolReservationFraction = 0.15;
+#else
+static const double executablePoolReservationFraction = 0.25;
+#endif
extern uintptr_t startOfFixedExecutableMemoryPool;
#endif
@@ -134,36 +113,13 @@ public:
static void dumpProfile() { }
#endif
- PassRefPtr<ExecutableMemoryHandle> allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort);
-
-#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
- static void makeWritable(void* start, size_t size)
- {
- reprotectRegion(start, size, Writable);
- }
+ RefPtr<ExecutableMemoryHandle> allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort);
- static void makeExecutable(void* start, size_t size)
- {
- reprotectRegion(start, size, Executable);
- }
-#else
- static void makeWritable(void*, size_t) {}
- static void makeExecutable(void*, size_t) {}
-#endif
+ bool isValidExecutableMemory(const LockHolder&, void* address);
static size_t committedByteCount();
-private:
-
-#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
- static void reprotectRegion(void*, size_t, ProtectionSetting);
-#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
- // We create a MetaAllocator for each JS global object.
- OwnPtr<DemandExecutableAllocator> m_allocator;
- DemandExecutableAllocator* allocator() { return m_allocator.get(); }
-#endif
-#endif
-
+ Lock& getLock() const;
};
#endif // ENABLE(JIT) && ENABLE(ASSEMBLER)
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp b/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
index ea2217924..5f601767e 100644
--- a/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
+++ b/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -24,21 +24,19 @@
*/
#include "config.h"
-
#include "ExecutableAllocator.h"
+#include "JSCInlines.h"
+
#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED)
#include "CodeProfiling.h"
-#include <errno.h>
+#include "ExecutableAllocationFuzz.h"
#include <wtf/MetaAllocator.h>
#include <wtf/PageReservation.h>
-#include <wtf/VMTags.h>
-#if !PLATFORM(IOS) && PLATFORM(MAC) && __MAC_OS_X_VERSION_MIN_REQUIRED < 1090
+#if OS(DARWIN)
#include <sys/mman.h>
-// MADV_FREE_REUSABLE does not work for JIT memory on older OSes so use MADV_FREE in that case.
-#define WTF_USE_MADV_FREE_FOR_JIT_MEMORY 1
#endif
using namespace WTF;
@@ -53,12 +51,15 @@ public:
FixedVMPoolExecutableAllocator()
: MetaAllocator(jitAllocationGranule) // round up all allocations to 32 bytes
{
- m_reservation = PageReservation::reserveWithGuardPages(fixedExecutableMemoryPoolSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
-#if !ENABLE(LLINT)
- RELEASE_ASSERT(m_reservation);
-#endif
+ size_t reservationSize;
+ if (Options::jitMemoryReservationSize())
+ reservationSize = Options::jitMemoryReservationSize();
+ else
+ reservationSize = fixedExecutableMemoryPoolSize;
+ reservationSize = roundUpToMultipleOf(pageSize(), reservationSize);
+ m_reservation = PageReservation::reserveWithGuardPages(reservationSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
if (m_reservation) {
- ASSERT(m_reservation.size() == fixedExecutableMemoryPoolSize);
+ ASSERT(m_reservation.size() == reservationSize);
addFreshFreeSpace(m_reservation.base(), m_reservation.size());
startOfFixedExecutableMemoryPool = reinterpret_cast<uintptr_t>(m_reservation.base());
@@ -68,13 +69,13 @@ public:
virtual ~FixedVMPoolExecutableAllocator();
protected:
- virtual void* allocateNewSpace(size_t&)
+ virtual void* allocateNewSpace(size_t&) override
{
// We're operating in a fixed pool, so new allocation is always prohibited.
return 0;
}
- virtual void notifyNeedPage(void* page)
+ virtual void notifyNeedPage(void* page) override
{
#if USE(MADV_FREE_FOR_JIT_MEMORY)
UNUSED_PARAM(page);
@@ -83,7 +84,7 @@ protected:
#endif
}
- virtual void notifyPageIsFree(void* page)
+ virtual void notifyPageIsFree(void* page) override
{
#if USE(MADV_FREE_FOR_JIT_MEMORY)
for (;;) {
@@ -144,28 +145,59 @@ double ExecutableAllocator::memoryPressureMultiplier(size_t addedMemoryUsage)
MetaAllocator::Statistics statistics = allocator->currentStatistics();
ASSERT(statistics.bytesAllocated <= statistics.bytesReserved);
size_t bytesAllocated = statistics.bytesAllocated + addedMemoryUsage;
- if (bytesAllocated >= statistics.bytesReserved)
- bytesAllocated = statistics.bytesReserved;
+ size_t bytesAvailable = static_cast<size_t>(
+ statistics.bytesReserved * (1 - executablePoolReservationFraction));
+ if (bytesAllocated >= bytesAvailable)
+ bytesAllocated = bytesAvailable;
double result = 1.0;
- size_t divisor = statistics.bytesReserved - bytesAllocated;
+ size_t divisor = bytesAvailable - bytesAllocated;
if (divisor)
- result = static_cast<double>(statistics.bytesReserved) / divisor;
+ result = static_cast<double>(bytesAvailable) / divisor;
if (result < 1.0)
result = 1.0;
return result;
}
-PassRefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(VM& vm, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
+RefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
{
+ if (effort != JITCompilationCanFail && Options::reportMustSucceedExecutableAllocations()) {
+ dataLog("Allocating ", sizeInBytes, " bytes of executable memory with JITCompilationMustSucceed.\n");
+ WTFReportBacktrace();
+ }
+
+ if (effort == JITCompilationCanFail
+ && doExecutableAllocationFuzzingIfEnabled() == PretendToFailExecutableAllocation)
+ return nullptr;
+
+ if (effort == JITCompilationCanFail) {
+ // Don't allow allocations if we are down to reserve.
+ MetaAllocator::Statistics statistics = allocator->currentStatistics();
+ size_t bytesAllocated = statistics.bytesAllocated + sizeInBytes;
+ size_t bytesAvailable = static_cast<size_t>(
+ statistics.bytesReserved * (1 - executablePoolReservationFraction));
+ if (bytesAllocated > bytesAvailable)
+ return nullptr;
+ }
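+    // For example, with the default 1 GB fixed pool on X86_64 and a
+    // reservation fraction of 0.25, optional (JITCompilationCanFail)
+    // allocations start failing once roughly 768 MB are in use, so that the
+    // remainder stays available for must-succeed allocations.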
+
RefPtr<ExecutableMemoryHandle> result = allocator->allocate(sizeInBytes, ownerUID);
if (!result) {
- if (effort == JITCompilationCanFail)
- return result;
- releaseExecutableMemory(vm);
- result = allocator->allocate(sizeInBytes, ownerUID);
- RELEASE_ASSERT(result);
+ if (effort != JITCompilationCanFail) {
+ dataLog("Ran out of executable memory while allocating ", sizeInBytes, " bytes.\n");
+ CRASH();
+ }
+ return nullptr;
}
- return result.release();
+ return result;
+}
+
+bool ExecutableAllocator::isValidExecutableMemory(const LockHolder& locker, void* address)
+{
+ return allocator->isInAllocatedMemory(locker, address);
+}
+
+Lock& ExecutableAllocator::getLock() const
+{
+ return allocator->getLock();
}
size_t ExecutableAllocator::committedByteCount()
diff --git a/Source/JavaScriptCore/jit/FPRInfo.h b/Source/JavaScriptCore/jit/FPRInfo.h
new file mode 100644
index 000000000..a19a1ac38
--- /dev/null
+++ b/Source/JavaScriptCore/jit/FPRInfo.h
@@ -0,0 +1,431 @@
+/*
+ * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef FPRInfo_h
+#define FPRInfo_h
+
+#include "MacroAssembler.h"
+#include <wtf/PrintStream.h>
+
+namespace JSC {
+
+typedef MacroAssembler::FPRegisterID FPRReg;
+#define InvalidFPRReg ((::JSC::FPRReg)-1)
+
+#if ENABLE(JIT)
+
+#if CPU(X86) || CPU(X86_64)
+
+class FPRInfo {
+public:
+ typedef FPRReg RegisterType;
+ static const unsigned numberOfRegisters = 6;
+ static const unsigned numberOfArgumentRegisters = 8;
+
+ // Temporary registers.
+ static const FPRReg fpRegT0 = X86Registers::xmm0;
+ static const FPRReg fpRegT1 = X86Registers::xmm1;
+ static const FPRReg fpRegT2 = X86Registers::xmm2;
+ static const FPRReg fpRegT3 = X86Registers::xmm3;
+ static const FPRReg fpRegT4 = X86Registers::xmm4;
+ static const FPRReg fpRegT5 = X86Registers::xmm5;
+#if CPU(X86_64)
+    // Only X86_64 passes arguments in xmm registers
+ static const FPRReg argumentFPR0 = X86Registers::xmm0; // fpRegT0
+ static const FPRReg argumentFPR1 = X86Registers::xmm1; // fpRegT1
+ static const FPRReg argumentFPR2 = X86Registers::xmm2; // fpRegT2
+ static const FPRReg argumentFPR3 = X86Registers::xmm3; // fpRegT3
+ static const FPRReg argumentFPR4 = X86Registers::xmm4; // fpRegT4
+ static const FPRReg argumentFPR5 = X86Registers::xmm5; // fpRegT5
+ static const FPRReg argumentFPR6 = X86Registers::xmm6;
+ static const FPRReg argumentFPR7 = X86Registers::xmm7;
+#endif
+ // On X86 the return will actually be on the x87 stack,
+ // so we'll copy to xmm0 for sanity!
+ static const FPRReg returnValueFPR = X86Registers::xmm0; // fpRegT0
+
+    // FPRReg mapping is direct; the machine register numbers can
+    // be used directly as indices into the FPR RegisterBank.
+ COMPILE_ASSERT(X86Registers::xmm0 == 0, xmm0_is_0);
+ COMPILE_ASSERT(X86Registers::xmm1 == 1, xmm1_is_1);
+ COMPILE_ASSERT(X86Registers::xmm2 == 2, xmm2_is_2);
+ COMPILE_ASSERT(X86Registers::xmm3 == 3, xmm3_is_3);
+ COMPILE_ASSERT(X86Registers::xmm4 == 4, xmm4_is_4);
+ COMPILE_ASSERT(X86Registers::xmm5 == 5, xmm5_is_5);
+ static FPRReg toRegister(unsigned index)
+ {
+ return (FPRReg)index;
+ }
+ static unsigned toIndex(FPRReg reg)
+ {
+ unsigned result = (unsigned)reg;
+ if (result >= numberOfRegisters)
+ return InvalidIndex;
+ return result;
+ }
+
+ static FPRReg toArgumentRegister(unsigned index)
+ {
+ return (FPRReg)index;
+ }
+
+ static const char* debugName(FPRReg reg)
+ {
+ ASSERT(reg != InvalidFPRReg);
+#if CPU(X86_64)
+ ASSERT(static_cast<int>(reg) < 16);
+ static const char* nameForRegister[16] = {
+ "xmm0", "xmm1", "xmm2", "xmm3",
+ "xmm4", "xmm5", "xmm6", "xmm7",
+ "xmm8", "xmm9", "xmm10", "xmm11",
+ "xmm12", "xmm13", "xmm14", "xmm15"
+ };
+#elif CPU(X86)
+ ASSERT(static_cast<int>(reg) < 8);
+ static const char* nameForRegister[8] = {
+ "xmm0", "xmm1", "xmm2", "xmm3",
+ "xmm4", "xmm5", "xmm6", "xmm7"
+ };
+#endif
+ return nameForRegister[reg];
+ }
+
+ static const unsigned InvalidIndex = 0xffffffff;
+};
+
+#endif // CPU(X86) || CPU(X86_64)
+
+#if CPU(ARM)
+
+class FPRInfo {
+public:
+ typedef FPRReg RegisterType;
+ static const unsigned numberOfRegisters = 6;
+
+ // Temporary registers.
+    // d7 is used by the MacroAssembler as fpTempRegister.
+ static const FPRReg fpRegT0 = ARMRegisters::d0;
+ static const FPRReg fpRegT1 = ARMRegisters::d1;
+ static const FPRReg fpRegT2 = ARMRegisters::d2;
+ static const FPRReg fpRegT3 = ARMRegisters::d3;
+ static const FPRReg fpRegT4 = ARMRegisters::d4;
+ static const FPRReg fpRegT5 = ARMRegisters::d5;
+    // ARMv7 doesn't pass arguments in fp registers. The return
+    // value is also actually in integer registers; for now
+    // we'll return in d0 for simplicity.
+ static const FPRReg returnValueFPR = ARMRegisters::d0; // fpRegT0
+
+#if CPU(ARM_HARDFP)
+ static const FPRReg argumentFPR0 = ARMRegisters::d0; // fpRegT0
+ static const FPRReg argumentFPR1 = ARMRegisters::d1; // fpRegT1
+#endif
+
+    // FPRReg mapping is direct; the machine register numbers can
+    // be used directly as indices into the FPR RegisterBank.
+ COMPILE_ASSERT(ARMRegisters::d0 == 0, d0_is_0);
+ COMPILE_ASSERT(ARMRegisters::d1 == 1, d1_is_1);
+ COMPILE_ASSERT(ARMRegisters::d2 == 2, d2_is_2);
+ COMPILE_ASSERT(ARMRegisters::d3 == 3, d3_is_3);
+ COMPILE_ASSERT(ARMRegisters::d4 == 4, d4_is_4);
+ COMPILE_ASSERT(ARMRegisters::d5 == 5, d5_is_5);
+ static FPRReg toRegister(unsigned index)
+ {
+ return (FPRReg)index;
+ }
+ static unsigned toIndex(FPRReg reg)
+ {
+ return (unsigned)reg;
+ }
+
+ static const char* debugName(FPRReg reg)
+ {
+ ASSERT(reg != InvalidFPRReg);
+ ASSERT(static_cast<int>(reg) < 32);
+ static const char* nameForRegister[32] = {
+ "d0", "d1", "d2", "d3",
+ "d4", "d5", "d6", "d7",
+ "d8", "d9", "d10", "d11",
+ "d12", "d13", "d14", "d15",
+ "d16", "d17", "d18", "d19",
+ "d20", "d21", "d22", "d23",
+ "d24", "d25", "d26", "d27",
+ "d28", "d29", "d30", "d31"
+ };
+ return nameForRegister[reg];
+ }
+
+ static const unsigned InvalidIndex = 0xffffffff;
+};
+
+#endif // CPU(ARM)
+
+#if CPU(ARM64)
+
+class FPRInfo {
+public:
+ typedef FPRReg RegisterType;
+ static const unsigned numberOfRegisters = 23;
+ static const unsigned numberOfArgumentRegisters = 8;
+
+ // Temporary registers.
+    // q8-q15 are callee-saved; q31 is used by the MacroAssembler as fpTempRegister.
+ static const FPRReg fpRegT0 = ARM64Registers::q0;
+ static const FPRReg fpRegT1 = ARM64Registers::q1;
+ static const FPRReg fpRegT2 = ARM64Registers::q2;
+ static const FPRReg fpRegT3 = ARM64Registers::q3;
+ static const FPRReg fpRegT4 = ARM64Registers::q4;
+ static const FPRReg fpRegT5 = ARM64Registers::q5;
+ static const FPRReg fpRegT6 = ARM64Registers::q6;
+ static const FPRReg fpRegT7 = ARM64Registers::q7;
+ static const FPRReg fpRegT8 = ARM64Registers::q16;
+ static const FPRReg fpRegT9 = ARM64Registers::q17;
+ static const FPRReg fpRegT10 = ARM64Registers::q18;
+ static const FPRReg fpRegT11 = ARM64Registers::q19;
+ static const FPRReg fpRegT12 = ARM64Registers::q20;
+ static const FPRReg fpRegT13 = ARM64Registers::q21;
+ static const FPRReg fpRegT14 = ARM64Registers::q22;
+ static const FPRReg fpRegT15 = ARM64Registers::q23;
+ static const FPRReg fpRegT16 = ARM64Registers::q24;
+ static const FPRReg fpRegT17 = ARM64Registers::q25;
+ static const FPRReg fpRegT18 = ARM64Registers::q26;
+ static const FPRReg fpRegT19 = ARM64Registers::q27;
+ static const FPRReg fpRegT20 = ARM64Registers::q28;
+ static const FPRReg fpRegT21 = ARM64Registers::q29;
+ static const FPRReg fpRegT22 = ARM64Registers::q30;
+ static const FPRReg fpRegCS0 = ARM64Registers::q8;
+ static const FPRReg fpRegCS1 = ARM64Registers::q9;
+ static const FPRReg fpRegCS2 = ARM64Registers::q10;
+ static const FPRReg fpRegCS3 = ARM64Registers::q11;
+ static const FPRReg fpRegCS4 = ARM64Registers::q12;
+ static const FPRReg fpRegCS5 = ARM64Registers::q13;
+ static const FPRReg fpRegCS6 = ARM64Registers::q14;
+ static const FPRReg fpRegCS7 = ARM64Registers::q15;
+
+ static const FPRReg argumentFPR0 = ARM64Registers::q0; // fpRegT0
+ static const FPRReg argumentFPR1 = ARM64Registers::q1; // fpRegT1
+ static const FPRReg argumentFPR2 = ARM64Registers::q2; // fpRegT2
+ static const FPRReg argumentFPR3 = ARM64Registers::q3; // fpRegT3
+ static const FPRReg argumentFPR4 = ARM64Registers::q4; // fpRegT4
+ static const FPRReg argumentFPR5 = ARM64Registers::q5; // fpRegT5
+ static const FPRReg argumentFPR6 = ARM64Registers::q6; // fpRegT6
+ static const FPRReg argumentFPR7 = ARM64Registers::q7; // fpRegT7
+
+ static const FPRReg returnValueFPR = ARM64Registers::q0; // fpRegT0
+
+ static FPRReg toRegister(unsigned index)
+ {
+ ASSERT(index < numberOfRegisters);
+ static const FPRReg registerForIndex[numberOfRegisters] = {
+ fpRegT0, fpRegT1, fpRegT2, fpRegT3, fpRegT4, fpRegT5, fpRegT6, fpRegT7,
+ fpRegT8, fpRegT9, fpRegT10, fpRegT11, fpRegT12, fpRegT13, fpRegT14, fpRegT15,
+ fpRegT16, fpRegT17, fpRegT18, fpRegT19, fpRegT20, fpRegT21, fpRegT22
+ };
+ return registerForIndex[index];
+ }
+
+ static unsigned toIndex(FPRReg reg)
+ {
+ ASSERT(reg != InvalidFPRReg);
+ ASSERT(static_cast<int>(reg) < 32);
+ static const unsigned indexForRegister[32] = {
+ 0, 1, 2, 3, 4, 5, 6, 7,
+ InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex,
+ 8, 9, 10, 11, 12, 13, 14, 15,
+ 16, 17, 18, 19, 20, 21, 22, InvalidIndex
+ };
+ unsigned result = indexForRegister[reg];
+ return result;
+ }
+
+ static FPRReg toArgumentRegister(unsigned index)
+ {
+ ASSERT(index < 8);
+ return static_cast<FPRReg>(index);
+ }
+
+ static const char* debugName(FPRReg reg)
+ {
+ ASSERT(reg != InvalidFPRReg);
+ ASSERT(static_cast<int>(reg) < 32);
+ static const char* nameForRegister[32] = {
+ "q0", "q1", "q2", "q3", "q4", "q5", "q6", "q7",
+ "q8", "q9", "q10", "q11", "q12", "q13", "q14", "q15",
+ "q16", "q17", "q18", "q19", "q20", "q21", "q22", "q23",
+ "q24", "q25", "q26", "q27", "q28", "q29", "q30", "q31"
+ };
+ return nameForRegister[reg];
+ }
+
+ static const unsigned InvalidIndex = 0xffffffff;
+};
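+
+// A minimal sketch of how the mapping above is expected to behave (for
+// illustration only): the temporary banks round-trip through
+// toIndex()/toRegister(), while the callee-saved bank q8..q15 is deliberately
+// excluded from allocation.
+//
+//     ASSERT(FPRInfo::toIndex(FPRInfo::fpRegT0) == 0);                      // q0
+//     ASSERT(FPRInfo::toIndex(FPRInfo::fpRegT8) == 8);                      // q16
+//     ASSERT(FPRInfo::toIndex(FPRInfo::fpRegCS0) == FPRInfo::InvalidIndex); // q8
+//     ASSERT(FPRInfo::toRegister(22) == FPRInfo::fpRegT22);                 // q30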
+
+#endif // CPU(ARM64)
+
+#if CPU(MIPS)
+
+class FPRInfo {
+public:
+ typedef FPRReg RegisterType;
+ static const unsigned numberOfRegisters = 7;
+
+ // Temporary registers.
+ static const FPRReg fpRegT0 = MIPSRegisters::f0;
+ static const FPRReg fpRegT1 = MIPSRegisters::f2;
+ static const FPRReg fpRegT2 = MIPSRegisters::f4;
+ static const FPRReg fpRegT3 = MIPSRegisters::f6;
+ static const FPRReg fpRegT4 = MIPSRegisters::f8;
+ static const FPRReg fpRegT5 = MIPSRegisters::f10;
+ static const FPRReg fpRegT6 = MIPSRegisters::f18;
+
+ static const FPRReg returnValueFPR = MIPSRegisters::f0;
+
+ static const FPRReg argumentFPR0 = MIPSRegisters::f12;
+ static const FPRReg argumentFPR1 = MIPSRegisters::f14;
+
+ static FPRReg toRegister(unsigned index)
+ {
+ static const FPRReg registerForIndex[numberOfRegisters] = {
+ fpRegT0, fpRegT1, fpRegT2, fpRegT3, fpRegT4, fpRegT5, fpRegT6 };
+
+ ASSERT(index < numberOfRegisters);
+ return registerForIndex[index];
+ }
+
+ static unsigned toIndex(FPRReg reg)
+ {
+ ASSERT(reg != InvalidFPRReg);
+ ASSERT(reg < 20);
+ static const unsigned indexForRegister[20] = {
+ 0, InvalidIndex, 1, InvalidIndex,
+ 2, InvalidIndex, 3, InvalidIndex,
+ 4, InvalidIndex, 5, InvalidIndex,
+ InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex,
+ InvalidIndex, InvalidIndex, 6, InvalidIndex,
+ };
+ unsigned result = indexForRegister[reg];
+ return result;
+ }
+
+ static const char* debugName(FPRReg reg)
+ {
+ ASSERT(reg != InvalidFPRReg);
+ ASSERT(reg < 32);
+ static const char* nameForRegister[32] = {
+ "f0", "f1", "f2", "f3",
+ "f4", "f5", "f6", "f7",
+ "f8", "f9", "f10", "f11",
+            "f12", "f13", "f14", "f15",
+            "f16", "f17", "f18", "f19",
+            "f20", "f21", "f22", "f23",
+            "f24", "f25", "f26", "f27",
+ "f28", "f29", "f30", "f31"
+ };
+ return nameForRegister[reg];
+ }
+
+ static const unsigned InvalidIndex = 0xffffffff;
+};
+
+#endif // CPU(MIPS)
+
+#if CPU(SH4)
+
+class FPRInfo {
+public:
+ typedef FPRReg RegisterType;
+ static const unsigned numberOfRegisters = 6;
+
+ // Temporary registers.
+ static const FPRReg fpRegT0 = SH4Registers::dr0;
+ static const FPRReg fpRegT1 = SH4Registers::dr2;
+ static const FPRReg fpRegT2 = SH4Registers::dr4;
+ static const FPRReg fpRegT3 = SH4Registers::dr6;
+ static const FPRReg fpRegT4 = SH4Registers::dr8;
+ static const FPRReg fpRegT5 = SH4Registers::dr10;
+
+ static const FPRReg returnValueFPR = SH4Registers::dr0;
+
+ static const FPRReg argumentFPR0 = SH4Registers::dr4;
+ static const FPRReg argumentFPR1 = SH4Registers::dr6;
+
+ static FPRReg toRegister(unsigned index)
+ {
+ static const FPRReg registerForIndex[numberOfRegisters] = {
+ fpRegT0, fpRegT1, fpRegT2, fpRegT3, fpRegT4, fpRegT5 };
+
+ ASSERT(index < numberOfRegisters);
+ return registerForIndex[index];
+ }
+
+ static unsigned toIndex(FPRReg reg)
+ {
+ ASSERT(reg != InvalidFPRReg);
+ ASSERT(reg < 16);
+ static const unsigned indexForRegister[16] = {
+ 0, InvalidIndex, 1, InvalidIndex,
+ 2, InvalidIndex, 3, InvalidIndex,
+ 4, InvalidIndex, 5, InvalidIndex,
+ InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex
+ };
+ unsigned result = indexForRegister[reg];
+ return result;
+ }
+
+ static const char* debugName(FPRReg reg)
+ {
+ ASSERT(reg != InvalidFPRReg);
+ ASSERT(reg < 16);
+ static const char* nameForRegister[16] = {
+ "dr0", "fr1", "dr2", "fr3",
+ "dr4", "fr5", "dr6", "fr7",
+ "dr8", "fr9", "dr10", "fr11",
+ "dr12", "fr13", "dr14", "fr15"
+ };
+ return nameForRegister[reg];
+ }
+
+ static const unsigned InvalidIndex = 0xffffffff;
+};
+
+#endif // CPU(SH4)
+
+#endif // ENABLE(JIT)
+
+} // namespace JSC
+
+namespace WTF {
+
+inline void printInternal(PrintStream& out, JSC::FPRReg reg)
+{
+#if ENABLE(JIT)
+ out.print("%", JSC::FPRInfo::debugName(reg));
+#else
+ out.printf("%%fr%d", reg);
+#endif
+}
+
+} // namespace WTF
+
+#endif
diff --git a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp
index f681dd847..60c0c5514 100644
--- a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp
+++ b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp
@@ -28,24 +28,25 @@
#if ENABLE(JIT)
+#include "CodeBlock.h"
+#include "DFGCommonData.h"
#include "Heap.h"
#include "VM.h"
-#include "Operations.h"
+#include "JSCInlines.h"
#include "SlotVisitor.h"
#include "Structure.h"
namespace JSC {
GCAwareJITStubRoutine::GCAwareJITStubRoutine(
- const MacroAssemblerCodeRef& code, VM& vm, bool isClosureCall)
+ const MacroAssemblerCodeRef& code, VM& vm)
: JITStubRoutine(code)
, m_mayBeExecuting(false)
, m_isJettisoned(false)
- , m_isClosureCall(isClosureCall)
{
vm.heap.m_jitStubRoutines.add(this);
}
-
+
GCAwareJITStubRoutine::~GCAwareJITStubRoutine() { }
void GCAwareJITStubRoutine::observeZeroRefCount()
@@ -95,29 +96,61 @@ void MarkingGCAwareJITStubRoutineWithOneObject::markRequiredObjectsInternal(Slot
visitor.append(&m_object);
}
-PassRefPtr<JITStubRoutine> createJITStubRoutine(
- const MacroAssemblerCodeRef& code,
- VM& vm,
- const JSCell*,
- bool makesCalls)
+
+GCAwareJITStubRoutineWithExceptionHandler::GCAwareJITStubRoutineWithExceptionHandler(
+ const MacroAssemblerCodeRef& code, VM& vm,
+ CodeBlock* codeBlockForExceptionHandlers, CallSiteIndex exceptionHandlerCallSiteIndex)
+ : GCAwareJITStubRoutine(code, vm)
+ , m_codeBlockWithExceptionHandler(codeBlockForExceptionHandlers)
+ , m_exceptionHandlerCallSiteIndex(exceptionHandlerCallSiteIndex)
{
- if (!makesCalls)
- return adoptRef(new JITStubRoutine(code));
+ RELEASE_ASSERT(m_codeBlockWithExceptionHandler);
+ ASSERT(!!m_codeBlockWithExceptionHandler->handlerForIndex(exceptionHandlerCallSiteIndex.bits()));
+}
- return static_pointer_cast<JITStubRoutine>(
- adoptRef(new GCAwareJITStubRoutine(code, vm)));
+void GCAwareJITStubRoutineWithExceptionHandler::aboutToDie()
+{
+ m_codeBlockWithExceptionHandler = nullptr;
}
+void GCAwareJITStubRoutineWithExceptionHandler::observeZeroRefCount()
+{
+#if ENABLE(DFG_JIT)
+ if (m_codeBlockWithExceptionHandler) {
+ m_codeBlockWithExceptionHandler->jitCode()->dfgCommon()->removeCallSiteIndex(m_exceptionHandlerCallSiteIndex);
+ m_codeBlockWithExceptionHandler->removeExceptionHandlerForCallSite(m_exceptionHandlerCallSiteIndex);
+ m_codeBlockWithExceptionHandler = nullptr;
+ }
+#endif
+
+ Base::observeZeroRefCount();
+}
+
+
PassRefPtr<JITStubRoutine> createJITStubRoutine(
const MacroAssemblerCodeRef& code,
VM& vm,
const JSCell* owner,
bool makesCalls,
- JSCell* object)
+ JSCell* object,
+ CodeBlock* codeBlockForExceptionHandlers,
+ CallSiteIndex exceptionHandlerCallSiteIndex)
{
if (!makesCalls)
return adoptRef(new JITStubRoutine(code));
+ if (codeBlockForExceptionHandlers) {
+ RELEASE_ASSERT(!object); // We're not a marking stub routine.
+ RELEASE_ASSERT(JITCode::isOptimizingJIT(codeBlockForExceptionHandlers->jitType()));
+ return static_pointer_cast<JITStubRoutine>(
+ adoptRef(new GCAwareJITStubRoutineWithExceptionHandler(code, vm, codeBlockForExceptionHandlers, exceptionHandlerCallSiteIndex)));
+ }
+
+ if (!object) {
+ return static_pointer_cast<JITStubRoutine>(
+ adoptRef(new GCAwareJITStubRoutine(code, vm)));
+ }
+
return static_pointer_cast<JITStubRoutine>(
adoptRef(new MarkingGCAwareJITStubRoutineWithOneObject(code, vm, owner, object)));
}
diff --git a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h
index f0b282cf1..97d9016d6 100644
--- a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h
+++ b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,8 +26,6 @@
#ifndef GCAwareJITStubRoutine_h
#define GCAwareJITStubRoutine_h
-#include <wtf/Platform.h>
-
#if ENABLE(JIT)
#include "JITStubRoutine.h"
@@ -54,7 +52,7 @@ class JITStubRoutineSet;
// list which does not get reclaimed all at once).
class GCAwareJITStubRoutine : public JITStubRoutine {
public:
- GCAwareJITStubRoutine(const MacroAssemblerCodeRef&, VM&, bool isClosureCall = false);
+ GCAwareJITStubRoutine(const MacroAssemblerCodeRef&, VM&);
virtual ~GCAwareJITStubRoutine();
void markRequiredObjects(SlotVisitor& visitor)
@@ -64,10 +62,8 @@ public:
void deleteFromGC();
- bool isClosureCall() const { return m_isClosureCall; }
-
protected:
- virtual void observeZeroRefCount();
+ virtual void observeZeroRefCount() override;
virtual void markRequiredObjectsInternal(SlotVisitor&);
@@ -76,7 +72,6 @@ private:
bool m_mayBeExecuting;
bool m_isJettisoned;
- bool m_isClosureCall;
};
// Use this if you want to mark one additional object during GC if your stub
@@ -88,12 +83,30 @@ public:
virtual ~MarkingGCAwareJITStubRoutineWithOneObject();
protected:
- virtual void markRequiredObjectsInternal(SlotVisitor&);
+ virtual void markRequiredObjectsInternal(SlotVisitor&) override;
private:
WriteBarrier<JSCell> m_object;
};
+
+// The stub has exception handlers in it, so it removes itself from the
+// exception handling table when it dies. It also frees its slot in the
+// CodeOrigin table so that new exception handlers can reuse the same
+// CallSiteIndex.
+class GCAwareJITStubRoutineWithExceptionHandler : public GCAwareJITStubRoutine {
+public:
+ typedef GCAwareJITStubRoutine Base;
+
+ GCAwareJITStubRoutineWithExceptionHandler(const MacroAssemblerCodeRef&, VM&, CodeBlock*, CallSiteIndex);
+
+ void aboutToDie() override;
+ void observeZeroRefCount() override;
+
+private:
+ CodeBlock* m_codeBlockWithExceptionHandler;
+ CallSiteIndex m_exceptionHandlerCallSiteIndex;
+};
+
// Helper for easily creating a GC-aware JIT stub routine. For the varargs,
// pass zero or more JSCell*'s. This will either create a JITStubRoutine, a
// GCAwareJITStubRoutine, or an ObjectMarkingGCAwareJITStubRoutine as
@@ -114,10 +127,14 @@ private:
// way.
PassRefPtr<JITStubRoutine> createJITStubRoutine(
- const MacroAssemblerCodeRef&, VM&, const JSCell* owner, bool makesCalls);
-PassRefPtr<JITStubRoutine> createJITStubRoutine(
const MacroAssemblerCodeRef&, VM&, const JSCell* owner, bool makesCalls,
- JSCell*);
+ JSCell* = nullptr,
+ CodeBlock* codeBlockForExceptionHandlers = nullptr, CallSiteIndex exceptionHandlingCallSiteIndex = CallSiteIndex(std::numeric_limits<unsigned>::max()));
+
+// Helper for the creation of simple stub routines that need no help from the GC. Note
+// that the codeBlock argument is evaluated more than once.
+#define FINALIZE_CODE_FOR_GC_AWARE_STUB(codeBlock, patchBuffer, makesCalls, cell, dataLogFArguments) \
+ (createJITStubRoutine(FINALIZE_CODE_FOR((codeBlock), (patchBuffer), dataLogFArguments), *(codeBlock)->vm(), (codeBlock), (makesCalls), (cell)))
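+
+// A hypothetical call site, for illustration only (the identifiers below are
+// not part of this patch); the last macro argument is the parenthesized
+// printf-style description forwarded to FINALIZE_CODE_FOR:
+//
+//     RefPtr<JITStubRoutine> stub = FINALIZE_CODE_FOR_GC_AWARE_STUB(
+//         codeBlock, patchBuffer, makesCalls, cellToMark,
+//         ("example stub for codeBlock %p", codeBlock));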
} // namespace JSC
diff --git a/Source/JavaScriptCore/jit/GPRInfo.cpp b/Source/JavaScriptCore/jit/GPRInfo.cpp
new file mode 100644
index 000000000..849354854
--- /dev/null
+++ b/Source/JavaScriptCore/jit/GPRInfo.cpp
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "GPRInfo.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+// This is in the .cpp file to work around clang issues.
+#if CPU(X86_64)
+const GPRReg GPRInfo::patchpointScratchRegister = MacroAssembler::s_scratchRegister;
+#elif CPU(ARM64)
+const GPRReg GPRInfo::patchpointScratchRegister = ARM64Registers::ip0;
+#endif
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/GPRInfo.h b/Source/JavaScriptCore/jit/GPRInfo.h
new file mode 100644
index 000000000..14a2ebd3d
--- /dev/null
+++ b/Source/JavaScriptCore/jit/GPRInfo.h
@@ -0,0 +1,918 @@
+/*
+ * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef GPRInfo_h
+#define GPRInfo_h
+
+#include "MacroAssembler.h"
+#include <array>
+#include <wtf/PrintStream.h>
+
+namespace JSC {
+
+// We use the same conventions in the baseline JIT as in the LLInt. If you
+// change mappings in the GPRInfo, you should change them accordingly in the
+// offlineasm compiler. The register naming conventions are described at the
+// top of the LowLevelInterpreter.asm file.
+
+typedef MacroAssembler::RegisterID GPRReg;
+#define InvalidGPRReg ((::JSC::GPRReg)-1)
+
+#if ENABLE(JIT)
+
+#if USE(JSVALUE64)
+class JSValueRegs {
+public:
+ JSValueRegs()
+ : m_gpr(InvalidGPRReg)
+ {
+ }
+
+ explicit JSValueRegs(GPRReg gpr)
+ : m_gpr(gpr)
+ {
+ }
+
+ static JSValueRegs payloadOnly(GPRReg gpr)
+ {
+ return JSValueRegs(gpr);
+ }
+
+ static JSValueRegs withTwoAvailableRegs(GPRReg gpr, GPRReg)
+ {
+ return JSValueRegs(gpr);
+ }
+
+ bool operator!() const { return m_gpr == InvalidGPRReg; }
+ explicit operator bool() const { return m_gpr != InvalidGPRReg; }
+
+ bool operator==(JSValueRegs other) { return m_gpr == other.m_gpr; }
+ bool operator!=(JSValueRegs other) { return !(*this == other); }
+
+ GPRReg gpr() const { return m_gpr; }
+ GPRReg tagGPR() const { return InvalidGPRReg; }
+ GPRReg payloadGPR() const { return m_gpr; }
+
+ bool uses(GPRReg gpr) const { return m_gpr == gpr; }
+
+private:
+ GPRReg m_gpr;
+};
+
+class JSValueSource {
+public:
+ JSValueSource()
+ : m_offset(notAddress())
+ , m_base(InvalidGPRReg)
+ {
+ }
+
+ JSValueSource(JSValueRegs regs)
+ : m_offset(notAddress())
+ , m_base(regs.gpr())
+ {
+ }
+
+ explicit JSValueSource(GPRReg gpr)
+ : m_offset(notAddress())
+ , m_base(gpr)
+ {
+ }
+
+ JSValueSource(MacroAssembler::Address address)
+ : m_offset(address.offset)
+ , m_base(address.base)
+ {
+ ASSERT(m_offset != notAddress());
+ ASSERT(m_base != InvalidGPRReg);
+ }
+
+ static JSValueSource unboxedCell(GPRReg payloadGPR)
+ {
+ return JSValueSource(payloadGPR);
+ }
+
+ bool operator!() const { return m_base == InvalidGPRReg; }
+ explicit operator bool() const { return m_base != InvalidGPRReg; }
+
+ bool isAddress() const { return m_offset != notAddress(); }
+
+ int32_t offset() const
+ {
+ ASSERT(isAddress());
+ return m_offset;
+ }
+
+ GPRReg base() const
+ {
+ ASSERT(isAddress());
+ return m_base;
+ }
+
+ GPRReg gpr() const
+ {
+ ASSERT(!isAddress());
+ return m_base;
+ }
+
+ MacroAssembler::Address asAddress() const { return MacroAssembler::Address(base(), offset()); }
+
+private:
+ static inline int32_t notAddress() { return 0x80000000; }
+
+ int32_t m_offset;
+ GPRReg m_base;
+};
+#endif // USE(JSVALUE64)
+
+#if USE(JSVALUE32_64)
+class JSValueRegs {
+public:
+ JSValueRegs()
+ : m_tagGPR(static_cast<int8_t>(InvalidGPRReg))
+ , m_payloadGPR(static_cast<int8_t>(InvalidGPRReg))
+ {
+ }
+
+ JSValueRegs(GPRReg tagGPR, GPRReg payloadGPR)
+ : m_tagGPR(tagGPR)
+ , m_payloadGPR(payloadGPR)
+ {
+ }
+
+ static JSValueRegs withTwoAvailableRegs(GPRReg gpr1, GPRReg gpr2)
+ {
+ return JSValueRegs(gpr1, gpr2);
+ }
+
+ static JSValueRegs payloadOnly(GPRReg gpr)
+ {
+ return JSValueRegs(InvalidGPRReg, gpr);
+ }
+
+ bool operator!() const { return !static_cast<bool>(*this); }
+ explicit operator bool() const
+ {
+ return static_cast<GPRReg>(m_tagGPR) != InvalidGPRReg
+ || static_cast<GPRReg>(m_payloadGPR) != InvalidGPRReg;
+ }
+
+ bool operator==(JSValueRegs other) const
+ {
+ return m_tagGPR == other.m_tagGPR
+ && m_payloadGPR == other.m_payloadGPR;
+ }
+ bool operator!=(JSValueRegs other) const { return !(*this == other); }
+
+ GPRReg tagGPR() const { return static_cast<GPRReg>(m_tagGPR); }
+ GPRReg payloadGPR() const { return static_cast<GPRReg>(m_payloadGPR); }
+ GPRReg gpr(WhichValueWord which) const
+ {
+ switch (which) {
+ case TagWord:
+ return tagGPR();
+ case PayloadWord:
+ return payloadGPR();
+ }
+ ASSERT_NOT_REACHED();
+ return tagGPR();
+ }
+
+ bool uses(GPRReg gpr) const { return m_tagGPR == gpr || m_payloadGPR == gpr; }
+
+private:
+ int8_t m_tagGPR;
+ int8_t m_payloadGPR;
+};
+
+class JSValueSource {
+public:
+ JSValueSource()
+ : m_offset(notAddress())
+ , m_baseOrTag(static_cast<int8_t>(InvalidGPRReg))
+ , m_payload(static_cast<int8_t>(InvalidGPRReg))
+ , m_tagType(0)
+ {
+ }
+
+ JSValueSource(JSValueRegs regs)
+ : m_offset(notAddress())
+ , m_baseOrTag(regs.tagGPR())
+ , m_payload(regs.payloadGPR())
+ , m_tagType(0)
+ {
+ }
+
+ JSValueSource(GPRReg tagGPR, GPRReg payloadGPR)
+ : m_offset(notAddress())
+ , m_baseOrTag(static_cast<int8_t>(tagGPR))
+ , m_payload(static_cast<int8_t>(payloadGPR))
+ , m_tagType(0)
+ {
+ }
+
+ JSValueSource(MacroAssembler::Address address)
+ : m_offset(address.offset)
+ , m_baseOrTag(static_cast<int8_t>(address.base))
+ , m_payload(static_cast<int8_t>(InvalidGPRReg))
+ , m_tagType(0)
+ {
+ ASSERT(m_offset != notAddress());
+ ASSERT(static_cast<GPRReg>(m_baseOrTag) != InvalidGPRReg);
+ }
+
+ static JSValueSource unboxedCell(GPRReg payloadGPR)
+ {
+ JSValueSource result;
+ result.m_offset = notAddress();
+ result.m_baseOrTag = static_cast<int8_t>(InvalidGPRReg);
+ result.m_payload = static_cast<int8_t>(payloadGPR);
+ result.m_tagType = static_cast<int8_t>(JSValue::CellTag);
+ return result;
+ }
+
+ bool operator!() const { return !static_cast<bool>(*this); }
+ explicit operator bool() const
+ {
+ return static_cast<GPRReg>(m_baseOrTag) != InvalidGPRReg
+ || static_cast<GPRReg>(m_payload) != InvalidGPRReg;
+ }
+
+ bool isAddress() const
+ {
+ ASSERT(!!*this);
+ return m_offset != notAddress();
+ }
+
+ int32_t offset() const
+ {
+ ASSERT(isAddress());
+ return m_offset;
+ }
+
+ GPRReg base() const
+ {
+ ASSERT(isAddress());
+ return static_cast<GPRReg>(m_baseOrTag);
+ }
+
+ GPRReg tagGPR() const
+ {
+ ASSERT(!isAddress() && static_cast<GPRReg>(m_baseOrTag) != InvalidGPRReg);
+ return static_cast<GPRReg>(m_baseOrTag);
+ }
+
+ GPRReg payloadGPR() const
+ {
+ ASSERT(!isAddress());
+ return static_cast<GPRReg>(m_payload);
+ }
+
+ bool hasKnownTag() const
+ {
+ ASSERT(!!*this);
+ ASSERT(!isAddress());
+ return static_cast<GPRReg>(m_baseOrTag) == InvalidGPRReg;
+ }
+
+ uint32_t tag() const
+ {
+ return static_cast<int32_t>(m_tagType);
+ }
+
+ MacroAssembler::Address asAddress(unsigned additionalOffset = 0) const { return MacroAssembler::Address(base(), offset() + additionalOffset); }
+
+private:
+ static inline int32_t notAddress() { return 0x80000000; }
+
+ int32_t m_offset;
+ int8_t m_baseOrTag;
+ int8_t m_payload;
+ int8_t m_tagType; // Contains the low bits of the tag.
+};
+#endif // USE(JSVALUE32_64)
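+
+// A minimal sketch of the intent behind the two JSValueRegs flavors above
+// (illustrative only): on 64-bit ports a single GPR carries the whole encoded
+// JSValue, while on 32-bit ports the tag and payload occupy separate GPRs.
+//
+//     #if USE(JSVALUE64)
+//         JSValueRegs regs(GPRInfo::regT0);                 // payloadGPR() == regT0, tagGPR() == InvalidGPRReg
+//     #else
+//         JSValueRegs regs(GPRInfo::regT1, GPRInfo::regT0); // tagGPR() == regT1, payloadGPR() == regT0
+//     #endif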
+
+#if CPU(X86)
+#define NUMBER_OF_ARGUMENT_REGISTERS 0u
+#define NUMBER_OF_CALLEE_SAVES_REGISTERS 0u
+
+class GPRInfo {
+public:
+ typedef GPRReg RegisterType;
+ static const unsigned numberOfRegisters = 6;
+ static const unsigned numberOfArgumentRegisters = NUMBER_OF_ARGUMENT_REGISTERS;
+
+ // Temporary registers.
+ static const GPRReg regT0 = X86Registers::eax;
+ static const GPRReg regT1 = X86Registers::edx;
+ static const GPRReg regT2 = X86Registers::ecx;
+ static const GPRReg regT3 = X86Registers::ebx; // Callee-save
+ static const GPRReg regT4 = X86Registers::esi; // Callee-save
+ static const GPRReg regT5 = X86Registers::edi; // Callee-save
+ static const GPRReg callFrameRegister = X86Registers::ebp;
+ // These constants provide the names for the general purpose argument & return value registers.
+ static const GPRReg argumentGPR0 = X86Registers::ecx; // regT2
+ static const GPRReg argumentGPR1 = X86Registers::edx; // regT1
+ static const GPRReg argumentGPR2 = X86Registers::eax; // regT0
+ static const GPRReg argumentGPR3 = X86Registers::ebx; // regT3
+ static const GPRReg nonArgGPR0 = X86Registers::esi; // regT4
+ static const GPRReg returnValueGPR = X86Registers::eax; // regT0
+ static const GPRReg returnValueGPR2 = X86Registers::edx; // regT1
+ static const GPRReg nonPreservedNonReturnGPR = X86Registers::ecx;
+
+ static GPRReg toRegister(unsigned index)
+ {
+ ASSERT(index < numberOfRegisters);
+ static const GPRReg registerForIndex[numberOfRegisters] = { regT0, regT1, regT2, regT3, regT4, regT5 };
+ return registerForIndex[index];
+ }
+
+ static GPRReg toArgumentRegister(unsigned)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ return InvalidGPRReg;
+ }
+
+ static unsigned toIndex(GPRReg reg)
+ {
+ ASSERT(reg != InvalidGPRReg);
+ ASSERT(static_cast<int>(reg) < 8);
+ static const unsigned indexForRegister[8] = { 0, 2, 1, 3, InvalidIndex, InvalidIndex, 4, 5 };
+ unsigned result = indexForRegister[reg];
+ return result;
+ }
+
+ static const char* debugName(GPRReg reg)
+ {
+ ASSERT(reg != InvalidGPRReg);
+ ASSERT(static_cast<int>(reg) < 8);
+ static const char* nameForRegister[8] = {
+ "eax", "ecx", "edx", "ebx",
+ "esp", "ebp", "esi", "edi",
+ };
+ return nameForRegister[reg];
+ }
+
+ static const unsigned InvalidIndex = 0xffffffff;
+};
+
+#endif // CPU(X86)
+
+#if CPU(X86_64)
+#if !OS(WINDOWS)
+#define NUMBER_OF_ARGUMENT_REGISTERS 6u
+#define NUMBER_OF_CALLEE_SAVES_REGISTERS 5u
+#else
+#define NUMBER_OF_ARGUMENT_REGISTERS 4u
+#define NUMBER_OF_CALLEE_SAVES_REGISTERS 7u
+#endif
+
+class GPRInfo {
+public:
+ typedef GPRReg RegisterType;
+ static const unsigned numberOfRegisters = 11;
+ static const unsigned numberOfArgumentRegisters = NUMBER_OF_ARGUMENT_REGISTERS;
+
+ // These registers match the baseline JIT.
+ static const GPRReg callFrameRegister = X86Registers::ebp;
+ static const GPRReg tagTypeNumberRegister = X86Registers::r14;
+ static const GPRReg tagMaskRegister = X86Registers::r15;
+
+ // Temporary registers.
+ static const GPRReg regT0 = X86Registers::eax;
+#if !OS(WINDOWS)
+ static const GPRReg regT1 = X86Registers::esi;
+ static const GPRReg regT2 = X86Registers::edx;
+ static const GPRReg regT3 = X86Registers::ecx;
+ static const GPRReg regT4 = X86Registers::r8;
+ static const GPRReg regT5 = X86Registers::r10;
+ static const GPRReg regT6 = X86Registers::edi;
+ static const GPRReg regT7 = X86Registers::r9;
+#else
+ static const GPRReg regT1 = X86Registers::edx;
+ static const GPRReg regT2 = X86Registers::r8;
+ static const GPRReg regT3 = X86Registers::r9;
+ static const GPRReg regT4 = X86Registers::r10;
+ static const GPRReg regT5 = X86Registers::ecx;
+#endif
+
+ static const GPRReg regCS0 = X86Registers::ebx;
+
+#if !OS(WINDOWS)
+ static const GPRReg regCS1 = X86Registers::r12;
+ static const GPRReg regCS2 = X86Registers::r13;
+ static const GPRReg regCS3 = X86Registers::r14;
+ static const GPRReg regCS4 = X86Registers::r15;
+#else
+ static const GPRReg regCS1 = X86Registers::esi;
+ static const GPRReg regCS2 = X86Registers::edi;
+ static const GPRReg regCS3 = X86Registers::r12;
+ static const GPRReg regCS4 = X86Registers::r13;
+ static const GPRReg regCS5 = X86Registers::r14;
+ static const GPRReg regCS6 = X86Registers::r15;
+#endif
+
+ // These constants provide the names for the general purpose argument & return value registers.
+#if !OS(WINDOWS)
+ static const GPRReg argumentGPR0 = X86Registers::edi; // regT6
+ static const GPRReg argumentGPR1 = X86Registers::esi; // regT1
+ static const GPRReg argumentGPR2 = X86Registers::edx; // regT2
+ static const GPRReg argumentGPR3 = X86Registers::ecx; // regT3
+ static const GPRReg argumentGPR4 = X86Registers::r8; // regT4
+ static const GPRReg argumentGPR5 = X86Registers::r9; // regT7
+#else
+ static const GPRReg argumentGPR0 = X86Registers::ecx; // regT5
+ static const GPRReg argumentGPR1 = X86Registers::edx; // regT1
+ static const GPRReg argumentGPR2 = X86Registers::r8; // regT2
+ static const GPRReg argumentGPR3 = X86Registers::r9; // regT3
+#endif
+ static const GPRReg nonArgGPR0 = X86Registers::r10; // regT5 (regT4 on Windows)
+ static const GPRReg returnValueGPR = X86Registers::eax; // regT0
+ static const GPRReg returnValueGPR2 = X86Registers::edx; // regT1 or regT2
+ static const GPRReg nonPreservedNonReturnGPR = X86Registers::r10; // regT5 (regT4 on Windows)
+ static const GPRReg nonPreservedNonArgumentGPR = X86Registers::r10; // regT5 (regT4 on Windows)
+
+ // FIXME: I believe that all uses of this are dead in the sense that it just causes the scratch
+ // register allocator to select a different register and potentially spill things. It would be better
+ // if we instead had a more explicit way of saying that we don't have a scratch register.
+ static const GPRReg patchpointScratchRegister;
+
+ static GPRReg toRegister(unsigned index)
+ {
+ ASSERT(index < numberOfRegisters);
+#if !OS(WINDOWS)
+ static const GPRReg registerForIndex[numberOfRegisters] = { regT0, regT1, regT2, regT3, regT4, regT5, regT6, regT7, regCS0, regCS1, regCS2 };
+#else
+ static const GPRReg registerForIndex[numberOfRegisters] = { regT0, regT1, regT2, regT3, regT4, regT5, regCS0, regCS1, regCS2, regCS3, regCS4 };
+#endif
+ return registerForIndex[index];
+ }
+
+ static GPRReg toArgumentRegister(unsigned index)
+ {
+ ASSERT(index < numberOfArgumentRegisters);
+#if !OS(WINDOWS)
+ static const GPRReg registerForIndex[numberOfArgumentRegisters] = { argumentGPR0, argumentGPR1, argumentGPR2, argumentGPR3, argumentGPR4, argumentGPR5 };
+#else
+ static const GPRReg registerForIndex[numberOfArgumentRegisters] = { argumentGPR0, argumentGPR1, argumentGPR2, argumentGPR3 };
+#endif
+ return registerForIndex[index];
+ }
+
+ static unsigned toIndex(GPRReg reg)
+ {
+ ASSERT(reg != InvalidGPRReg);
+ ASSERT(static_cast<int>(reg) < 16);
+#if !OS(WINDOWS)
+ static const unsigned indexForRegister[16] = { 0, 3, 2, 8, InvalidIndex, InvalidIndex, 1, 6, 4, 7, 5, InvalidIndex, 9, 10, InvalidIndex, InvalidIndex };
+#else
+ static const unsigned indexForRegister[16] = { 0, 5, 1, 6, InvalidIndex, InvalidIndex, 7, 8, 2, 3, 4, InvalidIndex, 9, 10, InvalidIndex, InvalidIndex };
+#endif
+ return indexForRegister[reg];
+ }
+
+ static const char* debugName(GPRReg reg)
+ {
+ ASSERT(reg != InvalidGPRReg);
+ ASSERT(static_cast<int>(reg) < 16);
+ static const char* nameForRegister[16] = {
+ "rax", "rcx", "rdx", "rbx",
+ "rsp", "rbp", "rsi", "rdi",
+ "r8", "r9", "r10", "r11",
+ "r12", "r13", "r14", "r15"
+ };
+ return nameForRegister[reg];
+ }
+
+ static const std::array<GPRReg, 3>& reservedRegisters()
+ {
+ static const std::array<GPRReg, 3> reservedRegisters { {
+ MacroAssembler::s_scratchRegister,
+ tagTypeNumberRegister,
+ tagMaskRegister,
+ } };
+ return reservedRegisters;
+ }
+
+ static const unsigned InvalidIndex = 0xffffffff;
+};
+
+#endif // CPU(X86_64)
+
+#if CPU(ARM)
+#define NUMBER_OF_ARGUMENT_REGISTERS 4u
+#define NUMBER_OF_CALLEE_SAVES_REGISTERS 0u
+
+class GPRInfo {
+public:
+ typedef GPRReg RegisterType;
+ static const unsigned numberOfRegisters = 9;
+ static const unsigned numberOfArgumentRegisters = NUMBER_OF_ARGUMENT_REGISTERS;
+
+ // Temporary registers.
+ static const GPRReg regT0 = ARMRegisters::r0;
+ static const GPRReg regT1 = ARMRegisters::r1;
+ static const GPRReg regT2 = ARMRegisters::r2;
+ static const GPRReg regT3 = ARMRegisters::r3;
+ static const GPRReg regT4 = ARMRegisters::r8;
+ static const GPRReg regT5 = ARMRegisters::r9;
+ static const GPRReg regT6 = ARMRegisters::r10;
+#if CPU(ARM_THUMB2)
+ static const GPRReg regT7 = ARMRegisters::r11;
+#else
+ static const GPRReg regT7 = ARMRegisters::r7;
+#endif
+ static const GPRReg regT8 = ARMRegisters::r4;
+ // These registers match the baseline JIT.
+ static const GPRReg callFrameRegister = ARMRegisters::fp;
+ // These constants provide the names for the general purpose argument & return value registers.
+ static const GPRReg argumentGPR0 = ARMRegisters::r0; // regT0
+ static const GPRReg argumentGPR1 = ARMRegisters::r1; // regT1
+ static const GPRReg argumentGPR2 = ARMRegisters::r2; // regT2
+ static const GPRReg argumentGPR3 = ARMRegisters::r3; // regT3
+ static const GPRReg nonArgGPR0 = ARMRegisters::r4; // regT8
+ static const GPRReg nonArgGPR1 = ARMRegisters::r8; // regT4
+ static const GPRReg returnValueGPR = ARMRegisters::r0; // regT0
+ static const GPRReg returnValueGPR2 = ARMRegisters::r1; // regT1
+ static const GPRReg nonPreservedNonReturnGPR = ARMRegisters::r5;
+
+ static GPRReg toRegister(unsigned index)
+ {
+ ASSERT(index < numberOfRegisters);
+ static const GPRReg registerForIndex[numberOfRegisters] = { regT0, regT1, regT2, regT3, regT4, regT5, regT6, regT7, regT8 };
+ return registerForIndex[index];
+ }
+
+ static GPRReg toArgumentRegister(unsigned index)
+ {
+ ASSERT(index < numberOfArgumentRegisters);
+ static const GPRReg registerForIndex[numberOfArgumentRegisters] = { argumentGPR0, argumentGPR1, argumentGPR2, argumentGPR3 };
+ return registerForIndex[index];
+ }
+
+ static unsigned toIndex(GPRReg reg)
+ {
+ ASSERT(reg != InvalidGPRReg);
+ ASSERT(static_cast<int>(reg) < 16);
+ static const unsigned indexForRegister[16] =
+#if CPU(ARM_THUMB2)
+ { 0, 1, 2, 3, 8, InvalidIndex, InvalidIndex, InvalidIndex, 4, 5, 6, 7, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex };
+#else
+ { 0, 1, 2, 3, 8, InvalidIndex, InvalidIndex, 7, 4, 5, 6, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex };
+#endif
+ unsigned result = indexForRegister[reg];
+ return result;
+ }
+
+ static const char* debugName(GPRReg reg)
+ {
+ ASSERT(reg != InvalidGPRReg);
+ ASSERT(static_cast<int>(reg) < 16);
+ static const char* nameForRegister[16] = {
+ "r0", "r1", "r2", "r3",
+ "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11",
+ "r12", "r13", "r14", "r15"
+ };
+ return nameForRegister[reg];
+ }
+
+ static const unsigned InvalidIndex = 0xffffffff;
+};
+
+#endif // CPU(ARM)
+
+#if CPU(ARM64)
+#define NUMBER_OF_ARGUMENT_REGISTERS 8u
+// The callee-saved set includes x19..x28 and the FP registers q8..q15.
+#define NUMBER_OF_CALLEE_SAVES_REGISTERS 18u
+
+class GPRInfo {
+public:
+ typedef GPRReg RegisterType;
+ static const unsigned numberOfRegisters = 16;
+ static const unsigned numberOfArgumentRegisters = 8;
+
+ // These registers match the baseline JIT.
+ static const GPRReg callFrameRegister = ARM64Registers::fp;
+ static const GPRReg tagTypeNumberRegister = ARM64Registers::x27;
+ static const GPRReg tagMaskRegister = ARM64Registers::x28;
+ static const GPRReg dataTempRegister = MacroAssembler::dataTempRegister;
+ static const GPRReg memoryTempRegister = MacroAssembler::memoryTempRegister;
+ // Temporary registers.
+ static const GPRReg regT0 = ARM64Registers::x0;
+ static const GPRReg regT1 = ARM64Registers::x1;
+ static const GPRReg regT2 = ARM64Registers::x2;
+ static const GPRReg regT3 = ARM64Registers::x3;
+ static const GPRReg regT4 = ARM64Registers::x4;
+ static const GPRReg regT5 = ARM64Registers::x5;
+ static const GPRReg regT6 = ARM64Registers::x6;
+ static const GPRReg regT7 = ARM64Registers::x7;
+ static const GPRReg regT8 = ARM64Registers::x8;
+ static const GPRReg regT9 = ARM64Registers::x9;
+ static const GPRReg regT10 = ARM64Registers::x10;
+ static const GPRReg regT11 = ARM64Registers::x11;
+ static const GPRReg regT12 = ARM64Registers::x12;
+ static const GPRReg regT13 = ARM64Registers::x13;
+ static const GPRReg regT14 = ARM64Registers::x14;
+ static const GPRReg regT15 = ARM64Registers::x15;
+ static const GPRReg regCS0 = ARM64Registers::x19; // Used by FTL only
+ static const GPRReg regCS1 = ARM64Registers::x20; // Used by FTL only
+ static const GPRReg regCS2 = ARM64Registers::x21; // Used by FTL only
+ static const GPRReg regCS3 = ARM64Registers::x22; // Used by FTL only
+ static const GPRReg regCS4 = ARM64Registers::x23; // Used by FTL only
+ static const GPRReg regCS5 = ARM64Registers::x24; // Used by FTL only
+ static const GPRReg regCS6 = ARM64Registers::x25; // Used by FTL only
+ static const GPRReg regCS7 = ARM64Registers::x26;
+ static const GPRReg regCS8 = ARM64Registers::x27; // tagTypeNumber
+ static const GPRReg regCS9 = ARM64Registers::x28; // tagMask
+ // These constants provide the names for the general purpose argument & return value registers.
+ static const GPRReg argumentGPR0 = ARM64Registers::x0; // regT0
+ static const GPRReg argumentGPR1 = ARM64Registers::x1; // regT1
+ static const GPRReg argumentGPR2 = ARM64Registers::x2; // regT2
+ static const GPRReg argumentGPR3 = ARM64Registers::x3; // regT3
+ static const GPRReg argumentGPR4 = ARM64Registers::x4; // regT4
+ static const GPRReg argumentGPR5 = ARM64Registers::x5; // regT5
+ static const GPRReg argumentGPR6 = ARM64Registers::x6; // regT6
+ static const GPRReg argumentGPR7 = ARM64Registers::x7; // regT7
+ static const GPRReg nonArgGPR0 = ARM64Registers::x8; // regT8
+ static const GPRReg nonArgGPR1 = ARM64Registers::x9; // regT9
+ static const GPRReg returnValueGPR = ARM64Registers::x0; // regT0
+ static const GPRReg returnValueGPR2 = ARM64Registers::x1; // regT1
+ static const GPRReg nonPreservedNonReturnGPR = ARM64Registers::x2;
+ static const GPRReg nonPreservedNonArgumentGPR = ARM64Registers::x8;
+ static const GPRReg patchpointScratchRegister;
+
+    // GPRReg mapping is direct; the machine register numbers can
+    // be used directly as indices into the GPR RegisterBank.
+ COMPILE_ASSERT(ARM64Registers::q0 == 0, q0_is_0);
+ COMPILE_ASSERT(ARM64Registers::q1 == 1, q1_is_1);
+ COMPILE_ASSERT(ARM64Registers::q2 == 2, q2_is_2);
+ COMPILE_ASSERT(ARM64Registers::q3 == 3, q3_is_3);
+ COMPILE_ASSERT(ARM64Registers::q4 == 4, q4_is_4);
+ COMPILE_ASSERT(ARM64Registers::q5 == 5, q5_is_5);
+ COMPILE_ASSERT(ARM64Registers::q6 == 6, q6_is_6);
+ COMPILE_ASSERT(ARM64Registers::q7 == 7, q7_is_7);
+ COMPILE_ASSERT(ARM64Registers::q8 == 8, q8_is_8);
+ COMPILE_ASSERT(ARM64Registers::q9 == 9, q9_is_9);
+ COMPILE_ASSERT(ARM64Registers::q10 == 10, q10_is_10);
+ COMPILE_ASSERT(ARM64Registers::q11 == 11, q11_is_11);
+ COMPILE_ASSERT(ARM64Registers::q12 == 12, q12_is_12);
+ COMPILE_ASSERT(ARM64Registers::q13 == 13, q13_is_13);
+ COMPILE_ASSERT(ARM64Registers::q14 == 14, q14_is_14);
+ COMPILE_ASSERT(ARM64Registers::q15 == 15, q15_is_15);
+ static GPRReg toRegister(unsigned index)
+ {
+ return (GPRReg)index;
+ }
+ static unsigned toIndex(GPRReg reg)
+ {
+ if (reg > regT15)
+ return InvalidIndex;
+ return (unsigned)reg;
+ }
+
+ static GPRReg toArgumentRegister(unsigned index)
+ {
+ ASSERT(index < numberOfArgumentRegisters);
+ return toRegister(index);
+ }
+
+ static const char* debugName(GPRReg reg)
+ {
+ ASSERT(reg != InvalidGPRReg);
+ ASSERT(static_cast<unsigned>(reg) < 32);
+ static const char* nameForRegister[32] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+ "r16", "r17", "r18", "r19", "r20", "r21", "r22", "r23",
+ "r24", "r25", "r26", "r27", "r28", "fp", "lr", "sp"
+ };
+ return nameForRegister[reg];
+ }
+
+ static const std::array<GPRReg, 4>& reservedRegisters()
+ {
+ static const std::array<GPRReg, 4> reservedRegisters { {
+ dataTempRegister,
+ memoryTempRegister,
+ tagTypeNumberRegister,
+ tagMaskRegister,
+ } };
+ return reservedRegisters;
+ }
+
+ static const unsigned InvalidIndex = 0xffffffff;
+};
+
+#endif // CPU(ARM64)
+
+#if CPU(MIPS)
+#define NUMBER_OF_ARGUMENT_REGISTERS 4u
+#define NUMBER_OF_CALLEE_SAVES_REGISTERS 0u
+
+class GPRInfo {
+public:
+ typedef GPRReg RegisterType;
+ static const unsigned numberOfRegisters = 7;
+ static const unsigned numberOfArgumentRegisters = NUMBER_OF_ARGUMENT_REGISTERS;
+
+ // regT0 must be v0 for returning a 32-bit value.
+    // regT1 must be v1 for returning a pair of 32-bit values.
+
+ // Temporary registers.
+ static const GPRReg regT0 = MIPSRegisters::v0;
+ static const GPRReg regT1 = MIPSRegisters::v1;
+ static const GPRReg regT2 = MIPSRegisters::t2;
+ static const GPRReg regT3 = MIPSRegisters::t3;
+ static const GPRReg regT4 = MIPSRegisters::t4;
+ static const GPRReg regT5 = MIPSRegisters::t5;
+ static const GPRReg regT6 = MIPSRegisters::t6;
+ // These registers match the baseline JIT.
+ static const GPRReg callFrameRegister = MIPSRegisters::fp;
+ // These constants provide the names for the general purpose argument & return value registers.
+ static const GPRReg argumentGPR0 = MIPSRegisters::a0;
+ static const GPRReg argumentGPR1 = MIPSRegisters::a1;
+ static const GPRReg argumentGPR2 = MIPSRegisters::a2;
+ static const GPRReg argumentGPR3 = MIPSRegisters::a3;
+ static const GPRReg nonArgGPR0 = regT0;
+ static const GPRReg returnValueGPR = regT0;
+ static const GPRReg returnValueGPR2 = regT1;
+ static const GPRReg nonPreservedNonReturnGPR = regT2;
+
+ static GPRReg toRegister(unsigned index)
+ {
+ ASSERT(index < numberOfRegisters);
+ static const GPRReg registerForIndex[numberOfRegisters] = { regT0, regT1, regT2, regT3, regT4, regT5, regT6 };
+ return registerForIndex[index];
+ }
+
+ static GPRReg toArgumentRegister(unsigned index)
+ {
+ ASSERT(index < numberOfArgumentRegisters);
+ static const GPRReg registerForIndex[numberOfArgumentRegisters] = { argumentGPR0, argumentGPR1, argumentGPR2, argumentGPR3 };
+ return registerForIndex[index];
+ }
+
+ static unsigned toIndex(GPRReg reg)
+ {
+ ASSERT(reg != InvalidGPRReg);
+ ASSERT(reg < 32);
+ static const unsigned indexForRegister[32] = {
+ InvalidIndex, InvalidIndex, 0, 1, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex,
+ InvalidIndex, InvalidIndex, 2, 3, 4, 5, 6, InvalidIndex,
+ InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex,
+ InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex
+ };
+ unsigned result = indexForRegister[reg];
+ return result;
+ }
+
+ static const char* debugName(GPRReg reg)
+ {
+ ASSERT(reg != InvalidGPRReg);
+ ASSERT(reg < 16);
+ static const char* nameForRegister[16] = {
+ "zero", "at", "v0", "v1",
+ "a0", "a1", "a2", "a3",
+ "t0", "t1", "t2", "t3",
+ "t4", "t5", "t6", "t7"
+ };
+ return nameForRegister[reg];
+ }
+
+ static const unsigned InvalidIndex = 0xffffffff;
+};
+
+#endif // CPU(MIPS)
+
+#if CPU(SH4)
+#define NUMBER_OF_ARGUMENT_REGISTERS 4u
+#define NUMBER_OF_CALLEE_SAVES_REGISTERS 0u
+
+class GPRInfo {
+public:
+ typedef GPRReg RegisterType;
+ static const unsigned numberOfRegisters = 10;
+
+ // Note: regT3 is required to be callee-preserved.
+
+ // Temporary registers.
+ static const GPRReg regT0 = SH4Registers::r0;
+ static const GPRReg regT1 = SH4Registers::r1;
+ static const GPRReg regT2 = SH4Registers::r6;
+ static const GPRReg regT3 = SH4Registers::r7;
+ static const GPRReg regT4 = SH4Registers::r2;
+ static const GPRReg regT5 = SH4Registers::r3;
+ static const GPRReg regT6 = SH4Registers::r4;
+ static const GPRReg regT7 = SH4Registers::r5;
+ static const GPRReg regT8 = SH4Registers::r8;
+ static const GPRReg regT9 = SH4Registers::r9;
+ // These registers match the baseline JIT.
+ static const GPRReg cachedResultRegister = regT0;
+ static const GPRReg cachedResultRegister2 = regT1;
+ static const GPRReg callFrameRegister = SH4Registers::fp;
+ // These constants provide the names for the general purpose argument & return value registers.
+ static const GPRReg argumentGPR0 = SH4Registers::r4; // regT6
+ static const GPRReg argumentGPR1 = SH4Registers::r5; // regT7
+ static const GPRReg argumentGPR2 = SH4Registers::r6; // regT2
+ static const GPRReg argumentGPR3 = SH4Registers::r7; // regT3
+ static const GPRReg nonArgGPR0 = regT4;
+ static const GPRReg returnValueGPR = regT0;
+ static const GPRReg returnValueGPR2 = regT1;
+ static const GPRReg nonPreservedNonReturnGPR = regT2;
+
+ static GPRReg toRegister(unsigned index)
+ {
+ ASSERT(index < numberOfRegisters);
+ static const GPRReg registerForIndex[numberOfRegisters] = { regT0, regT1, regT2, regT3, regT4, regT5, regT6, regT7, regT8, regT9 };
+ return registerForIndex[index];
+ }
+
+ static unsigned toIndex(GPRReg reg)
+ {
+ ASSERT(reg != InvalidGPRReg);
+ ASSERT(reg < 14);
+ static const unsigned indexForRegister[14] = { 0, 1, 4, 5, 6, 7, 2, 3, 8, 9, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex };
+ unsigned result = indexForRegister[reg];
+ return result;
+ }
+
+ static const char* debugName(GPRReg reg)
+ {
+ ASSERT(reg != InvalidGPRReg);
+ ASSERT(reg < 16);
+ static const char* nameForRegister[16] = {
+ "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
+ "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
+ };
+ return nameForRegister[reg];
+ }
+
+ static const unsigned InvalidIndex = 0xffffffff;
+};
+
+#endif // CPU(SH4)
+
+inline GPRReg argumentRegisterFor(unsigned argumentIndex)
+{
+#if USE(JSVALUE64)
+ if (argumentIndex >= NUMBER_OF_ARGUMENT_REGISTERS)
+ return InvalidGPRReg;
+
+ return GPRInfo::toArgumentRegister(argumentIndex);
+#else
+ UNUSED_PARAM(argumentIndex);
+
+ return InvalidGPRReg;
+#endif
+}
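+
+// Illustrative expectations for the helper above, assuming an x86-64 SysV
+// build where NUMBER_OF_ARGUMENT_REGISTERS is 6:
+//
+//     ASSERT(argumentRegisterFor(0) == GPRInfo::argumentGPR0);
+//     ASSERT(argumentRegisterFor(6) == InvalidGPRReg);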
+
+// The baseline JIT uses "accumulator" style execution with regT0 (for 64-bit)
+// and regT0 + regT1 (for 32-bit) serving as the accumulator register(s) for
+// passing results of one opcode to the next. Hence:
+COMPILE_ASSERT(GPRInfo::regT0 == GPRInfo::returnValueGPR, regT0_must_equal_returnValueGPR);
+#if USE(JSVALUE32_64)
+COMPILE_ASSERT(GPRInfo::regT1 == GPRInfo::returnValueGPR2, regT1_must_equal_returnValueGPR2);
+#endif
+
+#endif // ENABLE(JIT)
+
+} // namespace JSC
+
+namespace WTF {
+
+inline void printInternal(PrintStream& out, JSC::GPRReg reg)
+{
+#if ENABLE(JIT)
+ out.print("%", JSC::GPRInfo::debugName(reg));
+#else
+ out.printf("%%r%d", reg);
+#endif
+}
+
+} // namespace WTF
+
+#endif
diff --git a/Source/JavaScriptCore/jit/HostCallReturnValue.cpp b/Source/JavaScriptCore/jit/HostCallReturnValue.cpp
index 528fb2bc4..e8d01916b 100644
--- a/Source/JavaScriptCore/jit/HostCallReturnValue.cpp
+++ b/Source/JavaScriptCore/jit/HostCallReturnValue.cpp
@@ -29,6 +29,7 @@
#include "CallFrame.h"
#include "JSCJSValueInlines.h"
#include "JSObject.h"
+#include "JSCInlines.h"
#include <wtf/InlineASM.h>
diff --git a/Source/JavaScriptCore/jit/HostCallReturnValue.h b/Source/JavaScriptCore/jit/HostCallReturnValue.h
index f4c8bc703..71ff4e5bd 100644
--- a/Source/JavaScriptCore/jit/HostCallReturnValue.h
+++ b/Source/JavaScriptCore/jit/HostCallReturnValue.h
@@ -28,7 +28,6 @@
#include "JSCJSValue.h"
#include "MacroAssemblerCodeRef.h"
-#include <wtf/Platform.h>
#if ENABLE(JIT)
@@ -42,7 +41,7 @@ namespace JSC {
extern "C" EncodedJSValue HOST_CALL_RETURN_VALUE_OPTION getHostCallReturnValue() REFERENCED_FROM_ASM WTF_INTERNAL;
-#if COMPILER(GCC)
+#if COMPILER(GCC_OR_CLANG)
// This is a public declaration only to convince CLANG not to elide it.
extern "C" EncodedJSValue HOST_CALL_RETURN_VALUE_OPTION getHostCallReturnValueWithExecState(ExecState*) REFERENCED_FROM_ASM WTF_INTERNAL;
@@ -52,11 +51,11 @@ inline void initializeHostCallReturnValue()
getHostCallReturnValueWithExecState(0);
}
-#else // COMPILER(GCC)
+#else // COMPILER(GCC_OR_CLANG)
inline void initializeHostCallReturnValue() { }
-#endif // COMPILER(GCC)
+#endif // COMPILER(GCC_OR_CLANG)
} // namespace JSC
diff --git a/Source/JavaScriptCore/jit/IntrinsicEmitter.cpp b/Source/JavaScriptCore/jit/IntrinsicEmitter.cpp
new file mode 100644
index 000000000..5243b49ea
--- /dev/null
+++ b/Source/JavaScriptCore/jit/IntrinsicEmitter.cpp
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(JIT)
+
+#include "CCallHelpers.h"
+#include "CallFrame.h"
+#include "CodeBlock.h"
+#include "JSArrayBufferView.h"
+#include "JSCJSValueInlines.h"
+#include "JSCellInlines.h"
+#include "PolymorphicAccess.h"
+
+namespace JSC {
+
+typedef CCallHelpers::TrustedImm32 TrustedImm32;
+typedef CCallHelpers::Imm32 Imm32;
+typedef CCallHelpers::TrustedImmPtr TrustedImmPtr;
+typedef CCallHelpers::ImmPtr ImmPtr;
+typedef CCallHelpers::TrustedImm64 TrustedImm64;
+typedef CCallHelpers::Imm64 Imm64;
+
+bool AccessCase::canEmitIntrinsicGetter(JSFunction* getter, Structure* structure)
+{
+
+ switch (getter->intrinsic()) {
+ case TypedArrayByteOffsetIntrinsic:
+ case TypedArrayByteLengthIntrinsic:
+ case TypedArrayLengthIntrinsic: {
+ TypedArrayType type = structure->classInfo()->typedArrayStorageType;
+
+ if (!isTypedView(type))
+ return false;
+
+ return true;
+ }
+ default:
+ return false;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+void AccessCase::emitIntrinsicGetter(AccessGenerationState& state)
+{
+ CCallHelpers& jit = *state.jit;
+ JSValueRegs valueRegs = state.valueRegs;
+ GPRReg baseGPR = state.baseGPR;
+ GPRReg valueGPR = valueRegs.payloadGPR();
+
+ switch (intrinsic()) {
+ case TypedArrayLengthIntrinsic: {
+ jit.load32(MacroAssembler::Address(state.baseGPR, JSArrayBufferView::offsetOfLength()), valueGPR);
+ jit.boxInt32(valueGPR, valueRegs, CCallHelpers::DoNotHaveTagRegisters);
+ state.succeed();
+ return;
+ }
+
+ case TypedArrayByteLengthIntrinsic: {
+ TypedArrayType type = structure()->classInfo()->typedArrayStorageType;
+
+ jit.load32(MacroAssembler::Address(state.baseGPR, JSArrayBufferView::offsetOfLength()), valueGPR);
+
+ if (elementSize(type) > 1) {
+            // We can use a bit shift here since TypedArrays cannot have a byteLength that overflows an int32.
+ jit.lshift32(valueGPR, Imm32(logElementSize(type)), valueGPR);
+ }
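+        // For example (illustrative): a Float64Array has logElementSize 3, so a
+        // length of 16 yields a byteLength of 16 << 3 == 128; an Int8Array skips
+        // the shift entirely.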
+
+ jit.boxInt32(valueGPR, valueRegs, CCallHelpers::DoNotHaveTagRegisters);
+ state.succeed();
+ return;
+ }
+
+ case TypedArrayByteOffsetIntrinsic: {
+ GPRReg scratchGPR = state.scratchGPR;
+
+ CCallHelpers::Jump emptyByteOffset = jit.branch32(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfMode()),
+ TrustedImm32(WastefulTypedArray));
+
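+        // For wasteful (non-fast) views the byte offset is recomputed as
+        // vector - arrayBuffer->data(); every other mode takes the branch
+        // above and reports a byte offset of 0.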
+ jit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
+ jit.loadPtr(MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfVector()), valueGPR);
+ jit.loadPtr(MacroAssembler::Address(scratchGPR, Butterfly::offsetOfArrayBuffer()), scratchGPR);
+ jit.loadPtr(MacroAssembler::Address(scratchGPR, ArrayBuffer::offsetOfData()), scratchGPR);
+ jit.subPtr(scratchGPR, valueGPR);
+
+ CCallHelpers::Jump done = jit.jump();
+
+ emptyByteOffset.link(&jit);
+ jit.move(TrustedImmPtr(0), valueGPR);
+
+ done.link(&jit);
+
+ jit.boxInt32(valueGPR, valueRegs, CCallHelpers::DoNotHaveTagRegisters);
+ state.succeed();
+ return;
+ }
+
+ default:
+ break;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JIT.cpp b/Source/JavaScriptCore/jit/JIT.cpp
index 9b46d8792..ac8c132aa 100644
--- a/Source/JavaScriptCore/jit/JIT.cpp
+++ b/Source/JavaScriptCore/jit/JIT.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2009, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009, 2012-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,77 +26,54 @@
#include "config.h"
#if ENABLE(JIT)
-#include "JIT.h"
-// This probably does not belong here; adding here for now as a quick Windows build fix.
-#if ENABLE(ASSEMBLER) && CPU(X86) && !OS(MAC_OS_X)
-#include "MacroAssembler.h"
-JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
-#endif
+#include "JIT.h"
#include "CodeBlock.h"
-#include <wtf/CryptographicallyRandomNumber.h>
-#include "DFGNode.h" // for DFG_SUCCESS_STATS
+#include "CodeBlockWithJITType.h"
+#include "DFGCapabilities.h"
#include "Interpreter.h"
#include "JITInlines.h"
-#include "JITStubCall.h"
+#include "JITOperations.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "LinkBuffer.h"
-#include "Operations.h"
-#include "RepatchBuffer.h"
+#include "MaxFrameExtentForSlowPathCall.h"
+#include "JSCInlines.h"
+#include "PCToCodeOriginMap.h"
+#include "ProfilerDatabase.h"
#include "ResultType.h"
#include "SamplingTool.h"
+#include "SlowPathCall.h"
+#include "StackAlignment.h"
+#include "TypeProfilerLog.h"
+#include <wtf/CryptographicallyRandomNumber.h>
using namespace std;
namespace JSC {
-void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
+void ctiPatchCallByReturnAddress(ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
{
- RepatchBuffer repatchBuffer(codeblock);
- repatchBuffer.relinkNearCallerToTrampoline(returnAddress, newCalleeFunction);
-}
-
-void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
-{
- RepatchBuffer repatchBuffer(codeblock);
- repatchBuffer.relinkCallerToTrampoline(returnAddress, newCalleeFunction);
-}
-
-void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
-{
- RepatchBuffer repatchBuffer(codeblock);
- repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
+ MacroAssembler::repatchCall(
+ CodeLocationCall(MacroAssemblerCodePtr(returnAddress)),
+ newCalleeFunction);
}
JIT::JIT(VM* vm, CodeBlock* codeBlock)
- : m_interpreter(vm->interpreter)
- , m_vm(vm)
- , m_codeBlock(codeBlock)
- , m_labels(0)
- , m_bytecodeOffset((unsigned)-1)
- , m_propertyAccessInstructionIndex(UINT_MAX)
+ : JSInterfaceJIT(vm, codeBlock)
+ , m_interpreter(vm->interpreter)
+ , m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0)
+ , m_bytecodeOffset(std::numeric_limits<unsigned>::max())
+ , m_getByIdIndex(UINT_MAX)
+ , m_putByIdIndex(UINT_MAX)
, m_byValInstructionIndex(UINT_MAX)
- , m_globalResolveInfoIndex(UINT_MAX)
, m_callLinkInfoIndex(UINT_MAX)
-#if USE(JSVALUE32_64)
- , m_jumpTargetIndex(0)
- , m_mappedBytecodeOffset((unsigned)-1)
- , m_mappedVirtualRegisterIndex(JSStack::ReturnPC)
- , m_mappedTag((RegisterID)-1)
- , m_mappedPayload((RegisterID)-1)
-#else
- , m_lastResultBytecodeRegister(std::numeric_limits<int>::max())
- , m_jumpTargetsPosition(0)
-#endif
, m_randomGenerator(cryptographicallyRandomNumber())
-#if ENABLE(VALUE_PROFILER)
+ , m_pcToCodeOriginMapBuilder(*vm)
, m_canBeOptimized(false)
, m_shouldEmitProfiling(false)
-#endif
{
- m_labels.reserveCapacity(codeBlock ? codeBlock->numberOfInstructions() : 0);
}
#if ENABLE(DFG_JIT)
@@ -105,56 +82,55 @@ void JIT::emitEnterOptimizationCheck()
if (!canBeOptimized())
return;
- Jump skipOptimize = branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForReturn()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter()));
- JITStubCall stubCall(this, cti_optimize);
- stubCall.addArgument(TrustedImm32(m_bytecodeOffset));
+ JumpList skipOptimize;
+
+ skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
ASSERT(!m_bytecodeOffset);
- stubCall.call();
+
+ copyCalleeSavesFromFrameOrRegisterToVMCalleeSavesBuffer();
+
+ callOperation(operationOptimize, m_bytecodeOffset);
+ skipOptimize.append(branchTestPtr(Zero, returnValueGPR));
+ move(returnValueGPR2, stackPointerRegister);
+ jump(returnValueGPR);
skipOptimize.link(this);
}
#endif
-#define NEXT_OPCODE(name) \
- m_bytecodeOffset += OPCODE_LENGTH(name); \
- break;
-
-#if USE(JSVALUE32_64)
-#define DEFINE_BINARY_OP(name) \
- case name: { \
- JITStubCall stubCall(this, cti_##name); \
- stubCall.addArgument(currentInstruction[2].u.operand); \
- stubCall.addArgument(currentInstruction[3].u.operand); \
- stubCall.call(currentInstruction[1].u.operand); \
- NEXT_OPCODE(name); \
- }
+void JIT::emitNotifyWrite(WatchpointSet* set)
+{
+ if (!set || set->state() == IsInvalidated)
+ return;
+
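+    // At run time, fall through only if the set has since been invalidated; otherwise take the slow path so the write can be reported to the set.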
+ addSlowCase(branch8(NotEqual, AbsoluteAddress(set->addressOfState()), TrustedImm32(IsInvalidated)));
+}
-#define DEFINE_UNARY_OP(name) \
- case name: { \
- JITStubCall stubCall(this, cti_##name); \
- stubCall.addArgument(currentInstruction[2].u.operand); \
- stubCall.call(currentInstruction[1].u.operand); \
- NEXT_OPCODE(name); \
- }
+void JIT::emitNotifyWrite(GPRReg pointerToSet)
+{
+ addSlowCase(branch8(NotEqual, Address(pointerToSet, WatchpointSet::offsetOfState()), TrustedImm32(IsInvalidated)));
+}
-#else // USE(JSVALUE32_64)
+void JIT::assertStackPointerOffset()
+{
+ if (ASSERT_DISABLED)
+ return;
+
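+    // Recompute where the stack pointer should be from the call frame register and trap if the real stack pointer disagrees.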
+ addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT0);
+ Jump ok = branchPtr(Equal, regT0, stackPointerRegister);
+ breakpoint();
+ ok.link(this);
+}
-#define DEFINE_BINARY_OP(name) \
- case name: { \
- JITStubCall stubCall(this, cti_##name); \
- stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
- stubCall.addArgument(currentInstruction[3].u.operand, regT2); \
- stubCall.call(currentInstruction[1].u.operand); \
- NEXT_OPCODE(name); \
- }
+#define NEXT_OPCODE(name) \
+ m_bytecodeOffset += OPCODE_LENGTH(name); \
+ break;
-#define DEFINE_UNARY_OP(name) \
- case name: { \
- JITStubCall stubCall(this, cti_##name); \
- stubCall.addArgument(currentInstruction[2].u.operand, regT2); \
- stubCall.call(currentInstruction[1].u.operand); \
- NEXT_OPCODE(name); \
+#define DEFINE_SLOW_OP(name) \
+ case op_##name: { \
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_##name); \
+ slowPathCall.call(); \
+ NEXT_OPCODE(op_##name); \
}
-#endif // USE(JSVALUE32_64)
#define DEFINE_OP(name) \
case name: { \
@@ -170,12 +146,13 @@ void JIT::emitEnterOptimizationCheck()
void JIT::privateCompileMainPass()
{
+ jitAssertTagsInPlace();
+ jitAssertArgumentCountSane();
+
Instruction* instructionsBegin = m_codeBlock->instructions().begin();
unsigned instructionCount = m_codeBlock->instructions().size();
- m_globalResolveInfoIndex = 0;
m_callLinkInfoIndex = 0;
- m_labels.resize(instructionCount);
for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
if (m_disassembler)
@@ -183,16 +160,13 @@ void JIT::privateCompileMainPass()
Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);
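+    // Record the mapping from the current machine-code position to this bytecode offset for the PC-to-CodeOrigin map.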
+ m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeOffset));
+
#if ENABLE(OPCODE_SAMPLING)
if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
sampleInstruction(currentInstruction);
#endif
-#if USE(JSVALUE64)
- if (atJumpTarget())
- killLastResultRegister();
-#endif
-
m_labels[m_bytecodeOffset] = label();
#if ENABLE(JIT_VERBOSE)
@@ -201,60 +175,71 @@ void JIT::privateCompileMainPass()
OpcodeID opcodeID = m_interpreter->getOpcodeID(currentInstruction->u.opcode);
- if (m_compilation && opcodeID != op_call_put_result) {
+ if (m_compilation) {
add64(
TrustedImm32(1),
AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin(
m_compilation->bytecodes(), m_bytecodeOffset)))->address()));
}
+
+ if (Options::eagerlyUpdateTopCallFrame())
+ updateTopCallFrame();
+ unsigned bytecodeOffset = m_bytecodeOffset;
+
switch (opcodeID) {
- DEFINE_BINARY_OP(op_del_by_val)
- DEFINE_BINARY_OP(op_in)
- DEFINE_BINARY_OP(op_less)
- DEFINE_BINARY_OP(op_lesseq)
- DEFINE_BINARY_OP(op_greater)
- DEFINE_BINARY_OP(op_greatereq)
- DEFINE_UNARY_OP(op_is_function)
- DEFINE_UNARY_OP(op_is_object)
- DEFINE_UNARY_OP(op_typeof)
+ DEFINE_SLOW_OP(del_by_val)
+ DEFINE_SLOW_OP(in)
+ DEFINE_SLOW_OP(less)
+ DEFINE_SLOW_OP(lesseq)
+ DEFINE_SLOW_OP(greater)
+ DEFINE_SLOW_OP(greatereq)
+ DEFINE_SLOW_OP(is_function)
+ DEFINE_SLOW_OP(is_object_or_null)
+ DEFINE_SLOW_OP(typeof)
DEFINE_OP(op_add)
DEFINE_OP(op_bitand)
DEFINE_OP(op_bitor)
DEFINE_OP(op_bitxor)
DEFINE_OP(op_call)
+ DEFINE_OP(op_tail_call)
DEFINE_OP(op_call_eval)
DEFINE_OP(op_call_varargs)
+ DEFINE_OP(op_tail_call_varargs)
+ DEFINE_OP(op_construct_varargs)
DEFINE_OP(op_catch)
DEFINE_OP(op_construct)
- DEFINE_OP(op_get_callee)
DEFINE_OP(op_create_this)
- DEFINE_OP(op_convert_this)
- DEFINE_OP(op_init_lazy_reg)
- DEFINE_OP(op_create_arguments)
+ DEFINE_OP(op_to_this)
+ DEFINE_OP(op_create_direct_arguments)
+ DEFINE_OP(op_create_scoped_arguments)
+ DEFINE_OP(op_create_out_of_band_arguments)
+ DEFINE_OP(op_copy_rest)
+ DEFINE_OP(op_get_rest_length)
+ DEFINE_OP(op_check_tdz)
+ DEFINE_OP(op_assert)
+ DEFINE_OP(op_save)
+ DEFINE_OP(op_resume)
DEFINE_OP(op_debug)
DEFINE_OP(op_del_by_id)
DEFINE_OP(op_div)
DEFINE_OP(op_end)
DEFINE_OP(op_enter)
- DEFINE_OP(op_create_activation)
+ DEFINE_OP(op_get_scope)
DEFINE_OP(op_eq)
DEFINE_OP(op_eq_null)
- case op_get_by_id_out_of_line:
case op_get_array_length:
DEFINE_OP(op_get_by_id)
- DEFINE_OP(op_get_arguments_length)
DEFINE_OP(op_get_by_val)
- DEFINE_OP(op_get_argument_by_val)
- DEFINE_OP(op_get_by_pname)
- DEFINE_OP(op_get_pnames)
- DEFINE_OP(op_check_has_instance)
+ DEFINE_OP(op_overrides_has_instance)
DEFINE_OP(op_instanceof)
+ DEFINE_OP(op_instanceof_custom)
DEFINE_OP(op_is_undefined)
DEFINE_OP(op_is_boolean)
DEFINE_OP(op_is_number)
DEFINE_OP(op_is_string)
+ DEFINE_OP(op_is_object)
DEFINE_OP(op_jeq_null)
DEFINE_OP(op_jfalse)
DEFINE_OP(op_jmp)
@@ -270,6 +255,7 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_jngreatereq)
DEFINE_OP(op_jtrue)
DEFINE_OP(op_loop_hint)
+ DEFINE_OP(op_watchdog)
DEFINE_OP(op_lshift)
DEFINE_OP(op_mod)
DEFINE_OP(op_mov)
@@ -282,54 +268,35 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_new_array_buffer)
DEFINE_OP(op_new_func)
DEFINE_OP(op_new_func_exp)
+ DEFINE_OP(op_new_generator_func)
+ DEFINE_OP(op_new_generator_func_exp)
+ DEFINE_OP(op_new_arrow_func_exp)
DEFINE_OP(op_new_object)
DEFINE_OP(op_new_regexp)
- DEFINE_OP(op_next_pname)
DEFINE_OP(op_not)
DEFINE_OP(op_nstricteq)
- DEFINE_OP(op_pop_scope)
DEFINE_OP(op_dec)
DEFINE_OP(op_inc)
DEFINE_OP(op_profile_did_call)
DEFINE_OP(op_profile_will_call)
- DEFINE_OP(op_push_name_scope)
+ DEFINE_OP(op_profile_type)
+ DEFINE_OP(op_profile_control_flow)
DEFINE_OP(op_push_with_scope)
- case op_put_by_id_out_of_line:
- case op_put_by_id_transition_direct:
- case op_put_by_id_transition_normal:
- case op_put_by_id_transition_direct_out_of_line:
- case op_put_by_id_transition_normal_out_of_line:
+ DEFINE_OP(op_create_lexical_environment)
+ DEFINE_OP(op_get_parent_scope)
DEFINE_OP(op_put_by_id)
DEFINE_OP(op_put_by_index)
+ case op_put_by_val_direct:
DEFINE_OP(op_put_by_val)
- DEFINE_OP(op_put_getter_setter)
- case op_init_global_const_nop:
- NEXT_OPCODE(op_init_global_const_nop);
- DEFINE_OP(op_init_global_const)
- DEFINE_OP(op_init_global_const_check)
-
- case op_resolve_global_property:
- case op_resolve_global_var:
- case op_resolve_scoped_var:
- case op_resolve_scoped_var_on_top_scope:
- case op_resolve_scoped_var_with_top_scope_check:
- DEFINE_OP(op_resolve)
-
- case op_resolve_base_to_global:
- case op_resolve_base_to_global_dynamic:
- case op_resolve_base_to_scope:
- case op_resolve_base_to_scope_with_top_scope_check:
- DEFINE_OP(op_resolve_base)
-
- case op_put_to_base_variable:
- DEFINE_OP(op_put_to_base)
-
- DEFINE_OP(op_resolve_with_base)
- DEFINE_OP(op_resolve_with_this)
+ DEFINE_OP(op_put_getter_by_id)
+ DEFINE_OP(op_put_setter_by_id)
+ DEFINE_OP(op_put_getter_setter_by_id)
+ DEFINE_OP(op_put_getter_by_val)
+ DEFINE_OP(op_put_setter_by_val)
+
DEFINE_OP(op_ret)
- DEFINE_OP(op_call_put_result)
- DEFINE_OP(op_ret_object_or_this)
DEFINE_OP(op_rshift)
+ DEFINE_OP(op_unsigned)
DEFINE_OP(op_urshift)
DEFINE_OP(op_strcat)
DEFINE_OP(op_stricteq)
@@ -337,39 +304,40 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_switch_char)
DEFINE_OP(op_switch_imm)
DEFINE_OP(op_switch_string)
- DEFINE_OP(op_tear_off_activation)
- DEFINE_OP(op_tear_off_arguments)
DEFINE_OP(op_throw)
DEFINE_OP(op_throw_static_error)
DEFINE_OP(op_to_number)
+ DEFINE_OP(op_to_string)
DEFINE_OP(op_to_primitive)
- DEFINE_OP(op_get_scoped_var)
- DEFINE_OP(op_put_scoped_var)
-
- case op_get_by_id_chain:
- case op_get_by_id_generic:
- case op_get_by_id_proto:
- case op_get_by_id_self:
- case op_get_by_id_getter_chain:
- case op_get_by_id_getter_proto:
- case op_get_by_id_getter_self:
- case op_get_by_id_custom_chain:
- case op_get_by_id_custom_proto:
- case op_get_by_id_custom_self:
- case op_get_string_length:
- case op_put_by_id_generic:
- case op_put_by_id_replace:
- case op_put_by_id_transition:
+ DEFINE_OP(op_resolve_scope)
+ DEFINE_OP(op_get_from_scope)
+ DEFINE_OP(op_put_to_scope)
+ DEFINE_OP(op_get_from_arguments)
+ DEFINE_OP(op_put_to_arguments)
+
+ DEFINE_OP(op_get_enumerable_length)
+ DEFINE_OP(op_has_generic_property)
+ DEFINE_OP(op_has_structure_property)
+ DEFINE_OP(op_has_indexed_property)
+ DEFINE_OP(op_get_direct_pname)
+ DEFINE_OP(op_get_property_enumerator)
+ DEFINE_OP(op_enumerator_structure_pname)
+ DEFINE_OP(op_enumerator_generic_pname)
+ DEFINE_OP(op_to_index_string)
+ default:
RELEASE_ASSERT_NOT_REACHED();
}
+
+ if (false)
+ dataLog("At ", bytecodeOffset, ": ", m_slowCases.size(), "\n");
}
- RELEASE_ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());
+ RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());
#ifndef NDEBUG
// Reset this, in order to guard its use with ASSERTs.
- m_bytecodeOffset = (unsigned)-1;
+ m_bytecodeOffset = std::numeric_limits<unsigned>::max();
#endif
}
@@ -385,12 +353,11 @@ void JIT::privateCompileSlowCases()
{
Instruction* instructionsBegin = m_codeBlock->instructions().begin();
- m_propertyAccessInstructionIndex = 0;
+ m_getByIdIndex = 0;
+ m_putByIdIndex = 0;
m_byValInstructionIndex = 0;
- m_globalResolveInfoIndex = 0;
m_callLinkInfoIndex = 0;
-#if ENABLE(VALUE_PROFILER)
// Use this to assert that slow-path code associates new profiling sites with existing
// ValueProfiles rather than creating new ones. This ensures that for a given instruction
// (say, get_by_id) we get combined statistics for both the fast-path executions of that
@@ -398,24 +365,19 @@ void JIT::privateCompileSlowCases()
// new ValueProfiles then the ValueProfiles would no longer be sorted by bytecode offset,
// which would break the invariant necessary to use CodeBlock::valueProfileForBytecodeOffset().
unsigned numberOfValueProfiles = m_codeBlock->numberOfValueProfiles();
-#endif
for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
-#if USE(JSVALUE64)
- killLastResultRegister();
-#endif
-
m_bytecodeOffset = iter->to;
+ m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeOffset));
+
unsigned firstTo = m_bytecodeOffset;
Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
-#if ENABLE(VALUE_PROFILER)
RareCaseProfile* rareCaseProfile = 0;
if (shouldEmitProfiling())
rareCaseProfile = m_codeBlock->addRareCaseProfile(m_bytecodeOffset);
-#endif
#if ENABLE(JIT_VERBOSE)
dataLogF("Old JIT emitting slow code for bc#%u at offset 0x%lx.\n", m_bytecodeOffset, (long)debugOffset());
@@ -430,22 +392,22 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_bitor)
DEFINE_SLOWCASE_OP(op_bitxor)
DEFINE_SLOWCASE_OP(op_call)
+ DEFINE_SLOWCASE_OP(op_tail_call)
DEFINE_SLOWCASE_OP(op_call_eval)
DEFINE_SLOWCASE_OP(op_call_varargs)
+ DEFINE_SLOWCASE_OP(op_tail_call_varargs)
+ DEFINE_SLOWCASE_OP(op_construct_varargs)
DEFINE_SLOWCASE_OP(op_construct)
- DEFINE_SLOWCASE_OP(op_convert_this)
+ DEFINE_SLOWCASE_OP(op_to_this)
+ DEFINE_SLOWCASE_OP(op_check_tdz)
DEFINE_SLOWCASE_OP(op_create_this)
DEFINE_SLOWCASE_OP(op_div)
DEFINE_SLOWCASE_OP(op_eq)
- case op_get_by_id_out_of_line:
case op_get_array_length:
DEFINE_SLOWCASE_OP(op_get_by_id)
- DEFINE_SLOWCASE_OP(op_get_arguments_length)
DEFINE_SLOWCASE_OP(op_get_by_val)
- DEFINE_SLOWCASE_OP(op_get_argument_by_val)
- DEFINE_SLOWCASE_OP(op_get_by_pname)
- DEFINE_SLOWCASE_OP(op_check_has_instance)
DEFINE_SLOWCASE_OP(op_instanceof)
+ DEFINE_SLOWCASE_OP(op_instanceof_custom)
DEFINE_SLOWCASE_OP(op_jfalse)
DEFINE_SLOWCASE_OP(op_jless)
DEFINE_SLOWCASE_OP(op_jlesseq)
@@ -457,6 +419,7 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_jngreatereq)
DEFINE_SLOWCASE_OP(op_jtrue)
DEFINE_SLOWCASE_OP(op_loop_hint)
+ DEFINE_SLOWCASE_OP(op_watchdog)
DEFINE_SLOWCASE_OP(op_lshift)
DEFINE_SLOWCASE_OP(op_mod)
DEFINE_SLOWCASE_OP(op_mul)
@@ -467,122 +430,63 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_nstricteq)
DEFINE_SLOWCASE_OP(op_dec)
DEFINE_SLOWCASE_OP(op_inc)
- case op_put_by_id_out_of_line:
- case op_put_by_id_transition_direct:
- case op_put_by_id_transition_normal:
- case op_put_by_id_transition_direct_out_of_line:
- case op_put_by_id_transition_normal_out_of_line:
DEFINE_SLOWCASE_OP(op_put_by_id)
+ case op_put_by_val_direct:
DEFINE_SLOWCASE_OP(op_put_by_val)
- DEFINE_SLOWCASE_OP(op_init_global_const_check);
DEFINE_SLOWCASE_OP(op_rshift)
+ DEFINE_SLOWCASE_OP(op_unsigned)
DEFINE_SLOWCASE_OP(op_urshift)
DEFINE_SLOWCASE_OP(op_stricteq)
DEFINE_SLOWCASE_OP(op_sub)
DEFINE_SLOWCASE_OP(op_to_number)
+ DEFINE_SLOWCASE_OP(op_to_string)
DEFINE_SLOWCASE_OP(op_to_primitive)
+ DEFINE_SLOWCASE_OP(op_has_indexed_property)
+ DEFINE_SLOWCASE_OP(op_has_structure_property)
+ DEFINE_SLOWCASE_OP(op_get_direct_pname)
- case op_resolve_global_property:
- case op_resolve_global_var:
- case op_resolve_scoped_var:
- case op_resolve_scoped_var_on_top_scope:
- case op_resolve_scoped_var_with_top_scope_check:
- DEFINE_SLOWCASE_OP(op_resolve)
-
- case op_resolve_base_to_global:
- case op_resolve_base_to_global_dynamic:
- case op_resolve_base_to_scope:
- case op_resolve_base_to_scope_with_top_scope_check:
- DEFINE_SLOWCASE_OP(op_resolve_base)
- DEFINE_SLOWCASE_OP(op_resolve_with_base)
- DEFINE_SLOWCASE_OP(op_resolve_with_this)
-
- case op_put_to_base_variable:
- DEFINE_SLOWCASE_OP(op_put_to_base)
+ DEFINE_SLOWCASE_OP(op_resolve_scope)
+ DEFINE_SLOWCASE_OP(op_get_from_scope)
+ DEFINE_SLOWCASE_OP(op_put_to_scope)
default:
RELEASE_ASSERT_NOT_REACHED();
}
+ if (false)
+ dataLog("At ", firstTo, " slow: ", iter - m_slowCases.begin(), "\n");
+
RELEASE_ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
RELEASE_ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");
-#if ENABLE(VALUE_PROFILER)
if (shouldEmitProfiling())
add32(TrustedImm32(1), AbsoluteAddress(&rareCaseProfile->m_counter));
-#endif
emitJumpSlowToHot(jump(), 0);
}
- RELEASE_ASSERT(m_propertyAccessInstructionIndex == m_propertyAccessCompilationInfo.size());
- RELEASE_ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());
-#if ENABLE(VALUE_PROFILER)
+ RELEASE_ASSERT(m_getByIdIndex == m_getByIds.size());
+ RELEASE_ASSERT(m_putByIdIndex == m_putByIds.size());
+ RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());
RELEASE_ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());
-#endif
#ifndef NDEBUG
// Reset this, in order to guard its use with ASSERTs.
- m_bytecodeOffset = (unsigned)-1;
-#endif
-}
-
-ALWAYS_INLINE void PropertyStubCompilationInfo::copyToStubInfo(StructureStubInfo& info, LinkBuffer &linkBuffer)
-{
- ASSERT(bytecodeIndex != std::numeric_limits<unsigned>::max());
- info.bytecodeIndex = bytecodeIndex;
- info.callReturnLocation = linkBuffer.locationOf(callReturnLocation);
- info.hotPathBegin = linkBuffer.locationOf(hotPathBegin);
-
- switch (m_type) {
- case GetById: {
- CodeLocationLabel hotPathBeginLocation = linkBuffer.locationOf(hotPathBegin);
- info.patch.baseline.u.get.structureToCompare = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getStructureToCompare));
- info.patch.baseline.u.get.structureCheck = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getStructureCheck));
- info.patch.baseline.u.get.propertyStorageLoad = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(propertyStorageLoad));
-#if USE(JSVALUE64)
- info.patch.baseline.u.get.displacementLabel = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getDisplacementLabel));
-#else
- info.patch.baseline.u.get.displacementLabel1 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getDisplacementLabel1));
- info.patch.baseline.u.get.displacementLabel2 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getDisplacementLabel2));
+ m_bytecodeOffset = std::numeric_limits<unsigned>::max();
#endif
- info.patch.baseline.u.get.putResult = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getPutResult));
- info.patch.baseline.u.get.coldPathBegin = MacroAssembler::differenceBetweenCodePtr(linkBuffer.locationOf(getColdPathBegin), linkBuffer.locationOf(callReturnLocation));
- break;
- }
- case PutById:
- CodeLocationLabel hotPathBeginLocation = linkBuffer.locationOf(hotPathBegin);
- info.patch.baseline.u.put.structureToCompare = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putStructureToCompare));
- info.patch.baseline.u.put.propertyStorageLoad = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(propertyStorageLoad));
-#if USE(JSVALUE64)
- info.patch.baseline.u.put.displacementLabel = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putDisplacementLabel));
-#else
- info.patch.baseline.u.put.displacementLabel1 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putDisplacementLabel1));
- info.patch.baseline.u.put.displacementLabel2 = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putDisplacementLabel2));
-#endif
- break;
- }
}
-JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffort effort)
+CompilationResult JIT::privateCompile(JITCompilationEffort effort)
{
-#if ENABLE(JIT_VERBOSE_OSR)
- printf("Compiling JIT code!\n");
-#endif
-
-#if ENABLE(VALUE_PROFILER)
- DFG::CapabilityLevel level = m_codeBlock->canCompileWithDFG();
+ DFG::CapabilityLevel level = m_codeBlock->capabilityLevel();
switch (level) {
case DFG::CannotCompile:
m_canBeOptimized = false;
+ m_canBeOptimizedOrInlined = false;
m_shouldEmitProfiling = false;
break;
- case DFG::MayInline:
- m_canBeOptimized = false;
- m_canBeOptimizedOrInlined = true;
- m_shouldEmitProfiling = true;
- break;
case DFG::CanCompile:
+ case DFG::CanCompileAndInline:
m_canBeOptimized = true;
m_canBeOptimizedOrInlined = true;
m_shouldEmitProfiling = true;
@@ -591,15 +495,38 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
RELEASE_ASSERT_NOT_REACHED();
break;
}
-#endif
- if (Options::showDisassembly() || m_vm->m_perBytecodeProfiler)
- m_disassembler = adoptPtr(new JITDisassembler(m_codeBlock));
+ switch (m_codeBlock->codeType()) {
+ case GlobalCode:
+ case ModuleCode:
+ case EvalCode:
+ m_codeBlock->m_shouldAlwaysBeInlined = false;
+ break;
+ case FunctionCode:
+ // We could have already set it to false because we detected an uninlineable call.
+ // Don't override that observation.
+ m_codeBlock->m_shouldAlwaysBeInlined &= canInline(level) && DFG::mightInlineFunction(m_codeBlock);
+ break;
+ }
+
+    m_codeBlock->setCalleeSaveRegisters(RegisterSet::llintBaselineCalleeSaveRegisters()); // This call might be removable, since the code block has probably already been set to this value.
+
+ // This ensures that we have the most up to date type information when performing typecheck optimizations for op_profile_type.
+ if (m_vm->typeProfiler())
+ m_vm->typeProfilerLog()->processLogEntries(ASCIILiteral("Preparing for JIT compilation."));
+
+ if (Options::dumpDisassembly() || m_vm->m_perBytecodeProfiler)
+ m_disassembler = std::make_unique<JITDisassembler>(m_codeBlock);
if (m_vm->m_perBytecodeProfiler) {
- m_compilation = m_vm->m_perBytecodeProfiler->newCompilation(m_codeBlock, Profiler::Baseline);
+ m_compilation = adoptRef(
+ new Profiler::Compilation(
+ m_vm->m_perBytecodeProfiler->ensureBytecodesFor(m_codeBlock),
+ Profiler::Baseline));
m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock);
}
+ m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(0, nullptr));
+
if (m_disassembler)
m_disassembler->setStartOfCode(label());
@@ -607,9 +534,8 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
if (m_randomGenerator.getUint32() & 1)
nop();
- preserveReturnAddressAfterCall(regT2);
- emitPutToCallFrameHeader(regT2, JSStack::ReturnPC);
- emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
+ emitFunctionPrologue();
+ emitPutToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
Label beginLabel(this);
@@ -618,17 +544,8 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
sampleInstruction(m_codeBlock->instructions().begin());
#endif
- Jump stackCheck;
if (m_codeBlock->codeType() == FunctionCode) {
-#if ENABLE(DFG_JIT)
-#if DFG_ENABLE(SUCCESS_STATS)
- static SamplingCounter counter("orignalJIT");
- emitCount(counter);
-#endif
-#endif
-
-#if ENABLE(VALUE_PROFILER)
- ASSERT(m_bytecodeOffset == (unsigned)-1);
+ ASSERT(m_bytecodeOffset == std::numeric_limits<unsigned>::max());
if (shouldEmitProfiling()) {
for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
// If this is a constructor, then we want to put in a dummy profiling site (to
@@ -645,43 +562,54 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument));
}
}
-#endif
-
- addPtr(TrustedImm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1);
- stackCheck = branchPtr(Below, AbsoluteAddress(m_vm->interpreter->stack().addressOfEnd()), regT1);
}
- Label functionBody = label();
-
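+    // Compute the stack pointer this frame will use and branch to the stack-overflow handler below if it would cross the VM's stack limit.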
+ addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT1);
+ Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), regT1);
+
+ move(regT1, stackPointerRegister);
+ checkStackPointerAlignment();
+
+ emitSaveCalleeSaves();
+ emitMaterializeTagCheckRegisters();
+
privateCompileMainPass();
privateCompileLinkPass();
privateCompileSlowCases();
if (m_disassembler)
m_disassembler->setEndOfSlowPath(label());
+ m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());
+
+ stackOverflow.link(this);
+ m_bytecodeOffset = 0;
+ if (maxFrameExtentForSlowPathCall)
+ addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
+ callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);
Label arityCheck;
if (m_codeBlock->codeType() == FunctionCode) {
- stackCheck.link(this);
- m_bytecodeOffset = 0;
- JITStubCall(this, cti_stack_check).call();
-#ifndef NDEBUG
- m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
-#endif
- jump(functionBody);
-
arityCheck = label();
- preserveReturnAddressAfterCall(regT2);
- emitPutToCallFrameHeader(regT2, JSStack::ReturnPC);
- emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
+ store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined);
+ emitFunctionPrologue();
+ emitPutToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
load32(payloadFor(JSStack::ArgumentCount), regT1);
branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);
m_bytecodeOffset = 0;
- JITStubCall(this, m_codeBlock->m_isConstructor ? cti_op_construct_arityCheck : cti_op_call_arityCheck).call(callFrameRegister);
+
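+    // Call out to the arity-check operation; a zero result means no adjustment is needed, otherwise the result is passed to the arity fixup thunk before re-entering at beginLabel.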
+ if (maxFrameExtentForSlowPathCall)
+ addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
+ callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck);
+ if (maxFrameExtentForSlowPathCall)
+ addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
+ branchTest32(Zero, returnValueGPR).linkTo(beginLabel, this);
+ move(returnValueGPR, GPRInfo::argumentGPR0);
+ emitNakedCall(m_vm->getCTIStub(arityFixupGenerator).code());
+
#if !ASSERT_DISABLED
- m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
+ m_bytecodeOffset = std::numeric_limits<unsigned>::max(); // Reset this, in order to guard its use with ASSERTs.
#endif
jump(beginLabel);
@@ -689,14 +617,17 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
ASSERT(m_jmpTable.isEmpty());
+ privateCompileExceptionHandlers();
+
if (m_disassembler)
m_disassembler->setEndOfCode(label());
+ m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());
- LinkBuffer patchBuffer(*m_vm, this, m_codeBlock, effort);
+
+ LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock, effort);
if (patchBuffer.didFailToAllocate())
- return JITCode();
+ return CompilationFailed;
- ASSERT(m_labels.size() >= m_codeBlock->instructionCount());
// Translate vPC offsets into addresses in JIT generated code, for switch tables.
for (unsigned i = 0; i < m_switches.size(); ++i) {
SwitchRecord record = m_switches[i];
@@ -735,110 +666,132 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
patchBuffer.link(iter->from, FunctionPtr(iter->to));
}
- m_codeBlock->callReturnIndexVector().reserveCapacity(m_calls.size());
- for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter)
- m_codeBlock->callReturnIndexVector().append(CallReturnOffsetToBytecodeOffset(patchBuffer.returnAddressOffset(iter->from), iter->bytecodeOffset));
-
- m_codeBlock->setNumberOfStructureStubInfos(m_propertyAccessCompilationInfo.size());
- for (unsigned i = 0; i < m_propertyAccessCompilationInfo.size(); ++i)
- m_propertyAccessCompilationInfo[i].copyToStubInfo(m_codeBlock->structureStubInfo(i), patchBuffer);
- m_codeBlock->setNumberOfByValInfos(m_byValCompilationInfo.size());
- for (unsigned i = 0; i < m_byValCompilationInfo.size(); ++i) {
- CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(m_byValCompilationInfo[i].badTypeJump));
- CodeLocationLabel doneTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].doneTarget);
- CodeLocationLabel slowPathTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].slowPathTarget);
- CodeLocationCall returnAddress = patchBuffer.locationOf(m_byValCompilationInfo[i].returnAddress);
-
- m_codeBlock->byValInfo(i) = ByValInfo(
- m_byValCompilationInfo[i].bytecodeIndex,
+ for (unsigned i = m_getByIds.size(); i--;)
+ m_getByIds[i].finalize(patchBuffer);
+ for (unsigned i = m_putByIds.size(); i--;)
+ m_putByIds[i].finalize(patchBuffer);
+
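+    // Resolve the labels recorded during codegen into absolute code locations and publish them through each access's ByValInfo.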
+ for (const auto& byValCompilationInfo : m_byValCompilationInfo) {
+ PatchableJump patchableNotIndexJump = byValCompilationInfo.notIndexJump;
+ CodeLocationJump notIndexJump = CodeLocationJump();
+ if (Jump(patchableNotIndexJump).isSet())
+ notIndexJump = CodeLocationJump(patchBuffer.locationOf(patchableNotIndexJump));
+ CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(byValCompilationInfo.badTypeJump));
+ CodeLocationLabel doneTarget = patchBuffer.locationOf(byValCompilationInfo.doneTarget);
+ CodeLocationLabel nextHotPathTarget = patchBuffer.locationOf(byValCompilationInfo.nextHotPathTarget);
+ CodeLocationLabel slowPathTarget = patchBuffer.locationOf(byValCompilationInfo.slowPathTarget);
+ CodeLocationCall returnAddress = patchBuffer.locationOf(byValCompilationInfo.returnAddress);
+
+ *byValCompilationInfo.byValInfo = ByValInfo(
+ byValCompilationInfo.bytecodeIndex,
+ notIndexJump,
badTypeJump,
- m_byValCompilationInfo[i].arrayMode,
+ byValCompilationInfo.arrayMode,
+ byValCompilationInfo.arrayProfile,
differenceBetweenCodePtr(badTypeJump, doneTarget),
+ differenceBetweenCodePtr(badTypeJump, nextHotPathTarget),
differenceBetweenCodePtr(returnAddress, slowPathTarget));
}
- m_codeBlock->setNumberOfCallLinkInfos(m_callStructureStubCompilationInfo.size());
- for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) {
- CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
- info.callType = m_callStructureStubCompilationInfo[i].callType;
- info.codeOrigin = CodeOrigin(m_callStructureStubCompilationInfo[i].bytecodeIndex);
- info.callReturnLocation = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation);
- info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin);
- info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther);
- info.calleeGPR = regT0;
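+    // Now that final code addresses are known, record each call site's locations in its CallLinkInfo.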
+ for (unsigned i = 0; i < m_callCompilationInfo.size(); ++i) {
+ CallCompilationInfo& compilationInfo = m_callCompilationInfo[i];
+ CallLinkInfo& info = *compilationInfo.callLinkInfo;
+ info.setCallLocations(patchBuffer.locationOfNearCall(compilationInfo.callReturnLocation),
+ patchBuffer.locationOf(compilationInfo.hotPathBegin),
+ patchBuffer.locationOfNearCall(compilationInfo.hotPathOther));
}
-#if ENABLE(DFG_JIT) || ENABLE(LLINT)
- if (canBeOptimizedOrInlined()
-#if ENABLE(LLINT)
- || true
-#endif
- ) {
- CompactJITCodeMap::Encoder jitCodeMapEncoder;
- for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
- if (m_labels[bytecodeOffset].isSet())
- jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset]));
- }
- m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish());
+ CompactJITCodeMap::Encoder jitCodeMapEncoder;
+ for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
+ if (m_labels[bytecodeOffset].isSet())
+ jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset]));
}
-#endif
+ m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish());
- if (m_codeBlock->codeType() == FunctionCode && functionEntryArityCheck)
- *functionEntryArityCheck = patchBuffer.locationOf(arityCheck);
+ MacroAssemblerCodePtr withArityCheck;
+ if (m_codeBlock->codeType() == FunctionCode)
+ withArityCheck = patchBuffer.locationOf(arityCheck);
- if (Options::showDisassembly())
+ if (Options::dumpDisassembly()) {
m_disassembler->dump(patchBuffer);
- if (m_compilation)
+ patchBuffer.didAlreadyDisassemble();
+ }
+ if (m_compilation) {
m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer);
+ m_vm->m_perBytecodeProfiler->addCompilation(m_compilation);
+ }
+
+ if (m_pcToCodeOriginMapBuilder.didBuildMapping())
+ m_codeBlock->setPCToCodeOriginMap(std::make_unique<PCToCodeOriginMap>(WTFMove(m_pcToCodeOriginMapBuilder), patchBuffer));
- CodeRef result = patchBuffer.finalizeCodeWithoutDisassembly();
+ CodeRef result = FINALIZE_CODE(
+ patchBuffer,
+ ("Baseline JIT code for %s", toCString(CodeBlockWithJITType(m_codeBlock, JITCode::BaselineJIT)).data()));
m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.add(
static_cast<double>(result.size()) /
static_cast<double>(m_codeBlock->instructions().size()));
-
+
m_codeBlock->shrinkToFit(CodeBlock::LateShrink);
+ m_codeBlock->setJITCode(
+ adoptRef(new DirectJITCode(result, withArityCheck, JITCode::BaselineJIT)));
#if ENABLE(JIT_VERBOSE)
dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end());
#endif
- return JITCode(result, JITCode::BaselineJIT);
+ return CompilationSuccessful;
}
-void JIT::linkFor(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, VM* vm, CodeSpecializationKind kind)
+void JIT::privateCompileExceptionHandlers()
{
- RepatchBuffer repatchBuffer(callerCodeBlock);
-
- ASSERT(!callLinkInfo->isLinked());
- callLinkInfo->callee.set(*vm, callLinkInfo->hotPathBegin, callerCodeBlock->ownerExecutable(), callee);
- callLinkInfo->lastSeenCallee.set(*vm, callerCodeBlock->ownerExecutable(), callee);
- repatchBuffer.relink(callLinkInfo->hotPathOther, code);
-
- if (calleeCodeBlock)
- calleeCodeBlock->linkIncomingCall(callLinkInfo);
-
- // Patch the slow patch so we do not continue to try to link.
- if (kind == CodeForCall) {
- ASSERT(callLinkInfo->callType == CallLinkInfo::Call
- || callLinkInfo->callType == CallLinkInfo::CallVarargs);
- if (callLinkInfo->callType == CallLinkInfo::Call) {
- repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(linkClosureCallGenerator).code());
- return;
- }
+ if (!m_exceptionChecksWithCallFrameRollback.empty()) {
+ m_exceptionChecksWithCallFrameRollback.link(this);
- repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(virtualCallGenerator).code());
- return;
+ copyCalleeSavesToVMCalleeSavesBuffer();
+
+ // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).
+
+ move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
+
+#if CPU(X86)
+ // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
+ poke(GPRInfo::argumentGPR0);
+ poke(GPRInfo::argumentGPR1, 1);
+#endif
+ m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandlerFromCallerFrame).value()));
+ jumpToExceptionHandler();
}
- ASSERT(kind == CodeForConstruct);
- repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(virtualConstructGenerator).code());
+ if (!m_exceptionChecks.empty()) {
+ m_exceptionChecks.link(this);
+
+ copyCalleeSavesToVMCalleeSavesBuffer();
+
+ // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
+ move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
+
+#if CPU(X86)
+ // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
+ poke(GPRInfo::argumentGPR0);
+ poke(GPRInfo::argumentGPR1, 1);
+#endif
+ m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandler).value()));
+ jumpToExceptionHandler();
+ }
}
-void JIT::linkSlowCall(CodeBlock* callerCodeBlock, CallLinkInfo* callLinkInfo)
+unsigned JIT::frameRegisterCountFor(CodeBlock* codeBlock)
{
- RepatchBuffer repatchBuffer(callerCodeBlock);
+ ASSERT(static_cast<unsigned>(codeBlock->m_numCalleeLocals) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast<unsigned>(codeBlock->m_numCalleeLocals)));
+
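+    // Room for the callee locals plus the scratch area needed by slow-path calls, rounded to keep the frame properly aligned.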
+ return roundLocalRegisterCountForFramePointerOffset(codeBlock->m_numCalleeLocals + maxFrameExtentForSlowPathCallInRegisters);
+}
- repatchBuffer.relink(callLinkInfo->callReturnLocation, callerCodeBlock->vm()->getCTIStub(virtualCallGenerator).code());
+int JIT::stackPointerOffsetFor(CodeBlock* codeBlock)
+{
+ return virtualRegisterForLocal(frameRegisterCountFor(codeBlock) - 1).offset();
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/jit/JIT.h b/Source/JavaScriptCore/jit/JIT.h
index df8a19fd8..e81824268 100644
--- a/Source/JavaScriptCore/jit/JIT.h
+++ b/Source/JavaScriptCore/jit/JIT.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,14 +28,9 @@
#if ENABLE(JIT)
-// Verbose logging of code generation
-#define ENABLE_JIT_VERBOSE 0
-// Verbose logging for OSR-related code.
-#define ENABLE_JIT_VERBOSE_OSR 0
-
// We've run into some problems where changing the size of the class JIT leads to
// performance fluctuations. Try forcing alignment in an attempt to stabilize this.
-#if COMPILER(GCC)
+#if COMPILER(GCC_OR_CLANG)
#define JIT_CLASS_ALIGNMENT __attribute__ ((aligned (32)))
#else
#define JIT_CLASS_ALIGNMENT
@@ -47,33 +42,34 @@
#include "CompactJITCodeMap.h"
#include "Interpreter.h"
#include "JITDisassembler.h"
+#include "JITInlineCacheGenerator.h"
#include "JSInterfaceJIT.h"
-#include "LegacyProfiler.h"
#include "Opcode.h"
+#include "PCToCodeOriginMap.h"
#include "ResultType.h"
+#include "SamplingTool.h"
#include "UnusedPointer.h"
-#include <bytecode/SamplingTool.h>
namespace JSC {
+ class ArrayAllocationProfile;
+ class CallLinkInfo;
class CodeBlock;
class FunctionExecutable;
class JIT;
- class JSPropertyNameIterator;
+ class Identifier;
class Interpreter;
class JSScope;
class JSStack;
class MarkedAllocator;
class Register;
class StructureChain;
+ class StructureStubInfo;
- struct CallLinkInfo;
struct Instruction;
struct OperandTypes;
- struct PolymorphicAccessStructureList;
struct SimpleJumpTable;
struct StringJumpTable;
- struct StructureStubInfo;
struct CallRecord {
MacroAssembler::Call from;
@@ -150,146 +146,45 @@ namespace JSC {
}
};
- enum PropertyStubGetById_T { PropertyStubGetById };
- enum PropertyStubPutById_T { PropertyStubPutById };
-
- struct PropertyStubCompilationInfo {
- enum Type { GetById, PutById } m_type;
-
- unsigned bytecodeIndex;
- MacroAssembler::Call callReturnLocation;
- MacroAssembler::Label hotPathBegin;
- MacroAssembler::DataLabelPtr getStructureToCompare;
- MacroAssembler::PatchableJump getStructureCheck;
- MacroAssembler::ConvertibleLoadLabel propertyStorageLoad;
-#if USE(JSVALUE64)
- MacroAssembler::DataLabelCompact getDisplacementLabel;
-#else
- MacroAssembler::DataLabelCompact getDisplacementLabel1;
- MacroAssembler::DataLabelCompact getDisplacementLabel2;
-#endif
- MacroAssembler::Label getPutResult;
- MacroAssembler::Label getColdPathBegin;
- MacroAssembler::DataLabelPtr putStructureToCompare;
-#if USE(JSVALUE64)
- MacroAssembler::DataLabel32 putDisplacementLabel;
-#else
- MacroAssembler::DataLabel32 putDisplacementLabel1;
- MacroAssembler::DataLabel32 putDisplacementLabel2;
-#endif
-
-#if !ASSERT_DISABLED
- PropertyStubCompilationInfo()
- : bytecodeIndex(std::numeric_limits<unsigned>::max())
- {
- }
-#endif
-
-
- PropertyStubCompilationInfo(
- PropertyStubGetById_T, unsigned bytecodeIndex, MacroAssembler::Label hotPathBegin,
- MacroAssembler::DataLabelPtr structureToCompare,
- MacroAssembler::PatchableJump structureCheck,
- MacroAssembler::ConvertibleLoadLabel propertyStorageLoad,
-#if USE(JSVALUE64)
- MacroAssembler::DataLabelCompact displacementLabel,
-#else
- MacroAssembler::DataLabelCompact displacementLabel1,
- MacroAssembler::DataLabelCompact displacementLabel2,
-#endif
- MacroAssembler::Label putResult)
- : m_type(GetById)
- , bytecodeIndex(bytecodeIndex)
- , hotPathBegin(hotPathBegin)
- , getStructureToCompare(structureToCompare)
- , getStructureCheck(structureCheck)
- , propertyStorageLoad(propertyStorageLoad)
-#if USE(JSVALUE64)
- , getDisplacementLabel(displacementLabel)
-#else
- , getDisplacementLabel1(displacementLabel1)
- , getDisplacementLabel2(displacementLabel2)
-#endif
- , getPutResult(putResult)
- {
- }
-
- PropertyStubCompilationInfo(
- PropertyStubPutById_T, unsigned bytecodeIndex, MacroAssembler::Label hotPathBegin,
- MacroAssembler::DataLabelPtr structureToCompare,
- MacroAssembler::ConvertibleLoadLabel propertyStorageLoad,
-#if USE(JSVALUE64)
- MacroAssembler::DataLabel32 displacementLabel
-#else
- MacroAssembler::DataLabel32 displacementLabel1,
- MacroAssembler::DataLabel32 displacementLabel2
-#endif
- )
- : m_type(PutById)
- , bytecodeIndex(bytecodeIndex)
- , hotPathBegin(hotPathBegin)
- , propertyStorageLoad(propertyStorageLoad)
- , putStructureToCompare(structureToCompare)
-#if USE(JSVALUE64)
- , putDisplacementLabel(displacementLabel)
-#else
- , putDisplacementLabel1(displacementLabel1)
- , putDisplacementLabel2(displacementLabel2)
-#endif
- {
- }
-
- void slowCaseInfo(PropertyStubGetById_T, MacroAssembler::Label coldPathBegin, MacroAssembler::Call call)
- {
- ASSERT(m_type == GetById);
- callReturnLocation = call;
- getColdPathBegin = coldPathBegin;
- }
-
- void slowCaseInfo(PropertyStubPutById_T, MacroAssembler::Call call)
- {
- ASSERT(m_type == PutById);
- callReturnLocation = call;
- }
-
- void copyToStubInfo(StructureStubInfo& info, LinkBuffer &patchBuffer);
- };
-
struct ByValCompilationInfo {
ByValCompilationInfo() { }
- ByValCompilationInfo(unsigned bytecodeIndex, MacroAssembler::PatchableJump badTypeJump, JITArrayMode arrayMode, MacroAssembler::Label doneTarget)
- : bytecodeIndex(bytecodeIndex)
+ ByValCompilationInfo(ByValInfo* byValInfo, unsigned bytecodeIndex, MacroAssembler::PatchableJump notIndexJump, MacroAssembler::PatchableJump badTypeJump, JITArrayMode arrayMode, ArrayProfile* arrayProfile, MacroAssembler::Label doneTarget, MacroAssembler::Label nextHotPathTarget)
+ : byValInfo(byValInfo)
+ , bytecodeIndex(bytecodeIndex)
+ , notIndexJump(notIndexJump)
, badTypeJump(badTypeJump)
, arrayMode(arrayMode)
+ , arrayProfile(arrayProfile)
, doneTarget(doneTarget)
+ , nextHotPathTarget(nextHotPathTarget)
{
}
-
+
+ ByValInfo* byValInfo;
unsigned bytecodeIndex;
+ MacroAssembler::PatchableJump notIndexJump;
MacroAssembler::PatchableJump badTypeJump;
JITArrayMode arrayMode;
+ ArrayProfile* arrayProfile;
MacroAssembler::Label doneTarget;
+ MacroAssembler::Label nextHotPathTarget;
MacroAssembler::Label slowPathTarget;
MacroAssembler::Call returnAddress;
};
- struct StructureStubCompilationInfo {
+ struct CallCompilationInfo {
MacroAssembler::DataLabelPtr hotPathBegin;
MacroAssembler::Call hotPathOther;
MacroAssembler::Call callReturnLocation;
- CallLinkInfo::CallType callType;
- unsigned bytecodeIndex;
+ CallLinkInfo* callLinkInfo;
};
- // Near calls can only be patched to other JIT code, regular calls can be patched to JIT code or relinked to stub functions.
- void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction);
- void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction);
- void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction);
+ void ctiPatchCallByReturnAddress(ReturnAddressPtr, FunctionPtr newCalleeFunction);
class JIT : private JSInterfaceJIT {
+ friend class JITSlowPathCall;
friend class JITStubCall;
- friend struct PropertyStubCompilationInfo;
using MacroAssembler::Jump;
using MacroAssembler::JumpList;
@@ -302,104 +197,64 @@ namespace JSC {
static const int patchPutByIdDefaultOffset = 256;
public:
- static JITCode compile(VM* vm, CodeBlock* codeBlock, JITCompilationEffort effort, CodePtr* functionEntryArityCheck = 0)
+ static CompilationResult compile(VM* vm, CodeBlock* codeBlock, JITCompilationEffort effort)
{
- return JIT(vm, codeBlock).privateCompile(functionEntryArityCheck, effort);
+ return JIT(vm, codeBlock).privateCompile(effort);
}
- static void compileClosureCall(VM* vm, CallLinkInfo* callLinkInfo, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, Structure* expectedStructure, ExecutableBase* expectedExecutable, MacroAssemblerCodePtr codePtr)
- {
- JIT jit(vm, callerCodeBlock);
- jit.m_bytecodeOffset = callLinkInfo->codeOrigin.bytecodeIndex;
- jit.privateCompileClosureCall(callLinkInfo, calleeCodeBlock, expectedStructure, expectedExecutable, codePtr);
- }
-
- static void compileGetByIdProto(VM* vm, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress)
+ static void compileGetByVal(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
{
JIT jit(vm, codeBlock);
- jit.m_bytecodeOffset = stubInfo->bytecodeIndex;
- jit.privateCompileGetByIdProto(stubInfo, structure, prototypeStructure, ident, slot, cachedOffset, returnAddress, callFrame);
+ jit.m_bytecodeOffset = byValInfo->bytecodeIndex;
+ jit.privateCompileGetByVal(byValInfo, returnAddress, arrayMode);
}
- static void compileGetByIdSelfList(VM* vm, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset)
- {
- JIT jit(vm, codeBlock);
- jit.m_bytecodeOffset = stubInfo->bytecodeIndex;
- jit.privateCompileGetByIdSelfList(stubInfo, polymorphicStructures, currentIndex, structure, ident, slot, cachedOffset);
- }
- static void compileGetByIdProtoList(VM* vm, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset)
+ static void compileGetByValWithCachedId(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, const Identifier& propertyName)
{
JIT jit(vm, codeBlock);
- jit.m_bytecodeOffset = stubInfo->bytecodeIndex;
- jit.privateCompileGetByIdProtoList(stubInfo, prototypeStructureList, currentIndex, structure, prototypeStructure, ident, slot, cachedOffset, callFrame);
- }
- static void compileGetByIdChainList(VM* vm, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset)
- {
- JIT jit(vm, codeBlock);
- jit.m_bytecodeOffset = stubInfo->bytecodeIndex;
- jit.privateCompileGetByIdChainList(stubInfo, prototypeStructureList, currentIndex, structure, chain, count, ident, slot, cachedOffset, callFrame);
+ jit.m_bytecodeOffset = byValInfo->bytecodeIndex;
+ jit.privateCompileGetByValWithCachedId(byValInfo, returnAddress, propertyName);
}
- static void compileGetByIdChain(VM* vm, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress)
+ static void compilePutByVal(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
{
JIT jit(vm, codeBlock);
- jit.m_bytecodeOffset = stubInfo->bytecodeIndex;
- jit.privateCompileGetByIdChain(stubInfo, structure, chain, count, ident, slot, cachedOffset, returnAddress, callFrame);
+ jit.m_bytecodeOffset = byValInfo->bytecodeIndex;
+ jit.privateCompilePutByVal(byValInfo, returnAddress, arrayMode);
}
- static void compilePutByIdTransition(VM* vm, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, PropertyOffset cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
+ static void compileDirectPutByVal(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
{
JIT jit(vm, codeBlock);
- jit.m_bytecodeOffset = stubInfo->bytecodeIndex;
- jit.privateCompilePutByIdTransition(stubInfo, oldStructure, newStructure, cachedOffset, chain, returnAddress, direct);
+ jit.m_bytecodeOffset = byValInfo->bytecodeIndex;
+ jit.privateCompilePutByVal(byValInfo, returnAddress, arrayMode);
}
-
- static void compileGetByVal(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
+
+ static void compilePutByValWithCachedId(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, PutKind putKind, const Identifier& propertyName)
{
JIT jit(vm, codeBlock);
jit.m_bytecodeOffset = byValInfo->bytecodeIndex;
- jit.privateCompileGetByVal(byValInfo, returnAddress, arrayMode);
+ jit.privateCompilePutByValWithCachedId(byValInfo, returnAddress, putKind, propertyName);
}
- static void compilePutByVal(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
+ static void compileHasIndexedProperty(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
{
JIT jit(vm, codeBlock);
jit.m_bytecodeOffset = byValInfo->bytecodeIndex;
- jit.privateCompilePutByVal(byValInfo, returnAddress, arrayMode);
+ jit.privateCompileHasIndexedProperty(byValInfo, returnAddress, arrayMode);
}
static CodeRef compileCTINativeCall(VM* vm, NativeFunction func)
{
if (!vm->canUseJIT()) {
-#if ENABLE(LLINT)
return CodeRef::createLLIntCodeRef(llint_native_call_trampoline);
-#else
- return CodeRef();
-#endif
}
JIT jit(vm, 0);
return jit.privateCompileCTINativeCall(vm, func);
}
- static void resetPatchGetById(RepatchBuffer&, StructureStubInfo*);
- static void resetPatchPutById(RepatchBuffer&, StructureStubInfo*);
- static void patchGetByIdSelf(CodeBlock*, StructureStubInfo*, Structure*, PropertyOffset cachedOffset, ReturnAddressPtr);
- static void patchPutByIdReplace(CodeBlock*, StructureStubInfo*, Structure*, PropertyOffset cachedOffset, ReturnAddressPtr, bool direct);
-
- static void compilePatchGetArrayLength(VM* vm, CodeBlock* codeBlock, ReturnAddressPtr returnAddress)
- {
- JIT jit(vm, codeBlock);
-#if ENABLE(DFG_JIT)
- // Force profiling to be enabled during stub generation.
- jit.m_canBeOptimized = true;
- jit.m_canBeOptimizedOrInlined = true;
- jit.m_shouldEmitProfiling = true;
-#endif // ENABLE(DFG_JIT)
- return jit.privateCompilePatchGetArrayLength(returnAddress);
- }
-
- static void linkFor(JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, CodePtr, CallLinkInfo*, VM*, CodeSpecializationKind);
- static void linkSlowCall(CodeBlock* callerCodeBlock, CallLinkInfo*);
+ static unsigned frameRegisterCountFor(CodeBlock*);
+ static int stackPointerOffsetFor(CodeBlock*);
private:
JIT(VM*, CodeBlock* = 0);
@@ -407,25 +262,52 @@ namespace JSC {
void privateCompileMainPass();
void privateCompileLinkPass();
void privateCompileSlowCases();
- JITCode privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffort);
-
- void privateCompileClosureCall(CallLinkInfo*, CodeBlock* calleeCodeBlock, Structure*, ExecutableBase*, MacroAssemblerCodePtr);
-
- void privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, PropertyOffset cachedOffset, ReturnAddressPtr, CallFrame*);
- void privateCompileGetByIdSelfList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, const Identifier&, const PropertySlot&, PropertyOffset cachedOffset);
- void privateCompileGetByIdProtoList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, PropertyOffset cachedOffset, CallFrame*);
- void privateCompileGetByIdChainList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, StructureChain*, size_t count, const Identifier&, const PropertySlot&, PropertyOffset cachedOffset, CallFrame*);
- void privateCompileGetByIdChain(StructureStubInfo*, Structure*, StructureChain*, size_t count, const Identifier&, const PropertySlot&, PropertyOffset cachedOffset, ReturnAddressPtr, CallFrame*);
- void privateCompilePutByIdTransition(StructureStubInfo*, Structure*, Structure*, PropertyOffset cachedOffset, StructureChain*, ReturnAddressPtr, bool direct);
+ CompilationResult privateCompile(JITCompilationEffort);
void privateCompileGetByVal(ByValInfo*, ReturnAddressPtr, JITArrayMode);
+ void privateCompileGetByValWithCachedId(ByValInfo*, ReturnAddressPtr, const Identifier&);
void privateCompilePutByVal(ByValInfo*, ReturnAddressPtr, JITArrayMode);
+ void privateCompilePutByValWithCachedId(ByValInfo*, ReturnAddressPtr, PutKind, const Identifier&);
+
+ void privateCompileHasIndexedProperty(ByValInfo*, ReturnAddressPtr, JITArrayMode);
Label privateCompileCTINativeCall(VM*, bool isConstruct = false);
CodeRef privateCompileCTINativeCall(VM*, NativeFunction);
void privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress);
- static bool isDirectPutById(StructureStubInfo*);
+ // Add a call out from JIT code, without an exception check.
+ Call appendCall(const FunctionPtr& function)
+ {
+ Call functionCall = call();
+ m_calls.append(CallRecord(functionCall, m_bytecodeOffset, function.value()));
+ return functionCall;
+ }
+
+#if OS(WINDOWS) && CPU(X86_64)
+ Call appendCallWithSlowPathReturnType(const FunctionPtr& function)
+ {
+ Call functionCall = callWithSlowPathReturnType();
+ m_calls.append(CallRecord(functionCall, m_bytecodeOffset, function.value()));
+ return functionCall;
+ }
+#endif
+
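+        // Queue a jump to be linked against the shared exception handlers emitted by privateCompileExceptionHandlers().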
+ void exceptionCheck(Jump jumpToHandler)
+ {
+ m_exceptionChecks.append(jumpToHandler);
+ }
+
+ void exceptionCheck()
+ {
+ m_exceptionChecks.append(emitExceptionCheck());
+ }
+
+ void exceptionCheckWithCallFrameRollback()
+ {
+ m_exceptionChecksWithCallFrameRollback.append(emitExceptionCheck());
+ }
+
+ void privateCompileExceptionHandlers();
void addSlowCase(Jump);
void addSlowCase(JumpList);
@@ -435,43 +317,37 @@ namespace JSC {
void compileOpCall(OpcodeID, Instruction*, unsigned callLinkInfoIndex);
void compileOpCallSlowCase(OpcodeID, Instruction*, Vector<SlowCaseEntry>::iterator&, unsigned callLinkInfoIndex);
- void compileLoadVarargs(Instruction*);
- void compileCallEval();
- void compileCallEvalSlowCase(Vector<SlowCaseEntry>::iterator&);
+ void compileSetupVarargsFrame(Instruction*, CallLinkInfo*);
+ void compileCallEval(Instruction*);
+ void compileCallEvalSlowCase(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitPutCallResult(Instruction*);
enum CompileOpStrictEqType { OpStrictEq, OpNStrictEq };
void compileOpStrictEq(Instruction* instruction, CompileOpStrictEqType type);
- bool isOperandConstantImmediateDouble(unsigned src);
+ bool isOperandConstantDouble(int src);
void emitLoadDouble(int index, FPRegisterID value);
void emitLoadInt32ToDouble(int index, FPRegisterID value);
- Jump emitJumpIfNotObject(RegisterID structureReg);
-
- Jump addStructureTransitionCheck(JSCell*, Structure*, StructureStubInfo*, RegisterID scratch);
- void addStructureTransitionCheck(JSCell*, Structure*, StructureStubInfo*, JumpList& failureCases, RegisterID scratch);
- void testPrototype(JSValue, JumpList& failureCases, StructureStubInfo*);
+ Jump emitJumpIfCellObject(RegisterID cellReg);
+ Jump emitJumpIfCellNotObject(RegisterID cellReg);
- enum WriteBarrierMode { UnconditionalWriteBarrier, ShouldFilterImmediates };
+ enum WriteBarrierMode { UnconditionalWriteBarrier, ShouldFilterBase, ShouldFilterValue, ShouldFilterBaseAndValue };
// value register in write barrier is used before any scratch registers
// so may safely be the same as either of the scratch registers.
- void emitWriteBarrier(RegisterID owner, RegisterID valueTag, RegisterID scratch, RegisterID scratch2, WriteBarrierMode, WriteBarrierUseKind);
- void emitWriteBarrier(JSCell* owner, RegisterID value, RegisterID scratch, WriteBarrierMode, WriteBarrierUseKind);
+ void emitWriteBarrier(unsigned owner, unsigned value, WriteBarrierMode);
+ void emitWriteBarrier(JSCell* owner, unsigned value, WriteBarrierMode);
+ void emitWriteBarrier(JSCell* owner);
template<typename StructureType> // StructureType can be RegisterID or ImmPtr.
void emitAllocateJSObject(RegisterID allocator, StructureType, RegisterID result, RegisterID scratch);
-#if ENABLE(VALUE_PROFILER)
// This assumes that the value to profile is in regT0 and that regT3 is available for
// scratch.
void emitValueProfilingSite(ValueProfile*);
void emitValueProfilingSite(unsigned bytecodeOffset);
void emitValueProfilingSite();
-#else
- void emitValueProfilingSite(unsigned) { }
- void emitValueProfilingSite() { }
-#endif
- void emitArrayProfilingSite(RegisterID structureAndIndexingType, RegisterID scratch, ArrayProfile*);
- void emitArrayProfilingSiteForBytecodeIndex(RegisterID structureAndIndexingType, RegisterID scratch, unsigned bytecodeIndex);
+ void emitArrayProfilingSiteWithCell(RegisterID cell, RegisterID indexingType, ArrayProfile*);
+ void emitArrayProfilingSiteForBytecodeIndexWithCell(RegisterID cell, RegisterID indexingType, unsigned bytecodeIndex);
void emitArrayProfileStoreToHoleSpecialCase(ArrayProfile*);
void emitArrayProfileOutOfBoundsSpecialCase(ArrayProfile*);
@@ -481,14 +357,22 @@ namespace JSC {
// Property is int-checked and zero extended. Base is cell checked.
// Structure is already profiled. Returns the slow cases. Fall-through
// case contains result in regT0, and it is not yet profiled.
+ JumpList emitInt32Load(Instruction* instruction, PatchableJump& badType) { return emitContiguousLoad(instruction, badType, Int32Shape); }
+ JumpList emitDoubleLoad(Instruction*, PatchableJump& badType);
+ JumpList emitContiguousLoad(Instruction*, PatchableJump& badType, IndexingType expectedShape = ContiguousShape);
+ JumpList emitArrayStorageLoad(Instruction*, PatchableJump& badType);
+ JumpList emitLoadForArrayMode(Instruction*, JITArrayMode, PatchableJump& badType);
+
JumpList emitInt32GetByVal(Instruction* instruction, PatchableJump& badType) { return emitContiguousGetByVal(instruction, badType, Int32Shape); }
JumpList emitDoubleGetByVal(Instruction*, PatchableJump& badType);
JumpList emitContiguousGetByVal(Instruction*, PatchableJump& badType, IndexingType expectedShape = ContiguousShape);
JumpList emitArrayStorageGetByVal(Instruction*, PatchableJump& badType);
- JumpList emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor&, size_t elementSize, TypedArraySignedness);
- JumpList emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor&, size_t elementSize);
+ JumpList emitDirectArgumentsGetByVal(Instruction*, PatchableJump& badType);
+ JumpList emitScopedArgumentsGetByVal(Instruction*, PatchableJump& badType);
+ JumpList emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badType, TypedArrayType);
+ JumpList emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badType, TypedArrayType);
- // Property is in regT0, base is in regT0. regT2 contains indecing type.
+ // Property is in regT1, base is in regT0. regT2 contains indexing type.
// The value to store is not yet loaded. Property is int-checked and
// zero-extended. Base is cell checked. Structure is already profiled.
// returns the slow cases.
@@ -506,13 +390,25 @@ namespace JSC {
}
JumpList emitGenericContiguousPutByVal(Instruction*, PatchableJump& badType, IndexingType indexingShape = ContiguousShape);
JumpList emitArrayStoragePutByVal(Instruction*, PatchableJump& badType);
- JumpList emitIntTypedArrayPutByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor&, size_t elementSize, TypedArraySignedness, TypedArrayRounding);
- JumpList emitFloatTypedArrayPutByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor&, size_t elementSize);
-
+ JumpList emitIntTypedArrayPutByVal(Instruction*, PatchableJump& badType, TypedArrayType);
+ JumpList emitFloatTypedArrayPutByVal(Instruction*, PatchableJump& badType, TypedArrayType);
+
+ // Identifier check helper for GetByVal and PutByVal.
+ void emitIdentifierCheck(RegisterID cell, RegisterID scratch, const Identifier&, JumpList& slowCases);
+
+ JITGetByIdGenerator emitGetByValWithCachedId(Instruction*, const Identifier&, Jump& fastDoneCase, Jump& slowDoneCase, JumpList& slowCases);
+ JITPutByIdGenerator emitPutByValWithCachedId(Instruction*, PutKind, const Identifier&, JumpList& doneCases, JumpList& slowCases);
+
enum FinalObjectMode { MayBeFinal, KnownNotFinal };
+ void emitGetVirtualRegister(int src, JSValueRegs dst);
+ void emitPutVirtualRegister(int dst, JSValueRegs src);
+
+ int32_t getOperandConstantInt(int src);
+ double getOperandConstantDouble(int src);
+
#if USE(JSVALUE32_64)
- bool getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant);
+ bool getOperandConstantInt(int op1, int op2, int& op, int32_t& constant);
void emitLoadTag(int index, RegisterID tag);
void emitLoadPayload(int index, RegisterID payload);
@@ -525,156 +421,103 @@ namespace JSC {
void emitStore(int index, const JSValue constant, RegisterID base = callFrameRegister);
void emitStoreInt32(int index, RegisterID payload, bool indexIsInt32 = false);
void emitStoreInt32(int index, TrustedImm32 payload, bool indexIsInt32 = false);
- void emitStoreAndMapInt32(int index, RegisterID tag, RegisterID payload, bool indexIsInt32, size_t opcodeLength);
void emitStoreCell(int index, RegisterID payload, bool indexIsCell = false);
void emitStoreBool(int index, RegisterID payload, bool indexIsBool = false);
void emitStoreDouble(int index, FPRegisterID value);
- bool isLabeled(unsigned bytecodeOffset);
- void map(unsigned bytecodeOffset, int virtualRegisterIndex, RegisterID tag, RegisterID payload);
- void unmap(RegisterID);
- void unmap();
- bool isMapped(int virtualRegisterIndex);
- bool getMappedPayload(int virtualRegisterIndex, RegisterID& payload);
- bool getMappedTag(int virtualRegisterIndex, RegisterID& tag);
-
void emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex);
void emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex, RegisterID tag);
- void compileGetByIdHotPath(Identifier*);
- void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier*, Vector<SlowCaseEntry>::iterator&);
- void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, PropertyOffset cachedOffset);
- void compileGetDirectOffset(JSObject* base, RegisterID resultTag, RegisterID resultPayload, PropertyOffset cachedOffset);
- void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID offset, FinalObjectMode = MayBeFinal);
- void compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, PropertyOffset cachedOffset);
+ void compileGetByIdHotPath(const Identifier*);
// Arithmetic opcode helpers
- void emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType);
- void emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType);
- void emitBinaryDoubleOp(OpcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters = true, bool op2IsInRegisters = true);
-
-#if CPU(ARM_TRADITIONAL)
- // sequenceOpCall
- static const int sequenceOpCallInstructionSpace = 12;
- static const int sequenceOpCallConstantSpace = 2;
- // sequenceGetByIdHotPath
- static const int sequenceGetByIdHotPathInstructionSpace = 36;
- static const int sequenceGetByIdHotPathConstantSpace = 4;
- // sequenceGetByIdSlowCase
- static const int sequenceGetByIdSlowCaseInstructionSpace = 80;
- static const int sequenceGetByIdSlowCaseConstantSpace = 4;
- // sequencePutById
- static const int sequencePutByIdInstructionSpace = 36;
- static const int sequencePutByIdConstantSpace = 4;
-#elif CPU(SH4)
- // sequenceOpCall
- static const int sequenceOpCallInstructionSpace = 12;
- static const int sequenceOpCallConstantSpace = 2;
- // sequenceGetByIdHotPath
- static const int sequenceGetByIdHotPathInstructionSpace = 36;
- static const int sequenceGetByIdHotPathConstantSpace = 5;
- // sequenceGetByIdSlowCase
- static const int sequenceGetByIdSlowCaseInstructionSpace = 38;
- static const int sequenceGetByIdSlowCaseConstantSpace = 4;
- // sequencePutById
- static const int sequencePutByIdInstructionSpace = 36;
- static const int sequencePutByIdConstantSpace = 5;
-#endif
+ void emitSub32Constant(int dst, int op, int32_t constant, ResultType opType);
+ void emitBinaryDoubleOp(OpcodeID, int dst, int op1, int op2, OperandTypes, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters = true, bool op2IsInRegisters = true);
#else // USE(JSVALUE32_64)
- /* This function is deprecated. */
- void emitGetJITStubArg(unsigned argumentNumber, RegisterID dst);
-
void emitGetVirtualRegister(int src, RegisterID dst);
+ void emitGetVirtualRegister(VirtualRegister src, RegisterID dst);
void emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2);
- void emitPutVirtualRegister(unsigned dst, RegisterID from = regT0);
- void emitStoreCell(unsigned dst, RegisterID payload, bool /* only used in JSValue32_64 */ = false)
+ void emitGetVirtualRegisters(VirtualRegister src1, RegisterID dst1, VirtualRegister src2, RegisterID dst2);
+ void emitPutVirtualRegister(int dst, RegisterID from = regT0);
+ void emitPutVirtualRegister(VirtualRegister dst, RegisterID from = regT0);
+ void emitStoreCell(int dst, RegisterID payload, bool /* only used in JSValue32_64 */ = false)
+ {
+ emitPutVirtualRegister(dst, payload);
+ }
+ void emitStoreCell(VirtualRegister dst, RegisterID payload)
{
emitPutVirtualRegister(dst, payload);
}
-
- int32_t getConstantOperandImmediateInt(unsigned src);
-
- void killLastResultRegister();
Jump emitJumpIfJSCell(RegisterID);
Jump emitJumpIfBothJSCells(RegisterID, RegisterID, RegisterID);
void emitJumpSlowCaseIfJSCell(RegisterID);
void emitJumpSlowCaseIfNotJSCell(RegisterID);
void emitJumpSlowCaseIfNotJSCell(RegisterID, int VReg);
- Jump emitJumpIfImmediateInteger(RegisterID);
- Jump emitJumpIfNotImmediateInteger(RegisterID);
- Jump emitJumpIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);
- void emitJumpSlowCaseIfNotImmediateInteger(RegisterID);
- void emitJumpSlowCaseIfNotImmediateNumber(RegisterID);
- void emitJumpSlowCaseIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);
-
- void emitFastArithReTagImmediate(RegisterID src, RegisterID dest);
-
- void emitTagAsBoolImmediate(RegisterID reg);
- void compileBinaryArithOp(OpcodeID, unsigned dst, unsigned src1, unsigned src2, OperandTypes opi);
- void compileBinaryArithOpSlowCase(OpcodeID, Vector<SlowCaseEntry>::iterator&, unsigned dst, unsigned src1, unsigned src2, OperandTypes, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase);
-
- void compileGetByIdHotPath(int baseVReg, Identifier*);
- void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier*, Vector<SlowCaseEntry>::iterator&);
- void compileGetDirectOffset(RegisterID base, RegisterID result, PropertyOffset cachedOffset);
- void compileGetDirectOffset(JSObject* base, RegisterID result, PropertyOffset cachedOffset);
- void compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch, FinalObjectMode = MayBeFinal);
- void compilePutDirectOffset(RegisterID base, RegisterID value, PropertyOffset cachedOffset);
+ Jump emitJumpIfInt(RegisterID);
+ Jump emitJumpIfNotInt(RegisterID);
+ Jump emitJumpIfNotInt(RegisterID, RegisterID, RegisterID scratch);
+ PatchableJump emitPatchableJumpIfNotInt(RegisterID);
+ void emitJumpSlowCaseIfNotInt(RegisterID);
+ void emitJumpSlowCaseIfNotNumber(RegisterID);
+ void emitJumpSlowCaseIfNotInt(RegisterID, RegisterID, RegisterID scratch);
-#endif // USE(JSVALUE32_64)
-
-#if (defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL)
-#define BEGIN_UNINTERRUPTED_SEQUENCE(name) do { beginUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace); } while (false)
-#define END_UNINTERRUPTED_SEQUENCE_FOR_PUT(name, dst) do { endUninterruptedSequence(name ## InstructionSpace, name ## ConstantSpace, dst); } while (false)
-#define END_UNINTERRUPTED_SEQUENCE(name) END_UNINTERRUPTED_SEQUENCE_FOR_PUT(name, 0)
+ void emitTagBool(RegisterID);
- void beginUninterruptedSequence(int, int);
- void endUninterruptedSequence(int, int, int);
+ void compileGetByIdHotPath(int baseVReg, const Identifier*);
-#else
-#define BEGIN_UNINTERRUPTED_SEQUENCE(name)
-#define END_UNINTERRUPTED_SEQUENCE(name)
-#define END_UNINTERRUPTED_SEQUENCE_FOR_PUT(name, dst)
-#endif
+#endif // USE(JSVALUE32_64)
- void emit_compareAndJump(OpcodeID, unsigned op1, unsigned op2, unsigned target, RelationalCondition);
- void emit_compareAndJumpSlow(unsigned op1, unsigned op2, unsigned target, DoubleCondition, int (JIT_STUB *stub)(STUB_ARGS_DECLARATION), bool invert, Vector<SlowCaseEntry>::iterator&);
+ void emit_compareAndJump(OpcodeID, int op1, int op2, unsigned target, RelationalCondition);
+ void emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondition, size_t (JIT_OPERATION *operation)(ExecState*, EncodedJSValue, EncodedJSValue), bool invert, Vector<SlowCaseEntry>::iterator&);
+
+ void assertStackPointerOffset();
void emit_op_add(Instruction*);
void emit_op_bitand(Instruction*);
void emit_op_bitor(Instruction*);
void emit_op_bitxor(Instruction*);
void emit_op_call(Instruction*);
+ void emit_op_tail_call(Instruction*);
void emit_op_call_eval(Instruction*);
void emit_op_call_varargs(Instruction*);
- void emit_op_call_put_result(Instruction*);
+ void emit_op_tail_call_varargs(Instruction*);
+ void emit_op_construct_varargs(Instruction*);
void emit_op_catch(Instruction*);
void emit_op_construct(Instruction*);
- void emit_op_get_callee(Instruction*);
void emit_op_create_this(Instruction*);
- void emit_op_convert_this(Instruction*);
- void emit_op_create_arguments(Instruction*);
+ void emit_op_to_this(Instruction*);
+ void emit_op_create_direct_arguments(Instruction*);
+ void emit_op_create_scoped_arguments(Instruction*);
+ void emit_op_create_out_of_band_arguments(Instruction*);
+ void emit_op_copy_rest(Instruction*);
+ void emit_op_get_rest_length(Instruction*);
+ void emit_op_check_tdz(Instruction*);
+ void emit_op_assert(Instruction*);
+ void emit_op_save(Instruction*);
+ void emit_op_resume(Instruction*);
void emit_op_debug(Instruction*);
void emit_op_del_by_id(Instruction*);
void emit_op_div(Instruction*);
void emit_op_end(Instruction*);
void emit_op_enter(Instruction*);
- void emit_op_create_activation(Instruction*);
+ void emit_op_get_scope(Instruction*);
void emit_op_eq(Instruction*);
void emit_op_eq_null(Instruction*);
void emit_op_get_by_id(Instruction*);
void emit_op_get_arguments_length(Instruction*);
void emit_op_get_by_val(Instruction*);
void emit_op_get_argument_by_val(Instruction*);
- void emit_op_get_by_pname(Instruction*);
void emit_op_init_lazy_reg(Instruction*);
- void emit_op_check_has_instance(Instruction*);
+ void emit_op_overrides_has_instance(Instruction*);
void emit_op_instanceof(Instruction*);
+ void emit_op_instanceof_custom(Instruction*);
void emit_op_is_undefined(Instruction*);
void emit_op_is_boolean(Instruction*);
void emit_op_is_number(Instruction*);
void emit_op_is_string(Instruction*);
+ void emit_op_is_object(Instruction*);
void emit_op_jeq_null(Instruction*);
void emit_op_jfalse(Instruction*);
void emit_op_jmp(Instruction*);
@@ -690,6 +533,7 @@ namespace JSC {
void emit_op_jngreatereq(Instruction*);
void emit_op_jtrue(Instruction*);
void emit_op_loop_hint(Instruction*);
+ void emit_op_watchdog(Instruction*);
void emit_op_lshift(Instruction*);
void emit_op_mod(Instruction*);
void emit_op_mov(Instruction*);
@@ -702,34 +546,31 @@ namespace JSC {
void emit_op_new_array_buffer(Instruction*);
void emit_op_new_func(Instruction*);
void emit_op_new_func_exp(Instruction*);
+ void emit_op_new_generator_func(Instruction*);
+ void emit_op_new_generator_func_exp(Instruction*);
+ void emit_op_new_arrow_func_exp(Instruction*);
void emit_op_new_object(Instruction*);
void emit_op_new_regexp(Instruction*);
- void emit_op_get_pnames(Instruction*);
- void emit_op_next_pname(Instruction*);
void emit_op_not(Instruction*);
void emit_op_nstricteq(Instruction*);
- void emit_op_pop_scope(Instruction*);
void emit_op_dec(Instruction*);
void emit_op_inc(Instruction*);
void emit_op_profile_did_call(Instruction*);
void emit_op_profile_will_call(Instruction*);
- void emit_op_push_name_scope(Instruction*);
+ void emit_op_profile_type(Instruction*);
+ void emit_op_profile_control_flow(Instruction*);
void emit_op_push_with_scope(Instruction*);
+ void emit_op_create_lexical_environment(Instruction*);
+ void emit_op_get_parent_scope(Instruction*);
void emit_op_put_by_id(Instruction*);
void emit_op_put_by_index(Instruction*);
void emit_op_put_by_val(Instruction*);
- void emit_op_put_getter_setter(Instruction*);
- void emit_op_init_global_const(Instruction*);
- void emit_op_init_global_const_check(Instruction*);
- void emit_resolve_operations(ResolveOperations*, const int* base, const int* value);
- void emitSlow_link_resolve_operations(ResolveOperations*, Vector<SlowCaseEntry>::iterator&);
- void emit_op_resolve(Instruction*);
- void emit_op_resolve_base(Instruction*);
- void emit_op_resolve_with_base(Instruction*);
- void emit_op_resolve_with_this(Instruction*);
- void emit_op_put_to_base(Instruction*);
+ void emit_op_put_getter_by_id(Instruction*);
+ void emit_op_put_setter_by_id(Instruction*);
+ void emit_op_put_getter_setter_by_id(Instruction*);
+ void emit_op_put_getter_by_val(Instruction*);
+ void emit_op_put_setter_by_val(Instruction*);
void emit_op_ret(Instruction*);
- void emit_op_ret_object_or_this(Instruction*);
void emit_op_rshift(Instruction*);
void emit_op_strcat(Instruction*);
void emit_op_stricteq(Instruction*);
@@ -737,36 +578,48 @@ namespace JSC {
void emit_op_switch_char(Instruction*);
void emit_op_switch_imm(Instruction*);
void emit_op_switch_string(Instruction*);
- void emit_op_tear_off_activation(Instruction*);
void emit_op_tear_off_arguments(Instruction*);
void emit_op_throw(Instruction*);
void emit_op_throw_static_error(Instruction*);
void emit_op_to_number(Instruction*);
+ void emit_op_to_string(Instruction*);
void emit_op_to_primitive(Instruction*);
void emit_op_unexpected_load(Instruction*);
+ void emit_op_unsigned(Instruction*);
void emit_op_urshift(Instruction*);
- void emit_op_get_scoped_var(Instruction*);
- void emit_op_put_scoped_var(Instruction*);
+ void emit_op_get_enumerable_length(Instruction*);
+ void emit_op_has_generic_property(Instruction*);
+ void emit_op_has_structure_property(Instruction*);
+ void emit_op_has_indexed_property(Instruction*);
+ void emit_op_get_direct_pname(Instruction*);
+ void emit_op_get_property_enumerator(Instruction*);
+ void emit_op_enumerator_structure_pname(Instruction*);
+ void emit_op_enumerator_generic_pname(Instruction*);
+ void emit_op_to_index_string(Instruction*);
void emitSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_bitand(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_bitor(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_bitxor(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_call(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_tail_call(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_call_eval(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_call_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_tail_call_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_construct_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_construct(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_convert_this(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_to_this(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_create_this(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_check_tdz(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_div(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_eq(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_get_callee(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_get_arguments_length(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_get_argument_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_get_by_pname(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_check_has_instance(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_instanceof(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_instanceof_custom(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_jfalse(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_jless(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_jlesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
@@ -778,6 +631,7 @@ namespace JSC {
void emitSlow_op_jngreatereq(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_jtrue(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_loop_hint(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_watchdog(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_lshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&);
@@ -790,37 +644,56 @@ namespace JSC {
void emitSlow_op_inc(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_put_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_put_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_init_global_const_check(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_rshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_stricteq(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_to_number(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_to_string(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_to_primitive(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_unsigned(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_urshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
-
- void emitSlow_op_resolve(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_resolve_base(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_resolve_with_base(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_resolve_with_this(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_put_to_base(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_has_indexed_property(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_has_structure_property(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_get_direct_pname(Instruction*, Vector<SlowCaseEntry>::iterator&);
+
+ void emit_op_resolve_scope(Instruction*);
+ void emit_op_get_from_scope(Instruction*);
+ void emit_op_put_to_scope(Instruction*);
+ void emit_op_get_from_arguments(Instruction*);
+ void emit_op_put_to_arguments(Instruction*);
+ void emitSlow_op_resolve_scope(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_get_from_scope(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_put_to_scope(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitRightShift(Instruction*, bool isUnsigned);
void emitRightShiftSlowCase(Instruction*, Vector<SlowCaseEntry>::iterator&, bool isUnsigned);
- void emitInitRegister(unsigned dst);
-
- void emitPutIntToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry);
- void emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister);
- void emitGetFromCallFrameHeader32(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister);
+ void emitNewFuncCommon(Instruction*);
+ void emitNewFuncExprCommon(Instruction*);
+ void emitVarInjectionCheck(bool needsVarInjectionChecks);
+ void emitResolveClosure(int dst, int scope, bool needsVarInjectionChecks, unsigned depth);
+ void emitLoadWithStructureCheck(int scope, Structure** structureSlot);
#if USE(JSVALUE64)
- void emitGetFromCallFrameHeader64(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister);
+ void emitGetVarFromPointer(JSValue* operand, GPRReg);
+ void emitGetVarFromIndirectPointer(JSValue** operand, GPRReg);
+#else
+ void emitGetVarFromIndirectPointer(JSValue** operand, GPRReg tag, GPRReg payload);
+ void emitGetVarFromPointer(JSValue* operand, GPRReg tag, GPRReg payload);
#endif
+ void emitGetClosureVar(int scope, uintptr_t operand);
+ void emitNotifyWrite(WatchpointSet*);
+ void emitNotifyWrite(GPRReg pointerToSet);
+ void emitPutGlobalVariable(JSValue* operand, int value, WatchpointSet*);
+ void emitPutGlobalVariableIndirect(JSValue** addressOfOperand, int value, WatchpointSet**);
+ void emitPutClosureVar(int scope, uintptr_t operand, int value, WatchpointSet*);
- JSValue getConstantOperand(unsigned src);
- bool isOperandConstantImmediateInt(unsigned src);
- bool isOperandConstantImmediateChar(unsigned src);
+ void emitInitRegister(int dst);
- bool atJumpTarget();
+ void emitPutIntToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry);
+
+ JSValue getConstantOperand(int src);
+ bool isOperandConstantInt(int src);
+ bool isOperandConstantChar(int src);
Jump getSlowCase(Vector<SlowCaseEntry>::iterator& iter)
{
@@ -837,13 +710,138 @@ namespace JSC {
++iter;
}
void linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&, int virtualRegisterIndex);
+ void linkAllSlowCasesForBytecodeOffset(Vector<SlowCaseEntry>& slowCases,
+ Vector<SlowCaseEntry>::iterator&, unsigned bytecodeOffset);
+
+ MacroAssembler::Call appendCallWithExceptionCheck(const FunctionPtr&);
+#if OS(WINDOWS) && CPU(X86_64)
+ MacroAssembler::Call appendCallWithExceptionCheckAndSlowPathReturnType(const FunctionPtr&);
+#endif
+ MacroAssembler::Call appendCallWithCallFrameRollbackOnException(const FunctionPtr&);
+ MacroAssembler::Call appendCallWithExceptionCheckSetJSValueResult(const FunctionPtr&, int);
+ MacroAssembler::Call appendCallWithExceptionCheckSetJSValueResultWithProfile(const FunctionPtr&, int);
+
+ enum WithProfileTag { WithProfile };
+
+ MacroAssembler::Call callOperation(C_JITOperation_E);
+ MacroAssembler::Call callOperation(C_JITOperation_EO, GPRReg);
+ MacroAssembler::Call callOperation(C_JITOperation_EL, GPRReg);
+ MacroAssembler::Call callOperation(C_JITOperation_EL, TrustedImmPtr);
+ MacroAssembler::Call callOperation(C_JITOperation_ESt, Structure*);
+ MacroAssembler::Call callOperation(C_JITOperation_EZ, int32_t);
+ MacroAssembler::Call callOperation(Z_JITOperation_EJZZ, GPRReg, int32_t, int32_t);
+ MacroAssembler::Call callOperation(J_JITOperation_E, int);
+ MacroAssembler::Call callOperation(J_JITOperation_EAapJ, int, ArrayAllocationProfile*, GPRReg);
+ MacroAssembler::Call callOperation(J_JITOperation_EAapJcpZ, int, ArrayAllocationProfile*, GPRReg, int32_t);
+ MacroAssembler::Call callOperation(J_JITOperation_EAapJcpZ, int, ArrayAllocationProfile*, const JSValue*, int32_t);
+ MacroAssembler::Call callOperation(J_JITOperation_EC, int, JSCell*);
+ MacroAssembler::Call callOperation(V_JITOperation_EC, JSCell*);
+ MacroAssembler::Call callOperation(J_JITOperation_EJ, int, GPRReg);
+#if USE(JSVALUE64)
+ MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_ESsiJI, int, StructureStubInfo*, GPRReg, UniquedStringImpl*);
+#else
+ MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_ESsiJI, int, StructureStubInfo*, GPRReg, GPRReg, UniquedStringImpl*);
+#endif
+ MacroAssembler::Call callOperation(J_JITOperation_EJIdc, int, GPRReg, const Identifier*);
+ MacroAssembler::Call callOperation(J_JITOperation_EJJ, int, GPRReg, GPRReg);
+ MacroAssembler::Call callOperation(J_JITOperation_EJJAp, int, GPRReg, GPRReg, ArrayProfile*);
+ MacroAssembler::Call callOperation(J_JITOperation_EJJBy, int, GPRReg, GPRReg, ByValInfo*);
+ MacroAssembler::Call callOperation(Z_JITOperation_EJOJ, GPRReg, GPRReg, GPRReg);
+ MacroAssembler::Call callOperation(C_JITOperation_EJsc, GPRReg);
+ MacroAssembler::Call callOperation(J_JITOperation_EJscC, int, GPRReg, JSCell*);
+ MacroAssembler::Call callOperation(J_JITOperation_EJscCJ, int, GPRReg, JSCell*, GPRReg);
+ MacroAssembler::Call callOperation(C_JITOperation_EJscZ, GPRReg, int32_t);
+ MacroAssembler::Call callOperation(C_JITOperation_EJscZ, int, GPRReg, int32_t);
+#if USE(JSVALUE64)
+ MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_EJJ, int, GPRReg, GPRReg);
+#else
+ MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_EJJ, int, GPRReg, GPRReg, GPRReg, GPRReg);
+#endif
+ MacroAssembler::Call callOperation(J_JITOperation_EP, int, void*);
+ MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_EPc, int, Instruction*);
+ MacroAssembler::Call callOperation(J_JITOperation_EPc, int, Instruction*);
+ MacroAssembler::Call callOperation(J_JITOperation_EZ, int, int32_t);
+ MacroAssembler::Call callOperation(J_JITOperation_EZZ, int, int32_t, int32_t);
+ MacroAssembler::Call callOperation(P_JITOperation_E);
+ MacroAssembler::Call callOperation(P_JITOperation_EJS, GPRReg, size_t);
+ MacroAssembler::Call callOperation(S_JITOperation_ECC, RegisterID, RegisterID);
+ MacroAssembler::Call callOperation(S_JITOperation_EJ, RegisterID);
+ MacroAssembler::Call callOperation(S_JITOperation_EJJ, RegisterID, RegisterID);
+ MacroAssembler::Call callOperation(S_JITOperation_EOJss, RegisterID, RegisterID);
+ MacroAssembler::Call callOperation(Sprt_JITOperation_EZ, int32_t);
+ MacroAssembler::Call callOperation(V_JITOperation_E);
+ MacroAssembler::Call callOperation(V_JITOperation_EC, RegisterID);
+ MacroAssembler::Call callOperation(V_JITOperation_ECC, RegisterID, RegisterID);
+ MacroAssembler::Call callOperation(V_JITOperation_ECIZC, RegisterID, UniquedStringImpl*, int32_t, RegisterID);
+ MacroAssembler::Call callOperation(V_JITOperation_ECIZCC, RegisterID, UniquedStringImpl*, int32_t, RegisterID, RegisterID);
+#if USE(JSVALUE64)
+ MacroAssembler::Call callOperation(V_JITOperation_ECJZC, RegisterID, RegisterID, int32_t, RegisterID);
+#else
+ MacroAssembler::Call callOperation(V_JITOperation_ECJZC, RegisterID, RegisterID, RegisterID, int32_t, RegisterID);
+#endif
+ MacroAssembler::Call callOperation(J_JITOperation_EE, RegisterID);
+ MacroAssembler::Call callOperation(V_JITOperation_EZSymtabJ, int, SymbolTable*, RegisterID);
+ MacroAssembler::Call callOperation(J_JITOperation_EZSymtabJ, int, SymbolTable*, RegisterID);
+ MacroAssembler::Call callOperation(V_JITOperation_EJ, RegisterID);
+ MacroAssembler::Call callOperationNoExceptionCheck(Z_JITOperation_E);
+#if USE(JSVALUE64)
+ MacroAssembler::Call callOperationNoExceptionCheck(V_JITOperation_EJ, RegisterID);
+#else
+ MacroAssembler::Call callOperationNoExceptionCheck(V_JITOperation_EJ, RegisterID, RegisterID);
+#endif
+#if USE(JSVALUE64)
+ MacroAssembler::Call callOperation(F_JITOperation_EFJZZ, RegisterID, RegisterID, int32_t, RegisterID);
+ MacroAssembler::Call callOperation(V_JITOperation_ESsiJJI, StructureStubInfo*, RegisterID, RegisterID, UniquedStringImpl*);
+ MacroAssembler::Call callOperation(V_JITOperation_ECIZJJ, RegisterID, UniquedStringImpl*, int32_t, RegisterID, RegisterID);
+#else
+ MacroAssembler::Call callOperation(V_JITOperation_ESsiJJI, StructureStubInfo*, RegisterID, RegisterID, RegisterID, RegisterID, UniquedStringImpl*);
+#endif
+ MacroAssembler::Call callOperation(V_JITOperation_EJJJ, RegisterID, RegisterID, RegisterID);
+ MacroAssembler::Call callOperation(V_JITOperation_EJJJAp, RegisterID, RegisterID, RegisterID, ArrayProfile*);
+ MacroAssembler::Call callOperation(V_JITOperation_EJJJBy, RegisterID, RegisterID, RegisterID, ByValInfo*);
+ MacroAssembler::Call callOperation(V_JITOperation_EJZJ, RegisterID, int32_t, RegisterID);
+ MacroAssembler::Call callOperation(V_JITOperation_EJZ, RegisterID, int32_t);
+ MacroAssembler::Call callOperation(V_JITOperation_EPc, Instruction*);
+ MacroAssembler::Call callOperation(V_JITOperation_EZ, int32_t);
+ MacroAssembler::Call callOperation(V_JITOperation_EZJ, int, GPRReg);
+ MacroAssembler::Call callOperationWithCallFrameRollbackOnException(J_JITOperation_E);
+ MacroAssembler::Call callOperationWithCallFrameRollbackOnException(V_JITOperation_ECb, CodeBlock*);
+ MacroAssembler::Call callOperationWithCallFrameRollbackOnException(Z_JITOperation_E);
+#if USE(JSVALUE32_64)
+ MacroAssembler::Call callOperation(F_JITOperation_EFJZZ, RegisterID, RegisterID, RegisterID, int32_t, RegisterID);
+ MacroAssembler::Call callOperation(Z_JITOperation_EJZZ, GPRReg, GPRReg, int32_t, int32_t);
+ MacroAssembler::Call callOperation(J_JITOperation_EAapJ, int, ArrayAllocationProfile*, GPRReg, GPRReg);
+ MacroAssembler::Call callOperation(J_JITOperation_EJ, int, GPRReg, GPRReg);
+ MacroAssembler::Call callOperation(J_JITOperation_EJIdc, int, GPRReg, GPRReg, const Identifier*);
+ MacroAssembler::Call callOperation(J_JITOperation_EJJ, int, GPRReg, GPRReg, GPRReg, GPRReg);
+ MacroAssembler::Call callOperation(Z_JITOperation_EJOJ, GPRReg, GPRReg, GPRReg, GPRReg, GPRReg);
+ MacroAssembler::Call callOperation(J_JITOperation_EJJAp, int, GPRReg, GPRReg, GPRReg, GPRReg, ArrayProfile*);
+ MacroAssembler::Call callOperation(J_JITOperation_EJJBy, int, GPRReg, GPRReg, GPRReg, GPRReg, ByValInfo*);
+ MacroAssembler::Call callOperation(P_JITOperation_EJS, GPRReg, GPRReg, size_t);
+ MacroAssembler::Call callOperation(S_JITOperation_EJ, RegisterID, RegisterID);
+ MacroAssembler::Call callOperation(S_JITOperation_EJJ, RegisterID, RegisterID, RegisterID, RegisterID);
+ MacroAssembler::Call callOperation(V_JITOperation_EZSymtabJ, int, SymbolTable*, RegisterID, RegisterID);
+ MacroAssembler::Call callOperation(V_JITOperation_EJ, RegisterID, RegisterID);
+ MacroAssembler::Call callOperation(V_JITOperation_EJJJ, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID);
+ MacroAssembler::Call callOperation(V_JITOperation_EJJJAp, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID, ArrayProfile*);
+ MacroAssembler::Call callOperation(V_JITOperation_EJJJBy, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID, ByValInfo*);
+ MacroAssembler::Call callOperation(V_JITOperation_EJZ, RegisterID, RegisterID, int32_t);
+ MacroAssembler::Call callOperation(V_JITOperation_EJZJ, RegisterID, RegisterID, int32_t, RegisterID, RegisterID);
+ MacroAssembler::Call callOperation(V_JITOperation_EZJ, int32_t, RegisterID, RegisterID);
+ MacroAssembler::Call callOperation(J_JITOperation_EJscCJ, int, GPRReg, JSCell*, GPRReg, GPRReg);
+#endif
+
+ template<typename SnippetGenerator>
+ void emitBitBinaryOpFastPath(Instruction* currentInstruction);
+
+ void emitRightShiftFastPath(Instruction* currentInstruction, OpcodeID);
Jump checkStructure(RegisterID reg, Structure* structure);
- void restoreArgumentReferenceForTrampoline();
void updateTopCallFrame();
Call emitNakedCall(CodePtr function = CodePtr());
+ Call emitNakedTailCall(CodePtr function = CodePtr());
// Loads the character value of a single character string into dst.
void emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures);
@@ -855,7 +853,7 @@ namespace JSC {
#endif
#ifndef NDEBUG
- void printBytecodeOperandTypes(unsigned src1, unsigned src2);
+ void printBytecodeOperandTypes(int src1, int src2);
#endif
#if ENABLE(SAMPLING_FLAGS)
@@ -890,52 +888,37 @@ namespace JSC {
#endif
Interpreter* m_interpreter;
- VM* m_vm;
- CodeBlock* m_codeBlock;
Vector<CallRecord> m_calls;
Vector<Label> m_labels;
- Vector<PropertyStubCompilationInfo> m_propertyAccessCompilationInfo;
+ Vector<JITGetByIdGenerator> m_getByIds;
+ Vector<JITPutByIdGenerator> m_putByIds;
Vector<ByValCompilationInfo> m_byValCompilationInfo;
- Vector<StructureStubCompilationInfo> m_callStructureStubCompilationInfo;
+ Vector<CallCompilationInfo> m_callCompilationInfo;
Vector<JumpTable> m_jmpTable;
unsigned m_bytecodeOffset;
Vector<SlowCaseEntry> m_slowCases;
Vector<SwitchRecord> m_switches;
- unsigned m_propertyAccessInstructionIndex;
+ JumpList m_exceptionChecks;
+ JumpList m_exceptionChecksWithCallFrameRollback;
+
+ unsigned m_getByIdIndex;
+ unsigned m_putByIdIndex;
unsigned m_byValInstructionIndex;
- unsigned m_globalResolveInfoIndex;
unsigned m_callLinkInfoIndex;
-#if USE(JSVALUE32_64)
- unsigned m_jumpTargetIndex;
- unsigned m_mappedBytecodeOffset;
- int m_mappedVirtualRegisterIndex;
- RegisterID m_mappedTag;
- RegisterID m_mappedPayload;
-#else
- int m_lastResultBytecodeRegister;
-#endif
- unsigned m_jumpTargetsPosition;
-
-#ifndef NDEBUG
-#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
- Label m_uninterruptedInstructionSequenceBegin;
- int m_uninterruptedConstantSequenceBegin;
-#endif
-#endif
- OwnPtr<JITDisassembler> m_disassembler;
+ std::unique_ptr<JITDisassembler> m_disassembler;
RefPtr<Profiler::Compilation> m_compilation;
WeakRandom m_randomGenerator;
static CodeRef stringGetByValStubGenerator(VM*);
-#if ENABLE(VALUE_PROFILER)
+ PCToCodeOriginMapBuilder m_pcToCodeOriginMapBuilder;
+
bool m_canBeOptimized;
bool m_canBeOptimizedOrInlined;
bool m_shouldEmitProfiling;
-#endif
} JIT_CLASS_ALIGNMENT;
} // namespace JSC
diff --git a/Source/JavaScriptCore/jit/JITAddGenerator.cpp b/Source/JavaScriptCore/jit/JITAddGenerator.cpp
new file mode 100644
index 000000000..5d91a516a
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITAddGenerator.cpp
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JITAddGenerator.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+void JITAddGenerator::generateFastPath(CCallHelpers& jit)
+{
+ ASSERT(m_scratchGPR != InvalidGPRReg);
+ ASSERT(m_scratchGPR != m_left.payloadGPR());
+ ASSERT(m_scratchGPR != m_right.payloadGPR());
+#if USE(JSVALUE32_64)
+ ASSERT(m_scratchGPR != m_left.tagGPR());
+ ASSERT(m_scratchGPR != m_right.tagGPR());
+ ASSERT(m_scratchFPR != InvalidFPRReg);
+#endif
+
+ ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());
+
+ if (!m_leftOperand.mightBeNumber() || !m_rightOperand.mightBeNumber()) {
+ ASSERT(!m_didEmitFastPath);
+ return;
+ }
+
+ m_didEmitFastPath = true;
+
+ if (m_leftOperand.isConstInt32() || m_rightOperand.isConstInt32()) {
+ JSValueRegs var = m_leftOperand.isConstInt32() ? m_right : m_left;
+ SnippetOperand& varOpr = m_leftOperand.isConstInt32() ? m_rightOperand : m_leftOperand;
+ SnippetOperand& constOpr = m_leftOperand.isConstInt32() ? m_leftOperand : m_rightOperand;
+
+ // Try to do intVar + intConstant.
+ CCallHelpers::Jump notInt32 = jit.branchIfNotInt32(var);
+
+ m_slowPathJumpList.append(jit.branchAdd32(CCallHelpers::Overflow, var.payloadGPR(), CCallHelpers::Imm32(constOpr.asConstInt32()), m_scratchGPR));
+
+ jit.boxInt32(m_scratchGPR, m_result);
+ m_endJumpList.append(jit.jump());
+
+ if (!jit.supportsFloatingPoint()) {
+ m_slowPathJumpList.append(notInt32);
+ return;
+ }
+
+ // Try to do doubleVar + double(intConstant).
+ notInt32.link(&jit);
+ if (!varOpr.definitelyIsNumber())
+ m_slowPathJumpList.append(jit.branchIfNotNumber(var, m_scratchGPR));
+
+ jit.unboxDoubleNonDestructive(var, m_leftFPR, m_scratchGPR, m_scratchFPR);
+
+ jit.move(CCallHelpers::Imm32(constOpr.asConstInt32()), m_scratchGPR);
+ jit.convertInt32ToDouble(m_scratchGPR, m_rightFPR);
+
+ // Fall thru to doubleVar + doubleVar.
+
+ } else {
+ ASSERT(!m_leftOperand.isConstInt32() && !m_rightOperand.isConstInt32());
+ CCallHelpers::Jump leftNotInt;
+ CCallHelpers::Jump rightNotInt;
+
+ // Try to do intVar + intVar.
+ leftNotInt = jit.branchIfNotInt32(m_left);
+ rightNotInt = jit.branchIfNotInt32(m_right);
+
+ m_slowPathJumpList.append(jit.branchAdd32(CCallHelpers::Overflow, m_right.payloadGPR(), m_left.payloadGPR(), m_scratchGPR));
+
+ jit.boxInt32(m_scratchGPR, m_result);
+ m_endJumpList.append(jit.jump());
+
+ if (!jit.supportsFloatingPoint()) {
+ m_slowPathJumpList.append(leftNotInt);
+ m_slowPathJumpList.append(rightNotInt);
+ return;
+ }
+
+ leftNotInt.link(&jit);
+ if (!m_leftOperand.definitelyIsNumber())
+ m_slowPathJumpList.append(jit.branchIfNotNumber(m_left, m_scratchGPR));
+ if (!m_rightOperand.definitelyIsNumber())
+ m_slowPathJumpList.append(jit.branchIfNotNumber(m_right, m_scratchGPR));
+
+ jit.unboxDoubleNonDestructive(m_left, m_leftFPR, m_scratchGPR, m_scratchFPR);
+ CCallHelpers::Jump rightIsDouble = jit.branchIfNotInt32(m_right);
+
+ jit.convertInt32ToDouble(m_right.payloadGPR(), m_rightFPR);
+ CCallHelpers::Jump rightWasInteger = jit.jump();
+
+ rightNotInt.link(&jit);
+ if (!m_rightOperand.definitelyIsNumber())
+ m_slowPathJumpList.append(jit.branchIfNotNumber(m_right, m_scratchGPR));
+
+ jit.convertInt32ToDouble(m_left.payloadGPR(), m_leftFPR);
+
+ rightIsDouble.link(&jit);
+ jit.unboxDoubleNonDestructive(m_right, m_rightFPR, m_scratchGPR, m_scratchFPR);
+
+ rightWasInteger.link(&jit);
+
+ // Fall thru to doubleVar + doubleVar.
+ }
+
+ // Do doubleVar + doubleVar.
+ jit.addDouble(m_rightFPR, m_leftFPR);
+ jit.boxDouble(m_leftFPR, m_result);
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITAddGenerator.h b/Source/JavaScriptCore/jit/JITAddGenerator.h
new file mode 100644
index 000000000..c28db7209
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITAddGenerator.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITAddGenerator_h
+#define JITAddGenerator_h
+
+#if ENABLE(JIT)
+
+#include "CCallHelpers.h"
+#include "SnippetOperand.h"
+
+namespace JSC {
+
+class JITAddGenerator {
+public:
+ JITAddGenerator(SnippetOperand leftOperand, SnippetOperand rightOperand,
+ JSValueRegs result, JSValueRegs left, JSValueRegs right,
+ FPRReg leftFPR, FPRReg rightFPR, GPRReg scratchGPR, FPRReg scratchFPR)
+ : m_leftOperand(leftOperand)
+ , m_rightOperand(rightOperand)
+ , m_result(result)
+ , m_left(left)
+ , m_right(right)
+ , m_leftFPR(leftFPR)
+ , m_rightFPR(rightFPR)
+ , m_scratchGPR(scratchGPR)
+ , m_scratchFPR(scratchFPR)
+ {
+ ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());
+ }
+
+ void generateFastPath(CCallHelpers&);
+
+ bool didEmitFastPath() const { return m_didEmitFastPath; }
+ CCallHelpers::JumpList& endJumpList() { return m_endJumpList; }
+ CCallHelpers::JumpList& slowPathJumpList() { return m_slowPathJumpList; }
+
+private:
+ SnippetOperand m_leftOperand;
+ SnippetOperand m_rightOperand;
+ JSValueRegs m_result;
+ JSValueRegs m_left;
+ JSValueRegs m_right;
+ FPRReg m_leftFPR;
+ FPRReg m_rightFPR;
+ GPRReg m_scratchGPR;
+ FPRReg m_scratchFPR;
+ bool m_didEmitFastPath { false };
+
+ CCallHelpers::JumpList m_endJumpList;
+ CCallHelpers::JumpList m_slowPathJumpList;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // JITAddGenerator_h
diff --git a/Source/JavaScriptCore/jit/JITArithmetic.cpp b/Source/JavaScriptCore/jit/JITArithmetic.cpp
index 713d05e3b..2751720b7 100644
--- a/Source/JavaScriptCore/jit/JITArithmetic.cpp
+++ b/Source/JavaScriptCore/jit/JITArithmetic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,28 +29,33 @@
#include "JIT.h"
#include "CodeBlock.h"
+#include "JITAddGenerator.h"
+#include "JITBitAndGenerator.h"
+#include "JITBitOrGenerator.h"
+#include "JITBitXorGenerator.h"
+#include "JITDivGenerator.h"
#include "JITInlines.h"
-#include "JITStubCall.h"
-#include "JITStubs.h"
+#include "JITLeftShiftGenerator.h"
+#include "JITMulGenerator.h"
+#include "JITNegGenerator.h"
+#include "JITOperations.h"
+#include "JITRightShiftGenerator.h"
+#include "JITSubGenerator.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "Interpreter.h"
-#include "Operations.h"
+#include "JSCInlines.h"
#include "ResultType.h"
#include "SamplingTool.h"
+#include "SlowPathCall.h"
-#ifndef NDEBUG
-#include <stdio.h>
-#endif
-
-using namespace std;
namespace JSC {
void JIT::emit_op_jless(Instruction* currentInstruction)
{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
+ int op1 = currentInstruction[1].u.operand;
+ int op2 = currentInstruction[2].u.operand;
unsigned target = currentInstruction[3].u.operand;
emit_compareAndJump(op_jless, op1, op2, target, LessThan);
@@ -58,8 +63,8 @@ void JIT::emit_op_jless(Instruction* currentInstruction)
void JIT::emit_op_jlesseq(Instruction* currentInstruction)
{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
+ int op1 = currentInstruction[1].u.operand;
+ int op2 = currentInstruction[2].u.operand;
unsigned target = currentInstruction[3].u.operand;
emit_compareAndJump(op_jlesseq, op1, op2, target, LessThanOrEqual);
@@ -67,8 +72,8 @@ void JIT::emit_op_jlesseq(Instruction* currentInstruction)
void JIT::emit_op_jgreater(Instruction* currentInstruction)
{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
+ int op1 = currentInstruction[1].u.operand;
+ int op2 = currentInstruction[2].u.operand;
unsigned target = currentInstruction[3].u.operand;
emit_compareAndJump(op_jgreater, op1, op2, target, GreaterThan);
@@ -76,8 +81,8 @@ void JIT::emit_op_jgreater(Instruction* currentInstruction)
void JIT::emit_op_jgreatereq(Instruction* currentInstruction)
{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
+ int op1 = currentInstruction[1].u.operand;
+ int op2 = currentInstruction[2].u.operand;
unsigned target = currentInstruction[3].u.operand;
emit_compareAndJump(op_jgreatereq, op1, op2, target, GreaterThanOrEqual);
@@ -85,8 +90,8 @@ void JIT::emit_op_jgreatereq(Instruction* currentInstruction)
void JIT::emit_op_jnless(Instruction* currentInstruction)
{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
+ int op1 = currentInstruction[1].u.operand;
+ int op2 = currentInstruction[2].u.operand;
unsigned target = currentInstruction[3].u.operand;
emit_compareAndJump(op_jnless, op1, op2, target, GreaterThanOrEqual);
@@ -94,8 +99,8 @@ void JIT::emit_op_jnless(Instruction* currentInstruction)
void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
+ int op1 = currentInstruction[1].u.operand;
+ int op2 = currentInstruction[2].u.operand;
unsigned target = currentInstruction[3].u.operand;
emit_compareAndJump(op_jnlesseq, op1, op2, target, GreaterThan);
@@ -103,8 +108,8 @@ void JIT::emit_op_jnlesseq(Instruction* currentInstruction)
void JIT::emit_op_jngreater(Instruction* currentInstruction)
{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
+ int op1 = currentInstruction[1].u.operand;
+ int op2 = currentInstruction[2].u.operand;
unsigned target = currentInstruction[3].u.operand;
emit_compareAndJump(op_jngreater, op1, op2, target, LessThanOrEqual);
@@ -112,8 +117,8 @@ void JIT::emit_op_jngreater(Instruction* currentInstruction)
void JIT::emit_op_jngreatereq(Instruction* currentInstruction)
{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
+ int op1 = currentInstruction[1].u.operand;
+ int op2 = currentInstruction[2].u.operand;
unsigned target = currentInstruction[3].u.operand;
emit_compareAndJump(op_jngreatereq, op1, op2, target, LessThan);
@@ -121,317 +126,107 @@ void JIT::emit_op_jngreatereq(Instruction* currentInstruction)
void JIT::emitSlow_op_jless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
+ int op1 = currentInstruction[1].u.operand;
+ int op2 = currentInstruction[2].u.operand;
unsigned target = currentInstruction[3].u.operand;
- emit_compareAndJumpSlow(op1, op2, target, DoubleLessThan, cti_op_jless, false, iter);
+ emit_compareAndJumpSlow(op1, op2, target, DoubleLessThan, operationCompareLess, false, iter);
}
void JIT::emitSlow_op_jlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
+ int op1 = currentInstruction[1].u.operand;
+ int op2 = currentInstruction[2].u.operand;
unsigned target = currentInstruction[3].u.operand;
- emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrEqual, cti_op_jlesseq, false, iter);
+ emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrEqual, operationCompareLessEq, false, iter);
}
void JIT::emitSlow_op_jgreater(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
+ int op1 = currentInstruction[1].u.operand;
+ int op2 = currentInstruction[2].u.operand;
unsigned target = currentInstruction[3].u.operand;
- emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThan, cti_op_jgreater, false, iter);
+ emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThan, operationCompareGreater, false, iter);
}
void JIT::emitSlow_op_jgreatereq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
+ int op1 = currentInstruction[1].u.operand;
+ int op2 = currentInstruction[2].u.operand;
unsigned target = currentInstruction[3].u.operand;
- emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrEqual, cti_op_jgreatereq, false, iter);
+ emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrEqual, operationCompareGreaterEq, false, iter);
}
void JIT::emitSlow_op_jnless(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
+ int op1 = currentInstruction[1].u.operand;
+ int op2 = currentInstruction[2].u.operand;
unsigned target = currentInstruction[3].u.operand;
- emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrEqualOrUnordered, cti_op_jless, true, iter);
+ emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrEqualOrUnordered, operationCompareLess, true, iter);
}
void JIT::emitSlow_op_jnlesseq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
+ int op1 = currentInstruction[1].u.operand;
+ int op2 = currentInstruction[2].u.operand;
unsigned target = currentInstruction[3].u.operand;
- emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrUnordered, cti_op_jlesseq, true, iter);
+ emit_compareAndJumpSlow(op1, op2, target, DoubleGreaterThanOrUnordered, operationCompareLessEq, true, iter);
}
void JIT::emitSlow_op_jngreater(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
+ int op1 = currentInstruction[1].u.operand;
+ int op2 = currentInstruction[2].u.operand;
unsigned target = currentInstruction[3].u.operand;
- emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrEqualOrUnordered, cti_op_jgreater, true, iter);
+ emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrEqualOrUnordered, operationCompareGreater, true, iter);
}
void JIT::emitSlow_op_jngreatereq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned op1 = currentInstruction[1].u.operand;
- unsigned op2 = currentInstruction[2].u.operand;
+ int op1 = currentInstruction[1].u.operand;
+ int op2 = currentInstruction[2].u.operand;
unsigned target = currentInstruction[3].u.operand;
- emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrUnordered, cti_op_jgreatereq, true, iter);
+ emit_compareAndJumpSlow(op1, op2, target, DoubleLessThanOrUnordered, operationCompareGreaterEq, true, iter);
}
#if USE(JSVALUE64)
-void JIT::emit_op_negate(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src = currentInstruction[2].u.operand;
-
- emitGetVirtualRegister(src, regT0);
-
- Jump srcNotInt = emitJumpIfNotImmediateInteger(regT0);
- addSlowCase(branchTest32(Zero, regT0, TrustedImm32(0x7fffffff)));
- neg32(regT0);
- emitFastArithReTagImmediate(regT0, regT0);
-
- Jump end = jump();
-
- srcNotInt.link(this);
- emitJumpSlowCaseIfNotImmediateNumber(regT0);
-
- move(TrustedImm64((int64_t)0x8000000000000000ull), regT1);
- xor64(regT1, regT0);
-
- end.link(this);
- emitPutVirtualRegister(dst);
-}
-
-void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
-
- linkSlowCase(iter); // 0x7fffffff check
- linkSlowCase(iter); // double check
-
- JITStubCall stubCall(this, cti_op_negate);
- stubCall.addArgument(regT0);
- stubCall.call(dst);
-}
-
-void JIT::emit_op_lshift(Instruction* currentInstruction)
+void JIT::emit_op_unsigned(Instruction* currentInstruction)
{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- emitGetVirtualRegisters(op1, regT0, op2, regT2);
- // FIXME: would we be better using 'emitJumpSlowCaseIfNotImmediateIntegers'? - we *probably* ought to be consistent.
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT2);
- emitFastArithImmToInt(regT0);
- emitFastArithImmToInt(regT2);
- lshift32(regT2, regT0);
- emitFastArithReTagImmediate(regT0, regT0);
- emitPutVirtualRegister(result);
+ int result = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotInt(regT0);
+ addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
+ emitTagInt(regT0, regT0);
+ emitPutVirtualRegister(result, regT0);
}
-void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_unsigned(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- UNUSED_PARAM(op1);
- UNUSED_PARAM(op2);
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_lshift);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT2);
- stubCall.call(result);
-}
-
-void JIT::emit_op_rshift(Instruction* currentInstruction)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- if (isOperandConstantImmediateInt(op2)) {
- // isOperandConstantImmediateInt(op2) => 1 SlowCase
- emitGetVirtualRegister(op1, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- // Mask with 0x1f as per ecma-262 11.7.2 step 7.
- rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
- } else {
- emitGetVirtualRegisters(op1, regT0, op2, regT2);
- if (supportsFloatingPointTruncate()) {
- Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
- // supportsFloatingPoint() && USE(JSVALUE64) => 3 SlowCases
- addSlowCase(emitJumpIfNotImmediateNumber(regT0));
- add64(tagTypeNumberRegister, regT0);
- move64ToDouble(regT0, fpRegT0);
- addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
- lhsIsInt.link(this);
- emitJumpSlowCaseIfNotImmediateInteger(regT2);
- } else {
- // !supportsFloatingPoint() => 2 SlowCases
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT2);
- }
- emitFastArithImmToInt(regT2);
- rshift32(regT2, regT0);
- }
- emitFastArithIntToImmNoCheck(regT0, regT0);
- emitPutVirtualRegister(result);
-}
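
// [Editor's sketch, not part of the patch] What the "mask with 0x1f" comment
// above refers to: ECMA-262 takes the shift count modulo 32, so shifting by 33
// behaves like shifting by 1 and shifting by 32 is a no-op.
#include <cassert>
#include <cstdint>

static int32_t jsRightShift(int32_t lhs, uint32_t rhs)
{
    return lhs >> (rhs & 0x1f); // shiftCount = rhs & 0x1F
}

int main()
{
    assert(jsRightShift(8, 33) == 4); // 33 & 31 == 1
    assert(jsRightShift(8, 32) == 8); // multiple of 32: value unchanged
    return 0;
}
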
-
-void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_rshift);
-
- if (isOperandConstantImmediateInt(op2)) {
- linkSlowCase(iter);
- stubCall.addArgument(regT0);
- stubCall.addArgument(op2, regT2);
- } else {
- if (supportsFloatingPointTruncate()) {
- linkSlowCase(iter);
- linkSlowCase(iter);
- linkSlowCase(iter);
- // We're reloading op1 to regT0 as we can no longer guarantee that
- // we have not munged the operand. It may have already been shifted
- // correctly, but it still will not have been tagged.
- stubCall.addArgument(op1, regT0);
- stubCall.addArgument(regT2);
- } else {
- linkSlowCase(iter);
- linkSlowCase(iter);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT2);
- }
- }
-
- stubCall.call(result);
-}
-
-void JIT::emit_op_urshift(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- // Slow case of urshift makes assumptions about what registers hold the
- // shift arguments, so any changes must be updated there as well.
- if (isOperandConstantImmediateInt(op2)) {
- emitGetVirtualRegister(op1, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- emitFastArithImmToInt(regT0);
- int shift = getConstantOperand(op2).asInt32();
- if (shift)
- urshift32(Imm32(shift & 0x1f), regT0);
- // unsigned shift < 0 or shift = k*2^32 may result in (essentially)
- // a toUint conversion, which can result in a value we cannot represent
- // as an immediate int.
- if (shift < 0 || !(shift & 31))
- addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
- emitFastArithReTagImmediate(regT0, regT0);
- emitPutVirtualRegister(dst, regT0);
- return;
- }
- emitGetVirtualRegisters(op1, regT0, op2, regT1);
- if (!isOperandConstantImmediateInt(op1))
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT1);
- emitFastArithImmToInt(regT0);
- emitFastArithImmToInt(regT1);
- urshift32(regT1, regT0);
- addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
- emitFastArithReTagImmediate(regT0, regT0);
- emitPutVirtualRegister(dst, regT0);
-}
-
-void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- if (isOperandConstantImmediateInt(op2)) {
- int shift = getConstantOperand(op2).asInt32();
- // op1 = regT0
- linkSlowCase(iter); // int32 check
- if (supportsFloatingPointTruncate()) {
- JumpList failures;
- failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
- add64(tagTypeNumberRegister, regT0);
- move64ToDouble(regT0, fpRegT0);
- failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
- if (shift)
- urshift32(Imm32(shift & 0x1f), regT0);
- if (shift < 0 || !(shift & 31))
- failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
- emitFastArithReTagImmediate(regT0, regT0);
- emitPutVirtualRegister(dst, regT0);
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
- failures.link(this);
- }
- if (shift < 0 || !(shift & 31))
- linkSlowCase(iter); // failed to box in hot path
- } else {
- // op1 = regT0
- // op2 = regT1
- if (!isOperandConstantImmediateInt(op1)) {
- linkSlowCase(iter); // int32 check -- op1 is not an int
- if (supportsFloatingPointTruncate()) {
- JumpList failures;
- failures.append(emitJumpIfNotImmediateNumber(regT0)); // op1 is not a double
- add64(tagTypeNumberRegister, regT0);
- move64ToDouble(regT0, fpRegT0);
- failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
- failures.append(emitJumpIfNotImmediateInteger(regT1)); // op2 is not an int
- emitFastArithImmToInt(regT1);
- urshift32(regT1, regT0);
- failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
- emitFastArithReTagImmediate(regT0, regT0);
- emitPutVirtualRegister(dst, regT0);
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
- failures.link(this);
- }
- }
-
- linkSlowCase(iter); // int32 check - op2 is not an int
- linkSlowCase(iter); // Can't represent unsigned result as an immediate
- }
- JITStubCall stubCall(this, cti_op_urshift);
- stubCall.addArgument(op1, regT0);
- stubCall.addArgument(op2, regT1);
- stubCall.call(dst);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_unsigned);
+ slowPathCall.call();
}
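
// [Editor's sketch, not part of the patch] Why the op_unsigned fast path above
// bails out on a negative int32: reinterpreting a value with the sign bit set
// as unsigned yields something above INT32_MAX, which cannot be kept as an
// immediate int and has to be boxed as a double on the slow path.
#include <cstdint>
#include <cstdio>

int main()
{
    int32_t v = -1;
    uint32_t u = static_cast<uint32_t>(v); // 4294967295
    std::printf("fits in int32: %s\n", u <= 0x7fffffffu ? "yes" : "no"); // "no"
    return 0;
}
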
-void JIT::emit_compareAndJump(OpcodeID, unsigned op1, unsigned op2, unsigned target, RelationalCondition condition)
+void JIT::emit_compareAndJump(OpcodeID, int op1, int op2, unsigned target, RelationalCondition condition)
{
// We generate inline code for the following cases in the fast path:
// - int immediate to constant int immediate
// - constant int immediate to int immediate
// - int immediate to int immediate
- if (isOperandConstantImmediateChar(op1)) {
+ if (isOperandConstantChar(op1)) {
emitGetVirtualRegister(op2, regT0);
addSlowCase(emitJumpIfNotJSCell(regT0));
JumpList failures;
@@ -440,7 +235,7 @@ void JIT::emit_compareAndJump(OpcodeID, unsigned op1, unsigned op2, unsigned tar
addJump(branch32(commute(condition), regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
return;
}
- if (isOperandConstantImmediateChar(op2)) {
+ if (isOperandConstantChar(op2)) {
emitGetVirtualRegister(op1, regT0);
addSlowCase(emitJumpIfNotJSCell(regT0));
JumpList failures;
@@ -449,26 +244,26 @@ void JIT::emit_compareAndJump(OpcodeID, unsigned op1, unsigned op2, unsigned tar
addJump(branch32(condition, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
return;
}
- if (isOperandConstantImmediateInt(op2)) {
+ if (isOperandConstantInt(op2)) {
emitGetVirtualRegister(op1, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- int32_t op2imm = getConstantOperandImmediateInt(op2);
+ emitJumpSlowCaseIfNotInt(regT0);
+ int32_t op2imm = getOperandConstantInt(op2);
addJump(branch32(condition, regT0, Imm32(op2imm)), target);
- } else if (isOperandConstantImmediateInt(op1)) {
+ } else if (isOperandConstantInt(op1)) {
emitGetVirtualRegister(op2, regT1);
- emitJumpSlowCaseIfNotImmediateInteger(regT1);
- int32_t op1imm = getConstantOperandImmediateInt(op1);
+ emitJumpSlowCaseIfNotInt(regT1);
+ int32_t op1imm = getOperandConstantInt(op1);
addJump(branch32(commute(condition), regT1, Imm32(op1imm)), target);
} else {
emitGetVirtualRegisters(op1, regT0, op2, regT1);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT1);
+ emitJumpSlowCaseIfNotInt(regT0);
+ emitJumpSlowCaseIfNotInt(regT1);
addJump(branch32(condition, regT0, regT1), target);
}
}
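
// [Editor's sketch, not part of the patch] A minimal, self-contained picture of
// why the constant-on-the-left cases above use commute(condition):
// branch32(cond, reg, imm) tests "reg cond imm", so a comparison written as
// "imm < reg" has to be rewritten as "reg > imm". This commute() is a local
// stand-in for illustration, not the MacroAssembler one.
#include <cassert>

enum Condition { LessThan, LessThanOrEqual, GreaterThan, GreaterThanOrEqual };

static Condition commute(Condition c)
{
    switch (c) {
    case LessThan: return GreaterThan;
    case LessThanOrEqual: return GreaterThanOrEqual;
    case GreaterThan: return LessThan;
    case GreaterThanOrEqual: return LessThanOrEqual;
    }
    return c;
}

int main()
{
    // "5 < x" holds exactly when "x > 5" holds.
    assert(commute(LessThan) == GreaterThan);
    return 0;
}
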
-void JIT::emit_compareAndJumpSlow(unsigned op1, unsigned op2, unsigned target, DoubleCondition condition, int (JIT_STUB *stub)(STUB_ARGS_DECLARATION), bool invert, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondition condition, size_t (JIT_OPERATION *operation)(ExecState*, EncodedJSValue, EncodedJSValue), bool invert, Vector<SlowCaseEntry>::iterator& iter)
{
COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jlesseq), OPCODE_LENGTH_op_jlesseq_equals_op_jless);
COMPILE_ASSERT(OPCODE_LENGTH(op_jless) == OPCODE_LENGTH(op_jnless), OPCODE_LENGTH_op_jnless_equals_op_jless);
@@ -482,24 +277,24 @@ void JIT::emit_compareAndJumpSlow(unsigned op1, unsigned op2, unsigned target, D
// - floating-point number to constant int immediate
// - constant int immediate to floating-point number
// - floating-point number to floating-point number.
- if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
+ if (isOperandConstantChar(op1) || isOperandConstantChar(op2)) {
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, stub);
- stubCall.addArgument(op1, regT0);
- stubCall.addArgument(op2, regT1);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
+
+ emitGetVirtualRegister(op1, argumentGPR0);
+ emitGetVirtualRegister(op2, argumentGPR1);
+ callOperation(operation, argumentGPR0, argumentGPR1);
+ emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
return;
}
- if (isOperandConstantImmediateInt(op2)) {
+ if (isOperandConstantInt(op2)) {
linkSlowCase(iter);
if (supportsFloatingPoint()) {
- Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
+ Jump fail1 = emitJumpIfNotNumber(regT0);
add64(tagTypeNumberRegister, regT0);
move64ToDouble(regT0, fpRegT0);
@@ -515,17 +310,14 @@ void JIT::emit_compareAndJumpSlow(unsigned op1, unsigned op2, unsigned target, D
fail1.link(this);
}
- JITStubCall stubCall(this, stub);
- stubCall.addArgument(regT0);
- stubCall.addArgument(op2, regT2);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
-
- } else if (isOperandConstantImmediateInt(op1)) {
+ emitGetVirtualRegister(op2, regT1);
+ callOperation(operation, regT0, regT1);
+ emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
+ } else if (isOperandConstantInt(op1)) {
linkSlowCase(iter);
if (supportsFloatingPoint()) {
- Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
+ Jump fail1 = emitJumpIfNotNumber(regT1);
add64(tagTypeNumberRegister, regT1);
move64ToDouble(regT1, fpRegT1);
@@ -541,18 +333,16 @@ void JIT::emit_compareAndJumpSlow(unsigned op1, unsigned op2, unsigned target, D
fail1.link(this);
}
- JITStubCall stubCall(this, stub);
- stubCall.addArgument(op1, regT2);
- stubCall.addArgument(regT1);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
+ emitGetVirtualRegister(op1, regT2);
+ callOperation(operation, regT2, regT1);
+ emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
} else {
linkSlowCase(iter);
if (supportsFloatingPoint()) {
- Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
- Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
- Jump fail3 = emitJumpIfImmediateInteger(regT1);
+ Jump fail1 = emitJumpIfNotNumber(regT0);
+ Jump fail2 = emitJumpIfNotNumber(regT1);
+ Jump fail3 = emitJumpIfInt(regT1);
add64(tagTypeNumberRegister, regT0);
add64(tagTypeNumberRegister, regT1);
move64ToDouble(regT0, fpRegT0);
@@ -568,113 +358,47 @@ void JIT::emit_compareAndJumpSlow(unsigned op1, unsigned op2, unsigned target, D
}
linkSlowCase(iter);
- JITStubCall stubCall(this, stub);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
- }
-}
-
-void JIT::emit_op_bitand(Instruction* currentInstruction)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- if (isOperandConstantImmediateInt(op1)) {
- emitGetVirtualRegister(op2, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- int32_t imm = getConstantOperandImmediateInt(op1);
- and64(Imm32(imm), regT0);
- if (imm >= 0)
- emitFastArithIntToImmNoCheck(regT0, regT0);
- } else if (isOperandConstantImmediateInt(op2)) {
- emitGetVirtualRegister(op1, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- int32_t imm = getConstantOperandImmediateInt(op2);
- and64(Imm32(imm), regT0);
- if (imm >= 0)
- emitFastArithIntToImmNoCheck(regT0, regT0);
- } else {
- emitGetVirtualRegisters(op1, regT0, op2, regT1);
- and64(regT1, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- }
- emitPutVirtualRegister(result);
-}
-
-void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- linkSlowCase(iter);
- if (isOperandConstantImmediateInt(op1)) {
- JITStubCall stubCall(this, cti_op_bitand);
- stubCall.addArgument(op1, regT2);
- stubCall.addArgument(regT0);
- stubCall.call(result);
- } else if (isOperandConstantImmediateInt(op2)) {
- JITStubCall stubCall(this, cti_op_bitand);
- stubCall.addArgument(regT0);
- stubCall.addArgument(op2, regT2);
- stubCall.call(result);
- } else {
- JITStubCall stubCall(this, cti_op_bitand);
- stubCall.addArgument(op1, regT2);
- stubCall.addArgument(regT1);
- stubCall.call(result);
+ callOperation(operation, regT0, regT1);
+ emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
}
}
void JIT::emit_op_inc(Instruction* currentInstruction)
{
- unsigned srcDst = currentInstruction[1].u.operand;
+ int srcDst = currentInstruction[1].u.operand;
emitGetVirtualRegister(srcDst, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotInt(regT0);
addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
- emitFastArithIntToImmNoCheck(regT0, regT0);
+ emitTagInt(regT0, regT0);
emitPutVirtualRegister(srcDst);
}
void JIT::emitSlow_op_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned srcDst = currentInstruction[1].u.operand;
-
- Jump notImm = getSlowCase(iter);
linkSlowCase(iter);
- emitGetVirtualRegister(srcDst, regT0);
- notImm.link(this);
- JITStubCall stubCall(this, cti_op_inc);
- stubCall.addArgument(regT0);
- stubCall.call(srcDst);
+ linkSlowCase(iter);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_inc);
+ slowPathCall.call();
}
void JIT::emit_op_dec(Instruction* currentInstruction)
{
- unsigned srcDst = currentInstruction[1].u.operand;
+ int srcDst = currentInstruction[1].u.operand;
emitGetVirtualRegister(srcDst, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotInt(regT0);
addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
- emitFastArithIntToImmNoCheck(regT0, regT0);
+ emitTagInt(regT0, regT0);
emitPutVirtualRegister(srcDst);
}
void JIT::emitSlow_op_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned srcDst = currentInstruction[1].u.operand;
-
- Jump notImm = getSlowCase(iter);
linkSlowCase(iter);
- emitGetVirtualRegister(srcDst, regT0);
- notImm.link(this);
- JITStubCall stubCall(this, cti_op_dec);
- stubCall.addArgument(regT0);
- stubCall.call(srcDst);
+ linkSlowCase(iter);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_dec);
+ slowPathCall.call();
}
/* ------------------------------ BEGIN: OP_MOD ------------------------------ */
@@ -683,436 +407,526 @@ void JIT::emitSlow_op_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>
void JIT::emit_op_mod(Instruction* currentInstruction)
{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
+ int result = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
// Make sure registers are correct for x86 IDIV instructions.
ASSERT(regT0 == X86Registers::eax);
- ASSERT(regT1 == X86Registers::edx);
- ASSERT(regT2 == X86Registers::ecx);
-
- emitGetVirtualRegisters(op1, regT3, op2, regT2);
- emitJumpSlowCaseIfNotImmediateInteger(regT3);
- emitJumpSlowCaseIfNotImmediateInteger(regT2);
-
- move(regT3, regT0);
- addSlowCase(branchTest32(Zero, regT2));
- Jump denominatorNotNeg1 = branch32(NotEqual, regT2, TrustedImm32(-1));
+ auto edx = X86Registers::edx;
+ auto ecx = X86Registers::ecx;
+ ASSERT(regT4 != edx);
+ ASSERT(regT4 != ecx);
+
+ emitGetVirtualRegisters(op1, regT4, op2, ecx);
+ emitJumpSlowCaseIfNotInt(regT4);
+ emitJumpSlowCaseIfNotInt(ecx);
+
+ move(regT4, regT0);
+ addSlowCase(branchTest32(Zero, ecx));
+ Jump denominatorNotNeg1 = branch32(NotEqual, ecx, TrustedImm32(-1));
addSlowCase(branch32(Equal, regT0, TrustedImm32(-2147483647-1)));
denominatorNotNeg1.link(this);
- m_assembler.cdq();
- m_assembler.idivl_r(regT2);
- Jump numeratorPositive = branch32(GreaterThanOrEqual, regT3, TrustedImm32(0));
- addSlowCase(branchTest32(Zero, regT1));
+ x86ConvertToDoubleWord32();
+ x86Div32(ecx);
+ Jump numeratorPositive = branch32(GreaterThanOrEqual, regT4, TrustedImm32(0));
+ addSlowCase(branchTest32(Zero, edx));
numeratorPositive.link(this);
- emitFastArithReTagImmediate(regT1, regT0);
+ emitTagInt(edx, regT0);
emitPutVirtualRegister(result);
}
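
// [Editor's sketch, not part of the patch] Why the fast path above falls back
// when the remainder is zero and the numerator is negative: JS requires x % y
// to take the sign of x, so (-4) % 2 is -0, which has no int32 encoding.
#include <cmath>
#include <cstdio>

int main()
{
    double r = std::fmod(-4.0, 2.0); // fmod keeps the sign of the dividend
    std::printf("%g signbit=%d\n", r, std::signbit(r)); // prints "-0 signbit=1"
    return 0;
}
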
void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned result = currentInstruction[1].u.operand;
-
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_mod);
- stubCall.addArgument(regT3);
- stubCall.addArgument(regT2);
- stubCall.call(result);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mod);
+ slowPathCall.call();
}
#else // CPU(X86) || CPU(X86_64)
void JIT::emit_op_mod(Instruction* currentInstruction)
{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_mod);
- stubCall.addArgument(op1, regT2);
- stubCall.addArgument(op2, regT2);
- stubCall.call(result);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mod);
+ slowPathCall.call();
}
-void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&)
{
- RELEASE_ASSERT_NOT_REACHED();
+ UNREACHABLE_FOR_PLATFORM();
}
#endif // CPU(X86) || CPU(X86_64)
/* ------------------------------ END: OP_MOD ------------------------------ */
-/* ------------------------------ BEGIN: USE(JSVALUE64) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
+#endif // USE(JSVALUE64)
-void JIT::compileBinaryArithOp(OpcodeID opcodeID, unsigned, unsigned op1, unsigned op2, OperandTypes)
+void JIT::emit_op_negate(Instruction* currentInstruction)
{
- emitGetVirtualRegisters(op1, regT0, op2, regT1);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT1);
-#if ENABLE(VALUE_PROFILER)
- RareCaseProfile* profile = m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
-#endif
- if (opcodeID == op_add)
- addSlowCase(branchAdd32(Overflow, regT1, regT0));
- else if (opcodeID == op_sub)
- addSlowCase(branchSub32(Overflow, regT1, regT0));
- else {
- ASSERT(opcodeID == op_mul);
-#if ENABLE(VALUE_PROFILER)
- if (shouldEmitProfiling()) {
- // We want to be able to measure if this is taking the slow case just
- // because of negative zero. If this produces positive zero, then we
- // don't want the slow case to be taken because that will throw off
- // speculative compilation.
- move(regT0, regT2);
- addSlowCase(branchMul32(Overflow, regT1, regT2));
- JumpList done;
- done.append(branchTest32(NonZero, regT2));
- Jump negativeZero = branch32(LessThan, regT0, TrustedImm32(0));
- done.append(branch32(GreaterThanOrEqual, regT1, TrustedImm32(0)));
- negativeZero.link(this);
- // We only get here if we have a genuine negative zero. Record this,
- // so that the speculative JIT knows that we failed speculation
- // because of a negative zero.
- add32(TrustedImm32(1), AbsoluteAddress(&profile->m_counter));
- addSlowCase(jump());
- done.link(this);
- move(regT2, regT0);
- } else {
- addSlowCase(branchMul32(Overflow, regT1, regT0));
- addSlowCase(branchTest32(Zero, regT0));
- }
+ int result = currentInstruction[1].u.operand;
+ int src = currentInstruction[2].u.operand;
+
+#if USE(JSVALUE64)
+ JSValueRegs srcRegs = JSValueRegs(regT0);
+ JSValueRegs resultRegs = srcRegs;
+ GPRReg scratchGPR = regT2;
#else
- addSlowCase(branchMul32(Overflow, regT1, regT0));
- addSlowCase(branchTest32(Zero, regT0));
+ JSValueRegs srcRegs = JSValueRegs(regT1, regT0);
+ JSValueRegs resultRegs = srcRegs;
+ GPRReg scratchGPR = regT4;
#endif
- }
- emitFastArithIntToImmNoCheck(regT0, regT0);
+
+ emitGetVirtualRegister(src, srcRegs);
+
+ JITNegGenerator gen(resultRegs, srcRegs, scratchGPR);
+ gen.generateFastPath(*this);
+
+ ASSERT(gen.didEmitFastPath());
+ gen.endJumpList().link(this);
+ emitPutVirtualRegister(result, resultRegs);
+
+ addSlowCase(gen.slowPathJumpList());
}
-void JIT::compileBinaryArithOpSlowCase(OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, unsigned result, unsigned op1, unsigned op2, OperandTypes types, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase)
+void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- // We assume that subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset.
- COMPILE_ASSERT(((TagTypeNumber + DoubleEncodeOffset) == 0), TagTypeNumber_PLUS_DoubleEncodeOffset_EQUALS_0);
-
- Jump notImm1;
- Jump notImm2;
- if (op1HasImmediateIntFastCase) {
- notImm2 = getSlowCase(iter);
- } else if (op2HasImmediateIntFastCase) {
- notImm1 = getSlowCase(iter);
- } else {
- notImm1 = getSlowCase(iter);
- notImm2 = getSlowCase(iter);
- }
+ linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
- linkSlowCase(iter); // Integer overflow case - we could handle this in JIT code, but this is likely rare.
- if (opcodeID == op_mul && !op1HasImmediateIntFastCase && !op2HasImmediateIntFastCase) // op_mul has an extra slow case to handle 0 * negative number.
- linkSlowCase(iter);
- emitGetVirtualRegister(op1, regT0);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_negate);
+ slowPathCall.call();
+}
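
// [Editor's sketch, not part of the patch] The COMPILE_ASSERT in the removed
// slow-case code above relies on the two constants cancelling out modulo 2^64,
// so subtracting TagTypeNumber and adding DoubleEncodeOffset yield the same
// bits. The values used here are the usual JSVALUE64 encoding constants
// (TagTypeNumber = 0xffff000000000000, DoubleEncodeOffset = 1 << 48).
#include <cassert>
#include <cstdint>

int main()
{
    const uint64_t TagTypeNumber = 0xffff000000000000ull;
    const uint64_t DoubleEncodeOffset = 0x0001000000000000ull;
    assert(TagTypeNumber + DoubleEncodeOffset == 0); // wraps around to zero

    uint64_t boxed = 0x4008000000000000ull + DoubleEncodeOffset; // boxed 3.0
    assert(boxed - TagTypeNumber == boxed + DoubleEncodeOffset);
    return 0;
}
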
- Label stubFunctionCall(this);
- JITStubCall stubCall(this, opcodeID == op_add ? cti_op_add : opcodeID == op_sub ? cti_op_sub : cti_op_mul);
- if (op1HasImmediateIntFastCase || op2HasImmediateIntFastCase) {
- emitGetVirtualRegister(op1, regT0);
- emitGetVirtualRegister(op2, regT1);
- }
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call(result);
- Jump end = jump();
-
- if (op1HasImmediateIntFastCase) {
- notImm2.link(this);
- if (!types.second().definitelyIsNumber())
- emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
- emitGetVirtualRegister(op1, regT1);
- convertInt32ToDouble(regT1, fpRegT1);
- add64(tagTypeNumberRegister, regT0);
- move64ToDouble(regT0, fpRegT2);
- } else if (op2HasImmediateIntFastCase) {
- notImm1.link(this);
- if (!types.first().definitelyIsNumber())
- emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
- emitGetVirtualRegister(op2, regT1);
- convertInt32ToDouble(regT1, fpRegT1);
- add64(tagTypeNumberRegister, regT0);
- move64ToDouble(regT0, fpRegT2);
- } else {
- // if we get here, eax is not an int32, edx not yet checked.
- notImm1.link(this);
- if (!types.first().definitelyIsNumber())
- emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
- if (!types.second().definitelyIsNumber())
- emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
- add64(tagTypeNumberRegister, regT0);
- move64ToDouble(regT0, fpRegT1);
- Jump op2isDouble = emitJumpIfNotImmediateInteger(regT1);
- convertInt32ToDouble(regT1, fpRegT2);
- Jump op2wasInteger = jump();
-
- // if we get here, eax IS an int32, edx is not.
- notImm2.link(this);
- if (!types.second().definitelyIsNumber())
- emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
- convertInt32ToDouble(regT0, fpRegT1);
- op2isDouble.link(this);
- add64(tagTypeNumberRegister, regT1);
- move64ToDouble(regT1, fpRegT2);
- op2wasInteger.link(this);
- }
+template<typename SnippetGenerator>
+void JIT::emitBitBinaryOpFastPath(Instruction* currentInstruction)
+{
+ int result = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
- if (opcodeID == op_add)
- addDouble(fpRegT2, fpRegT1);
- else if (opcodeID == op_sub)
- subDouble(fpRegT2, fpRegT1);
- else if (opcodeID == op_mul)
- mulDouble(fpRegT2, fpRegT1);
- else {
- ASSERT(opcodeID == op_div);
- divDouble(fpRegT2, fpRegT1);
- }
- moveDoubleTo64(fpRegT1, regT0);
- sub64(tagTypeNumberRegister, regT0);
- emitPutVirtualRegister(result, regT0);
+#if USE(JSVALUE64)
+ JSValueRegs leftRegs = JSValueRegs(regT0);
+ JSValueRegs rightRegs = JSValueRegs(regT1);
+ JSValueRegs resultRegs = leftRegs;
+ GPRReg scratchGPR = regT2;
+#else
+ JSValueRegs leftRegs = JSValueRegs(regT1, regT0);
+ JSValueRegs rightRegs = JSValueRegs(regT3, regT2);
+ JSValueRegs resultRegs = leftRegs;
+ GPRReg scratchGPR = regT4;
+#endif
+
+ SnippetOperand leftOperand;
+ SnippetOperand rightOperand;
+
+ if (isOperandConstantInt(op1))
+ leftOperand.setConstInt32(getOperandConstantInt(op1));
+ else if (isOperandConstantInt(op2))
+ rightOperand.setConstInt32(getOperandConstantInt(op2));
+
+ RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
+
+ if (!leftOperand.isConst())
+ emitGetVirtualRegister(op1, leftRegs);
+ if (!rightOperand.isConst())
+ emitGetVirtualRegister(op2, rightRegs);
+
+ SnippetGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, scratchGPR);
+
+ gen.generateFastPath(*this);
- end.link(this);
+ ASSERT(gen.didEmitFastPath());
+ gen.endJumpList().link(this);
+ emitPutVirtualRegister(result, resultRegs);
+
+ addSlowCase(gen.slowPathJumpList());
}
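
// [Editor's sketch, not part of the patch] A simplified stand-in for the
// SnippetOperand pattern used above: at most one side is baked in as a
// constant, the other always comes from a register, and the generator asserts
// that invariant (two constant operands would have been folded earlier).
#include <cassert>
#include <cstdint>

struct Operand {
    bool hasConstant = false;
    int32_t constant = 0;
    bool isConst() const { return hasConstant; }
    void setConstInt32(int32_t value) { hasConstant = true; constant = value; }
};

int main()
{
    Operand left, right;
    right.setConstInt32(42); // e.g. "x & 42"
    assert(!left.isConst() || !right.isConst()); // never both constant
    return 0;
}
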
-void JIT::emit_op_add(Instruction* currentInstruction)
+void JIT::emit_op_bitand(Instruction* currentInstruction)
{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+ emitBitBinaryOpFastPath<JITBitAndGenerator>(currentInstruction);
+}
- if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
- addSlowCase();
- JITStubCall stubCall(this, cti_op_add);
- stubCall.addArgument(op1, regT2);
- stubCall.addArgument(op2, regT2);
- stubCall.call(result);
- return;
- }
+void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
- if (isOperandConstantImmediateInt(op1)) {
- emitGetVirtualRegister(op2, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- addSlowCase(branchAdd32(Overflow, regT0, Imm32(getConstantOperandImmediateInt(op1)), regT1));
- emitFastArithIntToImmNoCheck(regT1, regT0);
- } else if (isOperandConstantImmediateInt(op2)) {
- emitGetVirtualRegister(op1, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- addSlowCase(branchAdd32(Overflow, regT0, Imm32(getConstantOperandImmediateInt(op2)), regT1));
- emitFastArithIntToImmNoCheck(regT1, regT0);
- } else
- compileBinaryArithOp(op_add, result, op1, op2, types);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitand);
+ slowPathCall.call();
+}
- emitPutVirtualRegister(result);
+void JIT::emit_op_bitor(Instruction* currentInstruction)
+{
+ emitBitBinaryOpFastPath<JITBitOrGenerator>(currentInstruction);
}
-void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+ linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
- if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
- linkDummySlowCase(iter);
- return;
- }
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitor);
+ slowPathCall.call();
+}
- bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1);
- bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2);
- compileBinaryArithOpSlowCase(op_add, iter, result, op1, op2, types, op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
+void JIT::emit_op_bitxor(Instruction* currentInstruction)
+{
+ emitBitBinaryOpFastPath<JITBitXorGenerator>(currentInstruction);
}
-void JIT::emit_op_mul(Instruction* currentInstruction)
+void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+ linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
- // For now, only plant a fast int case if the constant operand is greater than zero.
- int32_t value;
- if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
-#if ENABLE(VALUE_PROFILER)
- // Add a special fast case profile because the DFG JIT will expect one.
- m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
-#endif
- emitGetVirtualRegister(op2, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT1));
- emitFastArithReTagImmediate(regT1, regT0);
- } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
-#if ENABLE(VALUE_PROFILER)
- // Add a special fast case profile because the DFG JIT will expect one.
- m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitxor);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_lshift(Instruction* currentInstruction)
+{
+ emitBitBinaryOpFastPath<JITLeftShiftGenerator>(currentInstruction);
+}
+
+void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_lshift);
+ slowPathCall.call();
+}
+
+void JIT::emitRightShiftFastPath(Instruction* currentInstruction, OpcodeID opcodeID)
+{
+ ASSERT(opcodeID == op_rshift || opcodeID == op_urshift);
+
+ JITRightShiftGenerator::ShiftType snippetShiftType = opcodeID == op_rshift ?
+ JITRightShiftGenerator::SignedShift : JITRightShiftGenerator::UnsignedShift;
+
+ int result = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
+
+#if USE(JSVALUE64)
+ JSValueRegs leftRegs = JSValueRegs(regT0);
+ JSValueRegs rightRegs = JSValueRegs(regT1);
+ JSValueRegs resultRegs = leftRegs;
+ GPRReg scratchGPR = regT2;
+ FPRReg scratchFPR = InvalidFPRReg;
+#else
+ JSValueRegs leftRegs = JSValueRegs(regT1, regT0);
+ JSValueRegs rightRegs = JSValueRegs(regT3, regT2);
+ JSValueRegs resultRegs = leftRegs;
+ GPRReg scratchGPR = regT4;
+ FPRReg scratchFPR = fpRegT2;
#endif
- emitGetVirtualRegister(op1, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT1));
- emitFastArithReTagImmediate(regT1, regT0);
- } else
- compileBinaryArithOp(op_mul, result, op1, op2, types);
- emitPutVirtualRegister(result);
+ SnippetOperand leftOperand;
+ SnippetOperand rightOperand;
+
+ if (isOperandConstantInt(op1))
+ leftOperand.setConstInt32(getOperandConstantInt(op1));
+ else if (isOperandConstantInt(op2))
+ rightOperand.setConstInt32(getOperandConstantInt(op2));
+
+ RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
+
+ if (!leftOperand.isConst())
+ emitGetVirtualRegister(op1, leftRegs);
+ if (!rightOperand.isConst())
+ emitGetVirtualRegister(op2, rightRegs);
+
+ JITRightShiftGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
+ fpRegT0, scratchGPR, scratchFPR, snippetShiftType);
+
+ gen.generateFastPath(*this);
+
+ ASSERT(gen.didEmitFastPath());
+ gen.endJumpList().link(this);
+ emitPutVirtualRegister(result, resultRegs);
+
+ addSlowCase(gen.slowPathJumpList());
}
-void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emit_op_rshift(Instruction* currentInstruction)
{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+ emitRightShiftFastPath(currentInstruction, op_rshift);
+}
- bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1) && getConstantOperandImmediateInt(op1) > 0;
- bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2) && getConstantOperandImmediateInt(op2) > 0;
- compileBinaryArithOpSlowCase(op_mul, iter, result, op1, op2, types, op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
+void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_rshift);
+ slowPathCall.call();
}
-void JIT::emit_op_div(Instruction* currentInstruction)
+void JIT::emit_op_urshift(Instruction* currentInstruction)
+{
+ emitRightShiftFastPath(currentInstruction, op_urshift);
+}
+
+void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
+ linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_urshift);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_add(Instruction* currentInstruction)
+{
+ int result = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
- if (isOperandConstantImmediateDouble(op1)) {
- emitGetVirtualRegister(op1, regT0);
- add64(tagTypeNumberRegister, regT0);
- move64ToDouble(regT0, fpRegT0);
- } else if (isOperandConstantImmediateInt(op1)) {
- emitLoadInt32ToDouble(op1, fpRegT0);
- } else {
- emitGetVirtualRegister(op1, regT0);
- if (!types.first().definitelyIsNumber())
- emitJumpSlowCaseIfNotImmediateNumber(regT0);
- Jump notInt = emitJumpIfNotImmediateInteger(regT0);
- convertInt32ToDouble(regT0, fpRegT0);
- Jump skipDoubleLoad = jump();
- notInt.link(this);
- add64(tagTypeNumberRegister, regT0);
- move64ToDouble(regT0, fpRegT0);
- skipDoubleLoad.link(this);
- }
+#if USE(JSVALUE64)
+ JSValueRegs leftRegs = JSValueRegs(regT0);
+ JSValueRegs rightRegs = JSValueRegs(regT1);
+ JSValueRegs resultRegs = leftRegs;
+ GPRReg scratchGPR = regT2;
+ FPRReg scratchFPR = InvalidFPRReg;
+#else
+ JSValueRegs leftRegs = JSValueRegs(regT1, regT0);
+ JSValueRegs rightRegs = JSValueRegs(regT3, regT2);
+ JSValueRegs resultRegs = leftRegs;
+ GPRReg scratchGPR = regT4;
+ FPRReg scratchFPR = fpRegT2;
+#endif
- if (isOperandConstantImmediateDouble(op2)) {
- emitGetVirtualRegister(op2, regT1);
- add64(tagTypeNumberRegister, regT1);
- move64ToDouble(regT1, fpRegT1);
- } else if (isOperandConstantImmediateInt(op2)) {
- emitLoadInt32ToDouble(op2, fpRegT1);
+ SnippetOperand leftOperand(types.first());
+ SnippetOperand rightOperand(types.second());
+
+ if (isOperandConstantInt(op1))
+ leftOperand.setConstInt32(getOperandConstantInt(op1));
+ else if (isOperandConstantInt(op2))
+ rightOperand.setConstInt32(getOperandConstantInt(op2));
+
+ RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
+
+ if (!leftOperand.isConst())
+ emitGetVirtualRegister(op1, leftRegs);
+ if (!rightOperand.isConst())
+ emitGetVirtualRegister(op2, rightRegs);
+
+ JITAddGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
+ fpRegT0, fpRegT1, scratchGPR, scratchFPR);
+
+ gen.generateFastPath(*this);
+
+ if (gen.didEmitFastPath()) {
+ gen.endJumpList().link(this);
+ emitPutVirtualRegister(result, resultRegs);
+
+ addSlowCase(gen.slowPathJumpList());
} else {
- emitGetVirtualRegister(op2, regT1);
- if (!types.second().definitelyIsNumber())
- emitJumpSlowCaseIfNotImmediateNumber(regT1);
- Jump notInt = emitJumpIfNotImmediateInteger(regT1);
- convertInt32ToDouble(regT1, fpRegT1);
- Jump skipDoubleLoad = jump();
- notInt.link(this);
- add64(tagTypeNumberRegister, regT1);
- move64ToDouble(regT1, fpRegT1);
- skipDoubleLoad.link(this);
+ ASSERT(gen.endJumpList().empty());
+ ASSERT(gen.slowPathJumpList().empty());
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_add);
+ slowPathCall.call();
}
- divDouble(fpRegT1, fpRegT0);
-
-#if ENABLE(VALUE_PROFILER)
- // Is the result actually an integer? The DFG JIT would really like to know. If it's
- // not an integer, we increment a count. If this together with the slow case counter
- // are below threshold then the DFG JIT will compile this division with a speculation
- // that the remainder is zero.
-
- // As well, there are cases where a double result here would cause an important field
- // in the heap to sometimes have doubles in it, resulting in double predictions getting
- // propagated to a use site where it might cause damage (such as the index to an array
- // access). So if we are DFG compiling anything in the program, we want this code to
- // ensure that it produces integers whenever possible.
-
- JumpList notInteger;
- branchConvertDoubleToInt32(fpRegT0, regT0, notInteger, fpRegT1);
- // If we've got an integer, we might as well make that the result of the division.
- emitFastArithReTagImmediate(regT0, regT0);
- Jump isInteger = jump();
- notInteger.link(this);
- moveDoubleTo64(fpRegT0, regT0);
- Jump doubleZero = branchTest64(Zero, regT0);
- add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset)->m_counter));
- sub64(tagTypeNumberRegister, regT0);
- Jump trueDouble = jump();
- doubleZero.link(this);
- move(tagTypeNumberRegister, regT0);
- trueDouble.link(this);
- isInteger.link(this);
+}
+
+void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_add);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_div(Instruction* currentInstruction)
+{
+ int result = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+#if USE(JSVALUE64)
+ JSValueRegs leftRegs = JSValueRegs(regT0);
+ JSValueRegs rightRegs = JSValueRegs(regT1);
+ JSValueRegs resultRegs = leftRegs;
+ GPRReg scratchGPR = regT2;
#else
- // Double result.
- moveDoubleTo64(fpRegT0, regT0);
- sub64(tagTypeNumberRegister, regT0);
+ JSValueRegs leftRegs = JSValueRegs(regT1, regT0);
+ JSValueRegs rightRegs = JSValueRegs(regT3, regT2);
+ JSValueRegs resultRegs = leftRegs;
+ GPRReg scratchGPR = regT4;
#endif
+ FPRReg scratchFPR = fpRegT2;
- emitPutVirtualRegister(dst, regT0);
+ ResultProfile* resultProfile = nullptr;
+ if (shouldEmitProfiling())
+ resultProfile = m_codeBlock->ensureResultProfile(m_bytecodeOffset);
+
+ SnippetOperand leftOperand(types.first());
+ SnippetOperand rightOperand(types.second());
+
+ if (isOperandConstantInt(op1))
+ leftOperand.setConstInt32(getOperandConstantInt(op1));
+#if USE(JSVALUE64)
+ else if (isOperandConstantDouble(op1))
+ leftOperand.setConstDouble(getOperandConstantDouble(op1));
+#endif
+
+ if (isOperandConstantInt(op2))
+ rightOperand.setConstInt32(getOperandConstantInt(op2));
+#if USE(JSVALUE64)
+ else if (isOperandConstantDouble(op2))
+ rightOperand.setConstDouble(getOperandConstantDouble(op2));
+#endif
+
+ RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
+
+ if (!leftOperand.isConst())
+ emitGetVirtualRegister(op1, leftRegs);
+ if (!rightOperand.isConst())
+ emitGetVirtualRegister(op2, rightRegs);
+
+ JITDivGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
+ fpRegT0, fpRegT1, scratchGPR, scratchFPR, resultProfile);
+
+ gen.generateFastPath(*this);
+
+ if (gen.didEmitFastPath()) {
+ gen.endJumpList().link(this);
+ emitPutVirtualRegister(result, resultRegs);
+
+ addSlowCase(gen.slowPathJumpList());
+ } else {
+ ASSERT(gen.endJumpList().empty());
+ ASSERT(gen.slowPathJumpList().empty());
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_div);
+ slowPathCall.call();
+ }
}
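
// [Editor's sketch, not part of the patch] emit_op_div above feeds a
// ResultProfile so later tiers can tell how often a division produced something
// other than an int32. A rough, hypothetical version of that kind of check:
#include <cmath>
#include <cstdint>
#include <cstdio>

static bool looksLikeInt32(double d)
{
    return d == static_cast<double>(static_cast<int32_t>(d)) && !(d == 0 && std::signbit(d));
}

int main()
{
    std::printf("%d %d\n", looksLikeInt32(6.0 / 2.0), looksLikeInt32(1.0 / 2.0)); // 1 0
    return 0;
}
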
void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
+ linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_div);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_mul(Instruction* currentInstruction)
+{
+ int result = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
- if (types.first().definitelyIsNumber() && types.second().definitelyIsNumber()) {
-#ifndef NDEBUG
- breakpoint();
+
+#if USE(JSVALUE64)
+ JSValueRegs leftRegs = JSValueRegs(regT0);
+ JSValueRegs rightRegs = JSValueRegs(regT1);
+ JSValueRegs resultRegs = JSValueRegs(regT2);
+ GPRReg scratchGPR = regT3;
+ FPRReg scratchFPR = InvalidFPRReg;
+#else
+ JSValueRegs leftRegs = JSValueRegs(regT1, regT0);
+ JSValueRegs rightRegs = JSValueRegs(regT3, regT2);
+ JSValueRegs resultRegs = leftRegs;
+ GPRReg scratchGPR = regT4;
+ FPRReg scratchFPR = fpRegT2;
#endif
- return;
- }
- if (!isOperandConstantImmediateDouble(op1) && !isOperandConstantImmediateInt(op1)) {
- if (!types.first().definitelyIsNumber())
- linkSlowCase(iter);
- }
- if (!isOperandConstantImmediateDouble(op2) && !isOperandConstantImmediateInt(op2)) {
- if (!types.second().definitelyIsNumber())
- linkSlowCase(iter);
+
+ ResultProfile* resultProfile = nullptr;
+ if (shouldEmitProfiling())
+ resultProfile = m_codeBlock->ensureResultProfile(m_bytecodeOffset);
+
+ SnippetOperand leftOperand(types.first());
+ SnippetOperand rightOperand(types.second());
+
+ if (isOperandConstantInt(op1))
+ leftOperand.setConstInt32(getOperandConstantInt(op1));
+ else if (isOperandConstantInt(op2))
+ rightOperand.setConstInt32(getOperandConstantInt(op2));
+
+ RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
+
+ if (!leftOperand.isPositiveConstInt32())
+ emitGetVirtualRegister(op1, leftRegs);
+ if (!rightOperand.isPositiveConstInt32())
+ emitGetVirtualRegister(op2, rightRegs);
+
+ JITMulGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
+ fpRegT0, fpRegT1, scratchGPR, scratchFPR, resultProfile);
+
+ gen.generateFastPath(*this);
+
+ if (gen.didEmitFastPath()) {
+ gen.endJumpList().link(this);
+ emitPutVirtualRegister(result, resultRegs);
+
+ addSlowCase(gen.slowPathJumpList());
+ } else {
+ ASSERT(gen.endJumpList().empty());
+ ASSERT(gen.slowPathJumpList().empty());
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mul);
+ slowPathCall.call();
}
- // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
- JITStubCall stubCall(this, cti_op_div);
- stubCall.addArgument(op1, regT2);
- stubCall.addArgument(op2, regT2);
- stubCall.call(result);
+}
+
+void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mul);
+ slowPathCall.call();
}
void JIT::emit_op_sub(Instruction* currentInstruction)
{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
+ int result = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
- compileBinaryArithOp(op_sub, result, op1, op2, types);
- emitPutVirtualRegister(result);
+#if USE(JSVALUE64)
+ JSValueRegs leftRegs = JSValueRegs(regT0);
+ JSValueRegs rightRegs = JSValueRegs(regT1);
+ JSValueRegs resultRegs = leftRegs;
+ GPRReg scratchGPR = regT2;
+ FPRReg scratchFPR = InvalidFPRReg;
+#else
+ JSValueRegs leftRegs = JSValueRegs(regT1, regT0);
+ JSValueRegs rightRegs = JSValueRegs(regT3, regT2);
+ JSValueRegs resultRegs = leftRegs;
+ GPRReg scratchGPR = regT4;
+ FPRReg scratchFPR = fpRegT2;
+#endif
+
+ SnippetOperand leftOperand(types.first());
+ SnippetOperand rightOperand(types.second());
+
+ emitGetVirtualRegister(op1, leftRegs);
+ emitGetVirtualRegister(op2, rightRegs);
+
+ JITSubGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
+ fpRegT0, fpRegT1, scratchGPR, scratchFPR);
+
+ gen.generateFastPath(*this);
+
+ ASSERT(gen.didEmitFastPath());
+ gen.endJumpList().link(this);
+ emitPutVirtualRegister(result, resultRegs);
+
+ addSlowCase(gen.slowPathJumpList());
}
void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+ linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
- compileBinaryArithOpSlowCase(op_sub, iter, result, op1, op2, types, false, false);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_sub);
+ slowPathCall.call();
}
/* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL ------------------------------ */
-#endif // USE(JSVALUE64)
-
} // namespace JSC
#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp b/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
index c1caf61f5..1fa14563a 100644
--- a/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
@@ -1,5 +1,5 @@
/*
-* Copyright (C) 2008 Apple Inc. All rights reserved.
+* Copyright (C) 2008, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -31,67 +31,24 @@
#include "CodeBlock.h"
#include "JITInlines.h"
-#include "JITStubCall.h"
-#include "JITStubs.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "Interpreter.h"
-#include "Operations.h"
+#include "JSCInlines.h"
#include "ResultType.h"
#include "SamplingTool.h"
+#include "SlowPathCall.h"
-#ifndef NDEBUG
-#include <stdio.h>
-#endif
-
-using namespace std;
namespace JSC {
-void JIT::emit_op_negate(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src = currentInstruction[2].u.operand;
-
- emitLoad(src, regT1, regT0);
-
- Jump srcNotInt = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
- addSlowCase(branchTest32(Zero, regT0, TrustedImm32(0x7fffffff)));
- neg32(regT0);
- emitStoreInt32(dst, regT0, (dst == src));
-
- Jump end = jump();
-
- srcNotInt.link(this);
- addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
-
- xor32(TrustedImm32(1 << 31), regT1);
- store32(regT1, tagFor(dst));
- if (dst != src)
- store32(regT0, payloadFor(dst));
-
- end.link(this);
-}
-
-void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
-
- linkSlowCase(iter); // 0x7fffffff check
- linkSlowCase(iter); // double check
-
- JITStubCall stubCall(this, cti_op_negate);
- stubCall.addArgument(regT1, regT0);
- stubCall.call(dst);
-}
-
-void JIT::emit_compareAndJump(OpcodeID opcode, unsigned op1, unsigned op2, unsigned target, RelationalCondition condition)
+void JIT::emit_compareAndJump(OpcodeID opcode, int op1, int op2, unsigned target, RelationalCondition condition)
{
JumpList notInt32Op1;
JumpList notInt32Op2;
// Character less.
- if (isOperandConstantImmediateChar(op1)) {
+ if (isOperandConstantChar(op1)) {
emitLoad(op2, regT1, regT0);
addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
JumpList failures;
@@ -100,7 +57,7 @@ void JIT::emit_compareAndJump(OpcodeID opcode, unsigned op1, unsigned op2, unsig
addJump(branch32(commute(condition), regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
return;
}
- if (isOperandConstantImmediateChar(op2)) {
+ if (isOperandConstantChar(op2)) {
emitLoad(op1, regT1, regT0);
addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
JumpList failures;
@@ -109,11 +66,11 @@ void JIT::emit_compareAndJump(OpcodeID opcode, unsigned op1, unsigned op2, unsig
addJump(branch32(condition, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
return;
}
- if (isOperandConstantImmediateInt(op1)) {
+ if (isOperandConstantInt(op1)) {
emitLoad(op2, regT3, regT2);
notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
addJump(branch32(commute(condition), regT2, Imm32(getConstantOperand(op1).asInt32())), target);
- } else if (isOperandConstantImmediateInt(op2)) {
+ } else if (isOperandConstantInt(op2)) {
emitLoad(op1, regT1, regT0);
notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
addJump(branch32(condition, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
@@ -132,596 +89,99 @@ void JIT::emit_compareAndJump(OpcodeID opcode, unsigned op1, unsigned op2, unsig
Jump end = jump();
// Double less.
- emitBinaryDoubleOp(opcode, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
+ emitBinaryDoubleOp(opcode, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantInt(op1), isOperandConstantInt(op1) || !isOperandConstantInt(op2));
end.link(this);
}
-void JIT::emit_compareAndJumpSlow(unsigned op1, unsigned op2, unsigned target, DoubleCondition, int (JIT_STUB *stub)(STUB_ARGS_DECLARATION), bool invert, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondition, size_t (JIT_OPERATION *operation)(ExecState*, EncodedJSValue, EncodedJSValue), bool invert, Vector<SlowCaseEntry>::iterator& iter)
{
- if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
+ if (isOperandConstantChar(op1) || isOperandConstantChar(op2)) {
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
} else {
if (!supportsFloatingPoint()) {
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ if (!isOperandConstantInt(op1) && !isOperandConstantInt(op2))
linkSlowCase(iter); // int32 check
linkSlowCase(iter); // int32 check
} else {
- if (!isOperandConstantImmediateInt(op1)) {
+ if (!isOperandConstantInt(op1)) {
linkSlowCase(iter); // double check
linkSlowCase(iter); // int32 check
}
- if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
+ if (isOperandConstantInt(op1) || !isOperandConstantInt(op2))
linkSlowCase(iter); // double check
}
}
- JITStubCall stubCall(this, stub);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, regT0), target);
-}
-
-// LeftShift (<<)
-
-void JIT::emit_op_lshift(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- if (isOperandConstantImmediateInt(op2)) {
- emitLoad(op1, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
- lshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
- emitStoreAndMapInt32(dst, regT1, regT0, dst == op1, OPCODE_LENGTH(op_lshift));
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- if (!isOperandConstantImmediateInt(op1))
- addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
- lshift32(regT2, regT0);
- emitStoreAndMapInt32(dst, regT1, regT0, dst == op1 || dst == op2, OPCODE_LENGTH(op_lshift));
-}
-
-void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
-
- JITStubCall stubCall(this, cti_op_lshift);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
-
-// RightShift (>>) and UnsignedRightShift (>>>) helper
-
-void JIT::emitRightShift(Instruction* currentInstruction, bool isUnsigned)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- // Slow case of rshift makes assumptions about what registers hold the
- // shift arguments, so any changes must be updated there as well.
- if (isOperandConstantImmediateInt(op2)) {
- emitLoad(op1, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
- int shift = getConstantOperand(op2).asInt32() & 0x1f;
- if (shift) {
- if (isUnsigned)
- urshift32(Imm32(shift), regT0);
- else
- rshift32(Imm32(shift), regT0);
- } else if (isUnsigned) // signed right shift by zero is simply toInt conversion
- addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
- emitStoreAndMapInt32(dst, regT1, regT0, dst == op1, OPCODE_LENGTH(op_rshift));
- } else {
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- if (!isOperandConstantImmediateInt(op1))
- addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
- if (isUnsigned) {
- urshift32(regT2, regT0);
- addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
- } else
- rshift32(regT2, regT0);
- emitStoreAndMapInt32(dst, regT1, regT0, dst == op1, OPCODE_LENGTH(op_rshift));
- }
-}
-
-void JIT::emitRightShiftSlowCase(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool isUnsigned)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- if (isOperandConstantImmediateInt(op2)) {
- int shift = getConstantOperand(op2).asInt32() & 0x1f;
- // op1 = regT1:regT0
- linkSlowCase(iter); // int32 check
- if (supportsFloatingPointTruncate()) {
- JumpList failures;
- failures.append(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::LowestTag)));
- emitLoadDouble(op1, fpRegT0);
- failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
- if (shift) {
- if (isUnsigned)
- urshift32(Imm32(shift), regT0);
- else
- rshift32(Imm32(shift), regT0);
- } else if (isUnsigned) // signed right shift by zero is simply toInt conversion
- failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
- move(TrustedImm32(JSValue::Int32Tag), regT1);
- emitStoreInt32(dst, regT0, false);
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
- failures.link(this);
- }
- if (isUnsigned && !shift)
- linkSlowCase(iter); // failed to box in hot path
- } else {
- // op1 = regT1:regT0
- // op2 = regT3:regT2
- if (!isOperandConstantImmediateInt(op1)) {
- linkSlowCase(iter); // int32 check -- op1 is not an int
- if (supportsFloatingPointTruncate()) {
- JumpList failures;
- failures.append(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag))); // op1 is not a double
- emitLoadDouble(op1, fpRegT0);
- failures.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag))); // op2 is not an int
- failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
- if (isUnsigned) {
- urshift32(regT2, regT0);
- failures.append(branch32(LessThan, regT0, TrustedImm32(0)));
- } else
- rshift32(regT2, regT0);
- move(TrustedImm32(JSValue::Int32Tag), regT1);
- emitStoreInt32(dst, regT0, false);
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
- failures.link(this);
- }
- }
-
- linkSlowCase(iter); // int32 check - op2 is not an int
- if (isUnsigned)
- linkSlowCase(iter); // Can't represent unsigned result as an immediate
- }
-
- JITStubCall stubCall(this, isUnsigned ? cti_op_urshift : cti_op_rshift);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
-
-// RightShift (>>)
-
-void JIT::emit_op_rshift(Instruction* currentInstruction)
-{
- emitRightShift(currentInstruction, false);
-}
-
-void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- emitRightShiftSlowCase(currentInstruction, iter, false);
-}
-
-// UnsignedRightShift (>>>)
-
-void JIT::emit_op_urshift(Instruction* currentInstruction)
-{
- emitRightShift(currentInstruction, true);
-}
-
-void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- emitRightShiftSlowCase(currentInstruction, iter, true);
-}
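
The removed >> and >>> fast paths above encode the ECMAScript shift semantics directly: the shift amount is masked to five bits, and an unsigned shift whose result does not fit in a non-negative int32 is sent to the slow case (which boxes it as a double). A minimal plain-C++ sketch of that contract, not JSC code and purely illustrative:

    #include <cstdint>
    #include <optional>

    // Returns the int32 fast-path result, or nullopt where the JIT would take the slow case.
    std::optional<int32_t> rightShiftFastPath(int32_t lhs, int32_t rhs, bool isUnsigned)
    {
        int shift = rhs & 0x1f;                      // ECMAScript masks the shift count to 5 bits
        if (!isUnsigned)
            return lhs >> shift;                     // assumes arithmetic shift; result always fits in int32
        uint32_t result = static_cast<uint32_t>(lhs) >> shift;
        if (result > static_cast<uint32_t>(INT32_MAX))
            return std::nullopt;                     // >>> produced a value >= 2^31: needs a double
        return static_cast<int32_t>(result);
    }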
-
-// BitAnd (&)
-
-void JIT::emit_op_bitand(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- unsigned op;
- int32_t constant;
- if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
- emitLoad(op, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
- and32(Imm32(constant), regT0);
- emitStoreAndMapInt32(dst, regT1, regT0, dst == op, OPCODE_LENGTH(op_bitand));
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
- and32(regT2, regT0);
- emitStoreAndMapInt32(dst, regT1, regT0, (op1 == dst || op2 == dst), OPCODE_LENGTH(op_bitand));
-}
-
-void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
-
- JITStubCall stubCall(this, cti_op_bitand);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
-
-// BitOr (|)
-
-void JIT::emit_op_bitor(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- unsigned op;
- int32_t constant;
- if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
- emitLoad(op, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
- or32(Imm32(constant), regT0);
- emitStoreAndMapInt32(dst, regT1, regT0, op == dst, OPCODE_LENGTH(op_bitor));
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
- or32(regT2, regT0);
- emitStoreAndMapInt32(dst, regT1, regT0, (op1 == dst || op2 == dst), OPCODE_LENGTH(op_bitor));
-}
-
-void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
-
- JITStubCall stubCall(this, cti_op_bitor);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
+ emitLoad(op1, regT1, regT0);
+ emitLoad(op2, regT3, regT2);
+ callOperation(operation, regT1, regT0, regT3, regT2);
+ emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
}
-// BitXor (^)
-
-void JIT::emit_op_bitxor(Instruction* currentInstruction)
+void JIT::emit_op_unsigned(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- unsigned op;
- int32_t constant;
- if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
- emitLoad(op, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
- xor32(Imm32(constant), regT0);
- emitStoreAndMapInt32(dst, regT1, regT0, op == dst, OPCODE_LENGTH(op_bitxor));
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ int result = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+
+ emitLoad(op1, regT1, regT0);
+
addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
- xor32(regT2, regT0);
- emitStoreAndMapInt32(dst, regT1, regT0, (op1 == dst || op2 == dst), OPCODE_LENGTH(op_bitxor));
+ addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
+ emitStoreInt32(result, regT0, result == op1);
}
-void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_unsigned(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
-
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
-
- JITStubCall stubCall(this, cti_op_bitxor);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_unsigned);
+ slowPathCall.call();
}
void JIT::emit_op_inc(Instruction* currentInstruction)
{
- unsigned srcDst = currentInstruction[1].u.operand;
+ int srcDst = currentInstruction[1].u.operand;
emitLoad(srcDst, regT1, regT0);
addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
- emitStoreAndMapInt32(srcDst, regT1, regT0, true, OPCODE_LENGTH(op_inc));
+ emitStoreInt32(srcDst, regT0, true);
}
void JIT::emitSlow_op_inc(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned srcDst = currentInstruction[1].u.operand;
-
linkSlowCase(iter); // int32 check
linkSlowCase(iter); // overflow check
- JITStubCall stubCall(this, cti_op_inc);
- stubCall.addArgument(srcDst);
- stubCall.call(srcDst);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_inc);
+ slowPathCall.call();
}
void JIT::emit_op_dec(Instruction* currentInstruction)
{
- unsigned srcDst = currentInstruction[1].u.operand;
+ int srcDst = currentInstruction[1].u.operand;
emitLoad(srcDst, regT1, regT0);
addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
- emitStoreAndMapInt32(srcDst, regT1, regT0, true, OPCODE_LENGTH(op_dec));
+ emitStoreInt32(srcDst, regT0, true);
}
void JIT::emitSlow_op_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned srcDst = currentInstruction[1].u.operand;
-
linkSlowCase(iter); // int32 check
linkSlowCase(iter); // overflow check
- JITStubCall stubCall(this, cti_op_dec);
- stubCall.addArgument(srcDst);
- stubCall.call(srcDst);
-}
-
-// Addition (+)
-
-void JIT::emit_op_add(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
- addSlowCase();
- JITStubCall stubCall(this, cti_op_add);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
- return;
- }
-
- JumpList notInt32Op1;
- JumpList notInt32Op2;
-
- unsigned op;
- int32_t constant;
- if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
- emitAdd32Constant(dst, op, constant, op == op1 ? types.first() : types.second());
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
-
- // Int32 case.
- addSlowCase(branchAdd32(Overflow, regT2, regT0));
- emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
-
- if (!supportsFloatingPoint()) {
- addSlowCase(notInt32Op1);
- addSlowCase(notInt32Op2);
- return;
- }
- Jump end = jump();
-
- // Double case.
- emitBinaryDoubleOp(op_add, dst, op1, op2, types, notInt32Op1, notInt32Op2);
- end.link(this);
-}
-
-void JIT::emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
-{
- // Int32 case.
- emitLoad(op, regT1, regT2);
- Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
- addSlowCase(branchAdd32(Overflow, regT2, Imm32(constant), regT0));
- emitStoreInt32(dst, regT0, (op == dst));
-
- // Double case.
- if (!supportsFloatingPoint()) {
- addSlowCase(notInt32);
- return;
- }
- Jump end = jump();
-
- notInt32.link(this);
- if (!opType.definitelyIsNumber())
- addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
- move(Imm32(constant), regT2);
- convertInt32ToDouble(regT2, fpRegT0);
- emitLoadDouble(op, fpRegT1);
- addDouble(fpRegT1, fpRegT0);
- emitStoreDouble(dst, fpRegT0);
-
- end.link(this);
-}
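
emitAdd32Constant shows the shape shared by these arithmetic fast paths: attempt the int32 addition with an overflow check (overflow takes the slow case), and handle non-int32 operands on an inline double path. A rough standalone sketch of what the overflow fallback ultimately computes, assuming a GCC/Clang-style overflow builtin:

    #include <cstdint>

    // Sketch only; mirrors the fast-path contract rather than the emitted code.
    double addInt32WithFallback(int32_t a, int32_t b, bool& overflowed)
    {
        int32_t sum;
        overflowed = __builtin_add_overflow(a, b, &sum);            // GCC/Clang builtin (assumed available)
        if (overflowed)
            return static_cast<double>(a) + static_cast<double>(b); // slow case produces a double
        return sum;                                                 // fast case stays an int32
    }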
-
-void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
- linkDummySlowCase(iter);
- return;
- }
-
- unsigned op;
- int32_t constant;
- if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
- linkSlowCase(iter); // overflow check
-
- if (!supportsFloatingPoint())
- linkSlowCase(iter); // non-sse case
- else {
- ResultType opType = op == op1 ? types.first() : types.second();
- if (!opType.definitelyIsNumber())
- linkSlowCase(iter); // double check
- }
- } else {
- linkSlowCase(iter); // overflow check
-
- if (!supportsFloatingPoint()) {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
- } else {
- if (!types.first().definitelyIsNumber())
- linkSlowCase(iter); // double check
-
- if (!types.second().definitelyIsNumber()) {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // double check
- }
- }
- }
-
- JITStubCall stubCall(this, cti_op_add);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
-
-// Subtraction (-)
-
-void JIT::emit_op_sub(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- JumpList notInt32Op1;
- JumpList notInt32Op2;
-
- if (isOperandConstantImmediateInt(op2)) {
- emitSub32Constant(dst, op1, getConstantOperand(op2).asInt32(), types.first());
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
-
- // Int32 case.
- addSlowCase(branchSub32(Overflow, regT2, regT0));
- emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
-
- if (!supportsFloatingPoint()) {
- addSlowCase(notInt32Op1);
- addSlowCase(notInt32Op2);
- return;
- }
- Jump end = jump();
-
- // Double case.
- emitBinaryDoubleOp(op_sub, dst, op1, op2, types, notInt32Op1, notInt32Op2);
- end.link(this);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_dec);
+ slowPathCall.call();
}
-void JIT::emitSub32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType)
-{
- // Int32 case.
- emitLoad(op, regT1, regT0);
- Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
-#if ENABLE(JIT_CONSTANT_BLINDING)
- addSlowCase(branchSub32(Overflow, regT0, Imm32(constant), regT2, regT3));
-#else
- addSlowCase(branchSub32(Overflow, regT0, Imm32(constant), regT2));
-#endif
-
- emitStoreInt32(dst, regT2, (op == dst));
-
- // Double case.
- if (!supportsFloatingPoint()) {
- addSlowCase(notInt32);
- return;
- }
- Jump end = jump();
-
- notInt32.link(this);
- if (!opType.definitelyIsNumber())
- addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
- move(Imm32(constant), regT2);
- convertInt32ToDouble(regT2, fpRegT0);
- emitLoadDouble(op, fpRegT1);
- subDouble(fpRegT0, fpRegT1);
- emitStoreDouble(dst, fpRegT1);
-
- end.link(this);
-}
-
-void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- if (isOperandConstantImmediateInt(op2)) {
- linkSlowCase(iter); // overflow check
-
- if (!supportsFloatingPoint() || !types.first().definitelyIsNumber())
- linkSlowCase(iter); // int32 or double check
- } else {
- linkSlowCase(iter); // overflow check
-
- if (!supportsFloatingPoint()) {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
- } else {
- if (!types.first().definitelyIsNumber())
- linkSlowCase(iter); // double check
-
- if (!types.second().definitelyIsNumber()) {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // double check
- }
- }
- }
-
- JITStubCall stubCall(this, cti_op_sub);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
-
-void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsigned op2, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
+void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, int dst, int op1, int op2, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
{
JumpList end;
@@ -753,54 +213,6 @@ void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsi
// Do the math.
doTheMath.link(this);
switch (opcodeID) {
- case op_mul:
- emitLoadDouble(op1, fpRegT2);
- mulDouble(fpRegT2, fpRegT0);
- emitStoreDouble(dst, fpRegT0);
- break;
- case op_add:
- emitLoadDouble(op1, fpRegT2);
- addDouble(fpRegT2, fpRegT0);
- emitStoreDouble(dst, fpRegT0);
- break;
- case op_sub:
- emitLoadDouble(op1, fpRegT1);
- subDouble(fpRegT0, fpRegT1);
- emitStoreDouble(dst, fpRegT1);
- break;
- case op_div: {
- emitLoadDouble(op1, fpRegT1);
- divDouble(fpRegT0, fpRegT1);
-
-#if ENABLE(VALUE_PROFILER)
- // Is the result actually an integer? The DFG JIT would really like to know. If it's
- // not an integer, we increment a count. If this together with the slow case counter
-        // are below threshold then the DFG JIT will compile this division with a speculation
- // that the remainder is zero.
-
- // As well, there are cases where a double result here would cause an important field
- // in the heap to sometimes have doubles in it, resulting in double predictions getting
- // propagated to a use site where it might cause damage (such as the index to an array
- // access). So if we are DFG compiling anything in the program, we want this code to
- // ensure that it produces integers whenever possible.
-
- // FIXME: This will fail to convert to integer if the result is zero. We should
- // distinguish between positive zero and negative zero here.
-
- JumpList notInteger;
- branchConvertDoubleToInt32(fpRegT1, regT2, notInteger, fpRegT0);
- // If we've got an integer, we might as well make that the result of the division.
- emitStoreInt32(dst, regT2);
- Jump isInteger = jump();
- notInteger.link(this);
- add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
- emitStoreDouble(dst, fpRegT1);
- isInteger.link(this);
-#else
- emitStoreDouble(dst, fpRegT1);
-#endif
- break;
- }
case op_jless:
emitLoadDouble(op1, fpRegT2);
addJump(branchDouble(DoubleLessThan, fpRegT2, fpRegT0), dst);
@@ -858,53 +270,6 @@ void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsi
// Do the math.
switch (opcodeID) {
- case op_mul:
- emitLoadDouble(op2, fpRegT2);
- mulDouble(fpRegT2, fpRegT0);
- emitStoreDouble(dst, fpRegT0);
- break;
- case op_add:
- emitLoadDouble(op2, fpRegT2);
- addDouble(fpRegT2, fpRegT0);
- emitStoreDouble(dst, fpRegT0);
- break;
- case op_sub:
- emitLoadDouble(op2, fpRegT2);
- subDouble(fpRegT2, fpRegT0);
- emitStoreDouble(dst, fpRegT0);
- break;
- case op_div: {
- emitLoadDouble(op2, fpRegT2);
- divDouble(fpRegT2, fpRegT0);
-#if ENABLE(VALUE_PROFILER)
- // Is the result actually an integer? The DFG JIT would really like to know. If it's
- // not an integer, we increment a count. If this together with the slow case counter
-        // are below threshold then the DFG JIT will compile this division with a speculation
- // that the remainder is zero.
-
- // As well, there are cases where a double result here would cause an important field
- // in the heap to sometimes have doubles in it, resulting in double predictions getting
- // propagated to a use site where it might cause damage (such as the index to an array
- // access). So if we are DFG compiling anything in the program, we want this code to
- // ensure that it produces integers whenever possible.
-
- // FIXME: This will fail to convert to integer if the result is zero. We should
- // distinguish between positive zero and negative zero here.
-
- JumpList notInteger;
- branchConvertDoubleToInt32(fpRegT0, regT2, notInteger, fpRegT1);
- // If we've got an integer, we might as well make that the result of the division.
- emitStoreInt32(dst, regT2);
- Jump isInteger = jump();
- notInteger.link(this);
- add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
- emitStoreDouble(dst, fpRegT0);
- isInteger.link(this);
-#else
- emitStoreDouble(dst, fpRegT0);
-#endif
- break;
- }
case op_jless:
emitLoadDouble(op2, fpRegT1);
addJump(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), dst);
@@ -945,191 +310,17 @@ void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, unsigned dst, unsigned op1, unsi
end.link(this);
}
-// Multiplication (*)
-
-void JIT::emit_op_mul(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
-#if ENABLE(VALUE_PROFILER)
- m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
-#endif
-
- JumpList notInt32Op1;
- JumpList notInt32Op2;
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
-
- // Int32 case.
- move(regT0, regT3);
- addSlowCase(branchMul32(Overflow, regT2, regT0));
- addSlowCase(branchTest32(Zero, regT0));
- emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
-
- if (!supportsFloatingPoint()) {
- addSlowCase(notInt32Op1);
- addSlowCase(notInt32Op2);
- return;
- }
- Jump end = jump();
-
- // Double case.
- emitBinaryDoubleOp(op_mul, dst, op1, op2, types, notInt32Op1, notInt32Op2);
- end.link(this);
-}
-
-void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- Jump overflow = getSlowCase(iter); // overflow check
- linkSlowCase(iter); // zero result check
-
- Jump negZero = branchOr32(Signed, regT2, regT3);
- emitStoreInt32(dst, TrustedImm32(0), (op1 == dst || op2 == dst));
-
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul));
-
- negZero.link(this);
-#if ENABLE(VALUE_PROFILER)
- // We only get here if we have a genuine negative zero. Record this,
- // so that the speculative JIT knows that we failed speculation
- // because of a negative zero.
- add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
-#endif
- overflow.link(this);
-
- if (!supportsFloatingPoint()) {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
- }
-
- if (supportsFloatingPoint()) {
- if (!types.first().definitelyIsNumber())
- linkSlowCase(iter); // double check
-
- if (!types.second().definitelyIsNumber()) {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // double check
- }
- }
-
- Label jitStubCall(this);
- JITStubCall stubCall(this, cti_op_mul);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
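
The zero-result check in the removed emit_op_mul/emitSlow_op_mul pair exists because of negative zero: when the int32 product is 0 but one operand is negative, the JavaScript result is -0, which only a double can represent, and the special-fast-case counter records that speculation failure for the DFG. A plain-C++ illustration of the value involved:

    #include <cmath>
    #include <cstdio>

    int main()
    {
        double product = 0.0 * -1.0;                 // IEEE 754 negative zero
        std::printf("%d\n", std::signbit(product));  // prints 1: 0 * -1 is -0, not +0
        return 0;
    }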
-
-// Division (/)
-
-void JIT::emit_op_div(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
-#if ENABLE(VALUE_PROFILER)
- m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
-#endif
-
- if (!supportsFloatingPoint()) {
- addSlowCase(jump());
- return;
- }
-
- // Int32 divide.
- JumpList notInt32Op1;
- JumpList notInt32Op2;
-
- JumpList end;
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
-
- notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
-
- convertInt32ToDouble(regT0, fpRegT0);
- convertInt32ToDouble(regT2, fpRegT1);
- divDouble(fpRegT1, fpRegT0);
-#if ENABLE(VALUE_PROFILER)
- // Is the result actually an integer? The DFG JIT would really like to know. If it's
- // not an integer, we increment a count. If this together with the slow case counter
-    // are below threshold then the DFG JIT will compile this division with a speculation
- // that the remainder is zero.
-
- // As well, there are cases where a double result here would cause an important field
- // in the heap to sometimes have doubles in it, resulting in double predictions getting
- // propagated to a use site where it might cause damage (such as the index to an array
- // access). So if we are DFG compiling anything in the program, we want this code to
- // ensure that it produces integers whenever possible.
-
- // FIXME: This will fail to convert to integer if the result is zero. We should
- // distinguish between positive zero and negative zero here.
-
- JumpList notInteger;
- branchConvertDoubleToInt32(fpRegT0, regT2, notInteger, fpRegT1);
- // If we've got an integer, we might as well make that the result of the division.
- emitStoreInt32(dst, regT2);
- end.append(jump());
- notInteger.link(this);
- add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
- emitStoreDouble(dst, fpRegT0);
-#else
- emitStoreDouble(dst, fpRegT0);
-#endif
- end.append(jump());
-
- // Double divide.
- emitBinaryDoubleOp(op_div, dst, op1, op2, types, notInt32Op1, notInt32Op2);
- end.link(this);
-}
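
All of the removed division paths ask the same question before storing the quotient: is the double result exactly representable as an int32? If so it is stored as an int32 so the DFG can later speculate on integer division; otherwise the special-fast-case counter is bumped. A standalone sketch of that check (which, unlike branchConvertDoubleToInt32, glosses over the ±0 distinction the FIXME comments mention):

    #include <cstdint>

    bool quotientFitsInInt32(double quotient)
    {
        if (!(quotient >= INT32_MIN && quotient <= INT32_MAX))
            return false;                                     // out of range, or NaN
        int32_t truncated = static_cast<int32_t>(quotient);
        return static_cast<double>(truncated) == quotient;    // exact: nothing lost in truncation
    }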
-
-void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- if (!supportsFloatingPoint())
- linkSlowCase(iter);
- else {
- if (!types.first().definitelyIsNumber())
- linkSlowCase(iter); // double check
-
- if (!types.second().definitelyIsNumber()) {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // double check
- }
- }
-
- JITStubCall stubCall(this, cti_op_div);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
-}
-
// Mod (%)
/* ------------------------------ BEGIN: OP_MOD ------------------------------ */
void JIT::emit_op_mod(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
+#if CPU(X86)
+ int dst = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
-#if CPU(X86) || CPU(X86_64)
// Make sure registers are correct for x86 IDIV instructions.
ASSERT(regT0 == X86Registers::eax);
ASSERT(regT1 == X86Registers::edx);
@@ -1145,35 +336,28 @@ void JIT::emit_op_mod(Instruction* currentInstruction)
Jump denominatorNotNeg1 = branch32(NotEqual, regT2, TrustedImm32(-1));
addSlowCase(branch32(Equal, regT0, TrustedImm32(-2147483647-1)));
denominatorNotNeg1.link(this);
- m_assembler.cdq();
- m_assembler.idivl_r(regT2);
+ x86ConvertToDoubleWord32();
+ x86Div32(regT2);
Jump numeratorPositive = branch32(GreaterThanOrEqual, regT3, TrustedImm32(0));
addSlowCase(branchTest32(Zero, regT1));
numeratorPositive.link(this);
emitStoreInt32(dst, regT1, (op1 == dst || op2 == dst));
#else
- JITStubCall stubCall(this, cti_op_mod);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(dst);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mod);
+ slowPathCall.call();
#endif
}
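
The x86 fast path for op_mod has to divert several inputs to the slow case around the idiv: besides the operand tag checks (and, presumably, a zero-denominator check earlier in the function, outside this hunk), it rejects INT32_MIN % -1, which would fault idiv, and a zero remainder with a negative numerator, whose JavaScript result is -0. A plain-C++ sketch of those guards:

    #include <cstdint>
    #include <optional>

    // Returns the int32 remainder, or nullopt where the JIT would fall back to the slow path.
    std::optional<int32_t> fastMod(int32_t numerator, int32_t denominator)
    {
        if (!denominator)
            return std::nullopt;                  // result is NaN
        if (numerator == INT32_MIN && denominator == -1)
            return std::nullopt;                  // idiv would trap on this overflow
        int32_t remainder = numerator % denominator;
        if (!remainder && numerator < 0)
            return std::nullopt;                  // JavaScript result is -0, not representable as int32
        return remainder;
    }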
void JIT::emitSlow_op_mod(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
#if CPU(X86) || CPU(X86_64)
- unsigned result = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_mod);
- stubCall.addArgument(op1);
- stubCall.addArgument(op2);
- stubCall.call(result);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mod);
+ slowPathCall.call();
#else
UNUSED_PARAM(currentInstruction);
UNUSED_PARAM(iter);
diff --git a/Source/JavaScriptCore/jit/JITBitAndGenerator.cpp b/Source/JavaScriptCore/jit/JITBitAndGenerator.cpp
new file mode 100644
index 000000000..715b503d2
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITBitAndGenerator.cpp
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JITBitAndGenerator.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+void JITBitAndGenerator::generateFastPath(CCallHelpers& jit)
+{
+#if USE(JSVALUE64)
+ ASSERT(m_scratchGPR != InvalidGPRReg);
+ ASSERT(m_scratchGPR != m_left.payloadGPR());
+ ASSERT(m_scratchGPR != m_right.payloadGPR());
+#else
+ UNUSED_PARAM(m_scratchGPR);
+#endif
+
+ ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());
+
+ m_didEmitFastPath = true;
+
+ if (m_leftOperand.isConstInt32() || m_rightOperand.isConstInt32()) {
+ JSValueRegs var = m_leftOperand.isConstInt32() ? m_right : m_left;
+ SnippetOperand& constOpr = m_leftOperand.isConstInt32() ? m_leftOperand : m_rightOperand;
+
+ // Try to do intVar & intConstant.
+ m_slowPathJumpList.append(jit.branchIfNotInt32(var));
+
+ jit.moveValueRegs(var, m_result);
+ if (constOpr.asConstInt32() != static_cast<int32_t>(0xffffffff)) {
+#if USE(JSVALUE64)
+ jit.and64(CCallHelpers::Imm32(constOpr.asConstInt32()), m_result.payloadGPR());
+ if (constOpr.asConstInt32() >= 0)
+ jit.or64(GPRInfo::tagTypeNumberRegister, m_result.payloadGPR());
+#else
+ jit.and32(CCallHelpers::Imm32(constOpr.asConstInt32()), m_result.payloadGPR());
+#endif
+ }
+
+ } else {
+ ASSERT(!m_leftOperand.isConstInt32() && !m_rightOperand.isConstInt32());
+
+ // Try to do intVar & intVar.
+#if USE(JSVALUE64)
+ jit.move(m_left.payloadGPR(), m_scratchGPR);
+ jit.and64(m_right.payloadGPR(), m_scratchGPR);
+ m_slowPathJumpList.append(jit.branchIfNotInt32(m_scratchGPR));
+ jit.move(m_scratchGPR, m_result.payloadGPR());
+#else
+ m_slowPathJumpList.append(jit.branchIfNotInt32(m_left));
+ m_slowPathJumpList.append(jit.branchIfNotInt32(m_right));
+ jit.moveValueRegs(m_left, m_result);
+ jit.and32(m_right.payloadGPR(), m_result.payloadGPR());
+#endif
+ }
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
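
On 64-bit (JSVALUE64) targets the generateFastPath above can AND the two full 64-bit encodings and only then test the result, because int32 JSValues are boxed by OR-ing in the TagTypeNumber bits, and those bits survive an AND only when both inputs are boxed int32s, so one tag check on the result covers both operands. A small standalone illustration; the tag constant is assumed from JSC's JSVALUE64 encoding:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        const uint64_t tagTypeNumber = 0xffff000000000000ull;   // assumed JSVALUE64 int32 tag
        uint64_t boxedA = tagTypeNumber | static_cast<uint32_t>(-7);
        uint64_t boxedB = tagTypeNumber | 12u;

        uint64_t anded = boxedA & boxedB;                       // the tag bits survive the AND
        assert((anded & tagTypeNumber) == tagTypeNumber);       // still recognizably an int32
        assert(static_cast<int32_t>(static_cast<uint32_t>(anded)) == (-7 & 12)); // payload is the int32 AND
        return 0;
    }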
diff --git a/Source/JavaScriptCore/jit/JITBitAndGenerator.h b/Source/JavaScriptCore/jit/JITBitAndGenerator.h
new file mode 100644
index 000000000..de2436a30
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITBitAndGenerator.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITBitAndGenerator_h
+#define JITBitAndGenerator_h
+
+#if ENABLE(JIT)
+
+#include "JITBitBinaryOpGenerator.h"
+
+namespace JSC {
+
+class JITBitAndGenerator : public JITBitBinaryOpGenerator {
+public:
+ JITBitAndGenerator(const SnippetOperand& leftOperand, const SnippetOperand& rightOperand,
+ JSValueRegs result, JSValueRegs left, JSValueRegs right, GPRReg scratchGPR)
+ : JITBitBinaryOpGenerator(leftOperand, rightOperand, result, left, right, scratchGPR)
+ { }
+
+ void generateFastPath(CCallHelpers&);
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // JITBitAndGenerator_h
diff --git a/Source/JavaScriptCore/jit/JITBitBinaryOpGenerator.h b/Source/JavaScriptCore/jit/JITBitBinaryOpGenerator.h
new file mode 100644
index 000000000..e6ffbd68e
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITBitBinaryOpGenerator.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITBitBinaryOpGenerator_h
+#define JITBitBinaryOpGenerator_h
+
+#if ENABLE(JIT)
+
+#include "CCallHelpers.h"
+#include "SnippetOperand.h"
+
+namespace JSC {
+
+class JITBitBinaryOpGenerator {
+public:
+ JITBitBinaryOpGenerator(const SnippetOperand& leftOperand, const SnippetOperand& rightOperand,
+ JSValueRegs result, JSValueRegs left, JSValueRegs right, GPRReg scratchGPR)
+ : m_leftOperand(leftOperand)
+ , m_rightOperand(rightOperand)
+ , m_result(result)
+ , m_left(left)
+ , m_right(right)
+ , m_scratchGPR(scratchGPR)
+ {
+ ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());
+ }
+
+ bool didEmitFastPath() const { return m_didEmitFastPath; }
+ CCallHelpers::JumpList& endJumpList() { return m_endJumpList; }
+ CCallHelpers::JumpList& slowPathJumpList() { return m_slowPathJumpList; }
+
+protected:
+ SnippetOperand m_leftOperand;
+ SnippetOperand m_rightOperand;
+ JSValueRegs m_result;
+ JSValueRegs m_left;
+ JSValueRegs m_right;
+ GPRReg m_scratchGPR;
+ bool m_didEmitFastPath { false };
+
+ CCallHelpers::JumpList m_endJumpList;
+ CCallHelpers::JumpList m_slowPathJumpList;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // JITBitBinaryOpGenerator_h
diff --git a/Source/JavaScriptCore/jit/JITBitOrGenerator.cpp b/Source/JavaScriptCore/jit/JITBitOrGenerator.cpp
new file mode 100644
index 000000000..9f843c135
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITBitOrGenerator.cpp
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JITBitOrGenerator.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+void JITBitOrGenerator::generateFastPath(CCallHelpers& jit)
+{
+ ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());
+
+ m_didEmitFastPath = true;
+
+ if (m_leftOperand.isConstInt32() || m_rightOperand.isConstInt32()) {
+ JSValueRegs var = m_leftOperand.isConstInt32() ? m_right : m_left;
+ SnippetOperand& constOpr = m_leftOperand.isConstInt32() ? m_leftOperand : m_rightOperand;
+
+ // Try to do intVar | intConstant.
+ m_slowPathJumpList.append(jit.branchIfNotInt32(var));
+
+ jit.moveValueRegs(var, m_result);
+ if (constOpr.asConstInt32()) {
+#if USE(JSVALUE64)
+ jit.or32(CCallHelpers::Imm32(constOpr.asConstInt32()), m_result.payloadGPR());
+ jit.or64(GPRInfo::tagTypeNumberRegister, m_result.payloadGPR());
+#else
+ jit.or32(CCallHelpers::Imm32(constOpr.asConstInt32()), m_result.payloadGPR());
+#endif
+ }
+
+ } else {
+ ASSERT(!m_leftOperand.isConstInt32() && !m_rightOperand.isConstInt32());
+
+ // Try to do intVar | intVar.
+ m_slowPathJumpList.append(jit.branchIfNotInt32(m_left));
+ m_slowPathJumpList.append(jit.branchIfNotInt32(m_right));
+
+ jit.moveValueRegs(m_left, m_result);
+#if USE(JSVALUE64)
+ jit.or64(m_right.payloadGPR(), m_result.payloadGPR());
+#else
+ jit.or32(m_right.payloadGPR(), m_result.payloadGPR());
+#endif
+ }
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITBitOrGenerator.h b/Source/JavaScriptCore/jit/JITBitOrGenerator.h
new file mode 100644
index 000000000..a78df48e0
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITBitOrGenerator.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITBitOrGenerator_h
+#define JITBitOrGenerator_h
+
+#if ENABLE(JIT)
+
+#include "JITBitBinaryOpGenerator.h"
+
+namespace JSC {
+
+class JITBitOrGenerator : public JITBitBinaryOpGenerator {
+public:
+ JITBitOrGenerator(const SnippetOperand& leftOperand, const SnippetOperand& rightOperand,
+ JSValueRegs result, JSValueRegs left, JSValueRegs right, GPRReg unused = InvalidGPRReg)
+ : JITBitBinaryOpGenerator(leftOperand, rightOperand, result, left, right, unused)
+ { }
+
+ void generateFastPath(CCallHelpers&);
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // JITBitOrGenerator_h
diff --git a/Source/JavaScriptCore/jit/JITBitXorGenerator.cpp b/Source/JavaScriptCore/jit/JITBitXorGenerator.cpp
new file mode 100644
index 000000000..8ccf1b5d6
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITBitXorGenerator.cpp
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JITBitXorGenerator.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+void JITBitXorGenerator::generateFastPath(CCallHelpers& jit)
+{
+ ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());
+
+ m_didEmitFastPath = true;
+
+ if (m_leftOperand.isConstInt32() || m_rightOperand.isConstInt32()) {
+ JSValueRegs var = m_leftOperand.isConstInt32() ? m_right : m_left;
+ SnippetOperand& constOpr = m_leftOperand.isConstInt32() ? m_leftOperand : m_rightOperand;
+
+ // Try to do intVar ^ intConstant.
+ m_slowPathJumpList.append(jit.branchIfNotInt32(var));
+
+ jit.moveValueRegs(var, m_result);
+#if USE(JSVALUE64)
+ jit.xor32(CCallHelpers::Imm32(constOpr.asConstInt32()), m_result.payloadGPR());
+ jit.or64(GPRInfo::tagTypeNumberRegister, m_result.payloadGPR());
+#else
+ jit.xor32(CCallHelpers::Imm32(constOpr.asConstInt32()), m_result.payloadGPR());
+#endif
+
+ } else {
+ ASSERT(!m_leftOperand.isConstInt32() && !m_rightOperand.isConstInt32());
+
+ // Try to do intVar ^ intVar.
+ m_slowPathJumpList.append(jit.branchIfNotInt32(m_left));
+ m_slowPathJumpList.append(jit.branchIfNotInt32(m_right));
+
+ jit.moveValueRegs(m_left, m_result);
+#if USE(JSVALUE64)
+ jit.xor64(m_right.payloadGPR(), m_result.payloadGPR());
+ jit.or64(GPRInfo::tagTypeNumberRegister, m_result.payloadGPR());
+#else
+ jit.xor32(m_right.payloadGPR(), m_result.payloadGPR());
+#endif
+ }
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
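
Unlike AND and OR, XOR cancels the TagTypeNumber bits (0xffff ^ 0xffff is 0), which is why the 64-bit paths above re-box the payload by OR-ing the tag back in after the xor. Illustration under the same assumed encoding as the bitand example:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        const uint64_t tagTypeNumber = 0xffff000000000000ull;   // assumed JSVALUE64 int32 tag
        uint64_t boxedA = tagTypeNumber | 5u;
        uint64_t boxedB = tagTypeNumber | 3u;

        uint64_t xored = boxedA ^ boxedB;                       // tag bits cancel to zero
        assert((xored & tagTypeNumber) == 0);
        uint64_t reboxed = xored | tagTypeNumber;               // restore the int32 tag
        assert(static_cast<int32_t>(static_cast<uint32_t>(reboxed)) == (5 ^ 3));
        return 0;
    }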
diff --git a/Source/JavaScriptCore/jit/JITBitXorGenerator.h b/Source/JavaScriptCore/jit/JITBitXorGenerator.h
new file mode 100644
index 000000000..81bbd15c2
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITBitXorGenerator.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITBitXorGenerator_h
+#define JITBitXorGenerator_h
+
+#if ENABLE(JIT)
+
+#include "JITBitBinaryOpGenerator.h"
+
+namespace JSC {
+
+class JITBitXorGenerator : public JITBitBinaryOpGenerator {
+public:
+ JITBitXorGenerator(const SnippetOperand& leftOperand, const SnippetOperand& rightOperand,
+ JSValueRegs result, JSValueRegs left, JSValueRegs right, GPRReg unused = InvalidGPRReg)
+ : JITBitBinaryOpGenerator(leftOperand, rightOperand, result, left, right, unused)
+ { }
+
+ void generateFastPath(CCallHelpers&);
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // JITBitXorGenerator_h
diff --git a/Source/JavaScriptCore/jit/JITCall.cpp b/Source/JavaScriptCore/jit/JITCall.cpp
index 5520a4d34..d406d5b6f 100644
--- a/Source/JavaScriptCore/jit/JITCall.cpp
+++ b/Source/JavaScriptCore/jit/JITCall.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2013-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,246 +29,303 @@
#if USE(JSVALUE64)
#include "JIT.h"
-#include "Arguments.h"
+#include "CallFrameShuffler.h"
#include "CodeBlock.h"
#include "JITInlines.h"
-#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "Interpreter.h"
-#include "Operations.h"
-#include "RepatchBuffer.h"
+#include "JSCInlines.h"
+#include "LinkBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
+#include "SetupVarargsFrame.h"
+#include "StackAlignment.h"
#include "ThunkGenerators.h"
#include <wtf/StringPrintStream.h>
-#ifndef NDEBUG
-#include <stdio.h>
-#endif
-
-using namespace std;
namespace JSC {
-void JIT::emit_op_call_put_result(Instruction* instruction)
+void JIT::emitPutCallResult(Instruction* instruction)
{
int dst = instruction[1].u.operand;
emitValueProfilingSite();
emitPutVirtualRegister(dst);
- if (canBeOptimizedOrInlined())
- killLastResultRegister(); // Make lastResultRegister tracking simpler in the DFG.
}
-void JIT::compileLoadVarargs(Instruction* instruction)
+void JIT::compileSetupVarargsFrame(Instruction* instruction, CallLinkInfo* info)
{
- int thisValue = instruction[2].u.operand;
- int arguments = instruction[3].u.operand;
- int firstFreeRegister = instruction[4].u.operand;
-
- killLastResultRegister();
-
- JumpList slowCase;
- JumpList end;
- bool canOptimize = m_codeBlock->usesArguments()
- && arguments == m_codeBlock->argumentsRegister()
- && !m_codeBlock->symbolTable()->slowArguments();
-
- if (canOptimize) {
- emitGetVirtualRegister(arguments, regT0);
- slowCase.append(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(JSValue()))));
-
- emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT0);
- slowCase.append(branch32(Above, regT0, TrustedImm32(Arguments::MaxArguments + 1)));
- // regT0: argumentCountIncludingThis
-
- move(regT0, regT1);
- add32(TrustedImm32(firstFreeRegister + JSStack::CallFrameHeaderSize), regT1);
- lshift32(TrustedImm32(3), regT1);
- addPtr(callFrameRegister, regT1);
- // regT1: newCallFrame
-
- slowCase.append(branchPtr(Below, AbsoluteAddress(m_vm->interpreter->stack().addressOfEnd()), regT1));
-
- // Initialize ArgumentCount.
- store32(regT0, Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
-
- // Initialize 'this'.
- emitGetVirtualRegister(thisValue, regT2);
- store64(regT2, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
-
- // Copy arguments.
- neg32(regT0);
- signExtend32ToPtr(regT0, regT0);
- end.append(branchAdd64(Zero, TrustedImm32(1), regT0));
- // regT0: -argumentCount
-
- Label copyLoop = label();
- load64(BaseIndex(callFrameRegister, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT2);
- store64(regT2, BaseIndex(regT1, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
- branchAdd64(NonZero, TrustedImm32(1), regT0).linkTo(copyLoop, this);
+ int thisValue = instruction[3].u.operand;
+ int arguments = instruction[4].u.operand;
+ int firstFreeRegister = instruction[5].u.operand;
+ int firstVarArgOffset = instruction[6].u.operand;
+
+ emitGetVirtualRegister(arguments, regT1);
+ callOperation(operationSizeFrameForVarargs, regT1, -firstFreeRegister, firstVarArgOffset);
+ move(TrustedImm32(-firstFreeRegister), regT1);
+ emitSetVarargsFrame(*this, returnValueGPR, false, regT1, regT1);
+ addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(void*)))), regT1, stackPointerRegister);
+ emitGetVirtualRegister(arguments, regT2);
+ callOperation(operationSetupVarargsFrame, regT1, regT2, firstVarArgOffset, regT0);
+ move(returnValueGPR, regT1);
+
+ // Profile the argument count.
+ load32(Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset), regT2);
+ load8(info->addressOfMaxNumArguments(), regT0);
+ Jump notBiggest = branch32(Above, regT0, regT2);
+ Jump notSaturated = branch32(BelowOrEqual, regT2, TrustedImm32(255));
+ move(TrustedImm32(255), regT2);
+ notSaturated.link(this);
+ store8(regT2, info->addressOfMaxNumArguments());
+ notBiggest.link(this);
+
+ // Initialize 'this'.
+ emitGetVirtualRegister(thisValue, regT0);
+ store64(regT0, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
- end.append(jump());
- }
+ addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), regT1, stackPointerRegister);
+}
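
Part of the new varargs setup is a saturating argument-count profile: the frame's ArgumentCount is compared against the CallLinkInfo's maxNumArguments byte and written back clamped to 255. In ordinary C++ the update amounts to the following (a sketch of the logic, not JSC code):

    #include <algorithm>
    #include <cstdint>

    void profileArgumentCount(uint8_t& maxNumArguments, uint32_t argumentCountIncludingThis)
    {
        uint32_t clamped = std::min<uint32_t>(argumentCountIncludingThis, 255);  // the byte saturates at 255
        if (clamped > maxNumArguments)
            maxNumArguments = static_cast<uint8_t>(clamped);
    }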
- if (canOptimize)
- slowCase.link(this);
+void JIT::compileCallEval(Instruction* instruction)
+{
+ addPtr(TrustedImm32(-static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))), stackPointerRegister, regT1);
+ storePtr(callFrameRegister, Address(regT1, CallFrame::callerFrameOffset()));
- JITStubCall stubCall(this, cti_op_load_varargs);
- stubCall.addArgument(thisValue, regT0);
- stubCall.addArgument(arguments, regT0);
- stubCall.addArgument(Imm32(firstFreeRegister));
- stubCall.call(regT1);
+ addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
+ checkStackPointerAlignment();
- if (canOptimize)
- end.link(this);
-}
+ callOperation(operationCallEval, regT1);
-void JIT::compileCallEval()
-{
- JITStubCall stubCall(this, cti_op_call_eval); // Initializes ScopeChain; ReturnPC; CodeBlock.
- stubCall.call();
addSlowCase(branch64(Equal, regT0, TrustedImm64(JSValue::encode(JSValue()))));
- emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister);
sampleCodeBlock(m_codeBlock);
+
+ emitPutCallResult(instruction);
}
-void JIT::compileCallEvalSlowCase(Vector<SlowCaseEntry>::iterator& iter)
+void JIT::compileCallEvalSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
{
+ CallLinkInfo* info = m_codeBlock->addCallLinkInfo();
+ info->setUpCall(CallLinkInfo::Call, CodeOrigin(m_bytecodeOffset), regT0);
+
linkSlowCase(iter);
+ int registerOffset = -instruction[4].u.operand;
- emitGetFromCallFrameHeader64(JSStack::Callee, regT0);
- emitNakedCall(m_vm->getCTIStub(virtualCallGenerator).code());
+ addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
+
+ load64(Address(stackPointerRegister, sizeof(Register) * JSStack::Callee - sizeof(CallerFrameAndPC)), regT0);
+ move(TrustedImmPtr(info), regT2);
+ MacroAssemblerCodeRef virtualThunk = virtualThunkFor(m_vm, *info);
+ info->setSlowStub(createJITStubRoutine(virtualThunk, *m_vm, nullptr, true));
+ emitNakedCall(virtualThunk.code());
+ addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
+ checkStackPointerAlignment();
sampleCodeBlock(m_codeBlock);
+
+ emitPutCallResult(instruction);
}
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
- int callee = instruction[1].u.operand;
+ int callee = instruction[2].u.operand;
/* Caller always:
- Updates callFrameRegister to callee callFrame.
- Initializes ArgumentCount; CallerFrame; Callee.
For a JS call:
- - Caller initializes ScopeChain.
- Callee initializes ReturnPC; CodeBlock.
- Callee restores callFrameRegister before return.
For a non-JS call:
- - Caller initializes ScopeChain; ReturnPC; CodeBlock.
+ - Caller initializes ReturnPC; CodeBlock.
- Caller restores callFrameRegister after return.
*/
-
- if (opcodeID == op_call_varargs)
- compileLoadVarargs(instruction);
+ COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct), call_and_construct_opcodes_must_be_same_length);
+ COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_varargs), call_and_call_varargs_opcodes_must_be_same_length);
+ COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct_varargs), call_and_construct_varargs_opcodes_must_be_same_length);
+ COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call), call_and_tail_call_opcodes_must_be_same_length);
+ COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call_varargs), call_and_tail_call_varargs_opcodes_must_be_same_length);
+ CallLinkInfo* info;
+ if (opcodeID != op_call_eval)
+ info = m_codeBlock->addCallLinkInfo();
+ if (opcodeID == op_call_varargs || opcodeID == op_construct_varargs || opcodeID == op_tail_call_varargs)
+ compileSetupVarargsFrame(instruction, info);
else {
- int argCount = instruction[2].u.operand;
- int registerOffset = instruction[3].u.operand;
+ int argCount = instruction[3].u.operand;
+ int registerOffset = -instruction[4].u.operand;
if (opcodeID == op_call && shouldEmitProfiling()) {
emitGetVirtualRegister(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0);
Jump done = emitJumpIfNotJSCell(regT0);
- loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
- storePtr(regT0, instruction[5].u.arrayProfile->addressOfLastSeenStructure());
+ load32(Address(regT0, JSCell::structureIDOffset()), regT0);
+ store32(regT0, instruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile->addressOfLastSeenStructureID());
done.link(this);
}
- addPtr(TrustedImm32(registerOffset * sizeof(Register)), callFrameRegister, regT1);
- store32(TrustedImm32(argCount), Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
- } // regT1 holds newCallFrame with ArgumentCount initialized.
+ addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
+ store32(TrustedImm32(argCount), Address(stackPointerRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
+ } // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized.
- store32(TrustedImm32(instruction - m_codeBlock->instructions().begin()), Address(callFrameRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ uint32_t bytecodeOffset = instruction - m_codeBlock->instructions().begin();
+ uint32_t locationBits = CallSiteIndex(bytecodeOffset).bits();
+ store32(TrustedImm32(locationBits), Address(callFrameRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + TagOffset));
emitGetVirtualRegister(callee, regT0); // regT0 holds callee.
- store64(callFrameRegister, Address(regT1, JSStack::CallerFrame * static_cast<int>(sizeof(Register))));
- store64(regT0, Address(regT1, JSStack::Callee * static_cast<int>(sizeof(Register))));
- move(regT1, callFrameRegister);
+ store64(regT0, Address(stackPointerRegister, JSStack::Callee * static_cast<int>(sizeof(Register)) - sizeof(CallerFrameAndPC)));
if (opcodeID == op_call_eval) {
- compileCallEval();
+ compileCallEval(instruction);
return;
}
DataLabelPtr addressOfLinkedFunctionCheck;
- BEGIN_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));
- END_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
addSlowCase(slowCase);
- ASSERT(m_callStructureStubCompilationInfo.size() == callLinkInfoIndex);
- m_callStructureStubCompilationInfo.append(StructureStubCompilationInfo());
- m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
- m_callStructureStubCompilationInfo[callLinkInfoIndex].callType = CallLinkInfo::callTypeFor(opcodeID);
- m_callStructureStubCompilationInfo[callLinkInfoIndex].bytecodeIndex = m_bytecodeOffset;
+ ASSERT(m_callCompilationInfo.size() == callLinkInfoIndex);
+ info->setUpCall(CallLinkInfo::callTypeFor(opcodeID), CodeOrigin(m_bytecodeOffset), regT0);
+ m_callCompilationInfo.append(CallCompilationInfo());
+ m_callCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
+ m_callCompilationInfo[callLinkInfoIndex].callLinkInfo = info;
+
+ if (opcodeID == op_tail_call) {
+ CallFrameShuffleData shuffleData;
+ shuffleData.tagTypeNumber = GPRInfo::tagTypeNumberRegister;
+ shuffleData.numLocals =
+ instruction[4].u.operand - sizeof(CallerFrameAndPC) / sizeof(Register);
+ shuffleData.args.resize(instruction[3].u.operand);
+ for (int i = 0; i < instruction[3].u.operand; ++i) {
+ shuffleData.args[i] =
+ ValueRecovery::displacedInJSStack(
+ virtualRegisterForArgument(i) - instruction[4].u.operand,
+ DataFormatJS);
+ }
+ shuffleData.callee =
+ ValueRecovery::inGPR(regT0, DataFormatJS);
+ shuffleData.setupCalleeSaveRegisters(m_codeBlock);
+ info->setFrameShuffleData(shuffleData);
+ CallFrameShuffler(*this, shuffleData).prepareForTailCall();
+ m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedTailCall();
+ return;
+ }
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT1);
- emitPutToCallFrameHeader(regT1, JSStack::ScopeChain);
- m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
+ if (opcodeID == op_tail_call_varargs) {
+ emitRestoreCalleeSaves();
+ prepareForTailCallSlow();
+ m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedTailCall();
+ return;
+ }
+
+ m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
+
+ addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
+ checkStackPointerAlignment();
sampleCodeBlock(m_codeBlock);
+
+ emitPutCallResult(instruction);
}
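
The caller/callee contract comment and the stack-pointer arithmetic above are easier to follow with a concrete model. The sketch below is editorial, standalone C++ with invented slot indices and sizes (not JSC's real Register/JSStack layout); it only demonstrates how a header slot of the not-yet-entered callee frame is addressed relative to a stack pointer that already carries the CallerFrameAndPC bias.

// Editorial sketch only: made-up constants, plain C++.
#include <cassert>
#include <cstdio>

namespace sketch {

constexpr int kRegisterSize = 8;          // hypothetical sizeof(Register) on 64-bit
constexpr int kCallerFrameAndPCSize = 16; // saved caller frame pointer + return PC
constexpr int kArgumentCountSlot = 1;     // hypothetical JSStack::ArgumentCount index
constexpr int kCalleeSlot = 2;            // hypothetical JSStack::Callee index

// After "addPtr(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC),
// callFrameRegister, stackPointerRegister)", SP points at
// newFrame + sizeof(CallerFrameAndPC); a header slot of the new frame is then
// reached from SP by subtracting that bias again.
constexpr int spRelativeOffset(int slotIndex)
{
    return slotIndex * kRegisterSize - kCallerFrameAndPCSize;
}

} // namespace sketch

int main()
{
    std::printf("ArgumentCount payload at SP%+d, Callee at SP%+d\n",
                sketch::spRelativeOffset(sketch::kArgumentCountSlot),
                sketch::spRelativeOffset(sketch::kCalleeSlot));
    assert(sketch::spRelativeOffset(sketch::kArgumentCountSlot) == -8);
    return 0;
}

With JSC's real slot indices the numbers differ, but the shape of the computation is the same one the stores above perform against stackPointerRegister.
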
-void JIT::compileOpCallSlowCase(OpcodeID opcodeID, Instruction*, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex)
+void JIT::compileOpCallSlowCase(OpcodeID opcodeID, Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex)
{
if (opcodeID == op_call_eval) {
- compileCallEvalSlowCase(iter);
+ compileCallEvalSlowCase(instruction, iter);
return;
}
linkSlowCase(iter);
-
- m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? m_vm->getCTIStub(linkConstructGenerator).code() : m_vm->getCTIStub(linkCallGenerator).code());
+
+ if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs)
+ emitRestoreCalleeSaves();
+
+ move(TrustedImmPtr(m_callCompilationInfo[callLinkInfoIndex].callLinkInfo), regT2);
+
+ m_callCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_vm->getCTIStub(linkCallThunkGenerator).code());
+
+ if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs) {
+ abortWithReason(JITDidReturnFromTailCall);
+ return;
+ }
+
+ addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
+ checkStackPointerAlignment();
sampleCodeBlock(m_codeBlock);
+
+ emitPutCallResult(instruction);
}
-void JIT::privateCompileClosureCall(CallLinkInfo* callLinkInfo, CodeBlock* calleeCodeBlock, Structure* expectedStructure, ExecutableBase* expectedExecutable, MacroAssemblerCodePtr codePtr)
+void JIT::emit_op_call(Instruction* currentInstruction)
{
- JumpList slowCases;
-
- slowCases.append(branchTestPtr(NonZero, regT0, tagMaskRegister));
- slowCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(expectedStructure)));
- slowCases.append(branchPtr(NotEqual, Address(regT0, JSFunction::offsetOfExecutable()), TrustedImmPtr(expectedExecutable)));
-
- loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT1);
- emitPutToCallFrameHeader(regT1, JSStack::ScopeChain);
-
- Call call = nearCall();
- Jump done = jump();
-
- slowCases.link(this);
- move(TrustedImmPtr(callLinkInfo->callReturnLocation.executableAddress()), regT2);
- restoreReturnAddressBeforeReturn(regT2);
- Jump slow = jump();
-
- LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
-
- patchBuffer.link(call, FunctionPtr(codePtr.executableAddress()));
- patchBuffer.link(done, callLinkInfo->hotPathOther.labelAtOffset(0));
- patchBuffer.link(slow, CodeLocationLabel(m_vm->getCTIStub(virtualCallGenerator).code()));
-
- RefPtr<ClosureCallStubRoutine> stubRoutine = adoptRef(new ClosureCallStubRoutine(
- FINALIZE_CODE(
- patchBuffer,
- ("Baseline closure call stub for %s, return point %p, target %p (%s)",
- toCString(*m_codeBlock).data(),
- callLinkInfo->hotPathOther.labelAtOffset(0).executableAddress(),
- codePtr.executableAddress(),
- toCString(pointerDump(calleeCodeBlock)).data())),
- *m_vm, m_codeBlock->ownerExecutable(), expectedStructure, expectedExecutable,
- callLinkInfo->codeOrigin));
-
- RepatchBuffer repatchBuffer(m_codeBlock);
+ compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_tail_call(Instruction* currentInstruction)
+{
+ compileOpCall(op_tail_call, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_call_eval(Instruction* currentInstruction)
+{
+ compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex);
+}
+
+void JIT::emit_op_call_varargs(Instruction* currentInstruction)
+{
+ compileOpCall(op_call_varargs, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_tail_call_varargs(Instruction* currentInstruction)
+{
+ compileOpCall(op_tail_call_varargs, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_construct_varargs(Instruction* currentInstruction)
+{
+ compileOpCall(op_construct_varargs, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_construct(Instruction* currentInstruction)
+{
+ compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(op_call, currentInstruction, iter, m_callLinkInfoIndex++);
+}
+
+void JIT::emitSlow_op_tail_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(op_tail_call, currentInstruction, iter, m_callLinkInfoIndex++);
+}
+
+void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(op_call_eval, currentInstruction, iter, m_callLinkInfoIndex);
+}
+
+void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(op_call_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
+}
+
+void JIT::emitSlow_op_tail_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(op_tail_call_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
+}
- repatchBuffer.replaceWithJump(
- RepatchBuffer::startOfBranchPtrWithPatchOnRegister(callLinkInfo->hotPathBegin),
- CodeLocationLabel(stubRoutine->code().code()));
- repatchBuffer.relink(callLinkInfo->callReturnLocation, m_vm->getCTIStub(virtualCallGenerator).code());
+void JIT::emitSlow_op_construct_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(op_construct_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
+}
- callLinkInfo->stub = stubRoutine.release();
+void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(op_construct, currentInstruction, iter, m_callLinkInfoIndex++);
}
} // namespace JSC
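
The op_tail_call path above builds a CallFrameShuffleData describing where every argument currently lives so that CallFrameShuffler can rebuild the frame in place before jumping. As an editorial aside, here is a minimal standalone C++ model (invented names, no JSC types) of the core problem the shuffler solves: the callee's frame overlaps the caller's frame it replaces, so slots must be moved like an overlapping memmove, in an order that never clobbers a value that is still needed.

// Editorial sketch; the real CallFrameShuffler also juggles registers and the
// ValueRecovery descriptions built in compileOpCall above.
#include <cassert>
#include <cstddef>
#include <vector>

static void moveOverlappingSlots(std::vector<int>& stack,
                                 std::size_t oldBase, std::size_t newBase, std::size_t count)
{
    if (newBase < oldBase) {
        for (std::size_t i = 0; i < count; ++i)     // moving "down": copy forward
            stack[newBase + i] = stack[oldBase + i];
    } else {
        for (std::size_t i = count; i-- > 0;)       // moving "up": copy backward
            stack[newBase + i] = stack[oldBase + i];
    }
}

int main()
{
    std::vector<int> stack { 0, 1, 2, 3, 4, 5, 6, 7 };
    moveOverlappingSlots(stack, /*oldBase*/ 3, /*newBase*/ 1, /*count*/ 4);
    assert(stack[1] == 3 && stack[2] == 4 && stack[3] == 5 && stack[4] == 6);
    return 0;
}
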
diff --git a/Source/JavaScriptCore/jit/JITCall32_64.cpp b/Source/JavaScriptCore/jit/JITCall32_64.cpp
index c8be31206..4d163c2d4 100644
--- a/Source/JavaScriptCore/jit/JITCall32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITCall32_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2013-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,28 +29,23 @@
#if USE(JSVALUE32_64)
#include "JIT.h"
-#include "Arguments.h"
#include "CodeBlock.h"
#include "Interpreter.h"
#include "JITInlines.h"
-#include "JITStubCall.h"
#include "JSArray.h"
#include "JSFunction.h"
-#include "Operations.h"
-#include "RepatchBuffer.h"
+#include "JSCInlines.h"
+#include "LinkBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
+#include "SetupVarargsFrame.h"
+#include "StackAlignment.h"
#include <wtf/StringPrintStream.h>
-#ifndef NDEBUG
-#include <stdio.h>
-#endif
-
-using namespace std;
namespace JSC {
-void JIT::emit_op_call_put_result(Instruction* instruction)
+void JIT::emitPutCallResult(Instruction* instruction)
{
int dst = instruction[1].u.operand;
emitValueProfilingSite();
@@ -62,43 +57,21 @@ void JIT::emit_op_ret(Instruction* currentInstruction)
unsigned dst = currentInstruction[1].u.operand;
emitLoad(dst, regT1, regT0);
- emitGetFromCallFrameHeaderPtr(JSStack::ReturnPC, regT2);
- emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister);
- restoreReturnAddressBeforeReturn(regT2);
+ checkStackPointerAlignment();
+ emitRestoreCalleeSaves();
+ emitFunctionEpilogue();
ret();
}
-void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction)
+void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned result = currentInstruction[1].u.operand;
- unsigned thisReg = currentInstruction[2].u.operand;
-
- emitLoad(result, regT1, regT0);
- Jump notJSCell = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- Jump notObject = emitJumpIfNotObject(regT2);
-
- emitGetFromCallFrameHeaderPtr(JSStack::ReturnPC, regT2);
- emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister);
-
- restoreReturnAddressBeforeReturn(regT2);
- ret();
-
- notJSCell.link(this);
- notObject.link(this);
- emitLoad(thisReg, regT1, regT0);
-
- emitGetFromCallFrameHeaderPtr(JSStack::ReturnPC, regT2);
- emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister);
-
- restoreReturnAddressBeforeReturn(regT2);
- ret();
+ compileOpCallSlowCase(op_call, currentInstruction, iter, m_callLinkInfoIndex++);
}
-void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_tail_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- compileOpCallSlowCase(op_call, currentInstruction, iter, m_callLinkInfoIndex++);
+ compileOpCallSlowCase(op_tail_call, currentInstruction, iter, m_callLinkInfoIndex++);
}
void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -111,6 +84,16 @@ void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowC
compileOpCallSlowCase(op_call_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
}
+void JIT::emitSlow_op_tail_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(op_tail_call_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
+}
+
+void JIT::emitSlow_op_construct_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(op_construct_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
+}
+
void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
compileOpCallSlowCase(op_construct, currentInstruction, iter, m_callLinkInfoIndex++);
@@ -121,6 +104,11 @@ void JIT::emit_op_call(Instruction* currentInstruction)
compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
}
+void JIT::emit_op_tail_call(Instruction* currentInstruction)
+{
+ compileOpCall(op_tail_call, currentInstruction, m_callLinkInfoIndex++);
+}
+
void JIT::emit_op_call_eval(Instruction* currentInstruction)
{
compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex);
@@ -131,224 +119,207 @@ void JIT::emit_op_call_varargs(Instruction* currentInstruction)
compileOpCall(op_call_varargs, currentInstruction, m_callLinkInfoIndex++);
}
+void JIT::emit_op_tail_call_varargs(Instruction* currentInstruction)
+{
+ compileOpCall(op_tail_call_varargs, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_construct_varargs(Instruction* currentInstruction)
+{
+ compileOpCall(op_construct_varargs, currentInstruction, m_callLinkInfoIndex++);
+}
+
void JIT::emit_op_construct(Instruction* currentInstruction)
{
compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
}
-void JIT::compileLoadVarargs(Instruction* instruction)
+void JIT::compileSetupVarargsFrame(Instruction* instruction, CallLinkInfo* info)
{
- int thisValue = instruction[2].u.operand;
- int arguments = instruction[3].u.operand;
- int firstFreeRegister = instruction[4].u.operand;
-
- JumpList slowCase;
- JumpList end;
- bool canOptimize = m_codeBlock->usesArguments()
- && arguments == m_codeBlock->argumentsRegister()
- && !m_codeBlock->symbolTable()->slowArguments();
-
- if (canOptimize) {
- emitLoadTag(arguments, regT1);
- slowCase.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag)));
-
- load32(payloadFor(JSStack::ArgumentCount), regT2);
- slowCase.append(branch32(Above, regT2, TrustedImm32(Arguments::MaxArguments + 1)));
- // regT2: argumentCountIncludingThis
-
- move(regT2, regT3);
- add32(TrustedImm32(firstFreeRegister + JSStack::CallFrameHeaderSize), regT3);
- lshift32(TrustedImm32(3), regT3);
- addPtr(callFrameRegister, regT3);
- // regT3: newCallFrame
-
- slowCase.append(branchPtr(Below, AbsoluteAddress(m_vm->interpreter->stack().addressOfEnd()), regT3));
-
- // Initialize ArgumentCount.
- store32(regT2, payloadFor(JSStack::ArgumentCount, regT3));
-
- // Initialize 'this'.
- emitLoad(thisValue, regT1, regT0);
- store32(regT0, Address(regT3, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
- store32(regT1, Address(regT3, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
-
- // Copy arguments.
- neg32(regT2);
- end.append(branchAdd32(Zero, TrustedImm32(1), regT2));
- // regT2: -argumentCount;
-
- Label copyLoop = label();
- load32(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))), regT0);
- load32(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))), regT1);
- store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
- store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
- branchAdd32(NonZero, TrustedImm32(1), regT2).linkTo(copyLoop, this);
-
- end.append(jump());
- }
+ int thisValue = instruction[3].u.operand;
+ int arguments = instruction[4].u.operand;
+ int firstFreeRegister = instruction[5].u.operand;
+ int firstVarArgOffset = instruction[6].u.operand;
+
+ emitLoad(arguments, regT1, regT0);
+ callOperation(operationSizeFrameForVarargs, regT1, regT0, -firstFreeRegister, firstVarArgOffset);
+ move(TrustedImm32(-firstFreeRegister), regT1);
+ emitSetVarargsFrame(*this, returnValueGPR, false, regT1, regT1);
+ addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 6 * sizeof(void*)))), regT1, stackPointerRegister);
+ emitLoad(arguments, regT2, regT4);
+ callOperation(operationSetupVarargsFrame, regT1, regT2, regT4, firstVarArgOffset, regT0);
+ move(returnValueGPR, regT1);
+
+ // Profile the argument count.
+ load32(Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset), regT2);
+ load8(info->addressOfMaxNumArguments(), regT0);
+ Jump notBiggest = branch32(Above, regT0, regT2);
+ Jump notSaturated = branch32(BelowOrEqual, regT2, TrustedImm32(255));
+ move(TrustedImm32(255), regT2);
+ notSaturated.link(this);
+ store8(regT2, info->addressOfMaxNumArguments());
+ notBiggest.link(this);
+
+ // Initialize 'this'.
+ emitLoad(thisValue, regT2, regT0);
+ store32(regT0, Address(regT1, PayloadOffset + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
+ store32(regT2, Address(regT1, TagOffset + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
+
+ addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), regT1, stackPointerRegister);
+}
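
The "Profile the argument count" block above is a saturating, byte-sized maximum. A hedged restatement in plain C++ (hypothetical struct; the real field is the CallLinkInfo byte reached through addressOfMaxNumArguments(), and the baseline JIT emits the equivalent compare/branch sequence inline):

#include <cassert>
#include <cstdint>

struct HypotheticalCallLinkProfile {
    uint8_t maxNumArguments { 0 };

    void recordArgumentCount(uint32_t argumentCountIncludingThis)
    {
        if (maxNumArguments > argumentCountIncludingThis)   // "notBiggest" branch
            return;
        if (argumentCountIncludingThis > 255)               // saturate ("notSaturated" skipped)
            argumentCountIncludingThis = 255;
        maxNumArguments = static_cast<uint8_t>(argumentCountIncludingThis);
    }
};

int main()
{
    HypotheticalCallLinkProfile profile;
    profile.recordArgumentCount(3);
    profile.recordArgumentCount(1000);
    assert(profile.maxNumArguments == 255);
    return 0;
}
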
- if (canOptimize)
- slowCase.link(this);
+void JIT::compileCallEval(Instruction* instruction)
+{
+ addPtr(TrustedImm32(-static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))), stackPointerRegister, regT1);
+ storePtr(callFrameRegister, Address(regT1, CallFrame::callerFrameOffset()));
- JITStubCall stubCall(this, cti_op_load_varargs);
- stubCall.addArgument(thisValue);
- stubCall.addArgument(arguments);
- stubCall.addArgument(Imm32(firstFreeRegister));
- stubCall.call(regT3);
+ addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
- if (canOptimize)
- end.link(this);
-}
+ callOperation(operationCallEval, regT1);
-void JIT::compileCallEval()
-{
- JITStubCall stubCall(this, cti_op_call_eval); // Initializes ScopeChain; ReturnPC; CodeBlock.
- stubCall.call();
addSlowCase(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));
- emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister);
sampleCodeBlock(m_codeBlock);
+
+ emitPutCallResult(instruction);
}
-void JIT::compileCallEvalSlowCase(Vector<SlowCaseEntry>::iterator& iter)
+void JIT::compileCallEvalSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
{
+ CallLinkInfo* info = m_codeBlock->addCallLinkInfo();
+ info->setUpCall(CallLinkInfo::Call, CodeOrigin(m_bytecodeOffset), regT0);
+
linkSlowCase(iter);
+ int registerOffset = -instruction[4].u.operand;
+
+ addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
+
+ loadPtr(Address(stackPointerRegister, sizeof(Register) * JSStack::Callee - sizeof(CallerFrameAndPC)), regT0);
+ loadPtr(Address(stackPointerRegister, sizeof(Register) * JSStack::Callee - sizeof(CallerFrameAndPC)), regT1);
+ move(TrustedImmPtr(info), regT2);
+
emitLoad(JSStack::Callee, regT1, regT0);
- emitNakedCall(m_vm->getCTIStub(virtualCallGenerator).code());
+ MacroAssemblerCodeRef virtualThunk = virtualThunkFor(m_vm, *info);
+ info->setSlowStub(createJITStubRoutine(virtualThunk, *m_vm, nullptr, true));
+ emitNakedCall(virtualThunk.code());
+ addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
+ checkStackPointerAlignment();
sampleCodeBlock(m_codeBlock);
+
+ emitPutCallResult(instruction);
}
void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned callLinkInfoIndex)
{
- int callee = instruction[1].u.operand;
+ int callee = instruction[2].u.operand;
/* Caller always:
- Updates callFrameRegister to callee callFrame.
- Initializes ArgumentCount; CallerFrame; Callee.
For a JS call:
- - Caller initializes ScopeChain.
- Callee initializes ReturnPC; CodeBlock.
- Callee restores callFrameRegister before return.
For a non-JS call:
- - Caller initializes ScopeChain; ReturnPC; CodeBlock.
+ - Caller initializes ReturnPC; CodeBlock.
- Caller restores callFrameRegister after return.
*/
-
- if (opcodeID == op_call_varargs)
- compileLoadVarargs(instruction);
+ CallLinkInfo* info;
+ if (opcodeID != op_call_eval)
+ info = m_codeBlock->addCallLinkInfo();
+ if (opcodeID == op_call_varargs || opcodeID == op_construct_varargs || opcodeID == op_tail_call_varargs)
+ compileSetupVarargsFrame(instruction, info);
else {
- int argCount = instruction[2].u.operand;
- int registerOffset = instruction[3].u.operand;
+ int argCount = instruction[3].u.operand;
+ int registerOffset = -instruction[4].u.operand;
if (opcodeID == op_call && shouldEmitProfiling()) {
emitLoad(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0, regT1);
Jump done = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
- loadPtr(Address(regT1, JSCell::structureOffset()), regT1);
- storePtr(regT1, instruction[5].u.arrayProfile->addressOfLastSeenStructure());
+ loadPtr(Address(regT1, JSCell::structureIDOffset()), regT1);
+ storePtr(regT1, instruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile->addressOfLastSeenStructureID());
done.link(this);
}
- addPtr(TrustedImm32(registerOffset * sizeof(Register)), callFrameRegister, regT3);
+ addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
- store32(TrustedImm32(argCount), payloadFor(JSStack::ArgumentCount, regT3));
- } // regT3 holds newCallFrame with ArgumentCount initialized.
+ store32(TrustedImm32(argCount), Address(stackPointerRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
+ } // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized.
- storePtr(TrustedImmPtr(instruction), tagFor(JSStack::ArgumentCount, callFrameRegister));
+ uint32_t locationBits = CallSiteIndex(instruction).bits();
+ store32(TrustedImm32(locationBits), tagFor(JSStack::ArgumentCount, callFrameRegister));
emitLoad(callee, regT1, regT0); // regT1, regT0 holds callee.
- storePtr(callFrameRegister, Address(regT3, JSStack::CallerFrame * static_cast<int>(sizeof(Register))));
- emitStore(JSStack::Callee, regT1, regT0, regT3);
- move(regT3, callFrameRegister);
+ store32(regT0, Address(stackPointerRegister, JSStack::Callee * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
+ store32(regT1, Address(stackPointerRegister, JSStack::Callee * static_cast<int>(sizeof(Register)) + TagOffset - sizeof(CallerFrameAndPC)));
if (opcodeID == op_call_eval) {
- compileCallEval();
+ compileCallEval(instruction);
return;
}
+ if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs)
+ emitRestoreCalleeSaves();
+
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
+
DataLabelPtr addressOfLinkedFunctionCheck;
- BEGIN_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));
- END_UNINTERRUPTED_SEQUENCE(sequenceOpCall);
addSlowCase(slowCase);
- addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
- ASSERT(m_callStructureStubCompilationInfo.size() == callLinkInfoIndex);
- m_callStructureStubCompilationInfo.append(StructureStubCompilationInfo());
- m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
- m_callStructureStubCompilationInfo[callLinkInfoIndex].callType = CallLinkInfo::callTypeFor(opcodeID);
- m_callStructureStubCompilationInfo[callLinkInfoIndex].bytecodeIndex = m_bytecodeOffset;
+ ASSERT(m_callCompilationInfo.size() == callLinkInfoIndex);
+ info->setUpCall(CallLinkInfo::callTypeFor(opcodeID), CodeOrigin(m_bytecodeOffset), regT0);
+ m_callCompilationInfo.append(CallCompilationInfo());
+ m_callCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
+ m_callCompilationInfo[callLinkInfoIndex].callLinkInfo = info;
+
+ checkStackPointerAlignment();
+ if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs) {
+ prepareForTailCallSlow();
+ m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedTailCall();
+ return;
+ }
+
+ m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT1);
- emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
- m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
+ addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
+ checkStackPointerAlignment();
sampleCodeBlock(m_codeBlock);
+ emitPutCallResult(instruction);
}
-void JIT::compileOpCallSlowCase(OpcodeID opcodeID, Instruction*, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex)
+void JIT::compileOpCallSlowCase(OpcodeID opcodeID, Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter, unsigned callLinkInfoIndex)
{
if (opcodeID == op_call_eval) {
- compileCallEvalSlowCase(iter);
+ compileCallEvalSlowCase(instruction, iter);
return;
}
linkSlowCase(iter);
linkSlowCase(iter);
-
- m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? m_vm->getCTIStub(linkConstructGenerator).code() : m_vm->getCTIStub(linkCallGenerator).code());
- sampleCodeBlock(m_codeBlock);
-}
+ move(TrustedImmPtr(m_callCompilationInfo[callLinkInfoIndex].callLinkInfo), regT2);
-void JIT::privateCompileClosureCall(CallLinkInfo* callLinkInfo, CodeBlock* calleeCodeBlock, Structure* expectedStructure, ExecutableBase* expectedExecutable, MacroAssemblerCodePtr codePtr)
-{
- JumpList slowCases;
+ if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs)
+ emitRestoreCalleeSaves();
- slowCases.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
- slowCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(expectedStructure)));
- slowCases.append(branchPtr(NotEqual, Address(regT0, JSFunction::offsetOfExecutable()), TrustedImmPtr(expectedExecutable)));
-
- loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT1);
- emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
-
- Call call = nearCall();
- Jump done = jump();
-
- slowCases.link(this);
- move(TrustedImmPtr(callLinkInfo->callReturnLocation.executableAddress()), regT2);
- restoreReturnAddressBeforeReturn(regT2);
- Jump slow = jump();
-
- LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
-
- patchBuffer.link(call, FunctionPtr(codePtr.executableAddress()));
- patchBuffer.link(done, callLinkInfo->hotPathOther.labelAtOffset(0));
- patchBuffer.link(slow, CodeLocationLabel(m_vm->getCTIStub(virtualCallGenerator).code()));
-
- RefPtr<ClosureCallStubRoutine> stubRoutine = adoptRef(new ClosureCallStubRoutine(
- FINALIZE_CODE(
- patchBuffer,
- ("Baseline closure call stub for %s, return point %p, target %p (%s)",
- toCString(*m_codeBlock).data(),
- callLinkInfo->hotPathOther.labelAtOffset(0).executableAddress(),
- codePtr.executableAddress(),
- toCString(pointerDump(calleeCodeBlock)).data())),
- *m_vm, m_codeBlock->ownerExecutable(), expectedStructure, expectedExecutable,
- callLinkInfo->codeOrigin));
-
- RepatchBuffer repatchBuffer(m_codeBlock);
-
- repatchBuffer.replaceWithJump(
- RepatchBuffer::startOfBranchPtrWithPatchOnRegister(callLinkInfo->hotPathBegin),
- CodeLocationLabel(stubRoutine->code().code()));
- repatchBuffer.relink(callLinkInfo->callReturnLocation, m_vm->getCTIStub(virtualCallGenerator).code());
-
- callLinkInfo->stub = stubRoutine.release();
+ m_callCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_vm->getCTIStub(linkCallThunkGenerator).code());
+
+ if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs) {
+ abortWithReason(JITDidReturnFromTailCall);
+ return;
+ }
+
+ addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
+ checkStackPointerAlignment();
+
+ sampleCodeBlock(m_codeBlock);
+ emitPutCallResult(instruction);
}
} // namespace JSC
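
The two 32-bit Callee stores in the 32_64 compileOpCall above rely on the JSVALUE32_64 representation: a value is a 32-bit tag plus a 32-bit payload, so writing the callee into its frame slot takes a payload store and a tag store where the 64-bit JIT uses one store64. Editorial sketch with an invented tag constant, not JSC's real encoding:

#include <cassert>
#include <cstdint>

struct EncodedValue32_64 {
    uint32_t payload; // cell pointer, int32, or half of a double
    int32_t tag;      // discriminates cell / int32 / double halves
};

static const int32_t kHypotheticalCellTag = -1;

int main()
{
    uint32_t fakeCellPointer = 0x1000;
    EncodedValue32_64 slot;
    slot.payload = fakeCellPointer;  // corresponds to store32(..., PayloadOffset)
    slot.tag = kHypotheticalCellTag; // corresponds to store32(..., TagOffset)
    // The hot path's cell check corresponds to branch32(NotEqual, tag, CellTag).
    assert(slot.tag == kHypotheticalCellTag && slot.payload == fakeCellPointer);
    return 0;
}
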
diff --git a/Source/JavaScriptCore/jit/JITCode.cpp b/Source/JavaScriptCore/jit/JITCode.cpp
index 5cfa6304b..26b69c542 100644
--- a/Source/JavaScriptCore/jit/JITCode.cpp
+++ b/Source/JavaScriptCore/jit/JITCode.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,32 +26,212 @@
#include "config.h"
#include "JITCode.h"
+#include "LLIntThunks.h"
+#include "JSCInlines.h"
+#include "ProtoCallFrame.h"
#include <wtf/PrintStream.h>
-namespace WTF {
+namespace JSC {
-void printInternal(PrintStream& out, JSC::JITCode::JITType type)
+JITCode::JITCode(JITType jitType)
+ : m_jitType(jitType)
+{
+}
+
+JITCode::~JITCode()
+{
+}
+
+const char* JITCode::typeName(JITType jitType)
{
- switch (type) {
- case JSC::JITCode::None:
- out.print("None");
- return;
- case JSC::JITCode::HostCallThunk:
- out.print("Host");
- return;
- case JSC::JITCode::InterpreterThunk:
- out.print("LLInt");
- return;
- case JSC::JITCode::BaselineJIT:
- out.print("Baseline");
- return;
- case JSC::JITCode::DFGJIT:
- out.print("DFG");
- return;
+ switch (jitType) {
+ case None:
+ return "None";
+ case HostCallThunk:
+ return "Host";
+ case InterpreterThunk:
+ return "LLInt";
+ case BaselineJIT:
+ return "Baseline";
+ case DFGJIT:
+ return "DFG";
+ case FTLJIT:
+ return "FTL";
default:
CRASH();
- return;
+ return "";
+ }
+}
+
+void JITCode::validateReferences(const TrackedReferences&)
+{
+}
+
+JSValue JITCode::execute(VM* vm, ProtoCallFrame* protoCallFrame)
+{
+ void* entryAddress;
+ JSFunction* function = jsDynamicCast<JSFunction*>(protoCallFrame->callee());
+
+ if (!function || !protoCallFrame->needArityCheck()) {
+ ASSERT(!protoCallFrame->needArityCheck());
+ entryAddress = executableAddress();
+ } else
+ entryAddress = addressForCall(MustCheckArity).executableAddress();
+ JSValue result = JSValue::decode(vmEntryToJavaScript(entryAddress, vm, protoCallFrame));
+ return vm->exception() ? jsNull() : result;
+}
+
+DFG::CommonData* JITCode::dfgCommon()
+{
+ RELEASE_ASSERT_NOT_REACHED();
+ return 0;
+}
+
+DFG::JITCode* JITCode::dfg()
+{
+ RELEASE_ASSERT_NOT_REACHED();
+ return 0;
+}
+
+FTL::JITCode* JITCode::ftl()
+{
+ RELEASE_ASSERT_NOT_REACHED();
+ return 0;
+}
+
+FTL::ForOSREntryJITCode* JITCode::ftlForOSREntry()
+{
+ RELEASE_ASSERT_NOT_REACHED();
+ return 0;
+}
+
+JITCodeWithCodeRef::JITCodeWithCodeRef(JITType jitType)
+ : JITCode(jitType)
+{
+}
+
+JITCodeWithCodeRef::JITCodeWithCodeRef(CodeRef ref, JITType jitType)
+ : JITCode(jitType)
+ , m_ref(ref)
+{
+}
+
+JITCodeWithCodeRef::~JITCodeWithCodeRef()
+{
+ if ((Options::dumpDisassembly() || (isOptimizingJIT(jitType()) && Options::dumpDFGDisassembly()))
+ && m_ref.executableMemory())
+ dataLog("Destroying JIT code at ", pointerDump(m_ref.executableMemory()), "\n");
+}
+
+void* JITCodeWithCodeRef::executableAddressAtOffset(size_t offset)
+{
+ RELEASE_ASSERT(m_ref);
+ return reinterpret_cast<char*>(m_ref.code().executableAddress()) + offset;
+}
+
+void* JITCodeWithCodeRef::dataAddressAtOffset(size_t offset)
+{
+ RELEASE_ASSERT(m_ref);
+ ASSERT(offset <= size()); // use <= instead of < because it is valid to ask for an address at the exclusive end of the code.
+ return reinterpret_cast<char*>(m_ref.code().dataLocation()) + offset;
+}
+
+unsigned JITCodeWithCodeRef::offsetOf(void* pointerIntoCode)
+{
+ RELEASE_ASSERT(m_ref);
+ intptr_t result = reinterpret_cast<intptr_t>(pointerIntoCode) - reinterpret_cast<intptr_t>(m_ref.code().executableAddress());
+ ASSERT(static_cast<intptr_t>(static_cast<unsigned>(result)) == result);
+ return static_cast<unsigned>(result);
+}
+
+size_t JITCodeWithCodeRef::size()
+{
+ RELEASE_ASSERT(m_ref);
+ return m_ref.size();
+}
+
+bool JITCodeWithCodeRef::contains(void* address)
+{
+ RELEASE_ASSERT(m_ref);
+ return m_ref.executableMemory()->contains(address);
+}
+
+DirectJITCode::DirectJITCode(JITType jitType)
+ : JITCodeWithCodeRef(jitType)
+{
+}
+
+DirectJITCode::DirectJITCode(JITCode::CodeRef ref, JITCode::CodePtr withArityCheck, JITType jitType)
+ : JITCodeWithCodeRef(ref, jitType)
+ , m_withArityCheck(withArityCheck)
+{
+}
+
+DirectJITCode::~DirectJITCode()
+{
+}
+
+void DirectJITCode::initializeCodeRef(JITCode::CodeRef ref, JITCode::CodePtr withArityCheck)
+{
+ RELEASE_ASSERT(!m_ref);
+ m_ref = ref;
+ m_withArityCheck = withArityCheck;
+}
+
+JITCode::CodePtr DirectJITCode::addressForCall(ArityCheckMode arity)
+{
+ switch (arity) {
+ case ArityCheckNotRequired:
+ RELEASE_ASSERT(m_ref);
+ return m_ref.code();
+ case MustCheckArity:
+ RELEASE_ASSERT(m_withArityCheck);
+ return m_withArityCheck;
}
+ RELEASE_ASSERT_NOT_REACHED();
+ return CodePtr();
+}
+
+NativeJITCode::NativeJITCode(JITType jitType)
+ : JITCodeWithCodeRef(jitType)
+{
+}
+
+NativeJITCode::NativeJITCode(CodeRef ref, JITType jitType)
+ : JITCodeWithCodeRef(ref, jitType)
+{
+}
+
+NativeJITCode::~NativeJITCode()
+{
+}
+
+void NativeJITCode::initializeCodeRef(CodeRef ref)
+{
+ ASSERT(!m_ref);
+ m_ref = ref;
+}
+
+JITCode::CodePtr NativeJITCode::addressForCall(ArityCheckMode)
+{
+ RELEASE_ASSERT(!!m_ref);
+ return m_ref.code();
+}
+
+#if ENABLE(JIT)
+RegisterSet JITCode::liveRegistersToPreserveAtExceptionHandlingCallSite(CodeBlock*, CallSiteIndex)
+{
+ return RegisterSet();
+}
+#endif
+
+} // namespace JSC
+
+namespace WTF {
+
+void printInternal(PrintStream& out, JSC::JITCode::JITType type)
+{
+ out.print(JSC::JITCode::typeName(type));
}
} // namespace WTF
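
The entry-point choice in JITCode::execute above is simple but easy to misread: the plain entry is taken when no arity fix-up is needed, otherwise the MustCheckArity entry. A standalone restatement with invented stand-ins for the JSC types (the real code also asserts that a non-JSFunction callee never requires an arity check):

#include <cassert>

enum class Entry { Plain, ArityChecked };

static Entry chooseEntry(bool calleeIsJSFunction, bool needsArityCheck)
{
    if (!calleeIsJSFunction || !needsArityCheck)
        return Entry::Plain;
    return Entry::ArityChecked;
}

int main()
{
    assert(chooseEntry(/*JS function*/ true,  /*needs check*/ false) == Entry::Plain);
    assert(chooseEntry(/*host callee*/ false, /*needs check*/ false) == Entry::Plain);
    assert(chooseEntry(/*JS function*/ true,  /*needs check*/ true)  == Entry::ArityChecked);
    return 0;
}
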
diff --git a/Source/JavaScriptCore/jit/JITCode.h b/Source/JavaScriptCore/jit/JITCode.h
index b7521fb21..7fb7b3134 100644
--- a/Source/JavaScriptCore/jit/JITCode.h
+++ b/Source/JavaScriptCore/jit/JITCode.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,167 +26,226 @@
#ifndef JITCode_h
#define JITCode_h
-#if ENABLE(JIT) || ENABLE(LLINT)
+#include "ArityCheckMode.h"
#include "CallFrame.h"
+#include "CodeOrigin.h"
#include "Disassembler.h"
-#include "JITStubs.h"
#include "JSCJSValue.h"
-#include "LegacyProfiler.h"
#include "MacroAssemblerCodeRef.h"
-#endif
+#include "RegisterSet.h"
+#include <wtf/Optional.h>
namespace JSC {
-#if ENABLE(JIT)
- class VM;
- class JSStack;
-#endif
+namespace DFG {
+class CommonData;
+class JITCode;
+}
+namespace FTL {
+class ForOSREntryJITCode;
+class JITCode;
+}
+
+struct ProtoCallFrame;
+class TrackedReferences;
+class VM;
+
+class JITCode : public ThreadSafeRefCounted<JITCode> {
+public:
+ typedef MacroAssemblerCodeRef CodeRef;
+ typedef MacroAssemblerCodePtr CodePtr;
+
+ enum JITType : uint8_t {
+ None,
+ HostCallThunk,
+ InterpreterThunk,
+ BaselineJIT,
+ DFGJIT,
+ FTLJIT
+ };
- class JITCode {
-#if ENABLE(JIT) || ENABLE(LLINT)
- typedef MacroAssemblerCodeRef CodeRef;
- typedef MacroAssemblerCodePtr CodePtr;
-#else
- JITCode() { }
-#endif
- public:
- enum JITType { None, HostCallThunk, InterpreterThunk, BaselineJIT, DFGJIT };
-
- static JITType bottomTierJIT()
- {
- return BaselineJIT;
- }
-
- static JITType topTierJIT()
- {
- return DFGJIT;
- }
-
- static JITType nextTierJIT(JITType jitType)
- {
- ASSERT_UNUSED(jitType, jitType == BaselineJIT || jitType == DFGJIT);
+ static const char* typeName(JITType);
+
+ static JITType bottomTierJIT()
+ {
+ return BaselineJIT;
+ }
+
+ static JITType topTierJIT()
+ {
+ return FTLJIT;
+ }
+
+ static JITType nextTierJIT(JITType jitType)
+ {
+ switch (jitType) {
+ case BaselineJIT:
return DFGJIT;
+ case DFGJIT:
+ return FTLJIT;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return None;
}
-
- static bool isOptimizingJIT(JITType jitType)
- {
- return jitType == DFGJIT;
+ }
+
+ static bool isExecutableScript(JITType jitType)
+ {
+ switch (jitType) {
+ case None:
+ case HostCallThunk:
+ return false;
+ default:
+ return true;
}
-
- static bool isBaselineCode(JITType jitType)
- {
- return jitType == InterpreterThunk || jitType == BaselineJIT;
+ }
+
+ static bool couldBeInterpreted(JITType jitType)
+ {
+ switch (jitType) {
+ case InterpreterThunk:
+ case BaselineJIT:
+ return true;
+ default:
+ return false;
}
-
-#if ENABLE(JIT) || ENABLE(LLINT)
- JITCode()
- : m_jitType(None)
- {
+ }
+
+ static bool isJIT(JITType jitType)
+ {
+ switch (jitType) {
+ case BaselineJIT:
+ case DFGJIT:
+ case FTLJIT:
+ return true;
+ default:
+ return false;
}
+ }
- JITCode(const CodeRef ref, JITType jitType)
- : m_ref(ref)
- , m_jitType(jitType)
- {
- ASSERT(jitType != None);
- }
-
- bool operator !() const
- {
- return !m_ref;
- }
+ static bool isLowerTier(JITType expectedLower, JITType expectedHigher)
+ {
+ RELEASE_ASSERT(isExecutableScript(expectedLower));
+ RELEASE_ASSERT(isExecutableScript(expectedHigher));
+ return expectedLower < expectedHigher;
+ }
+
+ static bool isHigherTier(JITType expectedHigher, JITType expectedLower)
+ {
+ return isLowerTier(expectedLower, expectedHigher);
+ }
+
+ static bool isLowerOrSameTier(JITType expectedLower, JITType expectedHigher)
+ {
+ return !isHigherTier(expectedLower, expectedHigher);
+ }
+
+ static bool isHigherOrSameTier(JITType expectedHigher, JITType expectedLower)
+ {
+ return isLowerOrSameTier(expectedLower, expectedHigher);
+ }
+
+ static bool isOptimizingJIT(JITType jitType)
+ {
+ return jitType == DFGJIT || jitType == FTLJIT;
+ }
+
+ static bool isBaselineCode(JITType jitType)
+ {
+ return jitType == InterpreterThunk || jitType == BaselineJIT;
+ }
+
+protected:
+ JITCode(JITType);
+
+public:
+ virtual ~JITCode();
+
+ JITType jitType() const
+ {
+ return m_jitType;
+ }
+
+ template<typename PointerType>
+ static JITType jitTypeFor(PointerType jitCode)
+ {
+ if (!jitCode)
+ return None;
+ return jitCode->jitType();
+ }
+
+ virtual CodePtr addressForCall(ArityCheckMode) = 0;
+ virtual void* executableAddressAtOffset(size_t offset) = 0;
+ void* executableAddress() { return executableAddressAtOffset(0); }
+ virtual void* dataAddressAtOffset(size_t offset) = 0;
+ virtual unsigned offsetOf(void* pointerIntoCode) = 0;
+
+ virtual DFG::CommonData* dfgCommon();
+ virtual DFG::JITCode* dfg();
+ virtual FTL::JITCode* ftl();
+ virtual FTL::ForOSREntryJITCode* ftlForOSREntry();
+
+ virtual void validateReferences(const TrackedReferences&);
+
+ JSValue execute(VM*, ProtoCallFrame*);
+
+ void* start() { return dataAddressAtOffset(0); }
+ virtual size_t size() = 0;
+ void* end() { return reinterpret_cast<void*>(reinterpret_cast<uintptr_t>(start()) + size()); }
+
+ virtual bool contains(void*) = 0;
- CodePtr addressForCall()
- {
- return m_ref.code();
- }
+#if ENABLE(JIT)
+ virtual RegisterSet liveRegistersToPreserveAtExceptionHandlingCallSite(CodeBlock*, CallSiteIndex);
+ virtual Optional<CodeOrigin> findPC(CodeBlock*, void* pc) { UNUSED_PARAM(pc); return Nullopt; }
+#endif
- void* executableAddressAtOffset(size_t offset) const
- {
- ASSERT(offset < size());
- return reinterpret_cast<char*>(m_ref.code().executableAddress()) + offset;
- }
-
- void* executableAddress() const
- {
- return executableAddressAtOffset(0);
- }
-
- void* dataAddressAtOffset(size_t offset) const
- {
- ASSERT(offset <= size()); // use <= instead of < because it is valid to ask for an address at the exclusive end of the code.
- return reinterpret_cast<char*>(m_ref.code().dataLocation()) + offset;
- }
+private:
+ JITType m_jitType;
+};
- // This function returns the offset in bytes of 'pointerIntoCode' into
- // this block of code. The pointer provided must be a pointer into this
- // block of code. It is ASSERTed that no codeblock >4gb in size.
- unsigned offsetOf(void* pointerIntoCode)
- {
- intptr_t result = reinterpret_cast<intptr_t>(pointerIntoCode) - reinterpret_cast<intptr_t>(m_ref.code().executableAddress());
- ASSERT(static_cast<intptr_t>(static_cast<unsigned>(result)) == result);
- return static_cast<unsigned>(result);
- }
+class JITCodeWithCodeRef : public JITCode {
+protected:
+ JITCodeWithCodeRef(JITType);
+ JITCodeWithCodeRef(CodeRef, JITType);
-#if ENABLE(JIT)
- // Execute the code!
- inline JSValue execute(JSStack* stack, CallFrame* callFrame, VM* vm)
- {
- JSValue result = JSValue::decode(ctiTrampoline(m_ref.code().executableAddress(), stack, callFrame, 0, 0, vm));
- return vm->exception ? jsNull() : result;
- }
-#endif
+public:
+ virtual ~JITCodeWithCodeRef();
- void* start() const
- {
- return m_ref.code().dataLocation();
- }
+ virtual void* executableAddressAtOffset(size_t offset) override;
+ virtual void* dataAddressAtOffset(size_t offset) override;
+ virtual unsigned offsetOf(void* pointerIntoCode) override;
+ virtual size_t size() override;
+ virtual bool contains(void*) override;
- size_t size() const
- {
- ASSERT(m_ref.code().executableAddress());
- return m_ref.size();
- }
-
- bool tryToDisassemble(const char* prefix) const
- {
- return m_ref.tryToDisassemble(prefix);
- }
+protected:
+ CodeRef m_ref;
+};
- ExecutableMemoryHandle* getExecutableMemory()
- {
- return m_ref.executableMemory();
- }
-
- JITType jitType() const
- {
- return m_jitType;
- }
+class DirectJITCode : public JITCodeWithCodeRef {
+public:
+ DirectJITCode(JITType);
+ DirectJITCode(CodeRef, CodePtr withArityCheck, JITType);
+ virtual ~DirectJITCode();
+
+ void initializeCodeRef(CodeRef, CodePtr withArityCheck);
- // Host functions are a bit special; they have a m_code pointer but they
- // do not individully ref the executable pool containing the trampoline.
- static JITCode HostFunction(CodeRef code)
- {
- return JITCode(code, HostCallThunk);
- }
+ virtual CodePtr addressForCall(ArityCheckMode) override;
- void clear()
- {
- m_ref.~CodeRef();
- new (NotNull, &m_ref) CodeRef();
- }
+private:
+ CodePtr m_withArityCheck;
+};
- private:
- JITCode(PassRefPtr<ExecutableMemoryHandle> executableMemory, JITType jitType)
- : m_ref(executableMemory)
- , m_jitType(jitType)
- {
- }
+class NativeJITCode : public JITCodeWithCodeRef {
+public:
+ NativeJITCode(JITType);
+ NativeJITCode(CodeRef, JITType);
+ virtual ~NativeJITCode();
+
+ void initializeCodeRef(CodeRef);
- CodeRef m_ref;
- JITType m_jitType;
-#endif // ENABLE(JIT) || ENABLE(LLINT)
- };
+ virtual CodePtr addressForCall(ArityCheckMode) override;
+};
} // namespace JSC
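
The tier helpers introduced above (isLowerTier and friends) lean on the declaration order of the JITType enum, so once both operands are known to be executable-script tiers, "lower tier" reduces to an integer comparison. A minimal standalone model mirroring the enum order in this header:

#include <cassert>
#include <cstdint>

enum class Tier : uint8_t { None, HostCallThunk, InterpreterThunk, BaselineJIT, DFGJIT, FTLJIT };

static bool isLowerTier(Tier lower, Tier higher)
{
    return static_cast<uint8_t>(lower) < static_cast<uint8_t>(higher);
}

int main()
{
    assert(isLowerTier(Tier::BaselineJIT, Tier::DFGJIT));
    assert(isLowerTier(Tier::DFGJIT, Tier::FTLJIT));
    assert(!isLowerTier(Tier::FTLJIT, Tier::BaselineJIT));
    return 0;
}
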
diff --git a/Source/JavaScriptCore/jit/JITCompilationEffort.h b/Source/JavaScriptCore/jit/JITCompilationEffort.h
index 5eb680178..29e95426a 100644
--- a/Source/JavaScriptCore/jit/JITCompilationEffort.h
+++ b/Source/JavaScriptCore/jit/JITCompilationEffort.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/Source/JavaScriptCore/jit/JITDisassembler.cpp b/Source/JavaScriptCore/jit/JITDisassembler.cpp
index 39953fa34..04e1b4d49 100644
--- a/Source/JavaScriptCore/jit/JITDisassembler.cpp
+++ b/Source/JavaScriptCore/jit/JITDisassembler.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -31,6 +31,8 @@
#include "CodeBlock.h"
#include "CodeBlockWithJITType.h"
#include "JIT.h"
+#include "JSCInlines.h"
+#include "LinkBuffer.h"
#include <wtf/StringPrintStream.h>
namespace JSC {
@@ -87,7 +89,7 @@ void JITDisassembler::dumpHeader(PrintStream& out, LinkBuffer& linkBuffer)
{
out.print("Generated Baseline JIT code for ", CodeBlockWithJITType(m_codeBlock, JITCode::BaselineJIT), ", instruction count = ", m_codeBlock->instructionCount(), "\n");
out.print(" Source: ", m_codeBlock->sourceCodeOnOneLine(), "\n");
- out.print(" Code at [", RawPointer(linkBuffer.debugAddress()), ", ", RawPointer(static_cast<char*>(linkBuffer.debugAddress()) + linkBuffer.debugSize()), "):\n");
+ out.print(" Code at [", RawPointer(linkBuffer.debugAddress()), ", ", RawPointer(static_cast<char*>(linkBuffer.debugAddress()) + linkBuffer.size()), "):\n");
}
MacroAssembler::Label JITDisassembler::firstSlowLabel()
diff --git a/Source/JavaScriptCore/jit/JITDisassembler.h b/Source/JavaScriptCore/jit/JITDisassembler.h
index 7edbb9cf7..6655de893 100644
--- a/Source/JavaScriptCore/jit/JITDisassembler.h
+++ b/Source/JavaScriptCore/jit/JITDisassembler.h
@@ -26,18 +26,20 @@
#ifndef JITDisassembler_h
#define JITDisassembler_h
-#include <wtf/Platform.h>
-
#if ENABLE(JIT)
-#include "LinkBuffer.h"
#include "MacroAssembler.h"
-#include "ProfilerDatabase.h"
#include <wtf/Vector.h>
+#include <wtf/text/CString.h>
namespace JSC {
class CodeBlock;
+class LinkBuffer;
+
+namespace Profiler {
+class Compilation;
+}
class JITDisassembler {
WTF_MAKE_FAST_ALLOCATED;
diff --git a/Source/JavaScriptCore/jit/JITDivGenerator.cpp b/Source/JavaScriptCore/jit/JITDivGenerator.cpp
new file mode 100644
index 000000000..6b2a7f286
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITDivGenerator.cpp
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JITDivGenerator.h"
+
+#if ENABLE(JIT)
+
+#include "JSCJSValueInlines.h"
+
+namespace JSC {
+
+void JITDivGenerator::loadOperand(CCallHelpers& jit, SnippetOperand& opr, JSValueRegs oprRegs, FPRReg destFPR)
+{
+ if (opr.isConstInt32()) {
+ jit.move(CCallHelpers::Imm32(opr.asConstInt32()), m_scratchGPR);
+ jit.convertInt32ToDouble(m_scratchGPR, destFPR);
+#if USE(JSVALUE64)
+ } else if (opr.isConstDouble()) {
+ jit.move(CCallHelpers::Imm64(opr.asRawBits()), m_scratchGPR);
+ jit.move64ToDouble(m_scratchGPR, destFPR);
+#endif
+ } else {
+ if (!opr.definitelyIsNumber())
+ m_slowPathJumpList.append(jit.branchIfNotNumber(oprRegs, m_scratchGPR));
+ CCallHelpers::Jump notInt32 = jit.branchIfNotInt32(oprRegs);
+ jit.convertInt32ToDouble(oprRegs.payloadGPR(), destFPR);
+ CCallHelpers::Jump oprIsLoaded = jit.jump();
+ notInt32.link(&jit);
+ jit.unboxDoubleNonDestructive(oprRegs, destFPR, m_scratchGPR, m_scratchFPR);
+ oprIsLoaded.link(&jit);
+ }
+}
+
+void JITDivGenerator::generateFastPath(CCallHelpers& jit)
+{
+ ASSERT(m_scratchGPR != InvalidGPRReg);
+ ASSERT(m_scratchGPR != m_left.payloadGPR());
+ ASSERT(m_scratchGPR != m_right.payloadGPR());
+#if USE(JSVALUE32_64)
+ ASSERT(m_scratchGPR != m_left.tagGPR());
+ ASSERT(m_scratchGPR != m_right.tagGPR());
+ ASSERT(m_scratchFPR != InvalidFPRReg);
+#endif
+
+ ASSERT(!m_didEmitFastPath);
+ if (!jit.supportsFloatingPoint())
+ return;
+ if (!m_leftOperand.mightBeNumber() || !m_rightOperand.mightBeNumber())
+ return;
+
+ ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());
+ m_didEmitFastPath = true;
+ loadOperand(jit, m_leftOperand, m_left, m_leftFPR);
+ loadOperand(jit, m_rightOperand, m_right, m_rightFPR);
+
+ jit.divDouble(m_rightFPR, m_leftFPR);
+
+ // Is the result actually an integer? The DFG JIT would really like to know. If it's
+ // not an integer, we increment a count. If this together with the slow case counter
+ // are below threshold then the DFG JIT will compile this division with a speculation
+ // that the remainder is zero.
+
+ // As well, there are cases where a double result here would cause an important field
+ // in the heap to sometimes have doubles in it, resulting in double predictions getting
+ // propagated to a use site where it might cause damage (such as the index to an array
+ // access). So if we are DFG compiling anything in the program, we want this code to
+ // ensure that it produces integers whenever possible.
+
+ CCallHelpers::JumpList notInt32;
+ jit.branchConvertDoubleToInt32(m_leftFPR, m_scratchGPR, notInt32, m_scratchFPR);
+
+ // If we've got an integer, we might as well make that the result of the division.
+ jit.boxInt32(m_scratchGPR, m_result);
+ m_endJumpList.append(jit.jump());
+
+ notInt32.link(&jit);
+#if USE(JSVALUE64)
+ jit.moveDoubleTo64(m_leftFPR, m_scratchGPR);
+ CCallHelpers::Jump notDoubleZero = jit.branchTest64(CCallHelpers::NonZero, m_scratchGPR);
+
+ jit.move(GPRInfo::tagTypeNumberRegister, m_result.payloadGPR());
+ m_endJumpList.append(jit.jump());
+
+ notDoubleZero.link(&jit);
+#endif
+ if (m_resultProfile)
+ jit.add32(CCallHelpers::TrustedImm32(1), CCallHelpers::AbsoluteAddress(m_resultProfile->addressOfSpecialFastPathCount()));
+ jit.boxDouble(m_leftFPR, m_result);
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
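
The long comment in generateFastPath explains why the JIT cares whether a division produced an integer. As a hedged restatement, here is a standalone C++ model of that result-shape decision (invented names; the real code operates on FPRs, boxes values, and handles the zero cases slightly differently on 64-bit):

#include <cassert>
#include <cmath>
#include <cstdint>

struct DivResult {
    bool isInt32;
    int32_t asInt32;
    double asDouble;
};

static DivResult divideLikeTheFastPath(double left, double right, unsigned& nonIntResultCount)
{
    double quotient = left / right;
    // Mirrors branchConvertDoubleToInt32: succeed only for an exact, in-range
    // conversion that is not negative zero.
    if (quotient >= INT32_MIN && quotient <= INT32_MAX) {
        int32_t truncated = static_cast<int32_t>(quotient);
        if (static_cast<double>(truncated) == quotient
            && !(quotient == 0.0 && std::signbit(quotient)))
            return { true, truncated, quotient };
    }
    ++nonIntResultCount; // stands in for ResultProfile's special-fast-path count
    return { false, 0, quotient };
}

int main()
{
    unsigned nonIntResults = 0;
    assert(divideLikeTheFastPath(6.0, 3.0, nonIntResults).isInt32);
    assert(!divideLikeTheFastPath(1.0, 3.0, nonIntResults).isInt32);
    assert(nonIntResults == 1);
    return 0;
}

The counter is the piece the comment is really about: a high non-int32 count tells the optimizing tiers not to speculate that this division always yields an integer.
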
diff --git a/Source/JavaScriptCore/jit/JITDivGenerator.h b/Source/JavaScriptCore/jit/JITDivGenerator.h
new file mode 100644
index 000000000..f9911f347
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITDivGenerator.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITDivGenerator_h
+#define JITDivGenerator_h
+
+#if ENABLE(JIT)
+
+#include "CCallHelpers.h"
+#include "SnippetOperand.h"
+
+namespace JSC {
+
+class JITDivGenerator {
+public:
+ JITDivGenerator(SnippetOperand leftOperand, SnippetOperand rightOperand,
+ JSValueRegs result, JSValueRegs left, JSValueRegs right,
+ FPRReg leftFPR, FPRReg rightFPR, GPRReg scratchGPR, FPRReg scratchFPR,
+ ResultProfile* resultProfile = nullptr)
+ : m_leftOperand(leftOperand)
+ , m_rightOperand(rightOperand)
+ , m_result(result)
+ , m_left(left)
+ , m_right(right)
+ , m_leftFPR(leftFPR)
+ , m_rightFPR(rightFPR)
+ , m_scratchGPR(scratchGPR)
+ , m_scratchFPR(scratchFPR)
+ , m_resultProfile(resultProfile)
+ {
+ ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());
+ }
+
+ void generateFastPath(CCallHelpers&);
+
+ bool didEmitFastPath() const { return m_didEmitFastPath; }
+ CCallHelpers::JumpList& endJumpList() { return m_endJumpList; }
+ CCallHelpers::JumpList& slowPathJumpList() { return m_slowPathJumpList; }
+
+private:
+ void loadOperand(CCallHelpers&, SnippetOperand&, JSValueRegs opRegs, FPRReg destFPR);
+
+ SnippetOperand m_leftOperand;
+ SnippetOperand m_rightOperand;
+ JSValueRegs m_result;
+ JSValueRegs m_left;
+ JSValueRegs m_right;
+ FPRReg m_leftFPR;
+ FPRReg m_rightFPR;
+ GPRReg m_scratchGPR;
+ FPRReg m_scratchFPR;
+ ResultProfile* m_resultProfile;
+ bool m_didEmitFastPath { false };
+
+ CCallHelpers::JumpList m_endJumpList;
+ CCallHelpers::JumpList m_slowPathJumpList;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // JITDivGenerator_h
diff --git a/Source/JavaScriptCore/jit/JITDriver.h b/Source/JavaScriptCore/jit/JITDriver.h
deleted file mode 100644
index a2221fa0f..000000000
--- a/Source/JavaScriptCore/jit/JITDriver.h
+++ /dev/null
@@ -1,123 +0,0 @@
-/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef JITDriver_h
-#define JITDriver_h
-
-#include <wtf/Platform.h>
-
-#if ENABLE(JIT)
-
-#include "BytecodeGenerator.h"
-#include "DFGDriver.h"
-#include "JIT.h"
-#include "LLIntEntrypoints.h"
-
-namespace JSC {
-
-template<typename CodeBlockType>
-inline bool jitCompileIfAppropriate(ExecState* exec, OwnPtr<CodeBlockType>& codeBlock, JITCode& jitCode, JITCode::JITType jitType, unsigned bytecodeIndex, JITCompilationEffort effort)
-{
- VM& vm = exec->vm();
-
- if (jitType == codeBlock->getJITType())
- return true;
-
- if (!vm.canUseJIT())
- return true;
-
- codeBlock->unlinkIncomingCalls();
-
- JITCode oldJITCode = jitCode;
-
- bool dfgCompiled = false;
- if (jitType == JITCode::DFGJIT)
- dfgCompiled = DFG::tryCompile(exec, codeBlock.get(), jitCode, bytecodeIndex);
- if (dfgCompiled) {
- if (codeBlock->alternative())
- codeBlock->alternative()->unlinkIncomingCalls();
- } else {
- if (codeBlock->alternative()) {
- codeBlock = static_pointer_cast<CodeBlockType>(codeBlock->releaseAlternative());
- jitCode = oldJITCode;
- return false;
- }
- jitCode = JIT::compile(&vm, codeBlock.get(), effort);
- if (!jitCode) {
- jitCode = oldJITCode;
- return false;
- }
- }
- codeBlock->setJITCode(jitCode, MacroAssemblerCodePtr());
-
- return true;
-}
-
-inline bool jitCompileFunctionIfAppropriate(ExecState* exec, OwnPtr<FunctionCodeBlock>& codeBlock, JITCode& jitCode, MacroAssemblerCodePtr& jitCodeWithArityCheck, JITCode::JITType jitType, unsigned bytecodeIndex, JITCompilationEffort effort)
-{
- VM& vm = exec->vm();
-
- if (jitType == codeBlock->getJITType())
- return true;
-
- if (!vm.canUseJIT())
- return true;
-
- codeBlock->unlinkIncomingCalls();
-
- JITCode oldJITCode = jitCode;
- MacroAssemblerCodePtr oldJITCodeWithArityCheck = jitCodeWithArityCheck;
-
- bool dfgCompiled = false;
- if (jitType == JITCode::DFGJIT)
- dfgCompiled = DFG::tryCompileFunction(exec, codeBlock.get(), jitCode, jitCodeWithArityCheck, bytecodeIndex);
- if (dfgCompiled) {
- if (codeBlock->alternative())
- codeBlock->alternative()->unlinkIncomingCalls();
- } else {
- if (codeBlock->alternative()) {
- codeBlock = static_pointer_cast<FunctionCodeBlock>(codeBlock->releaseAlternative());
- jitCode = oldJITCode;
- jitCodeWithArityCheck = oldJITCodeWithArityCheck;
- return false;
- }
- jitCode = JIT::compile(&vm, codeBlock.get(), effort, &jitCodeWithArityCheck);
- if (!jitCode) {
- jitCode = oldJITCode;
- jitCodeWithArityCheck = oldJITCodeWithArityCheck;
- return false;
- }
- }
- codeBlock->setJITCode(jitCode, jitCodeWithArityCheck);
-
- return true;
-}
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
-
-#endif // JITDriver_h
-
diff --git a/Source/JavaScriptCore/jit/JITExceptions.cpp b/Source/JavaScriptCore/jit/JITExceptions.cpp
index 46f59a3a9..cf2ea28af 100644
--- a/Source/JavaScriptCore/jit/JITExceptions.cpp
+++ b/Source/JavaScriptCore/jit/JITExceptions.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,43 +30,54 @@
#include "CodeBlock.h"
#include "Interpreter.h"
#include "JSCJSValue.h"
+#include "LLIntData.h"
+#include "LLIntOpcode.h"
+#include "LLIntThunks.h"
+#include "Opcode.h"
+#include "JSCInlines.h"
#include "VM.h"
-#include "Operations.h"
-
-#if ENABLE(JIT) || ENABLE(LLINT)
namespace JSC {
-ExceptionHandler genericThrow(VM* vm, ExecState* callFrame, JSValue exceptionValue, unsigned vPCIndex)
+void genericUnwind(VM* vm, ExecState* callFrame, UnwindStart unwindStart)
{
- RELEASE_ASSERT(exceptionValue);
+ if (Options::breakOnThrow()) {
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ if (codeBlock)
+ dataLog("In call frame ", RawPointer(callFrame), " for code block ", *codeBlock, "\n");
+ else
+ dataLog("In call frame ", RawPointer(callFrame), " with null CodeBlock\n");
+ CRASH();
+ }
- vm->exception = JSValue();
- HandlerInfo* handler = vm->interpreter->throwException(callFrame, exceptionValue, vPCIndex); // This may update callFrame & exceptionValue!
- vm->exception = exceptionValue;
+ Exception* exception = vm->exception();
+ RELEASE_ASSERT(exception);
+ HandlerInfo* handler = vm->interpreter->unwind(*vm, callFrame, exception, unwindStart); // This may update callFrame.
void* catchRoutine;
Instruction* catchPCForInterpreter = 0;
if (handler) {
- catchPCForInterpreter = &callFrame->codeBlock()->instructions()[handler->target];
- catchRoutine = ExecutableBase::catchRoutineFor(handler, catchPCForInterpreter);
+ // handler->target is meaningless for getting a code offset when catching
+ // the exception in a DFG/FTL frame. This bytecode target offset could be
+ // something that's in an inlined frame, which means an array access
+ // with this bytecode offset in the machine frame is utterly meaningless
+ // and can cause an overflow. OSR exit properly exits to handler->target
+ // in the proper frame.
+ if (!JITCode::isOptimizingJIT(callFrame->codeBlock()->jitType()))
+ catchPCForInterpreter = &callFrame->codeBlock()->instructions()[handler->target];
+#if ENABLE(JIT)
+ catchRoutine = handler->nativeCode.executableAddress();
+#else
+ catchRoutine = catchPCForInterpreter->u.pointer;
+#endif
} else
- catchRoutine = FunctionPtr(LLInt::getCodePtr(ctiOpThrowNotCaught)).value();
+ catchRoutine = LLInt::getCodePtr(handleUncaughtException);
- vm->callFrameForThrow = callFrame;
+ vm->callFrameForCatch = callFrame;
vm->targetMachinePCForThrow = catchRoutine;
vm->targetInterpreterPCForThrow = catchPCForInterpreter;
RELEASE_ASSERT(catchRoutine);
- ExceptionHandler exceptionHandler = { catchRoutine, callFrame };
- return exceptionHandler;
-}
-
-ExceptionHandler jitThrow(VM* vm, ExecState* callFrame, JSValue exceptionValue, ReturnAddressPtr faultLocation)
-{
- return genericThrow(vm, callFrame, exceptionValue, callFrame->codeBlock()->bytecodeOffset(callFrame, faultLocation));
-}
-
}
-#endif
+} // namespace JSC
diff --git a/Source/JavaScriptCore/jit/JITExceptions.h b/Source/JavaScriptCore/jit/JITExceptions.h
index b611caf95..3ccac84c5 100644
--- a/Source/JavaScriptCore/jit/JITExceptions.h
+++ b/Source/JavaScriptCore/jit/JITExceptions.h
@@ -26,31 +26,17 @@
#ifndef JITExceptions_h
#define JITExceptions_h
+#include "Interpreter.h"
#include "JSCJSValue.h"
-#include "MacroAssemblerCodeRef.h"
-
-#if ENABLE(JIT) || ENABLE(LLINT)
namespace JSC {
class ExecState;
class VM;
-// This header gives other parts of the system access to the JIT's prototocol
-// for the throwing and handling exceptions.
-
-struct ExceptionHandler {
- void* catchRoutine;
- ExecState* callFrame;
-};
-
-ExceptionHandler genericThrow(VM*, ExecState*, JSValue exceptionValue, unsigned vPCIndex);
-
-ExceptionHandler jitThrow(VM*, ExecState*, JSValue exceptionValue, ReturnAddressPtr faultLocation);
+void genericUnwind(VM*, ExecState*, UnwindStart = UnwindFromCurrentFrame);
} // namespace JSC
-#endif // ENABLE(JIT) || ENABLE(LLINT)
-
#endif // JITExceptions_h
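
Editor's note: the genericUnwind() change above folds the old genericThrow()/jitThrow() pair into a single routine. The interpreter walks out to the nearest handler, and the VM records where execution should resume (callFrameForCatch, targetMachinePCForThrow, targetInterpreterPCForThrow) for the exception thunks to pick up. Below is a minimal, self-contained sketch of that bookkeeping only; Vm, Frame and Handler here are hypothetical stand-ins, not JSC's real classes.

// Hedged sketch of the unwind hand-off; all types are hypothetical stand-ins.
#include <cassert>

struct Frame;

struct Handler {
    void* nativeCode;   // catch routine a JIT thunk would jump to
};

struct Frame {
    Handler* handler;   // null if this frame cannot catch
    Frame* caller;
};

struct Vm {
    Frame* callFrameForCatch { nullptr };
    void* targetMachinePCForThrow { nullptr };
};

// Walk out to the nearest frame with a handler, then record where the
// exception thunk should resume; fall back to an "uncaught" routine.
void modelGenericUnwind(Vm& vm, Frame* frame, void* handleUncaughtException)
{
    while (frame && !frame->handler)
        frame = frame->caller;

    void* catchRoutine = frame ? frame->handler->nativeCode : handleUncaughtException;
    vm.callFrameForCatch = frame;
    vm.targetMachinePCForThrow = catchRoutine;
    assert(catchRoutine);
}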
diff --git a/Source/JavaScriptCore/jit/JITInlineCacheGenerator.cpp b/Source/JavaScriptCore/jit/JITInlineCacheGenerator.cpp
new file mode 100644
index 000000000..3ca56f65d
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITInlineCacheGenerator.cpp
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JITInlineCacheGenerator.h"
+
+#if ENABLE(JIT)
+
+#include "CodeBlock.h"
+#include "LinkBuffer.h"
+#include "JSCInlines.h"
+
+namespace JSC {
+
+static StructureStubInfo* garbageStubInfo()
+{
+ static StructureStubInfo* stubInfo = new StructureStubInfo(AccessType::Get);
+ return stubInfo;
+}
+
+JITInlineCacheGenerator::JITInlineCacheGenerator(
+ CodeBlock* codeBlock, CodeOrigin codeOrigin, CallSiteIndex callSite, AccessType accessType)
+ : m_codeBlock(codeBlock)
+{
+ m_stubInfo = m_codeBlock ? m_codeBlock->addStubInfo(accessType) : garbageStubInfo();
+ m_stubInfo->codeOrigin = codeOrigin;
+ m_stubInfo->callSiteIndex = callSite;
+}
+
+JITByIdGenerator::JITByIdGenerator(
+ CodeBlock* codeBlock, CodeOrigin codeOrigin, CallSiteIndex callSite, AccessType accessType,
+ const RegisterSet& usedRegisters, JSValueRegs base, JSValueRegs value)
+ : JITInlineCacheGenerator(codeBlock, codeOrigin, callSite, accessType)
+ , m_base(base)
+ , m_value(value)
+{
+ m_stubInfo->patch.usedRegisters = usedRegisters;
+
+ m_stubInfo->patch.baseGPR = static_cast<int8_t>(base.payloadGPR());
+ m_stubInfo->patch.valueGPR = static_cast<int8_t>(value.payloadGPR());
+#if USE(JSVALUE32_64)
+ m_stubInfo->patch.baseTagGPR = static_cast<int8_t>(base.tagGPR());
+ m_stubInfo->patch.valueTagGPR = static_cast<int8_t>(value.tagGPR());
+#endif
+}
+
+void JITByIdGenerator::finalize(LinkBuffer& fastPath, LinkBuffer& slowPath)
+{
+ CodeLocationCall callReturnLocation = slowPath.locationOf(m_call);
+ m_stubInfo->callReturnLocation = callReturnLocation;
+ m_stubInfo->patch.deltaCheckImmToCall = MacroAssembler::differenceBetweenCodePtr(
+ fastPath.locationOf(m_structureImm), callReturnLocation);
+ m_stubInfo->patch.deltaCallToJump = MacroAssembler::differenceBetweenCodePtr(
+ callReturnLocation, fastPath.locationOf(m_structureCheck));
+#if USE(JSVALUE64)
+ m_stubInfo->patch.deltaCallToLoadOrStore = MacroAssembler::differenceBetweenCodePtr(
+ callReturnLocation, fastPath.locationOf(m_loadOrStore));
+#else
+ m_stubInfo->patch.deltaCallToTagLoadOrStore = MacroAssembler::differenceBetweenCodePtr(
+ callReturnLocation, fastPath.locationOf(m_tagLoadOrStore));
+ m_stubInfo->patch.deltaCallToPayloadLoadOrStore = MacroAssembler::differenceBetweenCodePtr(
+ callReturnLocation, fastPath.locationOf(m_loadOrStore));
+#endif
+ m_stubInfo->patch.deltaCallToSlowCase = MacroAssembler::differenceBetweenCodePtr(
+ callReturnLocation, slowPath.locationOf(m_slowPathBegin));
+ m_stubInfo->patch.deltaCallToDone = MacroAssembler::differenceBetweenCodePtr(
+ callReturnLocation, fastPath.locationOf(m_done));
+}
+
+void JITByIdGenerator::finalize(LinkBuffer& linkBuffer)
+{
+ finalize(linkBuffer, linkBuffer);
+}
+
+void JITByIdGenerator::generateFastPathChecks(MacroAssembler& jit)
+{
+ m_structureCheck = jit.patchableBranch32WithPatch(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(m_base.payloadGPR(), JSCell::structureIDOffset()),
+ m_structureImm, MacroAssembler::TrustedImm32(0));
+}
+
+JITGetByIdGenerator::JITGetByIdGenerator(
+ CodeBlock* codeBlock, CodeOrigin codeOrigin, CallSiteIndex callSite, const RegisterSet& usedRegisters,
+ JSValueRegs base, JSValueRegs value)
+ : JITByIdGenerator(
+ codeBlock, codeOrigin, callSite, AccessType::Get, usedRegisters, base, value)
+{
+ RELEASE_ASSERT(base.payloadGPR() != value.tagGPR());
+}
+
+void JITGetByIdGenerator::generateFastPath(MacroAssembler& jit)
+{
+ generateFastPathChecks(jit);
+
+#if USE(JSVALUE64)
+ m_loadOrStore = jit.load64WithCompactAddressOffsetPatch(
+ MacroAssembler::Address(m_base.payloadGPR(), 0), m_value.payloadGPR()).label();
+#else
+ m_tagLoadOrStore = jit.load32WithCompactAddressOffsetPatch(
+ MacroAssembler::Address(m_base.payloadGPR(), 0), m_value.tagGPR()).label();
+ m_loadOrStore = jit.load32WithCompactAddressOffsetPatch(
+ MacroAssembler::Address(m_base.payloadGPR(), 0), m_value.payloadGPR()).label();
+#endif
+
+ m_done = jit.label();
+}
+
+JITPutByIdGenerator::JITPutByIdGenerator(
+ CodeBlock* codeBlock, CodeOrigin codeOrigin, CallSiteIndex callSite, const RegisterSet& usedRegisters,
+ JSValueRegs base, JSValueRegs value, GPRReg scratch,
+ ECMAMode ecmaMode, PutKind putKind)
+ : JITByIdGenerator(
+ codeBlock, codeOrigin, callSite, AccessType::Put, usedRegisters, base, value)
+ , m_ecmaMode(ecmaMode)
+ , m_putKind(putKind)
+{
+ m_stubInfo->patch.usedRegisters.clear(scratch);
+}
+
+void JITPutByIdGenerator::generateFastPath(MacroAssembler& jit)
+{
+ generateFastPathChecks(jit);
+
+#if USE(JSVALUE64)
+ m_loadOrStore = jit.store64WithAddressOffsetPatch(
+ m_value.payloadGPR(), MacroAssembler::Address(m_base.payloadGPR(), 0)).label();
+#else
+ m_tagLoadOrStore = jit.store32WithAddressOffsetPatch(
+ m_value.tagGPR(), MacroAssembler::Address(m_base.payloadGPR(), 0)).label();
+ m_loadOrStore = jit.store32WithAddressOffsetPatch(
+ m_value.payloadGPR(), MacroAssembler::Address(m_base.payloadGPR(), 0)).label();
+#endif
+
+ m_done = jit.label();
+}
+
+V_JITOperation_ESsiJJI JITPutByIdGenerator::slowPathFunction()
+{
+ if (m_ecmaMode == StrictMode) {
+ if (m_putKind == Direct)
+ return operationPutByIdDirectStrictOptimize;
+ return operationPutByIdStrictOptimize;
+ }
+ if (m_putKind == Direct)
+ return operationPutByIdDirectNonStrictOptimize;
+ return operationPutByIdNonStrictOptimize;
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
diff --git a/Source/JavaScriptCore/jit/JITInlineCacheGenerator.h b/Source/JavaScriptCore/jit/JITInlineCacheGenerator.h
new file mode 100644
index 000000000..0ada0f8b4
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITInlineCacheGenerator.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITInlineCacheGenerator_h
+#define JITInlineCacheGenerator_h
+
+#if ENABLE(JIT)
+
+#include "CodeOrigin.h"
+#include "JITOperations.h"
+#include "JSCJSValue.h"
+#include "PutKind.h"
+#include "RegisterSet.h"
+#include "StructureStubInfo.h"
+
+namespace JSC {
+
+class CodeBlock;
+
+class JITInlineCacheGenerator {
+protected:
+ JITInlineCacheGenerator() { }
+ JITInlineCacheGenerator(CodeBlock*, CodeOrigin, CallSiteIndex, AccessType);
+
+public:
+ StructureStubInfo* stubInfo() const { return m_stubInfo; }
+
+protected:
+ CodeBlock* m_codeBlock;
+ StructureStubInfo* m_stubInfo;
+};
+
+class JITByIdGenerator : public JITInlineCacheGenerator {
+protected:
+ JITByIdGenerator() { }
+
+ JITByIdGenerator(
+ CodeBlock*, CodeOrigin, CallSiteIndex, AccessType, const RegisterSet&, JSValueRegs base,
+ JSValueRegs value);
+
+public:
+ void reportSlowPathCall(MacroAssembler::Label slowPathBegin, MacroAssembler::Call call)
+ {
+ m_slowPathBegin = slowPathBegin;
+ m_call = call;
+ }
+
+ MacroAssembler::Label slowPathBegin() const { return m_slowPathBegin; }
+ MacroAssembler::Jump slowPathJump() const { return m_structureCheck.m_jump; }
+
+ void finalize(LinkBuffer& fastPathLinkBuffer, LinkBuffer& slowPathLinkBuffer);
+ void finalize(LinkBuffer&);
+
+protected:
+ void generateFastPathChecks(MacroAssembler&);
+
+ JSValueRegs m_base;
+ JSValueRegs m_value;
+
+ MacroAssembler::DataLabel32 m_structureImm;
+ MacroAssembler::PatchableJump m_structureCheck;
+ AssemblerLabel m_loadOrStore;
+#if USE(JSVALUE32_64)
+ AssemblerLabel m_tagLoadOrStore;
+#endif
+ MacroAssembler::Label m_done;
+ MacroAssembler::Label m_slowPathBegin;
+ MacroAssembler::Call m_call;
+};
+
+class JITGetByIdGenerator : public JITByIdGenerator {
+public:
+ JITGetByIdGenerator() { }
+
+ JITGetByIdGenerator(
+ CodeBlock*, CodeOrigin, CallSiteIndex, const RegisterSet& usedRegisters, JSValueRegs base,
+ JSValueRegs value);
+
+ void generateFastPath(MacroAssembler&);
+};
+
+class JITPutByIdGenerator : public JITByIdGenerator {
+public:
+ JITPutByIdGenerator() { }
+
+ JITPutByIdGenerator(
+ CodeBlock*, CodeOrigin, CallSiteIndex, const RegisterSet& usedRegisters, JSValueRegs base,
+ JSValueRegs, GPRReg scratch, ECMAMode, PutKind);
+
+ void generateFastPath(MacroAssembler&);
+
+ V_JITOperation_ESsiJJI slowPathFunction();
+
+private:
+ ECMAMode m_ecmaMode;
+ PutKind m_putKind;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // JITInlineCacheGenerator_h
+
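
Editor's note: the JITInlineCacheGenerator.h interface above implies a fixed lifecycle for by-id inline caches: emit the fast path inline, report the out-of-line slow-path call, then finalize against the link buffer(s) so the StructureStubInfo records all patch offsets. The fragment below sketches that sequence for a get_by_id; the register choices, the usedRegisters set, and the slow-path call target are illustrative assumptions rather than a verbatim JSC call site, and the fragment is not compilable on its own.

// Hedged usage sketch (assumed surrounding state: jit, codeBlock, codeOrigin,
// callSiteIndex, usedRegisters, baseGPR, resultGPR, link buffers).
JITGetByIdGenerator gen(
    codeBlock, codeOrigin, callSiteIndex, usedRegisters,
    JSValueRegs(baseGPR), JSValueRegs(resultGPR));

gen.generateFastPath(jit);                    // structure check + patchable load

// ... out-of-line slow path ...
MacroAssembler::Label coldPathBegin = jit.label();
MacroAssembler::Call call = jit.call();       // call routed to a get_by_id slow-path operation (assumed)
gen.reportSlowPathCall(coldPathBegin, call);

// Once both paths are linked, record the patch deltas in the StructureStubInfo.
gen.finalize(fastPathLinkBuffer, slowPathLinkBuffer);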
diff --git a/Source/JavaScriptCore/jit/JITInlines.h b/Source/JavaScriptCore/jit/JITInlines.h
index 5e5d834aa..77adc02df 100644
--- a/Source/JavaScriptCore/jit/JITInlines.h
+++ b/Source/JavaScriptCore/jit/JITInlines.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012, 2013, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,17 +26,63 @@
#ifndef JITInlines_h
#define JITInlines_h
-
#if ENABLE(JIT)
+#include "JSCInlines.h"
+
namespace JSC {
-ALWAYS_INLINE bool JIT::isOperandConstantImmediateDouble(unsigned src)
+#if USE(JSVALUE64)
+inline MacroAssembler::JumpList JIT::emitDoubleGetByVal(Instruction* instruction, PatchableJump& badType)
+{
+ JumpList slowCases = emitDoubleLoad(instruction, badType);
+ moveDoubleTo64(fpRegT0, regT0);
+ sub64(tagTypeNumberRegister, regT0);
+ return slowCases;
+}
+#else
+inline MacroAssembler::JumpList JIT::emitDoubleGetByVal(Instruction* instruction, PatchableJump& badType)
+{
+ JumpList slowCases = emitDoubleLoad(instruction, badType);
+ moveDoubleToInts(fpRegT0, regT0, regT1);
+ return slowCases;
+}
+#endif // USE(JSVALUE64)
+
+ALWAYS_INLINE MacroAssembler::JumpList JIT::emitLoadForArrayMode(Instruction* currentInstruction, JITArrayMode arrayMode, PatchableJump& badType)
+{
+ switch (arrayMode) {
+ case JITInt32:
+ return emitInt32Load(currentInstruction, badType);
+ case JITDouble:
+ return emitDoubleLoad(currentInstruction, badType);
+ case JITContiguous:
+ return emitContiguousLoad(currentInstruction, badType);
+ case JITArrayStorage:
+ return emitArrayStorageLoad(currentInstruction, badType);
+ default:
+ break;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ return MacroAssembler::JumpList();
+}
+
+inline MacroAssembler::JumpList JIT::emitContiguousGetByVal(Instruction* instruction, PatchableJump& badType, IndexingType expectedShape)
+{
+ return emitContiguousLoad(instruction, badType, expectedShape);
+}
+
+inline MacroAssembler::JumpList JIT::emitArrayStorageGetByVal(Instruction* instruction, PatchableJump& badType)
+{
+ return emitArrayStorageLoad(instruction, badType);
+}
+
+ALWAYS_INLINE bool JIT::isOperandConstantDouble(int src)
{
return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble();
}
-ALWAYS_INLINE JSValue JIT::getConstantOperand(unsigned src)
+ALWAYS_INLINE JSValue JIT::getConstantOperand(int src)
{
ASSERT(m_codeBlock->isConstantRegisterIndex(src));
return m_codeBlock->getConstant(src);
@@ -52,33 +98,9 @@ ALWAYS_INLINE void JIT::emitPutIntToCallFrameHeader(RegisterID from, JSStack::Ca
#endif
}
-ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
-{
- loadPtr(Address(from, entry * sizeof(Register)), to);
-#if USE(JSVALUE64)
- killLastResultRegister();
-#endif
-}
-
-ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
-{
- load32(Address(from, entry * sizeof(Register)), to);
-#if USE(JSVALUE64)
- killLastResultRegister();
-#endif
-}
-
-#if USE(JSVALUE64)
-ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader64(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
-{
- load64(Address(from, entry * sizeof(Register)), to);
- killLastResultRegister();
-}
-#endif
-
ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures)
{
- failures.append(branchPtr(NotEqual, Address(src, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
+ failures.append(branchStructure(NotEqual, Address(src, JSCell::structureIDOffset()), m_vm->stringStructure.get()));
failures.append(branch32(NotEqual, MacroAssembler::Address(src, ThunkHelpers::jsStringLengthOffset()), TrustedImm32(1)));
loadPtr(MacroAssembler::Address(src, ThunkHelpers::jsStringValueOffset()), dst);
failures.append(branchTest32(Zero, dst));
@@ -97,112 +119,653 @@ ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst,
ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
{
- ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
-
+ ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
Call nakedCall = nearCall();
m_calls.append(CallRecord(nakedCall, m_bytecodeOffset, function.executableAddress()));
return nakedCall;
}
-ALWAYS_INLINE bool JIT::atJumpTarget()
+ALWAYS_INLINE JIT::Call JIT::emitNakedTailCall(CodePtr function)
{
- while (m_jumpTargetsPosition < m_codeBlock->numberOfJumpTargets() && m_codeBlock->jumpTarget(m_jumpTargetsPosition) <= m_bytecodeOffset) {
- if (m_codeBlock->jumpTarget(m_jumpTargetsPosition) == m_bytecodeOffset)
- return true;
- ++m_jumpTargetsPosition;
- }
- return false;
+ ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+ Call nakedCall = nearTailCall();
+ m_calls.append(CallRecord(nakedCall, m_bytecodeOffset, function.executableAddress()));
+ return nakedCall;
}
-#if defined(ASSEMBLER_HAS_CONSTANT_POOL) && ASSEMBLER_HAS_CONSTANT_POOL
+ALWAYS_INLINE void JIT::updateTopCallFrame()
+{
+ ASSERT(static_cast<int>(m_bytecodeOffset) >= 0);
+#if USE(JSVALUE32_64)
+ Instruction* instruction = m_codeBlock->instructions().begin() + m_bytecodeOffset + 1;
+ uint32_t locationBits = CallSiteIndex(instruction).bits();
+#else
+ uint32_t locationBits = CallSiteIndex(m_bytecodeOffset + 1).bits();
+#endif
+ store32(TrustedImm32(locationBits), intTagFor(JSStack::ArgumentCount));
+ storePtr(callFrameRegister, &m_vm->topCallFrame);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithExceptionCheck(const FunctionPtr& function)
+{
+ updateTopCallFrame();
+ MacroAssembler::Call call = appendCall(function);
+ exceptionCheck();
+ return call;
+}
-ALWAYS_INLINE void JIT::beginUninterruptedSequence(int insnSpace, int constSpace)
+#if OS(WINDOWS) && CPU(X86_64)
+ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithExceptionCheckAndSlowPathReturnType(const FunctionPtr& function)
{
-#if CPU(ARM_TRADITIONAL)
-#ifndef NDEBUG
- // Ensure the label after the sequence can also fit
- insnSpace += sizeof(ARMWord);
- constSpace += sizeof(uint64_t);
+ updateTopCallFrame();
+ MacroAssembler::Call call = appendCallWithSlowPathReturnType(function);
+ exceptionCheck();
+ return call;
+}
#endif
- ensureSpace(insnSpace, constSpace);
+ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithCallFrameRollbackOnException(const FunctionPtr& function)
+{
+ updateTopCallFrame(); // The callee is responsible for setting topCallFrame to its caller
+ MacroAssembler::Call call = appendCall(function);
+ exceptionCheckWithCallFrameRollback();
+ return call;
+}
-#elif CPU(SH4)
-#ifndef NDEBUG
- insnSpace += sizeof(SH4Word);
- constSpace += sizeof(uint64_t);
+ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithExceptionCheckSetJSValueResult(const FunctionPtr& function, int dst)
+{
+ MacroAssembler::Call call = appendCallWithExceptionCheck(function);
+#if USE(JSVALUE64)
+ emitPutVirtualRegister(dst, returnValueGPR);
+#else
+ emitStore(dst, returnValueGPR2, returnValueGPR);
#endif
+ return call;
+}
- m_assembler.ensureSpace(insnSpace + m_assembler.maxInstructionSize + 2, constSpace + 8);
+ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithExceptionCheckSetJSValueResultWithProfile(const FunctionPtr& function, int dst)
+{
+ MacroAssembler::Call call = appendCallWithExceptionCheck(function);
+ emitValueProfilingSite();
+#if USE(JSVALUE64)
+ emitPutVirtualRegister(dst, returnValueGPR);
+#else
+ emitStore(dst, returnValueGPR2, returnValueGPR);
#endif
+ return call;
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(P_JITOperation_E operation)
+{
+ setupArgumentsExecState();
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperationNoExceptionCheck(Z_JITOperation_E operation)
+{
+ setupArgumentsExecState();
+ updateTopCallFrame();
+ return appendCall(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(C_JITOperation_E operation)
+{
+ setupArgumentsExecState();
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(C_JITOperation_EJsc operation, GPRReg arg1)
+{
+ setupArgumentsWithExecState(arg1);
+ return appendCallWithExceptionCheck(operation);
+}
-#ifndef NDEBUG
- m_uninterruptedInstructionSequenceBegin = label();
- m_uninterruptedConstantSequenceBegin = sizeOfConstantPool();
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(C_JITOperation_EJscZ operation, GPRReg arg1, int32_t arg2)
+{
+ setupArgumentsWithExecState(arg1, TrustedImm32(arg2));
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(C_JITOperation_EL operation, GPRReg arg1)
+{
+ setupArgumentsWithExecState(arg1);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(C_JITOperation_EL operation, TrustedImmPtr arg1)
+{
+ setupArgumentsWithExecState(arg1);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(C_JITOperation_EO operation, GPRReg arg)
+{
+ setupArgumentsWithExecState(arg);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(C_JITOperation_ESt operation, Structure* structure)
+{
+ setupArgumentsWithExecState(TrustedImmPtr(structure));
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(C_JITOperation_EZ operation, int32_t arg)
+{
+ setupArgumentsWithExecState(TrustedImm32(arg));
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_E operation, int dst)
+{
+ setupArgumentsExecState();
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EAapJcpZ operation, int dst, ArrayAllocationProfile* arg1, GPRReg arg2, int32_t arg3)
+{
+ setupArgumentsWithExecState(TrustedImmPtr(arg1), arg2, TrustedImm32(arg3));
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EAapJcpZ operation, int dst, ArrayAllocationProfile* arg1, const JSValue* arg2, int32_t arg3)
+{
+ setupArgumentsWithExecState(TrustedImmPtr(arg1), TrustedImmPtr(arg2), TrustedImm32(arg3));
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EC operation, int dst, JSCell* cell)
+{
+ setupArgumentsWithExecState(TrustedImmPtr(cell));
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EC operation, JSCell* cell)
+{
+ setupArgumentsWithExecState(TrustedImmPtr(cell));
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJscC operation, int dst, GPRReg arg1, JSCell* cell)
+{
+ setupArgumentsWithExecState(arg1, TrustedImmPtr(cell));
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJscCJ operation, int dst, GPRReg arg1, JSCell* cell, GPRReg arg2)
+{
+ setupArgumentsWithExecState(arg1, TrustedImmPtr(cell), arg2);
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EP operation, int dst, void* pointer)
+{
+ setupArgumentsWithExecState(TrustedImmPtr(pointer));
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(WithProfileTag, J_JITOperation_EPc operation, int dst, Instruction* bytecodePC)
+{
+ setupArgumentsWithExecState(TrustedImmPtr(bytecodePC));
+ return appendCallWithExceptionCheckSetJSValueResultWithProfile(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EPc operation, int dst, Instruction* bytecodePC)
+{
+ setupArgumentsWithExecState(TrustedImmPtr(bytecodePC));
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EZ operation, int dst, int32_t arg)
+{
+ setupArgumentsWithExecState(TrustedImm32(arg));
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EZZ operation, int dst, int32_t arg1, int32_t arg2)
+{
+ setupArgumentsWithExecState(TrustedImm32(arg1), TrustedImm32(arg2));
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(S_JITOperation_ECC operation, RegisterID regOp1, RegisterID regOp2)
+{
+ setupArgumentsWithExecState(regOp1, regOp2);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(S_JITOperation_EOJss operation, RegisterID regOp1, RegisterID regOp2)
+{
+ setupArgumentsWithExecState(regOp1, regOp2);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(Sprt_JITOperation_EZ operation, int32_t op)
+{
+#if OS(WINDOWS) && CPU(X86_64)
+ setupArgumentsWithExecStateForCallWithSlowPathReturnType(TrustedImm32(op));
+ return appendCallWithExceptionCheckAndSlowPathReturnType(operation);
+#else
+ setupArgumentsWithExecState(TrustedImm32(op));
+ return appendCallWithExceptionCheck(operation);
#endif
}
-ALWAYS_INLINE void JIT::endUninterruptedSequence(int insnSpace, int constSpace, int dst)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_E operation)
{
-#ifndef NDEBUG
- /* There are several cases when the uninterrupted sequence is larger than
- * maximum required offset for pathing the same sequence. Eg.: if in a
- * uninterrupted sequence the last macroassembler's instruction is a stub
- * call, it emits store instruction(s) which should not be included in the
- * calculation of length of uninterrupted sequence. So, the insnSpace and
- * constSpace should be upper limit instead of hard limit.
- */
+ setupArgumentsExecState();
+ return appendCallWithExceptionCheck(operation);
+}
-#if CPU(SH4)
- if ((dst > 15) || (dst < -16)) {
- insnSpace += 8;
- constSpace += 2;
- }
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EC operation, RegisterID regOp)
+{
+ setupArgumentsWithExecState(regOp);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECC operation, RegisterID regOp1, RegisterID regOp2)
+{
+ setupArgumentsWithExecState(regOp1, regOp2);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EE operation, RegisterID regOp)
+{
+ setupArgumentsWithExecState(regOp);
+ updateTopCallFrame();
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EPc operation, Instruction* bytecodePC)
+{
+ setupArgumentsWithExecState(TrustedImmPtr(bytecodePC));
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EZ operation, int32_t op)
+{
+ setupArgumentsWithExecState(TrustedImm32(op));
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperationWithCallFrameRollbackOnException(J_JITOperation_E operation)
+{
+ setupArgumentsExecState();
+ return appendCallWithCallFrameRollbackOnException(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperationWithCallFrameRollbackOnException(V_JITOperation_ECb operation, CodeBlock* pointer)
+{
+ setupArgumentsWithExecState(TrustedImmPtr(pointer));
+ return appendCallWithCallFrameRollbackOnException(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperationWithCallFrameRollbackOnException(Z_JITOperation_E operation)
+{
+ setupArgumentsExecState();
+ return appendCallWithCallFrameRollbackOnException(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECIZC operation, RegisterID regOp1, UniquedStringImpl* identOp2, int32_t op3, RegisterID regOp4)
+{
+ setupArgumentsWithExecState(regOp1, TrustedImmPtr(identOp2), TrustedImm32(op3), regOp4);
+ return appendCallWithExceptionCheck(operation);
+}
+
+#if USE(JSVALUE64)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(Z_JITOperation_EJZZ operation, GPRReg arg1, int32_t arg2, int32_t arg3)
+{
+ setupArgumentsWithExecState(arg1, TrustedImm32(arg2), TrustedImm32(arg3));
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(F_JITOperation_EFJZZ operation, GPRReg arg1, GPRReg arg2, int32_t arg3, GPRReg arg4)
+{
+ setupArgumentsWithExecState(arg1, arg2, TrustedImm32(arg3), arg4);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ESsiJJI operation, StructureStubInfo* stubInfo, RegisterID regOp1, RegisterID regOp2, UniquedStringImpl* uid)
+{
+ setupArgumentsWithExecState(TrustedImmPtr(stubInfo), regOp1, regOp2, TrustedImmPtr(uid));
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJJJ operation, RegisterID regOp1, RegisterID regOp2, RegisterID regOp3)
+{
+ setupArgumentsWithExecState(regOp1, regOp2, regOp3);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJJJAp operation, RegisterID regOp1, RegisterID regOp2, RegisterID regOp3, ArrayProfile* arrayProfile)
+{
+ setupArgumentsWithExecState(regOp1, regOp2, regOp3, TrustedImmPtr(arrayProfile));
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJJJBy operation, RegisterID regOp1, RegisterID regOp2, RegisterID regOp3, ByValInfo* byValInfo)
+{
+ setupArgumentsWithExecState(regOp1, regOp2, regOp3, TrustedImmPtr(byValInfo));
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EZJ operation, int dst, GPRReg arg)
+{
+ setupArgumentsWithExecState(TrustedImm32(dst), arg);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(JIT::WithProfileTag, J_JITOperation_ESsiJI operation, int dst, StructureStubInfo* stubInfo, GPRReg arg1, UniquedStringImpl* uid)
+{
+ setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1, TrustedImmPtr(uid));
+ return appendCallWithExceptionCheckSetJSValueResultWithProfile(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(JIT::WithProfileTag, J_JITOperation_EJJ operation, int dst, GPRReg arg1, GPRReg arg2)
+{
+ setupArgumentsWithExecState(arg1, arg2);
+ return appendCallWithExceptionCheckSetJSValueResultWithProfile(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EAapJ operation, int dst, ArrayAllocationProfile* arg1, GPRReg arg2)
+{
+ setupArgumentsWithExecState(TrustedImmPtr(arg1), arg2);
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJ operation, int dst, GPRReg arg1)
+{
+ setupArgumentsWithExecState(arg1);
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJIdc operation, int dst, GPRReg arg1, const Identifier* arg2)
+{
+ setupArgumentsWithExecState(arg1, TrustedImmPtr(arg2));
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJ operation, int dst, GPRReg arg1, GPRReg arg2)
+{
+ setupArgumentsWithExecState(arg1, arg2);
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJAp operation, int dst, GPRReg arg1, GPRReg arg2, ArrayProfile* arrayProfile)
+{
+ setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(arrayProfile));
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJBy operation, int dst, GPRReg arg1, GPRReg arg2, ByValInfo* byValInfo)
+{
+ setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(byValInfo));
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(Z_JITOperation_EJOJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
+{
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperationNoExceptionCheck(V_JITOperation_EJ operation, GPRReg arg1)
+{
+ setupArgumentsWithExecState(arg1);
+ updateTopCallFrame();
+ return appendCall(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(P_JITOperation_EJS operation, GPRReg arg1, size_t arg2)
+{
+ setupArgumentsWithExecState(arg1, TrustedImmPtr(arg2));
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(S_JITOperation_EJ operation, RegisterID regOp)
+{
+ setupArgumentsWithExecState(regOp);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(S_JITOperation_EJJ operation, RegisterID regOp1, RegisterID regOp2)
+{
+ setupArgumentsWithExecState(regOp1, regOp2);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EZSymtabJ operation, int op1, SymbolTable* symbolTable, RegisterID regOp3)
+{
+ setupArgumentsWithExecState(TrustedImm32(op1), TrustedImmPtr(symbolTable), regOp3);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EZSymtabJ operation, int op1, SymbolTable* symbolTable, RegisterID regOp3)
+{
+ setupArgumentsWithExecState(TrustedImm32(op1), TrustedImmPtr(symbolTable), regOp3);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJ operation, RegisterID regOp)
+{
+ setupArgumentsWithExecState(regOp);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECIZJJ operation, RegisterID regOp1, UniquedStringImpl* identOp2, int32_t op3, RegisterID regOp4, RegisterID regOp5)
+{
+ setupArgumentsWithExecState(regOp1, TrustedImmPtr(identOp2), TrustedImm32(op3), regOp4, regOp5);
+ return appendCallWithExceptionCheck(operation);
+}
- if (((dst >= -16) && (dst < 0)) || ((dst > 7) && (dst <= 15)))
- insnSpace += 8;
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECJZC operation, RegisterID regOp1, RegisterID regOp2, int32_t op3, RegisterID regOp4)
+{
+ setupArgumentsWithExecState(regOp1, regOp2, TrustedImm32(op3), regOp4);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJZ operation, RegisterID regOp1, int32_t op2)
+{
+ setupArgumentsWithExecState(regOp1, TrustedImm32(op2));
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJZJ operation, RegisterID regOp1, int32_t op2, RegisterID regOp3)
+{
+ setupArgumentsWithExecState(regOp1, TrustedImm32(op2), regOp3);
+ return appendCallWithExceptionCheck(operation);
+}
+
+#else // USE(JSVALUE32_64)
+
+// EncodedJSValue in JSVALUE32_64 is a 64-bit integer. When compiling for ARM EABI, it must be aligned to an even-numbered register pair (r0, r2 or [sp]).
+// To keep the generated code from using the wrong registers, occupy r1 or r3 with a dummy argument when necessary.
+#if (COMPILER_SUPPORTS(EABI) && CPU(ARM)) || CPU(MIPS)
+#define EABI_32BIT_DUMMY_ARG TrustedImm32(0),
#else
- UNUSED_PARAM(dst);
+#define EABI_32BIT_DUMMY_ARG
#endif
- ASSERT(differenceBetween(m_uninterruptedInstructionSequenceBegin, label()) <= insnSpace);
- ASSERT(sizeOfConstantPool() - m_uninterruptedConstantSequenceBegin <= constSpace);
+// On the SH4 architecture, the 64-bit EncodedJSValue of JSVALUE32_64 cannot be split half into an argument register and half onto the stack.
+// To avoid this, occupy the 4th argument register (r7) with a dummy argument when necessary. This must only be done when there
+// is no other 32-bit value argument following the 64-bit JSValue.
+#if CPU(SH4)
+#define SH4_32BIT_DUMMY_ARG TrustedImm32(0),
#else
- UNUSED_PARAM(insnSpace);
- UNUSED_PARAM(constSpace);
- UNUSED_PARAM(dst);
+#define SH4_32BIT_DUMMY_ARG
#endif
+
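// Editorial sketch: the dummy-argument macros above exist because a 64-bit
// EncodedJSValue must start in an even-numbered register on ARM EABI and MIPS
// (and must not be split between a register and the stack on SH4). For example,
//
//     setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag);
//
// expands on ARM EABI and MIPS to
//
//     setupArgumentsWithExecState(TrustedImm32(0), arg1Payload, arg1Tag);
//
// so the 64-bit value occupies the even/odd pair r2/r3, and to the unpadded
// form on other 32-bit targets; SH4_32BIT_DUMMY_ARG plays the same role for a
// second JSValue argument on SH4.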
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperationNoExceptionCheck(V_JITOperation_EJ operation, GPRReg arg1Tag, GPRReg arg1Payload)
+{
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag);
+ updateTopCallFrame();
+ return appendCall(operation);
}
-#endif // ASSEMBLER_HAS_CONSTANT_POOL
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(Z_JITOperation_EJOJ operation, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2, GPRReg arg3Tag, GPRReg arg3Payload)
+{
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2, EABI_32BIT_DUMMY_ARG arg3Payload, arg3Tag);
+ return appendCallWithExceptionCheck(operation);
+}
-ALWAYS_INLINE void JIT::updateTopCallFrame()
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(Z_JITOperation_EJZZ operation, GPRReg arg1Tag, GPRReg arg1Payload, int32_t arg2, int32_t arg3)
{
- ASSERT(static_cast<int>(m_bytecodeOffset) >= 0);
-#if USE(JSVALUE32_64)
- storePtr(TrustedImmPtr(m_codeBlock->instructions().begin() + m_bytecodeOffset + 1), intTagFor(JSStack::ArgumentCount));
-#else
- store32(TrustedImm32(m_bytecodeOffset + 1), intTagFor(JSStack::ArgumentCount));
-#endif
- storePtr(callFrameRegister, &m_vm->topCallFrame);
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, TrustedImm32(arg2), TrustedImm32(arg3));
+ return appendCallWithExceptionCheck(operation);
}
-ALWAYS_INLINE void JIT::restoreArgumentReferenceForTrampoline()
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(F_JITOperation_EFJZZ operation, GPRReg arg1, GPRReg arg2Tag, GPRReg arg2Payload, int32_t arg3, GPRReg arg4)
{
-#if CPU(X86)
- // Within a trampoline the return address will be on the stack at this point.
- addPtr(TrustedImm32(sizeof(void*)), stackPointerRegister, firstArgumentRegister);
-#elif CPU(ARM)
- move(stackPointerRegister, firstArgumentRegister);
-#elif CPU(SH4)
- move(stackPointerRegister, firstArgumentRegister);
-#endif
- // In the trampoline on x86-64, the first argument register is not overwritten.
+ setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag, TrustedImm32(arg3), arg4);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EAapJ operation, int dst, ArrayAllocationProfile* arg1, GPRReg arg2Tag, GPRReg arg2Payload)
+{
+ setupArgumentsWithExecState(TrustedImmPtr(arg1), arg2Payload, arg2Tag);
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJ operation, int dst, GPRReg arg1Tag, GPRReg arg1Payload)
+{
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag);
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(JIT::WithProfileTag, J_JITOperation_ESsiJI operation, int dst, StructureStubInfo* stubInfo, GPRReg arg1Tag, GPRReg arg1Payload, UniquedStringImpl* uid)
+{
+ setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1Payload, arg1Tag, TrustedImmPtr(uid));
+ return appendCallWithExceptionCheckSetJSValueResultWithProfile(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJIdc operation, int dst, GPRReg arg1Tag, GPRReg arg1Payload, const Identifier* arg2)
+{
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, TrustedImmPtr(arg2));
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJ operation, int dst, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload)
+{
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, SH4_32BIT_DUMMY_ARG arg2Payload, arg2Tag);
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJAp operation, int dst, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload, ArrayProfile* arrayProfile)
+{
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, SH4_32BIT_DUMMY_ARG arg2Payload, arg2Tag, TrustedImmPtr(arrayProfile));
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
}
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJBy operation, int dst, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload, ByValInfo* byValInfo)
+{
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, SH4_32BIT_DUMMY_ARG arg2Payload, arg2Tag, TrustedImmPtr(byValInfo));
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(JIT::WithProfileTag, J_JITOperation_EJJ operation, int dst, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload)
+{
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, SH4_32BIT_DUMMY_ARG arg2Payload, arg2Tag);
+ return appendCallWithExceptionCheckSetJSValueResultWithProfile(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(P_JITOperation_EJS operation, GPRReg arg1Tag, GPRReg arg1Payload, size_t arg2)
+{
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, TrustedImmPtr(arg2));
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(S_JITOperation_EJ operation, RegisterID argTag, RegisterID argPayload)
+{
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG argPayload, argTag);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(S_JITOperation_EJJ operation, RegisterID arg1Tag, RegisterID arg1Payload, RegisterID arg2Tag, RegisterID arg2Payload)
+{
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, SH4_32BIT_DUMMY_ARG arg2Payload, arg2Tag);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECIZCC operation, RegisterID regOp1, UniquedStringImpl* identOp2, int32_t op3, RegisterID regOp4, RegisterID regOp5)
+{
+ setupArgumentsWithExecState(regOp1, TrustedImmPtr(identOp2), TrustedImm32(op3), regOp4, regOp5);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECJZC operation, RegisterID arg1, RegisterID arg2Tag, RegisterID arg2Payload, int32_t arg3, RegisterID arg4)
+{
+ setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag, TrustedImm32(arg3), arg4);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJ operation, RegisterID regOp1Tag, RegisterID regOp1Payload)
+{
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG regOp1Payload, regOp1Tag);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EZSymtabJ operation, int32_t op1, SymbolTable* symbolTable, RegisterID regOp3Tag, RegisterID regOp3Payload)
+{
+ setupArgumentsWithExecState(TrustedImm32(op1), TrustedImmPtr(symbolTable), EABI_32BIT_DUMMY_ARG regOp3Payload, regOp3Tag);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ESsiJJI operation, StructureStubInfo* stubInfo, RegisterID regOp1Tag, RegisterID regOp1Payload, RegisterID regOp2Tag, RegisterID regOp2Payload, UniquedStringImpl* uid)
+{
+ setupArgumentsWithExecState(TrustedImmPtr(stubInfo), regOp1Payload, regOp1Tag, regOp2Payload, regOp2Tag, TrustedImmPtr(uid));
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJJJ operation, RegisterID regOp1Tag, RegisterID regOp1Payload, RegisterID regOp2Tag, RegisterID regOp2Payload, RegisterID regOp3Tag, RegisterID regOp3Payload)
+{
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG regOp1Payload, regOp1Tag, SH4_32BIT_DUMMY_ARG regOp2Payload, regOp2Tag, regOp3Payload, regOp3Tag);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJJJAp operation, RegisterID regOp1Tag, RegisterID regOp1Payload, RegisterID regOp2Tag, RegisterID regOp2Payload, RegisterID regOp3Tag, RegisterID regOp3Payload, ArrayProfile* arrayProfile)
+{
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG regOp1Payload, regOp1Tag, SH4_32BIT_DUMMY_ARG regOp2Payload, regOp2Tag, regOp3Payload, regOp3Tag, TrustedImmPtr(arrayProfile));
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJJJBy operation, RegisterID regOp1Tag, RegisterID regOp1Payload, RegisterID regOp2Tag, RegisterID regOp2Payload, RegisterID regOp3Tag, RegisterID regOp3Payload, ByValInfo* byValInfo)
+{
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG regOp1Payload, regOp1Tag, SH4_32BIT_DUMMY_ARG regOp2Payload, regOp2Tag, regOp3Payload, regOp3Tag, TrustedImmPtr(byValInfo));
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EZJ operation, int dst, RegisterID regOp1Tag, RegisterID regOp1Payload)
+{
+ setupArgumentsWithExecState(TrustedImm32(dst), regOp1Payload, regOp1Tag);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJZ operation, RegisterID regOp1Tag, RegisterID regOp1Payload, int32_t op2)
+{
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG regOp1Payload, regOp1Tag, TrustedImm32(op2));
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJZJ operation, RegisterID regOp1Tag, RegisterID regOp1Payload, int32_t op2, RegisterID regOp3Tag, RegisterID regOp3Payload)
+{
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG regOp1Payload, regOp1Tag, TrustedImm32(op2), EABI_32BIT_DUMMY_ARG regOp3Payload, regOp3Tag);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJscCJ operation, int dst, GPRReg arg1, JSCell* cell, GPRReg arg2Tag, GPRReg arg2Payload)
+{
+ setupArgumentsWithExecState(arg1, TrustedImmPtr(cell), EABI_32BIT_DUMMY_ARG arg2Payload, arg2Tag);
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+#undef EABI_32BIT_DUMMY_ARG
+#undef SH4_32BIT_DUMMY_ARG
+
+#endif // USE(JSVALUE32_64)
+
ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
{
- return branchPtr(NotEqual, Address(reg, JSCell::structureOffset()), TrustedImmPtr(structure));
+ return branchStructure(NotEqual, Address(reg, JSCell::structureIDOffset()), structure);
}
ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
@@ -211,16 +774,24 @@ ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&
linkSlowCase(iter);
}
+ALWAYS_INLINE void JIT::linkAllSlowCasesForBytecodeOffset(Vector<SlowCaseEntry>& slowCases, Vector<SlowCaseEntry>::iterator& iter, unsigned bytecodeOffset)
+{
+ while (iter != slowCases.end() && iter->to == bytecodeOffset) {
+ iter->from.link(this);
+ ++iter;
+ }
+}
+
ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
{
- ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+ ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
m_slowCases.append(SlowCaseEntry(jump, m_bytecodeOffset));
}
ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)
{
- ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+ ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
const JumpList::JumpVector& jumpVector = jumpList.jumps();
size_t size = jumpVector.size();
@@ -230,7 +801,7 @@ ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)
ALWAYS_INLINE void JIT::addSlowCase()
{
- ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+ ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
Jump emptyJump; // Doing it this way to make Windows happy.
m_slowCases.append(SlowCaseEntry(emptyJump, m_bytecodeOffset));
@@ -238,21 +809,26 @@ ALWAYS_INLINE void JIT::addSlowCase()
ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
{
- ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+ ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
m_jmpTable.append(JumpTable(jump, m_bytecodeOffset + relativeOffset));
}
ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
{
- ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+ ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
jump.linkTo(m_labels[m_bytecodeOffset + relativeOffset], this);
}
-ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotObject(RegisterID structureReg)
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfCellObject(RegisterID cellReg)
{
- return branch8(Below, Address(structureReg, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType));
+ return branch8(AboveOrEqual, Address(cellReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType));
+}
+
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfCellNotObject(RegisterID cellReg)
+{
+ return branch8(Below, Address(cellReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType));
}
#if ENABLE(SAMPLING_FLAGS)
@@ -308,7 +884,7 @@ ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
#endif
#endif
-ALWAYS_INLINE bool JIT::isOperandConstantImmediateChar(unsigned src)
+ALWAYS_INLINE bool JIT::isOperandConstantChar(int src)
{
return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isString() && asString(getConstantOperand(src).asCell())->length() == 1;
}
@@ -316,21 +892,24 @@ ALWAYS_INLINE bool JIT::isOperandConstantImmediateChar(unsigned src)
template<typename StructureType>
inline void JIT::emitAllocateJSObject(RegisterID allocator, StructureType structure, RegisterID result, RegisterID scratch)
{
- loadPtr(Address(allocator, MarkedAllocator::offsetOfFreeListHead()), result);
- addSlowCase(branchTestPtr(Zero, result));
+ if (Options::forceGCSlowPaths())
+ addSlowCase(jump());
+ else {
+ loadPtr(Address(allocator, MarkedAllocator::offsetOfFreeListHead()), result);
+ addSlowCase(branchTestPtr(Zero, result));
+ }
// remove the object from the free list
loadPtr(Address(result), scratch);
storePtr(scratch, Address(allocator, MarkedAllocator::offsetOfFreeListHead()));
- // initialize the object's structure
- storePtr(structure, Address(result, JSCell::structureOffset()));
-
// initialize the object's property storage pointer
storePtr(TrustedImmPtr(0), Address(result, JSObject::butterflyOffset()));
+
+ // initialize the object's structure
+ emitStoreStructureWithTypeInfo(structure, result, scratch);
}
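
The fast path above is a free-list pop: load the allocator's head, take the slow case if it is null (or when Options::forceGCSlowPaths() is set), then advance the head to the next free cell before the butterfly and structure are initialized. A rough C++ sketch, assuming each free cell stores the next free cell in its first word; the FreeCell type below is illustrative, not the real MarkedAllocator interface:

    struct FreeCell { FreeCell* next; }; // first word of a free cell points at the next one

    inline void* tryAllocateFromFreeList(FreeCell*& freeListHead)
    {
        FreeCell* result = freeListHead;
        if (!result)
            return nullptr;            // corresponds to addSlowCase(branchTestPtr(Zero, result))
        freeListHead = result->next;   // "remove the object from the free list"
        return result;
    }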
-#if ENABLE(VALUE_PROFILER)
inline void JIT::emitValueProfilingSite(ValueProfile* valueProfile)
{
ASSERT(shouldEmitProfiling());
@@ -340,32 +919,15 @@ inline void JIT::emitValueProfilingSite(ValueProfile* valueProfile)
#if USE(JSVALUE32_64)
const RegisterID valueTag = regT1;
#endif
- const RegisterID scratch = regT3;
- if (ValueProfile::numberOfBuckets == 1) {
- // We're in a simple configuration: only one bucket, so we can just do a direct
- // store.
+ // We're in a simple configuration: only one bucket, so we can just do a direct
+ // store.
#if USE(JSVALUE64)
- store64(value, valueProfile->m_buckets);
+ store64(value, valueProfile->m_buckets);
#else
- EncodedValueDescriptor* descriptor = bitwise_cast<EncodedValueDescriptor*>(valueProfile->m_buckets);
- store32(value, &descriptor->asBits.payload);
- store32(valueTag, &descriptor->asBits.tag);
-#endif
- return;
- }
-
- if (m_randomGenerator.getUint32() & 1)
- add32(TrustedImm32(1), bucketCounterRegister);
- else
- add32(TrustedImm32(3), bucketCounterRegister);
- and32(TrustedImm32(ValueProfile::bucketIndexMask), bucketCounterRegister);
- move(TrustedImmPtr(valueProfile->m_buckets), scratch);
-#if USE(JSVALUE64)
- store64(value, BaseIndex(scratch, bucketCounterRegister, TimesEight));
-#elif USE(JSVALUE32_64)
- store32(value, BaseIndex(scratch, bucketCounterRegister, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
- store32(valueTag, BaseIndex(scratch, bucketCounterRegister, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ EncodedValueDescriptor* descriptor = bitwise_cast<EncodedValueDescriptor*>(valueProfile->m_buckets);
+ store32(value, &descriptor->asBits.payload);
+ store32(valueTag, &descriptor->asBits.tag);
#endif
}
@@ -380,65 +942,42 @@ inline void JIT::emitValueProfilingSite()
{
emitValueProfilingSite(m_bytecodeOffset);
}
-#endif // ENABLE(VALUE_PROFILER)
-inline void JIT::emitArrayProfilingSite(RegisterID structureAndIndexingType, RegisterID scratch, ArrayProfile* arrayProfile)
+inline void JIT::emitArrayProfilingSiteWithCell(RegisterID cell, RegisterID indexingType, ArrayProfile* arrayProfile)
{
- UNUSED_PARAM(scratch); // We had found this scratch register useful here before, so I will keep it for now.
-
- RegisterID structure = structureAndIndexingType;
- RegisterID indexingType = structureAndIndexingType;
-
- if (shouldEmitProfiling())
- storePtr(structure, arrayProfile->addressOfLastSeenStructure());
+ if (shouldEmitProfiling()) {
+ load32(MacroAssembler::Address(cell, JSCell::structureIDOffset()), indexingType);
+ store32(indexingType, arrayProfile->addressOfLastSeenStructureID());
+ }
- load8(Address(structure, Structure::indexingTypeOffset()), indexingType);
+ load8(Address(cell, JSCell::indexingTypeOffset()), indexingType);
}
-inline void JIT::emitArrayProfilingSiteForBytecodeIndex(RegisterID structureAndIndexingType, RegisterID scratch, unsigned bytecodeIndex)
+inline void JIT::emitArrayProfilingSiteForBytecodeIndexWithCell(RegisterID cell, RegisterID indexingType, unsigned bytecodeIndex)
{
-#if ENABLE(VALUE_PROFILER)
- emitArrayProfilingSite(structureAndIndexingType, scratch, m_codeBlock->getOrAddArrayProfile(bytecodeIndex));
-#else
- UNUSED_PARAM(bytecodeIndex);
- emitArrayProfilingSite(structureAndIndexingType, scratch, 0);
-#endif
+ emitArrayProfilingSiteWithCell(cell, indexingType, m_codeBlock->getOrAddArrayProfile(bytecodeIndex));
}
inline void JIT::emitArrayProfileStoreToHoleSpecialCase(ArrayProfile* arrayProfile)
{
-#if ENABLE(VALUE_PROFILER)
store8(TrustedImm32(1), arrayProfile->addressOfMayStoreToHole());
-#else
- UNUSED_PARAM(arrayProfile);
-#endif
}
inline void JIT::emitArrayProfileOutOfBoundsSpecialCase(ArrayProfile* arrayProfile)
{
-#if ENABLE(VALUE_PROFILER)
store8(TrustedImm32(1), arrayProfile->addressOfOutOfBounds());
-#else
- UNUSED_PARAM(arrayProfile);
-#endif
}
static inline bool arrayProfileSaw(ArrayModes arrayModes, IndexingType capability)
{
-#if ENABLE(VALUE_PROFILER)
return arrayModesInclude(arrayModes, capability);
-#else
- UNUSED_PARAM(arrayModes);
- UNUSED_PARAM(capability);
- return false;
-#endif
}
inline JITArrayMode JIT::chooseArrayMode(ArrayProfile* profile)
{
-#if ENABLE(VALUE_PROFILER)
- profile->computeUpdatedPrediction(m_codeBlock);
- ArrayModes arrayModes = profile->observedArrayModes();
+ ConcurrentJITLocker locker(m_codeBlock->m_lock);
+ profile->computeUpdatedPrediction(locker, m_codeBlock);
+ ArrayModes arrayModes = profile->observedArrayModes(locker);
if (arrayProfileSaw(arrayModes, DoubleShape))
return JITDouble;
if (arrayProfileSaw(arrayModes, Int32Shape))
@@ -446,50 +985,38 @@ inline JITArrayMode JIT::chooseArrayMode(ArrayProfile* profile)
if (arrayProfileSaw(arrayModes, ArrayStorageShape))
return JITArrayStorage;
return JITContiguous;
-#else
- UNUSED_PARAM(profile);
- return JITContiguous;
-#endif
+}
+
+ALWAYS_INLINE int32_t JIT::getOperandConstantInt(int src)
+{
+ return getConstantOperand(src).asInt32();
+}
+
+ALWAYS_INLINE double JIT::getOperandConstantDouble(int src)
+{
+ return getConstantOperand(src).asDouble();
}
#if USE(JSVALUE32_64)
inline void JIT::emitLoadTag(int index, RegisterID tag)
{
- RegisterID mappedTag;
- if (getMappedTag(index, mappedTag)) {
- move(mappedTag, tag);
- unmap(tag);
- return;
- }
-
if (m_codeBlock->isConstantRegisterIndex(index)) {
move(Imm32(getConstantOperand(index).tag()), tag);
- unmap(tag);
return;
}
load32(tagFor(index), tag);
- unmap(tag);
}
inline void JIT::emitLoadPayload(int index, RegisterID payload)
{
- RegisterID mappedPayload;
- if (getMappedPayload(index, mappedPayload)) {
- move(mappedPayload, payload);
- unmap(payload);
- return;
- }
-
if (m_codeBlock->isConstantRegisterIndex(index)) {
move(Imm32(getConstantOperand(index).payload()), payload);
- unmap(payload);
return;
}
load32(payloadFor(index), payload);
- unmap(payload);
}
inline void JIT::emitLoad(const JSValue& v, RegisterID tag, RegisterID payload)
@@ -498,6 +1025,16 @@ inline void JIT::emitLoad(const JSValue& v, RegisterID tag, RegisterID payload)
move(Imm32(v.tag()), tag);
}
+ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, JSValueRegs dst)
+{
+ emitLoad(src, dst.tagGPR(), dst.payloadGPR());
+}
+
+ALWAYS_INLINE void JIT::emitPutVirtualRegister(int dst, JSValueRegs from)
+{
+ emitStore(dst, from.tagGPR(), from.payloadGPR());
+}
+
inline void JIT::emitLoad(int index, RegisterID tag, RegisterID payload, RegisterID base)
{
RELEASE_ASSERT(tag != payload);
@@ -521,11 +1058,6 @@ inline void JIT::emitLoad(int index, RegisterID tag, RegisterID payload, Registe
inline void JIT::emitLoad2(int index1, RegisterID tag1, RegisterID payload1, int index2, RegisterID tag2, RegisterID payload2)
{
- if (isMapped(index1)) {
- emitLoad(index1, tag1, payload1);
- emitLoad(index2, tag2, payload2);
- return;
- }
emitLoad(index2, tag2, payload2);
emitLoad(index1, tag1, payload1);
}
@@ -534,7 +1066,7 @@ inline void JIT::emitLoadDouble(int index, FPRegisterID value)
{
if (m_codeBlock->isConstantRegisterIndex(index)) {
WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
- loadDouble(&inConstantPool, value);
+ loadDouble(TrustedImmPtr(&inConstantPool), value);
} else
loadDouble(addressFor(index), value);
}
@@ -562,12 +1094,6 @@ inline void JIT::emitStoreInt32(int index, RegisterID payload, bool indexIsInt32
store32(TrustedImm32(JSValue::Int32Tag), tagFor(index, callFrameRegister));
}
-inline void JIT::emitStoreAndMapInt32(int index, RegisterID tag, RegisterID payload, bool indexIsInt32, size_t opcodeLength)
-{
- emitStoreInt32(index, payload, indexIsInt32);
- map(m_bytecodeOffset + opcodeLength, index, tag, payload);
-}
-
inline void JIT::emitStoreInt32(int index, TrustedImm32 payload, bool indexIsInt32)
{
store32(payload, payloadFor(index, callFrameRegister));
@@ -600,86 +1126,11 @@ inline void JIT::emitStore(int index, const JSValue constant, RegisterID base)
store32(Imm32(constant.tag()), tagFor(index, base));
}
-ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
+ALWAYS_INLINE void JIT::emitInitRegister(int dst)
{
emitStore(dst, jsUndefined());
}
-inline bool JIT::isLabeled(unsigned bytecodeOffset)
-{
- for (size_t numberOfJumpTargets = m_codeBlock->numberOfJumpTargets(); m_jumpTargetIndex != numberOfJumpTargets; ++m_jumpTargetIndex) {
- unsigned jumpTarget = m_codeBlock->jumpTarget(m_jumpTargetIndex);
- if (jumpTarget == bytecodeOffset)
- return true;
- if (jumpTarget > bytecodeOffset)
- return false;
- }
- return false;
-}
-
-inline void JIT::map(unsigned bytecodeOffset, int virtualRegisterIndex, RegisterID tag, RegisterID payload)
-{
- if (isLabeled(bytecodeOffset))
- return;
-
- m_mappedBytecodeOffset = bytecodeOffset;
- m_mappedVirtualRegisterIndex = virtualRegisterIndex;
- m_mappedTag = tag;
- m_mappedPayload = payload;
-
- ASSERT(!canBeOptimizedOrInlined() || m_mappedPayload == regT0);
- ASSERT(!canBeOptimizedOrInlined() || m_mappedTag == regT1);
-}
-
-inline void JIT::unmap(RegisterID registerID)
-{
- if (m_mappedTag == registerID)
- m_mappedTag = (RegisterID)-1;
- else if (m_mappedPayload == registerID)
- m_mappedPayload = (RegisterID)-1;
-}
-
-inline void JIT::unmap()
-{
- m_mappedBytecodeOffset = (unsigned)-1;
- m_mappedVirtualRegisterIndex = JSStack::ReturnPC;
- m_mappedTag = (RegisterID)-1;
- m_mappedPayload = (RegisterID)-1;
-}
-
-inline bool JIT::isMapped(int virtualRegisterIndex)
-{
- if (m_mappedBytecodeOffset != m_bytecodeOffset)
- return false;
- if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
- return false;
- return true;
-}
-
-inline bool JIT::getMappedPayload(int virtualRegisterIndex, RegisterID& payload)
-{
- if (m_mappedBytecodeOffset != m_bytecodeOffset)
- return false;
- if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
- return false;
- if (m_mappedPayload == (RegisterID)-1)
- return false;
- payload = m_mappedPayload;
- return true;
-}
-
-inline bool JIT::getMappedTag(int virtualRegisterIndex, RegisterID& tag)
-{
- if (m_mappedBytecodeOffset != m_bytecodeOffset)
- return false;
- if (m_mappedVirtualRegisterIndex != virtualRegisterIndex)
- return false;
- if (m_mappedTag == (RegisterID)-1)
- return false;
- tag = m_mappedTag;
- return true;
-}
-
inline void JIT::emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex)
{
if (!m_codeBlock->isKnownNotImmediate(virtualRegisterIndex)) {
@@ -700,20 +1151,20 @@ inline void JIT::emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex, RegisterI
}
}
-ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
+ALWAYS_INLINE bool JIT::isOperandConstantInt(int src)
{
return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}
-ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant)
+ALWAYS_INLINE bool JIT::getOperandConstantInt(int op1, int op2, int& op, int32_t& constant)
{
- if (isOperandConstantImmediateInt(op1)) {
+ if (isOperandConstantInt(op1)) {
constant = getConstantOperand(op1).asInt32();
op = op2;
return true;
}
- if (isOperandConstantImmediateInt(op2)) {
+ if (isOperandConstantInt(op2)) {
constant = getConstantOperand(op2).asInt32();
op = op1;
return true;
@@ -724,23 +1175,10 @@ ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(unsigned op1, unsigned op
#else // USE(JSVALUE32_64)
-/* Deprecated: Please use JITStubCall instead. */
-
-ALWAYS_INLINE void JIT::emitGetJITStubArg(unsigned argumentNumber, RegisterID dst)
-{
- unsigned argumentStackOffset = (argumentNumber * (sizeof(JSValue) / sizeof(void*))) + JITSTACKFRAME_ARGS_INDEX;
- peek64(dst, argumentStackOffset);
-}
-
-ALWAYS_INLINE void JIT::killLastResultRegister()
-{
- m_lastResultBytecodeRegister = std::numeric_limits<int>::max();
-}
-
// get arg puts an arg from the SF register array into a h/w register
ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
{
- ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+ ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
// TODO: we want to reuse values that are already in registers if we can - add a register allocator!
if (m_codeBlock->isConstantRegisterIndex(src)) {
@@ -749,50 +1187,54 @@ ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
move(TrustedImm64(JSValue::encode(value)), dst);
else
move(Imm64(JSValue::encode(value)), dst);
- killLastResultRegister();
- return;
- }
-
- if (src == m_lastResultBytecodeRegister && m_codeBlock->isTemporaryRegisterIndex(src) && !atJumpTarget()) {
- // The argument we want is already stored in eax
- if (dst != cachedResultRegister)
- move(cachedResultRegister, dst);
- killLastResultRegister();
return;
}
load64(Address(callFrameRegister, src * sizeof(Register)), dst);
- killLastResultRegister();
+}
+
+ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, JSValueRegs dst)
+{
+ emitGetVirtualRegister(src, dst.payloadGPR());
+}
+
+ALWAYS_INLINE void JIT::emitGetVirtualRegister(VirtualRegister src, RegisterID dst)
+{
+ emitGetVirtualRegister(src.offset(), dst);
}
ALWAYS_INLINE void JIT::emitGetVirtualRegisters(int src1, RegisterID dst1, int src2, RegisterID dst2)
{
- if (src2 == m_lastResultBytecodeRegister) {
- emitGetVirtualRegister(src2, dst2);
- emitGetVirtualRegister(src1, dst1);
- } else {
- emitGetVirtualRegister(src1, dst1);
- emitGetVirtualRegister(src2, dst2);
- }
+ emitGetVirtualRegister(src1, dst1);
+ emitGetVirtualRegister(src2, dst2);
}
-ALWAYS_INLINE int32_t JIT::getConstantOperandImmediateInt(unsigned src)
+ALWAYS_INLINE void JIT::emitGetVirtualRegisters(VirtualRegister src1, RegisterID dst1, VirtualRegister src2, RegisterID dst2)
{
- return getConstantOperand(src).asInt32();
+ emitGetVirtualRegisters(src1.offset(), dst1, src2.offset(), dst2);
}
-ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(unsigned src)
+ALWAYS_INLINE bool JIT::isOperandConstantInt(int src)
{
return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}
-ALWAYS_INLINE void JIT::emitPutVirtualRegister(unsigned dst, RegisterID from)
+ALWAYS_INLINE void JIT::emitPutVirtualRegister(int dst, RegisterID from)
{
store64(from, Address(callFrameRegister, dst * sizeof(Register)));
- m_lastResultBytecodeRegister = (from == cachedResultRegister) ? static_cast<int>(dst) : std::numeric_limits<int>::max();
}
-ALWAYS_INLINE void JIT::emitInitRegister(unsigned dst)
+ALWAYS_INLINE void JIT::emitPutVirtualRegister(int dst, JSValueRegs from)
+{
+ emitPutVirtualRegister(dst, from.payloadGPR());
+}
+
+ALWAYS_INLINE void JIT::emitPutVirtualRegister(VirtualRegister dst, RegisterID from)
+{
+ emitPutVirtualRegister(dst.offset(), from);
+}
+
+ALWAYS_INLINE void JIT::emitInitRegister(int dst)
{
store64(TrustedImm64(JSValue::encode(jsUndefined())), Address(callFrameRegister, dst * sizeof(Register)));
}
@@ -829,7 +1271,7 @@ inline void JIT::emitLoadDouble(int index, FPRegisterID value)
{
if (m_codeBlock->isConstantRegisterIndex(index)) {
WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
- loadDouble(&inConstantPool, value);
+ loadDouble(TrustedImmPtr(&inConstantPool), value);
} else
loadDouble(addressFor(index), value);
}
@@ -837,50 +1279,50 @@ inline void JIT::emitLoadDouble(int index, FPRegisterID value)
inline void JIT::emitLoadInt32ToDouble(int index, FPRegisterID value)
{
if (m_codeBlock->isConstantRegisterIndex(index)) {
- ASSERT(isOperandConstantImmediateInt(index));
+ ASSERT(isOperandConstantInt(index));
convertInt32ToDouble(Imm32(getConstantOperand(index).asInt32()), value);
} else
convertInt32ToDouble(addressFor(index), value);
}
-ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfInt(RegisterID reg)
{
return branch64(AboveOrEqual, reg, tagTypeNumberRegister);
}
-ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg)
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotInt(RegisterID reg)
{
return branch64(Below, reg, tagTypeNumberRegister);
}
-ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
+ALWAYS_INLINE JIT::PatchableJump JIT::emitPatchableJumpIfNotInt(RegisterID reg)
{
- move(reg1, scratch);
- and64(reg2, scratch);
- return emitJumpIfNotImmediateInteger(scratch);
+ return patchableBranch64(Below, reg, tagTypeNumberRegister);
}
-ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateInteger(RegisterID reg)
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotInt(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
- addSlowCase(emitJumpIfNotImmediateInteger(reg));
+ move(reg1, scratch);
+ and64(reg2, scratch);
+ return emitJumpIfNotInt(scratch);
}
-ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotInt(RegisterID reg)
{
- addSlowCase(emitJumpIfNotImmediateIntegers(reg1, reg2, scratch));
+ addSlowCase(emitJumpIfNotInt(reg));
}
-ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateNumber(RegisterID reg)
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotInt(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
- addSlowCase(emitJumpIfNotImmediateNumber(reg));
+ addSlowCase(emitJumpIfNotInt(reg1, reg2, scratch));
}
-ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID dest)
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotNumber(RegisterID reg)
{
- emitFastArithIntToImmNoCheck(src, dest);
+ addSlowCase(emitJumpIfNotNumber(reg));
}
-ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
+ALWAYS_INLINE void JIT::emitTagBool(RegisterID reg)
{
or32(TrustedImm32(static_cast<int32_t>(ValueFalse)), reg);
}
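
Several of the renamed helpers above (emitJumpIfInt, emitJumpIfNotInt, emitTagBool) revolve around comparisons with tagTypeNumberRegister. A minimal sketch of the underlying check, assuming the usual JSVALUE64 encoding in which boxed int32s are the only values whose top 16 bits are all ones:

    #include <cstdint>

    constexpr uint64_t kTagTypeNumber = 0xffff000000000000ull; // assumed JSVALUE64 tag constant

    inline uint64_t boxInt32(int32_t i)
    {
        return kTagTypeNumber | static_cast<uint32_t>(i); // what or64(tagTypeNumberRegister, ...) produces
    }

    inline bool isBoxedInt32(uint64_t bits)
    {
        // branch64(AboveOrEqual, reg, tagTypeNumberRegister) is this unsigned comparison.
        return bits >= kTagTypeNumber;
    }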
diff --git a/Source/JavaScriptCore/jit/JITLeftShiftGenerator.cpp b/Source/JavaScriptCore/jit/JITLeftShiftGenerator.cpp
new file mode 100644
index 000000000..1ddaa6ab1
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITLeftShiftGenerator.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JITLeftShiftGenerator.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+void JITLeftShiftGenerator::generateFastPath(CCallHelpers& jit)
+{
+ ASSERT(m_scratchGPR != InvalidGPRReg);
+ ASSERT(m_scratchGPR != m_left.payloadGPR());
+ ASSERT(m_scratchGPR != m_right.payloadGPR());
+#if USE(JSVALUE32_64)
+ ASSERT(m_scratchGPR != m_left.tagGPR());
+ ASSERT(m_scratchGPR != m_right.tagGPR());
+#endif
+
+ ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());
+
+ m_didEmitFastPath = true;
+
+ if (m_rightOperand.isConstInt32()) {
+ // Try to do (intVar << intConstant).
+ m_slowPathJumpList.append(jit.branchIfNotInt32(m_left));
+
+ jit.moveValueRegs(m_left, m_result);
+ jit.lshift32(CCallHelpers::Imm32(m_rightOperand.asConstInt32()), m_result.payloadGPR());
+
+ } else {
+ // Try to do (intConstant << intVar) or (intVar << intVar).
+ m_slowPathJumpList.append(jit.branchIfNotInt32(m_right));
+
+ GPRReg rightOperandGPR = m_right.payloadGPR();
+ if (rightOperandGPR == m_result.payloadGPR()) {
+ jit.move(rightOperandGPR, m_scratchGPR);
+ rightOperandGPR = m_scratchGPR;
+ }
+
+ if (m_leftOperand.isConstInt32()) {
+#if USE(JSVALUE32_64)
+ jit.move(m_right.tagGPR(), m_result.tagGPR());
+#endif
+ jit.move(CCallHelpers::Imm32(m_leftOperand.asConstInt32()), m_result.payloadGPR());
+ } else {
+ m_slowPathJumpList.append(jit.branchIfNotInt32(m_left));
+ jit.moveValueRegs(m_left, m_result);
+ }
+
+ jit.lshift32(rightOperandGPR, m_result.payloadGPR());
+ }
+
+#if USE(JSVALUE64)
+ jit.or64(GPRInfo::tagTypeNumberRegister, m_result.payloadGPR());
+#endif
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
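
For reference, when both operands are already int32s the fast path above computes the usual ECMAScript left shift with the count taken modulo 32; lshift32 is assumed to rely on the hardware masking the count to its low five bits, as x86 and ARM64 do for 32-bit shifts. A small sketch of that scalar semantics:

    #include <cstdint>

    inline int32_t jsLeftShift(int32_t left, int32_t right)
    {
        // Shift in unsigned arithmetic to avoid signed-overflow issues, then
        // reinterpret as int32, matching ToInt32(left) << (ToUint32(right) & 31).
        return static_cast<int32_t>(static_cast<uint32_t>(left) << (static_cast<uint32_t>(right) & 31u));
    }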
diff --git a/Source/JavaScriptCore/jit/JITLeftShiftGenerator.h b/Source/JavaScriptCore/jit/JITLeftShiftGenerator.h
new file mode 100644
index 000000000..633bcb3b1
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITLeftShiftGenerator.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITLeftShiftGenerator_h
+#define JITLeftShiftGenerator_h
+
+#if ENABLE(JIT)
+
+#include "JITBitBinaryOpGenerator.h"
+
+namespace JSC {
+
+class JITLeftShiftGenerator : public JITBitBinaryOpGenerator {
+public:
+ JITLeftShiftGenerator(const SnippetOperand& leftOperand, const SnippetOperand& rightOperand,
+ JSValueRegs result, JSValueRegs left, JSValueRegs right, GPRReg scratchGPR)
+ : JITBitBinaryOpGenerator(leftOperand, rightOperand, result, left, right, scratchGPR)
+ { }
+
+ void generateFastPath(CCallHelpers&);
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // JITLeftShiftGenerator_h
diff --git a/Source/JavaScriptCore/jit/JITMulGenerator.cpp b/Source/JavaScriptCore/jit/JITMulGenerator.cpp
new file mode 100644
index 000000000..b1fb0b0d0
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITMulGenerator.cpp
@@ -0,0 +1,198 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JITMulGenerator.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+void JITMulGenerator::generateFastPath(CCallHelpers& jit)
+{
+ ASSERT(m_scratchGPR != InvalidGPRReg);
+ ASSERT(m_scratchGPR != m_left.payloadGPR());
+ ASSERT(m_scratchGPR != m_right.payloadGPR());
+#if USE(JSVALUE64)
+ ASSERT(m_scratchGPR != m_result.payloadGPR());
+#else
+ ASSERT(m_scratchGPR != m_left.tagGPR());
+ ASSERT(m_scratchGPR != m_right.tagGPR());
+ ASSERT(m_scratchFPR != InvalidFPRReg);
+#endif
+
+ ASSERT(!m_leftOperand.isPositiveConstInt32() || !m_rightOperand.isPositiveConstInt32());
+
+ if (!m_leftOperand.mightBeNumber() || !m_rightOperand.mightBeNumber()) {
+ ASSERT(!m_didEmitFastPath);
+ return;
+ }
+
+ m_didEmitFastPath = true;
+
+ if (m_leftOperand.isPositiveConstInt32() || m_rightOperand.isPositiveConstInt32()) {
+ JSValueRegs var = m_leftOperand.isPositiveConstInt32() ? m_right : m_left;
+ SnippetOperand& varOpr = m_leftOperand.isPositiveConstInt32() ? m_rightOperand : m_leftOperand;
+ SnippetOperand& constOpr = m_leftOperand.isPositiveConstInt32() ? m_leftOperand : m_rightOperand;
+
+ // Try to do intVar * intConstant.
+ CCallHelpers::Jump notInt32 = jit.branchIfNotInt32(var);
+
+ GPRReg multiplyResultGPR = m_result.payloadGPR();
+ if (multiplyResultGPR == var.payloadGPR())
+ multiplyResultGPR = m_scratchGPR;
+
+ m_slowPathJumpList.append(jit.branchMul32(CCallHelpers::Overflow, var.payloadGPR(), CCallHelpers::Imm32(constOpr.asConstInt32()), multiplyResultGPR));
+
+ jit.boxInt32(multiplyResultGPR, m_result);
+ m_endJumpList.append(jit.jump());
+
+ if (!jit.supportsFloatingPoint()) {
+ m_slowPathJumpList.append(notInt32);
+ return;
+ }
+
+ // Try to do doubleVar * double(intConstant).
+ notInt32.link(&jit);
+ if (!varOpr.definitelyIsNumber())
+ m_slowPathJumpList.append(jit.branchIfNotNumber(var, m_scratchGPR));
+
+ jit.unboxDoubleNonDestructive(var, m_leftFPR, m_scratchGPR, m_scratchFPR);
+
+ jit.move(CCallHelpers::Imm32(constOpr.asConstInt32()), m_scratchGPR);
+ jit.convertInt32ToDouble(m_scratchGPR, m_rightFPR);
+
+ // Fall thru to doubleVar * doubleVar.
+
+ } else {
+ ASSERT(!m_leftOperand.isPositiveConstInt32() && !m_rightOperand.isPositiveConstInt32());
+
+ CCallHelpers::Jump leftNotInt;
+ CCallHelpers::Jump rightNotInt;
+
+ // Try to do intVar * intVar.
+ leftNotInt = jit.branchIfNotInt32(m_left);
+ rightNotInt = jit.branchIfNotInt32(m_right);
+
+ m_slowPathJumpList.append(jit.branchMul32(CCallHelpers::Overflow, m_right.payloadGPR(), m_left.payloadGPR(), m_scratchGPR));
+ m_slowPathJumpList.append(jit.branchTest32(CCallHelpers::Zero, m_scratchGPR)); // Go slow if potential negative zero.
+
+ jit.boxInt32(m_scratchGPR, m_result);
+ m_endJumpList.append(jit.jump());
+
+ if (!jit.supportsFloatingPoint()) {
+ m_slowPathJumpList.append(leftNotInt);
+ m_slowPathJumpList.append(rightNotInt);
+ return;
+ }
+
+ leftNotInt.link(&jit);
+ if (!m_leftOperand.definitelyIsNumber())
+ m_slowPathJumpList.append(jit.branchIfNotNumber(m_left, m_scratchGPR));
+ if (!m_rightOperand.definitelyIsNumber())
+ m_slowPathJumpList.append(jit.branchIfNotNumber(m_right, m_scratchGPR));
+
+ jit.unboxDoubleNonDestructive(m_left, m_leftFPR, m_scratchGPR, m_scratchFPR);
+ CCallHelpers::Jump rightIsDouble = jit.branchIfNotInt32(m_right);
+
+ jit.convertInt32ToDouble(m_right.payloadGPR(), m_rightFPR);
+ CCallHelpers::Jump rightWasInteger = jit.jump();
+
+ rightNotInt.link(&jit);
+ if (!m_rightOperand.definitelyIsNumber())
+ m_slowPathJumpList.append(jit.branchIfNotNumber(m_right, m_scratchGPR));
+
+ jit.convertInt32ToDouble(m_left.payloadGPR(), m_leftFPR);
+
+ rightIsDouble.link(&jit);
+ jit.unboxDoubleNonDestructive(m_right, m_rightFPR, m_scratchGPR, m_scratchFPR);
+
+ rightWasInteger.link(&jit);
+
+ // Fall thru to doubleVar * doubleVar.
+ }
+
+ // Do doubleVar * doubleVar.
+ jit.mulDouble(m_rightFPR, m_leftFPR);
+
+ if (!m_resultProfile)
+ jit.boxDouble(m_leftFPR, m_result);
+ else {
+ // The Int52 overflow check below intentionally omits -(1ll << 51), even though it is a valid negative Int52 value.
+ // Therefore, we will get a false positive if the result is exactly that value. This is done
+ // intentionally to simplify the checking algorithm.
+
+ const int64_t negativeZeroBits = 1ll << 63;
+#if USE(JSVALUE64)
+ jit.moveDoubleTo64(m_leftFPR, m_result.payloadGPR());
+ CCallHelpers::Jump notNegativeZero = jit.branch64(CCallHelpers::NotEqual, m_result.payloadGPR(), CCallHelpers::TrustedImm64(negativeZeroBits));
+
+ jit.or32(CCallHelpers::TrustedImm32(ResultProfile::NegZeroDouble), CCallHelpers::AbsoluteAddress(m_resultProfile->addressOfFlags()));
+ CCallHelpers::Jump done = jit.jump();
+
+ notNegativeZero.link(&jit);
+ jit.or32(CCallHelpers::TrustedImm32(ResultProfile::NonNegZeroDouble), CCallHelpers::AbsoluteAddress(m_resultProfile->addressOfFlags()));
+
+ jit.move(m_result.payloadGPR(), m_scratchGPR);
+ jit.urshiftPtr(CCallHelpers::Imm32(52), m_scratchGPR);
+ jit.and32(CCallHelpers::Imm32(0x7ff), m_scratchGPR);
+ CCallHelpers::Jump noInt52Overflow = jit.branch32(CCallHelpers::LessThanOrEqual, m_scratchGPR, CCallHelpers::TrustedImm32(0x431));
+
+ jit.or32(CCallHelpers::TrustedImm32(ResultProfile::Int52Overflow), CCallHelpers::AbsoluteAddress(m_resultProfile->addressOfFlags()));
+ noInt52Overflow.link(&jit);
+
+ done.link(&jit);
+ jit.sub64(GPRInfo::tagTypeNumberRegister, m_result.payloadGPR()); // Box the double.
+#else
+ jit.boxDouble(m_leftFPR, m_result);
+ CCallHelpers::JumpList notNegativeZero;
+ notNegativeZero.append(jit.branch32(CCallHelpers::NotEqual, m_result.payloadGPR(), CCallHelpers::TrustedImm32(0)));
+ notNegativeZero.append(jit.branch32(CCallHelpers::NotEqual, m_result.tagGPR(), CCallHelpers::TrustedImm32(negativeZeroBits >> 32)));
+
+ jit.or32(CCallHelpers::TrustedImm32(ResultProfile::NegZeroDouble), CCallHelpers::AbsoluteAddress(m_resultProfile->addressOfFlags()));
+ CCallHelpers::Jump done = jit.jump();
+
+ notNegativeZero.link(&jit);
+ jit.or32(CCallHelpers::TrustedImm32(ResultProfile::NonNegZeroDouble), CCallHelpers::AbsoluteAddress(m_resultProfile->addressOfFlags()));
+
+ jit.move(m_result.tagGPR(), m_scratchGPR);
+ jit.urshiftPtr(CCallHelpers::Imm32(52 - 32), m_scratchGPR);
+ jit.and32(CCallHelpers::Imm32(0x7ff), m_scratchGPR);
+ CCallHelpers::Jump noInt52Overflow = jit.branch32(CCallHelpers::LessThanOrEqual, m_scratchGPR, CCallHelpers::TrustedImm32(0x431));
+
+ jit.or32(CCallHelpers::TrustedImm32(ResultProfile::Int52Overflow), CCallHelpers::AbsoluteAddress(m_resultProfile->addressOfFlags()));
+
+ m_endJumpList.append(noInt52Overflow);
+ if (m_scratchGPR == m_result.tagGPR() || m_scratchGPR == m_result.payloadGPR())
+ jit.boxDouble(m_leftFPR, m_result);
+
+ m_endJumpList.append(done);
+#endif
+ }
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
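
The profiling block above classifies the double result by its raw exponent: shifting the bits right by 52 and masking with 0x7ff yields the biased exponent, and a value of at most 0x431 (1023 + 50) guarantees the magnitude is below 2^51, i.e. inside the DFG's Int52 range, with -2^51 deliberately misreported as the comment notes. A standalone sketch of that test, assuming IEEE-754 doubles:

    #include <cstdint>
    #include <cstring>

    inline bool mayOverflowInt52(double d)
    {
        uint64_t bits;
        std::memcpy(&bits, &d, sizeof bits);
        uint32_t biasedExponent = static_cast<uint32_t>(bits >> 52) & 0x7ff;
        return biasedExponent > 0x431; // |d| >= 2^51 once the exponent exceeds 1023 + 50
    }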
diff --git a/Source/JavaScriptCore/jit/JITMulGenerator.h b/Source/JavaScriptCore/jit/JITMulGenerator.h
new file mode 100644
index 000000000..faa033bc6
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITMulGenerator.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITMulGenerator_h
+#define JITMulGenerator_h
+
+#if ENABLE(JIT)
+
+#include "CCallHelpers.h"
+#include "SnippetOperand.h"
+
+namespace JSC {
+
+class JITMulGenerator {
+public:
+ JITMulGenerator(SnippetOperand leftOperand, SnippetOperand rightOperand,
+ JSValueRegs result, JSValueRegs left, JSValueRegs right,
+ FPRReg leftFPR, FPRReg rightFPR, GPRReg scratchGPR, FPRReg scratchFPR,
+ ResultProfile* resultProfile = nullptr)
+ : m_leftOperand(leftOperand)
+ , m_rightOperand(rightOperand)
+ , m_result(result)
+ , m_left(left)
+ , m_right(right)
+ , m_leftFPR(leftFPR)
+ , m_rightFPR(rightFPR)
+ , m_scratchGPR(scratchGPR)
+ , m_scratchFPR(scratchFPR)
+ , m_resultProfile(resultProfile)
+ {
+ ASSERT(!m_leftOperand.isPositiveConstInt32() || !m_rightOperand.isPositiveConstInt32());
+ }
+
+ void generateFastPath(CCallHelpers&);
+
+ bool didEmitFastPath() const { return m_didEmitFastPath; }
+ CCallHelpers::JumpList& endJumpList() { return m_endJumpList; }
+ CCallHelpers::JumpList& slowPathJumpList() { return m_slowPathJumpList; }
+
+private:
+ SnippetOperand m_leftOperand;
+ SnippetOperand m_rightOperand;
+ JSValueRegs m_result;
+ JSValueRegs m_left;
+ JSValueRegs m_right;
+ FPRReg m_leftFPR;
+ FPRReg m_rightFPR;
+ GPRReg m_scratchGPR;
+ FPRReg m_scratchFPR;
+ ResultProfile* m_resultProfile;
+ bool m_didEmitFastPath { false };
+
+ CCallHelpers::JumpList m_endJumpList;
+ CCallHelpers::JumpList m_slowPathJumpList;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // JITMulGenerator_h
diff --git a/Source/JavaScriptCore/jit/JITNegGenerator.cpp b/Source/JavaScriptCore/jit/JITNegGenerator.cpp
new file mode 100644
index 000000000..c6851676d
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITNegGenerator.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JITNegGenerator.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+void JITNegGenerator::generateFastPath(CCallHelpers& jit)
+{
+ ASSERT(m_scratchGPR != m_src.payloadGPR());
+ ASSERT(m_scratchGPR != m_result.payloadGPR());
+ ASSERT(m_scratchGPR != InvalidGPRReg);
+#if USE(JSVALUE32_64)
+ ASSERT(m_scratchGPR != m_src.tagGPR());
+ ASSERT(m_scratchGPR != m_result.tagGPR());
+#endif
+
+ m_didEmitFastPath = true;
+
+ jit.moveValueRegs(m_src, m_result);
+ CCallHelpers::Jump srcNotInt = jit.branchIfNotInt32(m_src);
+
+ // -0 should produce a double, and hence cannot be negated as an int.
+ // The negative int32 0x80000000 doesn't have a positive int32 representation, and hence cannot be negated as an int.
+ m_slowPathJumpList.append(jit.branchTest32(CCallHelpers::Zero, m_src.payloadGPR(), CCallHelpers::TrustedImm32(0x7fffffff)));
+
+ jit.neg32(m_result.payloadGPR());
+#if USE(JSVALUE64)
+ jit.boxInt32(m_result.payloadGPR(), m_result);
+#endif
+ m_endJumpList.append(jit.jump());
+
+ srcNotInt.link(&jit);
+ m_slowPathJumpList.append(jit.branchIfNotNumber(m_src, m_scratchGPR));
+
+ // For a double, all we need to do is to invert the sign bit.
+#if USE(JSVALUE64)
+ jit.move(CCallHelpers::TrustedImm64((int64_t)(1ull << 63)), m_scratchGPR);
+ jit.xor64(m_scratchGPR, m_result.payloadGPR());
+#else
+ jit.xor32(CCallHelpers::TrustedImm32(1 << 31), m_result.tagGPR());
+#endif
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
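
The single branchTest32 against 0x7fffffff above is what keeps both problem inputs off the fast path: an int32 payload with no bits set below the sign bit is either 0 or INT32_MIN, and neither negation stays an int32 (negating 0 must yield the double -0.0, and -INT32_MIN overflows). A tiny sketch of that predicate:

    #include <cstdint>

    inline bool negationStaysInt32(int32_t x)
    {
        // Zero in the low 31 bits happens only for x == 0 and x == INT32_MIN.
        return (static_cast<uint32_t>(x) & 0x7fffffffu) != 0;
    }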
diff --git a/Source/JavaScriptCore/jit/ClosureCallStubRoutine.h b/Source/JavaScriptCore/jit/JITNegGenerator.h
index d951075e2..6dc2c85a6 100644
--- a/Source/JavaScriptCore/jit/ClosureCallStubRoutine.h
+++ b/Source/JavaScriptCore/jit/JITNegGenerator.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -20,47 +20,45 @@
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef ClosureCallStubRoutine_h
-#define ClosureCallStubRoutine_h
-
-#include <wtf/Platform.h>
+#ifndef JITNegGenerator_h
+#define JITNegGenerator_h
#if ENABLE(JIT)
-#include "CodeOrigin.h"
-#include "GCAwareJITStubRoutine.h"
+#include "CCallHelpers.h"
+#include "SnippetOperand.h"
namespace JSC {
-class ClosureCallStubRoutine : public GCAwareJITStubRoutine {
+class JITNegGenerator {
public:
- ClosureCallStubRoutine(
- const MacroAssemblerCodeRef&, VM&, const JSCell* owner,
- Structure*, ExecutableBase*, const CodeOrigin&);
-
- virtual ~ClosureCallStubRoutine();
-
- Structure* structure() const { return m_structure.get(); }
- ExecutableBase* executable() const { return m_executable.get(); }
- const CodeOrigin& codeOrigin() const { return m_codeOrigin; }
+ JITNegGenerator(JSValueRegs result, JSValueRegs src, GPRReg scratchGPR)
+ : m_result(result)
+ , m_src(src)
+ , m_scratchGPR(scratchGPR)
+ { }
+
+ void generateFastPath(CCallHelpers&);
-protected:
- virtual void markRequiredObjectsInternal(SlotVisitor&);
+ bool didEmitFastPath() const { return m_didEmitFastPath; }
+ CCallHelpers::JumpList& endJumpList() { return m_endJumpList; }
+ CCallHelpers::JumpList& slowPathJumpList() { return m_slowPathJumpList; }
private:
- WriteBarrier<Structure> m_structure;
- WriteBarrier<ExecutableBase> m_executable;
- // This allows us to figure out who a call is linked to by searching through
- // stub routines.
- CodeOrigin m_codeOrigin;
+ JSValueRegs m_result;
+ JSValueRegs m_src;
+ GPRReg m_scratchGPR;
+ bool m_didEmitFastPath { false };
+
+ CCallHelpers::JumpList m_endJumpList;
+ CCallHelpers::JumpList m_slowPathJumpList;
};
} // namespace JSC
#endif // ENABLE(JIT)
-#endif // ClosureCallStubRoutine_h
-
+#endif // JITNegGenerator_h
diff --git a/Source/JavaScriptCore/jit/JITOpcodes.cpp b/Source/JavaScriptCore/jit/JITOpcodes.cpp
index 2a88f5052..738cb63fe 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009, 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2012-2016 Apple Inc. All rights reserved.
* Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com>
*
* Redistribution and use in source and binary forms, with or without
@@ -28,16 +28,23 @@
#if ENABLE(JIT)
#include "JIT.h"
-#include "Arguments.h"
+#include "BasicBlockLocation.h"
#include "CopiedSpaceInlines.h"
+#include "Debugger.h"
+#include "Exception.h"
#include "Heap.h"
#include "JITInlines.h"
-#include "JITStubCall.h"
#include "JSArray.h"
#include "JSCell.h"
#include "JSFunction.h"
-#include "JSPropertyNameIterator.h"
+#include "JSPropertyNameEnumerator.h"
#include "LinkBuffer.h"
+#include "MaxFrameExtentForSlowPathCall.h"
+#include "SlowPathCall.h"
+#include "TypeLocation.h"
+#include "TypeProfilerLog.h"
+#include "VirtualRegister.h"
+#include "Watchdog.h"
namespace JSC {
@@ -53,37 +60,17 @@ void JIT::emit_op_mov(Instruction* currentInstruction)
int dst = currentInstruction[1].u.operand;
int src = currentInstruction[2].u.operand;
- if (canBeOptimizedOrInlined()) {
- // Use simpler approach, since the DFG thinks that the last result register
- // is always set to the destination on every operation.
- emitGetVirtualRegister(src, regT0);
- emitPutVirtualRegister(dst);
- } else {
- if (m_codeBlock->isConstantRegisterIndex(src)) {
- if (!getConstantOperand(src).isNumber())
- store64(TrustedImm64(JSValue::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register)));
- else
- store64(Imm64(JSValue::encode(getConstantOperand(src))), Address(callFrameRegister, dst * sizeof(Register)));
- if (dst == m_lastResultBytecodeRegister)
- killLastResultRegister();
- } else if ((src == m_lastResultBytecodeRegister) || (dst == m_lastResultBytecodeRegister)) {
- // If either the src or dst is the cached register go though
- // get/put registers to make sure we track this correctly.
- emitGetVirtualRegister(src, regT0);
- emitPutVirtualRegister(dst);
- } else {
- // Perform the copy via regT1; do not disturb any mapping in regT0.
- load64(Address(callFrameRegister, src * sizeof(Register)), regT1);
- store64(regT1, Address(callFrameRegister, dst * sizeof(Register)));
- }
- }
+ emitGetVirtualRegister(src, regT0);
+ emitPutVirtualRegister(dst);
}
+
void JIT::emit_op_end(Instruction* currentInstruction)
{
- RELEASE_ASSERT(returnValueRegister != callFrameRegister);
- emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
- restoreReturnAddressBeforeReturn(Address(callFrameRegister, JSStack::ReturnPC * static_cast<int>(sizeof(Register))));
+ RELEASE_ASSERT(returnValueGPR != callFrameRegister);
+ emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueGPR);
+ emitRestoreCalleeSaves();
+ emitFunctionEpilogue();
ret();
}
@@ -96,7 +83,7 @@ void JIT::emit_op_jmp(Instruction* currentInstruction)
void JIT::emit_op_new_object(Instruction* currentInstruction)
{
Structure* structure = currentInstruction[3].u.objectAllocationProfile->structure();
- size_t allocationSize = JSObject::allocationSize(structure->inlineCapacity());
+ size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity());
MarkedAllocator* allocator = &m_vm->heap.allocatorForObjectWithoutDestructor(allocationSize);
RegisterID resultReg = regT0;
@@ -111,43 +98,54 @@ void JIT::emit_op_new_object(Instruction* currentInstruction)
void JIT::emitSlow_op_new_object(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_new_object);
- stubCall.addArgument(TrustedImmPtr(currentInstruction[3].u.objectAllocationProfile->structure()));
- stubCall.call(currentInstruction[1].u.operand);
+ int dst = currentInstruction[1].u.operand;
+ Structure* structure = currentInstruction[3].u.objectAllocationProfile->structure();
+ callOperation(operationNewObject, structure);
+ emitStoreCell(dst, returnValueGPR);
}
-void JIT::emit_op_check_has_instance(Instruction* currentInstruction)
+void JIT::emit_op_overrides_has_instance(Instruction* currentInstruction)
{
- unsigned baseVal = currentInstruction[3].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int constructor = currentInstruction[2].u.operand;
+ int hasInstanceValue = currentInstruction[3].u.operand;
+
+ emitGetVirtualRegister(hasInstanceValue, regT0);
- emitGetVirtualRegister(baseVal, regT0);
+ // We only jump if Symbol.hasInstance has been overridden; when it is still the default function we already know what it would do.
+ Jump customhasInstanceValue = branchPtr(NotEqual, regT0, TrustedImmPtr(m_codeBlock->globalObject()->functionProtoHasInstanceSymbolFunction()));
- // Check that baseVal is a cell.
- emitJumpSlowCaseIfNotJSCell(regT0, baseVal);
+ emitGetVirtualRegister(constructor, regT0);
+
+ // Check that the constructor has the 'ImplementsDefaultHasInstance' type-info flag, i.e. that the object is neither a C-API user nor a bound function.
+ test8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance), regT0);
+ emitTagBool(regT0);
+ Jump done = jump();
- // Check that baseVal 'ImplementsHasInstance'.
- loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
- addSlowCase(branchTest8(Zero, Address(regT0, Structure::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance)));
+ customhasInstanceValue.link(this);
+ move(TrustedImm32(ValueTrue), regT0);
+
+ done.link(this);
+ emitPutVirtualRegister(dst);
}
void JIT::emit_op_instanceof(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned value = currentInstruction[2].u.operand;
- unsigned proto = currentInstruction[3].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
+ int proto = currentInstruction[3].u.operand;
// Load the operands (baseVal, proto, and value respectively) into registers.
// We use regT0 for baseVal since we will be done with this first, and we can then use it for the result.
emitGetVirtualRegister(value, regT2);
emitGetVirtualRegister(proto, regT1);
- // Check that proto are cells. baseVal must be a cell - this is checked by op_check_has_instance.
+ // Check that value and proto are cells. baseVal must be a cell; this is checked by the get_by_id for Symbol.hasInstance.
emitJumpSlowCaseIfNotJSCell(regT2, value);
emitJumpSlowCaseIfNotJSCell(regT1, proto);
// Check that prototype is an object
- loadPtr(Address(regT1, JSCell::structureOffset()), regT3);
- addSlowCase(emitJumpIfNotObject(regT3));
+ addSlowCase(emitJumpIfCellNotObject(regT1));
// Optimistically load the result true, and start looping.
// Initially, regT1 still contains proto and regT2 still contains value.
@@ -157,7 +155,7 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
// Load the prototype of the object in regT2. If this is equal to regT1 - WIN!
// Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
- loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
+ emitLoadStructure(regT2, regT2, regT3);
load64(Address(regT2, Structure::prototypeOffset()), regT2);
Jump isInstance = branchPtr(Equal, regT2, regT1);
emitJumpIfJSCell(regT2).linkTo(loop, this);
@@ -170,10 +168,16 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
emitPutVirtualRegister(dst);
}
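
The loop emitted above walks the value's prototype chain until it either meets proto or falls off onto a non-cell. A rough C++ equivalent, using a stand-in Cell type rather than the real Structure-based load:

    struct Cell { const Cell* prototype; }; // stand-in: the real code loads this through the cell's Structure

    inline bool instanceOf(const Cell& value, const Cell* proto)
    {
        for (const Cell* current = value.prototype; current; current = current->prototype) {
            if (current == proto)
                return true;
        }
        return false;
    }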
+void JIT::emit_op_instanceof_custom(Instruction*)
+{
+ // This always goes to slow path since we expect it to be rare.
+ addSlowCase(jump());
+}
+
void JIT::emit_op_is_undefined(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned value = currentInstruction[2].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
emitGetVirtualRegister(value, regT0);
Jump isCell = emitJumpIfJSCell(regT0);
@@ -182,56 +186,55 @@ void JIT::emit_op_is_undefined(Instruction* currentInstruction)
Jump done = jump();
isCell.link(this);
- loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
- Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT1, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
move(TrustedImm32(0), regT0);
Jump notMasqueradesAsUndefined = jump();
isMasqueradesAsUndefined.link(this);
+ emitLoadStructure(regT0, regT1, regT2);
move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
loadPtr(Address(regT1, Structure::globalObjectOffset()), regT1);
comparePtr(Equal, regT0, regT1, regT0);
notMasqueradesAsUndefined.link(this);
done.link(this);
- emitTagAsBoolImmediate(regT0);
+ emitTagBool(regT0);
emitPutVirtualRegister(dst);
}
void JIT::emit_op_is_boolean(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned value = currentInstruction[2].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
emitGetVirtualRegister(value, regT0);
xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0);
test64(Zero, regT0, TrustedImm32(static_cast<int32_t>(~1)), regT0);
- emitTagAsBoolImmediate(regT0);
+ emitTagBool(regT0);
emitPutVirtualRegister(dst);
}
void JIT::emit_op_is_number(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned value = currentInstruction[2].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
emitGetVirtualRegister(value, regT0);
test64(NonZero, regT0, tagTypeNumberRegister, regT0);
- emitTagAsBoolImmediate(regT0);
+ emitTagBool(regT0);
emitPutVirtualRegister(dst);
}
void JIT::emit_op_is_string(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned value = currentInstruction[2].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
emitGetVirtualRegister(value, regT0);
Jump isNotCell = emitJumpIfNotJSCell(regT0);
- loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
- compare8(Equal, Address(regT1, Structure::typeInfoTypeOffset()), TrustedImm32(StringType), regT0);
- emitTagAsBoolImmediate(regT0);
+ compare8(Equal, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType), regT0);
+ emitTagBool(regT0);
Jump done = jump();
isNotCell.link(this);
@@ -241,104 +244,37 @@ void JIT::emit_op_is_string(Instruction* currentInstruction)
emitPutVirtualRegister(dst);
}
-void JIT::emit_op_call(Instruction* currentInstruction)
+void JIT::emit_op_is_object(Instruction* currentInstruction)
{
- compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
-}
-
-void JIT::emit_op_call_eval(Instruction* currentInstruction)
-{
- compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex);
-}
-
-void JIT::emit_op_call_varargs(Instruction* currentInstruction)
-{
- compileOpCall(op_call_varargs, currentInstruction, m_callLinkInfoIndex++);
-}
+ int dst = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
-void JIT::emit_op_construct(Instruction* currentInstruction)
-{
- compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
-}
+ emitGetVirtualRegister(value, regT0);
+ Jump isNotCell = emitJumpIfNotJSCell(regT0);
-void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
-{
- int activation = currentInstruction[1].u.operand;
- Jump activationNotCreated = branchTest64(Zero, addressFor(activation));
- JITStubCall stubCall(this, cti_op_tear_off_activation);
- stubCall.addArgument(activation, regT2);
- stubCall.call();
- activationNotCreated.link(this);
-}
+ compare8(AboveOrEqual, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType), regT0);
+ emitTagBool(regT0);
+ Jump done = jump();
-void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction)
-{
- int arguments = currentInstruction[1].u.operand;
- int activation = currentInstruction[2].u.operand;
+ isNotCell.link(this);
+ move(TrustedImm32(ValueFalse), regT0);
- Jump argsNotCreated = branchTest64(Zero, Address(callFrameRegister, sizeof(Register) * (unmodifiedArgumentsRegister(arguments))));
- JITStubCall stubCall(this, cti_op_tear_off_arguments);
- stubCall.addArgument(unmodifiedArgumentsRegister(arguments), regT2);
- stubCall.addArgument(activation, regT2);
- stubCall.call();
- argsNotCreated.link(this);
+ done.link(this);
+ emitPutVirtualRegister(dst);
}
void JIT::emit_op_ret(Instruction* currentInstruction)
{
ASSERT(callFrameRegister != regT1);
- ASSERT(regT1 != returnValueRegister);
- ASSERT(returnValueRegister != callFrameRegister);
+ ASSERT(regT1 != returnValueGPR);
+ ASSERT(returnValueGPR != callFrameRegister);
// Return the result in %eax.
- emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
+ emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueGPR);
- // Grab the return address.
- emitGetFromCallFrameHeaderPtr(JSStack::ReturnPC, regT1);
-
- // Restore our caller's "r".
- emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister);
-
- // Return.
- restoreReturnAddressBeforeReturn(regT1);
- ret();
-}
-
-void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction)
-{
- ASSERT(callFrameRegister != regT1);
- ASSERT(regT1 != returnValueRegister);
- ASSERT(returnValueRegister != callFrameRegister);
-
- // Return the result in %eax.
- emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueRegister);
- Jump notJSCell = emitJumpIfNotJSCell(returnValueRegister);
- loadPtr(Address(returnValueRegister, JSCell::structureOffset()), regT2);
- Jump notObject = emitJumpIfNotObject(regT2);
-
- // Grab the return address.
- emitGetFromCallFrameHeaderPtr(JSStack::ReturnPC, regT1);
-
- // Restore our caller's "r".
- emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister);
-
- // Return.
- restoreReturnAddressBeforeReturn(regT1);
- ret();
-
- // Return 'this' in %eax.
- notJSCell.link(this);
- notObject.link(this);
- emitGetVirtualRegister(currentInstruction[2].u.operand, returnValueRegister);
-
- // Grab the return address.
- emitGetFromCallFrameHeaderPtr(JSStack::ReturnPC, regT1);
-
- // Restore our caller's "r".
- emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, callFrameRegister);
-
- // Return.
- restoreReturnAddressBeforeReturn(regT1);
+ checkStackPointerAlignment();
+ emitRestoreCalleeSaves();
+ emitFunctionEpilogue();
ret();
}
@@ -350,7 +286,7 @@ void JIT::emit_op_to_primitive(Instruction* currentInstruction)
emitGetVirtualRegister(src, regT0);
Jump isImm = emitJumpIfNotJSCell(regT0);
- addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
+ addSlowCase(emitJumpIfCellObject(regT0));
isImm.link(this);
if (dst != src)
@@ -360,10 +296,8 @@ void JIT::emit_op_to_primitive(Instruction* currentInstruction)
void JIT::emit_op_strcat(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_strcat);
- stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
- stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
- stubCall.call(currentInstruction[1].u.operand);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_strcat);
+ slowPathCall.call();
}
void JIT::emit_op_not(Instruction* currentInstruction)
@@ -386,7 +320,7 @@ void JIT::emit_op_jfalse(Instruction* currentInstruction)
emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
addJump(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsNumber(0)))), target);
- Jump isNonZero = emitJumpIfImmediateInteger(regT0);
+ Jump isNonZero = emitJumpIfInt(regT0);
addJump(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsBoolean(false)))), target);
addSlowCase(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(jsBoolean(true)))));
@@ -396,15 +330,15 @@ void JIT::emit_op_jfalse(Instruction* currentInstruction)
void JIT::emit_op_jeq_null(Instruction* currentInstruction)
{
- unsigned src = currentInstruction[1].u.operand;
+ int src = currentInstruction[1].u.operand;
unsigned target = currentInstruction[2].u.operand;
emitGetVirtualRegister(src, regT0);
Jump isImmediate = emitJumpIfNotJSCell(regT0);
// First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- Jump isNotMasqueradesAsUndefined = branchTest8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ Jump isNotMasqueradesAsUndefined = branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ emitLoadStructure(regT0, regT2, regT1);
move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
addJump(branchPtr(Equal, Address(regT2, Structure::globalObjectOffset()), regT0), target);
Jump masqueradesGlobalObjectIsForeign = jump();
@@ -419,15 +353,15 @@ void JIT::emit_op_jeq_null(Instruction* currentInstruction)
};
void JIT::emit_op_jneq_null(Instruction* currentInstruction)
{
- unsigned src = currentInstruction[1].u.operand;
+ int src = currentInstruction[1].u.operand;
unsigned target = currentInstruction[2].u.operand;
emitGetVirtualRegister(src, regT0);
Jump isImmediate = emitJumpIfNotJSCell(regT0);
// First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- addJump(branchTest8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
+ addJump(branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
+ emitLoadStructure(regT0, regT2, regT1);
move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
addJump(branchPtr(NotEqual, Address(regT2, Structure::globalObjectOffset()), regT0), target);
Jump wasNotImmediate = jump();
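
The two hunks above (op_jeq_null and op_jneq_null) encode the same predicate from opposite directions. A minimal sketch of that predicate, with hypothetical ValueModel fields standing in for the tag test, the inline type-info flags, and the structure's global object:

    struct ValueModel {
        bool isCell;
        bool isNullOrUndefined;           // non-cell ("immediate") case
        unsigned typeInfoFlags;           // read via JSCell::typeInfoFlagsOffset() in the real code
        const void* structureGlobalObject;
    };

    constexpr unsigned MasqueradesAsUndefinedFlag = 1; // illustrative bit value only

    // Not JSC source: what op_jeq_null branches on (op_jneq_null is the negation).
    inline bool equalsNull(const ValueModel& v, const void* codeBlockGlobalObject)
    {
        if (!v.isCell)
            return v.isNullOrUndefined;
        if (!(v.typeInfoFlags & MasqueradesAsUndefinedFlag))
            return false;
        return v.structureGlobalObject == codeBlockGlobalObject;
    }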
@@ -442,7 +376,7 @@ void JIT::emit_op_jneq_null(Instruction* currentInstruction)
void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
{
- unsigned src = currentInstruction[1].u.operand;
+ int src = currentInstruction[1].u.operand;
Special::Pointer ptr = currentInstruction[2].u.specialPointer;
unsigned target = currentInstruction[3].u.operand;
@@ -453,9 +387,9 @@ void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
void JIT::emit_op_eq(Instruction* currentInstruction)
{
emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
- emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
+ emitJumpSlowCaseIfNotInt(regT0, regT1, regT2);
compare32(Equal, regT1, regT0, regT0);
- emitTagAsBoolImmediate(regT0);
+ emitTagBool(regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
@@ -465,7 +399,7 @@ void JIT::emit_op_jtrue(Instruction* currentInstruction)
emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
Jump isZero = branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsNumber(0))));
- addJump(emitJumpIfImmediateInteger(regT0), target);
+ addJump(emitJumpIfInt(regT0), target);
addJump(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsBoolean(true)))), target);
addSlowCase(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(jsBoolean(false)))));
@@ -476,170 +410,34 @@ void JIT::emit_op_jtrue(Instruction* currentInstruction)
void JIT::emit_op_neq(Instruction* currentInstruction)
{
emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
- emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
+ emitJumpSlowCaseIfNotInt(regT0, regT1, regT2);
compare32(NotEqual, regT1, regT0, regT0);
- emitTagAsBoolImmediate(regT0);
+ emitTagBool(regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
-void JIT::emit_op_bitxor(Instruction* currentInstruction)
-{
- emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
- emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
- xor64(regT1, regT0);
- emitFastArithReTagImmediate(regT0, regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
-}
-
-void JIT::emit_op_bitor(Instruction* currentInstruction)
-{
- emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
- emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
- or64(regT1, regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
-}
-
void JIT::emit_op_throw(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_throw);
- stubCall.addArgument(currentInstruction[1].u.operand, regT2);
- stubCall.call();
- ASSERT(regT0 == returnValueRegister);
-#ifndef NDEBUG
- // cti_op_throw always changes its return address,
- // this point in the code should never be reached.
- // this point in the code should never be reached.
- breakpoint();
-#endif
-}
-
-void JIT::emit_op_get_pnames(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int i = currentInstruction[3].u.operand;
- int size = currentInstruction[4].u.operand;
- int breakTarget = currentInstruction[5].u.operand;
-
- JumpList isNotObject;
-
- emitGetVirtualRegister(base, regT0);
- if (!m_codeBlock->isKnownNotImmediate(base))
- isNotObject.append(emitJumpIfNotJSCell(regT0));
- if (base != m_codeBlock->thisRegister() || m_codeBlock->isStrictMode()) {
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- isNotObject.append(emitJumpIfNotObject(regT2));
- }
-
- // We could inline the case where you have a valid cache, but
- // this call doesn't seem to be hot.
- Label isObject(this);
- JITStubCall getPnamesStubCall(this, cti_op_get_pnames);
- getPnamesStubCall.addArgument(regT0);
- getPnamesStubCall.call(dst);
- load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3);
- store64(tagTypeNumberRegister, addressFor(i));
- store32(TrustedImm32(Int32Tag), intTagFor(size));
- store32(regT3, intPayloadFor(size));
- Jump end = jump();
-
- isNotObject.link(this);
- move(regT0, regT1);
- and32(TrustedImm32(~TagBitUndefined), regT1);
- addJump(branch32(Equal, regT1, TrustedImm32(ValueNull)), breakTarget);
-
- JITStubCall toObjectStubCall(this, cti_to_object);
- toObjectStubCall.addArgument(regT0);
- toObjectStubCall.call(base);
- jump().linkTo(isObject, this);
-
- end.link(this);
-}
-
-void JIT::emit_op_next_pname(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int i = currentInstruction[3].u.operand;
- int size = currentInstruction[4].u.operand;
- int it = currentInstruction[5].u.operand;
- int target = currentInstruction[6].u.operand;
-
- JumpList callHasProperty;
-
- Label begin(this);
- load32(intPayloadFor(i), regT0);
- Jump end = branch32(Equal, regT0, intPayloadFor(size));
-
- // Grab key @ i
- loadPtr(addressFor(it), regT1);
- loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2);
-
- load64(BaseIndex(regT2, regT0, TimesEight), regT2);
-
- emitPutVirtualRegister(dst, regT2);
-
- // Increment i
- add32(TrustedImm32(1), regT0);
- store32(regT0, intPayloadFor(i));
-
- // Verify that i is valid:
- emitGetVirtualRegister(base, regT0);
-
- // Test base's structure
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- callHasProperty.append(branchPtr(NotEqual, regT2, Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure)))));
-
- // Test base's prototype chain
- loadPtr(Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedPrototypeChain))), regT3);
- loadPtr(Address(regT3, OBJECT_OFFSETOF(StructureChain, m_vector)), regT3);
- addJump(branchTestPtr(Zero, Address(regT3)), target);
-
- Label checkPrototype(this);
- load64(Address(regT2, Structure::prototypeOffset()), regT2);
- callHasProperty.append(emitJumpIfNotJSCell(regT2));
- loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
- callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3)));
- addPtr(TrustedImm32(sizeof(Structure*)), regT3);
- branchTestPtr(NonZero, Address(regT3)).linkTo(checkPrototype, this);
-
- // Continue loop.
- addJump(jump(), target);
-
- // Slow case: Ask the object if i is valid.
- callHasProperty.link(this);
- emitGetVirtualRegister(dst, regT1);
- JITStubCall stubCall(this, cti_has_property);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call();
-
- // Test for valid key.
- addJump(branchTest32(NonZero, regT0), target);
- jump().linkTo(begin, this);
-
- // End of loop.
- end.link(this);
+ ASSERT(regT0 == returnValueGPR);
+ copyCalleeSavesToVMCalleeSavesBuffer();
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+ callOperationNoExceptionCheck(operationThrow, regT0);
+ jumpToExceptionHandler();
}
void JIT::emit_op_push_with_scope(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_push_with_scope);
- stubCall.addArgument(currentInstruction[1].u.operand, regT2);
- stubCall.call();
-}
-
-void JIT::emit_op_pop_scope(Instruction*)
-{
- JITStubCall(this, cti_op_pop_scope).call();
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_push_with_scope);
+ slowPathCall.call();
}
void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
- unsigned src2 = currentInstruction[3].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int src1 = currentInstruction[2].u.operand;
+ int src2 = currentInstruction[3].u.operand;
emitGetVirtualRegisters(src1, regT0, src2, regT1);
@@ -650,18 +448,18 @@ void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqTy
// Jump slow if either is a double. First test if it's an integer, which is fine, and then test
// if it's a double.
- Jump leftOK = emitJumpIfImmediateInteger(regT0);
- addSlowCase(emitJumpIfImmediateNumber(regT0));
+ Jump leftOK = emitJumpIfInt(regT0);
+ addSlowCase(emitJumpIfNumber(regT0));
leftOK.link(this);
- Jump rightOK = emitJumpIfImmediateInteger(regT1);
- addSlowCase(emitJumpIfImmediateNumber(regT1));
+ Jump rightOK = emitJumpIfInt(regT1);
+ addSlowCase(emitJumpIfNumber(regT1));
rightOK.link(this);
if (type == OpStrictEq)
compare64(Equal, regT1, regT0, regT0);
else
compare64(NotEqual, regT1, regT0, regT0);
- emitTagAsBoolImmediate(regT0);
+ emitTagBool(regT0);
emitPutVirtualRegister(dst);
}
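
The comment in the hunk above ("jump slow if either is a double") relies on the 64-bit value encoding. A sketch of that filter, assuming the usual NaN-boxing scheme in which the top 16 bits tag numbers and fully-set top bits mark an int32 (the constant and helper names are illustrative):

    #include <cstdint>

    constexpr uint64_t TagTypeNumber = 0xffff000000000000ull; // assumed encoding

    inline bool isInt32Encoded(uint64_t bits)  { return (bits & TagTypeNumber) == TagTypeNumber; }
    inline bool isNumberEncoded(uint64_t bits) { return (bits & TagTypeNumber) != 0; }

    // Not JSC source: compileOpStrictEq bails to the slow path when either
    // operand is a double, i.e. a number that is not an int32.
    inline bool strictEqNeedsSlowPath(uint64_t lhs, uint64_t rhs)
    {
        auto isDouble = [](uint64_t v) { return isNumberEncoded(v) && !isInt32Encoded(v); };
        return isDouble(lhs) || isDouble(rhs);
    }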
@@ -681,69 +479,101 @@ void JIT::emit_op_to_number(Instruction* currentInstruction)
int srcVReg = currentInstruction[2].u.operand;
emitGetVirtualRegister(srcVReg, regT0);
- addSlowCase(emitJumpIfNotImmediateNumber(regT0));
+ addSlowCase(emitJumpIfNotNumber(regT0));
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
-void JIT::emit_op_push_name_scope(Instruction* currentInstruction)
+void JIT::emit_op_to_string(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_push_name_scope);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[1].u.operand)));
- stubCall.addArgument(currentInstruction[2].u.operand, regT2);
- stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
- stubCall.call();
+ int srcVReg = currentInstruction[2].u.operand;
+ emitGetVirtualRegister(srcVReg, regT0);
+
+ addSlowCase(emitJumpIfNotJSCell(regT0));
+ addSlowCase(branch8(NotEqual, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType)));
+
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
}
void JIT::emit_op_catch(Instruction* currentInstruction)
{
- killLastResultRegister(); // FIXME: Implicitly treat op_catch as a labeled statement, and remove this line of code.
- move(regT0, callFrameRegister);
- peek(regT3, OBJECT_OFFSETOF(struct JITStackFrame, vm) / sizeof(void*));
- load64(Address(regT3, OBJECT_OFFSETOF(VM, exception)), regT0);
- store64(TrustedImm64(JSValue::encode(JSValue())), Address(regT3, OBJECT_OFFSETOF(VM, exception)));
+ restoreCalleeSavesFromVMCalleeSavesBuffer();
+
+ move(TrustedImmPtr(m_vm), regT3);
+ load64(Address(regT3, VM::callFrameForCatchOffset()), callFrameRegister);
+ storePtr(TrustedImmPtr(nullptr), Address(regT3, VM::callFrameForCatchOffset()));
+
+ addPtr(TrustedImm32(stackPointerOffsetFor(codeBlock()) * sizeof(Register)), callFrameRegister, stackPointerRegister);
+
+ callOperationNoExceptionCheck(operationCheckIfExceptionIsUncatchableAndNotifyProfiler);
+ Jump isCatchableException = branchTest32(Zero, returnValueGPR);
+ jumpToExceptionHandler();
+ isCatchableException.link(this);
+
+ move(TrustedImmPtr(m_vm), regT3);
+ load64(Address(regT3, VM::exceptionOffset()), regT0);
+ store64(TrustedImm64(JSValue::encode(JSValue())), Address(regT3, VM::exceptionOffset()));
emitPutVirtualRegister(currentInstruction[1].u.operand);
+
+ load64(Address(regT0, Exception::valueOffset()), regT0);
+ emitPutVirtualRegister(currentInstruction[2].u.operand);
+}
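
To make the new op_catch sequence easier to follow, here is the order of operations it performs, modeled with hypothetical VMModel/ExceptionModel structs; the callee-save restore, stack-pointer realignment, and the "is this exception catchable" call are elided:

    struct ExceptionModel { void* value; };
    struct VMModel {
        void* callFrameForCatch;
        ExceptionModel* exception;
    };

    // Not JSC source: the data movement op_catch emits.
    inline void catchPrologueModel(VMModel& vm, void*& callFrame,
                                   ExceptionModel*& dstException, void*& dstValue)
    {
        callFrame = vm.callFrameForCatch;   // resume on the frame that owns the handler
        vm.callFrameForCatch = nullptr;
        dstException = vm.exception;        // operand 1 receives the Exception object
        vm.exception = nullptr;             // clear VM::m_exception
        dstValue = dstException->value;     // operand 2 receives the thrown value
    }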
+
+void JIT::emit_op_assert(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_assert);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_create_lexical_environment(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_lexical_environment);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_get_parent_scope(Instruction* currentInstruction)
+{
+ int currentScope = currentInstruction[2].u.operand;
+ emitGetVirtualRegister(currentScope, regT0);
+ loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
+ emitStoreCell(currentInstruction[1].u.operand, regT0);
}
void JIT::emit_op_switch_imm(Instruction* currentInstruction)
{
- unsigned tableIndex = currentInstruction[1].u.operand;
+ size_t tableIndex = currentInstruction[1].u.operand;
unsigned defaultOffset = currentInstruction[2].u.operand;
unsigned scrutinee = currentInstruction[3].u.operand;
// create jump table for switch destinations, track this switch statement.
- SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex);
+ SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex);
m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Immediate));
- jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+ jumpTable->ensureCTITable();
- JITStubCall stubCall(this, cti_op_switch_imm);
- stubCall.addArgument(scrutinee, regT2);
- stubCall.addArgument(TrustedImm32(tableIndex));
- stubCall.call();
- jump(regT0);
+ emitGetVirtualRegister(scrutinee, regT0);
+ callOperation(operationSwitchImmWithUnknownKeyType, regT0, tableIndex);
+ jump(returnValueGPR);
}
void JIT::emit_op_switch_char(Instruction* currentInstruction)
{
- unsigned tableIndex = currentInstruction[1].u.operand;
+ size_t tableIndex = currentInstruction[1].u.operand;
unsigned defaultOffset = currentInstruction[2].u.operand;
unsigned scrutinee = currentInstruction[3].u.operand;
// create jump table for switch destinations, track this switch statement.
- SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex);
+ SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex);
m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Character));
- jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+ jumpTable->ensureCTITable();
- JITStubCall stubCall(this, cti_op_switch_char);
- stubCall.addArgument(scrutinee, regT2);
- stubCall.addArgument(TrustedImm32(tableIndex));
- stubCall.call();
- jump(regT0);
+ emitGetVirtualRegister(scrutinee, regT0);
+ callOperation(operationSwitchCharWithUnknownKeyType, regT0, tableIndex);
+ jump(returnValueGPR);
}
void JIT::emit_op_switch_string(Instruction* currentInstruction)
{
- unsigned tableIndex = currentInstruction[1].u.operand;
+ size_t tableIndex = currentInstruction[1].u.operand;
unsigned defaultOffset = currentInstruction[2].u.operand;
unsigned scrutinee = currentInstruction[3].u.operand;
@@ -751,53 +581,39 @@ void JIT::emit_op_switch_string(Instruction* currentInstruction)
StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset));
- JITStubCall stubCall(this, cti_op_switch_string);
- stubCall.addArgument(scrutinee, regT2);
- stubCall.addArgument(TrustedImm32(tableIndex));
- stubCall.call();
- jump(regT0);
+ emitGetVirtualRegister(scrutinee, regT0);
+ callOperation(operationSwitchStringWithUnknownKeyType, regT0, tableIndex);
+ jump(returnValueGPR);
}
void JIT::emit_op_throw_static_error(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_throw_static_error);
- if (!m_codeBlock->getConstant(currentInstruction[1].u.operand).isNumber())
- stubCall.addArgument(TrustedImm64(JSValue::encode(m_codeBlock->getConstant(currentInstruction[1].u.operand))));
- else
- stubCall.addArgument(Imm64(JSValue::encode(m_codeBlock->getConstant(currentInstruction[1].u.operand))));
- stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
- stubCall.call();
+ move(TrustedImm64(JSValue::encode(m_codeBlock->getConstant(currentInstruction[1].u.operand))), regT0);
+ callOperation(operationThrowStaticError, regT0, currentInstruction[2].u.operand);
}
void JIT::emit_op_debug(Instruction* currentInstruction)
{
-#if ENABLE(DEBUG_WITH_BREAKPOINT)
- UNUSED_PARAM(currentInstruction);
- breakpoint();
-#else
- JITStubCall stubCall(this, cti_op_debug);
- stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
- stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
- stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
- stubCall.addArgument(TrustedImm32(currentInstruction[4].u.operand));
- stubCall.call();
-#endif
+ load32(codeBlock()->debuggerRequestsAddress(), regT0);
+ Jump noDebuggerRequests = branchTest32(Zero, regT0);
+ callOperation(operationDebug, currentInstruction[1].u.operand);
+ noDebuggerRequests.link(this);
}
void JIT::emit_op_eq_null(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int src1 = currentInstruction[2].u.operand;
emitGetVirtualRegister(src1, regT0);
Jump isImmediate = emitJumpIfNotJSCell(regT0);
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
move(TrustedImm32(0), regT0);
Jump wasNotMasqueradesAsUndefined = jump();
isMasqueradesAsUndefined.link(this);
+ emitLoadStructure(regT0, regT2, regT1);
move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2);
comparePtr(Equal, regT0, regT2, regT0);
@@ -811,25 +627,25 @@ void JIT::emit_op_eq_null(Instruction* currentInstruction)
wasNotImmediate.link(this);
wasNotMasqueradesAsUndefined.link(this);
- emitTagAsBoolImmediate(regT0);
+ emitTagBool(regT0);
emitPutVirtualRegister(dst);
}
void JIT::emit_op_neq_null(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int src1 = currentInstruction[2].u.operand;
emitGetVirtualRegister(src1, regT0);
Jump isImmediate = emitJumpIfNotJSCell(regT0);
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
move(TrustedImm32(1), regT0);
Jump wasNotMasqueradesAsUndefined = jump();
isMasqueradesAsUndefined.link(this);
+ emitLoadStructure(regT0, regT2, regT1);
move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2);
comparePtr(NotEqual, regT0, regT2, regT0);
@@ -843,213 +659,173 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction)
wasNotImmediate.link(this);
wasNotMasqueradesAsUndefined.link(this);
- emitTagAsBoolImmediate(regT0);
+ emitTagBool(regT0);
emitPutVirtualRegister(dst);
}
void JIT::emit_op_enter(Instruction*)
{
- emitEnterOptimizationCheck();
-
// Even though CTI doesn't use them, we initialize our constant
// registers to zap stale pointers, to avoid unnecessarily prolonging
// object lifetime and increasing GC pressure.
size_t count = m_codeBlock->m_numVars;
- for (size_t j = 0; j < count; ++j)
- emitInitRegister(j);
-}
+ for (size_t j = CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters(); j < count; ++j)
+ emitInitRegister(virtualRegisterForLocal(j).offset());
-void JIT::emit_op_create_activation(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
-
- Jump activationCreated = branchTest64(NonZero, Address(callFrameRegister, sizeof(Register) * dst));
- JITStubCall(this, cti_op_push_activation).call(currentInstruction[1].u.operand);
- emitPutVirtualRegister(dst);
- activationCreated.link(this);
-}
+ emitWriteBarrier(m_codeBlock);
-void JIT::emit_op_create_arguments(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
-
- Jump argsCreated = branchTest64(NonZero, Address(callFrameRegister, sizeof(Register) * dst));
- JITStubCall(this, cti_op_create_arguments).call();
- emitPutVirtualRegister(dst);
- emitPutVirtualRegister(unmodifiedArgumentsRegister(dst));
- argsCreated.link(this);
+ emitEnterOptimizationCheck();
}
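
The comment above explains why the locals are zapped; the only behavioural change in this hunk is that the loop now starts past the virtual registers reserved for baseline callee saves. A sketch of the intent (the encoded-undefined constant is assumed, not taken from the diff):

    #include <cstddef>
    #include <cstdint>

    constexpr uint64_t EncodedUndefined = 0x0a; // jsUndefined() under the assumed 64-bit encoding

    // Not JSC source: what the emitted initialization loop amounts to.
    inline void initializeLocals(uint64_t* locals, size_t numVars, size_t calleeSaveSlots)
    {
        for (size_t i = calleeSaveSlots; i < numVars; ++i)
            locals[i] = EncodedUndefined;   // overwrite stale pointers so the GC cannot see them
    }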
-void JIT::emit_op_init_lazy_reg(Instruction* currentInstruction)
+void JIT::emit_op_get_scope(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
-
- store64(TrustedImm64((int64_t)0), Address(callFrameRegister, sizeof(Register) * dst));
+ int dst = currentInstruction[1].u.operand;
+ emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT0);
+ loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT0);
+ emitStoreCell(dst, regT0);
}
-void JIT::emit_op_convert_this(Instruction* currentInstruction)
+void JIT::emit_op_to_this(Instruction* currentInstruction)
{
+ WriteBarrierBase<Structure>* cachedStructure = &currentInstruction[2].u.structure;
emitGetVirtualRegister(currentInstruction[1].u.operand, regT1);
emitJumpSlowCaseIfNotJSCell(regT1);
- if (shouldEmitProfiling()) {
- loadPtr(Address(regT1, JSCell::structureOffset()), regT0);
- emitValueProfilingSite();
- }
- addSlowCase(branchPtr(Equal, Address(regT1, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
-}
-void JIT::emit_op_get_callee(Instruction* currentInstruction)
-{
- unsigned result = currentInstruction[1].u.operand;
- emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT0);
- emitValueProfilingSite();
- emitPutVirtualRegister(result);
+ addSlowCase(branch8(NotEqual, Address(regT1, JSCell::typeInfoTypeOffset()), TrustedImm32(FinalObjectType)));
+ loadPtr(cachedStructure, regT2);
+ addSlowCase(branchTestPtr(Zero, regT2));
+ load32(Address(regT2, Structure::structureIDOffset()), regT2);
+ addSlowCase(branch32(NotEqual, Address(regT1, JSCell::structureIDOffset()), regT2));
}
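
A sketch of the conditions under which the rewritten op_to_this stays on the fast path; the struct members are illustrative stand-ins for the type byte, the structure ID, and the structure cached in the instruction stream:

    #include <cstdint>

    struct ThisCellModel { uint8_t type; uint32_t structureID; };
    constexpr uint8_t FinalObjectTypeModel = 1; // illustrative value

    // Not JSC source: every false branch corresponds to an addSlowCase above.
    inline bool toThisFastPath(const ThisCellModel* thisValue, bool isCell,
                               const uint32_t* cachedStructureID)
    {
        if (!isCell || thisValue->type != FinalObjectTypeModel)
            return false;
        if (!cachedStructureID)     // cache not primed yet
            return false;
        return thisValue->structureID == *cachedStructureID;
    }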
void JIT::emit_op_create_this(Instruction* currentInstruction)
{
int callee = currentInstruction[2].u.operand;
+ WriteBarrierBase<JSCell>* cachedFunction = &currentInstruction[4].u.jsCell;
RegisterID calleeReg = regT0;
+ RegisterID rareDataReg = regT4;
RegisterID resultReg = regT0;
RegisterID allocatorReg = regT1;
RegisterID structureReg = regT2;
+ RegisterID cachedFunctionReg = regT4;
RegisterID scratchReg = regT3;
emitGetVirtualRegister(callee, calleeReg);
- loadPtr(Address(calleeReg, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg);
- loadPtr(Address(calleeReg, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureReg);
+ loadPtr(Address(calleeReg, JSFunction::offsetOfRareData()), rareDataReg);
+ addSlowCase(branchTestPtr(Zero, rareDataReg));
+ loadPtr(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg);
+ loadPtr(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureReg);
addSlowCase(branchTestPtr(Zero, allocatorReg));
+ loadPtr(cachedFunction, cachedFunctionReg);
+ Jump hasSeenMultipleCallees = branchPtr(Equal, cachedFunctionReg, TrustedImmPtr(JSCell::seenMultipleCalleeObjects()));
+ addSlowCase(branchPtr(NotEqual, calleeReg, cachedFunctionReg));
+ hasSeenMultipleCallees.link(this);
+
emitAllocateJSObject(allocatorReg, structureReg, resultReg, scratchReg);
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
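
The fast path above now has three explicit guards before the inline allocation. A sketch of those guards with hypothetical model structs (the allocation itself, which contributes the fourth slow case, is not shown):

    struct AllocationProfileModel { void* allocator; void* structure; };
    struct RareDataModel { AllocationProfileModel profile; };
    struct CalleeModel { RareDataModel* rareData; };

    // Not JSC source: the checks emit_op_create_this performs before emitAllocateJSObject().
    inline bool createThisFastPathPossible(const CalleeModel* callee, const void* cachedCallee,
                                           const void* seenMultipleCalleesSentinel)
    {
        if (!callee->rareData)                        // slow case: callee has no rare data
            return false;
        if (!callee->rareData->profile.allocator)     // slow case: no allocation profile yet
            return false;
        if (cachedCallee != seenMultipleCalleesSentinel && cachedCallee != callee)
            return false;                             // slow case: cached function mismatch
        return true;
    }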
void JIT::emitSlow_op_create_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
+ linkSlowCase(iter); // doesn't have rare data
linkSlowCase(iter); // doesn't have an allocation profile
linkSlowCase(iter); // allocation failed
+ linkSlowCase(iter); // cached function didn't match
- JITStubCall stubCall(this, cti_op_create_this);
- stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
- stubCall.call(currentInstruction[1].u.operand);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_this);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_check_tdz(Instruction* currentInstruction)
+{
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+ addSlowCase(branchTest64(Zero, regT0));
+}
+
+void JIT::emitSlow_op_check_tdz(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_throw_tdz_error);
+ slowPathCall.call();
}
void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_profile_will_call);
- stubCall.addArgument(currentInstruction[1].u.operand, regT1);
- stubCall.call();
+ Jump profilerDone = branchTestPtr(Zero, AbsoluteAddress(m_vm->enabledProfilerAddress()));
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+ callOperation(operationProfileWillCall, regT0);
+ profilerDone.link(this);
}
void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_profile_did_call);
- stubCall.addArgument(currentInstruction[1].u.operand, regT1);
- stubCall.call();
+ Jump profilerDone = branchTestPtr(Zero, AbsoluteAddress(m_vm->enabledProfilerAddress()));
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+ callOperation(operationProfileDidCall, regT0);
+ profilerDone.link(this);
}
// Slow cases
-void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_to_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- void* globalThis = m_codeBlock->globalObject()->globalThis();
-
linkSlowCase(iter);
- if (shouldEmitProfiling())
- move(TrustedImm64((JSValue::encode(jsUndefined()))), regT0);
- Jump isNotUndefined = branch64(NotEqual, regT1, TrustedImm64(JSValue::encode(jsUndefined())));
- emitValueProfilingSite();
- move(TrustedImm64(JSValue::encode(JSValue(static_cast<JSCell*>(globalThis)))), regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand, regT0);
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_convert_this));
-
linkSlowCase(iter);
- if (shouldEmitProfiling())
- move(TrustedImm64(JSValue::encode(m_vm->stringStructure.get())), regT0);
- isNotUndefined.link(this);
- emitValueProfilingSite();
- JITStubCall stubCall(this, cti_op_convert_this);
- stubCall.addArgument(regT1);
- stubCall.call(currentInstruction[1].u.operand);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_this);
+ slowPathCall.call();
}
void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_to_primitive);
- stubCall.addArgument(regT0);
- stubCall.call(currentInstruction[1].u.operand);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_primitive);
+ slowPathCall.call();
}
void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0);
- JITStubCall stubCall(this, cti_op_not);
- stubCall.addArgument(regT0);
- stubCall.call(currentInstruction[1].u.operand);
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_not);
+ slowPathCall.call();
}
void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_jtrue);
- stubCall.addArgument(regT0);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(Zero, regT0), currentInstruction[2].u.operand); // inverted!
+ callOperation(operationConvertJSValueToBoolean, regT0);
+ emitJumpSlowToHot(branchTest32(Zero, returnValueGPR), currentInstruction[2].u.operand); // inverted!
}
void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_jtrue);
- stubCall.addArgument(regT0);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), currentInstruction[2].u.operand);
-}
-
-void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_bitxor);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_bitor);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call(currentInstruction[1].u.operand);
+ callOperation(operationConvertJSValueToBoolean, regT0);
+ emitJumpSlowToHot(branchTest32(NonZero, returnValueGPR), currentInstruction[2].u.operand);
}
void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_eq);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call();
- emitTagAsBoolImmediate(regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
+ callOperation(operationCompareEq, regT0, regT1);
+ emitTagBool(returnValueGPR);
+ emitPutVirtualRegister(currentInstruction[1].u.operand, returnValueGPR);
}
void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_eq);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call();
+ callOperation(operationCompareEq, regT0, regT1);
xor32(TrustedImm32(0x1), regT0);
- emitTagAsBoolImmediate(regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
+ emitTagBool(returnValueGPR);
+ emitPutVirtualRegister(currentInstruction[1].u.operand, returnValueGPR);
}
void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -1057,10 +833,8 @@ void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector<SlowCaseE
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_stricteq);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call(currentInstruction[1].u.operand);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_stricteq);
+ slowPathCall.call();
}
void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -1068,196 +842,55 @@ void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCase
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_nstricteq);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call(currentInstruction[1].u.operand);
-}
-
-void JIT::emitSlow_op_check_has_instance(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned value = currentInstruction[2].u.operand;
- unsigned baseVal = currentInstruction[3].u.operand;
-
- linkSlowCaseIfNotJSCell(iter, baseVal);
- linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_check_has_instance);
- stubCall.addArgument(value, regT2);
- stubCall.addArgument(baseVal, regT2);
- stubCall.call(dst);
-
- emitJumpSlowToHot(jump(), currentInstruction[4].u.operand);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_nstricteq);
+ slowPathCall.call();
}
void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned value = currentInstruction[2].u.operand;
- unsigned proto = currentInstruction[3].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
+ int proto = currentInstruction[3].u.operand;
linkSlowCaseIfNotJSCell(iter, value);
linkSlowCaseIfNotJSCell(iter, proto);
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_instanceof);
- stubCall.addArgument(value, regT2);
- stubCall.addArgument(proto, regT2);
- stubCall.call(dst);
-}
-
-void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallSlowCase(op_call, currentInstruction, iter, m_callLinkInfoIndex++);
-}
-
-void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallSlowCase(op_call_eval, currentInstruction, iter, m_callLinkInfoIndex);
-}
-
-void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallSlowCase(op_call_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
-}
-
-void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallSlowCase(op_construct, currentInstruction, iter, m_callLinkInfoIndex++);
-}
-
-void JIT::emitSlow_op_to_number(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_to_number);
- stubCall.addArgument(regT0);
- stubCall.call(currentInstruction[1].u.operand);
+ emitGetVirtualRegister(value, regT0);
+ emitGetVirtualRegister(proto, regT1);
+ callOperation(operationInstanceOf, dst, regT0, regT1);
}
-void JIT::emit_op_get_arguments_length(Instruction* currentInstruction)
+void JIT::emitSlow_op_instanceof_custom(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
int dst = currentInstruction[1].u.operand;
- int argumentsRegister = currentInstruction[2].u.operand;
- addSlowCase(branchTest64(NonZero, addressFor(argumentsRegister)));
- emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT0);
- sub32(TrustedImm32(1), regT0);
- emitFastArithReTagImmediate(regT0, regT0);
- emitPutVirtualRegister(dst, regT0);
-}
+ int value = currentInstruction[2].u.operand;
+ int constructor = currentInstruction[3].u.operand;
+ int hasInstanceValue = currentInstruction[4].u.operand;
-void JIT::emitSlow_op_get_arguments_length(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
linkSlowCase(iter);
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
-
- emitGetVirtualRegister(base, regT0);
- JITStubCall stubCall(this, cti_op_get_by_id_generic);
- stubCall.addArgument(regT0);
- stubCall.addArgument(TrustedImmPtr(ident));
- stubCall.call(dst);
+ emitGetVirtualRegister(value, regT0);
+ emitGetVirtualRegister(constructor, regT1);
+ emitGetVirtualRegister(hasInstanceValue, regT2);
+ callOperation(operationInstanceOfCustom, regT0, regT1, regT2);
+ emitTagBool(returnValueGPR);
+ emitPutVirtualRegister(dst, returnValueGPR);
}
-void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction)
+void JIT::emitSlow_op_to_number(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- int dst = currentInstruction[1].u.operand;
- int argumentsRegister = currentInstruction[2].u.operand;
- int property = currentInstruction[3].u.operand;
- addSlowCase(branchTest64(NonZero, addressFor(argumentsRegister)));
- emitGetVirtualRegister(property, regT1);
- addSlowCase(emitJumpIfNotImmediateInteger(regT1));
- add32(TrustedImm32(1), regT1);
- // regT1 now contains the integer index of the argument we want, including this
- emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT2);
- addSlowCase(branch32(AboveOrEqual, regT1, regT2));
+ linkSlowCase(iter);
- neg32(regT1);
- signExtend32ToPtr(regT1, regT1);
- load64(BaseIndex(callFrameRegister, regT1, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT0);
- emitValueProfilingSite();
- emitPutVirtualRegister(dst, regT0);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_number);
+ slowPathCall.call();
}
-void JIT::emitSlow_op_get_argument_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_to_string(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned arguments = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
-
- linkSlowCase(iter);
- Jump skipArgumentsCreation = jump();
-
- linkSlowCase(iter);
- linkSlowCase(iter);
- JITStubCall(this, cti_op_create_arguments).call();
- emitPutVirtualRegister(arguments);
- emitPutVirtualRegister(unmodifiedArgumentsRegister(arguments));
-
- skipArgumentsCreation.link(this);
- JITStubCall stubCall(this, cti_op_get_by_val_generic);
- stubCall.addArgument(arguments, regT2);
- stubCall.addArgument(property, regT2);
- stubCall.callWithValueProfiling(dst);
-}
-
-void JIT::emit_op_put_to_base(Instruction* currentInstruction)
-{
- int base = currentInstruction[1].u.operand;
- int id = currentInstruction[2].u.operand;
- int value = currentInstruction[3].u.operand;
-
- PutToBaseOperation* operation = currentInstruction[4].u.putToBaseOperation;
- switch (operation->m_kind) {
- case PutToBaseOperation::GlobalVariablePutChecked:
- addSlowCase(branchTest8(NonZero, AbsoluteAddress(operation->m_predicatePointer)));
- case PutToBaseOperation::GlobalVariablePut: {
- JSGlobalObject* globalObject = m_codeBlock->globalObject();
- if (operation->m_isDynamic) {
- emitGetVirtualRegister(base, regT0);
- addSlowCase(branchPtr(NotEqual, regT0, TrustedImmPtr(globalObject)));
- }
- emitGetVirtualRegister(value, regT0);
- store64(regT0, operation->m_registerAddress);
- if (Heap::isWriteBarrierEnabled())
- emitWriteBarrier(globalObject, regT0, regT2, ShouldFilterImmediates, WriteBarrierForVariableAccess);
- return;
- }
- case PutToBaseOperation::VariablePut: {
- emitGetVirtualRegisters(base, regT0, value, regT1);
- loadPtr(Address(regT0, JSVariableObject::offsetOfRegisters()), regT2);
- store64(regT1, Address(regT2, operation->m_offset * sizeof(Register)));
- if (Heap::isWriteBarrierEnabled())
- emitWriteBarrier(regT0, regT1, regT2, regT3, ShouldFilterImmediates, WriteBarrierForVariableAccess);
- return;
- }
+ linkSlowCase(iter); // Not JSCell.
+ linkSlowCase(iter); // Not JSString.
- case PutToBaseOperation::GlobalPropertyPut: {
- emitGetVirtualRegisters(base, regT0, value, regT1);
- loadPtr(&operation->m_structure, regT2);
- addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), regT2));
- ASSERT(!operation->m_structure || !operation->m_structure->inlineCapacity());
- loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
- load32(&operation->m_offsetInButterfly, regT3);
- signExtend32ToPtr(regT3, regT3);
- store64(regT1, BaseIndex(regT2, regT3, TimesEight));
- if (Heap::isWriteBarrierEnabled())
- emitWriteBarrier(regT0, regT1, regT2, regT3, ShouldFilterImmediates, WriteBarrierForVariableAccess);
- return;
- }
-
- case PutToBaseOperation::Uninitialised:
- case PutToBaseOperation::Readonly:
- case PutToBaseOperation::Generic:
- JITStubCall stubCall(this, cti_op_put_to_base);
-
- stubCall.addArgument(TrustedImm32(base));
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(id)));
- stubCall.addArgument(TrustedImm32(value));
- stubCall.addArgument(TrustedImmPtr(operation));
- stubCall.call();
- return;
- }
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_string);
+ slowPathCall.call();
}
#endif // USE(JSVALUE64)
@@ -1265,13 +898,10 @@ void JIT::emit_op_put_to_base(Instruction* currentInstruction)
void JIT::emit_op_loop_hint(Instruction*)
{
// Emit the JIT optimization check:
- if (canBeOptimized())
+ if (canBeOptimized()) {
addSlowCase(branchAdd32(PositiveOrZero, TrustedImm32(Options::executionCounterIncrementForLoop()),
AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
-
- // Emit the watchdog timer check:
- if (m_vm->watchdog.isEnabled())
- addSlowCase(branchTest8(NonZero, AbsoluteAddress(m_vm->watchdog.timerDidFireAddress())));
+ }
}
void JIT::emitSlow_op_loop_hint(Instruction*, Vector<SlowCaseEntry>::iterator& iter)
@@ -1281,442 +911,534 @@ void JIT::emitSlow_op_loop_hint(Instruction*, Vector<SlowCaseEntry>::iterator& i
if (canBeOptimized()) {
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_optimize);
- stubCall.addArgument(TrustedImm32(m_bytecodeOffset));
- stubCall.call();
+ copyCalleeSavesFromFrameOrRegisterToVMCalleeSavesBuffer();
+
+ callOperation(operationOptimize, m_bytecodeOffset);
+ Jump noOptimizedEntry = branchTestPtr(Zero, returnValueGPR);
+ if (!ASSERT_DISABLED) {
+ Jump ok = branchPtr(MacroAssembler::Above, returnValueGPR, TrustedImmPtr(bitwise_cast<void*>(static_cast<intptr_t>(1000))));
+ abortWithReason(JITUnreasonableLoopHintJumpTarget);
+ ok.link(this);
+ }
+ jump(returnValueGPR);
+ noOptimizedEntry.link(this);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_loop_hint));
}
+#else
+ UNUSED_PARAM(iter);
#endif
+}
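
For context on the slow path above: operationOptimize either returns null (keep running baseline code) or an optimized entry point, which is sanity-checked before the jump. A sketch of that decision (the abort is represented by a comment):

    #include <cstdint>

    // Not JSC source: how the return value of operationOptimize is interpreted.
    inline void* chooseLoopEntry(void* optimizedEntry)
    {
        if (!optimizedEntry)
            return nullptr;   // no OSR entry available; fall through into the baseline loop
        if (reinterpret_cast<intptr_t>(optimizedEntry) <= 1000) {
            // The emitted code calls abortWithReason(JITUnreasonableLoopHintJumpTarget) here.
            return nullptr;
        }
        return optimizedEntry; // the emitted code tail-jumps to this address
    }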
- // Emit the slow path of the watchdog timer check:
- if (m_vm->watchdog.isEnabled()) {
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_handle_watchdog_timer);
- stubCall.call();
+void JIT::emit_op_watchdog(Instruction*)
+{
+ ASSERT(m_vm->watchdog());
+ addSlowCase(branchTest8(NonZero, AbsoluteAddress(m_vm->watchdog()->timerDidFireAddress())));
+}
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_loop_hint));
- }
+void JIT::emitSlow_op_watchdog(Instruction*, Vector<SlowCaseEntry>::iterator& iter)
+{
+ ASSERT(m_vm->watchdog());
+ linkSlowCase(iter);
+ callOperation(operationHandleWatchdogTimer);
+}
+void JIT::emit_op_new_regexp(Instruction* currentInstruction)
+{
+ callOperation(operationNewRegexp, currentInstruction[1].u.operand, m_codeBlock->regexp(currentInstruction[2].u.operand));
}
-void JIT::emit_resolve_operations(ResolveOperations* resolveOperations, const int* baseVR, const int* valueVR)
+void JIT::emitNewFuncCommon(Instruction* currentInstruction)
{
+ Jump lazyJump;
+ int dst = currentInstruction[1].u.operand;
-#if USE(JSVALUE32_64)
- unmap();
+#if USE(JSVALUE64)
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
#else
- killLastResultRegister();
+ emitLoadPayload(currentInstruction[2].u.operand, regT0);
#endif
-
- if (resolveOperations->isEmpty()) {
- addSlowCase(jump());
- return;
+ FunctionExecutable* funcExec = m_codeBlock->functionDecl(currentInstruction[3].u.operand);
+
+ OpcodeID opcodeID = m_vm->interpreter->getOpcodeID(currentInstruction->u.opcode);
+ if (opcodeID == op_new_func)
+ callOperation(operationNewFunction, dst, regT0, funcExec);
+ else {
+ ASSERT(opcodeID == op_new_generator_func);
+ callOperation(operationNewGeneratorFunction, dst, regT0, funcExec);
}
+}
- const RegisterID value = regT0;
-#if USE(JSVALUE32_64)
- const RegisterID valueTag = regT1;
-#endif
- const RegisterID scope = regT2;
- const RegisterID scratch = regT3;
-
- JSGlobalObject* globalObject = m_codeBlock->globalObject();
- ResolveOperation* pc = resolveOperations->data();
- emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, scope);
- bool setBase = false;
- bool resolvingBase = true;
- while (resolvingBase) {
- switch (pc->m_operation) {
- case ResolveOperation::ReturnGlobalObjectAsBase:
- move(TrustedImmPtr(globalObject), value);
-#if USE(JSVALUE32_64)
- move(TrustedImm32(JSValue::CellTag), valueTag);
-#endif
- emitValueProfilingSite();
- emitStoreCell(*baseVR, value);
- return;
- case ResolveOperation::SetBaseToGlobal:
- RELEASE_ASSERT(baseVR);
- setBase = true;
- move(TrustedImmPtr(globalObject), scratch);
- emitStoreCell(*baseVR, scratch);
- resolvingBase = false;
- ++pc;
- break;
- case ResolveOperation::SetBaseToUndefined: {
- RELEASE_ASSERT(baseVR);
- setBase = true;
+void JIT::emit_op_new_func(Instruction* currentInstruction)
+{
+ emitNewFuncCommon(currentInstruction);
+}
+
+void JIT::emit_op_new_generator_func(Instruction* currentInstruction)
+{
+ emitNewFuncCommon(currentInstruction);
+}
+
+void JIT::emitNewFuncExprCommon(Instruction* currentInstruction)
+{
+ Jump notUndefinedScope;
+ int dst = currentInstruction[1].u.operand;
#if USE(JSVALUE64)
- move(TrustedImm64(JSValue::encode(jsUndefined())), scratch);
- emitPutVirtualRegister(*baseVR, scratch);
-#else
- emitStore(*baseVR, jsUndefined());
-#endif
- resolvingBase = false;
- ++pc;
- break;
- }
- case ResolveOperation::SetBaseToScope:
- RELEASE_ASSERT(baseVR);
- setBase = true;
- emitStoreCell(*baseVR, scope);
- resolvingBase = false;
- ++pc;
- break;
- case ResolveOperation::ReturnScopeAsBase:
- emitStoreCell(*baseVR, scope);
- RELEASE_ASSERT(value == regT0);
- move(scope, value);
-#if USE(JSVALUE32_64)
- move(TrustedImm32(JSValue::CellTag), valueTag);
-#endif
- emitValueProfilingSite();
- return;
- case ResolveOperation::SkipTopScopeNode: {
-#if USE(JSVALUE32_64)
- Jump activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), TrustedImm32(JSValue::EmptyValueTag));
-#else
- Jump activationNotCreated = branchTest64(Zero, addressFor(m_codeBlock->activationRegister()));
-#endif
- loadPtr(Address(scope, JSScope::offsetOfNext()), scope);
- activationNotCreated.link(this);
- ++pc;
- break;
- }
- case ResolveOperation::CheckForDynamicEntriesBeforeGlobalScope: {
- move(scope, regT3);
- loadPtr(Address(regT3, JSScope::offsetOfNext()), regT1);
- Jump atTopOfScope = branchTestPtr(Zero, regT1);
- Label loopStart = label();
- loadPtr(Address(regT3, JSCell::structureOffset()), regT2);
- Jump isActivation = branchPtr(Equal, regT2, TrustedImmPtr(globalObject->activationStructure()));
- addSlowCase(branchPtr(NotEqual, regT2, TrustedImmPtr(globalObject->nameScopeStructure())));
- isActivation.link(this);
- move(regT1, regT3);
- loadPtr(Address(regT3, JSScope::offsetOfNext()), regT1);
- branchTestPtr(NonZero, regT1, loopStart);
- atTopOfScope.link(this);
- ++pc;
- break;
- }
- case ResolveOperation::SkipScopes: {
- for (int i = 0; i < pc->m_scopesToSkip; i++)
- loadPtr(Address(scope, JSScope::offsetOfNext()), scope);
- ++pc;
- break;
- }
- case ResolveOperation::Fail:
- addSlowCase(jump());
- return;
- default:
- resolvingBase = false;
- }
- }
- if (baseVR && !setBase)
- emitStoreCell(*baseVR, scope);
-
- RELEASE_ASSERT(valueVR);
- ResolveOperation* resolveValueOperation = pc;
- switch (resolveValueOperation->m_operation) {
- case ResolveOperation::GetAndReturnGlobalProperty: {
- // Verify structure.
- move(TrustedImmPtr(globalObject), regT2);
- move(TrustedImmPtr(resolveValueOperation), regT3);
- loadPtr(Address(regT3, OBJECT_OFFSETOF(ResolveOperation, m_structure)), regT1);
- addSlowCase(branchPtr(NotEqual, regT1, Address(regT2, JSCell::structureOffset())));
-
- // Load property.
- load32(Address(regT3, OBJECT_OFFSETOF(ResolveOperation, m_offset)), regT3);
-
- // regT2: GlobalObject
- // regT3: offset
-#if USE(JSVALUE32_64)
- compileGetDirectOffset(regT2, valueTag, value, regT3, KnownNotFinal);
-#else
- compileGetDirectOffset(regT2, value, regT3, regT1, KnownNotFinal);
-#endif
- break;
- }
- case ResolveOperation::GetAndReturnGlobalVarWatchable:
- case ResolveOperation::GetAndReturnGlobalVar: {
-#if USE(JSVALUE32_64)
- load32(reinterpret_cast<char*>(pc->m_registerAddress) + OBJECT_OFFSETOF(JSValue, u.asBits.tag), valueTag);
- load32(reinterpret_cast<char*>(pc->m_registerAddress) + OBJECT_OFFSETOF(JSValue, u.asBits.payload), value);
-#else
- load64(reinterpret_cast<char*>(pc->m_registerAddress), value);
-#endif
- break;
- }
- case ResolveOperation::GetAndReturnScopedVar: {
- loadPtr(Address(scope, JSVariableObject::offsetOfRegisters()), scope);
-#if USE(JSVALUE32_64)
- load32(Address(scope, pc->m_offset * sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), valueTag);
- load32(Address(scope, pc->m_offset * sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), value);
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
+ notUndefinedScope = branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(jsUndefined())));
+ store64(TrustedImm64(JSValue::encode(jsUndefined())), Address(callFrameRegister, sizeof(Register) * dst));
#else
- load64(Address(scope, pc->m_offset * sizeof(Register)), value);
+ emitLoadPayload(currentInstruction[2].u.operand, regT0);
+ notUndefinedScope = branch32(NotEqual, tagFor(currentInstruction[2].u.operand), TrustedImm32(JSValue::UndefinedTag));
+ emitStore(dst, jsUndefined());
#endif
- break;
- }
- default:
- CRASH();
- return;
+ Jump done = jump();
+ notUndefinedScope.link(this);
+
+ FunctionExecutable* function = m_codeBlock->functionExpr(currentInstruction[3].u.operand);
+ OpcodeID opcodeID = m_vm->interpreter->getOpcodeID(currentInstruction->u.opcode);
+
+ if (opcodeID == op_new_func_exp || opcodeID == op_new_arrow_func_exp)
+ callOperation(operationNewFunction, dst, regT0, function);
+ else {
+ ASSERT(opcodeID == op_new_generator_func_exp);
+ callOperation(operationNewGeneratorFunction, dst, regT0, function);
}
-#if USE(JSVALUE32_64)
- emitStore(*valueVR, valueTag, value);
-#else
- emitPutVirtualRegister(*valueVR, value);
-#endif
- emitValueProfilingSite();
+ done.link(this);
}
-void JIT::emitSlow_link_resolve_operations(ResolveOperations* resolveOperations, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
{
- if (resolveOperations->isEmpty()) {
- linkSlowCase(iter);
- return;
- }
+ emitNewFuncExprCommon(currentInstruction);
+}
- ResolveOperation* pc = resolveOperations->data();
- bool resolvingBase = true;
- while (resolvingBase) {
- switch (pc->m_operation) {
- case ResolveOperation::ReturnGlobalObjectAsBase:
- return;
- case ResolveOperation::SetBaseToGlobal:
- resolvingBase = false;
- ++pc;
- break;
- case ResolveOperation::SetBaseToUndefined: {
- resolvingBase = false;
- ++pc;
- break;
- }
- case ResolveOperation::SetBaseToScope:
- resolvingBase = false;
- ++pc;
- break;
- case ResolveOperation::ReturnScopeAsBase:
- return;
- case ResolveOperation::SkipTopScopeNode: {
- ++pc;
- break;
- }
- case ResolveOperation::SkipScopes:
- ++pc;
- break;
- case ResolveOperation::Fail:
- linkSlowCase(iter);
- return;
- case ResolveOperation::CheckForDynamicEntriesBeforeGlobalScope: {
- linkSlowCase(iter);
- ++pc;
- break;
- }
- default:
- resolvingBase = false;
- }
- }
- ResolveOperation* resolveValueOperation = pc;
- switch (resolveValueOperation->m_operation) {
- case ResolveOperation::GetAndReturnGlobalProperty: {
- linkSlowCase(iter);
- break;
- }
- case ResolveOperation::GetAndReturnGlobalVarWatchable:
- case ResolveOperation::GetAndReturnGlobalVar:
- break;
- case ResolveOperation::GetAndReturnScopedVar:
- break;
- default:
- CRASH();
- return;
- }
+void JIT::emit_op_new_generator_func_exp(Instruction* currentInstruction)
+{
+ emitNewFuncExprCommon(currentInstruction);
+}
+
+void JIT::emit_op_new_arrow_func_exp(Instruction* currentInstruction)
+{
+ emitNewFuncExprCommon(currentInstruction);
+}
+
+void JIT::emit_op_new_array(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int valuesIndex = currentInstruction[2].u.operand;
+ int size = currentInstruction[3].u.operand;
+ addPtr(TrustedImm32(valuesIndex * sizeof(Register)), callFrameRegister, regT0);
+ callOperation(operationNewArrayWithProfile, dst,
+ currentInstruction[4].u.arrayAllocationProfile, regT0, size);
}
-void JIT::emit_op_resolve(Instruction* currentInstruction)
+void JIT::emit_op_new_array_with_size(Instruction* currentInstruction)
{
- ResolveOperations* operations = currentInstruction[3].u.resolveOperations;
int dst = currentInstruction[1].u.operand;
- emit_resolve_operations(operations, 0, &dst);
+ int sizeIndex = currentInstruction[2].u.operand;
+#if USE(JSVALUE64)
+ emitGetVirtualRegister(sizeIndex, regT0);
+ callOperation(operationNewArrayWithSizeAndProfile, dst,
+ currentInstruction[3].u.arrayAllocationProfile, regT0);
+#else
+ emitLoad(sizeIndex, regT1, regT0);
+ callOperation(operationNewArrayWithSizeAndProfile, dst,
+ currentInstruction[3].u.arrayAllocationProfile, regT1, regT0);
+#endif
}
-void JIT::emitSlow_op_resolve(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emit_op_new_array_buffer(Instruction* currentInstruction)
{
- ResolveOperations* operations = currentInstruction[3].u.resolveOperations;
- emitSlow_link_resolve_operations(operations, iter);
- JITStubCall stubCall(this, cti_op_resolve);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.addArgument(TrustedImmPtr(currentInstruction[3].u.resolveOperations));
- stubCall.callWithValueProfiling(currentInstruction[1].u.operand);
+ int dst = currentInstruction[1].u.operand;
+ int valuesIndex = currentInstruction[2].u.operand;
+ int size = currentInstruction[3].u.operand;
+ const JSValue* values = codeBlock()->constantBuffer(valuesIndex);
+ callOperation(operationNewArrayBufferWithProfile, dst, currentInstruction[4].u.arrayAllocationProfile, values, size);
}
-void JIT::emit_op_resolve_base(Instruction* currentInstruction)
+#if USE(JSVALUE64)
+void JIT::emit_op_has_structure_property(Instruction* currentInstruction)
{
- ResolveOperations* operations = currentInstruction[4].u.resolveOperations;
int dst = currentInstruction[1].u.operand;
- emit_resolve_operations(operations, &dst, 0);
+ int base = currentInstruction[2].u.operand;
+ int enumerator = currentInstruction[4].u.operand;
+
+ emitGetVirtualRegister(base, regT0);
+ emitGetVirtualRegister(enumerator, regT1);
+ emitJumpSlowCaseIfNotJSCell(regT0, base);
+
+ load32(Address(regT0, JSCell::structureIDOffset()), regT0);
+ addSlowCase(branch32(NotEqual, regT0, Address(regT1, JSPropertyNameEnumerator::cachedStructureIDOffset())));
+
+ move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0);
+ emitPutVirtualRegister(dst);
}
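
A sketch of the fast-path test above: the answer is known to be true only while the base object's structure ID still matches the ID cached by the property-name enumerator (field names are illustrative):

    #include <cstdint>

    struct BaseCellModel { uint32_t structureID; };
    struct EnumeratorModel { uint32_t cachedStructureID; };

    // Not JSC source: anything but a match defers to the slow path.
    inline bool hasStructurePropertyFastPath(bool baseIsCell, const BaseCellModel& base,
                                             const EnumeratorModel& enumerator)
    {
        return baseIsCell && base.structureID == enumerator.cachedStructureID;
    }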
-void JIT::emitSlow_op_resolve_base(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::privateCompileHasIndexedProperty(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
{
- ResolveOperations* operations = currentInstruction[4].u.resolveOperations;
- emitSlow_link_resolve_operations(operations, iter);
- JITStubCall stubCall(this, currentInstruction[3].u.operand ? cti_op_resolve_base_strict_put : cti_op_resolve_base);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.addArgument(TrustedImmPtr(currentInstruction[4].u.resolveOperations));
- stubCall.addArgument(TrustedImmPtr(currentInstruction[5].u.putToBaseOperation));
- stubCall.callWithValueProfiling(currentInstruction[1].u.operand);
+ Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;
+
+ PatchableJump badType;
+
+ // FIXME: Add support for other types like TypedArrays and Arguments.
+ // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034.
+ JumpList slowCases = emitLoadForArrayMode(currentInstruction, arrayMode, badType);
+ move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0);
+ Jump done = jump();
+
+ LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
+
+ patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
+ patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
+
+ patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
+
+ byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
+ m_codeBlock, patchBuffer,
+ ("Baseline has_indexed_property stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
+
+ MacroAssembler::repatchJump(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
+ MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(operationHasIndexedPropertyGeneric));
}
-void JIT::emit_op_resolve_with_base(Instruction* currentInstruction)
+void JIT::emit_op_has_indexed_property(Instruction* currentInstruction)
{
- ResolveOperations* operations = currentInstruction[4].u.resolveOperations;
- int base = currentInstruction[1].u.operand;
- int value = currentInstruction[2].u.operand;
- emit_resolve_operations(operations, &base, &value);
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int property = currentInstruction[3].u.operand;
+ ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+ ByValInfo* byValInfo = m_codeBlock->addByValInfo();
+
+ emitGetVirtualRegisters(base, regT0, property, regT1);
+
+ // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
+ // We check the value as if it were a uint32 against m_vectorLength - which will always fail if the
+ // number was signed, since m_vectorLength is always less than INT_MAX (the total allocation
+ // size is always less than 4GB). As such, zero-extending is correct (and extending the value
+ // to 64 bits is necessary since it's used in the address calculation). We zero-extend rather than
+ // sign-extend since it makes it easier to re-tag the value in the slow case.
+ zeroExtend32ToPtr(regT1, regT1);
+
+ emitJumpSlowCaseIfNotJSCell(regT0, base);
+ emitArrayProfilingSiteWithCell(regT0, regT2, profile);
+ and32(TrustedImm32(IndexingShapeMask), regT2);
+
+ JITArrayMode mode = chooseArrayMode(profile);
+ PatchableJump badType;
+
+ // FIXME: Add support for other types like TypedArrays and Arguments.
+ // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034.
+ JumpList slowCases = emitLoadForArrayMode(currentInstruction, mode, badType);
+
+ move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0);
+
+ addSlowCase(badType);
+ addSlowCase(slowCases);
+
+ Label done = label();
+
+ emitPutVirtualRegister(dst);
+
+ Label nextHotPath = label();
+
+ m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, PatchableJump(), badType, mode, profile, done, nextHotPath));
}
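
To make the zero-extension argument in the comment above concrete, here is a minimal stand-alone sketch (not part of the patch) showing that any negative int32 index, once zero-extended, can never pass an unsigned bounds check against a vector length below 2^31:

    #include <cassert>
    #include <cstdint>

    int main()
    {
        int32_t index = -1;                                   // any negative index
        uint32_t zeroExtended = static_cast<uint32_t>(index); // 0xFFFFFFFF after zero-extension
        uint32_t vectorLength = 100;                          // m_vectorLength is always < 2^31
        assert(!(zeroExtended < vectorLength));               // the unsigned length check rejects it
        return 0;
    }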
-void JIT::emitSlow_op_resolve_with_base(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_has_indexed_property(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- ResolveOperations* operations = currentInstruction[4].u.resolveOperations;
- emitSlow_link_resolve_operations(operations, iter);
- JITStubCall stubCall(this, cti_op_resolve_with_base);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
- stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
- stubCall.addArgument(TrustedImmPtr(currentInstruction[4].u.resolveOperations));
- stubCall.addArgument(TrustedImmPtr(currentInstruction[5].u.putToBaseOperation));
- stubCall.callWithValueProfiling(currentInstruction[2].u.operand);
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int property = currentInstruction[3].u.operand;
+ ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;
+
+ linkSlowCaseIfNotJSCell(iter, base); // base cell check
+ linkSlowCase(iter); // base array check
+ linkSlowCase(iter); // read barrier
+ linkSlowCase(iter); // vector length check
+ linkSlowCase(iter); // empty value
+
+ Label slowPath = label();
+
+ emitGetVirtualRegister(base, regT0);
+ emitGetVirtualRegister(property, regT1);
+ Call call = callOperation(operationHasIndexedPropertyDefault, dst, regT0, regT1, byValInfo);
+
+ m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
+ m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
+ m_byValInstructionIndex++;
}
-void JIT::emit_op_resolve_with_this(Instruction* currentInstruction)
+void JIT::emit_op_get_direct_pname(Instruction* currentInstruction)
{
- ResolveOperations* operations = currentInstruction[4].u.resolveOperations;
- int base = currentInstruction[1].u.operand;
- int value = currentInstruction[2].u.operand;
- emit_resolve_operations(operations, &base, &value);
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int index = currentInstruction[4].u.operand;
+ int enumerator = currentInstruction[5].u.operand;
+
+ // Check that base is a cell
+ emitGetVirtualRegister(base, regT0);
+ emitJumpSlowCaseIfNotJSCell(regT0, base);
+
+ // Check the structure
+ emitGetVirtualRegister(enumerator, regT2);
+ load32(Address(regT0, JSCell::structureIDOffset()), regT1);
+ addSlowCase(branch32(NotEqual, regT1, Address(regT2, JSPropertyNameEnumerator::cachedStructureIDOffset())));
+
+ // Compute the offset
+ emitGetVirtualRegister(index, regT1);
+ // If the index is less than the enumerator's cached inline capacity, then it's an inline access
+ Jump outOfLineAccess = branch32(AboveOrEqual, regT1, Address(regT2, JSPropertyNameEnumerator::cachedInlineCapacityOffset()));
+ addPtr(TrustedImm32(JSObject::offsetOfInlineStorage()), regT0);
+ signExtend32ToPtr(regT1, regT1);
+ load64(BaseIndex(regT0, regT1, TimesEight), regT0);
+
+ Jump done = jump();
+
+ // Otherwise it's out of line
+ outOfLineAccess.link(this);
+ loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
+ addSlowCase(branchIfNotToSpace(regT0));
+ sub32(Address(regT2, JSPropertyNameEnumerator::cachedInlineCapacityOffset()), regT1);
+ neg32(regT1);
+ signExtend32ToPtr(regT1, regT1);
+ int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue);
+ load64(BaseIndex(regT0, regT1, TimesEight, offsetOfFirstProperty), regT0);
+
+ done.link(this);
+ emitValueProfilingSite();
+ emitPutVirtualRegister(dst, regT0);
}
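
The inline/out-of-line split above can be restated as ordinary pointer arithmetic. The sketch below mirrors the loads emitted in emit_op_get_direct_pname; the function and parameter names are illustrative only, not real JSC accessors:

    #include <cstdint>

    using EncodedValue = int64_t; // stands in for EncodedJSValue

    EncodedValue loadDirectProperty(char* object, char* butterfly,
                                    int64_t index, int64_t inlineCapacity,
                                    int64_t offsetOfInlineStorage, int64_t offsetOfFirstProperty)
    {
        if (index < inlineCapacity) {
            // Inline access: slots live directly inside the object.
            return *reinterpret_cast<EncodedValue*>(object + offsetOfInlineStorage + index * 8);
        }
        // Out-of-line access: the negated (index - inlineCapacity) walks backwards
        // from offsetOfFirstProperty relative to the butterfly, exactly as emitted above.
        int64_t negatedOutOfLineIndex = -(index - inlineCapacity);
        return *reinterpret_cast<EncodedValue*>(butterfly + offsetOfFirstProperty + negatedOutOfLineIndex * 8);
    }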
-void JIT::emitSlow_op_resolve_with_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_get_direct_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- ResolveOperations* operations = currentInstruction[4].u.resolveOperations;
- emitSlow_link_resolve_operations(operations, iter);
- JITStubCall stubCall(this, cti_op_resolve_with_this);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
- stubCall.addArgument(TrustedImm32(currentInstruction[1].u.operand));
- stubCall.addArgument(TrustedImmPtr(currentInstruction[4].u.resolveOperations));
- stubCall.callWithValueProfiling(currentInstruction[2].u.operand);
+ int base = currentInstruction[2].u.operand;
+ linkSlowCaseIfNotJSCell(iter, base);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_direct_pname);
+ slowPathCall.call();
}
-void JIT::emitSlow_op_put_to_base(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emit_op_enumerator_structure_pname(Instruction* currentInstruction)
{
- int base = currentInstruction[1].u.operand;
- int id = currentInstruction[2].u.operand;
- int value = currentInstruction[3].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int enumerator = currentInstruction[2].u.operand;
+ int index = currentInstruction[3].u.operand;
- PutToBaseOperation* putToBaseOperation = currentInstruction[4].u.putToBaseOperation;
- switch (putToBaseOperation->m_kind) {
- case PutToBaseOperation::VariablePut:
- return;
+ emitGetVirtualRegister(index, regT0);
+ emitGetVirtualRegister(enumerator, regT1);
+ Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endStructurePropertyIndexOffset()));
- case PutToBaseOperation::GlobalVariablePutChecked:
- linkSlowCase(iter);
- case PutToBaseOperation::GlobalVariablePut:
- if (!putToBaseOperation->m_isDynamic)
- return;
- linkSlowCase(iter);
- break;
+ move(TrustedImm64(JSValue::encode(jsNull())), regT0);
- case PutToBaseOperation::Uninitialised:
- case PutToBaseOperation::Readonly:
- case PutToBaseOperation::Generic:
- return;
+ Jump done = jump();
+ inBounds.link(this);
- case PutToBaseOperation::GlobalPropertyPut:
- linkSlowCase(iter);
- break;
+ loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1);
+ signExtend32ToPtr(regT0, regT0);
+ load64(BaseIndex(regT1, regT0, TimesEight), regT0);
+ done.link(this);
+ emitPutVirtualRegister(dst);
+}
+
+void JIT::emit_op_enumerator_generic_pname(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int enumerator = currentInstruction[2].u.operand;
+ int index = currentInstruction[3].u.operand;
+
+ emitGetVirtualRegister(index, regT0);
+ emitGetVirtualRegister(enumerator, regT1);
+ Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endGenericPropertyIndexOffset()));
+
+ move(TrustedImm64(JSValue::encode(jsNull())), regT0);
+
+ Jump done = jump();
+ inBounds.link(this);
+
+ loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1);
+ signExtend32ToPtr(regT0, regT0);
+ load64(BaseIndex(regT1, regT0, TimesEight), regT0);
+
+ done.link(this);
+ emitPutVirtualRegister(dst);
+}
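
Both enumerator pname opcodes above follow the same shape; a small sketch with hypothetical types, purely for illustration:

    #include <cstdint>

    struct Enumerator {
        uint32_t endIndex;        // endStructurePropertyIndex or endGenericPropertyIndex
        const void** cachedNames; // cached property names vector
    };

    // Returns the cached name at 'index', or null (standing in for jsNull()) once past the end.
    const void* nextPropertyName(const Enumerator& e, uint32_t index)
    {
        return index < e.endIndex ? e.cachedNames[index] : nullptr;
    }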
+
+void JIT::emit_op_profile_type(Instruction* currentInstruction)
+{
+ TypeLocation* cachedTypeLocation = currentInstruction[2].u.location;
+ int valueToProfile = currentInstruction[1].u.operand;
+
+ emitGetVirtualRegister(valueToProfile, regT0);
+
+ JumpList jumpToEnd;
+
+ jumpToEnd.append(branchTest64(Zero, regT0));
+
+ // Compile in a predictive type check, if possible, to see if we can skip writing to the log.
+ // These typechecks are inlined to match those of the 64-bit JSValue type checks.
+ if (cachedTypeLocation->m_lastSeenType == TypeUndefined)
+ jumpToEnd.append(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsUndefined()))));
+ else if (cachedTypeLocation->m_lastSeenType == TypeNull)
+ jumpToEnd.append(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsNull()))));
+ else if (cachedTypeLocation->m_lastSeenType == TypeBoolean) {
+ move(regT0, regT1);
+ and64(TrustedImm32(~1), regT1);
+ jumpToEnd.append(branch64(Equal, regT1, TrustedImm64(ValueFalse)));
+ } else if (cachedTypeLocation->m_lastSeenType == TypeMachineInt)
+ jumpToEnd.append(emitJumpIfInt(regT0));
+ else if (cachedTypeLocation->m_lastSeenType == TypeNumber)
+ jumpToEnd.append(emitJumpIfNumber(regT0));
+ else if (cachedTypeLocation->m_lastSeenType == TypeString) {
+ Jump isNotCell = emitJumpIfNotJSCell(regT0);
+ jumpToEnd.append(branch8(Equal, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType)));
+ isNotCell.link(this);
}
- JITStubCall stubCall(this, cti_op_put_to_base);
+ // Load the type profiling log into T2.
+ TypeProfilerLog* cachedTypeProfilerLog = m_vm->typeProfilerLog();
+ move(TrustedImmPtr(cachedTypeProfilerLog), regT2);
+ // Load the next log entry into T1.
+ loadPtr(Address(regT2, TypeProfilerLog::currentLogEntryOffset()), regT1);
+
+ // Store the JSValue onto the log entry.
+ store64(regT0, Address(regT1, TypeProfilerLog::LogEntry::valueOffset()));
+
+ // Store the cell's structureID if T0 is a cell; otherwise store 0 in the log entry.
+ Jump notCell = emitJumpIfNotJSCell(regT0);
+ load32(Address(regT0, JSCell::structureIDOffset()), regT0);
+ store32(regT0, Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset()));
+ Jump skipIsCell = jump();
+ notCell.link(this);
+ store32(TrustedImm32(0), Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset()));
+ skipIsCell.link(this);
+
+ // Store the typeLocation on the log entry.
+ move(TrustedImmPtr(cachedTypeLocation), regT0);
+ store64(regT0, Address(regT1, TypeProfilerLog::LogEntry::locationOffset()));
+
+ // Increment the current log entry.
+ addPtr(TrustedImm32(sizeof(TypeProfilerLog::LogEntry)), regT1);
+ store64(regT1, Address(regT2, TypeProfilerLog::currentLogEntryOffset()));
+ Jump skipClearLog = branchPtr(NotEqual, regT1, TrustedImmPtr(cachedTypeProfilerLog->logEndPtr()));
+ // Clear the log if we're at the end of the log.
+ callOperation(operationProcessTypeProfilerLog);
+ skipClearLog.link(this);
- stubCall.addArgument(TrustedImm32(base));
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(id)));
- stubCall.addArgument(TrustedImm32(value));
- stubCall.addArgument(TrustedImmPtr(putToBaseOperation));
- stubCall.call();
+ jumpToEnd.link(this);
}
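
The inlined log handling above amounts to an append-then-flush protocol. Below is a simplified host-level sketch with a made-up entry layout; the real TypeProfilerLog and operationProcessTypeProfilerLog are more involved:

    #include <cstdint>
    #include <vector>

    struct LogEntry { uint64_t value; uint32_t structureID; const void* location; };

    struct ProfilerLog {
        std::vector<LogEntry> entries;
        LogEntry* cursor;
        LogEntry* end;

        explicit ProfilerLog(size_t capacity)
            : entries(capacity), cursor(entries.data()), end(entries.data() + capacity) { }

        void append(uint64_t value, uint32_t structureID, const void* location)
        {
            *cursor = { value, structureID, location }; // the three stores emitted above
            ++cursor;                                   // addPtr(sizeof(LogEntry)) plus the cursor store
            if (cursor == end)
                flush();                                // operationProcessTypeProfilerLog in the JIT code
        }

        void flush() { /* process entries... */ cursor = entries.data(); }
    };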
-void JIT::emit_op_new_regexp(Instruction* currentInstruction)
+#endif // USE(JSVALUE64)
+
+void JIT::emit_op_get_enumerable_length(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_new_regexp);
- stubCall.addArgument(TrustedImmPtr(m_codeBlock->regexp(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_enumerable_length);
+ slowPathCall.call();
}
-void JIT::emit_op_new_func(Instruction* currentInstruction)
+void JIT::emitSlow_op_has_structure_property(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- Jump lazyJump;
- int dst = currentInstruction[1].u.operand;
- if (currentInstruction[3].u.operand) {
-#if USE(JSVALUE32_64)
- lazyJump = branch32(NotEqual, tagFor(dst), TrustedImm32(JSValue::EmptyValueTag));
-#else
- lazyJump = branchTest64(NonZero, addressFor(dst));
-#endif
- }
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_has_structure_property);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_has_generic_property(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_has_generic_property);
+ slowPathCall.call();
+}
- JITStubCall stubCall(this, cti_op_new_func);
- stubCall.addArgument(TrustedImmPtr(m_codeBlock->functionDecl(currentInstruction[2].u.operand)));
- stubCall.call(dst);
+void JIT::emit_op_get_property_enumerator(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_property_enumerator);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_to_index_string(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_index_string);
+ slowPathCall.call();
+}
- if (currentInstruction[3].u.operand) {
-#if USE(JSVALUE32_64)
- unmap();
+void JIT::emit_op_profile_control_flow(Instruction* currentInstruction)
+{
+ BasicBlockLocation* basicBlockLocation = currentInstruction[1].u.basicBlockLocation;
+#if USE(JSVALUE64)
+ basicBlockLocation->emitExecuteCode(*this);
#else
- killLastResultRegister();
+ basicBlockLocation->emitExecuteCode(*this, regT0);
#endif
- lazyJump.link(this);
- }
}
-void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
+void JIT::emit_op_create_direct_arguments(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_new_func_exp);
- stubCall.addArgument(TrustedImmPtr(m_codeBlock->functionExpr(currentInstruction[2].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_direct_arguments);
+ slowPathCall.call();
}
-void JIT::emit_op_new_array(Instruction* currentInstruction)
+void JIT::emit_op_create_scoped_arguments(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_new_array);
- stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
- stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
- stubCall.addArgument(TrustedImmPtr(currentInstruction[4].u.arrayAllocationProfile));
- stubCall.call(currentInstruction[1].u.operand);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_scoped_arguments);
+ slowPathCall.call();
}
-void JIT::emit_op_new_array_with_size(Instruction* currentInstruction)
+void JIT::emit_op_create_out_of_band_arguments(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_new_array_with_size);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_out_of_band_arguments);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_copy_rest(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_copy_rest);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_get_rest_length(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ unsigned numParamsToSkip = currentInstruction[2].u.unsignedValue;
+ load32(payloadFor(JSStack::ArgumentCount), regT0);
+ sub32(TrustedImm32(1), regT0);
+ Jump zeroLength = branch32(LessThanOrEqual, regT0, Imm32(numParamsToSkip));
+ sub32(Imm32(numParamsToSkip), regT0);
#if USE(JSVALUE64)
- stubCall.addArgument(currentInstruction[2].u.operand, regT2);
+ boxInt32(regT0, JSValueRegs(regT0));
+#endif
+ Jump done = jump();
+
+ zeroLength.link(this);
+#if USE(JSVALUE64)
+ move(TrustedImm64(JSValue::encode(jsNumber(0))), regT0);
+#else
+ move(TrustedImm32(0), regT0);
+#endif
+
+ done.link(this);
+#if USE(JSVALUE64)
+ emitPutVirtualRegister(dst, regT0);
#else
- stubCall.addArgument(currentInstruction[2].u.operand);
+ move(TrustedImm32(JSValue::Int32Tag), regT1);
+ emitPutVirtualRegister(dst, JSValueRegs(regT1, regT0));
#endif
- stubCall.addArgument(TrustedImmPtr(currentInstruction[3].u.arrayAllocationProfile));
- stubCall.call(currentInstruction[1].u.operand);
}
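
The rest-length computation above is just saturating arithmetic on the argument count (which includes 'this', hence the initial subtraction of 1). A sketch of the same formula:

    #include <cassert>
    #include <cstdint>

    static int32_t restLength(int32_t argumentCountIncludingThis, int32_t numParamsToSkip)
    {
        int32_t count = argumentCountIncludingThis - 1;       // drop the 'this' slot
        return count > numParamsToSkip ? count - numParamsToSkip : 0;
    }

    int main()
    {
        assert(restLength(4, 1) == 2); // e.g. f(a, ...rest) called with three arguments
        assert(restLength(1, 1) == 0); // called with no arguments at all
        return 0;
    }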
-void JIT::emit_op_new_array_buffer(Instruction* currentInstruction)
+void JIT::emit_op_save(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_save);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_resume(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_new_array_buffer);
- stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
- stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
- stubCall.addArgument(TrustedImmPtr(currentInstruction[4].u.arrayAllocationProfile));
- stubCall.call(currentInstruction[1].u.operand);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_resume);
+ slowPathCall.call();
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
index 4836a66b5..c326ff3e0 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009, 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2012-2016 Apple Inc. All rights reserved.
* Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com>
*
* Redistribution and use in source and binary forms, with or without
@@ -30,14 +30,20 @@
#if USE(JSVALUE32_64)
#include "JIT.h"
+#include "CCallHelpers.h"
+#include "Debugger.h"
+#include "Exception.h"
#include "JITInlines.h"
-#include "JITStubCall.h"
#include "JSArray.h"
#include "JSCell.h"
+#include "JSEnvironmentRecord.h"
#include "JSFunction.h"
-#include "JSPropertyNameIterator.h"
-#include "JSVariableObject.h"
+#include "JSPropertyNameEnumerator.h"
#include "LinkBuffer.h"
+#include "MaxFrameExtentForSlowPathCall.h"
+#include "SlowPathCall.h"
+#include "TypeProfilerLog.h"
+#include "VirtualRegister.h"
namespace JSC {
@@ -45,139 +51,80 @@ JIT::CodeRef JIT::privateCompileCTINativeCall(VM* vm, NativeFunction func)
{
Call nativeCall;
- emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
+ emitFunctionPrologue();
+ emitPutToCallFrameHeader(0, JSStack::CodeBlock);
storePtr(callFrameRegister, &m_vm->topCallFrame);
#if CPU(X86)
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, regT0);
- emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1, regT0);
- emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
-
- peek(regT1);
- emitPutToCallFrameHeader(regT1, JSStack::ReturnPC);
-
// Calling convention: f(ecx, edx, ...);
// Host function signature: f(ExecState*);
move(callFrameRegister, X86Registers::ecx);
- subPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister); // Align stack after call.
-
- move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ subPtr(TrustedImm32(8), stackPointerRegister); // Align stack for call.
+ storePtr(X86Registers::ecx, Address(stackPointerRegister));
// call the function
nativeCall = call();
- addPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister);
-
-#elif CPU(ARM)
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, regT2);
- emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1, regT2);
- emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
-
- preserveReturnAddressAfterCall(regT3); // Callee preserved
- emitPutToCallFrameHeader(regT3, JSStack::ReturnPC);
+ addPtr(TrustedImm32(8), stackPointerRegister);
- // Calling convention: f(r0 == regT0, r1 == regT1, ...);
- // Host function signature: f(ExecState*);
- move(callFrameRegister, ARMRegisters::r0);
-
- emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARMRegisters::r1);
- move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
- loadPtr(Address(ARMRegisters::r1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
-
- // call the function
- nativeCall = call();
-
- restoreReturnAddressBeforeReturn(regT3);
-
-#elif CPU(MIPS)
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, regT0);
- emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1, regT0);
- emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
-
- preserveReturnAddressAfterCall(regT3); // Callee preserved
- emitPutToCallFrameHeader(regT3, JSStack::ReturnPC);
-
- // Calling convention: f(a0, a1, a2, a3);
- // Host function signature: f(ExecState*);
-
- // Allocate stack space for 16 bytes (8-byte aligned)
- // 16 bytes (unused) for 4 arguments
+#elif CPU(ARM) || CPU(SH4) || CPU(MIPS)
+#if CPU(MIPS)
+ // Allocate 16 bytes of stack space (8-byte aligned, unused) for 4 arguments.
subPtr(TrustedImm32(16), stackPointerRegister);
+#endif
- // Setup arg0
- move(callFrameRegister, MIPSRegisters::a0);
+ // Calling convention is f(argumentGPR0, argumentGPR1, ...).
+ // Host function signature is f(ExecState*).
+ move(callFrameRegister, argumentGPR0);
+
+ emitGetFromCallFrameHeaderPtr(JSStack::Callee, argumentGPR1);
+ loadPtr(Address(argumentGPR1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- // Call
- emitGetFromCallFrameHeaderPtr(JSStack::Callee, MIPSRegisters::a2);
- loadPtr(Address(MIPSRegisters::a2, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
- move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
-
// call the function
nativeCall = call();
+#if CPU(MIPS)
// Restore stack space
addPtr(TrustedImm32(16), stackPointerRegister);
-
- restoreReturnAddressBeforeReturn(regT3);
-#elif CPU(SH4)
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, regT2);
- emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1, regT2);
- emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
-
- preserveReturnAddressAfterCall(regT3); // Callee preserved
- emitPutToCallFrameHeader(regT3, JSStack::ReturnPC);
-
- // Calling convention: f(r0 == regT4, r1 == regT5, ...);
- // Host function signature: f(ExecState*);
- move(callFrameRegister, regT4);
-
- emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT5);
- move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
- loadPtr(Address(regT5, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
-
- // call the function
- nativeCall = call();
+#endif
restoreReturnAddressBeforeReturn(regT3);
#else
#error "JIT not supported on this platform."
- breakpoint();
+ abortWithReason(JITNotSupported);
#endif // CPU(X86)
// Check for an exception
- Jump sawException = branch32(NotEqual, AbsoluteAddress(reinterpret_cast<char*>(&vm->exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
+ Jump sawException = branch32(NotEqual, AbsoluteAddress(vm->addressOfException()), TrustedImm32(0));
+ emitFunctionEpilogue();
// Return.
ret();
// Handle an exception
sawException.link(this);
- // Grab the return address.
- preserveReturnAddressAfterCall(regT1);
-
- move(TrustedImmPtr(&vm->exceptionLocation), regT2);
- storePtr(regT1, regT2);
- poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
-
storePtr(callFrameRegister, &m_vm->topCallFrame);
- // Set the return address.
- move(TrustedImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), regT1);
- restoreReturnAddressBeforeReturn(regT1);
- ret();
+#if CPU(X86)
+ addPtr(TrustedImm32(-4), stackPointerRegister);
+ move(callFrameRegister, X86Registers::ecx);
+ push(X86Registers::ecx);
+#else
+ move(callFrameRegister, argumentGPR0);
+#endif
+ move(TrustedImmPtr(FunctionPtr(operationVMHandleException).value()), regT3);
+ call(regT3);
+
+#if CPU(X86)
+ addPtr(TrustedImm32(8), stackPointerRegister);
+#endif
+
+ jumpToExceptionHandler();
// All trampolines constructed! Copy the code, link up calls, and set the pointers on the Machine object.
- LinkBuffer patchBuffer(*m_vm, this, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*m_vm, *this, GLOBAL_THUNK_ID);
patchBuffer.link(nativeCall, FunctionPtr(func));
return FINALIZE_CODE(patchBuffer, ("JIT CTI native call"));
@@ -185,23 +132,23 @@ JIT::CodeRef JIT::privateCompileCTINativeCall(VM* vm, NativeFunction func)
void JIT::emit_op_mov(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src = currentInstruction[2].u.operand;
-
+ int dst = currentInstruction[1].u.operand;
+ int src = currentInstruction[2].u.operand;
+
if (m_codeBlock->isConstantRegisterIndex(src))
emitStore(dst, getConstantOperand(src));
else {
emitLoad(src, regT1, regT0);
emitStore(dst, regT1, regT0);
- map(m_bytecodeOffset + OPCODE_LENGTH(op_mov), dst, regT1, regT0);
}
}
void JIT::emit_op_end(Instruction* currentInstruction)
{
- ASSERT(returnValueRegister != callFrameRegister);
- emitLoad(currentInstruction[1].u.operand, regT1, regT0);
- restoreReturnAddressBeforeReturn(Address(callFrameRegister, JSStack::ReturnPC * static_cast<int>(sizeof(Register))));
+ ASSERT(returnValueGPR != callFrameRegister);
+ emitLoad(currentInstruction[1].u.operand, regT1, returnValueGPR);
+ emitRestoreCalleeSaves();
+ emitFunctionEpilogue();
ret();
}
@@ -214,12 +161,12 @@ void JIT::emit_op_jmp(Instruction* currentInstruction)
void JIT::emit_op_new_object(Instruction* currentInstruction)
{
Structure* structure = currentInstruction[3].u.objectAllocationProfile->structure();
- size_t allocationSize = JSObject::allocationSize(structure->inlineCapacity());
+ size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity());
MarkedAllocator* allocator = &m_vm->heap.allocatorForObjectWithoutDestructor(allocationSize);
- RegisterID resultReg = regT0;
+ RegisterID resultReg = returnValueGPR;
RegisterID allocatorReg = regT1;
- RegisterID scratchReg = regT2;
+ RegisterID scratchReg = regT3;
move(TrustedImmPtr(allocator), allocatorReg);
emitAllocateJSObject(allocatorReg, TrustedImmPtr(structure), resultReg, scratchReg);
@@ -229,43 +176,56 @@ void JIT::emit_op_new_object(Instruction* currentInstruction)
void JIT::emitSlow_op_new_object(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_new_object);
- stubCall.addArgument(TrustedImmPtr(currentInstruction[3].u.objectAllocationProfile->structure()));
- stubCall.call(currentInstruction[1].u.operand);
+ int dst = currentInstruction[1].u.operand;
+ Structure* structure = currentInstruction[3].u.objectAllocationProfile->structure();
+ callOperation(operationNewObject, structure);
+ emitStoreCell(dst, returnValueGPR);
}
-void JIT::emit_op_check_has_instance(Instruction* currentInstruction)
+void JIT::emit_op_overrides_has_instance(Instruction* currentInstruction)
{
- unsigned baseVal = currentInstruction[3].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int constructor = currentInstruction[2].u.operand;
+ int hasInstanceValue = currentInstruction[3].u.operand;
- emitLoadPayload(baseVal, regT0);
+ emitLoadPayload(hasInstanceValue, regT0);
+ // We don't jump if we know what Symbol.hasInstance would do.
+ Jump hasInstanceValueNotCell = emitJumpIfNotJSCell(hasInstanceValue);
+ Jump customhasInstanceValue = branchPtr(NotEqual, regT0, TrustedImmPtr(m_codeBlock->globalObject()->functionProtoHasInstanceSymbolFunction()));
+
+ // We know that constructor is an object from the way bytecode is emitted for instanceof expressions.
+ emitLoadPayload(constructor, regT0);
+
+ // Check that the constructor has the 'ImplementsDefaultHasInstance' type-info flag, i.e. the object is not a C-API user nor a bound function.
+ test8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance), regT0);
+ Jump done = jump();
+
+ hasInstanceValueNotCell.link(this);
+ customhasInstanceValue.link(this);
+ move(TrustedImm32(1), regT0);
+
+ done.link(this);
+ emitStoreBool(dst, regT0);
- // Check that baseVal is a cell.
- emitJumpSlowCaseIfNotJSCell(baseVal);
-
- // Check that baseVal 'ImplementsHasInstance'.
- loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
- addSlowCase(branchTest8(Zero, Address(regT0, Structure::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance)));
}
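
At the boolean level, the block above reduces to the following decision; the sketch is illustrative only, with made-up parameter names:

    bool overridesHasInstance(bool hasInstanceValueIsCell,
                              bool hasInstanceValueIsDefaultHasInstanceFunction,
                              bool constructorImplementsDefaultHasInstance)
    {
        // Anything other than the well-known default Symbol.hasInstance function
        // conservatively reports "overridden".
        if (!hasInstanceValueIsCell || !hasInstanceValueIsDefaultHasInstanceFunction)
            return true;
        // Otherwise the answer depends only on the constructor's type-info flag.
        return !constructorImplementsDefaultHasInstance;
    }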
void JIT::emit_op_instanceof(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned value = currentInstruction[2].u.operand;
- unsigned proto = currentInstruction[3].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
+ int proto = currentInstruction[3].u.operand;
// Load the operands into registers.
// We use regT0 for baseVal since we will be done with this first, and we can then use it for the result.
emitLoadPayload(value, regT2);
emitLoadPayload(proto, regT1);
- // Check that proto are cells. baseVal must be a cell - this is checked by op_check_has_instance.
+ // Check that value and proto are cells. baseVal must be a cell - this is checked by the get_by_id for Symbol.hasInstance.
emitJumpSlowCaseIfNotJSCell(value);
emitJumpSlowCaseIfNotJSCell(proto);
// Check that prototype is an object
- loadPtr(Address(regT1, JSCell::structureOffset()), regT3);
- addSlowCase(emitJumpIfNotObject(regT3));
+ addSlowCase(emitJumpIfCellNotObject(regT1));
// Optimistically load the result true, and start looping.
// Initially, regT1 still contains proto and regT2 still contains value.
@@ -275,7 +235,7 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
// Load the prototype of the cell in regT2. If this is equal to regT1 - WIN!
// Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
- loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
+ loadPtr(Address(regT2, JSCell::structureIDOffset()), regT2);
load32(Address(regT2, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
Jump isInstance = branchPtr(Equal, regT2, regT1);
branchTest32(NonZero, regT2).linkTo(loop, this);
@@ -288,43 +248,47 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
emitStoreBool(dst, regT0);
}
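
The inline loop above is the usual prototype-chain walk, restated here over a simplified cell type (illustrative, not the real JSCell API):

    struct Cell { const Cell* prototype; };

    bool instanceOf(const Cell* value, const Cell* proto)
    {
        // Start from value's prototype, not from value itself, matching the emitted loop.
        for (const Cell* current = value->prototype; current; current = current->prototype) {
            if (current == proto)
                return true;  // hit the searched-for prototype
        }
        return false;         // fell off the end of the chain (null prototype)
    }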
-void JIT::emitSlow_op_check_has_instance(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emit_op_instanceof_custom(Instruction*)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned value = currentInstruction[2].u.operand;
- unsigned baseVal = currentInstruction[3].u.operand;
-
- linkSlowCaseIfNotJSCell(iter, baseVal);
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_check_has_instance);
- stubCall.addArgument(value);
- stubCall.addArgument(baseVal);
- stubCall.call(dst);
-
- emitJumpSlowToHot(jump(), currentInstruction[4].u.operand);
+ // This always goes to slow path since we expect it to be rare.
+ addSlowCase(jump());
}
void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned value = currentInstruction[2].u.operand;
- unsigned proto = currentInstruction[3].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
+ int proto = currentInstruction[3].u.operand;
linkSlowCaseIfNotJSCell(iter, value);
linkSlowCaseIfNotJSCell(iter, proto);
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_instanceof);
- stubCall.addArgument(value);
- stubCall.addArgument(proto);
- stubCall.call(dst);
+ emitLoad(value, regT1, regT0);
+ emitLoad(proto, regT3, regT2);
+ callOperation(operationInstanceOf, dst, regT1, regT0, regT3, regT2);
+}
+
+void JIT::emitSlow_op_instanceof_custom(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int dst = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
+ int constructor = currentInstruction[3].u.operand;
+ int hasInstanceValue = currentInstruction[4].u.operand;
+
+ linkSlowCase(iter);
+
+ emitLoad(value, regT1, regT0);
+ emitLoadPayload(constructor, regT2);
+ emitLoad(hasInstanceValue, regT4, regT3);
+ callOperation(operationInstanceOfCustom, regT1, regT0, regT2, regT4, regT3);
+ emitStoreBool(dst, returnValueGPR);
}
void JIT::emit_op_is_undefined(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned value = currentInstruction[2].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
emitLoad(value, regT1, regT0);
Jump isCell = branch32(Equal, regT1, TrustedImm32(JSValue::CellTag));
@@ -333,12 +297,12 @@ void JIT::emit_op_is_undefined(Instruction* currentInstruction)
Jump done = jump();
isCell.link(this);
- loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
- Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT1, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
move(TrustedImm32(0), regT0);
Jump notMasqueradesAsUndefined = jump();
isMasqueradesAsUndefined.link(this);
+ loadPtr(Address(regT0, JSCell::structureIDOffset()), regT1);
move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
loadPtr(Address(regT1, Structure::globalObjectOffset()), regT1);
compare32(Equal, regT0, regT1, regT0);
@@ -350,8 +314,8 @@ void JIT::emit_op_is_undefined(Instruction* currentInstruction)
void JIT::emit_op_is_boolean(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned value = currentInstruction[2].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
emitLoadTag(value, regT0);
compare32(Equal, regT0, TrustedImm32(JSValue::BooleanTag), regT0);
@@ -360,8 +324,8 @@ void JIT::emit_op_is_boolean(Instruction* currentInstruction)
void JIT::emit_op_is_number(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned value = currentInstruction[2].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
emitLoadTag(value, regT0);
add32(TrustedImm32(1), regT0);
@@ -371,14 +335,13 @@ void JIT::emit_op_is_number(Instruction* currentInstruction)
void JIT::emit_op_is_string(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned value = currentInstruction[2].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
emitLoad(value, regT1, regT0);
Jump isNotCell = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
- loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
- compare8(Equal, Address(regT1, Structure::typeInfoTypeOffset()), TrustedImm32(StringType), regT0);
+ compare8(Equal, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType), regT0);
Jump done = jump();
isNotCell.link(this);
@@ -388,27 +351,22 @@ void JIT::emit_op_is_string(Instruction* currentInstruction)
emitStoreBool(dst, regT0);
}
-void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
+void JIT::emit_op_is_object(Instruction* currentInstruction)
{
- unsigned activation = currentInstruction[1].u.operand;
- Jump activationNotCreated = branch32(Equal, tagFor(activation), TrustedImm32(JSValue::EmptyValueTag));
- JITStubCall stubCall(this, cti_op_tear_off_activation);
- stubCall.addArgument(activation);
- stubCall.call();
- activationNotCreated.link(this);
-}
+ int dst = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
-void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction)
-{
- int arguments = currentInstruction[1].u.operand;
- int activation = currentInstruction[2].u.operand;
+ emitLoad(value, regT1, regT0);
+ Jump isNotCell = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
+
+ compare8(AboveOrEqual, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType), regT0);
+ Jump done = jump();
- Jump argsNotCreated = branch32(Equal, tagFor(unmodifiedArgumentsRegister(arguments)), TrustedImm32(JSValue::EmptyValueTag));
- JITStubCall stubCall(this, cti_op_tear_off_arguments);
- stubCall.addArgument(unmodifiedArgumentsRegister(arguments));
- stubCall.addArgument(activation);
- stubCall.call();
- argsNotCreated.link(this);
+ isNotCell.link(this);
+ move(TrustedImm32(0), regT0);
+
+ done.link(this);
+ emitStoreBool(dst, regT0);
}
void JIT::emit_op_to_primitive(Instruction* currentInstruction)
@@ -419,37 +377,31 @@ void JIT::emit_op_to_primitive(Instruction* currentInstruction)
emitLoad(src, regT1, regT0);
Jump isImm = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
- addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
+ addSlowCase(emitJumpIfCellObject(regT0));
isImm.link(this);
if (dst != src)
emitStore(dst, regT1, regT0);
- map(m_bytecodeOffset + OPCODE_LENGTH(op_to_primitive), dst, regT1, regT0);
}
void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- int dst = currentInstruction[1].u.operand;
-
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_to_primitive);
- stubCall.addArgument(regT1, regT0);
- stubCall.call(dst);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_primitive);
+ slowPathCall.call();
}
void JIT::emit_op_strcat(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_strcat);
- stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
- stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
- stubCall.call(currentInstruction[1].u.operand);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_strcat);
+ slowPathCall.call();
}
void JIT::emit_op_not(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src = currentInstruction[2].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int src = currentInstruction[2].u.operand;
emitLoadTag(src, regT0);
@@ -462,19 +414,15 @@ void JIT::emit_op_not(Instruction* currentInstruction)
void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src = currentInstruction[2].u.operand;
-
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_not);
- stubCall.addArgument(src);
- stubCall.call(dst);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_not);
+ slowPathCall.call();
}
void JIT::emit_op_jfalse(Instruction* currentInstruction)
{
- unsigned cond = currentInstruction[1].u.operand;
+ int cond = currentInstruction[1].u.operand;
unsigned target = currentInstruction[2].u.operand;
emitLoad(cond, regT1, regT0);
@@ -486,7 +434,7 @@ void JIT::emit_op_jfalse(Instruction* currentInstruction)
void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned cond = currentInstruction[1].u.operand;
+ int cond = currentInstruction[1].u.operand;
unsigned target = currentInstruction[2].u.operand;
linkSlowCase(iter);
@@ -502,15 +450,13 @@ void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEnt
notNumber.link(this);
}
- JITStubCall stubCall(this, cti_op_jtrue);
- stubCall.addArgument(cond);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(Zero, regT0), target); // Inverted.
+ callOperation(operationConvertJSValueToBoolean, regT1, regT0);
+ emitJumpSlowToHot(branchTest32(Zero, returnValueGPR), target); // Inverted.
}
void JIT::emit_op_jtrue(Instruction* currentInstruction)
{
- unsigned cond = currentInstruction[1].u.operand;
+ int cond = currentInstruction[1].u.operand;
unsigned target = currentInstruction[2].u.operand;
emitLoad(cond, regT1, regT0);
@@ -522,7 +468,7 @@ void JIT::emit_op_jtrue(Instruction* currentInstruction)
void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned cond = currentInstruction[1].u.operand;
+ int cond = currentInstruction[1].u.operand;
unsigned target = currentInstruction[2].u.operand;
linkSlowCase(iter);
@@ -538,24 +484,21 @@ void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntr
notNumber.link(this);
}
- JITStubCall stubCall(this, cti_op_jtrue);
- stubCall.addArgument(cond);
- stubCall.call();
- emitJumpSlowToHot(branchTest32(NonZero, regT0), target);
+ callOperation(operationConvertJSValueToBoolean, regT1, regT0);
+ emitJumpSlowToHot(branchTest32(NonZero, returnValueGPR), target);
}
void JIT::emit_op_jeq_null(Instruction* currentInstruction)
{
- unsigned src = currentInstruction[1].u.operand;
+ int src = currentInstruction[1].u.operand;
unsigned target = currentInstruction[2].u.operand;
emitLoad(src, regT1, regT0);
Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
- // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- Jump isNotMasqueradesAsUndefined = branchTest8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ Jump isNotMasqueradesAsUndefined = branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ loadPtr(Address(regT0, JSCell::structureIDOffset()), regT2);
move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
addJump(branchPtr(Equal, Address(regT2, Structure::globalObjectOffset()), regT0), target);
Jump masqueradesGlobalObjectIsForeign = jump();
@@ -572,16 +515,15 @@ void JIT::emit_op_jeq_null(Instruction* currentInstruction)
void JIT::emit_op_jneq_null(Instruction* currentInstruction)
{
- unsigned src = currentInstruction[1].u.operand;
+ int src = currentInstruction[1].u.operand;
unsigned target = currentInstruction[2].u.operand;
emitLoad(src, regT1, regT0);
Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
- // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- addJump(branchTest8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
+ addJump(branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
+ loadPtr(Address(regT0, JSCell::structureIDOffset()), regT2);
move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
addJump(branchPtr(NotEqual, Address(regT2, Structure::globalObjectOffset()), regT0), target);
Jump wasNotImmediate = jump();
@@ -598,7 +540,7 @@ void JIT::emit_op_jneq_null(Instruction* currentInstruction)
void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
{
- unsigned src = currentInstruction[1].u.operand;
+ int src = currentInstruction[1].u.operand;
Special::Pointer ptr = currentInstruction[2].u.specialPointer;
unsigned target = currentInstruction[3].u.operand;
@@ -609,9 +551,9 @@ void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
void JIT::emit_op_eq(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
- unsigned src2 = currentInstruction[3].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int src1 = currentInstruction[2].u.operand;
+ int src2 = currentInstruction[3].u.operand;
emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
addSlowCase(branch32(NotEqual, regT1, regT3));
@@ -625,9 +567,9 @@ void JIT::emit_op_eq(Instruction* currentInstruction)
void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned op1 = currentInstruction[2].u.operand;
- unsigned op2 = currentInstruction[3].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
JumpList storeResult;
JumpList genericCase;
@@ -635,33 +577,29 @@ void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>:
genericCase.append(getSlowCase(iter)); // tags not equal
linkSlowCase(iter); // tags equal and JSCell
- genericCase.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
- genericCase.append(branchPtr(NotEqual, Address(regT2, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
+ genericCase.append(branchPtr(NotEqual, Address(regT0, JSCell::structureIDOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
+ genericCase.append(branchPtr(NotEqual, Address(regT2, JSCell::structureIDOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
// String case.
- JITStubCall stubCallEqStrings(this, cti_op_eq_strings);
- stubCallEqStrings.addArgument(regT0);
- stubCallEqStrings.addArgument(regT2);
- stubCallEqStrings.call();
+ callOperation(operationCompareStringEq, regT0, regT2);
storeResult.append(jump());
// Generic case.
genericCase.append(getSlowCase(iter)); // doubles
genericCase.link(this);
- JITStubCall stubCallEq(this, cti_op_eq);
- stubCallEq.addArgument(op1);
- stubCallEq.addArgument(op2);
- stubCallEq.call(regT0);
+ emitLoad(op1, regT1, regT0);
+ emitLoad(op2, regT3, regT2);
+ callOperation(operationCompareEq, regT1, regT0, regT3, regT2);
storeResult.link(this);
- emitStoreBool(dst, regT0);
+ emitStoreBool(dst, returnValueGPR);
}
void JIT::emit_op_neq(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
- unsigned src2 = currentInstruction[3].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int src1 = currentInstruction[2].u.operand;
+ int src2 = currentInstruction[3].u.operand;
emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
addSlowCase(branch32(NotEqual, regT1, regT3));
@@ -675,7 +613,7 @@ void JIT::emit_op_neq(Instruction* currentInstruction)
void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned dst = currentInstruction[1].u.operand;
+ int dst = currentInstruction[1].u.operand;
JumpList storeResult;
JumpList genericCase;
@@ -683,34 +621,28 @@ void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>
genericCase.append(getSlowCase(iter)); // tags not equal
linkSlowCase(iter); // tags equal and JSCell
- genericCase.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
- genericCase.append(branchPtr(NotEqual, Address(regT2, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
+ genericCase.append(branchPtr(NotEqual, Address(regT0, JSCell::structureIDOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
+ genericCase.append(branchPtr(NotEqual, Address(regT2, JSCell::structureIDOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
// String case.
- JITStubCall stubCallEqStrings(this, cti_op_eq_strings);
- stubCallEqStrings.addArgument(regT0);
- stubCallEqStrings.addArgument(regT2);
- stubCallEqStrings.call(regT0);
+ callOperation(operationCompareStringEq, regT0, regT2);
storeResult.append(jump());
// Generic case.
genericCase.append(getSlowCase(iter)); // doubles
genericCase.link(this);
- JITStubCall stubCallEq(this, cti_op_eq);
- stubCallEq.addArgument(regT1, regT0);
- stubCallEq.addArgument(regT3, regT2);
- stubCallEq.call(regT0);
+ callOperation(operationCompareEq, regT1, regT0, regT3, regT2);
storeResult.link(this);
- xor32(TrustedImm32(0x1), regT0);
- emitStoreBool(dst, regT0);
+ xor32(TrustedImm32(0x1), returnValueGPR);
+ emitStoreBool(dst, returnValueGPR);
}
void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
- unsigned src2 = currentInstruction[3].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int src1 = currentInstruction[2].u.operand;
+ int src2 = currentInstruction[3].u.operand;
emitLoad2(src1, regT1, regT0, src2, regT3, regT2);
@@ -718,12 +650,12 @@ void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqTy
addSlowCase(branch32(NotEqual, regT1, regT3));
addSlowCase(branch32(Below, regT1, TrustedImm32(JSValue::LowestTag)));
- // Jump to a slow case if both are strings.
+ // Jump to a slow case if both are strings or symbols (non-object).
Jump notCell = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
- Jump firstNotString = branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get()));
- addSlowCase(branchPtr(Equal, Address(regT2, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
+ Jump firstIsObject = emitJumpIfCellObject(regT0);
+ addSlowCase(emitJumpIfCellNotObject(regT2));
notCell.link(this);
- firstNotString.link(this);
+ firstIsObject.link(this);
// Simply compare the payloads.
if (type == OpStrictEq)
@@ -741,18 +673,12 @@ void JIT::emit_op_stricteq(Instruction* currentInstruction)
void JIT::emitSlow_op_stricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
- unsigned src2 = currentInstruction[3].u.operand;
-
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_stricteq);
- stubCall.addArgument(src1);
- stubCall.addArgument(src2);
- stubCall.call(dst);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_stricteq);
+ slowPathCall.call();
}
void JIT::emit_op_nstricteq(Instruction* currentInstruction)
@@ -762,34 +688,28 @@ void JIT::emit_op_nstricteq(Instruction* currentInstruction)
void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src1 = currentInstruction[2].u.operand;
- unsigned src2 = currentInstruction[3].u.operand;
-
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_nstricteq);
- stubCall.addArgument(src1);
- stubCall.addArgument(src2);
- stubCall.call(dst);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_nstricteq);
+ slowPathCall.call();
}
void JIT::emit_op_eq_null(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src = currentInstruction[2].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int src = currentInstruction[2].u.operand;
emitLoad(src, regT1, regT0);
Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
move(TrustedImm32(0), regT1);
Jump wasNotMasqueradesAsUndefined = jump();
isMasqueradesAsUndefined.link(this);
+ loadPtr(Address(regT0, JSCell::structureIDOffset()), regT2);
move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2);
compare32(Equal, regT0, regT2, regT1);
@@ -809,18 +729,18 @@ void JIT::emit_op_eq_null(Instruction* currentInstruction)
void JIT::emit_op_neq_null(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned src = currentInstruction[2].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int src = currentInstruction[2].u.operand;
emitLoad(src, regT1, regT0);
Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
move(TrustedImm32(1), regT1);
Jump wasNotMasqueradesAsUndefined = jump();
isMasqueradesAsUndefined.link(this);
+ loadPtr(Address(regT0, JSCell::structureIDOffset()), regT2);
move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2);
compare32(NotEqual, regT0, regT2, regT1);
@@ -840,229 +760,154 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction)
void JIT::emit_op_throw(Instruction* currentInstruction)
{
- unsigned exception = currentInstruction[1].u.operand;
- JITStubCall stubCall(this, cti_op_throw);
- stubCall.addArgument(exception);
- stubCall.call();
-
-#ifndef NDEBUG
- // cti_op_throw always changes it's return address,
- // this point in the code should never be reached.
- breakpoint();
-#endif
+ ASSERT(regT0 == returnValueGPR);
+ copyCalleeSavesToVMCalleeSavesBuffer();
+ emitLoad(currentInstruction[1].u.operand, regT1, regT0);
+ callOperationNoExceptionCheck(operationThrow, regT1, regT0);
+ jumpToExceptionHandler();
}
-void JIT::emit_op_get_pnames(Instruction* currentInstruction)
+void JIT::emit_op_push_with_scope(Instruction* currentInstruction)
{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int i = currentInstruction[3].u.operand;
- int size = currentInstruction[4].u.operand;
- int breakTarget = currentInstruction[5].u.operand;
-
- JumpList isNotObject;
-
- emitLoad(base, regT1, regT0);
- if (!m_codeBlock->isKnownNotImmediate(base))
- isNotObject.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
- if (base != m_codeBlock->thisRegister() || m_codeBlock->isStrictMode()) {
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- isNotObject.append(emitJumpIfNotObject(regT2));
- }
-
- // We could inline the case where you have a valid cache, but
- // this call doesn't seem to be hot.
- Label isObject(this);
- JITStubCall getPnamesStubCall(this, cti_op_get_pnames);
- getPnamesStubCall.addArgument(regT0);
- getPnamesStubCall.call(dst);
- load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3);
- store32(TrustedImm32(Int32Tag), intTagFor(i));
- store32(TrustedImm32(0), intPayloadFor(i));
- store32(TrustedImm32(Int32Tag), intTagFor(size));
- store32(regT3, payloadFor(size));
- Jump end = jump();
-
- isNotObject.link(this);
- addJump(branch32(Equal, regT1, TrustedImm32(JSValue::NullTag)), breakTarget);
- addJump(branch32(Equal, regT1, TrustedImm32(JSValue::UndefinedTag)), breakTarget);
- JITStubCall toObjectStubCall(this, cti_to_object);
- toObjectStubCall.addArgument(regT1, regT0);
- toObjectStubCall.call(base);
- jump().linkTo(isObject, this);
-
- end.link(this);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_push_with_scope);
+ slowPathCall.call();
}
-void JIT::emit_op_next_pname(Instruction* currentInstruction)
+void JIT::emit_op_to_number(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int i = currentInstruction[3].u.operand;
- int size = currentInstruction[4].u.operand;
- int it = currentInstruction[5].u.operand;
- int target = currentInstruction[6].u.operand;
-
- JumpList callHasProperty;
-
- Label begin(this);
- load32(intPayloadFor(i), regT0);
- Jump end = branch32(Equal, regT0, intPayloadFor(size));
-
- // Grab key @ i
- loadPtr(payloadFor(it), regT1);
- loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2);
- load32(BaseIndex(regT2, regT0, TimesEight), regT2);
- store32(TrustedImm32(JSValue::CellTag), tagFor(dst));
- store32(regT2, payloadFor(dst));
-
- // Increment i
- add32(TrustedImm32(1), regT0);
- store32(regT0, intPayloadFor(i));
-
- // Verify that i is valid:
- loadPtr(payloadFor(base), regT0);
-
- // Test base's structure
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- callHasProperty.append(branchPtr(NotEqual, regT2, Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure)))));
-
- // Test base's prototype chain
- loadPtr(Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedPrototypeChain))), regT3);
- loadPtr(Address(regT3, OBJECT_OFFSETOF(StructureChain, m_vector)), regT3);
- addJump(branchTestPtr(Zero, Address(regT3)), target);
-
- Label checkPrototype(this);
- callHasProperty.append(branch32(Equal, Address(regT2, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::NullTag)));
- loadPtr(Address(regT2, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
- loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
- callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3)));
- addPtr(TrustedImm32(sizeof(Structure*)), regT3);
- branchTestPtr(NonZero, Address(regT3)).linkTo(checkPrototype, this);
-
- // Continue loop.
- addJump(jump(), target);
+ int src = currentInstruction[2].u.operand;
- // Slow case: Ask the object if i is valid.
- callHasProperty.link(this);
- loadPtr(addressFor(dst), regT1);
- JITStubCall stubCall(this, cti_has_property);
- stubCall.addArgument(regT0);
- stubCall.addArgument(regT1);
- stubCall.call();
+ emitLoad(src, regT1, regT0);
- // Test for valid key.
- addJump(branchTest32(NonZero, regT0), target);
- jump().linkTo(begin, this);
+ Jump isInt32 = branch32(Equal, regT1, TrustedImm32(JSValue::Int32Tag));
+ addSlowCase(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::LowestTag)));
+ isInt32.link(this);
- // End of loop.
- end.link(this);
+ if (src != dst)
+ emitStore(dst, regT1, regT0);
}
-void JIT::emit_op_push_with_scope(Instruction* currentInstruction)
+void JIT::emitSlow_op_to_number(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- JITStubCall stubCall(this, cti_op_push_with_scope);
- stubCall.addArgument(currentInstruction[1].u.operand);
- stubCall.call();
-}
+ linkSlowCase(iter);
-void JIT::emit_op_pop_scope(Instruction*)
-{
- JITStubCall(this, cti_op_pop_scope).call();
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_number);
+ slowPathCall.call();
}
-void JIT::emit_op_to_number(Instruction* currentInstruction)
+void JIT::emit_op_to_string(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
int src = currentInstruction[2].u.operand;
emitLoad(src, regT1, regT0);
- Jump isInt32 = branch32(Equal, regT1, TrustedImm32(JSValue::Int32Tag));
- addSlowCase(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::LowestTag)));
- isInt32.link(this);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
+ addSlowCase(branch8(NotEqual, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType)));
if (src != dst)
emitStore(dst, regT1, regT0);
- map(m_bytecodeOffset + OPCODE_LENGTH(op_to_number), dst, regT1, regT0);
}
-void JIT::emitSlow_op_to_number(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_to_string(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- int dst = currentInstruction[1].u.operand;
-
- linkSlowCase(iter);
+ linkSlowCase(iter); // Not JSCell.
+ linkSlowCase(iter); // Not JSString.
- JITStubCall stubCall(this, cti_op_to_number);
- stubCall.addArgument(regT1, regT0);
- stubCall.call(dst);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_string);
+ slowPathCall.call();
}
-void JIT::emit_op_push_name_scope(Instruction* currentInstruction)
+void JIT::emit_op_catch(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_push_name_scope);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[1].u.operand)));
- stubCall.addArgument(currentInstruction[2].u.operand);
- stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
- stubCall.call();
+ restoreCalleeSavesFromVMCalleeSavesBuffer();
+
+ move(TrustedImmPtr(m_vm), regT3);
+ // operationThrow returns the callFrame for the handler.
+ load32(Address(regT3, VM::callFrameForCatchOffset()), callFrameRegister);
+ storePtr(TrustedImmPtr(nullptr), Address(regT3, VM::callFrameForCatchOffset()));
+
+ addPtr(TrustedImm32(stackPointerOffsetFor(codeBlock()) * sizeof(Register)), callFrameRegister, stackPointerRegister);
+
+ callOperationNoExceptionCheck(operationCheckIfExceptionIsUncatchableAndNotifyProfiler);
+ Jump isCatchableException = branchTest32(Zero, returnValueGPR);
+ jumpToExceptionHandler();
+ isCatchableException.link(this);
+
+ move(TrustedImmPtr(m_vm), regT3);
+
+ // Now store the exception returned by operationThrow.
+ load32(Address(regT3, VM::exceptionOffset()), regT2);
+ move(TrustedImm32(JSValue::CellTag), regT1);
+
+ store32(TrustedImm32(0), Address(regT3, VM::exceptionOffset()));
+
+ unsigned exception = currentInstruction[1].u.operand;
+ emitStore(exception, regT1, regT2);
+
+ load32(Address(regT2, Exception::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
+ load32(Address(regT2, Exception::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
+
+ unsigned thrownValue = currentInstruction[2].u.operand;
+ emitStore(thrownValue, regT1, regT0);
}
-void JIT::emit_op_catch(Instruction* currentInstruction)
+void JIT::emit_op_assert(Instruction* currentInstruction)
{
- // cti_op_throw returns the callFrame for the handler.
- move(regT0, callFrameRegister);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_assert);
+ slowPathCall.call();
+}
- // Now store the exception returned by cti_op_throw.
- loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(struct JITStackFrame, vm)), regT3);
- load32(Address(regT3, OBJECT_OFFSETOF(VM, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
- load32(Address(regT3, OBJECT_OFFSETOF(VM, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
- store32(TrustedImm32(JSValue().payload()), Address(regT3, OBJECT_OFFSETOF(VM, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
- store32(TrustedImm32(JSValue().tag()), Address(regT3, OBJECT_OFFSETOF(VM, exception) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+void JIT::emit_op_create_lexical_environment(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_lexical_environment);
+ slowPathCall.call();
+}
- unsigned exception = currentInstruction[1].u.operand;
- emitStore(exception, regT1, regT0);
- map(m_bytecodeOffset + OPCODE_LENGTH(op_catch), exception, regT1, regT0);
+void JIT::emit_op_get_parent_scope(Instruction* currentInstruction)
+{
+ int currentScope = currentInstruction[2].u.operand;
+ emitLoadPayload(currentScope, regT0);
+ loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
+ emitStoreCell(currentInstruction[1].u.operand, regT0);
}
void JIT::emit_op_switch_imm(Instruction* currentInstruction)
{
- unsigned tableIndex = currentInstruction[1].u.operand;
+ size_t tableIndex = currentInstruction[1].u.operand;
unsigned defaultOffset = currentInstruction[2].u.operand;
unsigned scrutinee = currentInstruction[3].u.operand;
// create jump table for switch destinations, track this switch statement.
- SimpleJumpTable* jumpTable = &m_codeBlock->immediateSwitchJumpTable(tableIndex);
+ SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex);
m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Immediate));
- jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+ jumpTable->ensureCTITable();
- JITStubCall stubCall(this, cti_op_switch_imm);
- stubCall.addArgument(scrutinee);
- stubCall.addArgument(TrustedImm32(tableIndex));
- stubCall.call();
- jump(regT0);
+ emitLoad(scrutinee, regT1, regT0);
+ callOperation(operationSwitchImmWithUnknownKeyType, regT1, regT0, tableIndex);
+ jump(returnValueGPR);
}
void JIT::emit_op_switch_char(Instruction* currentInstruction)
{
- unsigned tableIndex = currentInstruction[1].u.operand;
+ size_t tableIndex = currentInstruction[1].u.operand;
unsigned defaultOffset = currentInstruction[2].u.operand;
unsigned scrutinee = currentInstruction[3].u.operand;
// create jump table for switch destinations, track this switch statement.
- SimpleJumpTable* jumpTable = &m_codeBlock->characterSwitchJumpTable(tableIndex);
+ SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex);
m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Character));
- jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+ jumpTable->ensureCTITable();
- JITStubCall stubCall(this, cti_op_switch_char);
- stubCall.addArgument(scrutinee);
- stubCall.addArgument(TrustedImm32(tableIndex));
- stubCall.call();
- jump(regT0);
+ emitLoad(scrutinee, regT1, regT0);
+ callOperation(operationSwitchCharWithUnknownKeyType, regT1, regT0, tableIndex);
+ jump(returnValueGPR);
}
void JIT::emit_op_switch_string(Instruction* currentInstruction)
{
- unsigned tableIndex = currentInstruction[1].u.operand;
+ size_t tableIndex = currentInstruction[1].u.operand;
unsigned defaultOffset = currentInstruction[2].u.operand;
unsigned scrutinee = currentInstruction[3].u.operand;
@@ -1070,40 +915,27 @@ void JIT::emit_op_switch_string(Instruction* currentInstruction)
StringJumpTable* jumpTable = &m_codeBlock->stringSwitchJumpTable(tableIndex);
m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset));
- JITStubCall stubCall(this, cti_op_switch_string);
- stubCall.addArgument(scrutinee);
- stubCall.addArgument(TrustedImm32(tableIndex));
- stubCall.call();
- jump(regT0);
+ emitLoad(scrutinee, regT1, regT0);
+ callOperation(operationSwitchStringWithUnknownKeyType, regT1, regT0, tableIndex);
+ jump(returnValueGPR);
}
void JIT::emit_op_throw_static_error(Instruction* currentInstruction)
{
- unsigned message = currentInstruction[1].u.operand;
-
- JITStubCall stubCall(this, cti_op_throw_static_error);
- stubCall.addArgument(m_codeBlock->getConstant(message));
- stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
- stubCall.call();
+ emitLoad(m_codeBlock->getConstant(currentInstruction[1].u.operand), regT1, regT0);
+ callOperation(operationThrowStaticError, regT1, regT0, currentInstruction[2].u.operand);
}
void JIT::emit_op_debug(Instruction* currentInstruction)
{
-#if ENABLE(DEBUG_WITH_BREAKPOINT)
- UNUSED_PARAM(currentInstruction);
- breakpoint();
-#else
- JITStubCall stubCall(this, cti_op_debug);
- stubCall.addArgument(Imm32(currentInstruction[1].u.operand));
- stubCall.addArgument(Imm32(currentInstruction[2].u.operand));
- stubCall.addArgument(Imm32(currentInstruction[3].u.operand));
- stubCall.addArgument(Imm32(currentInstruction[4].u.operand));
- stubCall.call();
-#endif
+ load32(codeBlock()->debuggerRequestsAddress(), regT0);
+ Jump noDebuggerRequests = branchTest32(Zero, regT0);
+ callOperation(operationDebug, currentInstruction[1].u.operand);
+ noDebuggerRequests.link(this);
}
-void JIT::emit_op_enter(Instruction*)
+void JIT::emit_op_enter(Instruction* currentInstruction)
{
emitEnterOptimizationCheck();
@@ -1111,261 +943,389 @@ void JIT::emit_op_enter(Instruction*)
// registers to zap stale pointers, to avoid unnecessarily prolonging
// object lifetime and increasing GC pressure.
for (int i = 0; i < m_codeBlock->m_numVars; ++i)
- emitStore(i, jsUndefined());
-}
+ emitStore(virtualRegisterForLocal(i).offset(), jsUndefined());
-void JIT::emit_op_create_activation(Instruction* currentInstruction)
-{
- unsigned activation = currentInstruction[1].u.operand;
-
- Jump activationCreated = branch32(NotEqual, tagFor(activation), TrustedImm32(JSValue::EmptyValueTag));
- JITStubCall(this, cti_op_push_activation).call(activation);
- activationCreated.link(this);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_enter);
+ slowPathCall.call();
}
-void JIT::emit_op_create_arguments(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
-
- Jump argsCreated = branch32(NotEqual, tagFor(dst), TrustedImm32(JSValue::EmptyValueTag));
-
- JITStubCall(this, cti_op_create_arguments).call();
- emitStore(dst, regT1, regT0);
- emitStore(unmodifiedArgumentsRegister(dst), regT1, regT0);
-
- argsCreated.link(this);
-}
-
-void JIT::emit_op_init_lazy_reg(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
-
- emitStore(dst, JSValue());
-}
-
-void JIT::emit_op_get_callee(Instruction* currentInstruction)
+void JIT::emit_op_get_scope(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT0);
- move(TrustedImm32(JSValue::CellTag), regT1);
- emitValueProfilingSite();
- emitStore(dst, regT1, regT0);
+ loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT0);
+ emitStoreCell(dst, regT0);
}
void JIT::emit_op_create_this(Instruction* currentInstruction)
{
int callee = currentInstruction[2].u.operand;
+ WriteBarrierBase<JSCell>* cachedFunction = &currentInstruction[4].u.jsCell;
RegisterID calleeReg = regT0;
+ RegisterID rareDataReg = regT4;
RegisterID resultReg = regT0;
RegisterID allocatorReg = regT1;
RegisterID structureReg = regT2;
+ RegisterID cachedFunctionReg = regT4;
RegisterID scratchReg = regT3;
emitLoadPayload(callee, calleeReg);
- loadPtr(Address(calleeReg, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg);
- loadPtr(Address(calleeReg, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureReg);
+ loadPtr(Address(calleeReg, JSFunction::offsetOfRareData()), rareDataReg);
+ addSlowCase(branchTestPtr(Zero, rareDataReg));
+ loadPtr(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg);
+ loadPtr(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureReg);
addSlowCase(branchTestPtr(Zero, allocatorReg));
+ loadPtr(cachedFunction, cachedFunctionReg);
+ Jump hasSeenMultipleCallees = branchPtr(Equal, cachedFunctionReg, TrustedImmPtr(JSCell::seenMultipleCalleeObjects()));
+ addSlowCase(branchPtr(NotEqual, calleeReg, cachedFunctionReg));
+ hasSeenMultipleCallees.link(this);
+
emitAllocateJSObject(allocatorReg, structureReg, resultReg, scratchReg);
emitStoreCell(currentInstruction[1].u.operand, resultReg);
}
void JIT::emitSlow_op_create_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
+ linkSlowCase(iter); // doesn't have rare data
linkSlowCase(iter); // doesn't have an allocation profile
linkSlowCase(iter); // allocation failed
+ linkSlowCase(iter); // cached function didn't match
- JITStubCall stubCall(this, cti_op_create_this);
- stubCall.addArgument(TrustedImm32(currentInstruction[3].u.operand));
- stubCall.call(currentInstruction[1].u.operand);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_this);
+ slowPathCall.call();
}
-void JIT::emit_op_convert_this(Instruction* currentInstruction)
+void JIT::emit_op_to_this(Instruction* currentInstruction)
{
- unsigned thisRegister = currentInstruction[1].u.operand;
+ WriteBarrierBase<Structure>* cachedStructure = &currentInstruction[2].u.structure;
+ int thisRegister = currentInstruction[1].u.operand;
emitLoad(thisRegister, regT3, regT2);
addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::CellTag)));
- if (shouldEmitProfiling()) {
- loadPtr(Address(regT2, JSCell::structureOffset()), regT0);
- move(regT3, regT1);
- emitValueProfilingSite();
- }
- addSlowCase(branchPtr(Equal, Address(regT2, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
+ addSlowCase(branch8(NotEqual, Address(regT2, JSCell::typeInfoTypeOffset()), TrustedImm32(FinalObjectType)));
+ loadPtr(Address(regT2, JSCell::structureIDOffset()), regT0);
+ loadPtr(cachedStructure, regT2);
+ addSlowCase(branchPtr(NotEqual, regT0, regT2));
}
-void JIT::emitSlow_op_convert_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_to_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- void* globalThis = m_codeBlock->globalObject()->globalThis();
- unsigned thisRegister = currentInstruction[1].u.operand;
-
linkSlowCase(iter);
- if (shouldEmitProfiling()) {
- move(TrustedImm32(JSValue::UndefinedTag), regT1);
- move(TrustedImm32(0), regT0);
- }
- Jump isNotUndefined = branch32(NotEqual, regT3, TrustedImm32(JSValue::UndefinedTag));
- emitValueProfilingSite();
- move(TrustedImmPtr(globalThis), regT0);
- move(TrustedImm32(JSValue::CellTag), regT1);
- emitStore(thisRegister, regT1, regT0);
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_convert_this));
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_this);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_check_tdz(Instruction* currentInstruction)
+{
+ emitLoadTag(currentInstruction[1].u.operand, regT0);
+ addSlowCase(branch32(Equal, regT0, TrustedImm32(JSValue::EmptyValueTag)));
+}
+void JIT::emitSlow_op_check_tdz(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
linkSlowCase(iter);
- if (shouldEmitProfiling()) {
- move(TrustedImm32(JSValue::CellTag), regT1);
- move(TrustedImmPtr(m_vm->stringStructure.get()), regT0);
- }
- isNotUndefined.link(this);
- emitValueProfilingSite();
- JITStubCall stubCall(this, cti_op_convert_this);
- stubCall.addArgument(regT3, regT2);
- stubCall.call(thisRegister);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_throw_tdz_error);
+ slowPathCall.call();
}
void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_profile_will_call);
- stubCall.addArgument(currentInstruction[1].u.operand);
- stubCall.call();
+ load32(m_vm->enabledProfilerAddress(), regT0);
+ Jump profilerDone = branchTestPtr(Zero, regT0);
+ emitLoad(currentInstruction[1].u.operand, regT1, regT0);
+ callOperation(operationProfileWillCall, regT1, regT0);
+ profilerDone.link(this);
}
void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_profile_did_call);
- stubCall.addArgument(currentInstruction[1].u.operand);
- stubCall.call();
+ load32(m_vm->enabledProfilerAddress(), regT0);
+ Jump profilerDone = branchTestPtr(Zero, regT0);
+ emitLoad(currentInstruction[1].u.operand, regT1, regT0);
+ callOperation(operationProfileDidCall, regT1, regT0);
+ profilerDone.link(this);
}
-void JIT::emit_op_get_arguments_length(Instruction* currentInstruction)
+void JIT::emit_op_has_structure_property(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
- int argumentsRegister = currentInstruction[2].u.operand;
- addSlowCase(branch32(NotEqual, tagFor(argumentsRegister), TrustedImm32(JSValue::EmptyValueTag)));
- load32(payloadFor(JSStack::ArgumentCount), regT0);
- sub32(TrustedImm32(1), regT0);
- emitStoreInt32(dst, regT0);
+ int base = currentInstruction[2].u.operand;
+ int enumerator = currentInstruction[4].u.operand;
+
+ emitLoadPayload(base, regT0);
+ emitJumpSlowCaseIfNotJSCell(base);
+
+ emitLoadPayload(enumerator, regT1);
+
+ load32(Address(regT0, JSCell::structureIDOffset()), regT0);
+ addSlowCase(branch32(NotEqual, regT0, Address(regT1, JSPropertyNameEnumerator::cachedStructureIDOffset())));
+
+ move(TrustedImm32(1), regT0);
+ emitStoreBool(dst, regT0);
}
-void JIT::emitSlow_op_get_arguments_length(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::privateCompileHasIndexedProperty(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
+{
+ Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;
+
+ PatchableJump badType;
+
+ // FIXME: Add support for other types like TypedArrays and Arguments.
+ // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034.
+ JumpList slowCases = emitLoadForArrayMode(currentInstruction, arrayMode, badType);
+ move(TrustedImm32(1), regT0);
+ Jump done = jump();
+
+ LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
+
+ patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
+ patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
+
+ patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
+
+ byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
+ m_codeBlock, patchBuffer,
+ ("Baseline has_indexed_property stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
+
+ MacroAssembler::repatchJump(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
+ MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(operationHasIndexedPropertyGeneric));
+}
+
+void JIT::emit_op_has_indexed_property(Instruction* currentInstruction)
{
- linkSlowCase(iter);
int dst = currentInstruction[1].u.operand;
int base = currentInstruction[2].u.operand;
- int ident = currentInstruction[3].u.operand;
+ int property = currentInstruction[3].u.operand;
+ ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+ ByValInfo* byValInfo = m_codeBlock->addByValInfo();
+
+ emitLoadPayload(base, regT0);
+ emitJumpSlowCaseIfNotJSCell(base);
+
+ emitLoadPayload(property, regT1);
+
+    // This is technically incorrect: we are zero-extending an int32. On the hot path this does not matter.
+    // We check the value as if it were a uint32 against m_vectorLength, and that check always fails when the
+    // number was negative, because m_vectorLength is always well below INT_MAX (the total allocation size is
+    // always less than 4GB). So zero-extending still produces the correct outcome. Extending the value to
+    // 64 bits is necessary since it is used in the address calculation; we zero-extend rather than sign-extend
+    // because it makes it easier to re-tag the value in the slow case.
+ zeroExtend32ToPtr(regT1, regT1);
+
+ emitArrayProfilingSiteWithCell(regT0, regT2, profile);
+ and32(TrustedImm32(IndexingShapeMask), regT2);
+
+ JITArrayMode mode = chooseArrayMode(profile);
+ PatchableJump badType;
+
+ // FIXME: Add support for other types like TypedArrays and Arguments.
+ // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034.
+ JumpList slowCases = emitLoadForArrayMode(currentInstruction, mode, badType);
+ move(TrustedImm32(1), regT0);
+
+ addSlowCase(badType);
+ addSlowCase(slowCases);
- JITStubCall stubCall(this, cti_op_get_by_id_generic);
- stubCall.addArgument(base);
- stubCall.addArgument(TrustedImmPtr(&(m_codeBlock->identifier(ident))));
- stubCall.call(dst);
+ Label done = label();
+
+ emitStoreBool(dst, regT0);
+
+ Label nextHotPath = label();
+
+ m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, PatchableJump(), badType, mode, profile, done, nextHotPath));
}
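
Editor's illustrative sketch (not part of the commit): the comment in the block above relies on an unsigned-compare trick, and the plain C++ below, with an invented helper name, shows why zero-extending a possibly negative int32 index still gives the right bounds-check result when vector lengths stay far below 2^31.

    #include <cassert>
    #include <cstdint>

    // Mirrors the JIT's zeroExtend32ToPtr followed by an unsigned compare against
    // m_vectorLength: a negative index reinterpreted as uint32 becomes >= 2^31 and
    // therefore always fails the check.
    static bool passesVectorLengthCheck(int32_t index, uint32_t vectorLength)
    {
        return static_cast<uint32_t>(index) < vectorLength;
    }

    int main()
    {
        assert(passesVectorLengthCheck(3, 10));   // ordinary in-bounds index
        assert(!passesVectorLengthCheck(10, 10)); // out of bounds
        assert(!passesVectorLengthCheck(-1, 10)); // wraps to 0xFFFFFFFF, correctly rejected
        return 0;
    }
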
-void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction)
+void JIT::emitSlow_op_has_indexed_property(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
int dst = currentInstruction[1].u.operand;
- int argumentsRegister = currentInstruction[2].u.operand;
+ int base = currentInstruction[2].u.operand;
int property = currentInstruction[3].u.operand;
- addSlowCase(branch32(NotEqual, tagFor(argumentsRegister), TrustedImm32(JSValue::EmptyValueTag)));
- emitLoad(property, regT1, regT2);
- addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
- add32(TrustedImm32(1), regT2);
- // regT2 now contains the integer index of the argument we want, including this
- load32(payloadFor(JSStack::ArgumentCount), regT3);
- addSlowCase(branch32(AboveOrEqual, regT2, regT3));
+ ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;
+
+ linkSlowCaseIfNotJSCell(iter, base); // base cell check
+ linkSlowCase(iter); // base array check
+ linkSlowCase(iter); // vector length check
+ linkSlowCase(iter); // empty value
+
+ Label slowPath = label();
+
+ emitLoad(base, regT1, regT0);
+ emitLoad(property, regT3, regT2);
+ Call call = callOperation(operationHasIndexedPropertyDefault, dst, regT1, regT0, regT3, regT2, byValInfo);
+
+ m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
+ m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
+ m_byValInstructionIndex++;
+}
+
+void JIT::emit_op_get_direct_pname(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int index = currentInstruction[4].u.operand;
+ int enumerator = currentInstruction[5].u.operand;
+
+ // Check that base is a cell
+ emitLoadPayload(base, regT0);
+ emitJumpSlowCaseIfNotJSCell(base);
+
+ // Check the structure
+ emitLoadPayload(enumerator, regT1);
+ load32(Address(regT0, JSCell::structureIDOffset()), regT2);
+ addSlowCase(branch32(NotEqual, regT2, Address(regT1, JSPropertyNameEnumerator::cachedStructureIDOffset())));
+
+ // Compute the offset
+ emitLoadPayload(index, regT2);
+ // If index is less than the enumerator's cached inline storage, then it's an inline access
+ Jump outOfLineAccess = branch32(AboveOrEqual, regT2, Address(regT1, JSPropertyNameEnumerator::cachedInlineCapacityOffset()));
+ addPtr(TrustedImm32(JSObject::offsetOfInlineStorage()), regT0);
+ load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
+ load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
+ Jump done = jump();
+
+ // Otherwise it's out of line
+ outOfLineAccess.link(this);
+ loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
+ addSlowCase(branchIfNotToSpace(regT0));
+ sub32(Address(regT1, JSPropertyNameEnumerator::cachedInlineCapacityOffset()), regT2);
neg32(regT2);
- loadPtr(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT0);
- loadPtr(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT1);
+ int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue);
+ load32(BaseIndex(regT0, regT2, TimesEight, offsetOfFirstProperty + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
+ load32(BaseIndex(regT0, regT2, TimesEight, offsetOfFirstProperty + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
+
+ done.link(this);
emitValueProfilingSite();
emitStore(dst, regT1, regT0);
}
-void JIT::emitSlow_op_get_argument_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_get_direct_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned arguments = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
-
- linkSlowCase(iter);
- Jump skipArgumentsCreation = jump();
-
+ int base = currentInstruction[2].u.operand;
+ linkSlowCaseIfNotJSCell(iter, base);
linkSlowCase(iter);
linkSlowCase(iter);
- JITStubCall(this, cti_op_create_arguments).call();
- emitStore(arguments, regT1, regT0);
- emitStore(unmodifiedArgumentsRegister(arguments), regT1, regT0);
-
- skipArgumentsCreation.link(this);
- JITStubCall stubCall(this, cti_op_get_by_val_generic);
- stubCall.addArgument(arguments);
- stubCall.addArgument(property);
- stubCall.callWithValueProfiling(dst);
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_direct_pname);
+ slowPathCall.call();
}
-void JIT::emit_op_put_to_base(Instruction* currentInstruction)
+void JIT::emit_op_enumerator_structure_pname(Instruction* currentInstruction)
{
- int base = currentInstruction[1].u.operand;
- int id = currentInstruction[2].u.operand;
- int value = currentInstruction[3].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int enumerator = currentInstruction[2].u.operand;
+ int index = currentInstruction[3].u.operand;
- PutToBaseOperation* operation = currentInstruction[4].u.putToBaseOperation;
+ emitLoadPayload(index, regT0);
+ emitLoadPayload(enumerator, regT1);
+ Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endStructurePropertyIndexOffset()));
+ move(TrustedImm32(JSValue::NullTag), regT2);
+ move(TrustedImm32(0), regT0);
- switch (operation->m_kind) {
- case PutToBaseOperation::GlobalVariablePutChecked:
- addSlowCase(branchTest8(NonZero, AbsoluteAddress(operation->m_predicatePointer)));
- case PutToBaseOperation::GlobalVariablePut: {
- JSGlobalObject* globalObject = m_codeBlock->globalObject();
- if (operation->m_isDynamic)
- addSlowCase(branchPtr(NotEqual, payloadFor(base), TrustedImmPtr(globalObject)));
+ Jump done = jump();
+ inBounds.link(this);
- emitLoad(value, regT1, regT0);
- storePtr(regT0, reinterpret_cast<char*>(operation->m_registerAddress) + OBJECT_OFFSETOF(JSValue, u.asBits.payload));
- storePtr(regT1, reinterpret_cast<char*>(operation->m_registerAddress) + OBJECT_OFFSETOF(JSValue, u.asBits.tag));
- if (Heap::isWriteBarrierEnabled())
- emitWriteBarrier(globalObject, regT0, regT2, ShouldFilterImmediates, WriteBarrierForVariableAccess);
- break;
- }
- case PutToBaseOperation::VariablePut: {
- loadPtr(payloadFor(base), regT3);
- emitLoad(value, regT1, regT0);
- loadPtr(Address(regT3, JSVariableObject::offsetOfRegisters()), regT2);
- store32(regT0, Address(regT2, operation->m_offset * sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
- store32(regT1, Address(regT2, operation->m_offset * sizeof(Register) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
- if (Heap::isWriteBarrierEnabled())
- emitWriteBarrier(regT3, regT1, regT0, regT2, ShouldFilterImmediates, WriteBarrierForVariableAccess);
- break;
- }
+ loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1);
+ loadPtr(BaseIndex(regT1, regT0, timesPtr()), regT0);
+ move(TrustedImm32(JSValue::CellTag), regT2);
- case PutToBaseOperation::GlobalPropertyPut: {
- JSGlobalObject* globalObject = m_codeBlock->globalObject();
- loadPtr(payloadFor(base), regT3);
- emitLoad(value, regT1, regT0);
- loadPtr(&operation->m_structure, regT2);
- addSlowCase(branchPtr(NotEqual, Address(regT3, JSCell::structureOffset()), regT2));
- ASSERT(!operation->m_structure || !operation->m_structure->inlineCapacity());
- loadPtr(Address(regT3, JSObject::butterflyOffset()), regT2);
- load32(&operation->m_offsetInButterfly, regT3);
- storePtr(regT0, BaseIndex(regT2, regT3, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
- storePtr(regT1, BaseIndex(regT2, regT3, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
- if (Heap::isWriteBarrierEnabled())
- emitWriteBarrier(globalObject, regT1, regT2, ShouldFilterImmediates, WriteBarrierForVariableAccess);
- break;
- }
+ done.link(this);
+ emitStore(dst, regT2, regT0);
+}
- case PutToBaseOperation::Uninitialised:
- case PutToBaseOperation::Readonly:
- case PutToBaseOperation::Generic:
- JITStubCall stubCall(this, cti_op_put_to_base);
-
- stubCall.addArgument(TrustedImm32(base));
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(id)));
- stubCall.addArgument(TrustedImm32(value));
- stubCall.addArgument(TrustedImmPtr(operation));
- stubCall.call();
- break;
+void JIT::emit_op_enumerator_generic_pname(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int enumerator = currentInstruction[2].u.operand;
+ int index = currentInstruction[3].u.operand;
+
+ emitLoadPayload(index, regT0);
+ emitLoadPayload(enumerator, regT1);
+ Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endGenericPropertyIndexOffset()));
+
+ move(TrustedImm32(JSValue::NullTag), regT2);
+ move(TrustedImm32(0), regT0);
+
+ Jump done = jump();
+ inBounds.link(this);
+
+ loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1);
+ loadPtr(BaseIndex(regT1, regT0, timesPtr()), regT0);
+ move(TrustedImm32(JSValue::CellTag), regT2);
+
+ done.link(this);
+ emitStore(dst, regT2, regT0);
+}
+
+void JIT::emit_op_profile_type(Instruction* currentInstruction)
+{
+ TypeLocation* cachedTypeLocation = currentInstruction[2].u.location;
+ int valueToProfile = currentInstruction[1].u.operand;
+
+ // Load payload in T0. Load tag in T3.
+ emitLoadPayload(valueToProfile, regT0);
+ emitLoadTag(valueToProfile, regT3);
+
+ JumpList jumpToEnd;
+
+ jumpToEnd.append(branch32(Equal, regT3, TrustedImm32(JSValue::EmptyValueTag)));
+
+ // Compile in a predictive type check, if possible, to see if we can skip writing to the log.
+ // These typechecks are inlined to match those of the 32-bit JSValue type checks.
+ if (cachedTypeLocation->m_lastSeenType == TypeUndefined)
+ jumpToEnd.append(branch32(Equal, regT3, TrustedImm32(JSValue::UndefinedTag)));
+ else if (cachedTypeLocation->m_lastSeenType == TypeNull)
+ jumpToEnd.append(branch32(Equal, regT3, TrustedImm32(JSValue::NullTag)));
+ else if (cachedTypeLocation->m_lastSeenType == TypeBoolean)
+ jumpToEnd.append(branch32(Equal, regT3, TrustedImm32(JSValue::BooleanTag)));
+ else if (cachedTypeLocation->m_lastSeenType == TypeMachineInt)
+ jumpToEnd.append(branch32(Equal, regT3, TrustedImm32(JSValue::Int32Tag)));
+ else if (cachedTypeLocation->m_lastSeenType == TypeNumber) {
+ jumpToEnd.append(branch32(Below, regT3, TrustedImm32(JSValue::LowestTag)));
+ jumpToEnd.append(branch32(Equal, regT3, TrustedImm32(JSValue::Int32Tag)));
+ } else if (cachedTypeLocation->m_lastSeenType == TypeString) {
+ Jump isNotCell = branch32(NotEqual, regT3, TrustedImm32(JSValue::CellTag));
+ jumpToEnd.append(branch8(Equal, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType)));
+ isNotCell.link(this);
}
+
+ // Load the type profiling log into T2.
+ TypeProfilerLog* cachedTypeProfilerLog = m_vm->typeProfilerLog();
+ move(TrustedImmPtr(cachedTypeProfilerLog), regT2);
+
+ // Load the next log entry into T1.
+ loadPtr(Address(regT2, TypeProfilerLog::currentLogEntryOffset()), regT1);
+
+ // Store the JSValue onto the log entry.
+ store32(regT0, Address(regT1, TypeProfilerLog::LogEntry::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+ store32(regT3, Address(regT1, TypeProfilerLog::LogEntry::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+
+ // Store the structureID of the cell if argument is a cell, otherwise, store 0 on the log entry.
+ Jump notCell = branch32(NotEqual, regT3, TrustedImm32(JSValue::CellTag));
+ load32(Address(regT0, JSCell::structureIDOffset()), regT0);
+ store32(regT0, Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset()));
+ Jump skipNotCell = jump();
+ notCell.link(this);
+ store32(TrustedImm32(0), Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset()));
+ skipNotCell.link(this);
+
+ // Store the typeLocation on the log entry.
+ move(TrustedImmPtr(cachedTypeLocation), regT0);
+ store32(regT0, Address(regT1, TypeProfilerLog::LogEntry::locationOffset()));
+
+ // Increment the current log entry.
+ addPtr(TrustedImm32(sizeof(TypeProfilerLog::LogEntry)), regT1);
+ store32(regT1, Address(regT2, TypeProfilerLog::currentLogEntryOffset()));
+ jumpToEnd.append(branchPtr(NotEqual, regT1, TrustedImmPtr(cachedTypeProfilerLog->logEndPtr())));
+ // Clear the log if we're at the end of the log.
+ callOperation(operationProcessTypeProfilerLog);
+
+ jumpToEnd.link(this);
}
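
A hedged sketch of the log-append pattern the block above emits inline: write the value, structure ID, and type location at the current entry, advance the cursor, and only call out to a flush routine when the cursor reaches the end of the buffer. The types and names below are invented for illustration and are not the JSC TypeProfilerLog API.

    #include <array>
    #include <cstddef>
    #include <cstdint>

    struct LogEntry {
        uint64_t value;       // stands in for the boxed JSValue stored above
        uint32_t structureID; // 0 when the value is not a cell
        const void* location; // stands in for the TypeLocation pointer
    };

    template<size_t Capacity>
    class ProfileLog {
    public:
        // Append an entry and flush only when the buffer fills up, mirroring the
        // "store, advance currentLogEntry, compare against logEndPtr, call operation" sequence.
        void append(const LogEntry& entry)
        {
            m_entries[m_cursor++] = entry;
            if (m_cursor == Capacity)
                flush();
        }

    private:
        void flush() { m_cursor = 0; } // stand-in for operationProcessTypeProfilerLog

        std::array<LogEntry, Capacity> m_entries {};
        size_t m_cursor { 0 };
    };

    int main()
    {
        ProfileLog<4> log;
        for (int i = 0; i < 10; ++i)
            log.append({ static_cast<uint64_t>(i), 0u, nullptr });
        return 0;
    }
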
} // namespace JSC
diff --git a/Source/JavaScriptCore/jit/JITOperations.cpp b/Source/JavaScriptCore/jit/JITOperations.cpp
new file mode 100644
index 000000000..868eed755
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITOperations.cpp
@@ -0,0 +1,2237 @@
+/*
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JITOperations.h"
+
+#if ENABLE(JIT)
+
+#include "ArrayConstructor.h"
+#include "CommonSlowPaths.h"
+#include "DFGCompilationMode.h"
+#include "DFGDriver.h"
+#include "DFGOSREntry.h"
+#include "DFGThunks.h"
+#include "DFGWorklist.h"
+#include "Debugger.h"
+#include "DirectArguments.h"
+#include "Error.h"
+#include "ErrorHandlingScope.h"
+#include "ExceptionFuzz.h"
+#include "GetterSetter.h"
+#include "HostCallReturnValue.h"
+#include "JIT.h"
+#include "JITExceptions.h"
+#include "JITToDFGDeferredCompilationCallback.h"
+#include "JSCInlines.h"
+#include "JSGeneratorFunction.h"
+#include "JSGlobalObjectFunctions.h"
+#include "JSLexicalEnvironment.h"
+#include "JSPropertyNameEnumerator.h"
+#include "JSStackInlines.h"
+#include "JSWithScope.h"
+#include "LegacyProfiler.h"
+#include "ObjectConstructor.h"
+#include "PropertyName.h"
+#include "Repatch.h"
+#include "ScopedArguments.h"
+#include "TestRunnerUtils.h"
+#include "TypeProfilerLog.h"
+#include "VMInlines.h"
+#include <wtf/InlineASM.h>
+
+namespace JSC {
+
+extern "C" {
+
+#if COMPILER(MSVC)
+void * _ReturnAddress(void);
+#pragma intrinsic(_ReturnAddress)
+
+#define OUR_RETURN_ADDRESS _ReturnAddress()
+#else
+#define OUR_RETURN_ADDRESS __builtin_return_address(0)
+#endif
+
+#if ENABLE(OPCODE_SAMPLING)
+#define CTI_SAMPLER vm->interpreter->sampler()
+#else
+#define CTI_SAMPLER 0
+#endif
+
+
+void JIT_OPERATION operationThrowStackOverflowError(ExecState* exec, CodeBlock* codeBlock)
+{
+ // We pass in our own code block, because the callframe hasn't been populated.
+ VM* vm = codeBlock->vm();
+
+ VMEntryFrame* vmEntryFrame = vm->topVMEntryFrame;
+ CallFrame* callerFrame = exec->callerFrame(vmEntryFrame);
+ if (!callerFrame)
+ callerFrame = exec;
+
+ NativeCallFrameTracerWithRestore tracer(vm, vmEntryFrame, callerFrame);
+ throwStackOverflowError(callerFrame);
+}
+
+#if ENABLE(WEBASSEMBLY)
+void JIT_OPERATION operationThrowDivideError(ExecState* exec)
+{
+ VM* vm = &exec->vm();
+ VMEntryFrame* vmEntryFrame = vm->topVMEntryFrame;
+ CallFrame* callerFrame = exec->callerFrame(vmEntryFrame);
+
+ NativeCallFrameTracerWithRestore tracer(vm, vmEntryFrame, callerFrame);
+ ErrorHandlingScope errorScope(*vm);
+ vm->throwException(callerFrame, createError(callerFrame, ASCIILiteral("Division by zero or division overflow.")));
+}
+
+void JIT_OPERATION operationThrowOutOfBoundsAccessError(ExecState* exec)
+{
+ VM* vm = &exec->vm();
+ VMEntryFrame* vmEntryFrame = vm->topVMEntryFrame;
+ CallFrame* callerFrame = exec->callerFrame(vmEntryFrame);
+
+ NativeCallFrameTracerWithRestore tracer(vm, vmEntryFrame, callerFrame);
+ ErrorHandlingScope errorScope(*vm);
+ vm->throwException(callerFrame, createError(callerFrame, ASCIILiteral("Out-of-bounds access.")));
+}
+#endif
+
+int32_t JIT_OPERATION operationCallArityCheck(ExecState* exec)
+{
+ VM* vm = &exec->vm();
+ JSStack& stack = vm->interpreter->stack();
+
+ int32_t missingArgCount = CommonSlowPaths::arityCheckFor(exec, &stack, CodeForCall);
+ if (missingArgCount < 0) {
+ VMEntryFrame* vmEntryFrame = vm->topVMEntryFrame;
+ CallFrame* callerFrame = exec->callerFrame(vmEntryFrame);
+ NativeCallFrameTracerWithRestore tracer(vm, vmEntryFrame, callerFrame);
+ throwStackOverflowError(callerFrame);
+ }
+
+ return missingArgCount;
+}
+
+int32_t JIT_OPERATION operationConstructArityCheck(ExecState* exec)
+{
+ VM* vm = &exec->vm();
+ JSStack& stack = vm->interpreter->stack();
+
+ int32_t missingArgCount = CommonSlowPaths::arityCheckFor(exec, &stack, CodeForConstruct);
+ if (missingArgCount < 0) {
+ VMEntryFrame* vmEntryFrame = vm->topVMEntryFrame;
+ CallFrame* callerFrame = exec->callerFrame(vmEntryFrame);
+ NativeCallFrameTracerWithRestore tracer(vm, vmEntryFrame, callerFrame);
+ throwStackOverflowError(callerFrame);
+ }
+
+ return missingArgCount;
+}
+
+EncodedJSValue JIT_OPERATION operationGetById(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue base, UniquedStringImpl* uid)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ stubInfo->tookSlowPath = true;
+
+ JSValue baseValue = JSValue::decode(base);
+ PropertySlot slot(baseValue, PropertySlot::InternalMethodType::Get);
+ Identifier ident = Identifier::fromUid(vm, uid);
+ return JSValue::encode(baseValue.get(exec, ident, slot));
+}
+
+EncodedJSValue JIT_OPERATION operationGetByIdGeneric(ExecState* exec, EncodedJSValue base, UniquedStringImpl* uid)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ JSValue baseValue = JSValue::decode(base);
+ PropertySlot slot(baseValue, PropertySlot::InternalMethodType::Get);
+ Identifier ident = Identifier::fromUid(vm, uid);
+ return JSValue::encode(baseValue.get(exec, ident, slot));
+}
+
+EncodedJSValue JIT_OPERATION operationGetByIdOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue base, UniquedStringImpl* uid)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+ Identifier ident = Identifier::fromUid(vm, uid);
+
+ JSValue baseValue = JSValue::decode(base);
+ PropertySlot slot(baseValue, PropertySlot::InternalMethodType::Get);
+
+ bool hasResult = baseValue.getPropertySlot(exec, ident, slot);
+ if (stubInfo->considerCaching())
+ repatchGetByID(exec, baseValue, ident, slot, *stubInfo);
+
+    return JSValue::encode(hasResult ? slot.getValue(exec, ident) : jsUndefined());
+}
+
+EncodedJSValue JIT_OPERATION operationInOptimize(ExecState* exec, StructureStubInfo* stubInfo, JSCell* base, UniquedStringImpl* key)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ if (!base->isObject()) {
+ vm->throwException(exec, createInvalidInParameterError(exec, base));
+ return JSValue::encode(jsUndefined());
+ }
+
+ AccessType accessType = static_cast<AccessType>(stubInfo->accessType);
+
+ Identifier ident = Identifier::fromUid(vm, key);
+ PropertySlot slot(base, PropertySlot::InternalMethodType::HasProperty);
+ bool result = asObject(base)->getPropertySlot(exec, ident, slot);
+
+ RELEASE_ASSERT(accessType == stubInfo->accessType);
+
+ if (stubInfo->considerCaching())
+ repatchIn(exec, base, ident, result, slot, *stubInfo);
+
+ return JSValue::encode(jsBoolean(result));
+}
+
+EncodedJSValue JIT_OPERATION operationIn(ExecState* exec, StructureStubInfo* stubInfo, JSCell* base, UniquedStringImpl* key)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ stubInfo->tookSlowPath = true;
+
+ if (!base->isObject()) {
+ vm->throwException(exec, createInvalidInParameterError(exec, base));
+ return JSValue::encode(jsUndefined());
+ }
+
+ Identifier ident = Identifier::fromUid(vm, key);
+ return JSValue::encode(jsBoolean(asObject(base)->hasProperty(exec, ident)));
+}
+
+EncodedJSValue JIT_OPERATION operationGenericIn(ExecState* exec, JSCell* base, EncodedJSValue key)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ return JSValue::encode(jsBoolean(CommonSlowPaths::opIn(exec, JSValue::decode(key), base)));
+}
+
+void JIT_OPERATION operationPutByIdStrict(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ stubInfo->tookSlowPath = true;
+
+ Identifier ident = Identifier::fromUid(vm, uid);
+ PutPropertySlot slot(JSValue::decode(encodedBase), true, exec->codeBlock()->putByIdContext());
+ JSValue::decode(encodedBase).putInline(exec, ident, JSValue::decode(encodedValue), slot);
+}
+
+void JIT_OPERATION operationPutByIdNonStrict(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ stubInfo->tookSlowPath = true;
+
+ Identifier ident = Identifier::fromUid(vm, uid);
+ PutPropertySlot slot(JSValue::decode(encodedBase), false, exec->codeBlock()->putByIdContext());
+ JSValue::decode(encodedBase).putInline(exec, ident, JSValue::decode(encodedValue), slot);
+}
+
+void JIT_OPERATION operationPutByIdDirectStrict(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ stubInfo->tookSlowPath = true;
+
+ Identifier ident = Identifier::fromUid(vm, uid);
+ PutPropertySlot slot(JSValue::decode(encodedBase), true, exec->codeBlock()->putByIdContext());
+ asObject(JSValue::decode(encodedBase))->putDirect(exec->vm(), ident, JSValue::decode(encodedValue), slot);
+}
+
+void JIT_OPERATION operationPutByIdDirectNonStrict(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ stubInfo->tookSlowPath = true;
+
+ Identifier ident = Identifier::fromUid(vm, uid);
+ PutPropertySlot slot(JSValue::decode(encodedBase), false, exec->codeBlock()->putByIdContext());
+ asObject(JSValue::decode(encodedBase))->putDirect(exec->vm(), ident, JSValue::decode(encodedValue), slot);
+}
+
+void JIT_OPERATION operationPutByIdStrictOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ Identifier ident = Identifier::fromUid(vm, uid);
+ AccessType accessType = static_cast<AccessType>(stubInfo->accessType);
+
+ JSValue value = JSValue::decode(encodedValue);
+ JSValue baseValue = JSValue::decode(encodedBase);
+ PutPropertySlot slot(baseValue, true, exec->codeBlock()->putByIdContext());
+
+ Structure* structure = baseValue.isCell() ? baseValue.asCell()->structure(*vm) : nullptr;
+ baseValue.putInline(exec, ident, value, slot);
+
+ if (accessType != static_cast<AccessType>(stubInfo->accessType))
+ return;
+
+ if (stubInfo->considerCaching())
+ repatchPutByID(exec, baseValue, structure, ident, slot, *stubInfo, NotDirect);
+}
+
+void JIT_OPERATION operationPutByIdNonStrictOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ Identifier ident = Identifier::fromUid(vm, uid);
+ AccessType accessType = static_cast<AccessType>(stubInfo->accessType);
+
+ JSValue value = JSValue::decode(encodedValue);
+ JSValue baseValue = JSValue::decode(encodedBase);
+ PutPropertySlot slot(baseValue, false, exec->codeBlock()->putByIdContext());
+
+ Structure* structure = baseValue.isCell() ? baseValue.asCell()->structure(*vm) : nullptr;
+ baseValue.putInline(exec, ident, value, slot);
+
+ if (accessType != static_cast<AccessType>(stubInfo->accessType))
+ return;
+
+ if (stubInfo->considerCaching())
+ repatchPutByID(exec, baseValue, structure, ident, slot, *stubInfo, NotDirect);
+}
+
+void JIT_OPERATION operationPutByIdDirectStrictOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ Identifier ident = Identifier::fromUid(vm, uid);
+ AccessType accessType = static_cast<AccessType>(stubInfo->accessType);
+
+ JSValue value = JSValue::decode(encodedValue);
+ JSObject* baseObject = asObject(JSValue::decode(encodedBase));
+ PutPropertySlot slot(baseObject, true, exec->codeBlock()->putByIdContext());
+
+ Structure* structure = baseObject->structure(*vm);
+ baseObject->putDirect(exec->vm(), ident, value, slot);
+
+ if (accessType != static_cast<AccessType>(stubInfo->accessType))
+ return;
+
+ if (stubInfo->considerCaching())
+ repatchPutByID(exec, baseObject, structure, ident, slot, *stubInfo, Direct);
+}
+
+void JIT_OPERATION operationPutByIdDirectNonStrictOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ Identifier ident = Identifier::fromUid(vm, uid);
+ AccessType accessType = static_cast<AccessType>(stubInfo->accessType);
+
+ JSValue value = JSValue::decode(encodedValue);
+ JSObject* baseObject = asObject(JSValue::decode(encodedBase));
+ PutPropertySlot slot(baseObject, false, exec->codeBlock()->putByIdContext());
+
+ Structure* structure = baseObject->structure(*vm);
+ baseObject->putDirect(exec->vm(), ident, value, slot);
+
+ if (accessType != static_cast<AccessType>(stubInfo->accessType))
+ return;
+
+ if (stubInfo->considerCaching())
+ repatchPutByID(exec, baseObject, structure, ident, slot, *stubInfo, Direct);
+}
+
+void JIT_OPERATION operationReallocateStorageAndFinishPut(ExecState* exec, JSObject* base, Structure* structure, PropertyOffset offset, EncodedJSValue value)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ ASSERT(structure->outOfLineCapacity() > base->structure(vm)->outOfLineCapacity());
+ ASSERT(!vm.heap.storageAllocator().fastPathShouldSucceed(structure->outOfLineCapacity() * sizeof(JSValue)));
+ base->setStructureAndReallocateStorageIfNecessary(vm, structure);
+ base->putDirect(vm, offset, JSValue::decode(value));
+}
+
+ALWAYS_INLINE static bool isStringOrSymbol(JSValue value)
+{
+ return value.isString() || value.isSymbol();
+}
+
+static void putByVal(CallFrame* callFrame, JSValue baseValue, JSValue subscript, JSValue value, ByValInfo* byValInfo)
+{
+ VM& vm = callFrame->vm();
+ if (LIKELY(subscript.isUInt32())) {
+ byValInfo->tookSlowPath = true;
+ uint32_t i = subscript.asUInt32();
+ if (baseValue.isObject()) {
+ JSObject* object = asObject(baseValue);
+ if (object->canSetIndexQuickly(i))
+ object->setIndexQuickly(callFrame->vm(), i, value);
+ else {
+ // FIXME: This will make us think that in-bounds typed array accesses are actually
+ // out-of-bounds.
+ // https://bugs.webkit.org/show_bug.cgi?id=149886
+ byValInfo->arrayProfile->setOutOfBounds();
+ object->methodTable(vm)->putByIndex(object, callFrame, i, value, callFrame->codeBlock()->isStrictMode());
+ }
+ } else
+ baseValue.putByIndex(callFrame, i, value, callFrame->codeBlock()->isStrictMode());
+ return;
+ }
+
+ auto property = subscript.toPropertyKey(callFrame);
+ // Don't put to an object if toString threw an exception.
+ if (callFrame->vm().exception())
+ return;
+
+ if (byValInfo->stubInfo && (!isStringOrSymbol(subscript) || byValInfo->cachedId != property))
+ byValInfo->tookSlowPath = true;
+
+ PutPropertySlot slot(baseValue, callFrame->codeBlock()->isStrictMode());
+ baseValue.putInline(callFrame, property, value, slot);
+}
+
+static void directPutByVal(CallFrame* callFrame, JSObject* baseObject, JSValue subscript, JSValue value, ByValInfo* byValInfo)
+{
+ bool isStrictMode = callFrame->codeBlock()->isStrictMode();
+ if (LIKELY(subscript.isUInt32())) {
+ // Despite its name, JSValue::isUInt32 will return true only for positive boxed int32_t; all those values are valid array indices.
+ byValInfo->tookSlowPath = true;
+ uint32_t index = subscript.asUInt32();
+ ASSERT(isIndex(index));
+ if (baseObject->canSetIndexQuicklyForPutDirect(index)) {
+ baseObject->setIndexQuickly(callFrame->vm(), index, value);
+ return;
+ }
+
+ // FIXME: This will make us think that in-bounds typed array accesses are actually
+ // out-of-bounds.
+ // https://bugs.webkit.org/show_bug.cgi?id=149886
+ byValInfo->arrayProfile->setOutOfBounds();
+ baseObject->putDirectIndex(callFrame, index, value, 0, isStrictMode ? PutDirectIndexShouldThrow : PutDirectIndexShouldNotThrow);
+ return;
+ }
+
+ if (subscript.isDouble()) {
+ double subscriptAsDouble = subscript.asDouble();
+ uint32_t subscriptAsUInt32 = static_cast<uint32_t>(subscriptAsDouble);
+ if (subscriptAsDouble == subscriptAsUInt32 && isIndex(subscriptAsUInt32)) {
+ byValInfo->tookSlowPath = true;
+ baseObject->putDirectIndex(callFrame, subscriptAsUInt32, value, 0, isStrictMode ? PutDirectIndexShouldThrow : PutDirectIndexShouldNotThrow);
+ return;
+ }
+ }
+
+ // Don't put to an object if toString threw an exception.
+ auto property = subscript.toPropertyKey(callFrame);
+ if (callFrame->vm().exception())
+ return;
+
+ if (Optional<uint32_t> index = parseIndex(property)) {
+ byValInfo->tookSlowPath = true;
+ baseObject->putDirectIndex(callFrame, index.value(), value, 0, isStrictMode ? PutDirectIndexShouldThrow : PutDirectIndexShouldNotThrow);
+ return;
+ }
+
+ if (byValInfo->stubInfo && (!isStringOrSymbol(subscript) || byValInfo->cachedId != property))
+ byValInfo->tookSlowPath = true;
+
+ PutPropertySlot slot(baseObject, isStrictMode);
+ baseObject->putDirect(callFrame->vm(), property, value, slot);
+}
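
The helper above tries progressively more generic interpretations of the subscript. The simplified sketch below (plain C++ with hypothetical names, not the JSC API, and omitting the isIndex() edge case) restates that dispatch order: an int32-like subscript goes straight to an indexed put, a double that is exactly an in-range index is reconverted, a property name that parses as an index is treated as indexed, and everything else becomes a named put.

    #include <cstdint>
    #include <optional>
    #include <string>

    // Hypothetical stand-ins for the real put paths.
    static void putIndexed(uint32_t) {}
    static void putNamed(const std::string&) {}

    // Dispatch in the same order as directPutByVal: uint32 fast path, exact-double
    // index, index-looking property name, then the generic named put.
    static void putSubscript(std::optional<uint32_t> asUInt32, std::optional<double> asDouble, const std::string& asPropertyKey)
    {
        if (asUInt32) {
            putIndexed(*asUInt32);
            return;
        }
        if (asDouble) {
            double d = *asDouble;
            if (d >= 0 && d < 4294967296.0) { // only in-range doubles can be exact indices
                uint32_t truncated = static_cast<uint32_t>(d);
                if (static_cast<double>(truncated) == d) {
                    putIndexed(truncated);
                    return;
                }
            }
        }
        // parseIndex stand-in: a purely decimal property name is treated as an index.
        if (!asPropertyKey.empty() && asPropertyKey.find_first_not_of("0123456789") == std::string::npos) {
            putIndexed(static_cast<uint32_t>(std::stoul(asPropertyKey)));
            return;
        }
        putNamed(asPropertyKey);
    }

    int main()
    {
        putSubscript(5u, std::nullopt, "");                  // uint32 fast path
        putSubscript(std::nullopt, 7.0, "");                 // exact double index
        putSubscript(std::nullopt, std::nullopt, "length");  // generic named put
        return 0;
    }
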
+
+enum class OptimizationResult {
+ NotOptimized,
+ SeenOnce,
+ Optimized,
+ GiveUp,
+};
+
+static OptimizationResult tryPutByValOptimize(ExecState* exec, JSValue baseValue, JSValue subscript, ByValInfo* byValInfo, ReturnAddressPtr returnAddress)
+{
+ // See if it's worth optimizing at all.
+ OptimizationResult optimizationResult = OptimizationResult::NotOptimized;
+
+ VM& vm = exec->vm();
+
+ if (baseValue.isObject() && subscript.isInt32()) {
+ JSObject* object = asObject(baseValue);
+
+ ASSERT(exec->bytecodeOffset());
+ ASSERT(!byValInfo->stubRoutine);
+
+ Structure* structure = object->structure(vm);
+ if (hasOptimizableIndexing(structure)) {
+ // Attempt to optimize.
+ JITArrayMode arrayMode = jitArrayModeForStructure(structure);
+ if (jitArrayModePermitsPut(arrayMode) && arrayMode != byValInfo->arrayMode) {
+ CodeBlock* codeBlock = exec->codeBlock();
+ ConcurrentJITLocker locker(codeBlock->m_lock);
+ byValInfo->arrayProfile->computeUpdatedPrediction(locker, codeBlock, structure);
+
+ JIT::compilePutByVal(&vm, exec->codeBlock(), byValInfo, returnAddress, arrayMode);
+ optimizationResult = OptimizationResult::Optimized;
+ }
+ }
+
+        // If we failed to patch and the object intercepts indexed get, give up immediately rather than waiting for the usual 10 slow-path hits.
+ if (optimizationResult != OptimizationResult::Optimized && object->structure(vm)->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero())
+ optimizationResult = OptimizationResult::GiveUp;
+ }
+
+ if (baseValue.isObject() && isStringOrSymbol(subscript)) {
+ const Identifier propertyName = subscript.toPropertyKey(exec);
+ if (!subscript.isString() || !parseIndex(propertyName)) {
+ ASSERT(exec->bytecodeOffset());
+ ASSERT(!byValInfo->stubRoutine);
+ if (byValInfo->seen) {
+ if (byValInfo->cachedId == propertyName) {
+ JIT::compilePutByValWithCachedId(&vm, exec->codeBlock(), byValInfo, returnAddress, NotDirect, propertyName);
+ optimizationResult = OptimizationResult::Optimized;
+ } else {
+                    // Seems like a generic property access site.
+ optimizationResult = OptimizationResult::GiveUp;
+ }
+ } else {
+ byValInfo->seen = true;
+ byValInfo->cachedId = propertyName;
+ optimizationResult = OptimizationResult::SeenOnce;
+ }
+ }
+ }
+
+ if (optimizationResult != OptimizationResult::Optimized && optimizationResult != OptimizationResult::SeenOnce) {
+        // If we take the slow path more than 10 times without patching, then make sure we
+ // never make that mistake again. For cases where we see non-index-intercepting
+ // objects, this gives 10 iterations worth of opportunity for us to observe
+ // that the put_by_val may be polymorphic. We count up slowPathCount even if
+ // the result is GiveUp.
+ if (++byValInfo->slowPathCount >= 10)
+ optimizationResult = OptimizationResult::GiveUp;
+ }
+
+ return optimizationResult;
+}
+
+void JIT_OPERATION operationPutByValOptimize(ExecState* exec, EncodedJSValue encodedBaseValue, EncodedJSValue encodedSubscript, EncodedJSValue encodedValue, ByValInfo* byValInfo)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ JSValue baseValue = JSValue::decode(encodedBaseValue);
+ JSValue subscript = JSValue::decode(encodedSubscript);
+ JSValue value = JSValue::decode(encodedValue);
+ if (tryPutByValOptimize(exec, baseValue, subscript, byValInfo, ReturnAddressPtr(OUR_RETURN_ADDRESS)) == OptimizationResult::GiveUp) {
+ // Don't ever try to optimize.
+ byValInfo->tookSlowPath = true;
+ ctiPatchCallByReturnAddress(ReturnAddressPtr(OUR_RETURN_ADDRESS), FunctionPtr(operationPutByValGeneric));
+ }
+ putByVal(exec, baseValue, subscript, value, byValInfo);
+}
+
+static OptimizationResult tryDirectPutByValOptimize(ExecState* exec, JSObject* object, JSValue subscript, ByValInfo* byValInfo, ReturnAddressPtr returnAddress)
+{
+ // See if it's worth optimizing at all.
+ OptimizationResult optimizationResult = OptimizationResult::NotOptimized;
+
+ VM& vm = exec->vm();
+
+ if (subscript.isInt32()) {
+ ASSERT(exec->bytecodeOffset());
+ ASSERT(!byValInfo->stubRoutine);
+
+ Structure* structure = object->structure(vm);
+ if (hasOptimizableIndexing(structure)) {
+ // Attempt to optimize.
+ JITArrayMode arrayMode = jitArrayModeForStructure(structure);
+ if (jitArrayModePermitsPut(arrayMode) && arrayMode != byValInfo->arrayMode) {
+ CodeBlock* codeBlock = exec->codeBlock();
+ ConcurrentJITLocker locker(codeBlock->m_lock);
+ byValInfo->arrayProfile->computeUpdatedPrediction(locker, codeBlock, structure);
+
+ JIT::compileDirectPutByVal(&vm, exec->codeBlock(), byValInfo, returnAddress, arrayMode);
+ optimizationResult = OptimizationResult::Optimized;
+ }
+ }
+
+ // If we failed to patch and we have some object that intercepts indexed get, then don't even wait until 10 times.
+ if (optimizationResult != OptimizationResult::Optimized && object->structure(vm)->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero())
+ optimizationResult = OptimizationResult::GiveUp;
+ } else if (isStringOrSymbol(subscript)) {
+ const Identifier propertyName = subscript.toPropertyKey(exec);
+ Optional<uint32_t> index = parseIndex(propertyName);
+
+ if (!subscript.isString() || !index) {
+ ASSERT(exec->bytecodeOffset());
+ ASSERT(!byValInfo->stubRoutine);
+ if (byValInfo->seen) {
+ if (byValInfo->cachedId == propertyName) {
+ JIT::compilePutByValWithCachedId(&vm, exec->codeBlock(), byValInfo, returnAddress, Direct, propertyName);
+ optimizationResult = OptimizationResult::Optimized;
+ } else {
+                    // Seems like a generic property access site.
+ optimizationResult = OptimizationResult::GiveUp;
+ }
+ } else {
+ byValInfo->seen = true;
+ byValInfo->cachedId = propertyName;
+ optimizationResult = OptimizationResult::SeenOnce;
+ }
+ }
+ }
+
+ if (optimizationResult != OptimizationResult::Optimized && optimizationResult != OptimizationResult::SeenOnce) {
+        // If we take the slow path more than 10 times without patching, then make sure we
+        // never make that mistake again. For cases where we see non-index-intercepting
+        // objects, this gives 10 iterations worth of opportunity for us to observe
+        // that the put_by_val may be polymorphic. We count up slowPathCount even if
+        // the result is GiveUp.
+ if (++byValInfo->slowPathCount >= 10)
+ optimizationResult = OptimizationResult::GiveUp;
+ }
+
+ return optimizationResult;
+}
+
+void JIT_OPERATION operationDirectPutByValOptimize(ExecState* exec, EncodedJSValue encodedBaseValue, EncodedJSValue encodedSubscript, EncodedJSValue encodedValue, ByValInfo* byValInfo)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ JSValue baseValue = JSValue::decode(encodedBaseValue);
+ JSValue subscript = JSValue::decode(encodedSubscript);
+ JSValue value = JSValue::decode(encodedValue);
+ RELEASE_ASSERT(baseValue.isObject());
+ JSObject* object = asObject(baseValue);
+ if (tryDirectPutByValOptimize(exec, object, subscript, byValInfo, ReturnAddressPtr(OUR_RETURN_ADDRESS)) == OptimizationResult::GiveUp) {
+ // Don't ever try to optimize.
+ byValInfo->tookSlowPath = true;
+ ctiPatchCallByReturnAddress(ReturnAddressPtr(OUR_RETURN_ADDRESS), FunctionPtr(operationDirectPutByValGeneric));
+ }
+
+ directPutByVal(exec, object, subscript, value, byValInfo);
+}
+
+void JIT_OPERATION operationPutByValGeneric(ExecState* exec, EncodedJSValue encodedBaseValue, EncodedJSValue encodedSubscript, EncodedJSValue encodedValue, ByValInfo* byValInfo)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ JSValue baseValue = JSValue::decode(encodedBaseValue);
+ JSValue subscript = JSValue::decode(encodedSubscript);
+ JSValue value = JSValue::decode(encodedValue);
+
+ putByVal(exec, baseValue, subscript, value, byValInfo);
+}
+
+void JIT_OPERATION operationDirectPutByValGeneric(ExecState* exec, EncodedJSValue encodedBaseValue, EncodedJSValue encodedSubscript, EncodedJSValue encodedValue, ByValInfo* byValInfo)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ JSValue baseValue = JSValue::decode(encodedBaseValue);
+ JSValue subscript = JSValue::decode(encodedSubscript);
+ JSValue value = JSValue::decode(encodedValue);
+ RELEASE_ASSERT(baseValue.isObject());
+ directPutByVal(exec, asObject(baseValue), subscript, value, byValInfo);
+}
+
+EncodedJSValue JIT_OPERATION operationCallEval(ExecState* exec, ExecState* execCallee)
+{
+ UNUSED_PARAM(exec);
+
+ execCallee->setCodeBlock(0);
+
+ if (!isHostFunction(execCallee->calleeAsValue(), globalFuncEval))
+ return JSValue::encode(JSValue());
+
+ VM* vm = &execCallee->vm();
+ JSValue result = eval(execCallee);
+ if (vm->exception())
+ return EncodedJSValue();
+
+ return JSValue::encode(result);
+}
+
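+// Slow path for call sites whose callee is not a JSFunction: dispatches to the host
+// call or construct entry point when the value is callable/constructible, otherwise
+// throws and returns the exception-throwing thunk.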
+static SlowPathReturnType handleHostCall(ExecState* execCallee, JSValue callee, CallLinkInfo* callLinkInfo)
+{
+ ExecState* exec = execCallee->callerFrame();
+ VM* vm = &exec->vm();
+
+ execCallee->setCodeBlock(0);
+
+ if (callLinkInfo->specializationKind() == CodeForCall) {
+ CallData callData;
+ CallType callType = getCallData(callee, callData);
+
+ ASSERT(callType != CallTypeJS);
+
+ if (callType == CallTypeHost) {
+ NativeCallFrameTracer tracer(vm, execCallee);
+ execCallee->setCallee(asObject(callee));
+ vm->hostCallReturnValue = JSValue::decode(callData.native.function(execCallee));
+ if (vm->exception()) {
+ return encodeResult(
+ vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress(),
+ reinterpret_cast<void*>(KeepTheFrame));
+ }
+
+ return encodeResult(
+ bitwise_cast<void*>(getHostCallReturnValue),
+ reinterpret_cast<void*>(callLinkInfo->callMode() == CallMode::Tail ? ReuseTheFrame : KeepTheFrame));
+ }
+
+ ASSERT(callType == CallTypeNone);
+ exec->vm().throwException(exec, createNotAFunctionError(exec, callee));
+ return encodeResult(
+ vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress(),
+ reinterpret_cast<void*>(KeepTheFrame));
+ }
+
+ ASSERT(callLinkInfo->specializationKind() == CodeForConstruct);
+
+ ConstructData constructData;
+ ConstructType constructType = getConstructData(callee, constructData);
+
+ ASSERT(constructType != ConstructTypeJS);
+
+ if (constructType == ConstructTypeHost) {
+ NativeCallFrameTracer tracer(vm, execCallee);
+ execCallee->setCallee(asObject(callee));
+ vm->hostCallReturnValue = JSValue::decode(constructData.native.function(execCallee));
+ if (vm->exception()) {
+ return encodeResult(
+ vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress(),
+ reinterpret_cast<void*>(KeepTheFrame));
+ }
+
+ return encodeResult(bitwise_cast<void*>(getHostCallReturnValue), reinterpret_cast<void*>(KeepTheFrame));
+ }
+
+ ASSERT(constructType == ConstructTypeNone);
+ exec->vm().throwException(exec, createNotAConstructorError(exec, callee));
+ return encodeResult(
+ vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress(),
+ reinterpret_cast<void*>(KeepTheFrame));
+}
+
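+// Resolves the callee of an unlinked call site, preparing the function for execution
+// if needed, and links the site via linkFor() once it has been seen before. Host and
+// WebAssembly callees take their own paths.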
+SlowPathReturnType JIT_OPERATION operationLinkCall(ExecState* execCallee, CallLinkInfo* callLinkInfo)
+{
+ ExecState* exec = execCallee->callerFrame();
+ VM* vm = &exec->vm();
+ CodeSpecializationKind kind = callLinkInfo->specializationKind();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ JSValue calleeAsValue = execCallee->calleeAsValue();
+ JSCell* calleeAsFunctionCell = getJSFunction(calleeAsValue);
+ if (!calleeAsFunctionCell) {
+ // FIXME: We should cache these kinds of calls. They can be common and currently they are
+ // expensive.
+ // https://bugs.webkit.org/show_bug.cgi?id=144458
+ return handleHostCall(execCallee, calleeAsValue, callLinkInfo);
+ }
+
+ JSFunction* callee = jsCast<JSFunction*>(calleeAsFunctionCell);
+ JSScope* scope = callee->scopeUnchecked();
+ ExecutableBase* executable = callee->executable();
+
+ MacroAssemblerCodePtr codePtr;
+ CodeBlock* codeBlock = 0;
+ if (executable->isHostFunction()) {
+ codePtr = executable->entrypointFor(kind, MustCheckArity);
+#if ENABLE(WEBASSEMBLY)
+ } else if (executable->isWebAssemblyExecutable()) {
+ WebAssemblyExecutable* webAssemblyExecutable = static_cast<WebAssemblyExecutable*>(executable);
+ webAssemblyExecutable->prepareForExecution(execCallee);
+ codeBlock = webAssemblyExecutable->codeBlockForCall();
+ ASSERT(codeBlock);
+ ArityCheckMode arity;
+ if (execCallee->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters()))
+ arity = MustCheckArity;
+ else
+ arity = ArityCheckNotRequired;
+ codePtr = webAssemblyExecutable->entrypointFor(kind, arity);
+#endif
+ } else {
+ FunctionExecutable* functionExecutable = static_cast<FunctionExecutable*>(executable);
+
+ if (!isCall(kind) && functionExecutable->constructAbility() == ConstructAbility::CannotConstruct) {
+ exec->vm().throwException(exec, createNotAConstructorError(exec, callee));
+ return encodeResult(
+ vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress(),
+ reinterpret_cast<void*>(KeepTheFrame));
+ }
+
+ JSObject* error = functionExecutable->prepareForExecution(execCallee, callee, scope, kind);
+ if (error) {
+ exec->vm().throwException(exec, error);
+ return encodeResult(
+ vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress(),
+ reinterpret_cast<void*>(KeepTheFrame));
+ }
+ codeBlock = functionExecutable->codeBlockFor(kind);
+ ArityCheckMode arity;
+ if (execCallee->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters()) || callLinkInfo->isVarargs())
+ arity = MustCheckArity;
+ else
+ arity = ArityCheckNotRequired;
+ codePtr = functionExecutable->entrypointFor(kind, arity);
+ }
+ if (!callLinkInfo->seenOnce())
+ callLinkInfo->setSeen();
+ else
+ linkFor(execCallee, *callLinkInfo, codeBlock, callee, codePtr);
+
+ return encodeResult(codePtr.executableAddress(), reinterpret_cast<void*>(callLinkInfo->callMode() == CallMode::Tail ? ReuseTheFrame : KeepTheFrame));
+}
+
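+// Shared helper for the virtual and polymorphic call slow paths: resolves the callee,
+// prepares it for execution if it has no JIT code for this specialization yet, and
+// returns its arity-checking entry point without linking the call site.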
+inline SlowPathReturnType virtualForWithFunction(
+ ExecState* execCallee, CallLinkInfo* callLinkInfo, JSCell*& calleeAsFunctionCell)
+{
+ ExecState* exec = execCallee->callerFrame();
+ VM* vm = &exec->vm();
+ CodeSpecializationKind kind = callLinkInfo->specializationKind();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ JSValue calleeAsValue = execCallee->calleeAsValue();
+ calleeAsFunctionCell = getJSFunction(calleeAsValue);
+ if (UNLIKELY(!calleeAsFunctionCell))
+ return handleHostCall(execCallee, calleeAsValue, callLinkInfo);
+
+ JSFunction* function = jsCast<JSFunction*>(calleeAsFunctionCell);
+ JSScope* scope = function->scopeUnchecked();
+ ExecutableBase* executable = function->executable();
+ if (UNLIKELY(!executable->hasJITCodeFor(kind))) {
+ bool isWebAssemblyExecutable = false;
+#if ENABLE(WEBASSEMBLY)
+ isWebAssemblyExecutable = executable->isWebAssemblyExecutable();
+#endif
+ if (!isWebAssemblyExecutable) {
+ FunctionExecutable* functionExecutable = static_cast<FunctionExecutable*>(executable);
+
+ if (!isCall(kind) && functionExecutable->constructAbility() == ConstructAbility::CannotConstruct) {
+ exec->vm().throwException(exec, createNotAConstructorError(exec, function));
+ return encodeResult(
+ vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress(),
+ reinterpret_cast<void*>(KeepTheFrame));
+ }
+
+ JSObject* error = functionExecutable->prepareForExecution(execCallee, function, scope, kind);
+ if (error) {
+ exec->vm().throwException(exec, error);
+ return encodeResult(
+ vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress(),
+ reinterpret_cast<void*>(KeepTheFrame));
+ }
+ } else {
+#if ENABLE(WEBASSEMBLY)
+ if (!isCall(kind)) {
+ exec->vm().throwException(exec, createNotAConstructorError(exec, function));
+ return encodeResult(
+ vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress(),
+ reinterpret_cast<void*>(KeepTheFrame));
+ }
+
+ WebAssemblyExecutable* webAssemblyExecutable = static_cast<WebAssemblyExecutable*>(executable);
+ webAssemblyExecutable->prepareForExecution(execCallee);
+#endif
+ }
+ }
+ return encodeResult(executable->entrypointFor(
+ kind, MustCheckArity).executableAddress(),
+ reinterpret_cast<void*>(callLinkInfo->callMode() == CallMode::Tail ? ReuseTheFrame : KeepTheFrame));
+}
+
+SlowPathReturnType JIT_OPERATION operationLinkPolymorphicCall(ExecState* execCallee, CallLinkInfo* callLinkInfo)
+{
+ ASSERT(callLinkInfo->specializationKind() == CodeForCall);
+ JSCell* calleeAsFunctionCell;
+ SlowPathReturnType result = virtualForWithFunction(execCallee, callLinkInfo, calleeAsFunctionCell);
+
+ linkPolymorphicCall(execCallee, *callLinkInfo, CallVariant(calleeAsFunctionCell));
+
+ return result;
+}
+
+SlowPathReturnType JIT_OPERATION operationVirtualCall(ExecState* execCallee, CallLinkInfo* callLinkInfo)
+{
+ JSCell* calleeAsFunctionCellIgnored;
+ return virtualForWithFunction(execCallee, callLinkInfo, calleeAsFunctionCellIgnored);
+}
+
+size_t JIT_OPERATION operationCompareLess(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ return jsLess<true>(exec, JSValue::decode(encodedOp1), JSValue::decode(encodedOp2));
+}
+
+size_t JIT_OPERATION operationCompareLessEq(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ return jsLessEq<true>(exec, JSValue::decode(encodedOp1), JSValue::decode(encodedOp2));
+}
+
+size_t JIT_OPERATION operationCompareGreater(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ return jsLess<false>(exec, JSValue::decode(encodedOp2), JSValue::decode(encodedOp1));
+}
+
+size_t JIT_OPERATION operationCompareGreaterEq(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ return jsLessEq<false>(exec, JSValue::decode(encodedOp2), JSValue::decode(encodedOp1));
+}
+
+size_t JIT_OPERATION operationConvertJSValueToBoolean(ExecState* exec, EncodedJSValue encodedOp)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ return JSValue::decode(encodedOp).toBoolean(exec);
+}
+
+size_t JIT_OPERATION operationCompareEq(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ return JSValue::equalSlowCaseInline(exec, JSValue::decode(encodedOp1), JSValue::decode(encodedOp2));
+}
+
+#if USE(JSVALUE64)
+EncodedJSValue JIT_OPERATION operationCompareStringEq(ExecState* exec, JSCell* left, JSCell* right)
+#else
+size_t JIT_OPERATION operationCompareStringEq(ExecState* exec, JSCell* left, JSCell* right)
+#endif
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ bool result = WTF::equal(*asString(left)->value(exec).impl(), *asString(right)->value(exec).impl());
+#if USE(JSVALUE64)
+ return JSValue::encode(jsBoolean(result));
+#else
+ return result;
+#endif
+}
+
+size_t JIT_OPERATION operationHasProperty(ExecState* exec, JSObject* base, JSString* property)
+{
+    return base->hasProperty(exec, property->toIdentifier(exec));
+}
+
+EncodedJSValue JIT_OPERATION operationNewArrayWithProfile(ExecState* exec, ArrayAllocationProfile* profile, const JSValue* values, int size)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+ return JSValue::encode(constructArrayNegativeIndexed(exec, profile, values, size));
+}
+
+EncodedJSValue JIT_OPERATION operationNewArrayBufferWithProfile(ExecState* exec, ArrayAllocationProfile* profile, const JSValue* values, int size)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+ return JSValue::encode(constructArray(exec, profile, values, size));
+}
+
+EncodedJSValue JIT_OPERATION operationNewArrayWithSizeAndProfile(ExecState* exec, ArrayAllocationProfile* profile, EncodedJSValue size)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+ JSValue sizeValue = JSValue::decode(size);
+ return JSValue::encode(constructArrayWithSizeQuirk(exec, profile, exec->lexicalGlobalObject(), sizeValue));
+}
+
+}
+
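+// Shared implementation for the new_func family of operations: allocates a function
+// of the requested type over the given executable and scope, optionally using the
+// variant that starts out with an invalidated reallocation watchpoint.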
+template<typename FunctionType>
+static EncodedJSValue operationNewFunctionCommon(ExecState* exec, JSScope* scope, JSCell* functionExecutable, bool isInvalidated)
+{
+ ASSERT(functionExecutable->inherits(FunctionExecutable::info()));
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+ if (isInvalidated)
+ return JSValue::encode(FunctionType::createWithInvalidatedReallocationWatchpoint(vm, static_cast<FunctionExecutable*>(functionExecutable), scope));
+ return JSValue::encode(FunctionType::create(vm, static_cast<FunctionExecutable*>(functionExecutable), scope));
+}
+
+extern "C" {
+
+EncodedJSValue JIT_OPERATION operationNewFunction(ExecState* exec, JSScope* scope, JSCell* functionExecutable)
+{
+ return operationNewFunctionCommon<JSFunction>(exec, scope, functionExecutable, false);
+}
+
+EncodedJSValue JIT_OPERATION operationNewFunctionWithInvalidatedReallocationWatchpoint(ExecState* exec, JSScope* scope, JSCell* functionExecutable)
+{
+ return operationNewFunctionCommon<JSFunction>(exec, scope, functionExecutable, true);
+}
+
+EncodedJSValue JIT_OPERATION operationNewGeneratorFunction(ExecState* exec, JSScope* scope, JSCell* functionExecutable)
+{
+ return operationNewFunctionCommon<JSGeneratorFunction>(exec, scope, functionExecutable, false);
+}
+
+EncodedJSValue JIT_OPERATION operationNewGeneratorFunctionWithInvalidatedReallocationWatchpoint(ExecState* exec, JSScope* scope, JSCell* functionExecutable)
+{
+ return operationNewFunctionCommon<JSGeneratorFunction>(exec, scope, functionExecutable, true);
+}
+
+JSCell* JIT_OPERATION operationNewObject(ExecState* exec, Structure* structure)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ return constructEmptyObject(exec, structure);
+}
+
+EncodedJSValue JIT_OPERATION operationNewRegexp(ExecState* exec, void* regexpPtr)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+ RegExp* regexp = static_cast<RegExp*>(regexpPtr);
+ if (!regexp->isValid()) {
+ vm.throwException(exec, createSyntaxError(exec, ASCIILiteral("Invalid flags supplied to RegExp constructor.")));
+ return JSValue::encode(jsUndefined());
+ }
+
+ return JSValue::encode(RegExpObject::create(vm, exec->lexicalGlobalObject()->regExpStructure(), regexp));
+}
+
+// The only reason for returning an UnusedPtr (instead of void) is so that we can reuse the
+// existing DFG slow path generator machinery when creating the slow path for CheckWatchdogTimer
+// in the DFG. If a DFG slow path generator that supports a void return type is added in the
+// future, we can switch to using that then.
+UnusedPtr JIT_OPERATION operationHandleWatchdogTimer(ExecState* exec)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ if (UNLIKELY(vm.shouldTriggerTermination(exec)))
+ vm.throwException(exec, createTerminatedExecutionException(&vm));
+
+ return nullptr;
+}
+
+void JIT_OPERATION operationThrowStaticError(ExecState* exec, EncodedJSValue encodedValue, int32_t referenceErrorFlag)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+ JSValue errorMessageValue = JSValue::decode(encodedValue);
+ RELEASE_ASSERT(errorMessageValue.isString());
+ String errorMessage = asString(errorMessageValue)->value(exec);
+ if (referenceErrorFlag)
+ vm.throwException(exec, createReferenceError(exec, errorMessage));
+ else
+ vm.throwException(exec, createTypeError(exec, errorMessage));
+}
+
+void JIT_OPERATION operationDebug(ExecState* exec, int32_t debugHookID)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ vm.interpreter->debug(exec, static_cast<DebugHookID>(debugHookID));
+}
+
+#if ENABLE(DFG_JIT)
+static void updateAllPredictionsAndOptimizeAfterWarmUp(CodeBlock* codeBlock)
+{
+ codeBlock->updateAllPredictions();
+ codeBlock->optimizeAfterWarmUp();
+}
+
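+// Baseline->DFG tier-up slow path: decides whether to start (or wait for) a DFG
+// compilation of this code block and, once an optimized replacement is available,
+// attempts OSR entry at the given bytecode index.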
+SlowPathReturnType JIT_OPERATION operationOptimize(ExecState* exec, int32_t bytecodeIndex)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ // Defer GC for a while so that it doesn't run between when we enter into this
+ // slow path and when we figure out the state of our code block. This prevents
+ // a number of awkward reentrancy scenarios, including:
+ //
+ // - The optimized version of our code block being jettisoned by GC right after
+ // we concluded that we wanted to use it, but have not planted it into the JS
+ // stack yet.
+ //
+ // - An optimized version of our code block being installed just as we decided
+ // that it wasn't ready yet.
+ //
+ // Note that jettisoning won't happen if we already initiated OSR, because in
+ // that case we would have already planted the optimized code block into the JS
+ // stack.
+ DeferGCForAWhile deferGC(vm.heap);
+
+ CodeBlock* codeBlock = exec->codeBlock();
+ if (codeBlock->jitType() != JITCode::BaselineJIT) {
+ dataLog("Unexpected code block in Baseline->DFG tier-up: ", *codeBlock, "\n");
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ if (bytecodeIndex) {
+ // If we're attempting to OSR from a loop, assume that this should be
+ // separately optimized.
+ codeBlock->m_shouldAlwaysBeInlined = false;
+ }
+
+ if (Options::verboseOSR()) {
+ dataLog(
+ *codeBlock, ": Entered optimize with bytecodeIndex = ", bytecodeIndex,
+ ", executeCounter = ", codeBlock->jitExecuteCounter(),
+ ", optimizationDelayCounter = ", codeBlock->reoptimizationRetryCounter(),
+ ", exitCounter = ");
+ if (codeBlock->hasOptimizedReplacement())
+ dataLog(codeBlock->replacement()->osrExitCounter());
+ else
+ dataLog("N/A");
+ dataLog("\n");
+ }
+
+ if (!codeBlock->checkIfOptimizationThresholdReached()) {
+ codeBlock->updateAllPredictions();
+ if (Options::verboseOSR())
+ dataLog("Choosing not to optimize ", *codeBlock, " yet, because the threshold hasn't been reached.\n");
+ return encodeResult(0, 0);
+ }
+
+ if (vm.enabledProfiler()) {
+ updateAllPredictionsAndOptimizeAfterWarmUp(codeBlock);
+ return encodeResult(0, 0);
+ }
+
+ Debugger* debugger = codeBlock->globalObject()->debugger();
+ if (debugger && (debugger->isStepping() || codeBlock->baselineAlternative()->hasDebuggerRequests())) {
+ updateAllPredictionsAndOptimizeAfterWarmUp(codeBlock);
+ return encodeResult(0, 0);
+ }
+
+ if (codeBlock->m_shouldAlwaysBeInlined) {
+ updateAllPredictionsAndOptimizeAfterWarmUp(codeBlock);
+ if (Options::verboseOSR())
+ dataLog("Choosing not to optimize ", *codeBlock, " yet, because m_shouldAlwaysBeInlined == true.\n");
+ return encodeResult(0, 0);
+ }
+
+ // We cannot be in the process of asynchronous compilation and also have an optimized
+ // replacement.
+ DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull();
+ ASSERT(
+ !worklist
+ || !(worklist->compilationState(DFG::CompilationKey(codeBlock, DFG::DFGMode)) != DFG::Worklist::NotKnown
+ && codeBlock->hasOptimizedReplacement()));
+
+ DFG::Worklist::State worklistState;
+ if (worklist) {
+ // The call to DFG::Worklist::completeAllReadyPlansForVM() will complete all ready
+ // (i.e. compiled) code blocks. But if it completes ours, we also need to know
+ // what the result was so that we don't plow ahead and attempt OSR or immediate
+ // reoptimization. This will have already also set the appropriate JIT execution
+ // count threshold depending on what happened, so if the compilation was anything
+ // but successful we just want to return early. See the case for worklistState ==
+ // DFG::Worklist::Compiled, below.
+
+ // Note that we could have alternatively just called Worklist::compilationState()
+ // here, and if it returned Compiled, we could have then called
+ // completeAndScheduleOSR() below. But that would have meant that it could take
+ // longer for code blocks to be completed: they would only complete when *their*
+ // execution count trigger fired; but that could take a while since the firing is
+ // racy. It could also mean that code blocks that never run again after being
+ // compiled would sit on the worklist until next GC. That's fine, but it's
+ // probably a waste of memory. Our goal here is to complete code blocks as soon as
+ // possible in order to minimize the chances of us executing baseline code after
+ // optimized code is already available.
+ worklistState = worklist->completeAllReadyPlansForVM(
+ vm, DFG::CompilationKey(codeBlock, DFG::DFGMode));
+ } else
+ worklistState = DFG::Worklist::NotKnown;
+
+ if (worklistState == DFG::Worklist::Compiling) {
+ // We cannot be in the process of asynchronous compilation and also have an optimized
+ // replacement.
+ RELEASE_ASSERT(!codeBlock->hasOptimizedReplacement());
+ codeBlock->setOptimizationThresholdBasedOnCompilationResult(CompilationDeferred);
+ return encodeResult(0, 0);
+ }
+
+ if (worklistState == DFG::Worklist::Compiled) {
+ // If we don't have an optimized replacement but we did just get compiled, then
+ // the compilation failed or was invalidated, in which case the execution count
+ // thresholds have already been set appropriately by
+ // CodeBlock::setOptimizationThresholdBasedOnCompilationResult() and we have
+ // nothing left to do.
+ if (!codeBlock->hasOptimizedReplacement()) {
+ codeBlock->updateAllPredictions();
+ if (Options::verboseOSR())
+ dataLog("Code block ", *codeBlock, " was compiled but it doesn't have an optimized replacement.\n");
+ return encodeResult(0, 0);
+ }
+ } else if (codeBlock->hasOptimizedReplacement()) {
+ if (Options::verboseOSR())
+ dataLog("Considering OSR ", *codeBlock, " -> ", *codeBlock->replacement(), ".\n");
+ // If we have an optimized replacement, then it must be the case that we entered
+ // cti_optimize from a loop. That's because if there's an optimized replacement,
+ // then all calls to this function will be relinked to the replacement and so
+ // the prologue OSR will never fire.
+
+ // This is an interesting threshold check. Consider that a function OSR exits
+ // in the middle of a loop, while having a relatively low exit count. The exit
+ // will reset the execution counter to some target threshold, meaning that this
+ // code won't be reached until that loop heats up for >=1000 executions. But then
+ // we do a second check here, to see if we should either reoptimize, or just
+ // attempt OSR entry. Hence it might even be correct for
+ // shouldReoptimizeFromLoopNow() to always return true. But we make it do some
+ // additional checking anyway, to reduce the amount of recompilation thrashing.
+ if (codeBlock->replacement()->shouldReoptimizeFromLoopNow()) {
+ if (Options::verboseOSR()) {
+ dataLog(
+ "Triggering reoptimization of ", *codeBlock,
+ "(", *codeBlock->replacement(), ") (in loop).\n");
+ }
+ codeBlock->replacement()->jettison(Profiler::JettisonDueToBaselineLoopReoptimizationTrigger, CountReoptimization);
+ return encodeResult(0, 0);
+ }
+ } else {
+ if (!codeBlock->shouldOptimizeNow()) {
+ if (Options::verboseOSR()) {
+ dataLog(
+ "Delaying optimization for ", *codeBlock,
+ " because of insufficient profiling.\n");
+ }
+ return encodeResult(0, 0);
+ }
+
+ if (Options::verboseOSR())
+ dataLog("Triggering optimized compilation of ", *codeBlock, "\n");
+
+ unsigned numVarsWithValues;
+ if (bytecodeIndex)
+ numVarsWithValues = codeBlock->m_numVars;
+ else
+ numVarsWithValues = 0;
+ Operands<JSValue> mustHandleValues(codeBlock->numParameters(), numVarsWithValues);
+ int localsUsedForCalleeSaves = static_cast<int>(CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters());
+ for (size_t i = 0; i < mustHandleValues.size(); ++i) {
+ int operand = mustHandleValues.operandForIndex(i);
+ if (operandIsLocal(operand) && VirtualRegister(operand).toLocal() < localsUsedForCalleeSaves)
+ continue;
+ mustHandleValues[i] = exec->uncheckedR(operand).jsValue();
+ }
+
+ CodeBlock* replacementCodeBlock = codeBlock->newReplacement();
+ CompilationResult result = DFG::compile(
+ vm, replacementCodeBlock, nullptr, DFG::DFGMode, bytecodeIndex,
+ mustHandleValues, JITToDFGDeferredCompilationCallback::create());
+
+ if (result != CompilationSuccessful)
+ return encodeResult(0, 0);
+ }
+
+ CodeBlock* optimizedCodeBlock = codeBlock->replacement();
+ ASSERT(JITCode::isOptimizingJIT(optimizedCodeBlock->jitType()));
+
+ if (void* dataBuffer = DFG::prepareOSREntry(exec, optimizedCodeBlock, bytecodeIndex)) {
+ if (Options::verboseOSR()) {
+ dataLog(
+ "Performing OSR ", *codeBlock, " -> ", *optimizedCodeBlock, ".\n");
+ }
+
+ codeBlock->optimizeSoon();
+ return encodeResult(vm.getCTIStub(DFG::osrEntryThunkGenerator).code().executableAddress(), dataBuffer);
+ }
+
+ if (Options::verboseOSR()) {
+ dataLog(
+ "Optimizing ", *codeBlock, " -> ", *codeBlock->replacement(),
+ " succeeded, OSR failed, after a delay of ",
+ codeBlock->optimizationDelayCounter(), ".\n");
+ }
+
+ // Count the OSR failure as a speculation failure. If this happens a lot, then
+ // reoptimize.
+ optimizedCodeBlock->countOSRExit();
+
+ // We are a lot more conservative about triggering reoptimization after OSR failure than
+ // before it. If we enter the optimize_from_loop trigger with a bucket full of fail
+ // already, then we really would like to reoptimize immediately. But this case covers
+ // something else: there weren't many (or any) speculation failures before, but we just
+ // failed to enter the speculative code because some variable had the wrong value or
+ // because the OSR code decided for any spurious reason that it did not want to OSR
+    // right now. So, we trigger reoptimization only upon the more conservative (non-loop)
+ // reoptimization trigger.
+ if (optimizedCodeBlock->shouldReoptimizeNow()) {
+ if (Options::verboseOSR()) {
+ dataLog(
+ "Triggering reoptimization of ", *codeBlock, " -> ",
+ *codeBlock->replacement(), " (after OSR fail).\n");
+ }
+ optimizedCodeBlock->jettison(Profiler::JettisonDueToBaselineLoopReoptimizationTriggerOnOSREntryFail, CountReoptimization);
+ return encodeResult(0, 0);
+ }
+
+ // OSR failed this time, but it might succeed next time! Let the code run a bit
+ // longer and then try again.
+ codeBlock->optimizeAfterWarmUp();
+
+ return encodeResult(0, 0);
+}
+#endif
+
+void JIT_OPERATION operationPutByIndex(ExecState* exec, EncodedJSValue encodedArrayValue, int32_t index, EncodedJSValue encodedValue)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ JSValue arrayValue = JSValue::decode(encodedArrayValue);
+ ASSERT(isJSArray(arrayValue));
+ asArray(arrayValue)->putDirectIndex(exec, index, JSValue::decode(encodedValue));
+}
+
+enum class AccessorType {
+ Getter,
+ Setter
+};
+
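+// Shared helper for put_getter_by_val / put_setter_by_val: converts the subscript to
+// a property key (bailing out if that threw) and installs the accessor with the
+// requested attributes.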
+static void putAccessorByVal(ExecState* exec, JSObject* base, JSValue subscript, int32_t attribute, JSObject* accessor, AccessorType accessorType)
+{
+ auto propertyKey = subscript.toPropertyKey(exec);
+ if (exec->hadException())
+ return;
+
+ if (accessorType == AccessorType::Getter)
+ base->putGetter(exec, propertyKey, accessor, attribute);
+ else
+ base->putSetter(exec, propertyKey, accessor, attribute);
+}
+
+void JIT_OPERATION operationPutGetterById(ExecState* exec, JSCell* object, UniquedStringImpl* uid, int32_t options, JSCell* getter)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ ASSERT(object && object->isObject());
+ JSObject* baseObj = object->getObject();
+
+ ASSERT(getter->isObject());
+ baseObj->putGetter(exec, uid, getter, options);
+}
+
+void JIT_OPERATION operationPutSetterById(ExecState* exec, JSCell* object, UniquedStringImpl* uid, int32_t options, JSCell* setter)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ ASSERT(object && object->isObject());
+ JSObject* baseObj = object->getObject();
+
+ ASSERT(setter->isObject());
+ baseObj->putSetter(exec, uid, setter, options);
+}
+
+void JIT_OPERATION operationPutGetterByVal(ExecState* exec, JSCell* base, EncodedJSValue encodedSubscript, int32_t attribute, JSCell* getter)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ putAccessorByVal(exec, asObject(base), JSValue::decode(encodedSubscript), attribute, asObject(getter), AccessorType::Getter);
+}
+
+void JIT_OPERATION operationPutSetterByVal(ExecState* exec, JSCell* base, EncodedJSValue encodedSubscript, int32_t attribute, JSCell* setter)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ putAccessorByVal(exec, asObject(base), JSValue::decode(encodedSubscript), attribute, asObject(setter), AccessorType::Setter);
+}
+
+#if USE(JSVALUE64)
+void JIT_OPERATION operationPutGetterSetter(ExecState* exec, JSCell* object, UniquedStringImpl* uid, int32_t attribute, EncodedJSValue encodedGetterValue, EncodedJSValue encodedSetterValue)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ ASSERT(object && object->isObject());
+ JSObject* baseObj = asObject(object);
+
+ GetterSetter* accessor = GetterSetter::create(vm, exec->lexicalGlobalObject());
+
+ JSValue getter = JSValue::decode(encodedGetterValue);
+ JSValue setter = JSValue::decode(encodedSetterValue);
+ ASSERT(getter.isObject() || getter.isUndefined());
+ ASSERT(setter.isObject() || setter.isUndefined());
+ ASSERT(getter.isObject() || setter.isObject());
+
+ if (!getter.isUndefined())
+ accessor->setGetter(vm, exec->lexicalGlobalObject(), asObject(getter));
+ if (!setter.isUndefined())
+ accessor->setSetter(vm, exec->lexicalGlobalObject(), asObject(setter));
+ baseObj->putDirectAccessor(exec, uid, accessor, attribute);
+}
+
+#else
+void JIT_OPERATION operationPutGetterSetter(ExecState* exec, JSCell* object, UniquedStringImpl* uid, int32_t attribute, JSCell* getter, JSCell* setter)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ ASSERT(object && object->isObject());
+ JSObject* baseObj = asObject(object);
+
+ GetterSetter* accessor = GetterSetter::create(vm, exec->lexicalGlobalObject());
+
+ ASSERT(!getter || getter->isObject());
+ ASSERT(!setter || setter->isObject());
+ ASSERT(getter || setter);
+
+ if (getter)
+ accessor->setGetter(vm, exec->lexicalGlobalObject(), getter->getObject());
+ if (setter)
+ accessor->setSetter(vm, exec->lexicalGlobalObject(), setter->getObject());
+ baseObj->putDirectAccessor(exec, uid, accessor, attribute);
+}
+#endif
+
+void JIT_OPERATION operationPopScope(ExecState* exec, int32_t scopeReg)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ JSScope* scope = exec->uncheckedR(scopeReg).Register::scope();
+ exec->uncheckedR(scopeReg) = scope->next();
+}
+
+void JIT_OPERATION operationProfileDidCall(ExecState* exec, EncodedJSValue encodedValue)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ if (LegacyProfiler* profiler = vm.enabledProfiler())
+ profiler->didExecute(exec, JSValue::decode(encodedValue));
+}
+
+void JIT_OPERATION operationProfileWillCall(ExecState* exec, EncodedJSValue encodedValue)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ if (LegacyProfiler* profiler = vm.enabledProfiler())
+ profiler->willExecute(exec, JSValue::decode(encodedValue));
+}
+
+int32_t JIT_OPERATION operationInstanceOfCustom(ExecState* exec, EncodedJSValue encodedValue, JSObject* constructor, EncodedJSValue encodedHasInstance)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ JSValue value = JSValue::decode(encodedValue);
+ JSValue hasInstanceValue = JSValue::decode(encodedHasInstance);
+
+ ASSERT(hasInstanceValue != exec->lexicalGlobalObject()->functionProtoHasInstanceSymbolFunction() || !constructor->structure()->typeInfo().implementsDefaultHasInstance());
+
+ if (constructor->hasInstance(exec, value, hasInstanceValue))
+ return 1;
+ return 0;
+}
+
+}
+
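+// Returns true if 'object' is a DirectArguments or ScopedArguments object that can
+// service 'index' quickly; callers use this to avoid marking such accesses as
+// out-of-bounds in the array profile.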
+static bool canAccessArgumentIndexQuickly(JSObject& object, uint32_t index)
+{
+ switch (object.structure()->typeInfo().type()) {
+ case DirectArgumentsType: {
+ DirectArguments* directArguments = jsCast<DirectArguments*>(&object);
+ if (directArguments->canAccessArgumentIndexQuicklyInDFG(index))
+ return true;
+ break;
+ }
+ case ScopedArgumentsType: {
+ ScopedArguments* scopedArguments = jsCast<ScopedArguments*>(&object);
+ if (scopedArguments->canAccessArgumentIndexQuicklyInDFG(index))
+ return true;
+ break;
+ }
+ default:
+ break;
+ }
+ return false;
+}
+
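+// Generic get_by_val slow path: tries a fast own-property lookup for string
+// subscripts, then indexed access for uint32 subscripts (patching string loads to
+// operationGetByValString), and finally falls back to a generic get by property key.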
+static JSValue getByVal(ExecState* exec, JSValue baseValue, JSValue subscript, ByValInfo* byValInfo, ReturnAddressPtr returnAddress)
+{
+ if (LIKELY(baseValue.isCell() && subscript.isString())) {
+ VM& vm = exec->vm();
+ Structure& structure = *baseValue.asCell()->structure(vm);
+ if (JSCell::canUseFastGetOwnProperty(structure)) {
+ if (RefPtr<AtomicStringImpl> existingAtomicString = asString(subscript)->toExistingAtomicString(exec)) {
+ if (JSValue result = baseValue.asCell()->fastGetOwnProperty(vm, structure, existingAtomicString.get())) {
+ ASSERT(exec->bytecodeOffset());
+ if (byValInfo->stubInfo && byValInfo->cachedId.impl() != existingAtomicString)
+ byValInfo->tookSlowPath = true;
+ return result;
+ }
+ }
+ }
+ }
+
+ if (subscript.isUInt32()) {
+ ASSERT(exec->bytecodeOffset());
+ byValInfo->tookSlowPath = true;
+
+ uint32_t i = subscript.asUInt32();
+ if (isJSString(baseValue)) {
+ if (asString(baseValue)->canGetIndex(i)) {
+ ctiPatchCallByReturnAddress(returnAddress, FunctionPtr(operationGetByValString));
+ return asString(baseValue)->getIndex(exec, i);
+ }
+ byValInfo->arrayProfile->setOutOfBounds();
+ } else if (baseValue.isObject()) {
+ JSObject* object = asObject(baseValue);
+ if (object->canGetIndexQuickly(i))
+ return object->getIndexQuickly(i);
+
+ if (!canAccessArgumentIndexQuickly(*object, i)) {
+ // FIXME: This will make us think that in-bounds typed array accesses are actually
+ // out-of-bounds.
+ // https://bugs.webkit.org/show_bug.cgi?id=149886
+ byValInfo->arrayProfile->setOutOfBounds();
+ }
+ }
+
+ return baseValue.get(exec, i);
+ }
+
+ baseValue.requireObjectCoercible(exec);
+ if (exec->hadException())
+ return jsUndefined();
+ auto property = subscript.toPropertyKey(exec);
+ if (exec->hadException())
+ return jsUndefined();
+
+ ASSERT(exec->bytecodeOffset());
+ if (byValInfo->stubInfo && (!isStringOrSymbol(subscript) || byValInfo->cachedId != property))
+ byValInfo->tookSlowPath = true;
+
+ return baseValue.get(exec, property);
+}
+
+static OptimizationResult tryGetByValOptimize(ExecState* exec, JSValue baseValue, JSValue subscript, ByValInfo* byValInfo, ReturnAddressPtr returnAddress)
+{
+ // See if it's worth optimizing this at all.
+ OptimizationResult optimizationResult = OptimizationResult::NotOptimized;
+
+ VM& vm = exec->vm();
+
+ if (baseValue.isObject() && subscript.isInt32()) {
+ JSObject* object = asObject(baseValue);
+
+ ASSERT(exec->bytecodeOffset());
+ ASSERT(!byValInfo->stubRoutine);
+
+ if (hasOptimizableIndexing(object->structure(vm))) {
+ // Attempt to optimize.
+ Structure* structure = object->structure(vm);
+ JITArrayMode arrayMode = jitArrayModeForStructure(structure);
+ if (arrayMode != byValInfo->arrayMode) {
+ // If we reached this case, we got an interesting array mode we did not expect when we compiled.
+ // Let's update the profile to do better next time.
+ CodeBlock* codeBlock = exec->codeBlock();
+ ConcurrentJITLocker locker(codeBlock->m_lock);
+ byValInfo->arrayProfile->computeUpdatedPrediction(locker, codeBlock, structure);
+
+ JIT::compileGetByVal(&vm, exec->codeBlock(), byValInfo, returnAddress, arrayMode);
+ optimizationResult = OptimizationResult::Optimized;
+ }
+ }
+
+ // If we failed to patch and we have some object that intercepts indexed get, then don't even wait until 10 times.
+ if (optimizationResult != OptimizationResult::Optimized && object->structure(vm)->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero())
+ optimizationResult = OptimizationResult::GiveUp;
+ }
+
+ if (baseValue.isObject() && isStringOrSymbol(subscript)) {
+ const Identifier propertyName = subscript.toPropertyKey(exec);
+ if (!subscript.isString() || !parseIndex(propertyName)) {
+ ASSERT(exec->bytecodeOffset());
+ ASSERT(!byValInfo->stubRoutine);
+ if (byValInfo->seen) {
+ if (byValInfo->cachedId == propertyName) {
+ JIT::compileGetByValWithCachedId(&vm, exec->codeBlock(), byValInfo, returnAddress, propertyName);
+ optimizationResult = OptimizationResult::Optimized;
+ } else {
+                    // Seems like a generic property access site.
+ optimizationResult = OptimizationResult::GiveUp;
+ }
+ } else {
+ byValInfo->seen = true;
+ byValInfo->cachedId = propertyName;
+ optimizationResult = OptimizationResult::SeenOnce;
+ }
+ }
+ }
+
+ if (optimizationResult != OptimizationResult::Optimized && optimizationResult != OptimizationResult::SeenOnce) {
+        // If we take the slow path more than 10 times without patching, then make sure we
+ // never make that mistake again. For cases where we see non-index-intercepting
+ // objects, this gives 10 iterations worth of opportunity for us to observe
+ // that the get_by_val may be polymorphic. We count up slowPathCount even if
+ // the result is GiveUp.
+ if (++byValInfo->slowPathCount >= 10)
+ optimizationResult = OptimizationResult::GiveUp;
+ }
+
+ return optimizationResult;
+}
+
+extern "C" {
+
+EncodedJSValue JIT_OPERATION operationGetByValGeneric(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo* byValInfo)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+ JSValue baseValue = JSValue::decode(encodedBase);
+ JSValue subscript = JSValue::decode(encodedSubscript);
+
+ JSValue result = getByVal(exec, baseValue, subscript, byValInfo, ReturnAddressPtr(OUR_RETURN_ADDRESS));
+ return JSValue::encode(result);
+}
+
+EncodedJSValue JIT_OPERATION operationGetByValOptimize(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo* byValInfo)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ JSValue baseValue = JSValue::decode(encodedBase);
+ JSValue subscript = JSValue::decode(encodedSubscript);
+ ReturnAddressPtr returnAddress = ReturnAddressPtr(OUR_RETURN_ADDRESS);
+ if (tryGetByValOptimize(exec, baseValue, subscript, byValInfo, returnAddress) == OptimizationResult::GiveUp) {
+ // Don't ever try to optimize.
+ byValInfo->tookSlowPath = true;
+ ctiPatchCallByReturnAddress(returnAddress, FunctionPtr(operationGetByValGeneric));
+ }
+
+ return JSValue::encode(getByVal(exec, baseValue, subscript, byValInfo, returnAddress));
+}
+
+EncodedJSValue JIT_OPERATION operationHasIndexedPropertyDefault(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo* byValInfo)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+ JSValue baseValue = JSValue::decode(encodedBase);
+ JSValue subscript = JSValue::decode(encodedSubscript);
+
+ ASSERT(baseValue.isObject());
+ ASSERT(subscript.isUInt32());
+
+ JSObject* object = asObject(baseValue);
+ bool didOptimize = false;
+
+ ASSERT(exec->bytecodeOffset());
+ ASSERT(!byValInfo->stubRoutine);
+
+ if (hasOptimizableIndexing(object->structure(vm))) {
+ // Attempt to optimize.
+ JITArrayMode arrayMode = jitArrayModeForStructure(object->structure(vm));
+ if (arrayMode != byValInfo->arrayMode) {
+ JIT::compileHasIndexedProperty(&vm, exec->codeBlock(), byValInfo, ReturnAddressPtr(OUR_RETURN_ADDRESS), arrayMode);
+ didOptimize = true;
+ }
+ }
+
+ if (!didOptimize) {
+        // If we take the slow path more than 10 times without patching, then make sure we
+ // never make that mistake again. Or, if we failed to patch and we have some object
+ // that intercepts indexed get, then don't even wait until 10 times. For cases
+ // where we see non-index-intercepting objects, this gives 10 iterations worth of
+ // opportunity for us to observe that the get_by_val may be polymorphic.
+ if (++byValInfo->slowPathCount >= 10
+ || object->structure(vm)->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero()) {
+ // Don't ever try to optimize.
+ ctiPatchCallByReturnAddress(ReturnAddressPtr(OUR_RETURN_ADDRESS), FunctionPtr(operationHasIndexedPropertyGeneric));
+ }
+ }
+
+ uint32_t index = subscript.asUInt32();
+ if (object->canGetIndexQuickly(index))
+ return JSValue::encode(JSValue(JSValue::JSTrue));
+
+ if (!canAccessArgumentIndexQuickly(*object, index)) {
+ // FIXME: This will make us think that in-bounds typed array accesses are actually
+ // out-of-bounds.
+ // https://bugs.webkit.org/show_bug.cgi?id=149886
+ byValInfo->arrayProfile->setOutOfBounds();
+ }
+ return JSValue::encode(jsBoolean(object->hasProperty(exec, index)));
+}
+
+EncodedJSValue JIT_OPERATION operationHasIndexedPropertyGeneric(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo* byValInfo)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+ JSValue baseValue = JSValue::decode(encodedBase);
+ JSValue subscript = JSValue::decode(encodedSubscript);
+
+ ASSERT(baseValue.isObject());
+ ASSERT(subscript.isUInt32());
+
+ JSObject* object = asObject(baseValue);
+ uint32_t index = subscript.asUInt32();
+ if (object->canGetIndexQuickly(index))
+ return JSValue::encode(JSValue(JSValue::JSTrue));
+
+ if (!canAccessArgumentIndexQuickly(*object, index)) {
+ // FIXME: This will make us think that in-bounds typed array accesses are actually
+ // out-of-bounds.
+ // https://bugs.webkit.org/show_bug.cgi?id=149886
+ byValInfo->arrayProfile->setOutOfBounds();
+ }
+    return JSValue::encode(jsBoolean(object->hasProperty(exec, index)));
+}
+
+EncodedJSValue JIT_OPERATION operationGetByValString(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo* byValInfo)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+ JSValue baseValue = JSValue::decode(encodedBase);
+ JSValue subscript = JSValue::decode(encodedSubscript);
+
+ JSValue result;
+ if (LIKELY(subscript.isUInt32())) {
+ uint32_t i = subscript.asUInt32();
+ if (isJSString(baseValue) && asString(baseValue)->canGetIndex(i))
+ result = asString(baseValue)->getIndex(exec, i);
+ else {
+ result = baseValue.get(exec, i);
+ if (!isJSString(baseValue)) {
+ ASSERT(exec->bytecodeOffset());
+ ctiPatchCallByReturnAddress(ReturnAddressPtr(OUR_RETURN_ADDRESS), FunctionPtr(byValInfo->stubRoutine ? operationGetByValGeneric : operationGetByValOptimize));
+ }
+ }
+ } else {
+ baseValue.requireObjectCoercible(exec);
+ if (exec->hadException())
+ return JSValue::encode(jsUndefined());
+ auto property = subscript.toPropertyKey(exec);
+ if (exec->hadException())
+ return JSValue::encode(jsUndefined());
+ result = baseValue.get(exec, property);
+ }
+
+ return JSValue::encode(result);
+}
+
+EncodedJSValue JIT_OPERATION operationDeleteById(ExecState* exec, EncodedJSValue encodedBase, const Identifier* identifier)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ JSObject* baseObj = JSValue::decode(encodedBase).toObject(exec);
+ bool couldDelete = baseObj->methodTable(vm)->deleteProperty(baseObj, exec, *identifier);
+ JSValue result = jsBoolean(couldDelete);
+ if (!couldDelete && exec->codeBlock()->isStrictMode())
+ vm.throwException(exec, createTypeError(exec, ASCIILiteral("Unable to delete property.")));
+ return JSValue::encode(result);
+}
+
+EncodedJSValue JIT_OPERATION operationInstanceOf(ExecState* exec, EncodedJSValue encodedValue, EncodedJSValue encodedProto)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+ JSValue value = JSValue::decode(encodedValue);
+ JSValue proto = JSValue::decode(encodedProto);
+
+ ASSERT(!value.isObject() || !proto.isObject());
+
+ bool result = JSObject::defaultHasInstance(exec, value, proto);
+ return JSValue::encode(jsBoolean(result));
+}
+
+int32_t JIT_OPERATION operationSizeFrameForVarargs(ExecState* exec, EncodedJSValue encodedArguments, int32_t numUsedStackSlots, int32_t firstVarArgOffset)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+ JSStack* stack = &exec->interpreter()->stack();
+ JSValue arguments = JSValue::decode(encodedArguments);
+ return sizeFrameForVarargs(exec, stack, arguments, numUsedStackSlots, firstVarArgOffset);
+}
+
+CallFrame* JIT_OPERATION operationSetupVarargsFrame(ExecState* exec, CallFrame* newCallFrame, EncodedJSValue encodedArguments, int32_t firstVarArgOffset, int32_t length)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+ JSValue arguments = JSValue::decode(encodedArguments);
+ setupVarargsFrame(exec, newCallFrame, arguments, firstVarArgOffset, length);
+ return newCallFrame;
+}
+
+EncodedJSValue JIT_OPERATION operationToObject(ExecState* exec, EncodedJSValue value)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+ return JSValue::encode(JSValue::decode(value).toObject(exec));
+}
+
+char* JIT_OPERATION operationSwitchCharWithUnknownKeyType(ExecState* exec, EncodedJSValue encodedKey, size_t tableIndex)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+ JSValue key = JSValue::decode(encodedKey);
+ CodeBlock* codeBlock = exec->codeBlock();
+
+ SimpleJumpTable& jumpTable = codeBlock->switchJumpTable(tableIndex);
+ void* result = jumpTable.ctiDefault.executableAddress();
+
+ if (key.isString()) {
+ StringImpl* value = asString(key)->value(exec).impl();
+ if (value->length() == 1)
+ result = jumpTable.ctiForValue((*value)[0]).executableAddress();
+ }
+
+ return reinterpret_cast<char*>(result);
+}
+
+char* JIT_OPERATION operationSwitchImmWithUnknownKeyType(ExecState* exec, EncodedJSValue encodedKey, size_t tableIndex)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+ JSValue key = JSValue::decode(encodedKey);
+ CodeBlock* codeBlock = exec->codeBlock();
+
+ SimpleJumpTable& jumpTable = codeBlock->switchJumpTable(tableIndex);
+ void* result;
+ if (key.isInt32())
+ result = jumpTable.ctiForValue(key.asInt32()).executableAddress();
+ else if (key.isDouble() && key.asDouble() == static_cast<int32_t>(key.asDouble()))
+ result = jumpTable.ctiForValue(static_cast<int32_t>(key.asDouble())).executableAddress();
+ else
+ result = jumpTable.ctiDefault.executableAddress();
+ return reinterpret_cast<char*>(result);
+}
+
+char* JIT_OPERATION operationSwitchStringWithUnknownKeyType(ExecState* exec, EncodedJSValue encodedKey, size_t tableIndex)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+ JSValue key = JSValue::decode(encodedKey);
+ CodeBlock* codeBlock = exec->codeBlock();
+
+ void* result;
+ StringJumpTable& jumpTable = codeBlock->stringSwitchJumpTable(tableIndex);
+
+ if (key.isString()) {
+ StringImpl* value = asString(key)->value(exec).impl();
+ result = jumpTable.ctiForValue(value).executableAddress();
+ } else
+ result = jumpTable.ctiDefault.executableAddress();
+
+ return reinterpret_cast<char*>(result);
+}
+
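+// Slow path for get_from_scope: looks the identifier up on the resolved scope,
+// performs the TDZ check for global lexical environments, throws under
+// ThrowIfNotFound when the variable is missing, and tries to cache global accesses.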
+EncodedJSValue JIT_OPERATION operationGetFromScope(ExecState* exec, Instruction* bytecodePC)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+ CodeBlock* codeBlock = exec->codeBlock();
+ Instruction* pc = bytecodePC;
+
+ const Identifier& ident = codeBlock->identifier(pc[3].u.operand);
+ JSObject* scope = jsCast<JSObject*>(exec->uncheckedR(pc[2].u.operand).jsValue());
+ GetPutInfo getPutInfo(pc[4].u.operand);
+
+ // ModuleVar is always converted to ClosureVar for get_from_scope.
+ ASSERT(getPutInfo.resolveType() != ModuleVar);
+
+ PropertySlot slot(scope, PropertySlot::InternalMethodType::Get);
+ if (!scope->getPropertySlot(exec, ident, slot)) {
+ if (getPutInfo.resolveMode() == ThrowIfNotFound)
+ vm.throwException(exec, createUndefinedVariableError(exec, ident));
+ return JSValue::encode(jsUndefined());
+ }
+
+ JSValue result = JSValue();
+ if (jsDynamicCast<JSGlobalLexicalEnvironment*>(scope)) {
+ // When we can't statically prove we need a TDZ check, we must perform the check on the slow path.
+ result = slot.getValue(exec, ident);
+ if (result == jsTDZValue()) {
+ exec->vm().throwException(exec, createTDZError(exec));
+ return JSValue::encode(jsUndefined());
+ }
+ }
+
+ CommonSlowPaths::tryCacheGetFromScopeGlobal(exec, vm, pc, scope, slot, ident);
+
+ if (!result)
+ result = slot.getValue(exec, ident);
+ return JSValue::encode(result);
+}
+
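+// Slow path for put_to_scope: writes local closure variables directly (touching their
+// watchpoint set), performs the TDZ check for global lexical environments, throws
+// under ThrowIfNotFound when the variable is missing, and tries to cache global stores.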
+void JIT_OPERATION operationPutToScope(ExecState* exec, Instruction* bytecodePC)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+ Instruction* pc = bytecodePC;
+
+ CodeBlock* codeBlock = exec->codeBlock();
+ const Identifier& ident = codeBlock->identifier(pc[2].u.operand);
+ JSObject* scope = jsCast<JSObject*>(exec->uncheckedR(pc[1].u.operand).jsValue());
+ JSValue value = exec->r(pc[3].u.operand).jsValue();
+ GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand);
+
+ // ModuleVar does not keep the scope register value alive in DFG.
+ ASSERT(getPutInfo.resolveType() != ModuleVar);
+
+ if (getPutInfo.resolveType() == LocalClosureVar) {
+ JSLexicalEnvironment* environment = jsCast<JSLexicalEnvironment*>(scope);
+ environment->variableAt(ScopeOffset(pc[6].u.operand)).set(vm, environment, value);
+ if (WatchpointSet* set = pc[5].u.watchpointSet)
+ set->touch("Executed op_put_scope<LocalClosureVar>");
+ return;
+ }
+
+ bool hasProperty = scope->hasProperty(exec, ident);
+ if (hasProperty
+ && jsDynamicCast<JSGlobalLexicalEnvironment*>(scope)
+ && getPutInfo.initializationMode() != Initialization) {
+ // When we can't statically prove we need a TDZ check, we must perform the check on the slow path.
+ PropertySlot slot(scope, PropertySlot::InternalMethodType::Get);
+ JSGlobalLexicalEnvironment::getOwnPropertySlot(scope, exec, ident, slot);
+ if (slot.getValue(exec, ident) == jsTDZValue()) {
+ exec->vm().throwException(exec, createTDZError(exec));
+ return;
+ }
+ }
+
+ if (getPutInfo.resolveMode() == ThrowIfNotFound && !hasProperty) {
+ exec->vm().throwException(exec, createUndefinedVariableError(exec, ident));
+ return;
+ }
+
+ PutPropertySlot slot(scope, codeBlock->isStrictMode(), PutPropertySlot::UnknownContext, getPutInfo.initializationMode() == Initialization);
+ scope->methodTable()->put(scope, exec, ident, value, slot);
+
+ if (exec->vm().exception())
+ return;
+
+ CommonSlowPaths::tryCachePutToScopeGlobal(exec, codeBlock, pc, scope, getPutInfo, slot, ident);
+}
+
+void JIT_OPERATION operationThrow(ExecState* exec, EncodedJSValue encodedExceptionValue)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ JSValue exceptionValue = JSValue::decode(encodedExceptionValue);
+ vm->throwException(exec, exceptionValue);
+
+ // Results stored out-of-band in vm.targetMachinePCForThrow & vm.callFrameForCatch
+ genericUnwind(vm, exec);
+}
+
+void JIT_OPERATION operationFlushWriteBarrierBuffer(ExecState* exec, JSCell* cell)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+ vm->heap.flushWriteBarrierBuffer(cell);
+}
+
+void JIT_OPERATION operationOSRWriteBarrier(ExecState* exec, JSCell* cell)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+ vm->heap.writeBarrier(cell);
+}
+
+// NB: We don't include the value as part of the barrier because the write barrier elision
+// phase in the DFG only tracks whether the object being stored to has been barriered. It
+// would be much more complicated to try to model the value being stored as well.
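+// (For illustration: in a hypothetical store o.f = v, only the cell for 'o' is passed
+// to this operation and barriered; the stored value 'v' is never inspected here.)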
+void JIT_OPERATION operationUnconditionalWriteBarrier(ExecState* exec, JSCell* cell)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+ vm->heap.writeBarrier(cell);
+}
+
+void JIT_OPERATION operationInitGlobalConst(ExecState* exec, Instruction* pc)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ JSValue value = exec->r(pc[2].u.operand).jsValue();
+ pc[1].u.variablePointer->set(*vm, exec->codeBlock()->globalObject(), value);
+}
+
+void JIT_OPERATION lookupExceptionHandler(VM* vm, ExecState* exec)
+{
+ NativeCallFrameTracer tracer(vm, exec);
+ genericUnwind(vm, exec);
+ ASSERT(vm->targetMachinePCForThrow);
+}
+
+void JIT_OPERATION lookupExceptionHandlerFromCallerFrame(VM* vm, ExecState* exec)
+{
+ NativeCallFrameTracer tracer(vm, exec);
+ genericUnwind(vm, exec, UnwindFromCallerFrame);
+ ASSERT(vm->targetMachinePCForThrow);
+}
+
+void JIT_OPERATION operationVMHandleException(ExecState* exec)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+ genericUnwind(vm, exec);
+}
+
+// This function "should" just take the ExecState*, but doing so would make it more difficult
+// to call from exception check sites. So, unlike all of our other functions, we allow
+// ourselves to play some gnarly ABI tricks just to simplify the calling convention. This is
+// particularly safe here since this is never called on the critical path - it's only for
+// testing.
+void JIT_OPERATION operationExceptionFuzz(ExecState* exec)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+#if COMPILER(GCC_OR_CLANG)
+ void* returnPC = __builtin_return_address(0);
+ doExceptionFuzzing(exec, "JITOperations", returnPC);
+#endif // COMPILER(GCC_OR_CLANG)
+}
+
+EncodedJSValue JIT_OPERATION operationHasGenericProperty(ExecState* exec, EncodedJSValue encodedBaseValue, JSCell* propertyName)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+ JSValue baseValue = JSValue::decode(encodedBaseValue);
+ if (baseValue.isUndefinedOrNull())
+ return JSValue::encode(jsBoolean(false));
+
+ JSObject* base = baseValue.toObject(exec);
+ return JSValue::encode(jsBoolean(base->hasProperty(exec, asString(propertyName)->toIdentifier(exec))));
+}
+
+EncodedJSValue JIT_OPERATION operationHasIndexedProperty(ExecState* exec, JSCell* baseCell, int32_t subscript)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+ JSObject* object = baseCell->toObject(exec, exec->lexicalGlobalObject());
+ return JSValue::encode(jsBoolean(object->hasProperty(exec, subscript)));
+}
+
+JSCell* JIT_OPERATION operationGetPropertyEnumerator(ExecState* exec, JSCell* cell)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ JSObject* base = cell->toObject(exec, exec->lexicalGlobalObject());
+
+ return propertyNameEnumerator(exec, base);
+}
+
+EncodedJSValue JIT_OPERATION operationNextEnumeratorPname(ExecState* exec, JSCell* enumeratorCell, int32_t index)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+ JSPropertyNameEnumerator* enumerator = jsCast<JSPropertyNameEnumerator*>(enumeratorCell);
+ JSString* propertyName = enumerator->propertyNameAtIndex(index);
+ return JSValue::encode(propertyName ? propertyName : jsNull());
+}
+
+JSCell* JIT_OPERATION operationToIndexString(ExecState* exec, int32_t index)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+ return jsString(exec, Identifier::from(exec, index).string());
+}
+
+void JIT_OPERATION operationProcessTypeProfilerLog(ExecState* exec)
+{
+ exec->vm().typeProfilerLog()->processLogEntries(ASCIILiteral("Log Full, called from inside baseline JIT"));
+}
+
+int32_t JIT_OPERATION operationCheckIfExceptionIsUncatchableAndNotifyProfiler(ExecState* exec)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+ RELEASE_ASSERT(!!vm.exception());
+
+ if (LegacyProfiler* profiler = vm.enabledProfiler())
+ profiler->exceptionUnwind(exec);
+
+ if (isTerminatedExecutionException(vm.exception())) {
+ genericUnwind(&vm, exec);
+ return 1;
+    }
+    return 0;
+}
+
+} // extern "C"
+
+// Note: getHostCallReturnValueWithExecState() needs to be placed before the
+// definition of getHostCallReturnValue() below because the Windows build
+// requires it.
+extern "C" EncodedJSValue HOST_CALL_RETURN_VALUE_OPTION getHostCallReturnValueWithExecState(ExecState* exec)
+{
+ if (!exec)
+ return JSValue::encode(JSValue());
+ return JSValue::encode(exec->vm().hostCallReturnValue);
+}
+
+#if COMPILER(GCC_OR_CLANG) && CPU(X86_64)
+asm (
+".globl " SYMBOL_STRING(getHostCallReturnValue) "\n"
+HIDE_SYMBOL(getHostCallReturnValue) "\n"
+SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
+ "lea -8(%rsp), %rdi\n"
+ "jmp " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n"
+);
+
+#elif COMPILER(GCC_OR_CLANG) && CPU(X86)
+asm (
+".text" "\n" \
+".globl " SYMBOL_STRING(getHostCallReturnValue) "\n"
+HIDE_SYMBOL(getHostCallReturnValue) "\n"
+SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
+ "push %ebp\n"
+ "mov %esp, %eax\n"
+ "leal -4(%esp), %esp\n"
+ "push %eax\n"
+ "call " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n"
+ "leal 8(%esp), %esp\n"
+ "pop %ebp\n"
+ "ret\n"
+);
+
+#elif COMPILER(GCC_OR_CLANG) && CPU(ARM_THUMB2)
+asm (
+".text" "\n"
+".align 2" "\n"
+".globl " SYMBOL_STRING(getHostCallReturnValue) "\n"
+HIDE_SYMBOL(getHostCallReturnValue) "\n"
+".thumb" "\n"
+".thumb_func " THUMB_FUNC_PARAM(getHostCallReturnValue) "\n"
+SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
+ "sub r0, sp, #8" "\n"
+ "b " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n"
+);
+
+#elif COMPILER(GCC_OR_CLANG) && CPU(ARM_TRADITIONAL)
+asm (
+".text" "\n"
+".globl " SYMBOL_STRING(getHostCallReturnValue) "\n"
+HIDE_SYMBOL(getHostCallReturnValue) "\n"
+INLINE_ARM_FUNCTION(getHostCallReturnValue)
+SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
+ "sub r0, sp, #8" "\n"
+ "b " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n"
+);
+
+#elif CPU(ARM64)
+asm (
+".text" "\n"
+".align 2" "\n"
+".globl " SYMBOL_STRING(getHostCallReturnValue) "\n"
+HIDE_SYMBOL(getHostCallReturnValue) "\n"
+SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
+ "sub x0, sp, #16" "\n"
+ "b " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n"
+);
+
+#elif COMPILER(GCC_OR_CLANG) && CPU(MIPS)
+
+#if WTF_MIPS_PIC
+#define LOAD_FUNCTION_TO_T9(function) \
+ ".set noreorder" "\n" \
+ ".cpload $25" "\n" \
+ ".set reorder" "\n" \
+ "la $t9, " LOCAL_REFERENCE(function) "\n"
+#else
+#define LOAD_FUNCTION_TO_T9(function) "" "\n"
+#endif
+
+asm (
+".text" "\n"
+".globl " SYMBOL_STRING(getHostCallReturnValue) "\n"
+HIDE_SYMBOL(getHostCallReturnValue) "\n"
+SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
+ LOAD_FUNCTION_TO_T9(getHostCallReturnValueWithExecState)
+ "addi $a0, $sp, -8" "\n"
+ "b " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n"
+);
+
+#elif COMPILER(GCC_OR_CLANG) && CPU(SH4)
+
+#define SH4_SCRATCH_REGISTER "r11"
+
+asm (
+".text" "\n"
+".globl " SYMBOL_STRING(getHostCallReturnValue) "\n"
+HIDE_SYMBOL(getHostCallReturnValue) "\n"
+SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
+ "mov r15, r4" "\n"
+ "add -8, r4" "\n"
+ "mov.l 2f, " SH4_SCRATCH_REGISTER "\n"
+ "braf " SH4_SCRATCH_REGISTER "\n"
+ "nop" "\n"
+ "1: .balign 4" "\n"
+ "2: .long " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "-1b\n"
+);
+
+#elif COMPILER(MSVC) && CPU(X86)
+extern "C" {
+ __declspec(naked) EncodedJSValue HOST_CALL_RETURN_VALUE_OPTION getHostCallReturnValue()
+ {
+ __asm lea eax, [esp - 4]
+ __asm mov [esp + 4], eax;
+ __asm jmp getHostCallReturnValueWithExecState
+ }
+}
+#endif
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITOperations.h b/Source/JavaScriptCore/jit/JITOperations.h
new file mode 100644
index 000000000..cacfbcb20
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITOperations.h
@@ -0,0 +1,393 @@
+/*
+ * Copyright (C) 2013-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITOperations_h
+#define JITOperations_h
+
+#if ENABLE(JIT)
+
+#include "MacroAssemblerCodeRef.h"
+#include "PropertyOffset.h"
+#include "SlowPathReturnType.h"
+#include "TypedArrayType.h"
+#include <wtf/text/UniquedStringImpl.h>
+
+namespace JSC {
+
+class ArrayAllocationProfile;
+class ArrayProfile;
+class CallLinkInfo;
+class CodeBlock;
+class ExecState;
+class JSArray;
+class JSFunction;
+class JSLexicalEnvironment;
+class JSScope;
+class Register;
+class StructureStubInfo;
+class SymbolTable;
+class WatchpointSet;
+
+struct ByValInfo;
+struct InlineCallFrame;
+
+typedef ExecState CallFrame;
+
+#if CALLING_CONVENTION_IS_STDCALL
+#define JIT_OPERATION CDECL
+#else
+#define JIT_OPERATION
+#endif
+
+extern "C" {
+
+typedef char* UnusedPtr;
+
+// These typedefs provide typechecking when generating calls out to helper routines;
+// this helps prevent calling a helper routine with the wrong arguments!
+/*
+ Key:
+ A: JSArray*
+ Aap: ArrayAllocationProfile*
+ Ap: ArrayProfile*
+ By: ByValInfo*
+ C: JSCell*
+ Cb: CodeBlock*
+ Cli: CallLinkInfo*
+ D: double
+ E: ExecState*
+ F: CallFrame*
+ G: JSGlobalObject*
+ I: UniquedStringImpl*
+ Icf: InlineCallFrame*
+ Idc: const Identifier*
+ J: EncodedJSValue
+ Jcp: const JSValue*
+ Jsc: JSScope*
+ Jsf: JSFunction*
+ Jss: JSString*
+ L: JSLexicalEnvironment*
+ O: JSObject*
+ P: pointer (char*)
+ Pc: Instruction* i.e. bytecode PC
+ Q: int64_t
+ R: Register
+ S: size_t
+ Sprt: SlowPathReturnType
+ Ssi: StructureStubInfo*
+ St: Structure*
+ Symtab: SymbolTable*
+ T: StringImpl*
+ V: void
+ Vm: VM*
+ Ws: WatchpointSet*
+ Z: int32_t
+ Ui: uint32_t
+*/
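+// Example of how to read the scheme: J_JITOperation_ECJ names a helper that returns
+// EncodedJSValue (the leading J) and takes (ExecState*, JSCell*, EncodedJSValue),
+// i.e. E, C, J, as declared below.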
+
+typedef CallFrame* JIT_OPERATION (*F_JITOperation_EFJZZ)(ExecState*, CallFrame*, EncodedJSValue, int32_t, int32_t);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_E)(ExecState*);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EA)(ExecState*, JSArray*);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EAZ)(ExecState*, JSArray*, int32_t);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EAapJ)(ExecState*, ArrayAllocationProfile*, EncodedJSValue);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EAapJcpZ)(ExecState*, ArrayAllocationProfile*, const JSValue*, int32_t);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EC)(ExecState*, JSCell*);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_ECC)(ExecState*, JSCell*, JSCell*);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_ECI)(ExecState*, JSCell*, UniquedStringImpl*);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_ECJ)(ExecState*, JSCell*, EncodedJSValue);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_ECZ)(ExecState*, JSCell*, int32_t);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EDA)(ExecState*, double, JSArray*);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EE)(ExecState*, ExecState*);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EI)(ExecState*, UniquedStringImpl*);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJ)(ExecState*, EncodedJSValue);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJZ)(ExecState*, EncodedJSValue, int32_t);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJC)(ExecState*, EncodedJSValue, JSCell*);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJA)(ExecState*, EncodedJSValue, JSArray*);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJI)(ExecState*, EncodedJSValue, UniquedStringImpl*);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJIdc)(ExecState*, EncodedJSValue, const Identifier*);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJJ)(ExecState*, EncodedJSValue, EncodedJSValue);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJJAp)(ExecState*, EncodedJSValue, EncodedJSValue, ArrayProfile*);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJJBy)(ExecState*, EncodedJSValue, EncodedJSValue, ByValInfo*);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJssZ)(ExecState*, JSString*, int32_t);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJP)(ExecState*, EncodedJSValue, void*);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EP)(ExecState*, void*);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EPP)(ExecState*, void*, void*);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EPS)(ExecState*, void*, size_t);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EPc)(ExecState*, Instruction*);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJscC)(ExecState*, JSScope*, JSCell*);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJscCJ)(ExecState*, JSScope*, JSCell*, EncodedJSValue);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_ESS)(ExecState*, size_t, size_t);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_ESsiCI)(ExecState*, StructureStubInfo*, JSCell*, UniquedStringImpl*);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_ESsiJI)(ExecState*, StructureStubInfo*, EncodedJSValue, UniquedStringImpl*);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EZ)(ExecState*, int32_t);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EZIcfZ)(ExecState*, int32_t, InlineCallFrame*, int32_t);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EZZ)(ExecState*, int32_t, int32_t);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EZSymtabJ)(ExecState*, int32_t, SymbolTable*, EncodedJSValue);
+typedef JSCell* JIT_OPERATION (*C_JITOperation_E)(ExecState*);
+typedef JSCell* JIT_OPERATION (*C_JITOperation_EZ)(ExecState*, int32_t);
+typedef JSCell* JIT_OPERATION (*C_JITOperation_EC)(ExecState*, JSCell*);
+typedef JSCell* JIT_OPERATION (*C_JITOperation_ECZ)(ExecState*, JSCell*, int32_t);
+typedef JSCell* JIT_OPERATION (*C_JITOperation_ECZC)(ExecState*, JSCell*, int32_t, JSCell*);
+typedef JSCell* JIT_OPERATION (*C_JITOperation_ECC)(ExecState*, JSCell*, JSCell*);
+typedef JSCell* JIT_OPERATION (*C_JITOperation_EGC)(ExecState*, JSGlobalObject*, JSCell*);
+typedef JSCell* JIT_OPERATION (*C_JITOperation_EIcf)(ExecState*, InlineCallFrame*);
+typedef JSCell* JIT_OPERATION (*C_JITOperation_EJ)(ExecState*, EncodedJSValue);
+typedef JSCell* JIT_OPERATION (*C_JITOperation_EJsc)(ExecState*, JSScope*);
+typedef JSCell* JIT_OPERATION (*C_JITOperation_EJscC)(ExecState*, JSScope*, JSCell*);
+typedef JSCell* JIT_OPERATION (*C_JITOperation_EJZ)(ExecState*, EncodedJSValue, int32_t);
+typedef JSCell* JIT_OPERATION (*C_JITOperation_EJZC)(ExecState*, EncodedJSValue, int32_t, JSCell*);
+typedef JSCell* JIT_OPERATION (*C_JITOperation_EJJ)(ExecState*, EncodedJSValue, EncodedJSValue);
+typedef JSCell* JIT_OPERATION (*C_JITOperation_EJJC)(ExecState*, EncodedJSValue, EncodedJSValue, JSCell*);
+typedef JSCell* JIT_OPERATION (*C_JITOperation_EJJJ)(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue);
+typedef JSCell* JIT_OPERATION (*C_JITOperation_EJscZ)(ExecState*, JSScope*, int32_t);
+typedef JSCell* JIT_OPERATION (*C_JITOperation_EJssSt)(ExecState*, JSString*, Structure*);
+typedef JSCell* JIT_OPERATION (*C_JITOperation_EJssJss)(ExecState*, JSString*, JSString*);
+typedef JSCell* JIT_OPERATION (*C_JITOperation_EJssJssJss)(ExecState*, JSString*, JSString*, JSString*);
+typedef JSCell* JIT_OPERATION (*C_JITOperation_EL)(ExecState*, JSLexicalEnvironment*);
+typedef JSCell* JIT_OPERATION (*C_JITOperation_EO)(ExecState*, JSObject*);
+typedef JSCell* JIT_OPERATION (*C_JITOperation_EOZ)(ExecState*, JSObject*, int32_t);
+typedef JSCell* JIT_OPERATION (*C_JITOperation_ESt)(ExecState*, Structure*);
+typedef JSCell* JIT_OPERATION (*C_JITOperation_EStJscSymtabJ)(ExecState*, Structure*, JSScope*, SymbolTable*, EncodedJSValue);
+typedef JSCell* JIT_OPERATION (*C_JITOperation_EStRZJsfL)(ExecState*, Structure*, Register*, int32_t, JSFunction*, JSLexicalEnvironment*);
+typedef JSCell* JIT_OPERATION (*C_JITOperation_EStRZJsf)(ExecState*, Structure*, Register*, int32_t, JSFunction*);
+typedef JSCell* JIT_OPERATION (*C_JITOperation_EStZ)(ExecState*, Structure*, int32_t);
+typedef JSCell* JIT_OPERATION (*C_JITOperation_EStZZ)(ExecState*, Structure*, int32_t, int32_t);
+typedef JSCell* JIT_OPERATION (*C_JITOperation_EZ)(ExecState*, int32_t);
+typedef double JIT_OPERATION (*D_JITOperation_D)(double);
+typedef double JIT_OPERATION (*D_JITOperation_G)(JSGlobalObject*);
+typedef double JIT_OPERATION (*D_JITOperation_DD)(double, double);
+typedef double JIT_OPERATION (*D_JITOperation_ZZ)(int32_t, int32_t);
+typedef double JIT_OPERATION (*D_JITOperation_EJ)(ExecState*, EncodedJSValue);
+typedef int64_t JIT_OPERATION (*Q_JITOperation_J)(EncodedJSValue);
+typedef int64_t JIT_OPERATION (*Q_JITOperation_D)(double);
+typedef int32_t JIT_OPERATION (*Z_JITOperation_D)(double);
+typedef int32_t JIT_OPERATION (*Z_JITOperation_E)(ExecState*);
+typedef int32_t JIT_OPERATION (*Z_JITOperation_EC)(ExecState*, JSCell*);
+typedef int32_t JIT_OPERATION (*Z_JITOperation_EGC)(ExecState*, JSGlobalObject*, JSCell*);
+typedef int32_t JIT_OPERATION (*Z_JITOperation_ESJss)(ExecState*, size_t, JSString*);
+typedef int32_t JIT_OPERATION (*Z_JITOperation_EJ)(ExecState*, EncodedJSValue);
+typedef int32_t JIT_OPERATION (*Z_JITOperation_EJOJ)(ExecState*, EncodedJSValue, JSObject*, EncodedJSValue);
+typedef int32_t JIT_OPERATION (*Z_JITOperation_EJZ)(ExecState*, EncodedJSValue, int32_t);
+typedef int32_t JIT_OPERATION (*Z_JITOperation_EJZZ)(ExecState*, EncodedJSValue, int32_t, int32_t);
+typedef size_t JIT_OPERATION (*S_JITOperation_ECC)(ExecState*, JSCell*, JSCell*);
+typedef size_t JIT_OPERATION (*S_JITOperation_EGC)(ExecState*, JSGlobalObject*, JSCell*);
+typedef size_t JIT_OPERATION (*S_JITOperation_EJ)(ExecState*, EncodedJSValue);
+typedef size_t JIT_OPERATION (*S_JITOperation_EJJ)(ExecState*, EncodedJSValue, EncodedJSValue);
+typedef size_t JIT_OPERATION (*S_JITOperation_EOJss)(ExecState*, JSObject*, JSString*);
+typedef size_t JIT_OPERATION (*S_JITOperation_J)(EncodedJSValue);
+typedef SlowPathReturnType JIT_OPERATION (*Sprt_JITOperation_EZ)(ExecState*, int32_t);
+typedef void JIT_OPERATION (*V_JITOperation)();
+typedef void JIT_OPERATION (*V_JITOperation_E)(ExecState*);
+typedef void JIT_OPERATION (*V_JITOperation_EC)(ExecState*, JSCell*);
+typedef void JIT_OPERATION (*V_JITOperation_ECb)(ExecState*, CodeBlock*);
+typedef void JIT_OPERATION (*V_JITOperation_ECC)(ExecState*, JSCell*, JSCell*);
+typedef void JIT_OPERATION (*V_JITOperation_ECIcf)(ExecState*, JSCell*, InlineCallFrame*);
+typedef void JIT_OPERATION (*V_JITOperation_ECIZC)(ExecState*, JSCell*, UniquedStringImpl*, int32_t, JSCell*);
+typedef void JIT_OPERATION (*V_JITOperation_ECIZCC)(ExecState*, JSCell*, UniquedStringImpl*, int32_t, JSCell*, JSCell*);
+typedef void JIT_OPERATION (*V_JITOperation_ECIZJJ)(ExecState*, JSCell*, UniquedStringImpl*, int32_t, EncodedJSValue, EncodedJSValue);
+typedef void JIT_OPERATION (*V_JITOperation_ECJZC)(ExecState*, JSCell*, EncodedJSValue, int32_t, JSCell*);
+typedef void JIT_OPERATION (*V_JITOperation_ECCIcf)(ExecState*, JSCell*, JSCell*, InlineCallFrame*);
+typedef void JIT_OPERATION (*V_JITOperation_ECJJ)(ExecState*, JSCell*, EncodedJSValue, EncodedJSValue);
+typedef void JIT_OPERATION (*V_JITOperation_ECPSPS)(ExecState*, JSCell*, void*, size_t, void*, size_t);
+typedef void JIT_OPERATION (*V_JITOperation_ECZ)(ExecState*, JSCell*, int32_t);
+typedef void JIT_OPERATION (*V_JITOperation_EZSymtabJ)(ExecState*, int32_t, SymbolTable*, EncodedJSValue);
+typedef void JIT_OPERATION (*V_JITOperation_EJ)(ExecState*, EncodedJSValue);
+typedef void JIT_OPERATION (*V_JITOperation_EJCI)(ExecState*, EncodedJSValue, JSCell*, UniquedStringImpl*);
+typedef void JIT_OPERATION (*V_JITOperation_EJJJ)(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue);
+typedef void JIT_OPERATION (*V_JITOperation_EJJJAp)(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue, ArrayProfile*);
+typedef void JIT_OPERATION (*V_JITOperation_EJJJBy)(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue, ByValInfo*);
+typedef void JIT_OPERATION (*V_JITOperation_EJPP)(ExecState*, EncodedJSValue, void*, void*);
+typedef void JIT_OPERATION (*V_JITOperation_EJZJ)(ExecState*, EncodedJSValue, int32_t, EncodedJSValue);
+typedef void JIT_OPERATION (*V_JITOperation_EJZ)(ExecState*, EncodedJSValue, int32_t);
+typedef void JIT_OPERATION (*V_JITOperation_EOZD)(ExecState*, JSObject*, int32_t, double);
+typedef void JIT_OPERATION (*V_JITOperation_EOZJ)(ExecState*, JSObject*, int32_t, EncodedJSValue);
+typedef void JIT_OPERATION (*V_JITOperation_EPc)(ExecState*, Instruction*);
+typedef void JIT_OPERATION (*V_JITOperation_EPZJ)(ExecState*, void*, int32_t, EncodedJSValue);
+typedef void JIT_OPERATION (*V_JITOperation_ESsiJJI)(ExecState*, StructureStubInfo*, EncodedJSValue, EncodedJSValue, UniquedStringImpl*);
+typedef void JIT_OPERATION (*V_JITOperation_EWs)(ExecState*, WatchpointSet*);
+typedef void JIT_OPERATION (*V_JITOperation_EZ)(ExecState*, int32_t);
+typedef void JIT_OPERATION (*V_JITOperation_EZJ)(ExecState*, int32_t, EncodedJSValue);
+typedef void JIT_OPERATION (*V_JITOperation_EZJZZZ)(ExecState*, int32_t, EncodedJSValue, int32_t, int32_t, int32_t);
+typedef void JIT_OPERATION (*V_JITOperation_EVm)(ExecState*, VM*);
+typedef void JIT_OPERATION (*V_JITOperation_J)(EncodedJSValue);
+typedef void JIT_OPERATION (*V_JITOperation_Z)(int32_t);
+typedef void JIT_OPERATION (*V_JITOperation_ECRUiUi)(ExecState*, JSCell*, Register*, uint32_t, uint32_t);
+typedef char* JIT_OPERATION (*P_JITOperation_E)(ExecState*);
+typedef char* JIT_OPERATION (*P_JITOperation_EC)(ExecState*, JSCell*);
+typedef char* JIT_OPERATION (*P_JITOperation_ECli)(ExecState*, CallLinkInfo*);
+typedef char* JIT_OPERATION (*P_JITOperation_EJS)(ExecState*, EncodedJSValue, size_t);
+typedef char* JIT_OPERATION (*P_JITOperation_EO)(ExecState*, JSObject*);
+typedef char* JIT_OPERATION (*P_JITOperation_EOS)(ExecState*, JSObject*, size_t);
+typedef char* JIT_OPERATION (*P_JITOperation_EOZ)(ExecState*, JSObject*, int32_t);
+typedef char* JIT_OPERATION (*P_JITOperation_EPS)(ExecState*, void*, size_t);
+typedef char* JIT_OPERATION (*P_JITOperation_ES)(ExecState*, size_t);
+typedef char* JIT_OPERATION (*P_JITOperation_ESJss)(ExecState*, size_t, JSString*);
+typedef char* JIT_OPERATION (*P_JITOperation_ESt)(ExecState*, Structure*);
+typedef char* JIT_OPERATION (*P_JITOperation_EStJ)(ExecState*, Structure*, EncodedJSValue);
+typedef char* JIT_OPERATION (*P_JITOperation_EStPS)(ExecState*, Structure*, void*, size_t);
+typedef char* JIT_OPERATION (*P_JITOperation_EStSS)(ExecState*, Structure*, size_t, size_t);
+typedef char* JIT_OPERATION (*P_JITOperation_EStZ)(ExecState*, Structure*, int32_t);
+typedef char* JIT_OPERATION (*P_JITOperation_EZZ)(ExecState*, int32_t, int32_t);
+typedef SlowPathReturnType JIT_OPERATION (*Sprt_JITOperation_ECli)(ExecState*, CallLinkInfo*);
+typedef StringImpl* JIT_OPERATION (*T_JITOperation_EJss)(ExecState*, JSString*);
+typedef JSString* JIT_OPERATION (*Jss_JITOperation_EZ)(ExecState*, int32_t);
+typedef JSString* JIT_OPERATION (*Jss_JITOperation_EJJJ)(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue);
+
+// This function is used to look up an exception handler, keyed by faultLocation, which is
+// the return location from one of the calls out to one of the helper operations above.
+
+void JIT_OPERATION lookupExceptionHandler(VM*, ExecState*) WTF_INTERNAL;
+void JIT_OPERATION lookupExceptionHandlerFromCallerFrame(VM*, ExecState*) WTF_INTERNAL;
+void JIT_OPERATION operationVMHandleException(ExecState*) WTF_INTERNAL;
+
+void JIT_OPERATION operationThrowStackOverflowError(ExecState*, CodeBlock*) WTF_INTERNAL;
+#if ENABLE(WEBASSEMBLY)
+void JIT_OPERATION operationThrowDivideError(ExecState*) WTF_INTERNAL;
+void JIT_OPERATION operationThrowOutOfBoundsAccessError(ExecState*) WTF_INTERNAL;
+#endif
+int32_t JIT_OPERATION operationCallArityCheck(ExecState*) WTF_INTERNAL;
+int32_t JIT_OPERATION operationConstructArityCheck(ExecState*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationGetById(ExecState*, StructureStubInfo*, EncodedJSValue, UniquedStringImpl*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationGetByIdGeneric(ExecState*, EncodedJSValue, UniquedStringImpl*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationGetByIdBuildList(ExecState*, StructureStubInfo*, EncodedJSValue, UniquedStringImpl*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationGetByIdOptimize(ExecState*, StructureStubInfo*, EncodedJSValue, UniquedStringImpl*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationInOptimize(ExecState*, StructureStubInfo*, JSCell*, UniquedStringImpl*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationIn(ExecState*, StructureStubInfo*, JSCell*, UniquedStringImpl*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationGenericIn(ExecState*, JSCell*, EncodedJSValue) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdStrict(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdNonStrict(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdDirectStrict(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdDirectNonStrict(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdStrictOptimize(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdNonStrictOptimize(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdDirectStrictOptimize(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdDirectNonStrictOptimize(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdStrictBuildList(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdNonStrictBuildList(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdDirectStrictBuildList(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdDirectNonStrictBuildList(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationReallocateStorageAndFinishPut(ExecState*, JSObject*, Structure*, PropertyOffset, EncodedJSValue) WTF_INTERNAL;
+void JIT_OPERATION operationPutByValOptimize(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue, ByValInfo*) WTF_INTERNAL;
+void JIT_OPERATION operationDirectPutByValOptimize(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue, ByValInfo*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByValGeneric(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue, ByValInfo*) WTF_INTERNAL;
+void JIT_OPERATION operationDirectPutByValGeneric(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue, ByValInfo*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationCallEval(ExecState*, ExecState*) WTF_INTERNAL;
+SlowPathReturnType JIT_OPERATION operationLinkCall(ExecState*, CallLinkInfo*) WTF_INTERNAL;
+SlowPathReturnType JIT_OPERATION operationLinkPolymorphicCall(ExecState*, CallLinkInfo*) WTF_INTERNAL;
+SlowPathReturnType JIT_OPERATION operationVirtualCall(ExecState*, CallLinkInfo*) WTF_INTERNAL;
+
+size_t JIT_OPERATION operationCompareLess(ExecState*, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
+size_t JIT_OPERATION operationCompareLessEq(ExecState*, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
+size_t JIT_OPERATION operationCompareGreater(ExecState*, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
+size_t JIT_OPERATION operationCompareGreaterEq(ExecState*, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
+size_t JIT_OPERATION operationConvertJSValueToBoolean(ExecState*, EncodedJSValue) WTF_INTERNAL;
+size_t JIT_OPERATION operationCompareEq(ExecState*, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
+#if USE(JSVALUE64)
+EncodedJSValue JIT_OPERATION operationCompareStringEq(ExecState*, JSCell* left, JSCell* right) WTF_INTERNAL;
+#else
+size_t JIT_OPERATION operationCompareStringEq(ExecState*, JSCell* left, JSCell* right) WTF_INTERNAL;
+#endif
+size_t JIT_OPERATION operationHasProperty(ExecState*, JSObject*, JSString*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationNewArrayWithProfile(ExecState*, ArrayAllocationProfile*, const JSValue* values, int32_t size) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationNewArrayBufferWithProfile(ExecState*, ArrayAllocationProfile*, const JSValue* values, int32_t size) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationNewArrayWithSizeAndProfile(ExecState*, ArrayAllocationProfile*, EncodedJSValue size) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationNewFunction(ExecState*, JSScope*, JSCell*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationNewFunctionWithInvalidatedReallocationWatchpoint(ExecState*, JSScope*, JSCell*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationNewGeneratorFunction(ExecState*, JSScope*, JSCell*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationNewGeneratorFunctionWithInvalidatedReallocationWatchpoint(ExecState*, JSScope*, JSCell*) WTF_INTERNAL;
+JSCell* JIT_OPERATION operationNewObject(ExecState*, Structure*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationNewRegexp(ExecState*, void*) WTF_INTERNAL;
+UnusedPtr JIT_OPERATION operationHandleWatchdogTimer(ExecState*) WTF_INTERNAL;
+void JIT_OPERATION operationThrowStaticError(ExecState*, EncodedJSValue, int32_t) WTF_INTERNAL;
+void JIT_OPERATION operationThrow(ExecState*, EncodedJSValue) WTF_INTERNAL;
+void JIT_OPERATION operationDebug(ExecState*, int32_t) WTF_INTERNAL;
+#if ENABLE(DFG_JIT)
+SlowPathReturnType JIT_OPERATION operationOptimize(ExecState*, int32_t) WTF_INTERNAL;
+#endif
+void JIT_OPERATION operationPutByIndex(ExecState*, EncodedJSValue, int32_t, EncodedJSValue);
+void JIT_OPERATION operationPutGetterById(ExecState*, JSCell*, UniquedStringImpl*, int32_t options, JSCell*) WTF_INTERNAL;
+void JIT_OPERATION operationPutSetterById(ExecState*, JSCell*, UniquedStringImpl*, int32_t options, JSCell*) WTF_INTERNAL;
+void JIT_OPERATION operationPutGetterByVal(ExecState*, JSCell*, EncodedJSValue, int32_t attribute, JSCell*) WTF_INTERNAL;
+void JIT_OPERATION operationPutSetterByVal(ExecState*, JSCell*, EncodedJSValue, int32_t attribute, JSCell*) WTF_INTERNAL;
+#if USE(JSVALUE64)
+void JIT_OPERATION operationPutGetterSetter(ExecState*, JSCell*, UniquedStringImpl*, int32_t attribute, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
+#else
+void JIT_OPERATION operationPutGetterSetter(ExecState*, JSCell*, UniquedStringImpl*, int32_t attribute, JSCell*, JSCell*) WTF_INTERNAL;
+#endif
+void JIT_OPERATION operationPushFunctionNameScope(ExecState*, int32_t, SymbolTable*, EncodedJSValue) WTF_INTERNAL;
+void JIT_OPERATION operationPopScope(ExecState*, int32_t) WTF_INTERNAL;
+void JIT_OPERATION operationProfileDidCall(ExecState*, EncodedJSValue) WTF_INTERNAL;
+void JIT_OPERATION operationProfileWillCall(ExecState*, EncodedJSValue) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationGetByValOptimize(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationGetByValGeneric(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationGetByValString(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationHasIndexedPropertyDefault(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationHasIndexedPropertyGeneric(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationDeleteById(ExecState*, EncodedJSValue base, const Identifier*) WTF_INTERNAL;
+JSCell* JIT_OPERATION operationGetPNames(ExecState*, JSObject*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationInstanceOf(ExecState*, EncodedJSValue, EncodedJSValue proto) WTF_INTERNAL;
+int32_t JIT_OPERATION operationSizeFrameForVarargs(ExecState*, EncodedJSValue arguments, int32_t numUsedStackSlots, int32_t firstVarArgOffset) WTF_INTERNAL;
+CallFrame* JIT_OPERATION operationSetupVarargsFrame(ExecState*, CallFrame*, EncodedJSValue arguments, int32_t firstVarArgOffset, int32_t length) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationToObject(ExecState*, EncodedJSValue) WTF_INTERNAL;
+
+char* JIT_OPERATION operationSwitchCharWithUnknownKeyType(ExecState*, EncodedJSValue key, size_t tableIndex) WTF_INTERNAL;
+char* JIT_OPERATION operationSwitchImmWithUnknownKeyType(ExecState*, EncodedJSValue key, size_t tableIndex) WTF_INTERNAL;
+char* JIT_OPERATION operationSwitchStringWithUnknownKeyType(ExecState*, EncodedJSValue key, size_t tableIndex) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationGetFromScope(ExecState*, Instruction* bytecodePC) WTF_INTERNAL;
+void JIT_OPERATION operationPutToScope(ExecState*, Instruction* bytecodePC) WTF_INTERNAL;
+
+void JIT_OPERATION operationFlushWriteBarrierBuffer(ExecState*, JSCell*);
+void JIT_OPERATION operationWriteBarrier(ExecState*, JSCell*, JSCell*);
+void JIT_OPERATION operationUnconditionalWriteBarrier(ExecState*, JSCell*);
+void JIT_OPERATION operationOSRWriteBarrier(ExecState*, JSCell*);
+
+void JIT_OPERATION operationInitGlobalConst(ExecState*, Instruction*);
+
+void JIT_OPERATION operationExceptionFuzz(ExecState*);
+
+int32_t JIT_OPERATION operationCheckIfExceptionIsUncatchableAndNotifyProfiler(ExecState*);
+int32_t JIT_OPERATION operationInstanceOfCustom(ExecState*, EncodedJSValue encodedValue, JSObject* constructor, EncodedJSValue encodedHasInstance) WTF_INTERNAL;
+
+EncodedJSValue JIT_OPERATION operationHasGenericProperty(ExecState*, EncodedJSValue, JSCell*);
+EncodedJSValue JIT_OPERATION operationHasIndexedProperty(ExecState*, JSCell*, int32_t);
+JSCell* JIT_OPERATION operationGetPropertyEnumerator(ExecState*, JSCell*);
+EncodedJSValue JIT_OPERATION operationNextEnumeratorPname(ExecState*, JSCell*, int32_t);
+JSCell* JIT_OPERATION operationToIndexString(ExecState*, int32_t);
+
+void JIT_OPERATION operationProcessTypeProfilerLog(ExecState*) WTF_INTERNAL;
+
+} // extern "C"
+
+} // namespace JSC
+
+#else // ENABLE(JIT)
+
+#define JIT_OPERATION
+
+#endif // ENABLE(JIT)
+
+#endif // JITOperations_h
+
diff --git a/Source/JavaScriptCore/jit/JITOperationsMSVC64.cpp b/Source/JavaScriptCore/jit/JITOperationsMSVC64.cpp
new file mode 100644
index 000000000..544bca394
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITOperationsMSVC64.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if !ENABLE(JIT) && COMPILER(MSVC) && CPU(X86_64)
+
+#include "CallFrame.h"
+#include "JSCJSValue.h"
+#include "JSCInlines.h"
+
+namespace JSC {
+
+// FIXME: The following is a workaround that is only needed because JITStubsMSVC64.asm
+// is built unconditionally even when the JIT is disabled, and it references this function.
+// We only need to provide a stub to satisfy the linkage. It will never be called.
+extern "C" EncodedJSValue getHostCallReturnValueWithExecState(ExecState*)
+{
+ return JSValue::encode(JSValue());
+}
+
+} // namespace JSC
+
+#endif // !ENABLE(JIT) && COMPILER(MSVC) && CPU(X86_64)
diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
index 2d1b2929d..3781c1df2 100644
--- a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
+++ b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009, 2014, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,35 +29,34 @@
#include "JIT.h"
#include "CodeBlock.h"
+#include "DirectArguments.h"
#include "GCAwareJITStubRoutine.h"
#include "GetterSetter.h"
#include "Interpreter.h"
#include "JITInlines.h"
-#include "JITStubCall.h"
#include "JSArray.h"
+#include "JSEnvironmentRecord.h"
#include "JSFunction.h"
-#include "JSPropertyNameIterator.h"
-#include "JSVariableObject.h"
#include "LinkBuffer.h"
-#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
+#include "ScopedArguments.h"
+#include "ScopedArgumentsTable.h"
+#include "SlowPathCall.h"
#include <wtf/StringPrintStream.h>
-#ifndef NDEBUG
-#include <stdio.h>
-#endif
-
-using namespace std;
namespace JSC {
#if USE(JSVALUE64)
JIT::CodeRef JIT::stringGetByValStubGenerator(VM* vm)
{
- JSInterfaceJIT jit;
+ JSInterfaceJIT jit(vm);
JumpList failures;
- failures.append(jit.branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(vm->stringStructure.get())));
+ failures.append(jit.branchStructure(
+ NotEqual,
+ Address(regT0, JSCell::structureIDOffset()),
+ vm->stringStructure.get()));
// Load string length to regT2, and start the process of loading the data pointer into regT0
jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT2);
@@ -89,31 +88,34 @@ JIT::CodeRef JIT::stringGetByValStubGenerator(VM* vm)
jit.move(TrustedImm32(0), regT0);
jit.ret();
- LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
return FINALIZE_CODE(patchBuffer, ("String get_by_val stub"));
}
void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int property = currentInstruction[3].u.operand;
ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
-
+ ByValInfo* byValInfo = m_codeBlock->addByValInfo();
+
emitGetVirtualRegisters(base, regT0, property, regT1);
- emitJumpSlowCaseIfNotImmediateInteger(regT1);
+
+ emitJumpSlowCaseIfNotJSCell(regT0, base);
+
+ PatchableJump notIndex = emitPatchableJumpIfNotInt(regT1);
+ addSlowCase(notIndex);
// This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
// We check the value as if it was a uint32 against the m_vectorLength - which will always fail if
// number was signed since m_vectorLength is always less than intmax (since the total allocation
- // size is always less than 4Gb). As such zero extending wil have been correct (and extending the value
- // to 64-bits is necessary since it's used in the address calculation. We zero extend rather than sign
+ // size is always less than 4Gb). As such zero extending will have been correct (and extending the value
+ // to 64-bits is necessary since it's used in the address calculation). We zero extend rather than sign
// extending since it makes it easier to re-tag the value in the slow case.
zeroExtend32ToPtr(regT1, regT1);
- emitJumpSlowCaseIfNotJSCell(regT0, base);
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- emitArrayProfilingSite(regT2, regT3, profile);
+ emitArrayProfilingSiteWithCell(regT0, regT2, profile);
and32(TrustedImm32(IndexingShapeMask), regT2);
PatchableJump badType;
@@ -143,39 +145,41 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
Label done = label();
-#if !ASSERT_DISABLED
- Jump resultOK = branchTest64(NonZero, regT0);
- breakpoint();
- resultOK.link(this);
-#endif
+ if (!ASSERT_DISABLED) {
+ Jump resultOK = branchTest64(NonZero, regT0);
+ abortWithReason(JITGetByValResultIsNotEmpty);
+ resultOK.link(this);
+ }
emitValueProfilingSite();
emitPutVirtualRegister(dst);
-
- m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
+
+ Label nextHotPath = label();
+
+ m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, notIndex, badType, mode, profile, done, nextHotPath));
}
-JIT::JumpList JIT::emitDoubleGetByVal(Instruction*, PatchableJump& badType)
+JIT::JumpList JIT::emitDoubleLoad(Instruction*, PatchableJump& badType)
{
JumpList slowCases;
badType = patchableBranch32(NotEqual, regT2, TrustedImm32(DoubleShape));
loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
+ slowCases.append(branchIfNotToSpace(regT2));
slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength())));
loadDouble(BaseIndex(regT2, regT1, TimesEight), fpRegT0);
slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0));
- moveDoubleTo64(fpRegT0, regT0);
- sub64(tagTypeNumberRegister, regT0);
return slowCases;
}
-JIT::JumpList JIT::emitContiguousGetByVal(Instruction*, PatchableJump& badType, IndexingType expectedShape)
+JIT::JumpList JIT::emitContiguousLoad(Instruction*, PatchableJump& badType, IndexingType expectedShape)
{
JumpList slowCases;
badType = patchableBranch32(NotEqual, regT2, TrustedImm32(expectedShape));
loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
+ slowCases.append(branchIfNotToSpace(regT2));
slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength())));
load64(BaseIndex(regT2, regT1, TimesEight), regT0);
slowCases.append(branchTest64(Zero, regT0));
@@ -183,7 +187,7 @@ JIT::JumpList JIT::emitContiguousGetByVal(Instruction*, PatchableJump& badType,
return slowCases;
}
-JIT::JumpList JIT::emitArrayStorageGetByVal(Instruction*, PatchableJump& badType)
+JIT::JumpList JIT::emitArrayStorageLoad(Instruction*, PatchableJump& badType)
{
JumpList slowCases;
@@ -191,6 +195,7 @@ JIT::JumpList JIT::emitArrayStorageGetByVal(Instruction*, PatchableJump& badType
badType = patchableBranch32(Above, regT3, TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape));
loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
+ slowCases.append(branchIfNotToSpace(regT2));
slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, ArrayStorage::vectorLengthOffset())));
load64(BaseIndex(regT2, regT1, TimesEight, ArrayStorage::vectorOffset()), regT0);
@@ -199,18 +204,48 @@ JIT::JumpList JIT::emitArrayStorageGetByVal(Instruction*, PatchableJump& badType
return slowCases;
}
+JITGetByIdGenerator JIT::emitGetByValWithCachedId(Instruction* currentInstruction, const Identifier& propertyName, Jump& fastDoneCase, Jump& slowDoneCase, JumpList& slowCases)
+{
+ // base: regT0
+ // property: regT1
+ // scratch: regT3
+
+ int dst = currentInstruction[1].u.operand;
+
+ slowCases.append(emitJumpIfNotJSCell(regT1));
+ emitIdentifierCheck(regT1, regT3, propertyName, slowCases);
+
+ JITGetByIdGenerator gen(
+ m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(),
+ JSValueRegs(regT0), JSValueRegs(regT0));
+ gen.generateFastPath(*this);
+
+ fastDoneCase = jump();
+
+ Label coldPathBegin = label();
+ gen.slowPathJump().link(this);
+
+ Call call = callOperation(WithProfile, operationGetByIdOptimize, dst, gen.stubInfo(), regT0, propertyName.impl());
+ gen.reportSlowPathCall(coldPathBegin, call);
+ slowDoneCase = jump();
+
+ return gen;
+}
+
void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
- ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int property = currentInstruction[3].u.operand;
+ ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;
- linkSlowCase(iter); // property int32 check
linkSlowCaseIfNotJSCell(iter, base); // base cell check
+ linkSlowCase(iter); // property int32 check
Jump nonCell = jump();
linkSlowCase(iter); // base array check
- Jump notString = branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get()));
+ Jump notString = branchStructure(NotEqual,
+ Address(regT0, JSCell::structureIDOffset()),
+ m_vm->stringStructure.get());
emitNakedCall(CodeLocationLabel(m_vm->getCTIStub(stringGetByValStubGenerator).code()));
Jump failed = branchTest64(Zero, regT0);
emitPutVirtualRegister(dst, regT0);
@@ -219,21 +254,15 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas
notString.link(this);
nonCell.link(this);
- Jump skipProfiling = jump();
-
+ linkSlowCase(iter); // read barrier
linkSlowCase(iter); // vector length check
linkSlowCase(iter); // empty value
- emitArrayProfileOutOfBoundsSpecialCase(profile);
-
- skipProfiling.link(this);
-
Label slowPath = label();
- JITStubCall stubCall(this, cti_op_get_by_val);
- stubCall.addArgument(base, regT2);
- stubCall.addArgument(property, regT2);
- Call call = stubCall.call(dst);
+ emitGetVirtualRegister(base, regT0);
+ emitGetVirtualRegister(property, regT1);
+ Call call = callOperation(operationGetByValOptimize, dst, regT0, regT1, byValInfo);
m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
@@ -242,90 +271,20 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas
emitValueProfilingSite();
}
-void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch, FinalObjectMode finalObjectMode)
-{
- ASSERT(sizeof(JSValue) == 8);
-
- if (finalObjectMode == MayBeFinal) {
- Jump isInline = branch32(LessThan, offset, TrustedImm32(firstOutOfLineOffset));
- loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
- neg32(offset);
- Jump done = jump();
- isInline.link(this);
- addPtr(TrustedImm32(JSObject::offsetOfInlineStorage() - (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), base, scratch);
- done.link(this);
- } else {
-#if !ASSERT_DISABLED
- Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(firstOutOfLineOffset));
- breakpoint();
- isOutOfLine.link(this);
-#endif
- loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
- neg32(offset);
- }
- signExtend32ToPtr(offset, offset);
- load64(BaseIndex(scratch, offset, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), result);
-}
-
-void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
- unsigned expected = currentInstruction[4].u.operand;
- unsigned iter = currentInstruction[5].u.operand;
- unsigned i = currentInstruction[6].u.operand;
-
- emitGetVirtualRegister(property, regT0);
- addSlowCase(branch64(NotEqual, regT0, addressFor(expected)));
- emitGetVirtualRegisters(base, regT0, iter, regT1);
- emitJumpSlowCaseIfNotJSCell(regT0, base);
-
- // Test base's structure
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- addSlowCase(branchPtr(NotEqual, regT2, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
- load32(addressFor(i), regT3);
- sub32(TrustedImm32(1), regT3);
- addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
- Jump inlineProperty = branch32(Below, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity)));
- add32(TrustedImm32(firstOutOfLineOffset), regT3);
- sub32(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity)), regT3);
- inlineProperty.link(this);
- compileGetDirectOffset(regT0, regT0, regT3, regT1);
-
- emitPutVirtualRegister(dst, regT0);
-}
-
-void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
-
- linkSlowCase(iter);
- linkSlowCaseIfNotJSCell(iter, base);
- linkSlowCase(iter);
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_get_by_val_generic);
- stubCall.addArgument(base, regT2);
- stubCall.addArgument(property, regT2);
- stubCall.call(dst);
-}
-
void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
- unsigned base = currentInstruction[1].u.operand;
- unsigned property = currentInstruction[2].u.operand;
+ int base = currentInstruction[1].u.operand;
+ int property = currentInstruction[2].u.operand;
ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+ ByValInfo* byValInfo = m_codeBlock->addByValInfo();
emitGetVirtualRegisters(base, regT0, property, regT1);
- emitJumpSlowCaseIfNotImmediateInteger(regT1);
+ emitJumpSlowCaseIfNotJSCell(regT0, base);
+ PatchableJump notIndex = emitPatchableJumpIfNotInt(regT1);
+ addSlowCase(notIndex);
// See comment in op_get_by_val.
zeroExtend32ToPtr(regT1, regT1);
- emitJumpSlowCaseIfNotJSCell(regT0, base);
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- emitArrayProfilingSite(regT2, regT3, profile);
+ emitArrayProfilingSiteWithCell(regT0, regT2, profile);
and32(TrustedImm32(IndexingShapeMask), regT2);
PatchableJump badType;
@@ -355,14 +314,12 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
Label done = label();
- m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
-
- emitWriteBarrier(regT0, regT3, regT1, regT3, ShouldFilterImmediates, WriteBarrierForPropertyAccess);
+ m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, notIndex, badType, mode, profile, done, done));
}
JIT::JumpList JIT::emitGenericContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType, IndexingType indexingShape)
{
- unsigned value = currentInstruction[3].u.operand;
+ int value = currentInstruction[3].u.operand;
ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
JumpList slowCases;
@@ -370,17 +327,18 @@ JIT::JumpList JIT::emitGenericContiguousPutByVal(Instruction* currentInstruction
badType = patchableBranch32(NotEqual, regT2, TrustedImm32(indexingShape));
loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
+ slowCases.append(branchIfNotToSpace(regT2));
Jump outOfBounds = branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength()));
Label storeResult = label();
emitGetVirtualRegister(value, regT3);
switch (indexingShape) {
case Int32Shape:
- slowCases.append(emitJumpIfNotImmediateInteger(regT3));
+ slowCases.append(emitJumpIfNotInt(regT3));
store64(regT3, BaseIndex(regT2, regT1, TimesEight));
break;
case DoubleShape: {
- Jump notInt = emitJumpIfNotImmediateInteger(regT3);
+ Jump notInt = emitJumpIfNotInt(regT3);
convertInt32ToDouble(regT3, fpRegT0);
Jump ready = jump();
notInt.link(this);
@@ -393,6 +351,7 @@ JIT::JumpList JIT::emitGenericContiguousPutByVal(Instruction* currentInstruction
}
case ContiguousShape:
store64(regT3, BaseIndex(regT2, regT1, TimesEight));
+ emitWriteBarrier(currentInstruction[1].u.operand, value, ShouldFilterValue);
break;
default:
CRASH();
@@ -417,13 +376,14 @@ JIT::JumpList JIT::emitGenericContiguousPutByVal(Instruction* currentInstruction
JIT::JumpList JIT::emitArrayStoragePutByVal(Instruction* currentInstruction, PatchableJump& badType)
{
- unsigned value = currentInstruction[3].u.operand;
+ int value = currentInstruction[3].u.operand;
ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
JumpList slowCases;
badType = patchableBranch32(NotEqual, regT2, TrustedImm32(ArrayStorageShape));
loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
+ slowCases.append(branchIfNotToSpace(regT2));
slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, ArrayStorage::vectorLengthOffset())));
Jump empty = branchTest64(Zero, BaseIndex(regT2, regT1, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
@@ -431,6 +391,7 @@ JIT::JumpList JIT::emitArrayStoragePutByVal(Instruction* currentInstruction, Pat
Label storeResult(this);
emitGetVirtualRegister(value, regT3);
store64(regT3, BaseIndex(regT2, regT1, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
+ emitWriteBarrier(currentInstruction[1].u.operand, value, ShouldFilterValue);
Jump end = jump();
empty.link(this);
@@ -448,17 +409,54 @@ JIT::JumpList JIT::emitArrayStoragePutByVal(Instruction* currentInstruction, Pat
return slowCases;
}
+JITPutByIdGenerator JIT::emitPutByValWithCachedId(Instruction* currentInstruction, PutKind putKind, const Identifier& propertyName, JumpList& doneCases, JumpList& slowCases)
+{
+ // base: regT0
+ // property: regT1
+ // scratch: regT2
+
+ int base = currentInstruction[1].u.operand;
+ int value = currentInstruction[3].u.operand;
+
+ slowCases.append(emitJumpIfNotJSCell(regT1));
+ emitIdentifierCheck(regT1, regT1, propertyName, slowCases);
+
+ // Write barrier breaks the registers. So after issuing the write barrier,
+ // reload the registers.
+ emitWriteBarrier(base, value, ShouldFilterValue);
+ emitGetVirtualRegisters(base, regT0, value, regT1);
+
+ JITPutByIdGenerator gen(
+ m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(),
+ JSValueRegs(regT0), JSValueRegs(regT1), regT2, m_codeBlock->ecmaMode(), putKind);
+ gen.generateFastPath(*this);
+ doneCases.append(jump());
+
+ Label coldPathBegin = label();
+ gen.slowPathJump().link(this);
+
+ Call call = callOperation(gen.slowPathFunction(), gen.stubInfo(), regT1, regT0, propertyName.impl());
+ gen.reportSlowPathCall(coldPathBegin, call);
+ doneCases.append(jump());
+
+ return gen;
+}
+
void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned base = currentInstruction[1].u.operand;
- unsigned property = currentInstruction[2].u.operand;
- unsigned value = currentInstruction[3].u.operand;
+ int base = currentInstruction[1].u.operand;
+ int property = currentInstruction[2].u.operand;
+ int value = currentInstruction[3].u.operand;
ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+ ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;
- linkSlowCase(iter); // property int32 check
linkSlowCaseIfNotJSCell(iter, base); // base cell check
+ linkSlowCase(iter); // property int32 check
linkSlowCase(iter); // base not array check
+ linkSlowCase(iter); // read barrier
+ linkSlowCase(iter); // out of bounds
+
JITArrayMode mode = chooseArrayMode(profile);
switch (mode) {
case JITInt32:
@@ -469,18 +467,13 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas
break;
}
- Jump skipProfiling = jump();
- linkSlowCase(iter); // out of bounds
- emitArrayProfileOutOfBoundsSpecialCase(profile);
- skipProfiling.link(this);
-
Label slowPath = label();
- JITStubCall stubPutByValCall(this, cti_op_put_by_val);
- stubPutByValCall.addArgument(regT0);
- stubPutByValCall.addArgument(property, regT2);
- stubPutByValCall.addArgument(value, regT2);
- Call call = stubPutByValCall.call();
+ emitGetVirtualRegister(base, regT0);
+ emitGetVirtualRegister(property, regT1);
+ emitGetVirtualRegister(value, regT2);
+ bool isDirect = m_interpreter->getOpcodeID(currentInstruction->u.opcode) == op_put_by_val_direct;
+ Call call = callOperation(isDirect ? operationDirectPutByValOptimize : operationPutByValOptimize, regT0, regT1, regT2, byValInfo);
m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
@@ -489,114 +482,112 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas
void JIT::emit_op_put_by_index(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_put_by_index);
- stubCall.addArgument(currentInstruction[1].u.operand, regT2);
- stubCall.addArgument(TrustedImm32(currentInstruction[2].u.operand));
- stubCall.addArgument(currentInstruction[3].u.operand, regT2);
- stubCall.call();
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+ emitGetVirtualRegister(currentInstruction[3].u.operand, regT1);
+ callOperation(operationPutByIndex, regT0, currentInstruction[2].u.operand, regT1);
}
-void JIT::emit_op_put_getter_setter(Instruction* currentInstruction)
+void JIT::emit_op_put_getter_by_id(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_put_getter_setter);
- stubCall.addArgument(currentInstruction[1].u.operand, regT2);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[2].u.operand)));
- stubCall.addArgument(currentInstruction[3].u.operand, regT2);
- stubCall.addArgument(currentInstruction[4].u.operand, regT2);
- stubCall.call();
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+ int32_t options = currentInstruction[3].u.operand;
+ emitGetVirtualRegister(currentInstruction[4].u.operand, regT1);
+ callOperation(operationPutGetterById, regT0, m_codeBlock->identifier(currentInstruction[2].u.operand).impl(), options, regT1);
}
-void JIT::emit_op_del_by_id(Instruction* currentInstruction)
+void JIT::emit_op_put_setter_by_id(Instruction* currentInstruction)
{
- JITStubCall stubCall(this, cti_op_del_by_id);
- stubCall.addArgument(currentInstruction[2].u.operand, regT2);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(currentInstruction[3].u.operand)));
- stubCall.call(currentInstruction[1].u.operand);
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+ int32_t options = currentInstruction[3].u.operand;
+ emitGetVirtualRegister(currentInstruction[4].u.operand, regT1);
+ callOperation(operationPutSetterById, regT0, m_codeBlock->identifier(currentInstruction[2].u.operand).impl(), options, regT1);
}
-void JIT::emit_op_get_by_id(Instruction* currentInstruction)
+void JIT::emit_op_put_getter_setter_by_id(Instruction* currentInstruction)
{
- unsigned resultVReg = currentInstruction[1].u.operand;
- unsigned baseVReg = currentInstruction[2].u.operand;
- Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
-
- emitGetVirtualRegister(baseVReg, regT0);
- compileGetByIdHotPath(baseVReg, ident);
- emitValueProfilingSite();
- emitPutVirtualRegister(resultVReg);
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+ int32_t attribute = currentInstruction[3].u.operand;
+ emitGetVirtualRegister(currentInstruction[4].u.operand, regT1);
+ emitGetVirtualRegister(currentInstruction[5].u.operand, regT2);
+ callOperation(operationPutGetterSetter, regT0, m_codeBlock->identifier(currentInstruction[2].u.operand).impl(), attribute, regT1, regT2);
}
-void JIT::compileGetByIdHotPath(int baseVReg, Identifier* ident)
+void JIT::emit_op_put_getter_by_val(Instruction* currentInstruction)
{
- // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
- // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
- // to array-length / prototype access trampolines), and finally we also record the property-map access offset as a label
- // to jump back to if one of these trampolines finds a match.
-
- emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
-
- if (*ident == m_vm->propertyNames->length && shouldEmitProfiling()) {
- loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
- emitArrayProfilingSiteForBytecodeIndex(regT1, regT2, m_bytecodeOffset);
- }
-
- BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
-
- Label hotPathBegin(this);
-
- DataLabelPtr structureToCompare;
- PatchableJump structureCheck = patchableBranchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
- addSlowCase(structureCheck);
-
- ConvertibleLoadLabel propertyStorageLoad = convertibleLoadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
- DataLabelCompact displacementLabel = load64WithCompactAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0);
-
- Label putResult(this);
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT1);
+ int32_t attributes = currentInstruction[3].u.operand;
+ emitGetVirtualRegister(currentInstruction[4].u.operand, regT2);
+ callOperation(operationPutGetterByVal, regT0, regT1, attributes, regT2);
+}
- END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
+void JIT::emit_op_put_setter_by_val(Instruction* currentInstruction)
+{
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT1);
+ int32_t attributes = currentInstruction[3].u.operand;
+ emitGetVirtualRegister(currentInstruction[4].u.operand, regT2);
+ callOperation(operationPutSetterByVal, regT0, regT1, attributes, regT2);
+}
- m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubGetById, m_bytecodeOffset, hotPathBegin, structureToCompare, structureCheck, propertyStorageLoad, displacementLabel, putResult));
+void JIT::emit_op_del_by_id(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int property = currentInstruction[3].u.operand;
+ emitGetVirtualRegister(base, regT0);
+ callOperation(operationDeleteById, dst, regT0, &m_codeBlock->identifier(property));
}
-void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
- unsigned resultVReg = currentInstruction[1].u.operand;
- unsigned baseVReg = currentInstruction[2].u.operand;
- Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+ int resultVReg = currentInstruction[1].u.operand;
+ int baseVReg = currentInstruction[2].u.operand;
+ const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+
+ emitGetVirtualRegister(baseVReg, regT0);
+
+ emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
+
+ if (*ident == m_vm->propertyNames->length && shouldEmitProfiling())
+ emitArrayProfilingSiteForBytecodeIndexWithCell(regT0, regT1, m_bytecodeOffset);
+
+ JITGetByIdGenerator gen(
+ m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(),
+ JSValueRegs(regT0), JSValueRegs(regT0));
+ gen.generateFastPath(*this);
+ addSlowCase(gen.slowPathJump());
+ m_getByIds.append(gen);
- compileGetByIdSlowCase(resultVReg, baseVReg, ident, iter);
emitValueProfilingSite();
+ emitPutVirtualRegister(resultVReg);
}
-void JIT::compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- // As for the hot path of get_by_id, above, we ensure that we can use an architecture-specific offset
- // so that we only need to track one pointer into the slow case code - we track a pointer to the location
- // of the call (which we can use to look up the patch information), but should an array-length or
- // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
- // the distance from the call to the head of the slow case.
+ int resultVReg = currentInstruction[1].u.operand;
+ int baseVReg = currentInstruction[2].u.operand;
+ const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
linkSlowCaseIfNotJSCell(iter, baseVReg);
linkSlowCase(iter);
- BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
-
- Label coldPathBegin(this);
- JITStubCall stubCall(this, cti_op_get_by_id);
- stubCall.addArgument(regT0);
- stubCall.addArgument(TrustedImmPtr(ident));
- Call call = stubCall.call(resultVReg);
-
- END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
+ JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++];
+
+ Label coldPathBegin = label();
+
+ Call call = callOperation(WithProfile, operationGetByIdOptimize, resultVReg, gen.stubInfo(), regT0, ident->impl());
- // Track the location of the call; this will be used to recover patch information.
- m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].slowCaseInfo(PropertyStubGetById, coldPathBegin, call);
+ gen.reportSlowPathCall(coldPathBegin, call);
}
void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
- unsigned baseVReg = currentInstruction[1].u.operand;
- unsigned valueVReg = currentInstruction[3].u.operand;
+ int baseVReg = currentInstruction[1].u.operand;
+ int valueVReg = currentInstruction[3].u.operand;
+ unsigned direct = currentInstruction[8].u.putByIdFlags & PutByIdIsDirect;
+
+ emitWriteBarrier(baseVReg, valueVReg, ShouldFilterBase);
// In order to be able to patch both the Structure, and the object offset, we store one pointer,
// to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
@@ -604,796 +595,598 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);
- // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
- BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);
-
- Label hotPathBegin(this);
-
- // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
- DataLabelPtr structureToCompare;
- addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
-
- ConvertibleLoadLabel propertyStorageLoad = convertibleLoadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
- DataLabel32 displacementLabel = store64WithAddressOffsetPatch(regT1, Address(regT2, patchPutByIdDefaultOffset));
-
- END_UNINTERRUPTED_SEQUENCE(sequencePutById);
-
- emitWriteBarrier(regT0, regT1, regT2, regT3, ShouldFilterImmediates, WriteBarrierForPropertyAccess);
-
- m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubPutById, m_bytecodeOffset, hotPathBegin, structureToCompare, propertyStorageLoad, displacementLabel));
+ JITPutByIdGenerator gen(
+ m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(),
+ JSValueRegs(regT0), JSValueRegs(regT1), regT2, m_codeBlock->ecmaMode(),
+ direct ? Direct : NotDirect);
+
+ gen.generateFastPath(*this);
+ addSlowCase(gen.slowPathJump());
+
+ m_putByIds.append(gen);
}
void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned baseVReg = currentInstruction[1].u.operand;
- Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
- unsigned direct = currentInstruction[8].u.operand;
+ int baseVReg = currentInstruction[1].u.operand;
+ const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
linkSlowCaseIfNotJSCell(iter, baseVReg);
linkSlowCase(iter);
- JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct : cti_op_put_by_id);
- stubCall.addArgument(regT0);
- stubCall.addArgument(TrustedImmPtr(ident));
- stubCall.addArgument(regT1);
- move(regT0, nonArgGPR1);
- Call call = stubCall.call();
+ Label coldPathBegin(this);
+
+ JITPutByIdGenerator& gen = m_putByIds[m_putByIdIndex++];
- // Track the location of the call; this will be used to recover patch information.
- m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].slowCaseInfo(PropertyStubPutById, call);
-}
+ Call call = callOperation(
+ gen.slowPathFunction(), gen.stubInfo(), regT1, regT0, ident->impl());
-// Compile a store into an object's property storage. May overwrite the
-// value in objectReg.
-void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, PropertyOffset cachedOffset)
-{
- if (isInlineOffset(cachedOffset)) {
- store64(value, Address(base, JSObject::offsetOfInlineStorage() + sizeof(JSValue) * offsetInInlineStorage(cachedOffset)));
- return;
- }
-
- loadPtr(Address(base, JSObject::butterflyOffset()), base);
- store64(value, Address(base, sizeof(JSValue) * offsetInButterfly(cachedOffset)));
+ gen.reportSlowPathCall(coldPathBegin, call);
}
-// Compile a load from an object's property storage. May overwrite base.
-void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, PropertyOffset cachedOffset)
+void JIT::emitVarInjectionCheck(bool needsVarInjectionChecks)
{
- if (isInlineOffset(cachedOffset)) {
- load64(Address(base, JSObject::offsetOfInlineStorage() + sizeof(JSValue) * offsetInInlineStorage(cachedOffset)), result);
+ if (!needsVarInjectionChecks)
return;
- }
-
- loadPtr(Address(base, JSObject::butterflyOffset()), result);
- load64(Address(result, sizeof(JSValue) * offsetInButterfly(cachedOffset)), result);
+ addSlowCase(branch8(Equal, AbsoluteAddress(m_codeBlock->globalObject()->varInjectionWatchpoint()->addressOfState()), TrustedImm32(IsInvalidated)));
}
-void JIT::compileGetDirectOffset(JSObject* base, RegisterID result, PropertyOffset cachedOffset)
+void JIT::emitResolveClosure(int dst, int scope, bool needsVarInjectionChecks, unsigned depth)
{
- if (isInlineOffset(cachedOffset)) {
- load64(base->locationForOffset(cachedOffset), result);
- return;
- }
-
- loadPtr(base->butterflyAddress(), result);
- load64(Address(result, offsetInButterfly(cachedOffset) * sizeof(WriteBarrier<Unknown>)), result);
+ emitVarInjectionCheck(needsVarInjectionChecks);
+ emitGetVirtualRegister(scope, regT0);
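+ // Walk up the scope chain 'depth' links to reach the resolved scope.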
+ for (unsigned i = 0; i < depth; ++i)
+ loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
+ emitPutVirtualRegister(dst);
}
-void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, PropertyOffset cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
+void JIT::emit_op_resolve_scope(Instruction* currentInstruction)
{
- move(nonArgGPR1, regT0);
-
- JumpList failureCases;
- // Check eax is an object of the right Structure.
- failureCases.append(emitJumpIfNotJSCell(regT0));
- failureCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(oldStructure)));
-
- testPrototype(oldStructure->storedPrototype(), failureCases, stubInfo);
-
- ASSERT(oldStructure->storedPrototype().isNull() || oldStructure->storedPrototype().asCell()->structure() == chain->head()->get());
-
- // ecx = baseObject->m_structure
- if (!direct) {
- for (WriteBarrier<Structure>* it = chain->head(); *it; ++it) {
- ASSERT((*it)->storedPrototype().isNull() || (*it)->storedPrototype().asCell()->structure() == it[1].get());
- testPrototype((*it)->storedPrototype(), failureCases, stubInfo);
+ int dst = currentInstruction[1].u.operand;
+ int scope = currentInstruction[2].u.operand;
+ ResolveType resolveType = static_cast<ResolveType>(currentInstruction[4].u.operand);
+ unsigned depth = currentInstruction[5].u.operand;
+
+ auto emitCode = [&] (ResolveType resolveType) {
+ switch (resolveType) {
+ case GlobalProperty:
+ case GlobalVar:
+ case GlobalPropertyWithVarInjectionChecks:
+ case GlobalVarWithVarInjectionChecks:
+ case GlobalLexicalVar:
+ case GlobalLexicalVarWithVarInjectionChecks: {
+ JSScope* constantScope = JSScope::constantScopeForCodeBlock(resolveType, m_codeBlock);
+ RELEASE_ASSERT(constantScope);
+ emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
+ move(TrustedImmPtr(constantScope), regT0);
+ emitPutVirtualRegister(dst);
+ break;
}
+ case ClosureVar:
+ case ClosureVarWithVarInjectionChecks:
+ emitResolveClosure(dst, scope, needsVarInjectionChecks(resolveType), depth);
+ break;
+ case ModuleVar:
+ move(TrustedImmPtr(currentInstruction[6].u.jsCell.get()), regT0);
+ emitPutVirtualRegister(dst);
+ break;
+ case Dynamic:
+ addSlowCase(jump());
+ break;
+ case LocalClosureVar:
+ case UnresolvedProperty:
+ case UnresolvedPropertyWithVarInjectionChecks:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ };
+
+ switch (resolveType) {
+ case UnresolvedProperty:
+ case UnresolvedPropertyWithVarInjectionChecks: {
+ JumpList skipToEnd;
+ load32(&currentInstruction[4], regT0);
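+ // The resolve type was not known at compile time, so reload it from the instruction stream and dispatch on it here.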
+
+ Jump notGlobalProperty = branch32(NotEqual, regT0, TrustedImm32(GlobalProperty));
+ emitCode(GlobalProperty);
+ skipToEnd.append(jump());
+ notGlobalProperty.link(this);
+
+ Jump notGlobalPropertyWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalPropertyWithVarInjectionChecks));
+ emitCode(GlobalPropertyWithVarInjectionChecks);
+ skipToEnd.append(jump());
+ notGlobalPropertyWithVarInjections.link(this);
+
+ Jump notGlobalLexicalVar = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVar));
+ emitCode(GlobalLexicalVar);
+ skipToEnd.append(jump());
+ notGlobalLexicalVar.link(this);
+
+ Jump notGlobalLexicalVarWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVarWithVarInjectionChecks));
+ emitCode(GlobalLexicalVarWithVarInjectionChecks);
+ skipToEnd.append(jump());
+ notGlobalLexicalVarWithVarInjections.link(this);
+
+ addSlowCase(jump());
+ skipToEnd.link(this);
+ break;
}
- // If we succeed in all of our checks, and the code was optimizable, then make sure we
- // decrement the rare case counter.
-#if ENABLE(VALUE_PROFILER)
- if (m_codeBlock->canCompileWithDFG() >= DFG::MayInline) {
- sub32(
- TrustedImm32(1),
- AbsoluteAddress(&m_codeBlock->rareCaseProfileForBytecodeOffset(stubInfo->bytecodeIndex)->m_counter));
- }
-#endif
-
- // emit a call only if storage realloc is needed
- bool willNeedStorageRealloc = oldStructure->outOfLineCapacity() != newStructure->outOfLineCapacity();
- if (willNeedStorageRealloc) {
- // This trampoline was called like a JIT stub; before we can call again we need to
- // remove the return address from the stack, to prevent the stack from becoming misaligned.
- preserveReturnAddressAfterCall(regT3);
-
- JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
- stubCall.skipArgument(); // base
- stubCall.skipArgument(); // ident
- stubCall.skipArgument(); // value
- stubCall.addArgument(TrustedImm32(oldStructure->outOfLineCapacity()));
- stubCall.addArgument(TrustedImmPtr(newStructure));
- stubCall.call(regT0);
- emitGetJITStubArg(2, regT1);
-
- restoreReturnAddressBeforeReturn(regT3);
- }
-
- // Planting the new structure triggers the write barrier so we need
- // an unconditional barrier here.
- emitWriteBarrier(regT0, regT1, regT2, regT3, UnconditionalWriteBarrier, WriteBarrierForPropertyAccess);
-
- ASSERT(newStructure->classInfo() == oldStructure->classInfo());
- storePtr(TrustedImmPtr(newStructure), Address(regT0, JSCell::structureOffset()));
- compilePutDirectOffset(regT0, regT1, cachedOffset);
-
- ret();
-
- ASSERT(!failureCases.empty());
- failureCases.link(this);
- restoreArgumentReferenceForTrampoline();
- Call failureCall = tailRecursiveCall();
-
- LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
-
- patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));
-
- if (willNeedStorageRealloc) {
- ASSERT(m_calls.size() == 1);
- patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
+ default:
+ emitCode(resolveType);
+ break;
}
-
- stubInfo->stubRoutine = createJITStubRoutine(
- FINALIZE_CODE(
- patchBuffer,
- ("Baseline put_by_id transition for %s, return point %p",
- toCString(*m_codeBlock).data(), returnAddress.value())),
- *m_vm,
- m_codeBlock->ownerExecutable(),
- willNeedStorageRealloc,
- newStructure);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relinkCallerToTrampoline(returnAddress, CodeLocationLabel(stubInfo->stubRoutine->code().code()));
}
-void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress)
+void JIT::emitSlow_op_resolve_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- RepatchBuffer repatchBuffer(codeBlock);
+ ResolveType resolveType = static_cast<ResolveType>(currentInstruction[4].u.operand);
+ if (resolveType == GlobalProperty || resolveType == GlobalVar || resolveType == ClosureVar || resolveType == GlobalLexicalVar || resolveType == ModuleVar)
+ return;
- // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
- // Should probably go to cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
+ if (resolveType == UnresolvedProperty || resolveType == UnresolvedPropertyWithVarInjectionChecks) {
+ linkSlowCase(iter); // var injections check for GlobalPropertyWithVarInjectionChecks.
+ linkSlowCase(iter); // var injections check for GlobalLexicalVarWithVarInjectionChecks.
+ }
- // Patch the offset into the property map to load from, then patch the Structure to look for.
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.get.structureToCompare), structure);
- repatchBuffer.setLoadInstructionIsActive(stubInfo->hotPathBegin.convertibleLoadAtOffset(stubInfo->patch.baseline.u.get.propertyStorageLoad), isOutOfLineOffset(cachedOffset));
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel), offsetRelativeToPatchedStorage(cachedOffset));
+ linkSlowCase(iter);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_resolve_scope);
+ slowPathCall.call();
}
-void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress, bool direct)
+void JIT::emitLoadWithStructureCheck(int scope, Structure** structureSlot)
{
- RepatchBuffer repatchBuffer(codeBlock);
-
- // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
- // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
-
- // Patch the offset into the property map to load from, then patch the Structure to look for.
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.put.structureToCompare), structure);
- repatchBuffer.setLoadInstructionIsActive(stubInfo->hotPathBegin.convertibleLoadAtOffset(stubInfo->patch.baseline.u.put.propertyStorageLoad), isOutOfLineOffset(cachedOffset));
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel), offsetRelativeToPatchedStorage(cachedOffset));
+ emitGetVirtualRegister(scope, regT0);
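+ // Take the slow path if no Structure has been cached yet, or if the scope's structure ID does not match the cached one.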
+ loadPtr(structureSlot, regT1);
+ addSlowCase(branchTestPtr(Zero, regT1));
+ load32(Address(regT1, Structure::structureIDOffset()), regT1);
+ addSlowCase(branch32(NotEqual, Address(regT0, JSCell::structureIDOffset()), regT1));
}
-void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
+void JIT::emitGetVarFromPointer(JSValue* operand, GPRReg reg)
{
- StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
-
- // Check eax is an array
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- Jump failureCases1 = branchTest32(Zero, regT2, TrustedImm32(IsArray));
- Jump failureCases2 = branchTest32(Zero, regT2, TrustedImm32(IndexingShapeMask));
-
- // Checks out okay! - get the length from the storage
- loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
- load32(Address(regT3, ArrayStorage::lengthOffset()), regT2);
- Jump failureCases3 = branch32(LessThan, regT2, TrustedImm32(0));
-
- emitFastArithIntToImmNoCheck(regT2, regT0);
- Jump success = jump();
-
- LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
-
- // Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);
- patchBuffer.link(failureCases1, slowCaseBegin);
- patchBuffer.link(failureCases2, slowCaseBegin);
- patchBuffer.link(failureCases3, slowCaseBegin);
-
- // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
-
- // Track the stub we have created so that it will be deleted later.
- stubInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
- patchBuffer,
- ("Baseline JIT get_by_id array length stub for %s, return point %p",
- toCString(*m_codeBlock).data(),
- stubInfo->hotPathBegin.labelAtOffset(
- stubInfo->patch.baseline.u.get.putResult).executableAddress()));
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine->code().code()));
-
- // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
+ loadPtr(operand, reg);
}
-void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+void JIT::emitGetVarFromIndirectPointer(JSValue** operand, GPRReg reg)
{
- // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
- // referencing the prototype object - let's speculatively load its table nice and early!)
- JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
-
- // Check eax is an object of the right Structure.
- Jump failureCases1 = checkStructure(regT0, structure);
-
- // Check the prototype object's Structure had not changed.
- Jump failureCases2 = addStructureTransitionCheck(protoObject, prototypeStructure, stubInfo, regT3);
-
- bool needsStubLink = false;
-
- // Checks out okay!
- if (slot.cachedPropertyType() == PropertySlot::Getter) {
- needsStubLink = true;
- compileGetDirectOffset(protoObject, regT1, cachedOffset);
- JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
- stubCall.addArgument(regT1);
- stubCall.addArgument(regT0);
- stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
- needsStubLink = true;
- JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
- stubCall.addArgument(TrustedImmPtr(protoObject));
- stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
- stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
- stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else
- compileGetDirectOffset(protoObject, regT0, cachedOffset);
- Jump success = jump();
- LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
-
- // Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);
- patchBuffer.link(failureCases1, slowCaseBegin);
- if (failureCases2.isSet())
- patchBuffer.link(failureCases2, slowCaseBegin);
-
- // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
-
- if (needsStubLink) {
- for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
- if (iter->to)
- patchBuffer.link(iter->from, FunctionPtr(iter->to));
- }
- }
- // Track the stub we have created so that it will be deleted later.
- stubInfo->stubRoutine = createJITStubRoutine(
- FINALIZE_CODE(
- patchBuffer,
- ("Baseline JIT get_by_id proto stub for %s, return point %p",
- toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
- stubInfo->patch.baseline.u.get.putResult).executableAddress())),
- *m_vm,
- m_codeBlock->ownerExecutable(),
- needsStubLink);
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine->code().code()));
-
- // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
+ loadPtr(operand, reg);
+ loadPtr(reg, reg);
}
-void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset)
+void JIT::emitGetClosureVar(int scope, uintptr_t operand)
{
- Jump failureCase = checkStructure(regT0, structure);
- bool needsStubLink = false;
- bool isDirect = false;
- if (slot.cachedPropertyType() == PropertySlot::Getter) {
- needsStubLink = true;
- compileGetDirectOffset(regT0, regT1, cachedOffset);
- JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
- stubCall.addArgument(regT1);
- stubCall.addArgument(regT0);
- stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
- needsStubLink = true;
- JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
- stubCall.addArgument(regT0);
- stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
- stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
- stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else {
- isDirect = true;
- compileGetDirectOffset(regT0, regT0, cachedOffset);
- }
- Jump success = jump();
-
- LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
-
- if (needsStubLink) {
- for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
- if (iter->to)
- patchBuffer.link(iter->from, FunctionPtr(iter->to));
- }
- }
-
- // Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel lastProtoBegin = CodeLocationLabel(JITStubRoutine::asCodePtr(polymorphicStructures->list[currentIndex - 1].stubRoutine));
- if (!lastProtoBegin)
- lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);
-
- patchBuffer.link(failureCase, lastProtoBegin);
-
- // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
-
- RefPtr<JITStubRoutine> stubCode = createJITStubRoutine(
- FINALIZE_CODE(
- patchBuffer,
- ("Baseline JIT get_by_id list stub for %s, return point %p",
- toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
- stubInfo->patch.baseline.u.get.putResult).executableAddress())),
- *m_vm,
- m_codeBlock->ownerExecutable(),
- needsStubLink);
-
- polymorphicStructures->list[currentIndex].set(*m_vm, m_codeBlock->ownerExecutable(), stubCode, structure, isDirect);
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubCode->code().code()));
+ emitGetVirtualRegister(scope, regT0);
+ loadPtr(Address(regT0, JSEnvironmentRecord::offsetOfVariables() + operand * sizeof(Register)), regT0);
}
-void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, CallFrame* callFrame)
+void JIT::emit_op_get_from_scope(Instruction* currentInstruction)
{
- // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
- // referencing the prototype object - let's speculatively load it's table nice and early!)
- // referencing the prototype object - let's speculatively load its table nice and early!)
-
- // Check eax is an object of the right Structure.
- Jump failureCases1 = checkStructure(regT0, structure);
-
- // Check the prototype object's Structure had not changed.
- Jump failureCases2 = addStructureTransitionCheck(protoObject, prototypeStructure, stubInfo, regT3);
-
- // Checks out okay!
- bool needsStubLink = false;
- bool isDirect = false;
- if (slot.cachedPropertyType() == PropertySlot::Getter) {
- needsStubLink = true;
- compileGetDirectOffset(protoObject, regT1, cachedOffset);
- JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
- stubCall.addArgument(regT1);
- stubCall.addArgument(regT0);
- stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
- needsStubLink = true;
- JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
- stubCall.addArgument(TrustedImmPtr(protoObject));
- stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
- stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
- stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else {
- isDirect = true;
- compileGetDirectOffset(protoObject, regT0, cachedOffset);
- }
-
- Jump success = jump();
-
- LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
-
- if (needsStubLink) {
- for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
- if (iter->to)
- patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ int dst = currentInstruction[1].u.operand;
+ int scope = currentInstruction[2].u.operand;
+ ResolveType resolveType = GetPutInfo(currentInstruction[4].u.operand).resolveType();
+ Structure** structureSlot = currentInstruction[5].u.structure.slot();
+ uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&currentInstruction[6].u.pointer);
+
+ auto emitCode = [&] (ResolveType resolveType, bool indirectLoadForOperand) {
+ switch (resolveType) {
+ case GlobalProperty:
+ case GlobalPropertyWithVarInjectionChecks: {
+ emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection.
+ GPRReg base = regT0;
+ GPRReg result = regT0;
+ GPRReg offset = regT1;
+ GPRReg scratch = regT2;
+
+ load32(operandSlot, offset);
+ if (!ASSERT_DISABLED) {
+ Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(firstOutOfLineOffset));
+ abortWithReason(JITOffsetIsNotOutOfLine);
+ isOutOfLine.link(this);
+ }
+ loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
+ addSlowCase(branchIfNotToSpace(scratch));
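+ // Out-of-line properties live at negative indices from the butterfly, so negate the offset before indexing.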
+ neg32(offset);
+ signExtend32ToPtr(offset, offset);
+ load64(BaseIndex(scratch, offset, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), result);
+ break;
}
- }
-
- // Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel lastProtoBegin = CodeLocationLabel(JITStubRoutine::asCodePtr(prototypeStructures->list[currentIndex - 1].stubRoutine));
- patchBuffer.link(failureCases1, lastProtoBegin);
- if (failureCases2.isSet())
- patchBuffer.link(failureCases2, lastProtoBegin);
-
- // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
-
- RefPtr<JITStubRoutine> stubCode = createJITStubRoutine(
- FINALIZE_CODE(
- patchBuffer,
- ("Baseline JIT get_by_id proto list stub for %s, return point %p",
- toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
- stubInfo->patch.baseline.u.get.putResult).executableAddress())),
- *m_vm,
- m_codeBlock->ownerExecutable(),
- needsStubLink);
- prototypeStructures->list[currentIndex].set(*m_vm, m_codeBlock->ownerExecutable(), stubCode, structure, prototypeStructure, isDirect);
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubCode->code().code()));
-}
-
-void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, CallFrame* callFrame)
-{
- ASSERT(count);
- JumpList bucketsOfFail;
-
- // Check eax is an object of the right Structure.
- Jump baseObjectCheck = checkStructure(regT0, structure);
- bucketsOfFail.append(baseObjectCheck);
-
- Structure* currStructure = structure;
- WriteBarrier<Structure>* it = chain->head();
- JSObject* protoObject = 0;
- for (unsigned i = 0; i < count; ++i, ++it) {
- protoObject = asObject(currStructure->prototypeForLookup(callFrame));
- currStructure = it->get();
- testPrototype(protoObject, bucketsOfFail, stubInfo);
- }
- ASSERT(protoObject);
-
- bool needsStubLink = false;
- bool isDirect = false;
- if (slot.cachedPropertyType() == PropertySlot::Getter) {
- needsStubLink = true;
- compileGetDirectOffset(protoObject, regT1, cachedOffset);
- JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
- stubCall.addArgument(regT1);
- stubCall.addArgument(regT0);
- stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
- needsStubLink = true;
- JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
- stubCall.addArgument(TrustedImmPtr(protoObject));
- stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
- stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
- stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else {
- isDirect = true;
- compileGetDirectOffset(protoObject, regT0, cachedOffset);
- }
- Jump success = jump();
-
- LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
-
- if (needsStubLink) {
- for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
- if (iter->to)
- patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ case GlobalVar:
+ case GlobalVarWithVarInjectionChecks:
+ case GlobalLexicalVar:
+ case GlobalLexicalVarWithVarInjectionChecks:
+ emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
+ if (indirectLoadForOperand)
+ emitGetVarFromIndirectPointer(bitwise_cast<JSValue**>(operandSlot), regT0);
+ else
+ emitGetVarFromPointer(bitwise_cast<JSValue*>(*operandSlot), regT0);
+ if (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks) // TDZ check.
+ addSlowCase(branchTest64(Zero, regT0));
+ break;
+ case ClosureVar:
+ case ClosureVarWithVarInjectionChecks:
+ emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
+ emitGetClosureVar(scope, *operandSlot);
+ break;
+ case Dynamic:
+ addSlowCase(jump());
+ break;
+ case LocalClosureVar:
+ case ModuleVar:
+ case UnresolvedProperty:
+ case UnresolvedPropertyWithVarInjectionChecks:
+ RELEASE_ASSERT_NOT_REACHED();
}
+ };
+
+ switch (resolveType) {
+ case UnresolvedProperty:
+ case UnresolvedPropertyWithVarInjectionChecks: {
+ JumpList skipToEnd;
+ load32(&currentInstruction[4], regT0);
+ and32(TrustedImm32(GetPutInfo::typeBits), regT0); // Load ResolveType into T0
+
+ Jump isGlobalProperty = branch32(Equal, regT0, TrustedImm32(GlobalProperty));
+ Jump notGlobalPropertyWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalPropertyWithVarInjectionChecks));
+ isGlobalProperty.link(this);
+ emitCode(GlobalProperty, false);
+ skipToEnd.append(jump());
+ notGlobalPropertyWithVarInjections.link(this);
+
+ Jump notGlobalLexicalVar = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVar));
+ emitCode(GlobalLexicalVar, true);
+ skipToEnd.append(jump());
+ notGlobalLexicalVar.link(this);
+
+ Jump notGlobalLexicalVarWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVarWithVarInjectionChecks));
+ emitCode(GlobalLexicalVarWithVarInjectionChecks, true);
+ skipToEnd.append(jump());
+ notGlobalLexicalVarWithVarInjections.link(this);
+
+ addSlowCase(jump());
+
+ skipToEnd.link(this);
+ break;
}
- // Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel lastProtoBegin = CodeLocationLabel(JITStubRoutine::asCodePtr(prototypeStructures->list[currentIndex - 1].stubRoutine));
-
- patchBuffer.link(bucketsOfFail, lastProtoBegin);
-
- // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
-
- RefPtr<JITStubRoutine> stubRoutine = createJITStubRoutine(
- FINALIZE_CODE(
- patchBuffer,
- ("Baseline JIT get_by_id chain list stub for %s, return point %p",
- toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
- stubInfo->patch.baseline.u.get.putResult).executableAddress())),
- *m_vm,
- m_codeBlock->ownerExecutable(),
- needsStubLink);
-
- // Track the stub we have created so that it will be deleted later.
- prototypeStructures->list[currentIndex].set(callFrame->vm(), m_codeBlock->ownerExecutable(), stubRoutine, structure, chain, isDirect);
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine->code().code()));
+ default:
+ emitCode(resolveType, false);
+ break;
+ }
+ emitPutVirtualRegister(dst);
+ emitValueProfilingSite();
}
-void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+void JIT::emitSlow_op_get_from_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- ASSERT(count);
+ int dst = currentInstruction[1].u.operand;
+ ResolveType resolveType = GetPutInfo(currentInstruction[4].u.operand).resolveType();
- JumpList bucketsOfFail;
-
- // Check eax is an object of the right Structure.
- bucketsOfFail.append(checkStructure(regT0, structure));
+ if (resolveType == GlobalVar || resolveType == ClosureVar)
+ return;
- Structure* currStructure = structure;
- WriteBarrier<Structure>* it = chain->head();
- JSObject* protoObject = 0;
- for (unsigned i = 0; i < count; ++i, ++it) {
- protoObject = asObject(currStructure->prototypeForLookup(callFrame));
- currStructure = it->get();
- testPrototype(protoObject, bucketsOfFail, stubInfo);
+ if (resolveType == GlobalProperty || resolveType == GlobalPropertyWithVarInjectionChecks) {
+ linkSlowCase(iter); // bad structure
+ linkSlowCase(iter); // read barrier
}
- ASSERT(protoObject);
-
- bool needsStubLink = false;
- if (slot.cachedPropertyType() == PropertySlot::Getter) {
- needsStubLink = true;
- compileGetDirectOffset(protoObject, regT1, cachedOffset);
- JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
- stubCall.addArgument(regT1);
- stubCall.addArgument(regT0);
- stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
- needsStubLink = true;
- JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
- stubCall.addArgument(TrustedImmPtr(protoObject));
- stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
- stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
- stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else
- compileGetDirectOffset(protoObject, regT0, cachedOffset);
- Jump success = jump();
- LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
-
- if (needsStubLink) {
- for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
- if (iter->to)
- patchBuffer.link(iter->from, FunctionPtr(iter->to));
- }
+ if (resolveType == GlobalLexicalVarWithVarInjectionChecks) // Var injections check.
+ linkSlowCase(iter);
+
+ if (resolveType == UnresolvedProperty || resolveType == UnresolvedPropertyWithVarInjectionChecks) {
+ // GlobalProperty/GlobalPropertyWithVarInjectionChecks
+ linkSlowCase(iter); // emitLoadWithStructureCheck
+ linkSlowCase(iter); // emitLoadWithStructureCheck
+ linkSlowCase(iter); // read barrier
+ // GlobalLexicalVar
+ linkSlowCase(iter); // TDZ check.
+ // GlobalLexicalVarWithVarInjectionChecks.
+ linkSlowCase(iter); // var injection check.
+ linkSlowCase(iter); // TDZ check.
}
- // Use the patch information to link the failure cases back to the original slow case routine.
- patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin));
-
- // On success, return to the hot patch code, at a point where it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
-
- // Track the stub we have created so that it will be deleted later.
- RefPtr<JITStubRoutine> stubRoutine = createJITStubRoutine(
- FINALIZE_CODE(
- patchBuffer,
- ("Baseline JIT get_by_id chain stub for %s, return point %p",
- toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
- stubInfo->patch.baseline.u.get.putResult).executableAddress())),
- *m_vm,
- m_codeBlock->ownerExecutable(),
- needsStubLink);
- stubInfo->stubRoutine = stubRoutine;
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine->code().code()));
-
- // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
+ linkSlowCase(iter);
+
+ callOperation(WithProfile, operationGetFromScope, dst, currentInstruction);
}
-void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
+void JIT::emitPutGlobalVariable(JSValue* operand, int value, WatchpointSet* set)
{
- int skip = currentInstruction[3].u.operand;
-
- emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT0);
- bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
- ASSERT(skip || !checkTopLevel);
- if (checkTopLevel && skip--) {
- Jump activationNotCreated;
- if (checkTopLevel)
- activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
- loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
- activationNotCreated.link(this);
- }
- while (skip--)
- loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
-
- loadPtr(Address(regT0, JSVariableObject::offsetOfRegisters()), regT0);
- loadPtr(Address(regT0, currentInstruction[2].u.operand * sizeof(Register)), regT0);
- emitValueProfilingSite();
- emitPutVirtualRegister(currentInstruction[1].u.operand);
+ emitGetVirtualRegister(value, regT0);
+ emitNotifyWrite(set);
+ storePtr(regT0, operand);
}
-
-void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
+void JIT::emitPutGlobalVariableIndirect(JSValue** addressOfOperand, int value, WatchpointSet** indirectWatchpointSet)
{
- int skip = currentInstruction[2].u.operand;
-
- emitGetVirtualRegister(currentInstruction[3].u.operand, regT0);
-
- emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1);
- bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
- ASSERT(skip || !checkTopLevel);
- if (checkTopLevel && skip--) {
- Jump activationNotCreated;
- if (checkTopLevel)
- activationNotCreated = branchTestPtr(Zero, addressFor(m_codeBlock->activationRegister()));
- loadPtr(Address(regT1, JSScope::offsetOfNext()), regT1);
- activationNotCreated.link(this);
- }
- while (skip--)
- loadPtr(Address(regT1, JSScope::offsetOfNext()), regT1);
-
- emitWriteBarrier(regT1, regT0, regT2, regT3, ShouldFilterImmediates, WriteBarrierForVariableAccess);
-
- loadPtr(Address(regT1, JSVariableObject::offsetOfRegisters()), regT1);
- storePtr(regT0, Address(regT1, currentInstruction[1].u.operand * sizeof(Register)));
+ emitGetVirtualRegister(value, regT0);
+ loadPtr(indirectWatchpointSet, regT1);
+ emitNotifyWrite(regT1);
+ loadPtr(addressOfOperand, regT1);
+ storePtr(regT0, regT1);
}
-void JIT::emit_op_init_global_const(Instruction* currentInstruction)
+void JIT::emitPutClosureVar(int scope, uintptr_t operand, int value, WatchpointSet* set)
{
- JSGlobalObject* globalObject = m_codeBlock->globalObject();
-
- emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
-
- store64(regT0, currentInstruction[1].u.registerPointer);
- if (Heap::isWriteBarrierEnabled())
- emitWriteBarrier(globalObject, regT0, regT2, ShouldFilterImmediates, WriteBarrierForVariableAccess);
+ emitGetVirtualRegister(value, regT1);
+ emitGetVirtualRegister(scope, regT0);
+ emitNotifyWrite(set);
+ storePtr(regT1, Address(regT0, JSEnvironmentRecord::offsetOfVariables() + operand * sizeof(Register)));
}
-void JIT::emit_op_init_global_const_check(Instruction* currentInstruction)
+void JIT::emit_op_put_to_scope(Instruction* currentInstruction)
{
- emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
-
- addSlowCase(branchTest8(NonZero, AbsoluteAddress(currentInstruction[3].u.predicatePointer)));
-
- JSGlobalObject* globalObject = m_codeBlock->globalObject();
+ int scope = currentInstruction[1].u.operand;
+ int value = currentInstruction[3].u.operand;
+ GetPutInfo getPutInfo = GetPutInfo(currentInstruction[4].u.operand);
+ ResolveType resolveType = getPutInfo.resolveType();
+ Structure** structureSlot = currentInstruction[5].u.structure.slot();
+ uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&currentInstruction[6].u.pointer);
+
+ auto emitCode = [&] (ResolveType resolveType, bool indirectLoadForOperand) {
+ switch (resolveType) {
+ case GlobalProperty:
+ case GlobalPropertyWithVarInjectionChecks: {
+ emitWriteBarrier(m_codeBlock->globalObject(), value, ShouldFilterValue);
+ emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection.
+ emitGetVirtualRegister(value, regT2);
+
+ loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
+ addSlowCase(branchIfNotToSpace(regT0));
+ loadPtr(operandSlot, regT1);
+ negPtr(regT1);
+ storePtr(regT2, BaseIndex(regT0, regT1, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)));
+ break;
+ }
+ case GlobalVar:
+ case GlobalVarWithVarInjectionChecks:
+ case GlobalLexicalVar:
+ case GlobalLexicalVarWithVarInjectionChecks: {
+ JSScope* constantScope = JSScope::constantScopeForCodeBlock(resolveType, m_codeBlock);
+ RELEASE_ASSERT(constantScope);
+ emitWriteBarrier(constantScope, value, ShouldFilterValue);
+ emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
+ if (getPutInfo.initializationMode() != Initialization && (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)) {
+ // We need to do a TDZ check here because we can't always prove we need to emit TDZ checks statically.
+ if (indirectLoadForOperand)
+ emitGetVarFromIndirectPointer(bitwise_cast<JSValue**>(operandSlot), regT0);
+ else
+ emitGetVarFromPointer(bitwise_cast<JSValue*>(*operandSlot), regT0);
+ addSlowCase(branchTest64(Zero, regT0));
+ }
+ if (indirectLoadForOperand)
+ emitPutGlobalVariableIndirect(bitwise_cast<JSValue**>(operandSlot), value, bitwise_cast<WatchpointSet**>(&currentInstruction[5]));
+ else
+ emitPutGlobalVariable(bitwise_cast<JSValue*>(*operandSlot), value, currentInstruction[5].u.watchpointSet);
+ break;
+ }
+ case LocalClosureVar:
+ case ClosureVar:
+ case ClosureVarWithVarInjectionChecks:
+ emitWriteBarrier(scope, value, ShouldFilterValue);
+ emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
+ emitPutClosureVar(scope, *operandSlot, value, currentInstruction[5].u.watchpointSet);
+ break;
+ case ModuleVar:
+ case Dynamic:
+ addSlowCase(jump());
+ break;
+ case UnresolvedProperty:
+ case UnresolvedPropertyWithVarInjectionChecks:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ };
+
+ switch (resolveType) {
+ case UnresolvedProperty:
+ case UnresolvedPropertyWithVarInjectionChecks: {
+ JumpList skipToEnd;
+ load32(&currentInstruction[4], regT0);
+ and32(TrustedImm32(GetPutInfo::typeBits), regT0); // Load ResolveType into T0
+
+ Jump isGlobalProperty = branch32(Equal, regT0, TrustedImm32(GlobalProperty));
+ Jump notGlobalPropertyWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalPropertyWithVarInjectionChecks));
+ isGlobalProperty.link(this);
+ emitCode(GlobalProperty, false);
+ skipToEnd.append(jump());
+ notGlobalPropertyWithVarInjections.link(this);
+
+ Jump notGlobalLexicalVar = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVar));
+ emitCode(GlobalLexicalVar, true);
+ skipToEnd.append(jump());
+ notGlobalLexicalVar.link(this);
+
+ Jump notGlobalLexicalVarWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVarWithVarInjectionChecks));
+ emitCode(GlobalLexicalVarWithVarInjectionChecks, true);
+ skipToEnd.append(jump());
+ notGlobalLexicalVarWithVarInjections.link(this);
+
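+ // Any resolve type not matched above is handled entirely on the slow path.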
+ addSlowCase(jump());
+
+ skipToEnd.link(this);
+ break;
+ }
- store64(regT0, currentInstruction[1].u.registerPointer);
- if (Heap::isWriteBarrierEnabled())
- emitWriteBarrier(globalObject, regT0, regT2, ShouldFilterImmediates, WriteBarrierForVariableAccess);
+ default:
+ emitCode(resolveType, false);
+ break;
+ }
}
-void JIT::emitSlow_op_init_global_const_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_put_to_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- linkSlowCase(iter);
+ GetPutInfo getPutInfo = GetPutInfo(currentInstruction[4].u.operand);
+ ResolveType resolveType = getPutInfo.resolveType();
+ unsigned linkCount = 0;
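+ // The number of linkSlowCase() calls performed below must mirror the
+ // addSlowCase() calls emitted by emit_op_put_to_scope for this resolve type.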
+ if (resolveType != GlobalVar && resolveType != ClosureVar && resolveType != LocalClosureVar && resolveType != GlobalLexicalVar)
+ linkCount++;
+ if ((resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks
+ || resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks
+ || resolveType == LocalClosureVar)
+ && currentInstruction[5].u.watchpointSet->state() != IsInvalidated)
+ linkCount++;
+ if (resolveType == GlobalProperty || resolveType == GlobalPropertyWithVarInjectionChecks) {
+ linkCount++; // bad structure
+ linkCount++; // read barrier
+ }
+ if (getPutInfo.initializationMode() != Initialization && (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)) // TDZ check.
+ linkCount++;
+ if (resolveType == UnresolvedProperty || resolveType == UnresolvedPropertyWithVarInjectionChecks) {
+ // GlobalProperty/GlobalPropertyWithVarInjectionChecks
+ linkCount++; // emitLoadWithStructureCheck
+ linkCount++; // emitLoadWithStructureCheck
+ linkCount++; // read barrier
+
+ // GlobalLexicalVar
+ bool needsTDZCheck = getPutInfo.initializationMode() != Initialization;
+ if (needsTDZCheck)
+ linkCount++;
+ linkCount++; // Notify write check.
+
+ // GlobalLexicalVarWithVarInjectionChecks
+ linkCount++; // var injection check.
+ if (needsTDZCheck)
+ linkCount++;
+ linkCount++; // Notify write check.
+ }
+ if (!linkCount)
+ return;
+ while (linkCount--)
+ linkSlowCase(iter);
- JITStubCall stubCall(this, cti_op_init_global_const_check);
- stubCall.addArgument(regT0);
- stubCall.addArgument(TrustedImm32(currentInstruction[4].u.operand));
- stubCall.call();
+ if (resolveType == ModuleVar) {
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_throw_strict_mode_readonly_property_write_error);
+ slowPathCall.call();
+ } else
+ callOperation(operationPutToScope, currentInstruction);
}
-void JIT::resetPatchGetById(RepatchBuffer& repatchBuffer, StructureStubInfo* stubInfo)
+void JIT::emit_op_get_from_arguments(Instruction* currentInstruction)
{
- repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_get_by_id);
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.get.structureToCompare), reinterpret_cast<void*>(unusedPointer));
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel), 0);
- repatchBuffer.relink(stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck), stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin));
+ int dst = currentInstruction[1].u.operand;
+ int arguments = currentInstruction[2].u.operand;
+ int index = currentInstruction[3].u.operand;
+
+ emitGetVirtualRegister(arguments, regT0);
+ load64(Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>)), regT0);
+ emitValueProfilingSite();
+ emitPutVirtualRegister(dst);
}
-void JIT::resetPatchPutById(RepatchBuffer& repatchBuffer, StructureStubInfo* stubInfo)
+void JIT::emit_op_put_to_arguments(Instruction* currentInstruction)
{
- if (isDirectPutById(stubInfo))
- repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_put_by_id_direct);
- else
- repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_put_by_id);
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.put.structureToCompare), reinterpret_cast<void*>(unusedPointer));
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel), 0);
+ int arguments = currentInstruction[1].u.operand;
+ int index = currentInstruction[2].u.operand;
+ int value = currentInstruction[3].u.operand;
+
+ emitWriteBarrier(arguments, value, ShouldFilterValue);
+
+ emitGetVirtualRegister(arguments, regT0);
+ emitGetVirtualRegister(value, regT1);
+ store64(regT1, Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>)));
}
#endif // USE(JSVALUE64)
-void JIT::emitWriteBarrier(RegisterID owner, RegisterID value, RegisterID scratch, RegisterID scratch2, WriteBarrierMode mode, WriteBarrierUseKind useKind)
+#if USE(JSVALUE64)
+void JIT::emitWriteBarrier(unsigned owner, unsigned value, WriteBarrierMode mode)
{
- UNUSED_PARAM(owner);
- UNUSED_PARAM(scratch);
- UNUSED_PARAM(scratch2);
- UNUSED_PARAM(useKind);
- UNUSED_PARAM(value);
- UNUSED_PARAM(mode);
- ASSERT(owner != scratch);
- ASSERT(owner != scratch2);
-
-#if ENABLE(WRITE_BARRIER_PROFILING)
- emitCount(WriteBarrierCounters::jitCounterFor(useKind));
-#endif
+ Jump valueNotCell;
+ if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue) {
+ emitGetVirtualRegister(value, regT0);
+ valueNotCell = branchTest64(NonZero, regT0, tagMaskRegister);
+ }
+
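+ // A non-cell value (or owner, when filtering the base) skips the barrier
+ // entirely; an owner that is already remembered or in Eden skips only the
+ // call to operationUnconditionalWriteBarrier.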
+ emitGetVirtualRegister(owner, regT0);
+ Jump ownerNotCell;
+ if (mode == ShouldFilterBaseAndValue || mode == ShouldFilterBase)
+ ownerNotCell = branchTest64(NonZero, regT0, tagMaskRegister);
+
+ Jump ownerIsRememberedOrInEden = jumpIfIsRememberedOrInEden(regT0);
+ callOperation(operationUnconditionalWriteBarrier, regT0);
+ ownerIsRememberedOrInEden.link(this);
+
+ if (mode == ShouldFilterBaseAndValue || mode == ShouldFilterBase)
+ ownerNotCell.link(this);
+ if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue)
+ valueNotCell.link(this);
}
-void JIT::emitWriteBarrier(JSCell* owner, RegisterID value, RegisterID scratch, WriteBarrierMode mode, WriteBarrierUseKind useKind)
+void JIT::emitWriteBarrier(JSCell* owner, unsigned value, WriteBarrierMode mode)
{
- UNUSED_PARAM(owner);
- UNUSED_PARAM(scratch);
- UNUSED_PARAM(useKind);
- UNUSED_PARAM(value);
- UNUSED_PARAM(mode);
-
-#if ENABLE(WRITE_BARRIER_PROFILING)
- emitCount(WriteBarrierCounters::jitCounterFor(useKind));
-#endif
+ emitGetVirtualRegister(value, regT0);
+ Jump valueNotCell;
+ if (mode == ShouldFilterValue)
+ valueNotCell = branchTest64(NonZero, regT0, tagMaskRegister);
+
+ emitWriteBarrier(owner);
+
+ if (mode == ShouldFilterValue)
+ valueNotCell.link(this);
}
-JIT::Jump JIT::addStructureTransitionCheck(JSCell* object, Structure* structure, StructureStubInfo* stubInfo, RegisterID scratch)
+#else // USE(JSVALUE64)
+
+void JIT::emitWriteBarrier(unsigned owner, unsigned value, WriteBarrierMode mode)
{
- if (object->structure() == structure && structure->transitionWatchpointSetIsStillValid()) {
- structure->addTransitionWatchpoint(stubInfo->addWatchpoint(m_codeBlock));
-#if !ASSERT_DISABLED
- move(TrustedImmPtr(object), scratch);
- Jump ok = branchPtr(Equal, Address(scratch, JSCell::structureOffset()), TrustedImmPtr(structure));
- breakpoint();
- ok.link(this);
-#endif
- Jump result; // Returning an unset jump this way because otherwise VC++ would complain.
- return result;
+ Jump valueNotCell;
+ if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue) {
+ emitLoadTag(value, regT0);
+ valueNotCell = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
}
- move(TrustedImmPtr(object), scratch);
- return branchPtr(NotEqual, Address(scratch, JSCell::structureOffset()), TrustedImmPtr(structure));
+ emitLoad(owner, regT0, regT1);
+ Jump ownerNotCell;
+ if (mode == ShouldFilterBase || mode == ShouldFilterBaseAndValue)
+ ownerNotCell = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
+
+ Jump ownerIsRememberedOrInEden = jumpIfIsRememberedOrInEden(regT1);
+ callOperation(operationUnconditionalWriteBarrier, regT1);
+ ownerIsRememberedOrInEden.link(this);
+
+ if (mode == ShouldFilterBase || mode == ShouldFilterBaseAndValue)
+ ownerNotCell.link(this);
+ if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue)
+ valueNotCell.link(this);
}
-void JIT::addStructureTransitionCheck(JSCell* object, Structure* structure, StructureStubInfo* stubInfo, JumpList& failureCases, RegisterID scratch)
+void JIT::emitWriteBarrier(JSCell* owner, unsigned value, WriteBarrierMode mode)
{
- Jump failureCase = addStructureTransitionCheck(object, structure, stubInfo, scratch);
- if (!failureCase.isSet())
- return;
-
- failureCases.append(failureCase);
+ Jump valueNotCell;
+ if (mode == ShouldFilterValue) {
+ emitLoadTag(value, regT0);
+ valueNotCell = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
+ }
+
+ emitWriteBarrier(owner);
+
+ if (mode == ShouldFilterValue)
+ valueNotCell.link(this);
}
-void JIT::testPrototype(JSValue prototype, JumpList& failureCases, StructureStubInfo* stubInfo)
-{
- if (prototype.isNull())
- return;
+#endif // USE(JSVALUE64)
- ASSERT(prototype.isCell());
- addStructureTransitionCheck(prototype.asCell(), prototype.asCell()->structure(), stubInfo, failureCases, regT3);
+void JIT::emitWriteBarrier(JSCell* owner)
+{
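+ // If the owner's block is not already marked, guard the slow call with the
+ // remembered/Eden check; a marked owner always takes the unconditional call.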
+ if (!MarkedBlock::blockFor(owner)->isMarked(owner)) {
+ Jump ownerIsRememberedOrInEden = jumpIfIsRememberedOrInEden(owner);
+ callOperation(operationUnconditionalWriteBarrier, owner);
+ ownerIsRememberedOrInEden.link(this);
+ } else
+ callOperation(operationUnconditionalWriteBarrier, owner);
}
-bool JIT::isDirectPutById(StructureStubInfo* stubInfo)
+void JIT::emitIdentifierCheck(RegisterID cell, RegisterID scratch, const Identifier& propertyName, JumpList& slowCases)
{
- switch (stubInfo->accessType) {
- case access_put_by_id_transition_normal:
- return false;
- case access_put_by_id_transition_direct:
- return true;
- case access_put_by_id_replace:
- case access_put_by_id_generic: {
- void* oldCall = MacroAssembler::readCallTarget(stubInfo->callReturnLocation).executableAddress();
- if (oldCall == bitwise_cast<void*>(cti_op_put_by_id_direct)
- || oldCall == bitwise_cast<void*>(cti_op_put_by_id_direct_generic)
- || oldCall == bitwise_cast<void*>(cti_op_put_by_id_direct_fail))
- return true;
- ASSERT(oldCall == bitwise_cast<void*>(cti_op_put_by_id)
- || oldCall == bitwise_cast<void*>(cti_op_put_by_id_generic)
- || oldCall == bitwise_cast<void*>(cti_op_put_by_id_fail));
- return false;
- }
- default:
- RELEASE_ASSERT_NOT_REACHED();
- return false;
+ if (propertyName.isSymbol()) {
+ slowCases.append(branchStructure(NotEqual, Address(cell, JSCell::structureIDOffset()), m_vm->symbolStructure.get()));
+ loadPtr(Address(cell, Symbol::offsetOfPrivateName()), scratch);
+ } else {
+ slowCases.append(branchStructure(NotEqual, Address(cell, JSCell::structureIDOffset()), m_vm->stringStructure.get()));
+ loadPtr(Address(cell, JSString::offsetOfValue()), scratch);
}
+ slowCases.append(branchPtr(NotEqual, scratch, TrustedImmPtr(propertyName.impl())));
}
void JIT::privateCompileGetByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
@@ -1416,40 +1209,24 @@ void JIT::privateCompileGetByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAd
case JITArrayStorage:
slowCases = emitArrayStorageGetByVal(currentInstruction, badType);
break;
- case JITInt8Array:
- slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_vm->int8ArrayDescriptor(), 1, SignedTypedArray);
- break;
- case JITInt16Array:
- slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_vm->int16ArrayDescriptor(), 2, SignedTypedArray);
- break;
- case JITInt32Array:
- slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_vm->int32ArrayDescriptor(), 4, SignedTypedArray);
- break;
- case JITUint8Array:
- slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_vm->uint8ArrayDescriptor(), 1, UnsignedTypedArray);
- break;
- case JITUint8ClampedArray:
- slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_vm->uint8ClampedArrayDescriptor(), 1, UnsignedTypedArray);
- break;
- case JITUint16Array:
- slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_vm->uint16ArrayDescriptor(), 2, UnsignedTypedArray);
+ case JITDirectArguments:
+ slowCases = emitDirectArgumentsGetByVal(currentInstruction, badType);
break;
- case JITUint32Array:
- slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, m_vm->uint32ArrayDescriptor(), 4, UnsignedTypedArray);
- break;
- case JITFloat32Array:
- slowCases = emitFloatTypedArrayGetByVal(currentInstruction, badType, m_vm->float32ArrayDescriptor(), 4);
- break;
- case JITFloat64Array:
- slowCases = emitFloatTypedArrayGetByVal(currentInstruction, badType, m_vm->float64ArrayDescriptor(), 8);
+ case JITScopedArguments:
+ slowCases = emitScopedArgumentsGetByVal(currentInstruction, badType);
break;
default:
- CRASH();
+ TypedArrayType type = typedArrayTypeForJITArrayMode(arrayMode);
+ if (isInt(type))
+ slowCases = emitIntTypedArrayGetByVal(currentInstruction, badType, type);
+ else
+ slowCases = emitFloatTypedArrayGetByVal(currentInstruction, badType, type);
+ break;
}
Jump done = jump();
- LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
+ LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
@@ -1457,12 +1234,42 @@ void JIT::privateCompileGetByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAd
patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
- patchBuffer,
+ m_codeBlock, patchBuffer,
("Baseline get_by_val stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_val_generic));
+ MacroAssembler::repatchJump(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
+ MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(operationGetByValGeneric));
+}
+
+void JIT::privateCompileGetByValWithCachedId(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, const Identifier& propertyName)
+{
+ Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;
+
+ Jump fastDoneCase;
+ Jump slowDoneCase;
+ JumpList slowCases;
+
+ JITGetByIdGenerator gen = emitGetByValWithCachedId(currentInstruction, propertyName, fastDoneCase, slowDoneCase, slowCases);
+
+ ConcurrentJITLocker locker(m_codeBlock->m_lock);
+ LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
+ patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
+ patchBuffer.link(fastDoneCase, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
+ patchBuffer.link(slowDoneCase, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToNextHotPath));
+
+ for (const auto& callSite : m_calls) {
+ if (callSite.to)
+ patchBuffer.link(callSite.from, FunctionPtr(callSite.to));
+ }
+ gen.finalize(patchBuffer);
+
+ byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
+ m_codeBlock, patchBuffer,
+ ("Baseline get_by_val with cached property name '%s' stub for %s, return point %p", propertyName.impl()->utf8().data(), toCString(*m_codeBlock).data(), returnAddress.value()));
+ byValInfo->stubInfo = gen.stubInfo();
+
+ MacroAssembler::repatchJump(byValInfo->notIndexJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
+ MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(operationGetByValGeneric));
}
void JIT::privateCompilePutByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
@@ -1471,7 +1278,9 @@ void JIT::privateCompilePutByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAd
PatchableJump badType;
JumpList slowCases;
-
+
+ bool needsLinkForWriteBarrier = false;
+
switch (arrayMode) {
case JITInt32:
slowCases = emitInt32PutByVal(currentInstruction, badType);
@@ -1481,62 +1290,149 @@ void JIT::privateCompilePutByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAd
break;
case JITContiguous:
slowCases = emitContiguousPutByVal(currentInstruction, badType);
+ needsLinkForWriteBarrier = true;
break;
case JITArrayStorage:
slowCases = emitArrayStoragePutByVal(currentInstruction, badType);
- break;
- case JITInt8Array:
- slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_vm->int8ArrayDescriptor(), 1, SignedTypedArray, TruncateRounding);
- break;
- case JITInt16Array:
- slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_vm->int16ArrayDescriptor(), 2, SignedTypedArray, TruncateRounding);
- break;
- case JITInt32Array:
- slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_vm->int32ArrayDescriptor(), 4, SignedTypedArray, TruncateRounding);
- break;
- case JITUint8Array:
- slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_vm->uint8ArrayDescriptor(), 1, UnsignedTypedArray, TruncateRounding);
- break;
- case JITUint8ClampedArray:
- slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_vm->uint8ClampedArrayDescriptor(), 1, UnsignedTypedArray, ClampRounding);
- break;
- case JITUint16Array:
- slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_vm->uint16ArrayDescriptor(), 2, UnsignedTypedArray, TruncateRounding);
- break;
- case JITUint32Array:
- slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, m_vm->uint32ArrayDescriptor(), 4, UnsignedTypedArray, TruncateRounding);
- break;
- case JITFloat32Array:
- slowCases = emitFloatTypedArrayPutByVal(currentInstruction, badType, m_vm->float32ArrayDescriptor(), 4);
- break;
- case JITFloat64Array:
- slowCases = emitFloatTypedArrayPutByVal(currentInstruction, badType, m_vm->float64ArrayDescriptor(), 8);
+ needsLinkForWriteBarrier = true;
break;
default:
- CRASH();
+ TypedArrayType type = typedArrayTypeForJITArrayMode(arrayMode);
+ if (isInt(type))
+ slowCases = emitIntTypedArrayPutByVal(currentInstruction, badType, type);
+ else
+ slowCases = emitFloatTypedArrayPutByVal(currentInstruction, badType, type);
break;
}
Jump done = jump();
- LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
-
+ LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
-
patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
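+ // The contiguous and array-storage cases emit a write barrier; link that call
+ // to operationUnconditionalWriteBarrier in this stub's link buffer.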
+ if (needsLinkForWriteBarrier) {
+ ASSERT(m_calls.last().to == operationUnconditionalWriteBarrier);
+ patchBuffer.link(m_calls.last().from, operationUnconditionalWriteBarrier);
+ }
+ bool isDirect = m_interpreter->getOpcodeID(currentInstruction->u.opcode) == op_put_by_val_direct;
+ if (!isDirect) {
+ byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
+ m_codeBlock, patchBuffer,
+ ("Baseline put_by_val stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
+
+ } else {
+ byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
+ m_codeBlock, patchBuffer,
+ ("Baseline put_by_val_direct stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
+ }
+ MacroAssembler::repatchJump(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
+ MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(isDirect ? operationDirectPutByValGeneric : operationPutByValGeneric));
+}
+
+void JIT::privateCompilePutByValWithCachedId(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, PutKind putKind, const Identifier& propertyName)
+{
+ Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;
+
+ JumpList doneCases;
+ JumpList slowCases;
+
+ JITPutByIdGenerator gen = emitPutByValWithCachedId(currentInstruction, putKind, propertyName, doneCases, slowCases);
+
+ ConcurrentJITLocker locker(m_codeBlock->m_lock);
+ LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
+ patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
+ patchBuffer.link(doneCases, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
+ for (const auto& callSite : m_calls) {
+ if (callSite.to)
+ patchBuffer.link(callSite.from, FunctionPtr(callSite.to));
+ }
+ gen.finalize(patchBuffer);
+
byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
- patchBuffer,
- ("Baseline put_by_val stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
+ m_codeBlock, patchBuffer,
+ ("Baseline put_by_val%s with cached property name '%s' stub for %s, return point %p", (putKind == Direct) ? "_direct" : "", propertyName.impl()->utf8().data(), toCString(*m_codeBlock).data(), returnAddress.value()));
+ byValInfo->stubInfo = gen.stubInfo();
+
+ MacroAssembler::repatchJump(byValInfo->notIndexJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
+ MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(putKind == Direct ? operationDirectPutByValGeneric : operationPutByValGeneric));
+}
+
+
+JIT::JumpList JIT::emitDirectArgumentsGetByVal(Instruction*, PatchableJump& badType)
+{
+ JumpList slowCases;
+
+#if USE(JSVALUE64)
+ RegisterID base = regT0;
+ RegisterID property = regT1;
+ JSValueRegs result = JSValueRegs(regT0);
+ RegisterID scratch = regT3;
+#else
+ RegisterID base = regT0;
+ RegisterID property = regT2;
+ JSValueRegs result = JSValueRegs(regT1, regT0);
+ RegisterID scratch = regT3;
+#endif
+
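+ // Take the slow path unless this is a DirectArguments object with an
+ // in-bounds index and no overridden arguments.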
+ load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
+ badType = patchableBranch32(NotEqual, scratch, TrustedImm32(DirectArgumentsType));
+
+ slowCases.append(branch32(AboveOrEqual, property, Address(base, DirectArguments::offsetOfLength())));
+ slowCases.append(branchTestPtr(NonZero, Address(base, DirectArguments::offsetOfOverrides())));
+
+ zeroExtend32ToPtr(property, scratch);
+ loadValue(BaseIndex(base, scratch, TimesEight, DirectArguments::storageOffset()), result);
+
+ return slowCases;
+}
+
+JIT::JumpList JIT::emitScopedArgumentsGetByVal(Instruction*, PatchableJump& badType)
+{
+ JumpList slowCases;
+
+#if USE(JSVALUE64)
+ RegisterID base = regT0;
+ RegisterID property = regT1;
+ JSValueRegs result = JSValueRegs(regT0);
+ RegisterID scratch = regT3;
+ RegisterID scratch2 = regT4;
+#else
+ RegisterID base = regT0;
+ RegisterID property = regT2;
+ JSValueRegs result = JSValueRegs(regT1, regT0);
+ RegisterID scratch = regT3;
+ RegisterID scratch2 = regT4;
+#endif
+
+ load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
+ badType = patchableBranch32(NotEqual, scratch, TrustedImm32(ScopedArgumentsType));
+ slowCases.append(branch32(AboveOrEqual, property, Address(base, ScopedArguments::offsetOfTotalLength())));
+
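+ // Indices below the ScopedArgumentsTable length resolve through the table into
+ // the scope's variable storage (an invalid ScopeOffset goes to the slow path);
+ // larger indices read the ScopedArguments object's own overflow storage.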
+ loadPtr(Address(base, ScopedArguments::offsetOfTable()), scratch);
+ load32(Address(scratch, ScopedArgumentsTable::offsetOfLength()), scratch2);
+ Jump overflowCase = branch32(AboveOrEqual, property, scratch2);
+ loadPtr(Address(base, ScopedArguments::offsetOfScope()), scratch2);
+ loadPtr(Address(scratch, ScopedArgumentsTable::offsetOfArguments()), scratch);
+ load32(BaseIndex(scratch, property, TimesFour), scratch);
+ slowCases.append(branch32(Equal, scratch, TrustedImm32(ScopeOffset::invalidOffset)));
+ loadValue(BaseIndex(scratch2, scratch, TimesEight, JSEnvironmentRecord::offsetOfVariables()), result);
+ Jump done = jump();
+ overflowCase.link(this);
+ sub32(property, scratch2);
+ neg32(scratch2);
+ loadValue(BaseIndex(base, scratch2, TimesEight, ScopedArguments::overflowStorageOffset()), result);
+ slowCases.append(branchIfEmpty(result));
+ done.link(this);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_put_by_val_generic));
+ return slowCases;
}
-JIT::JumpList JIT::emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor& descriptor, size_t elementSize, TypedArraySignedness signedness)
+JIT::JumpList JIT::emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badType, TypedArrayType type)
{
+ ASSERT(isInt(type));
+
// The best way to test the array type is to use the classInfo. We need to do so without
// clobbering the register that holds the indexing type, base, and property.
@@ -1555,33 +1451,33 @@ JIT::JumpList JIT::emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badTyp
JumpList slowCases;
- loadPtr(Address(base, JSCell::structureOffset()), scratch);
- badType = patchableBranchPtr(NotEqual, Address(scratch, Structure::classInfoOffset()), TrustedImmPtr(descriptor.m_classInfo));
- slowCases.append(branch32(AboveOrEqual, property, Address(base, descriptor.m_lengthOffset)));
- loadPtr(Address(base, descriptor.m_storageOffset), base);
+ load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
+ badType = patchableBranch32(NotEqual, scratch, TrustedImm32(typeForTypedArrayType(type)));
+ slowCases.append(branch32(AboveOrEqual, property, Address(base, JSArrayBufferView::offsetOfLength())));
+ slowCases.append(loadTypedArrayVector(base, scratch));
- switch (elementSize) {
+ switch (elementSize(type)) {
case 1:
- if (signedness == SignedTypedArray)
- load8Signed(BaseIndex(base, property, TimesOne), resultPayload);
+ if (JSC::isSigned(type))
+ load8SignedExtendTo32(BaseIndex(scratch, property, TimesOne), resultPayload);
else
- load8(BaseIndex(base, property, TimesOne), resultPayload);
+ load8(BaseIndex(scratch, property, TimesOne), resultPayload);
break;
case 2:
- if (signedness == SignedTypedArray)
- load16Signed(BaseIndex(base, property, TimesTwo), resultPayload);
+ if (JSC::isSigned(type))
+ load16SignedExtendTo32(BaseIndex(scratch, property, TimesTwo), resultPayload);
else
- load16(BaseIndex(base, property, TimesTwo), resultPayload);
+ load16(BaseIndex(scratch, property, TimesTwo), resultPayload);
break;
case 4:
- load32(BaseIndex(base, property, TimesFour), resultPayload);
+ load32(BaseIndex(scratch, property, TimesFour), resultPayload);
break;
default:
CRASH();
}
Jump done;
- if (elementSize == 4 && signedness == UnsignedTypedArray) {
+ if (type == TypeUint32) {
Jump canBeInt = branch32(GreaterThanOrEqual, resultPayload, TrustedImm32(0));
convertInt32ToDouble(resultPayload, fpRegT0);
@@ -1607,8 +1503,10 @@ JIT::JumpList JIT::emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badTyp
return slowCases;
}
-JIT::JumpList JIT::emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor& descriptor, size_t elementSize)
+JIT::JumpList JIT::emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badType, TypedArrayType type)
{
+ ASSERT(isFloat(type));
+
#if USE(JSVALUE64)
RegisterID base = regT0;
RegisterID property = regT1;
@@ -1623,19 +1521,19 @@ JIT::JumpList JIT::emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badT
#endif
JumpList slowCases;
+
+ load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
+ badType = patchableBranch32(NotEqual, scratch, TrustedImm32(typeForTypedArrayType(type)));
+ slowCases.append(branch32(AboveOrEqual, property, Address(base, JSArrayBufferView::offsetOfLength())));
+ slowCases.append(loadTypedArrayVector(base, scratch));
- loadPtr(Address(base, JSCell::structureOffset()), scratch);
- badType = patchableBranchPtr(NotEqual, Address(scratch, Structure::classInfoOffset()), TrustedImmPtr(descriptor.m_classInfo));
- slowCases.append(branch32(AboveOrEqual, property, Address(base, descriptor.m_lengthOffset)));
- loadPtr(Address(base, descriptor.m_storageOffset), base);
-
- switch (elementSize) {
+ switch (elementSize(type)) {
case 4:
- loadFloat(BaseIndex(base, property, TimesFour), fpRegT0);
+ loadFloat(BaseIndex(scratch, property, TimesFour), fpRegT0);
convertFloatToDouble(fpRegT0, fpRegT0);
break;
case 8: {
- loadDouble(BaseIndex(base, property, TimesEight), fpRegT0);
+ loadDouble(BaseIndex(scratch, property, TimesEight), fpRegT0);
break;
}
default:
@@ -1643,8 +1541,8 @@ JIT::JumpList JIT::emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badT
}
Jump notNaN = branchDouble(DoubleEqual, fpRegT0, fpRegT0);
- static const double NaN = QNaN;
- loadDouble(&NaN, fpRegT0);
+ static const double NaN = PNaN;
+ loadDouble(TrustedImmPtr(&NaN), fpRegT0);
notNaN.link(this);
#if USE(JSVALUE64)
@@ -1656,9 +1554,12 @@ JIT::JumpList JIT::emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badT
return slowCases;
}
-JIT::JumpList JIT::emitIntTypedArrayPutByVal(Instruction* currentInstruction, PatchableJump& badType, const TypedArrayDescriptor& descriptor, size_t elementSize, TypedArraySignedness signedness, TypedArrayRounding rounding)
+JIT::JumpList JIT::emitIntTypedArrayPutByVal(Instruction* currentInstruction, PatchableJump& badType, TypedArrayType type)
{
- unsigned value = currentInstruction[3].u.operand;
+ ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+ ASSERT(isInt(type));
+
+ int value = currentInstruction[3].u.operand;
#if USE(JSVALUE64)
RegisterID base = regT0;
@@ -1674,13 +1575,16 @@ JIT::JumpList JIT::emitIntTypedArrayPutByVal(Instruction* currentInstruction, Pa
JumpList slowCases;
- loadPtr(Address(base, JSCell::structureOffset()), earlyScratch);
- badType = patchableBranchPtr(NotEqual, Address(earlyScratch, Structure::classInfoOffset()), TrustedImmPtr(descriptor.m_classInfo));
- slowCases.append(branch32(AboveOrEqual, property, Address(base, descriptor.m_lengthOffset)));
+ load8(Address(base, JSCell::typeInfoTypeOffset()), earlyScratch);
+ badType = patchableBranch32(NotEqual, earlyScratch, TrustedImm32(typeForTypedArrayType(type)));
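+ // An out-of-bounds typed array store is a no-op: record it in the array
+ // profile and skip the store.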
+ Jump inBounds = branch32(Below, property, Address(base, JSArrayBufferView::offsetOfLength()));
+ emitArrayProfileOutOfBoundsSpecialCase(profile);
+ Jump done = jump();
+ inBounds.link(this);
#if USE(JSVALUE64)
emitGetVirtualRegister(value, earlyScratch);
- slowCases.append(emitJumpIfNotImmediateInteger(earlyScratch));
+ slowCases.append(emitJumpIfNotInt(earlyScratch));
#else
emitLoad(value, lateScratch, earlyScratch);
slowCases.append(branch32(NotEqual, lateScratch, TrustedImm32(JSValue::Int32Tag)));
@@ -1688,11 +1592,11 @@ JIT::JumpList JIT::emitIntTypedArrayPutByVal(Instruction* currentInstruction, Pa
// We would be loading this into base as in get_by_val, except that the slow
// path expects the base to be unclobbered.
- loadPtr(Address(base, descriptor.m_storageOffset), lateScratch);
+ slowCases.append(loadTypedArrayVector(base, lateScratch));
- if (rounding == ClampRounding) {
- ASSERT(elementSize == 1);
- ASSERT_UNUSED(signedness, signedness = UnsignedTypedArray);
+ if (isClamped(type)) {
+ ASSERT(elementSize(type) == 1);
+ ASSERT(!JSC::isSigned(type));
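+ // Uint8ClampedArray stores clamp the int32 value to the range [0, 255].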
Jump inBounds = branch32(BelowOrEqual, earlyScratch, TrustedImm32(0xff));
Jump tooBig = branch32(GreaterThan, earlyScratch, TrustedImm32(0xff));
xor32(earlyScratch, earlyScratch);
@@ -1703,7 +1607,7 @@ JIT::JumpList JIT::emitIntTypedArrayPutByVal(Instruction* currentInstruction, Pa
inBounds.link(this);
}
- switch (elementSize) {
+ switch (elementSize(type)) {
case 1:
store8(earlyScratch, BaseIndex(lateScratch, property, TimesOne));
break;
@@ -1717,12 +1621,17 @@ JIT::JumpList JIT::emitIntTypedArrayPutByVal(Instruction* currentInstruction, Pa
CRASH();
}
+ done.link(this);
+
return slowCases;
}
-JIT::JumpList JIT::emitFloatTypedArrayPutByVal(Instruction* currentInstruction, PatchableJump& badType, const TypedArrayDescriptor& descriptor, size_t elementSize)
+JIT::JumpList JIT::emitFloatTypedArrayPutByVal(Instruction* currentInstruction, PatchableJump& badType, TypedArrayType type)
{
- unsigned value = currentInstruction[3].u.operand;
+ ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+ ASSERT(isFloat(type));
+
+ int value = currentInstruction[3].u.operand;
#if USE(JSVALUE64)
RegisterID base = regT0;
@@ -1738,17 +1647,20 @@ JIT::JumpList JIT::emitFloatTypedArrayPutByVal(Instruction* currentInstruction,
JumpList slowCases;
- loadPtr(Address(base, JSCell::structureOffset()), earlyScratch);
- badType = patchableBranchPtr(NotEqual, Address(earlyScratch, Structure::classInfoOffset()), TrustedImmPtr(descriptor.m_classInfo));
- slowCases.append(branch32(AboveOrEqual, property, Address(base, descriptor.m_lengthOffset)));
+ load8(Address(base, JSCell::typeInfoTypeOffset()), earlyScratch);
+ badType = patchableBranch32(NotEqual, earlyScratch, TrustedImm32(typeForTypedArrayType(type)));
+ Jump inBounds = branch32(Below, property, Address(base, JSArrayBufferView::offsetOfLength()));
+ emitArrayProfileOutOfBoundsSpecialCase(profile);
+ Jump done = jump();
+ inBounds.link(this);
#if USE(JSVALUE64)
emitGetVirtualRegister(value, earlyScratch);
- Jump doubleCase = emitJumpIfNotImmediateInteger(earlyScratch);
+ Jump doubleCase = emitJumpIfNotInt(earlyScratch);
convertInt32ToDouble(earlyScratch, fpRegT0);
Jump ready = jump();
doubleCase.link(this);
- slowCases.append(emitJumpIfNotImmediateNumber(earlyScratch));
+ slowCases.append(emitJumpIfNotNumber(earlyScratch));
add64(tagTypeNumberRegister, earlyScratch);
move64ToDouble(earlyScratch, fpRegT0);
ready.link(this);
@@ -1765,9 +1677,9 @@ JIT::JumpList JIT::emitFloatTypedArrayPutByVal(Instruction* currentInstruction,
// We would be loading this into base as in get_by_val, except that the slow
// path expects the base to be unclobbered.
- loadPtr(Address(base, descriptor.m_storageOffset), lateScratch);
+ slowCases.append(loadTypedArrayVector(base, lateScratch));
- switch (elementSize) {
+ switch (elementSize(type)) {
case 4:
convertDoubleToFloat(fpRegT0, fpRegT0);
storeFloat(fpRegT0, BaseIndex(lateScratch, property, TimesFour));
@@ -1779,6 +1691,8 @@ JIT::JumpList JIT::emitFloatTypedArrayPutByVal(Instruction* currentInstruction,
CRASH();
}
+ done.link(this);
+
return slowCases;
}
diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
index 1cc98ef66..ce93d4140 100644
--- a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009, 2014, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,73 +30,111 @@
#include "JIT.h"
#include "CodeBlock.h"
+#include "DirectArguments.h"
#include "GCAwareJITStubRoutine.h"
#include "Interpreter.h"
#include "JITInlines.h"
-#include "JITStubCall.h"
#include "JSArray.h"
+#include "JSEnvironmentRecord.h"
#include "JSFunction.h"
-#include "JSPropertyNameIterator.h"
-#include "JSVariableObject.h"
#include "LinkBuffer.h"
-#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
+#include "SlowPathCall.h"
#include <wtf/StringPrintStream.h>
-#ifndef NDEBUG
-#include <stdio.h>
-#endif
-
-using namespace std;
namespace JSC {
void JIT::emit_op_put_by_index(Instruction* currentInstruction)
{
- unsigned base = currentInstruction[1].u.operand;
- unsigned property = currentInstruction[2].u.operand;
- unsigned value = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_put_by_index);
- stubCall.addArgument(base);
- stubCall.addArgument(TrustedImm32(property));
- stubCall.addArgument(value);
- stubCall.call();
+ int base = currentInstruction[1].u.operand;
+ int property = currentInstruction[2].u.operand;
+ int value = currentInstruction[3].u.operand;
+
+ emitLoad(base, regT1, regT0);
+ emitLoad(value, regT3, regT2);
+ callOperation(operationPutByIndex, regT1, regT0, property, regT3, regT2);
+}
+
+void JIT::emit_op_put_getter_by_id(Instruction* currentInstruction)
+{
+ int base = currentInstruction[1].u.operand;
+ int property = currentInstruction[2].u.operand;
+ int options = currentInstruction[3].u.operand;
+ int getter = currentInstruction[4].u.operand;
+
+ emitLoadPayload(base, regT1);
+ emitLoadPayload(getter, regT3);
+ callOperation(operationPutGetterById, regT1, m_codeBlock->identifier(property).impl(), options, regT3);
+}
+
+void JIT::emit_op_put_setter_by_id(Instruction* currentInstruction)
+{
+ int base = currentInstruction[1].u.operand;
+ int property = currentInstruction[2].u.operand;
+ unsigned options = currentInstruction[3].u.operand;
+ int setter = currentInstruction[4].u.operand;
+
+ emitLoadPayload(base, regT1);
+ emitLoadPayload(setter, regT3);
+ callOperation(operationPutSetterById, regT1, m_codeBlock->identifier(property).impl(), options, regT3);
+}
+
+void JIT::emit_op_put_getter_setter_by_id(Instruction* currentInstruction)
+{
+ int base = currentInstruction[1].u.operand;
+ int property = currentInstruction[2].u.operand;
+ unsigned attribute = currentInstruction[3].u.operand;
+ int getter = currentInstruction[4].u.operand;
+ int setter = currentInstruction[5].u.operand;
+
+ emitLoadPayload(base, regT1);
+ emitLoadPayload(getter, regT3);
+ emitLoadPayload(setter, regT4);
+ callOperation(operationPutGetterSetter, regT1, m_codeBlock->identifier(property).impl(), attribute, regT3, regT4);
}
-void JIT::emit_op_put_getter_setter(Instruction* currentInstruction)
+void JIT::emit_op_put_getter_by_val(Instruction* currentInstruction)
{
- unsigned base = currentInstruction[1].u.operand;
- unsigned property = currentInstruction[2].u.operand;
- unsigned getter = currentInstruction[3].u.operand;
- unsigned setter = currentInstruction[4].u.operand;
-
- JITStubCall stubCall(this, cti_op_put_getter_setter);
- stubCall.addArgument(base);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(property)));
- stubCall.addArgument(getter);
- stubCall.addArgument(setter);
- stubCall.call();
+ int base = currentInstruction[1].u.operand;
+ int property = currentInstruction[2].u.operand;
+ int32_t attributes = currentInstruction[3].u.operand;
+ int getter = currentInstruction[4].u.operand;
+
+ emitLoadPayload(base, regT2);
+ emitLoad(property, regT1, regT0);
+ emitLoadPayload(getter, regT3);
+ callOperation(operationPutGetterByVal, regT2, regT1, regT0, attributes, regT3);
+}
+
+void JIT::emit_op_put_setter_by_val(Instruction* currentInstruction)
+{
+ int base = currentInstruction[1].u.operand;
+ int property = currentInstruction[2].u.operand;
+ int32_t attributes = currentInstruction[3].u.operand;
+ int setter = currentInstruction[4].u.operand;
+
+ emitLoadPayload(base, regT2);
+ emitLoad(property, regT1, regT0);
+ emitLoadPayload(setter, regT3);
+ callOperation(operationPutSetterByVal, regT2, regT1, regT0, attributes, regT3);
}
void JIT::emit_op_del_by_id(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
-
- JITStubCall stubCall(this, cti_op_del_by_id);
- stubCall.addArgument(base);
- stubCall.addArgument(TrustedImmPtr(&m_codeBlock->identifier(property)));
- stubCall.call(dst);
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int property = currentInstruction[3].u.operand;
+ emitLoad(base, regT1, regT0);
+ callOperation(operationDeleteById, dst, regT1, regT0, &m_codeBlock->identifier(property));
}
JIT::CodeRef JIT::stringGetByValStubGenerator(VM* vm)
{
- JSInterfaceJIT jit;
+ JSInterfaceJIT jit(vm);
JumpList failures;
- failures.append(jit.branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(vm->stringStructure.get())));
+ failures.append(jit.branchStructure(NotEqual, Address(regT0, JSCell::structureIDOffset()), vm->stringStructure.get()));
// Load string length to regT1, and start the process of loading the data pointer into regT0
jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT1);
@@ -130,23 +168,24 @@ JIT::CodeRef JIT::stringGetByValStubGenerator(VM* vm)
jit.move(TrustedImm32(0), regT0);
jit.ret();
- LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
return FINALIZE_CODE(patchBuffer, ("String get_by_val stub"));
}
void JIT::emit_op_get_by_val(Instruction* currentInstruction)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int property = currentInstruction[3].u.operand;
ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+ ByValInfo* byValInfo = m_codeBlock->addByValInfo();
emitLoad2(base, regT1, regT0, property, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
emitJumpSlowCaseIfNotJSCell(base, regT1);
- loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
- emitArrayProfilingSite(regT1, regT3, profile);
+ PatchableJump notIndex = patchableBranch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag));
+ addSlowCase(notIndex);
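+ // notIndex is recorded in the ByValCompilationInfo so this site can later be
+ // repatched to a get_by_val stub with a cached property name.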
+ emitArrayProfilingSiteWithCell(regT0, regT1, profile);
and32(TrustedImm32(IndexingShapeMask), regT1);
PatchableJump badType;
@@ -175,28 +214,27 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
Label done = label();
-#if !ASSERT_DISABLED
- Jump resultOK = branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag));
- breakpoint();
- resultOK.link(this);
-#endif
+ if (!ASSERT_DISABLED) {
+ Jump resultOK = branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag));
+ abortWithReason(JITGetByValResultIsNotEmpty);
+ resultOK.link(this);
+ }
emitValueProfilingSite();
emitStore(dst, regT1, regT0);
- map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_val), dst, regT1, regT0);
+
+ Label nextHotPath = label();
- m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
+ m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, notIndex, badType, mode, profile, done, nextHotPath));
}
-JIT::JumpList JIT::emitContiguousGetByVal(Instruction*, PatchableJump& badType, IndexingType expectedShape)
+JIT::JumpList JIT::emitContiguousLoad(Instruction*, PatchableJump& badType, IndexingType expectedShape)
{
JumpList slowCases;
badType = patchableBranch32(NotEqual, regT1, TrustedImm32(expectedShape));
-
loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, Butterfly::offsetOfPublicLength())));
-
load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
slowCases.append(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));
@@ -204,52 +242,75 @@ JIT::JumpList JIT::emitContiguousGetByVal(Instruction*, PatchableJump& badType,
return slowCases;
}
-JIT::JumpList JIT::emitDoubleGetByVal(Instruction*, PatchableJump& badType)
+JIT::JumpList JIT::emitDoubleLoad(Instruction*, PatchableJump& badType)
{
JumpList slowCases;
badType = patchableBranch32(NotEqual, regT1, TrustedImm32(DoubleShape));
-
loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, Butterfly::offsetOfPublicLength())));
-
loadDouble(BaseIndex(regT3, regT2, TimesEight), fpRegT0);
slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0));
- moveDoubleToInts(fpRegT0, regT0, regT1);
return slowCases;
}
-JIT::JumpList JIT::emitArrayStorageGetByVal(Instruction*, PatchableJump& badType)
+JIT::JumpList JIT::emitArrayStorageLoad(Instruction*, PatchableJump& badType)
{
JumpList slowCases;
add32(TrustedImm32(-ArrayStorageShape), regT1, regT3);
badType = patchableBranch32(Above, regT3, TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape));
-
loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, ArrayStorage::vectorLengthOffset())));
-
load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
slowCases.append(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));
return slowCases;
}
-
+
+JITGetByIdGenerator JIT::emitGetByValWithCachedId(Instruction* currentInstruction, const Identifier& propertyName, Jump& fastDoneCase, Jump& slowDoneCase, JumpList& slowCases)
+{
+ int dst = currentInstruction[1].u.operand;
+
+ // base: tag(regT1), payload(regT0)
+ // property: tag(regT3), payload(regT2)
+ // scratch: regT4
+
+ slowCases.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::CellTag)));
+ emitIdentifierCheck(regT2, regT4, propertyName, slowCases);
+
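+ // The property is now known to be the cached identifier, so the regular
+ // get_by_id inline cache machinery can be reused.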
+ JITGetByIdGenerator gen(
+ m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(),
+ JSValueRegs::payloadOnly(regT0), JSValueRegs(regT1, regT0));
+ gen.generateFastPath(*this);
+
+ fastDoneCase = jump();
+
+ Label coldPathBegin = label();
+ gen.slowPathJump().link(this);
+
+ Call call = callOperation(WithProfile, operationGetByIdOptimize, dst, gen.stubInfo(), regT1, regT0, propertyName.impl());
+ gen.reportSlowPathCall(coldPathBegin, call);
+ slowDoneCase = jump();
+
+ return gen;
+}
+
void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
- ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
-
- linkSlowCase(iter); // property int32 check
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int property = currentInstruction[3].u.operand;
+ ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;
+
linkSlowCaseIfNotJSCell(iter, base); // base cell check
+ linkSlowCase(iter); // property int32 check
Jump nonCell = jump();
linkSlowCase(iter); // base array check
- Jump notString = branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get()));
+ Jump notString = branchStructure(NotEqual, Address(regT0, JSCell::structureIDOffset()), m_vm->stringStructure.get());
emitNakedCall(m_vm->getCTIStub(stringGetByValStubGenerator).code());
Jump failed = branchTestPtr(Zero, regT0);
emitStore(dst, regT1, regT0);
@@ -257,22 +318,15 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas
failed.link(this);
notString.link(this);
nonCell.link(this);
-
- Jump skipProfiling = jump();
linkSlowCase(iter); // vector length check
linkSlowCase(iter); // empty value
- emitArrayProfileOutOfBoundsSpecialCase(profile);
-
- skipProfiling.link(this);
-
Label slowPath = label();
- JITStubCall stubCall(this, cti_op_get_by_val);
- stubCall.addArgument(base);
- stubCall.addArgument(property);
- Call call = stubCall.call(dst);
+ emitLoad(base, regT1, regT0);
+ emitLoad(property, regT3, regT2);
+ Call call = callOperation(operationGetByValOptimize, dst, regT1, regT0, regT3, regT2, byValInfo);
m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
@@ -283,16 +337,17 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas
void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
- unsigned base = currentInstruction[1].u.operand;
- unsigned property = currentInstruction[2].u.operand;
+ int base = currentInstruction[1].u.operand;
+ int property = currentInstruction[2].u.operand;
ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+ ByValInfo* byValInfo = m_codeBlock->addByValInfo();
emitLoad2(base, regT1, regT0, property, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
emitJumpSlowCaseIfNotJSCell(base, regT1);
- loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
- emitArrayProfilingSite(regT1, regT3, profile);
+ PatchableJump notIndex = patchableBranch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag));
+ addSlowCase(notIndex);
+ emitArrayProfilingSiteWithCell(regT0, regT1, profile);
and32(TrustedImm32(IndexingShapeMask), regT1);
PatchableJump badType;
@@ -322,12 +377,13 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
Label done = label();
- m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
+ m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, notIndex, badType, mode, profile, done, done));
}
JIT::JumpList JIT::emitGenericContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType, IndexingType indexingShape)
{
- unsigned value = currentInstruction[3].u.operand;
+ int base = currentInstruction[1].u.operand;
+ int value = currentInstruction[3].u.operand;
ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
JumpList slowCases;
@@ -342,10 +398,14 @@ JIT::JumpList JIT::emitGenericContiguousPutByVal(Instruction* currentInstruction
switch (indexingShape) {
case Int32Shape:
slowCases.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
- // Fall through.
+ store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+ store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ break;
case ContiguousShape:
store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ emitLoad(base, regT2, regT3);
+ emitWriteBarrier(base, value, ShouldFilterValue);
break;
case DoubleShape: {
Jump notInt = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
@@ -376,14 +436,13 @@ JIT::JumpList JIT::emitGenericContiguousPutByVal(Instruction* currentInstruction
done.link(this);
- emitWriteBarrier(regT0, regT1, regT1, regT3, UnconditionalWriteBarrier, WriteBarrierForPropertyAccess);
-
return slowCases;
}
JIT::JumpList JIT::emitArrayStoragePutByVal(Instruction* currentInstruction, PatchableJump& badType)
{
- unsigned value = currentInstruction[3].u.operand;
+ int base = currentInstruction[1].u.operand;
+ int value = currentInstruction[3].u.operand;
ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
JumpList slowCases;
@@ -412,20 +471,57 @@ JIT::JumpList JIT::emitArrayStoragePutByVal(Instruction* currentInstruction, Pat
end.link(this);
- emitWriteBarrier(regT0, regT1, regT1, regT3, UnconditionalWriteBarrier, WriteBarrierForPropertyAccess);
+ emitWriteBarrier(base, value, ShouldFilterValue);
return slowCases;
}
+JITPutByIdGenerator JIT::emitPutByValWithCachedId(Instruction* currentInstruction, PutKind putKind, const Identifier& propertyName, JumpList& doneCases, JumpList& slowCases)
+{
+ // base: tag(regT1), payload(regT0)
+ // property: tag(regT3), payload(regT2)
+
+ int base = currentInstruction[1].u.operand;
+ int value = currentInstruction[3].u.operand;
+
+ slowCases.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::CellTag)));
+ emitIdentifierCheck(regT2, regT2, propertyName, slowCases);
+
+ // The write barrier clobbers the registers, so reload them after it has
+ // been issued.
+ emitWriteBarrier(base, value, ShouldFilterBase);
+ emitLoadPayload(base, regT0);
+ emitLoad(value, regT3, regT2);
+
+ JITPutByIdGenerator gen(
+ m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(),
+ JSValueRegs::payloadOnly(regT0), JSValueRegs(regT3, regT2), regT1, m_codeBlock->ecmaMode(), putKind);
+ gen.generateFastPath(*this);
+ doneCases.append(jump());
+
+ Label coldPathBegin = label();
+ gen.slowPathJump().link(this);
+
+ // JITPutByIdGenerator only preserves the value and the base's payload, so we have to reload the base's tag.
+ emitLoadTag(base, regT1);
+
+ Call call = callOperation(gen.slowPathFunction(), gen.stubInfo(), regT3, regT2, regT1, regT0, propertyName.impl());
+ gen.reportSlowPathCall(coldPathBegin, call);
+ doneCases.append(jump());
+
+ return gen;
+}
+
void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned base = currentInstruction[1].u.operand;
- unsigned property = currentInstruction[2].u.operand;
- unsigned value = currentInstruction[3].u.operand;
+ int base = currentInstruction[1].u.operand;
+ int property = currentInstruction[2].u.operand;
+ int value = currentInstruction[3].u.operand;
ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+ ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;
- linkSlowCase(iter); // property int32 check
linkSlowCaseIfNotJSCell(iter, base); // base cell check
+ linkSlowCase(iter); // property int32 check
linkSlowCase(iter); // base not array check
JITArrayMode mode = chooseArrayMode(profile);
@@ -445,12 +541,34 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas
Label slowPath = label();
- JITStubCall stubPutByValCall(this, cti_op_put_by_val);
- stubPutByValCall.addArgument(base);
- stubPutByValCall.addArgument(property);
- stubPutByValCall.addArgument(value);
- Call call = stubPutByValCall.call();
-
+ bool isDirect = m_interpreter->getOpcodeID(currentInstruction->u.opcode) == op_put_by_val_direct;
+
+#if CPU(X86)
+ // FIXME: We only have 5 temp registers, but need 6 to make this call, therefore we materialize
+ // our own call. When we finish moving JSC to the C call stack, we'll get another register so
+ // we can use the normal case.
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ emitLoad(base, regT0, regT1);
+ addCallArgument(regT1);
+ addCallArgument(regT0);
+ emitLoad(property, regT0, regT1);
+ addCallArgument(regT1);
+ addCallArgument(regT0);
+ emitLoad(value, regT0, regT1);
+ addCallArgument(regT1);
+ addCallArgument(regT0);
+ addCallArgument(TrustedImmPtr(byValInfo));
+ Call call = appendCallWithExceptionCheck(isDirect ? operationDirectPutByValOptimize : operationPutByValOptimize);
+#else
+ // The register selection below is chosen to reduce register swapping on ARM.
+ // Swapping shouldn't happen on other platforms.
+ emitLoad(base, regT2, regT1);
+ emitLoad(property, regT3, regT0);
+ emitLoad(value, regT5, regT4);
+ Call call = callOperation(isDirect ? operationDirectPutByValOptimize : operationPutByValOptimize, regT2, regT1, regT3, regT0, regT5, regT4, byValInfo);
+#endif
+
m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
m_byValInstructionIndex++;
@@ -460,79 +578,41 @@ void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
int base = currentInstruction[2].u.operand;
- Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+ const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
emitLoad(base, regT1, regT0);
emitJumpSlowCaseIfNotJSCell(base, regT1);
- compileGetByIdHotPath(ident);
- emitValueProfilingSite();
- emitStore(dst, regT1, regT0);
- map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_id), dst, regT1, regT0);
-}
-void JIT::compileGetByIdHotPath(Identifier* ident)
-{
- // As for put_by_id, get_by_id requires the offset of the Structure and the offset of the access to be patched.
- // Additionally, for get_by_id we need to patch the offset of the branch to the slow case (we patch this to jump
- // to array-length / prototype access trampolines, and finally we also record the property-map access offset as a label
- // to jump back to if one of these trampolines finds a match.
-
- if (*ident == m_vm->propertyNames->length && shouldEmitProfiling()) {
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- emitArrayProfilingSiteForBytecodeIndex(regT2, regT3, m_bytecodeOffset);
- }
+ if (*ident == m_vm->propertyNames->length && shouldEmitProfiling())
+ emitArrayProfilingSiteForBytecodeIndexWithCell(regT0, regT2, m_bytecodeOffset);
- BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
-
- Label hotPathBegin(this);
-
- DataLabelPtr structureToCompare;
- PatchableJump structureCheck = patchableBranchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)));
- addSlowCase(structureCheck);
-
- ConvertibleLoadLabel propertyStorageLoad = convertibleLoadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
- DataLabelCompact displacementLabel1 = loadPtrWithCompactAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT0); // payload
- DataLabelCompact displacementLabel2 = loadPtrWithCompactAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT1); // tag
-
- Label putResult(this);
-
- END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath);
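+ // Emit the patchable get_by_id inline cache fast path; its slow-path jump is linked in emitSlow_op_get_by_id.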
+ JITGetByIdGenerator gen(
+ m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(),
+ JSValueRegs::payloadOnly(regT0), JSValueRegs(regT1, regT0));
+ gen.generateFastPath(*this);
+ addSlowCase(gen.slowPathJump());
+ m_getByIds.append(gen);
- m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubGetById, m_bytecodeOffset, hotPathBegin, structureToCompare, structureCheck, propertyStorageLoad, displacementLabel1, displacementLabel2, putResult));
+ emitValueProfilingSite();
+ emitStore(dst, regT1, regT0);
}
void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int ident = currentInstruction[3].u.operand;
-
- compileGetByIdSlowCase(dst, base, &(m_codeBlock->identifier(ident)), iter);
- emitValueProfilingSite();
-}
+ int resultVReg = currentInstruction[1].u.operand;
+ int baseVReg = currentInstruction[2].u.operand;
+ const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
-void JIT::compileGetByIdSlowCase(int dst, int base, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter)
-{
- // As for the hot path of get_by_id, above, we ensure that we can use an architecture specific offset
- // so that we only need track one pointer into the slow case code - we track a pointer to the location
- // of the call (which we can use to look up the patch information), but should a array-length or
- // prototype access trampoline fail we want to bail out back to here. To do so we can subtract back
- // the distance from the call to the head of the slow case.
- linkSlowCaseIfNotJSCell(iter, base);
+ linkSlowCaseIfNotJSCell(iter, baseVReg);
linkSlowCase(iter);
+
+ JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++];
- BEGIN_UNINTERRUPTED_SEQUENCE(sequenceGetByIdSlowCase);
-
- Label coldPathBegin(this);
- JITStubCall stubCall(this, cti_op_get_by_id);
- stubCall.addArgument(regT1, regT0);
- stubCall.addArgument(TrustedImmPtr(ident));
- Call call = stubCall.call(dst);
+ Label coldPathBegin = label();
- END_UNINTERRUPTED_SEQUENCE_FOR_PUT(sequenceGetByIdSlowCase, dst);
+ Call call = callOperation(WithProfile, operationGetByIdOptimize, resultVReg, gen.stubInfo(), regT1, regT0, ident->impl());
- // Track the location of the call; this will be used to recover patch information.
- m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].slowCaseInfo(PropertyStubGetById, coldPathBegin, call);
+ gen.reportSlowPathCall(coldPathBegin, call);
}
void JIT::emit_op_put_by_id(Instruction* currentInstruction)
@@ -543,829 +623,498 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
int base = currentInstruction[1].u.operand;
int value = currentInstruction[3].u.operand;
+ int direct = currentInstruction[8].u.putByIdFlags & PutByIdIsDirect;
+ emitWriteBarrier(base, value, ShouldFilterBase);
+
emitLoad2(base, regT1, regT0, value, regT3, regT2);
emitJumpSlowCaseIfNotJSCell(base, regT1);
+
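+ // Emit the patchable put_by_id inline cache fast path; its slow-path jump is linked in emitSlow_op_put_by_id.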
+ JITPutByIdGenerator gen(
+ m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(),
+ JSValueRegs::payloadOnly(regT0), JSValueRegs(regT3, regT2),
+ regT1, m_codeBlock->ecmaMode(), direct ? Direct : NotDirect);
- BEGIN_UNINTERRUPTED_SEQUENCE(sequencePutById);
-
- Label hotPathBegin(this);
-
- // It is important that the following instruction plants a 32bit immediate, in order that it can be patched over.
- DataLabelPtr structureToCompare;
- addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))));
-
- ConvertibleLoadLabel propertyStorageLoad = convertibleLoadPtr(Address(regT0, JSObject::butterflyOffset()), regT1);
- DataLabel32 displacementLabel1 = storePtrWithAddressOffsetPatch(regT2, Address(regT1, patchPutByIdDefaultOffset)); // payload
- DataLabel32 displacementLabel2 = storePtrWithAddressOffsetPatch(regT3, Address(regT1, patchPutByIdDefaultOffset)); // tag
+ gen.generateFastPath(*this);
+ addSlowCase(gen.slowPathJump());
- END_UNINTERRUPTED_SEQUENCE(sequencePutById);
-
- emitWriteBarrier(regT0, regT2, regT1, regT2, ShouldFilterImmediates, WriteBarrierForPropertyAccess);
-
- m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubPutById, m_bytecodeOffset, hotPathBegin, structureToCompare, propertyStorageLoad, displacementLabel1, displacementLabel2));
+ m_putByIds.append(gen);
}
void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
int base = currentInstruction[1].u.operand;
- int ident = currentInstruction[2].u.operand;
- int direct = currentInstruction[8].u.operand;
+ const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[2].u.operand));
linkSlowCaseIfNotJSCell(iter, base);
linkSlowCase(iter);
- JITStubCall stubCall(this, direct ? cti_op_put_by_id_direct : cti_op_put_by_id);
- stubCall.addArgument(base);
- stubCall.addArgument(TrustedImmPtr(&(m_codeBlock->identifier(ident))));
- stubCall.addArgument(regT3, regT2);
- Call call = stubCall.call();
-
- // Track the location of the call; this will be used to recover patch information.
- m_propertyAccessCompilationInfo[m_propertyAccessInstructionIndex++].slowCaseInfo(PropertyStubPutById, call);
-}
+ Label coldPathBegin(this);
-// Compile a store into an object's property storage. May overwrite base.
-void JIT::compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, PropertyOffset cachedOffset)
-{
- if (isOutOfLineOffset(cachedOffset))
- loadPtr(Address(base, JSObject::butterflyOffset()), base);
- emitStore(indexRelativeToBase(cachedOffset), valueTag, valuePayload, base);
-}
+ // JITPutByIdGenerator only preserves the value and the base's payload, so we have to reload the base's tag.
+ emitLoadTag(base, regT1);
-// Compile a load from an object's property storage. May overwrite base.
-void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, PropertyOffset cachedOffset)
-{
- if (isInlineOffset(cachedOffset)) {
- emitLoad(indexRelativeToBase(cachedOffset), resultTag, resultPayload, base);
- return;
- }
+ JITPutByIdGenerator& gen = m_putByIds[m_putByIdIndex++];
- RegisterID temp = resultPayload;
- loadPtr(Address(base, JSObject::butterflyOffset()), temp);
- emitLoad(indexRelativeToBase(cachedOffset), resultTag, resultPayload, temp);
+ Call call = callOperation(
+ gen.slowPathFunction(), gen.stubInfo(), regT3, regT2, regT1, regT0, ident->impl());
+
+ gen.reportSlowPathCall(coldPathBegin, call);
}
-void JIT::compileGetDirectOffset(JSObject* base, RegisterID resultTag, RegisterID resultPayload, PropertyOffset cachedOffset)
+void JIT::emitVarInjectionCheck(bool needsVarInjectionChecks)
{
- if (isInlineOffset(cachedOffset)) {
- move(TrustedImmPtr(base->locationForOffset(cachedOffset)), resultTag);
- load32(Address(resultTag, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
- load32(Address(resultTag, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
+ if (!needsVarInjectionChecks)
return;
- }
-
- loadPtr(base->butterflyAddress(), resultTag);
- load32(Address(resultTag, offsetInButterfly(cachedOffset) * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
- load32(Address(resultTag, offsetInButterfly(cachedOffset) * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
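+ // Take the slow path if the global object's var injection watchpoint has been invalidated.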
+ addSlowCase(branch8(Equal, AbsoluteAddress(m_codeBlock->globalObject()->varInjectionWatchpoint()->addressOfState()), TrustedImm32(IsInvalidated)));
}
-void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, PropertyOffset cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct)
+void JIT::emitResolveClosure(int dst, int scope, bool needsVarInjectionChecks, unsigned depth)
{
- // The code below assumes that regT0 contains the basePayload and regT1 contains the baseTag. Restore them from the stack.
-#if CPU(MIPS) || CPU(SH4) || CPU(ARM)
- // For MIPS, we don't add sizeof(void*) to the stack offset.
- load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
- // For MIPS, we don't add sizeof(void*) to the stack offset.
- load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
-#else
- load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
- load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
-#endif
+ emitVarInjectionCheck(needsVarInjectionChecks);
+ move(TrustedImm32(JSValue::CellTag), regT1);
+ emitLoadPayload(scope, regT0);
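+ // Walk up the scope chain 'depth' links to reach the resolved scope.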
+ for (unsigned i = 0; i < depth; ++i)
+ loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
+ emitStore(dst, regT1, regT0);
+}
- JumpList failureCases;
- failureCases.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
- failureCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(oldStructure)));
- testPrototype(oldStructure->storedPrototype(), failureCases, stubInfo);
-
- if (!direct) {
- // Verify that nothing in the prototype chain has a setter for this property.
- for (WriteBarrier<Structure>* it = chain->head(); *it; ++it)
- testPrototype((*it)->storedPrototype(), failureCases, stubInfo);
+void JIT::emit_op_resolve_scope(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int scope = currentInstruction[2].u.operand;
+ ResolveType resolveType = static_cast<ResolveType>(currentInstruction[4].u.operand);
+ unsigned depth = currentInstruction[5].u.operand;
+ auto emitCode = [&] (ResolveType resolveType) {
+ switch (resolveType) {
+ case GlobalProperty:
+ case GlobalVar:
+ case GlobalLexicalVar:
+ case GlobalPropertyWithVarInjectionChecks:
+ case GlobalVarWithVarInjectionChecks:
+ case GlobalLexicalVarWithVarInjectionChecks: {
+ JSScope* constantScope = JSScope::constantScopeForCodeBlock(resolveType, m_codeBlock);
+ RELEASE_ASSERT(constantScope);
+ emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
+ move(TrustedImm32(JSValue::CellTag), regT1);
+ move(TrustedImmPtr(constantScope), regT0);
+ emitStore(dst, regT1, regT0);
+ break;
+ }
+ case ClosureVar:
+ case ClosureVarWithVarInjectionChecks:
+ emitResolveClosure(dst, scope, needsVarInjectionChecks(resolveType), depth);
+ break;
+ case ModuleVar:
+ move(TrustedImm32(JSValue::CellTag), regT1);
+ move(TrustedImmPtr(currentInstruction[6].u.jsCell.get()), regT0);
+ emitStore(dst, regT1, regT0);
+ break;
+ case Dynamic:
+ addSlowCase(jump());
+ break;
+ case LocalClosureVar:
+ case UnresolvedProperty:
+ case UnresolvedPropertyWithVarInjectionChecks:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ };
+ switch (resolveType) {
+ case UnresolvedProperty:
+ case UnresolvedPropertyWithVarInjectionChecks: {
+ JumpList skipToEnd;
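+ // The resolve type was not known statically; dispatch at run time on the value recorded in the instruction stream.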
+ load32(&currentInstruction[4], regT0);
+
+ Jump notGlobalProperty = branch32(NotEqual, regT0, TrustedImm32(GlobalProperty));
+ emitCode(GlobalProperty);
+ skipToEnd.append(jump());
+ notGlobalProperty.link(this);
+
+ Jump notGlobalPropertyWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalPropertyWithVarInjectionChecks));
+ emitCode(GlobalPropertyWithVarInjectionChecks);
+ skipToEnd.append(jump());
+ notGlobalPropertyWithVarInjections.link(this);
+
+ Jump notGlobalLexicalVar = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVar));
+ emitCode(GlobalLexicalVar);
+ skipToEnd.append(jump());
+ notGlobalLexicalVar.link(this);
+
+ Jump notGlobalLexicalVarWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVarWithVarInjectionChecks));
+ emitCode(GlobalLexicalVarWithVarInjectionChecks);
+ skipToEnd.append(jump());
+ notGlobalLexicalVarWithVarInjections.link(this);
+
+ addSlowCase(jump());
+ skipToEnd.link(this);
+ break;
}
- // If we succeed in all of our checks, and the code was optimizable, then make sure we
- // decrement the rare case counter.
-#if ENABLE(VALUE_PROFILER)
- if (m_codeBlock->canCompileWithDFG() >= DFG::MayInline) {
- sub32(
- TrustedImm32(1),
- AbsoluteAddress(&m_codeBlock->rareCaseProfileForBytecodeOffset(stubInfo->bytecodeIndex)->m_counter));
- }
-#endif
-
- // Reallocate property storage if needed.
- Call callTarget;
- bool willNeedStorageRealloc = oldStructure->outOfLineCapacity() != newStructure->outOfLineCapacity();
- if (willNeedStorageRealloc) {
- // This trampoline was called like a JIT stub; before we can call again we need to
- // remove the return address from the stack, to prevent the stack from becoming misaligned.
- preserveReturnAddressAfterCall(regT3);
-
- JITStubCall stubCall(this, cti_op_put_by_id_transition_realloc);
- stubCall.skipArgument(); // base
- stubCall.skipArgument(); // ident
- stubCall.skipArgument(); // value
- stubCall.addArgument(TrustedImm32(oldStructure->outOfLineCapacity()));
- stubCall.addArgument(TrustedImmPtr(newStructure));
- stubCall.call(regT0);
-
- restoreReturnAddressBeforeReturn(regT3);
-
-#if CPU(MIPS) || CPU(SH4) || CPU(ARM)
- // For MIPS, we don't add sizeof(void*) to the stack offset.
- load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
- // For MIPS, we don't add sizeof(void*) to the stack offset.
- load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
-#else
- load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
- load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[0]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
-#endif
+ default:
+ emitCode(resolveType);
+ break;
}
+}
- emitWriteBarrier(regT0, regT1, regT1, regT3, UnconditionalWriteBarrier, WriteBarrierForPropertyAccess);
+void JIT::emitSlow_op_resolve_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ ResolveType resolveType = static_cast<ResolveType>(currentInstruction[4].u.operand);
- storePtr(TrustedImmPtr(newStructure), Address(regT0, JSCell::structureOffset()));
-#if CPU(MIPS) || CPU(SH4) || CPU(ARM)
- // For MIPS, we don't add sizeof(void*) to the stack offset.
- load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT3);
- load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT2);
-#else
- load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT3);
- load32(Address(stackPointerRegister, OBJECT_OFFSETOF(JITStackFrame, args[2]) + sizeof(void*) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT2);
-#endif
- compilePutDirectOffset(regT0, regT2, regT3, cachedOffset);
-
- ret();
-
- ASSERT(!failureCases.empty());
- failureCases.link(this);
- restoreArgumentReferenceForTrampoline();
- Call failureCall = tailRecursiveCall();
-
- LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
-
- patchBuffer.link(failureCall, FunctionPtr(direct ? cti_op_put_by_id_direct_fail : cti_op_put_by_id_fail));
-
- if (willNeedStorageRealloc) {
- ASSERT(m_calls.size() == 1);
- patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc));
+ if (resolveType == GlobalProperty || resolveType == GlobalVar || resolveType == ClosureVar || resolveType == GlobalLexicalVar || resolveType == ModuleVar)
+ return;
+ if (resolveType == UnresolvedProperty || resolveType == UnresolvedPropertyWithVarInjectionChecks) {
+ linkSlowCase(iter); // Var injections check for GlobalPropertyWithVarInjectionChecks.
+ linkSlowCase(iter); // Var injections check for GlobalLexicalVarWithVarInjectionChecks.
}
-
- stubInfo->stubRoutine = createJITStubRoutine(
- FINALIZE_CODE(
- patchBuffer,
- ("Baseline put_by_id transition stub for %s, return point %p",
- toCString(*m_codeBlock).data(), returnAddress.value())),
- *m_vm,
- m_codeBlock->ownerExecutable(),
- willNeedStorageRealloc,
- newStructure);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relinkCallerToTrampoline(returnAddress, CodeLocationLabel(stubInfo->stubRoutine->code().code()));
-}
-void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress)
-{
- RepatchBuffer repatchBuffer(codeBlock);
-
- // We don't want to patch more than once - in future go to cti_op_get_by_id_generic.
- // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
-
- // Patch the offset into the property map to load from, then patch the Structure to look for.
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.get.structureToCompare), structure);
- repatchBuffer.setLoadInstructionIsActive(stubInfo->hotPathBegin.convertibleLoadAtOffset(stubInfo->patch.baseline.u.get.propertyStorageLoad), isOutOfLineOffset(cachedOffset));
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel1), offsetRelativeToPatchedStorage(cachedOffset) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel2), offsetRelativeToPatchedStorage(cachedOffset) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag
+ linkSlowCase(iter);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_resolve_scope);
+ slowPathCall.call();
}
-void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress, bool direct)
+void JIT::emitLoadWithStructureCheck(int scope, Structure** structureSlot)
{
- RepatchBuffer repatchBuffer(codeBlock);
-
- // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
- // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
-
- // Patch the offset into the property map to load from, then patch the Structure to look for.
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.put.structureToCompare), structure);
- repatchBuffer.setLoadInstructionIsActive(stubInfo->hotPathBegin.convertibleLoadAtOffset(stubInfo->patch.baseline.u.put.propertyStorageLoad), isOutOfLineOffset(cachedOffset));
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel1), offsetRelativeToPatchedStorage(cachedOffset) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel2), offsetRelativeToPatchedStorage(cachedOffset) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag
+ emitLoad(scope, regT1, regT0);
+ loadPtr(structureSlot, regT2);
+ addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::structureIDOffset()), regT2));
}
-void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress)
+void JIT::emitGetVarFromPointer(JSValue* operand, GPRReg tag, GPRReg payload)
{
- StructureStubInfo* stubInfo = &m_codeBlock->getStubInfo(returnAddress);
-
- // regT0 holds a JSCell*
-
- // Check for array
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- Jump failureCases1 = branchTest32(Zero, regT2, TrustedImm32(IsArray));
- Jump failureCases2 = branchTest32(Zero, regT2, TrustedImm32(IndexingShapeMask));
-
- // Checks out okay! - get the length from the storage
- loadPtr(Address(regT0, JSArray::butterflyOffset()), regT2);
- load32(Address(regT2, ArrayStorage::lengthOffset()), regT2);
-
- Jump failureCases3 = branch32(Above, regT2, TrustedImm32(INT_MAX));
- move(regT2, regT0);
- move(TrustedImm32(JSValue::Int32Tag), regT1);
- Jump success = jump();
-
- LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
-
- // Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);
- patchBuffer.link(failureCases1, slowCaseBegin);
- patchBuffer.link(failureCases2, slowCaseBegin);
- patchBuffer.link(failureCases3, slowCaseBegin);
-
- // On success return back to the hot patch code, at a point it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
-
- // Track the stub we have created so that it will be deleted later.
- stubInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
- patchBuffer,
- ("Baseline get_by_id array length stub for %s, return point %p",
- toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
- stubInfo->patch.baseline.u.get.putResult).executableAddress()));
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine->code().code()));
-
- // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail));
+ uintptr_t rawAddress = bitwise_cast<uintptr_t>(operand);
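+ // Load the tag and payload halves of the JSValue at this known absolute address.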
+ load32(bitwise_cast<void*>(rawAddress + TagOffset), tag);
+ load32(bitwise_cast<void*>(rawAddress + PayloadOffset), payload);
}
-
-void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
+void JIT::emitGetVarFromIndirectPointer(JSValue** operand, GPRReg tag, GPRReg payload)
{
- // regT0 holds a JSCell*
-
- // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
- // referencing the prototype object - let's speculatively load its table nice and early!)
- JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
-
- Jump failureCases1 = checkStructure(regT0, structure);
-
- // Check the prototype object's Structure had not changed.
- Jump failureCases2 = addStructureTransitionCheck(protoObject, prototypeStructure, stubInfo, regT3);
-
- bool needsStubLink = false;
- // Checks out okay!
- if (slot.cachedPropertyType() == PropertySlot::Getter) {
- needsStubLink = true;
- compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
- JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
- stubCall.addArgument(regT1);
- stubCall.addArgument(regT0);
- stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
- needsStubLink = true;
- JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
- stubCall.addArgument(TrustedImmPtr(protoObject));
- stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
- stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
- stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else
- compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
-
- Jump success = jump();
-
- LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
-
- // Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel slowCaseBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);
- patchBuffer.link(failureCases1, slowCaseBegin);
- if (failureCases2.isSet())
- patchBuffer.link(failureCases2, slowCaseBegin);
-
- // On success return back to the hot patch code, at a point it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
-
- if (needsStubLink) {
- for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
- if (iter->to)
- patchBuffer.link(iter->from, FunctionPtr(iter->to));
- }
- }
-
- // Track the stub we have created so that it will be deleted later.
- stubInfo->stubRoutine = createJITStubRoutine(
- FINALIZE_CODE(
- patchBuffer,
- ("Baseline get_by_id proto stub for %s, return point %p",
- toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
- stubInfo->patch.baseline.u.get.putResult).executableAddress())),
- *m_vm,
- m_codeBlock->ownerExecutable(),
- needsStubLink);
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine->code().code()));
-
- // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
+ loadPtr(operand, payload);
+ load32(Address(payload, TagOffset), tag);
+ load32(Address(payload, PayloadOffset), payload);
}
-
-void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset)
+void JIT::emitGetClosureVar(int scope, uintptr_t operand)
{
- // regT0 holds a JSCell*
- Jump failureCase = checkStructure(regT0, structure);
- bool needsStubLink = false;
- bool isDirect = false;
- if (slot.cachedPropertyType() == PropertySlot::Getter) {
- needsStubLink = true;
- compileGetDirectOffset(regT0, regT2, regT1, cachedOffset);
- JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
- stubCall.addArgument(regT1);
- stubCall.addArgument(regT0);
- stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
- needsStubLink = true;
- JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
- stubCall.addArgument(regT0);
- stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
- stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
- stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else {
- isDirect = true;
- compileGetDirectOffset(regT0, regT1, regT0, cachedOffset);
- }
-
- Jump success = jump();
-
- LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
- if (needsStubLink) {
- for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
- if (iter->to)
- patchBuffer.link(iter->from, FunctionPtr(iter->to));
- }
- }
- // Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel lastProtoBegin = CodeLocationLabel(JITStubRoutine::asCodePtr(polymorphicStructures->list[currentIndex - 1].stubRoutine));
- if (!lastProtoBegin)
- lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin);
-
- patchBuffer.link(failureCase, lastProtoBegin);
-
- // On success return back to the hot patch code, at a point it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
-
- RefPtr<JITStubRoutine> stubRoutine = createJITStubRoutine(
- FINALIZE_CODE(
- patchBuffer,
- ("Baseline get_by_id self list stub for %s, return point %p",
- toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
- stubInfo->patch.baseline.u.get.putResult).executableAddress())),
- *m_vm,
- m_codeBlock->ownerExecutable(),
- needsStubLink);
-
- polymorphicStructures->list[currentIndex].set(*m_vm, m_codeBlock->ownerExecutable(), stubRoutine, structure, isDirect);
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine->code().code()));
+ emitLoad(scope, regT1, regT0);
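+ // Read the variable straight out of the environment record's variable storage.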
+ load32(Address(regT0, JSEnvironmentRecord::offsetOfVariables() + operand * sizeof(Register) + TagOffset), regT1);
+ load32(Address(regT0, JSEnvironmentRecord::offsetOfVariables() + operand * sizeof(Register) + PayloadOffset), regT0);
}
-void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, CallFrame* callFrame)
+void JIT::emit_op_get_from_scope(Instruction* currentInstruction)
{
- // regT0 holds a JSCell*
-
- // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is
- // referencing the prototype object - let's speculatively load its table nice and early!)
- JSObject* protoObject = asObject(structure->prototypeForLookup(callFrame));
-
- // Check eax is an object of the right Structure.
- Jump failureCases1 = checkStructure(regT0, structure);
-
- // Check the prototype object's Structure had not changed.
- Jump failureCases2 = addStructureTransitionCheck(protoObject, prototypeStructure, stubInfo, regT3);
-
- bool needsStubLink = false;
- bool isDirect = false;
- if (slot.cachedPropertyType() == PropertySlot::Getter) {
- needsStubLink = true;
- compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
- JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
- stubCall.addArgument(regT1);
- stubCall.addArgument(regT0);
- stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
- needsStubLink = true;
- JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
- stubCall.addArgument(TrustedImmPtr(protoObject));
- stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
- stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
- stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else {
- isDirect = true;
- compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
- }
-
- Jump success = jump();
-
- LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
- if (needsStubLink) {
- for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
- if (iter->to)
- patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ int dst = currentInstruction[1].u.operand;
+ int scope = currentInstruction[2].u.operand;
+ ResolveType resolveType = GetPutInfo(currentInstruction[4].u.operand).resolveType();
+ Structure** structureSlot = currentInstruction[5].u.structure.slot();
+ uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&currentInstruction[6].u.pointer);
+
+ auto emitCode = [&] (ResolveType resolveType, bool indirectLoadForOperand) {
+ switch (resolveType) {
+ case GlobalProperty:
+ case GlobalPropertyWithVarInjectionChecks: {
+ emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection.
+ GPRReg base = regT2;
+ GPRReg resultTag = regT1;
+ GPRReg resultPayload = regT0;
+ GPRReg offset = regT3;
+
+ move(regT0, base);
+ load32(operandSlot, offset);
+ if (!ASSERT_DISABLED) {
+ Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(firstOutOfLineOffset));
+ abortWithReason(JITOffsetIsNotOutOfLine);
+ isOutOfLine.link(this);
+ }
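+ // The cached offset is known to be out of line: load the butterfly and index backwards from it.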
+ loadPtr(Address(base, JSObject::butterflyOffset()), base);
+ neg32(offset);
+ load32(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), resultPayload);
+ load32(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), resultTag);
+ break;
}
- }
- // Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel lastProtoBegin = CodeLocationLabel(JITStubRoutine::asCodePtr(prototypeStructures->list[currentIndex - 1].stubRoutine));
- patchBuffer.link(failureCases1, lastProtoBegin);
- if (failureCases2.isSet())
- patchBuffer.link(failureCases2, lastProtoBegin);
-
- // On success return back to the hot patch code, at a point it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
-
- RefPtr<JITStubRoutine> stubRoutine = createJITStubRoutine(
- FINALIZE_CODE(
- patchBuffer,
- ("Baseline get_by_id proto list stub for %s, return point %p",
- toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
- stubInfo->patch.baseline.u.get.putResult).executableAddress())),
- *m_vm,
- m_codeBlock->ownerExecutable(),
- needsStubLink);
-
- prototypeStructures->list[currentIndex].set(callFrame->vm(), m_codeBlock->ownerExecutable(), stubRoutine, structure, prototypeStructure, isDirect);
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine->code().code()));
-}
-
-void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, CallFrame* callFrame)
-{
- // regT0 holds a JSCell*
- ASSERT(count);
-
- JumpList bucketsOfFail;
-
- // Check eax is an object of the right Structure.
- bucketsOfFail.append(checkStructure(regT0, structure));
-
- Structure* currStructure = structure;
- WriteBarrier<Structure>* it = chain->head();
- JSObject* protoObject = 0;
- for (unsigned i = 0; i < count; ++i, ++it) {
- protoObject = asObject(currStructure->prototypeForLookup(callFrame));
- currStructure = it->get();
- testPrototype(protoObject, bucketsOfFail, stubInfo);
- }
- ASSERT(protoObject);
-
- bool needsStubLink = false;
- bool isDirect = false;
- if (slot.cachedPropertyType() == PropertySlot::Getter) {
- needsStubLink = true;
- compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
- JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
- stubCall.addArgument(regT1);
- stubCall.addArgument(regT0);
- stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
- needsStubLink = true;
- JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
- stubCall.addArgument(TrustedImmPtr(protoObject));
- stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
- stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
- stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else {
- isDirect = true;
- compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
- }
-
- Jump success = jump();
-
- LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
- if (needsStubLink) {
- for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
- if (iter->to)
- patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ case GlobalVar:
+ case GlobalVarWithVarInjectionChecks:
+ case GlobalLexicalVar:
+ case GlobalLexicalVarWithVarInjectionChecks:
+ emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
+ if (indirectLoadForOperand)
+ emitGetVarFromIndirectPointer(bitwise_cast<JSValue**>(operandSlot), regT1, regT0);
+ else
+ emitGetVarFromPointer(bitwise_cast<JSValue*>(*operandSlot), regT1, regT0);
+ if (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks) // TDZ check.
+ addSlowCase(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));
+ break;
+ case ClosureVar:
+ case ClosureVarWithVarInjectionChecks:
+ emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
+ emitGetClosureVar(scope, *operandSlot);
+ break;
+ case Dynamic:
+ addSlowCase(jump());
+ break;
+ case ModuleVar:
+ case LocalClosureVar:
+ case UnresolvedProperty:
+ case UnresolvedPropertyWithVarInjectionChecks:
+ RELEASE_ASSERT_NOT_REACHED();
}
+ };
+
+ switch (resolveType) {
+ case UnresolvedProperty:
+ case UnresolvedPropertyWithVarInjectionChecks: {
+ JumpList skipToEnd;
+ load32(&currentInstruction[4], regT0);
+ and32(TrustedImm32(GetPutInfo::typeBits), regT0); // Load ResolveType into T0
+
+ Jump isGlobalProperty = branch32(Equal, regT0, TrustedImm32(GlobalProperty));
+ Jump notGlobalPropertyWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalPropertyWithVarInjectionChecks));
+ isGlobalProperty.link(this);
+ emitCode(GlobalProperty, false);
+ skipToEnd.append(jump());
+ notGlobalPropertyWithVarInjections.link(this);
+
+ Jump notGlobalLexicalVar = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVar));
+ emitCode(GlobalLexicalVar, true);
+ skipToEnd.append(jump());
+ notGlobalLexicalVar.link(this);
+
+ Jump notGlobalLexicalVarWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVarWithVarInjectionChecks));
+ emitCode(GlobalLexicalVarWithVarInjectionChecks, true);
+ skipToEnd.append(jump());
+ notGlobalLexicalVarWithVarInjections.link(this);
+
+ addSlowCase(jump());
+
+ skipToEnd.link(this);
+ break;
}
- // Use the patch information to link the failure cases back to the original slow case routine.
- CodeLocationLabel lastProtoBegin = CodeLocationLabel(JITStubRoutine::asCodePtr(prototypeStructures->list[currentIndex - 1].stubRoutine));
-
- patchBuffer.link(bucketsOfFail, lastProtoBegin);
-
- // On success return back to the hot patch code, at a point it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
-
- RefPtr<JITStubRoutine> stubRoutine = createJITStubRoutine(
- FINALIZE_CODE(
- patchBuffer,
- ("Baseline get_by_id chain list stub for %s, return point %p",
- toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
- stubInfo->patch.baseline.u.get.putResult).executableAddress())),
- *m_vm,
- m_codeBlock->ownerExecutable(),
- needsStubLink);
-
- // Track the stub we have created so that it will be deleted later.
- prototypeStructures->list[currentIndex].set(callFrame->vm(), m_codeBlock->ownerExecutable(), stubRoutine, structure, chain, isDirect);
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine->code().code()));
-}
-void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame)
-{
- // regT0 holds a JSCell*
- ASSERT(count);
-
- JumpList bucketsOfFail;
-
- // Check eax is an object of the right Structure.
- bucketsOfFail.append(checkStructure(regT0, structure));
-
- Structure* currStructure = structure;
- WriteBarrier<Structure>* it = chain->head();
- JSObject* protoObject = 0;
- for (unsigned i = 0; i < count; ++i, ++it) {
- protoObject = asObject(currStructure->prototypeForLookup(callFrame));
- currStructure = it->get();
- testPrototype(protoObject, bucketsOfFail, stubInfo);
- }
- ASSERT(protoObject);
-
- bool needsStubLink = false;
- if (slot.cachedPropertyType() == PropertySlot::Getter) {
- needsStubLink = true;
- compileGetDirectOffset(protoObject, regT2, regT1, cachedOffset);
- JITStubCall stubCall(this, cti_op_get_by_id_getter_stub);
- stubCall.addArgument(regT1);
- stubCall.addArgument(regT0);
- stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else if (slot.cachedPropertyType() == PropertySlot::Custom) {
- needsStubLink = true;
- JITStubCall stubCall(this, cti_op_get_by_id_custom_stub);
- stubCall.addArgument(TrustedImmPtr(protoObject));
- stubCall.addArgument(TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()));
- stubCall.addArgument(TrustedImmPtr(const_cast<Identifier*>(&ident)));
- stubCall.addArgument(TrustedImmPtr(stubInfo->callReturnLocation.executableAddress()));
- stubCall.call();
- } else
- compileGetDirectOffset(protoObject, regT1, regT0, cachedOffset);
- Jump success = jump();
-
- LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
- if (needsStubLink) {
- for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
- if (iter->to)
- patchBuffer.link(iter->from, FunctionPtr(iter->to));
- }
+ default:
+ emitCode(resolveType, false);
+ break;
}
- // Use the patch information to link the failure cases back to the original slow case routine.
- patchBuffer.link(bucketsOfFail, stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin));
-
- // On success return back to the hot patch code, at a point it will perform the store to dest for us.
- patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult));
-
- // Track the stub we have created so that it will be deleted later.
- RefPtr<JITStubRoutine> stubRoutine = createJITStubRoutine(
- FINALIZE_CODE(
- patchBuffer,
- ("Baseline get_by_id chain stub for %s, return point %p",
- toCString(*m_codeBlock).data(), stubInfo->hotPathBegin.labelAtOffset(
- stubInfo->patch.baseline.u.get.putResult).executableAddress())),
- *m_vm,
- m_codeBlock->ownerExecutable(),
- needsStubLink);
- stubInfo->stubRoutine = stubRoutine;
-
- // Finally patch the jump to slow case back in the hot path to jump here instead.
- CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck);
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine->code().code()));
-
- // We don't want to patch more than once - in future go to cti_op_put_by_id_generic.
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list));
+ emitValueProfilingSite();
+ emitStore(dst, regT1, regT0);
}
-void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID offset, FinalObjectMode finalObjectMode)
+void JIT::emitSlow_op_get_from_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- ASSERT(sizeof(JSValue) == 8);
-
- if (finalObjectMode == MayBeFinal) {
- Jump isInline = branch32(LessThan, offset, TrustedImm32(firstOutOfLineOffset));
- loadPtr(Address(base, JSObject::butterflyOffset()), base);
- neg32(offset);
- Jump done = jump();
- isInline.link(this);
- addPtr(TrustedImmPtr(JSObject::offsetOfInlineStorage() - (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), base);
- done.link(this);
- } else {
-#if !ASSERT_DISABLED
- Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(firstOutOfLineOffset));
- breakpoint();
- isOutOfLine.link(this);
-#endif
- loadPtr(Address(base, JSObject::butterflyOffset()), base);
- neg32(offset);
+ int dst = currentInstruction[1].u.operand;
+ ResolveType resolveType = GetPutInfo(currentInstruction[4].u.operand).resolveType();
+
+ if (resolveType == GlobalVar || resolveType == ClosureVar)
+ return;
+
+ if (resolveType == GlobalLexicalVarWithVarInjectionChecks) // Var Injections check.
+ linkSlowCase(iter);
+
+ if (resolveType == UnresolvedProperty || resolveType == UnresolvedPropertyWithVarInjectionChecks) {
+ // GlobalProperty/GlobalPropertyWithVarInjectionChecks
+ linkSlowCase(iter); // emitLoadWithStructureCheck
+ // GlobalLexicalVar
+ linkSlowCase(iter); // TDZ check.
+ // GlobalLexicalVarWithVarInjectionChecks.
+ linkSlowCase(iter); // var injection check.
+ linkSlowCase(iter); // TDZ check.
}
- load32(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), resultPayload);
- load32(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), resultTag);
+
+ linkSlowCase(iter);
+ callOperation(WithProfile, operationGetFromScope, dst, currentInstruction);
}
-void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
+void JIT::emitPutGlobalVariable(JSValue* operand, int value, WatchpointSet* set)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
- unsigned expected = currentInstruction[4].u.operand;
- unsigned iter = currentInstruction[5].u.operand;
- unsigned i = currentInstruction[6].u.operand;
-
- emitLoad2(property, regT1, regT0, base, regT3, regT2);
- emitJumpSlowCaseIfNotJSCell(property, regT1);
- addSlowCase(branchPtr(NotEqual, regT0, payloadFor(expected)));
- // Property registers are now available as the property is known
- emitJumpSlowCaseIfNotJSCell(base, regT3);
- emitLoadPayload(iter, regT1);
-
- // Test base's structure
- loadPtr(Address(regT2, JSCell::structureOffset()), regT0);
- addSlowCase(branchPtr(NotEqual, regT0, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
- load32(addressFor(i), regT3);
- sub32(TrustedImm32(1), regT3);
- addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
- Jump inlineProperty = branch32(Below, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity)));
- add32(TrustedImm32(firstOutOfLineOffset), regT3);
- sub32(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity)), regT3);
- inlineProperty.link(this);
- compileGetDirectOffset(regT2, regT1, regT0, regT3);
-
- emitStore(dst, regT1, regT0);
- map(m_bytecodeOffset + OPCODE_LENGTH(op_get_by_pname), dst, regT1, regT0);
+ emitLoad(value, regT1, regT0);
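+ // Notify the variable's watchpoint set (a slow-path check if it has not already been invalidated), then store the new tag and payload.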
+ emitNotifyWrite(set);
+ uintptr_t rawAddress = bitwise_cast<uintptr_t>(operand);
+ store32(regT1, bitwise_cast<void*>(rawAddress + TagOffset));
+ store32(regT0, bitwise_cast<void*>(rawAddress + PayloadOffset));
}
-void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitPutGlobalVariableIndirect(JSValue** addressOfOperand, int value, WatchpointSet** indirectWatchpointSet)
{
- unsigned dst = currentInstruction[1].u.operand;
- unsigned base = currentInstruction[2].u.operand;
- unsigned property = currentInstruction[3].u.operand;
-
- linkSlowCaseIfNotJSCell(iter, property);
- linkSlowCase(iter);
- linkSlowCaseIfNotJSCell(iter, base);
- linkSlowCase(iter);
- linkSlowCase(iter);
-
- JITStubCall stubCall(this, cti_op_get_by_val_generic);
- stubCall.addArgument(base);
- stubCall.addArgument(property);
- stubCall.call(dst);
+ emitLoad(value, regT1, regT0);
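+ // The watchpoint set and the variable's address are both reached through an extra level of indirection.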
+ loadPtr(indirectWatchpointSet, regT2);
+ emitNotifyWrite(regT2);
+ loadPtr(addressOfOperand, regT2);
+ store32(regT1, Address(regT2, TagOffset));
+ store32(regT0, Address(regT2, PayloadOffset));
}
-void JIT::emit_op_get_scoped_var(Instruction* currentInstruction)
+void JIT::emitPutClosureVar(int scope, uintptr_t operand, int value, WatchpointSet* set)
{
- int dst = currentInstruction[1].u.operand;
- int index = currentInstruction[2].u.operand;
- int skip = currentInstruction[3].u.operand;
-
- emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT2);
- bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
- ASSERT(skip || !checkTopLevel);
- if (checkTopLevel && skip--) {
- Jump activationNotCreated;
- if (checkTopLevel)
- activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), TrustedImm32(JSValue::EmptyValueTag));
- loadPtr(Address(regT2, JSScope::offsetOfNext()), regT2);
- activationNotCreated.link(this);
- }
- while (skip--)
- loadPtr(Address(regT2, JSScope::offsetOfNext()), regT2);
-
- loadPtr(Address(regT2, JSVariableObject::offsetOfRegisters()), regT2);
-
- emitLoad(index, regT1, regT0, regT2);
- emitValueProfilingSite();
- emitStore(dst, regT1, regT0);
- map(m_bytecodeOffset + OPCODE_LENGTH(op_get_scoped_var), dst, regT1, regT0);
+ emitLoad(value, regT3, regT2);
+ emitLoad(scope, regT1, regT0);
+ emitNotifyWrite(set);
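+ // Store the new tag and payload into the environment record's variable slot.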
+ store32(regT3, Address(regT0, JSEnvironmentRecord::offsetOfVariables() + operand * sizeof(Register) + TagOffset));
+ store32(regT2, Address(regT0, JSEnvironmentRecord::offsetOfVariables() + operand * sizeof(Register) + PayloadOffset));
}
-void JIT::emit_op_put_scoped_var(Instruction* currentInstruction)
+void JIT::emit_op_put_to_scope(Instruction* currentInstruction)
{
- int index = currentInstruction[1].u.operand;
- int skip = currentInstruction[2].u.operand;
+ int scope = currentInstruction[1].u.operand;
int value = currentInstruction[3].u.operand;
-
- emitLoad(value, regT1, regT0);
-
- emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT2);
- bool checkTopLevel = m_codeBlock->codeType() == FunctionCode && m_codeBlock->needsFullScopeChain();
- ASSERT(skip || !checkTopLevel);
- if (checkTopLevel && skip--) {
- Jump activationNotCreated;
- if (checkTopLevel)
- activationNotCreated = branch32(Equal, tagFor(m_codeBlock->activationRegister()), TrustedImm32(JSValue::EmptyValueTag));
- loadPtr(Address(regT2, JSScope::offsetOfNext()), regT2);
- activationNotCreated.link(this);
+ GetPutInfo getPutInfo = GetPutInfo(currentInstruction[4].u.operand);
+ ResolveType resolveType = getPutInfo.resolveType();
+ Structure** structureSlot = currentInstruction[5].u.structure.slot();
+ uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&currentInstruction[6].u.pointer);
+
+ auto emitCode = [&] (ResolveType resolveType, bool indirectLoadForOperand) {
+ switch (resolveType) {
+ case GlobalProperty:
+ case GlobalPropertyWithVarInjectionChecks: {
+ emitWriteBarrier(m_codeBlock->globalObject(), value, ShouldFilterValue);
+ emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection.
+ emitLoad(value, regT3, regT2);
+
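+ // Store through the global object's butterfly at the cached out-of-line offset.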
+ loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
+ loadPtr(operandSlot, regT1);
+ negPtr(regT1);
+ store32(regT3, BaseIndex(regT0, regT1, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ store32(regT2, BaseIndex(regT0, regT1, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ break;
+ }
+ case GlobalVar:
+ case GlobalVarWithVarInjectionChecks:
+ case GlobalLexicalVar:
+ case GlobalLexicalVarWithVarInjectionChecks: {
+ JSScope* constantScope = JSScope::constantScopeForCodeBlock(resolveType, m_codeBlock);
+ RELEASE_ASSERT(constantScope);
+ emitWriteBarrier(constantScope, value, ShouldFilterValue);
+ emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
+ if (getPutInfo.initializationMode() != Initialization && (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)) {
+ // We need to do a TDZ check here because we can't always prove we need to emit TDZ checks statically.
+ if (indirectLoadForOperand)
+ emitGetVarFromIndirectPointer(bitwise_cast<JSValue**>(operandSlot), regT1, regT0);
+ else
+ emitGetVarFromPointer(bitwise_cast<JSValue*>(*operandSlot), regT1, regT0);
+ addSlowCase(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));
+ }
+ if (indirectLoadForOperand)
+ emitPutGlobalVariableIndirect(bitwise_cast<JSValue**>(operandSlot), value, bitwise_cast<WatchpointSet**>(&currentInstruction[5]));
+ else
+ emitPutGlobalVariable(bitwise_cast<JSValue*>(*operandSlot), value, currentInstruction[5].u.watchpointSet);
+ break;
+ }
+ case LocalClosureVar:
+ case ClosureVar:
+ case ClosureVarWithVarInjectionChecks:
+ emitWriteBarrier(scope, value, ShouldFilterValue);
+ emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
+ emitPutClosureVar(scope, *operandSlot, value, currentInstruction[5].u.watchpointSet);
+ break;
+ case ModuleVar:
+ case Dynamic:
+ addSlowCase(jump());
+ break;
+ case UnresolvedProperty:
+ case UnresolvedPropertyWithVarInjectionChecks:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ };
+
+ switch (resolveType) {
+ case UnresolvedProperty:
+ case UnresolvedPropertyWithVarInjectionChecks: {
+ JumpList skipToEnd;
+ load32(&currentInstruction[4], regT0);
+ and32(TrustedImm32(GetPutInfo::typeBits), regT0); // Load ResolveType into T0
+
+ Jump isGlobalProperty = branch32(Equal, regT0, TrustedImm32(GlobalProperty));
+ Jump notGlobalPropertyWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalPropertyWithVarInjectionChecks));
+ isGlobalProperty.link(this);
+ emitCode(GlobalProperty, false);
+ skipToEnd.append(jump());
+ notGlobalPropertyWithVarInjections.link(this);
+
+ Jump notGlobalLexicalVar = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVar));
+ emitCode(GlobalLexicalVar, true);
+ skipToEnd.append(jump());
+ notGlobalLexicalVar.link(this);
+
+ Jump notGlobalLexicalVarWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVarWithVarInjectionChecks));
+ emitCode(GlobalLexicalVarWithVarInjectionChecks, true);
+ skipToEnd.append(jump());
+ notGlobalLexicalVarWithVarInjections.link(this);
+
+ addSlowCase(jump());
+
+ skipToEnd.link(this);
+ break;
}
- while (skip--)
- loadPtr(Address(regT2, JSScope::offsetOfNext()), regT2);
- loadPtr(Address(regT2, JSVariableObject::offsetOfRegisters()), regT3);
- emitStore(index, regT1, regT0, regT3);
- emitWriteBarrier(regT2, regT1, regT0, regT1, ShouldFilterImmediates, WriteBarrierForVariableAccess);
+ default:
+ emitCode(resolveType, false);
+ break;
+ }
}
-void JIT::emit_op_init_global_const(Instruction* currentInstruction)
+void JIT::emitSlow_op_put_to_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- WriteBarrier<Unknown>* registerPointer = currentInstruction[1].u.registerPointer;
- int value = currentInstruction[2].u.operand;
-
- JSGlobalObject* globalObject = m_codeBlock->globalObject();
-
- emitLoad(value, regT1, regT0);
-
- if (Heap::isWriteBarrierEnabled()) {
- move(TrustedImmPtr(globalObject), regT2);
-
- emitWriteBarrier(globalObject, regT1, regT3, ShouldFilterImmediates, WriteBarrierForVariableAccess);
+ GetPutInfo getPutInfo = GetPutInfo(currentInstruction[4].u.operand);
+ ResolveType resolveType = getPutInfo.resolveType();
+ unsigned linkCount = 0;
+ if (resolveType != GlobalVar && resolveType != ClosureVar && resolveType != LocalClosureVar && resolveType != GlobalLexicalVar)
+ linkCount++;
+ if ((resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks || resolveType == LocalClosureVar
+ || resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)
+ && currentInstruction[5].u.watchpointSet->state() != IsInvalidated)
+ linkCount++;
+ if (getPutInfo.initializationMode() != Initialization && (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)) // TDZ check.
+ linkCount++;
+ if (resolveType == UnresolvedProperty || resolveType == UnresolvedPropertyWithVarInjectionChecks) {
+ // GlobalProperty/GlobalPropertyWithVarInjectionChecks
+ linkCount++; // emitLoadWithStructureCheck
+
+ // GlobalLexicalVar
+ bool needsTDZCheck = getPutInfo.initializationMode() != Initialization;
+ if (needsTDZCheck)
+ linkCount++;
+ linkCount++; // Notify write check.
+
+ // GlobalLexicalVarWithVarInjectionChecks
+ linkCount++; // var injection check.
+ if (needsTDZCheck)
+ linkCount++;
+ linkCount++; // Notify write check.
}
+ if (!linkCount)
+ return;
+ while (linkCount--)
+ linkSlowCase(iter);
- store32(regT1, registerPointer->tagPointer());
- store32(regT0, registerPointer->payloadPointer());
- map(m_bytecodeOffset + OPCODE_LENGTH(op_init_global_const), value, regT1, regT0);
+ if (resolveType == ModuleVar) {
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_throw_strict_mode_readonly_property_write_error);
+ slowPathCall.call();
+ } else
+ callOperation(operationPutToScope, currentInstruction);
}
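[Editorial note] The linkCount arithmetic above has to mirror, one for one, the addSlowCase calls made on the fast path. A plain-C++ sketch of that contract, with stand-in types rather than the real JIT classes:

#include <cassert>
#include <cstddef>
#include <vector>

struct SlowCaseEntry { int label; };

// Fast path side: every branch that may bail out is recorded with addSlowCase().
struct FastPathRecorder {
    std::vector<SlowCaseEntry> slowCases;
    void addSlowCase(int label) { slowCases.push_back({label}); }
};

// Slow path side: consume exactly as many entries as the fast path recorded;
// a mismatch would link later opcodes' slow cases to the wrong handler.
void linkSlowCases(std::vector<SlowCaseEntry>::iterator& iter,
                   std::vector<SlowCaseEntry>::iterator end,
                   std::size_t expectedLinkCount)
{
    for (std::size_t i = 0; i < expectedLinkCount; ++i) {
        assert(iter != end && "slow-path link count drifted from the fast path");
        ++iter; // stands in for linkSlowCase(iter)
    }
}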
-void JIT::emit_op_init_global_const_check(Instruction* currentInstruction)
+void JIT::emit_op_get_from_arguments(Instruction* currentInstruction)
{
- WriteBarrier<Unknown>* registerPointer = currentInstruction[1].u.registerPointer;
- int value = currentInstruction[2].u.operand;
-
- JSGlobalObject* globalObject = m_codeBlock->globalObject();
-
- emitLoad(value, regT1, regT0);
-
- addSlowCase(branchTest8(NonZero, AbsoluteAddress(currentInstruction[3].u.predicatePointer)));
-
- if (Heap::isWriteBarrierEnabled()) {
- move(TrustedImmPtr(globalObject), regT2);
- emitWriteBarrier(globalObject, regT1, regT3, ShouldFilterImmediates, WriteBarrierForVariableAccess);
- }
+ int dst = currentInstruction[1].u.operand;
+ int arguments = currentInstruction[2].u.operand;
+ int index = currentInstruction[3].u.operand;
- store32(regT1, registerPointer->tagPointer());
- store32(regT0, registerPointer->payloadPointer());
- unmap();
+ emitLoadPayload(arguments, regT0);
+ load32(Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>) + TagOffset), regT1);
+ load32(Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>) + PayloadOffset), regT0);
+ emitValueProfilingSite();
+ emitStore(dst, regT1, regT0);
}
-void JIT::emitSlow_op_init_global_const_check(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emit_op_put_to_arguments(Instruction* currentInstruction)
{
- linkSlowCase(iter);
+ int arguments = currentInstruction[1].u.operand;
+ int index = currentInstruction[2].u.operand;
+ int value = currentInstruction[3].u.operand;
- JITStubCall stubCall(this, cti_op_init_global_const_check);
- stubCall.addArgument(regT1, regT0);
- stubCall.addArgument(TrustedImm32(currentInstruction[4].u.operand));
- stubCall.call();
-}
-
-void JIT::resetPatchGetById(RepatchBuffer& repatchBuffer, StructureStubInfo* stubInfo)
-{
- repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_get_by_id);
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.get.structureToCompare), reinterpret_cast<void*>(unusedPointer));
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel1), 0);
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel2), 0);
- repatchBuffer.relink(stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck), stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin));
-}
-
-void JIT::resetPatchPutById(RepatchBuffer& repatchBuffer, StructureStubInfo* stubInfo)
-{
- if (isDirectPutById(stubInfo))
- repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_put_by_id_direct);
- else
- repatchBuffer.relink(stubInfo->callReturnLocation, cti_op_put_by_id);
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.put.structureToCompare), reinterpret_cast<void*>(unusedPointer));
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel1), 0);
- repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel2), 0);
+ emitWriteBarrier(arguments, value, ShouldFilterValue);
+
+ emitLoadPayload(arguments, regT0);
+ emitLoad(value, regT1, regT2);
+ store32(regT1, Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>) + TagOffset));
+ store32(regT2, Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>) + PayloadOffset));
}
} // namespace JSC
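[Editorial note] The two arguments opcodes above address each argument slot as an 8-byte WriteBarrier<Unknown> cell split into a 4-byte payload and a 4-byte tag. A small constexpr sketch of that address arithmetic, for illustration only: kStorageOffset stands in for DirectArguments::storageOffset(), and the offsets assume the little-endian JSVALUE32_64 layout (payload low, tag high; a big-endian target swaps them).

#include <cstddef>
#include <cstdint>

constexpr std::size_t kSlotSize      = sizeof(uint64_t); // one WriteBarrier<Unknown> per argument
constexpr std::size_t kPayloadOffset = 0;                // little-endian assumption
constexpr std::size_t kTagOffset     = 4;
constexpr std::size_t kStorageOffset = 0;                // placeholder for DirectArguments::storageOffset()

// Byte offsets used by the load32/store32 pairs above for argument `index`.
constexpr std::size_t payloadAddressOffset(std::size_t index)
{
    return kStorageOffset + index * kSlotSize + kPayloadOffset;
}

constexpr std::size_t tagAddressOffset(std::size_t index)
{
    return kStorageOffset + index * kSlotSize + kTagOffset;
}

static_assert(payloadAddressOffset(2) == 16 && tagAddressOffset(2) == 20,
              "argument slots are 8 bytes apart, with the tag word 4 bytes above the payload");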
diff --git a/Source/JavaScriptCore/jit/JITRightShiftGenerator.cpp b/Source/JavaScriptCore/jit/JITRightShiftGenerator.cpp
new file mode 100644
index 000000000..4e75fafc0
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITRightShiftGenerator.cpp
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JITRightShiftGenerator.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+void JITRightShiftGenerator::generateFastPath(CCallHelpers& jit)
+{
+ ASSERT(m_scratchGPR != InvalidGPRReg);
+ ASSERT(m_scratchGPR != m_left.payloadGPR());
+ ASSERT(m_scratchGPR != m_right.payloadGPR());
+#if USE(JSVALUE32_64)
+ ASSERT(m_scratchGPR != m_left.tagGPR());
+ ASSERT(m_scratchGPR != m_right.tagGPR());
+ ASSERT(m_scratchFPR != InvalidFPRReg);
+#endif
+
+ ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());
+
+ m_didEmitFastPath = true;
+
+ if (m_rightOperand.isConstInt32()) {
+ // Try to do (intVar >> intConstant).
+ CCallHelpers::Jump notInt = jit.branchIfNotInt32(m_left);
+
+ jit.moveValueRegs(m_left, m_result);
+ int32_t shiftAmount = m_rightOperand.asConstInt32() & 0x1f;
+ if (shiftAmount) {
+ if (m_shiftType == SignedShift)
+ jit.rshift32(CCallHelpers::Imm32(shiftAmount), m_result.payloadGPR());
+ else
+ jit.urshift32(CCallHelpers::Imm32(shiftAmount), m_result.payloadGPR());
+#if USE(JSVALUE64)
+ jit.or64(GPRInfo::tagTypeNumberRegister, m_result.payloadGPR());
+#endif
+ }
+
+ if (jit.supportsFloatingPointTruncate()) {
+ m_endJumpList.append(jit.jump()); // Terminate the above case before emitting more code.
+
+ // Try to do (doubleVar >> intConstant).
+ notInt.link(&jit);
+
+ m_slowPathJumpList.append(jit.branchIfNotNumber(m_left, m_scratchGPR));
+
+ jit.unboxDoubleNonDestructive(m_left, m_leftFPR, m_scratchGPR, m_scratchFPR);
+ m_slowPathJumpList.append(jit.branchTruncateDoubleToInt32(m_leftFPR, m_scratchGPR));
+
+ if (shiftAmount) {
+ if (m_shiftType == SignedShift)
+ jit.rshift32(CCallHelpers::Imm32(shiftAmount), m_scratchGPR);
+ else
+ jit.urshift32(CCallHelpers::Imm32(shiftAmount), m_scratchGPR);
+ }
+ jit.boxInt32(m_scratchGPR, m_result);
+
+ } else
+ m_slowPathJumpList.append(notInt);
+
+ } else {
+ // Try to do (intConstant >> intVar) or (intVar >> intVar).
+ m_slowPathJumpList.append(jit.branchIfNotInt32(m_right));
+
+ GPRReg rightOperandGPR = m_right.payloadGPR();
+ if (rightOperandGPR == m_result.payloadGPR())
+ rightOperandGPR = m_scratchGPR;
+
+ CCallHelpers::Jump leftNotInt;
+ if (m_leftOperand.isConstInt32()) {
+ jit.move(m_right.payloadGPR(), rightOperandGPR);
+#if USE(JSVALUE32_64)
+ jit.move(m_right.tagGPR(), m_result.tagGPR());
+#endif
+ jit.move(CCallHelpers::Imm32(m_leftOperand.asConstInt32()), m_result.payloadGPR());
+ } else {
+ leftNotInt = jit.branchIfNotInt32(m_left);
+ jit.move(m_right.payloadGPR(), rightOperandGPR);
+ jit.moveValueRegs(m_left, m_result);
+ }
+
+ if (m_shiftType == SignedShift)
+ jit.rshift32(rightOperandGPR, m_result.payloadGPR());
+ else
+ jit.urshift32(rightOperandGPR, m_result.payloadGPR());
+#if USE(JSVALUE64)
+ jit.or64(GPRInfo::tagTypeNumberRegister, m_result.payloadGPR());
+#endif
+ if (m_leftOperand.isConstInt32())
+ return;
+
+ if (jit.supportsFloatingPointTruncate()) {
+ m_endJumpList.append(jit.jump()); // Terminate the above case before emitting more code.
+
+ // Try to do (doubleVar >> intVar).
+ leftNotInt.link(&jit);
+
+ m_slowPathJumpList.append(jit.branchIfNotNumber(m_left, m_scratchGPR));
+ jit.unboxDoubleNonDestructive(m_left, m_leftFPR, m_scratchGPR, m_scratchFPR);
+ m_slowPathJumpList.append(jit.branchTruncateDoubleToInt32(m_leftFPR, m_scratchGPR));
+
+ if (m_shiftType == SignedShift)
+ jit.rshift32(m_right.payloadGPR(), m_scratchGPR);
+ else
+ jit.urshift32(m_right.payloadGPR(), m_scratchGPR);
+ jit.boxInt32(m_scratchGPR, m_result);
+
+ } else
+ m_slowPathJumpList.append(leftNotInt);
+ }
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
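[Editorial note] Semantically, the fast path above implements ECMAScript's >> and >>> operators, which mask the shift count to its low five bits; the only difference between the two ShiftType cases is sign extension. A plain C++ restatement of that behaviour (a sketch of the semantics, not the emitted code):

#include <cstdint>

int32_t jsSignedRightShift(int32_t left, int32_t right)
{
    return left >> (static_cast<uint32_t>(right) & 0x1f); // SignedShift: rshift32
}

uint32_t jsUnsignedRightShift(int32_t left, int32_t right)
{
    return static_cast<uint32_t>(left) >> (static_cast<uint32_t>(right) & 0x1f); // UnsignedShift: urshift32
}

This is why the generator can skip the shift entirely when the constant shift amount masks to zero, and why the double-operand path only needs a truncate-to-int32 before reusing the same shift.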
diff --git a/Source/JavaScriptCore/jit/JITRightShiftGenerator.h b/Source/JavaScriptCore/jit/JITRightShiftGenerator.h
new file mode 100644
index 000000000..a3676ec3c
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITRightShiftGenerator.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITRightShiftGenerator_h
+#define JITRightShiftGenerator_h
+
+#if ENABLE(JIT)
+
+#include "JITBitBinaryOpGenerator.h"
+
+namespace JSC {
+
+class JITRightShiftGenerator : public JITBitBinaryOpGenerator {
+public:
+ enum ShiftType {
+ SignedShift,
+ UnsignedShift
+ };
+
+ JITRightShiftGenerator(const SnippetOperand& leftOperand, const SnippetOperand& rightOperand,
+ JSValueRegs result, JSValueRegs left, JSValueRegs right,
+ FPRReg leftFPR, GPRReg scratchGPR, FPRReg scratchFPR, ShiftType type = SignedShift)
+ : JITBitBinaryOpGenerator(leftOperand, rightOperand, result, left, right, scratchGPR)
+ , m_shiftType(type)
+ , m_leftFPR(leftFPR)
+ , m_scratchFPR(scratchFPR)
+ { }
+
+ void generateFastPath(CCallHelpers&);
+
+private:
+ ShiftType m_shiftType;
+ FPRReg m_leftFPR;
+ FPRReg m_scratchFPR;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // JITRightShiftGenerator_h
diff --git a/Source/JavaScriptCore/jit/JITStubCall.h b/Source/JavaScriptCore/jit/JITStubCall.h
deleted file mode 100644
index 25755886a..000000000
--- a/Source/JavaScriptCore/jit/JITStubCall.h
+++ /dev/null
@@ -1,303 +0,0 @@
-/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef JITStubCall_h
-#define JITStubCall_h
-
-#include "MacroAssemblerCodeRef.h"
-
-#if ENABLE(JIT)
-
-namespace JSC {
-
- class JITStubCall {
- public:
- JITStubCall(JIT* jit, JSObject* (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
- : m_jit(jit)
- , m_stub(stub)
-#if USE(JSVALUE32_64) || !ASSERT_DISABLED
- , m_returnType(Cell)
-#endif
- , m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
- {
- }
-
- JITStubCall(JIT* jit, JSPropertyNameIterator* (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
- : m_jit(jit)
- , m_stub(stub)
-#if USE(JSVALUE32_64) || !ASSERT_DISABLED
- , m_returnType(Cell)
-#endif
- , m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
- {
- }
-
- JITStubCall(JIT* jit, void* (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
- : m_jit(jit)
- , m_stub(stub)
-#if USE(JSVALUE32_64) || !ASSERT_DISABLED
- , m_returnType(VoidPtr)
-#endif
- , m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
- {
- }
-
- JITStubCall(JIT* jit, int (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
- : m_jit(jit)
- , m_stub(stub)
-#if USE(JSVALUE32_64) || !ASSERT_DISABLED
- , m_returnType(Int)
-#endif
- , m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
- {
- }
-
- JITStubCall(JIT* jit, bool (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
- : m_jit(jit)
- , m_stub(stub)
-#if USE(JSVALUE32_64) || !ASSERT_DISABLED
- , m_returnType(Int)
-#endif
- , m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
- {
- }
-
- JITStubCall(JIT* jit, void (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
- : m_jit(jit)
- , m_stub(stub)
-#if USE(JSVALUE32_64) || !ASSERT_DISABLED
- , m_returnType(Void)
-#endif
- , m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
- {
- }
-
- JITStubCall(JIT* jit, EncodedJSValue (JIT_STUB *stub)(STUB_ARGS_DECLARATION))
- : m_jit(jit)
- , m_stub(stub)
-#if USE(JSVALUE32_64) || !ASSERT_DISABLED
- , m_returnType(Value)
-#endif
- , m_stackIndex(JITSTACKFRAME_ARGS_INDEX)
- {
- }
-
- // Arguments are added first to last.
-
- void skipArgument()
- {
- m_stackIndex += stackIndexStep;
- }
-
- void addArgument(JIT::TrustedImm32 argument)
- {
- m_jit->poke(argument, m_stackIndex);
- m_stackIndex += stackIndexStep;
- }
-
- void addArgument(JIT::Imm32 argument)
- {
- m_jit->poke(argument, m_stackIndex);
- m_stackIndex += stackIndexStep;
- }
-
- void addArgument(JIT::TrustedImmPtr argument)
- {
- m_jit->poke(argument, m_stackIndex);
- m_stackIndex += stackIndexStep;
- }
-
- void addArgument(JIT::ImmPtr argument)
- {
- m_jit->poke(argument, m_stackIndex);
- m_stackIndex += stackIndexStep;
- }
-
- void addArgument(JIT::RegisterID argument)
- {
-#if USE(JSVALUE32_64)
- m_jit->poke(argument, m_stackIndex);
-#else
- m_jit->poke64(argument, m_stackIndex);
-#endif
- m_stackIndex += stackIndexStep;
- }
-
-#if USE(JSVALUE32_64)
- void addArgument(const JSValue& value)
- {
- m_jit->poke(JIT::Imm32(value.payload()), m_stackIndex);
- m_jit->poke(JIT::Imm32(value.tag()), m_stackIndex + 1);
- m_stackIndex += stackIndexStep;
- }
-#else
- void addArgument(JIT::TrustedImm64 argument)
- {
- m_jit->poke(argument, m_stackIndex);
- m_stackIndex += stackIndexStep;
- }
-
- void addArgument(JIT::Imm64 argument)
- {
- m_jit->poke(argument, m_stackIndex);
- m_stackIndex += stackIndexStep;
- }
-#endif
-
- void addArgument(JIT::RegisterID tag, JIT::RegisterID payload)
- {
- m_jit->poke(payload, m_stackIndex);
- m_jit->poke(tag, m_stackIndex + 1);
- m_stackIndex += stackIndexStep;
- }
-
-#if USE(JSVALUE32_64)
- void addArgument(unsigned srcVirtualRegister)
- {
- if (m_jit->m_codeBlock->isConstantRegisterIndex(srcVirtualRegister)) {
- addArgument(m_jit->getConstantOperand(srcVirtualRegister));
- return;
- }
-
- m_jit->emitLoad(srcVirtualRegister, JIT::regT1, JIT::regT0);
- addArgument(JIT::regT1, JIT::regT0);
- }
-
- void getArgument(size_t argumentNumber, JIT::RegisterID tag, JIT::RegisterID payload)
- {
- size_t stackIndex = JITSTACKFRAME_ARGS_INDEX + (argumentNumber * stackIndexStep);
- m_jit->peek(payload, stackIndex);
- m_jit->peek(tag, stackIndex + 1);
- }
-#else
- void addArgument(unsigned src, JIT::RegisterID scratchRegister) // src is a virtual register.
- {
- if (m_jit->m_codeBlock->isConstantRegisterIndex(src))
- addArgument(JIT::Imm64(JSValue::encode(m_jit->m_codeBlock->getConstant(src))));
- else {
- m_jit->load64(JIT::Address(JIT::callFrameRegister, src * sizeof(Register)), scratchRegister);
- addArgument(scratchRegister);
- }
- m_jit->killLastResultRegister();
- }
-#endif
-
- JIT::Call call()
- {
-#if ENABLE(OPCODE_SAMPLING)
- if (m_jit->m_bytecodeOffset != (unsigned)-1)
- m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeOffset, true);
-#endif
-
- m_jit->restoreArgumentReference();
- m_jit->updateTopCallFrame();
- JIT::Call call = m_jit->call();
- m_jit->m_calls.append(CallRecord(call, m_jit->m_bytecodeOffset, m_stub.value()));
-
-#if ENABLE(OPCODE_SAMPLING)
- if (m_jit->m_bytecodeOffset != (unsigned)-1)
- m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeOffset, false);
-#endif
-
-#if USE(JSVALUE32_64)
- m_jit->unmap();
-#else
- m_jit->killLastResultRegister();
-#endif
- return call;
- }
-
-#if USE(JSVALUE32_64)
- JIT::Call call(unsigned dst) // dst is a virtual register.
- {
- ASSERT(m_returnType == Value || m_returnType == Cell);
- JIT::Call call = this->call();
- if (m_returnType == Value)
- m_jit->emitStore(dst, JIT::regT1, JIT::regT0);
- else
- m_jit->emitStoreCell(dst, JIT::returnValueRegister);
- return call;
- }
-
- JIT::Call callWithValueProfiling(unsigned dst)
- {
- ASSERT(m_returnType == Value || m_returnType == Cell);
- JIT::Call call = this->call();
- ASSERT(JIT::returnValueRegister == JIT::regT0);
- if (m_returnType == Cell)
- m_jit->move(JIT::TrustedImm32(JSValue::CellTag), JIT::regT1);
- m_jit->emitValueProfilingSite();
- if (m_returnType == Value)
- m_jit->emitStore(dst, JIT::regT1, JIT::regT0);
- else
- m_jit->emitStoreCell(dst, JIT::returnValueRegister);
- return call;
- }
-#else
- JIT::Call call(unsigned dst) // dst is a virtual register.
- {
- ASSERT(m_returnType == Value || m_returnType == Cell);
- JIT::Call call = this->call();
- m_jit->emitPutVirtualRegister(dst);
- return call;
- }
-
- JIT::Call callWithValueProfiling(unsigned dst)
- {
- ASSERT(m_returnType == Value || m_returnType == Cell);
- JIT::Call call = this->call();
- ASSERT(JIT::returnValueRegister == JIT::regT0);
- m_jit->emitValueProfilingSite();
- m_jit->emitPutVirtualRegister(dst);
- return call;
- }
-#endif
-
- JIT::Call call(JIT::RegisterID dst) // dst is a machine register.
- {
-#if USE(JSVALUE32_64) || !ASSERT_DISABLED
- ASSERT(m_returnType == Value || m_returnType == VoidPtr || m_returnType == Int || m_returnType == Cell);
-#endif
- JIT::Call call = this->call();
- if (dst != JIT::returnValueRegister)
- m_jit->move(JIT::returnValueRegister, dst);
- return call;
- }
-
- private:
- static const size_t stackIndexStep = sizeof(EncodedJSValue) == 2 * sizeof(void*) ? 2 : 1;
-
- JIT* m_jit;
- FunctionPtr m_stub;
-#if USE(JSVALUE32_64) || !ASSERT_DISABLED
- enum { Void, VoidPtr, Int, Value, Cell } m_returnType;
-#endif
- size_t m_stackIndex;
- };
-}
-
-#endif // ENABLE(JIT)
-
-#endif // JITStubCall_h
diff --git a/Source/JavaScriptCore/jit/JITStubRoutine.cpp b/Source/JavaScriptCore/jit/JITStubRoutine.cpp
index 28543a8b8..74e537747 100644
--- a/Source/JavaScriptCore/jit/JITStubRoutine.cpp
+++ b/Source/JavaScriptCore/jit/JITStubRoutine.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,13 +29,18 @@
#if ENABLE(JIT)
#include "JSObject.h"
-
+#include "JSCInlines.h"
#include "SlotVisitor.h"
namespace JSC {
JITStubRoutine::~JITStubRoutine() { }
+bool JITStubRoutine::visitWeak(VM&)
+{
+ return true;
+}
+
void JITStubRoutine::observeZeroRefCount()
{
RELEASE_ASSERT(!m_refCount);
diff --git a/Source/JavaScriptCore/jit/JITStubRoutine.h b/Source/JavaScriptCore/jit/JITStubRoutine.h
index 020ef6907..db9aaa770 100644
--- a/Source/JavaScriptCore/jit/JITStubRoutine.h
+++ b/Source/JavaScriptCore/jit/JITStubRoutine.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,8 +26,6 @@
#ifndef JITStubRoutine_h
#define JITStubRoutine_h
-#include <wtf/Platform.h>
-
#if ENABLE(JIT)
#include "ExecutableAllocator.h"
@@ -61,13 +59,14 @@ public:
// Use this if you want to pass a CodePtr to someone who insists on taking
// a RefPtr<JITStubRoutine>.
- static PassRefPtr<JITStubRoutine> createSelfManagedRoutine(
+ static Ref<JITStubRoutine> createSelfManagedRoutine(
MacroAssemblerCodePtr rawCodePointer)
{
- return adoptRef(new JITStubRoutine(MacroAssemblerCodeRef::createSelfManagedCodeRef(rawCodePointer)));
+ return adoptRef(*new JITStubRoutine(MacroAssemblerCodeRef::createSelfManagedCodeRef(rawCodePointer)));
}
virtual ~JITStubRoutine();
+ virtual void aboutToDie() { }
// MacroAssemblerCodeRef is copyable, but at the cost of reference
// counting churn. Returning a reference is a good way of reducing
@@ -141,6 +140,11 @@ public:
return true;
}
+
+ // Return true if this routine is still valid after visiting its weak references; return false if it
+ // is now invalid. If you return false, you will usually not do any clearing, because the idea is
+ // that you will simply be destroyed.
+ virtual bool visitWeak(VM&);
protected:
virtual void observeZeroRefCount();
@@ -150,11 +154,8 @@ protected:
};
// Helper for the creation of simple stub routines that need no help from the GC.
-#define FINALIZE_CODE_FOR_STUB(patchBuffer, dataLogFArguments) \
- (adoptRef(new JITStubRoutine(FINALIZE_CODE((patchBuffer), dataLogFArguments))))
-
-#define FINALIZE_CODE_FOR_DFG_STUB(patchBuffer, dataLogFArguments) \
- (adoptRef(new JITStubRoutine(FINALIZE_DFG_CODE((patchBuffer), dataLogFArguments))))
+#define FINALIZE_CODE_FOR_STUB(codeBlock, patchBuffer, dataLogFArguments) \
+ (adoptRef(new JITStubRoutine(FINALIZE_CODE_FOR((codeBlock), (patchBuffer), dataLogFArguments))))
} // namespace JSC
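[Editorial note] A simplified sketch of how a subclass might honour the visitWeak() contract introduced in this patch. These are stand-in types only; the real GC-aware routines use JSC's Weak<> handles and VM, not std::weak_ptr.

#include <memory>
#include <utility>

struct VM {};   // placeholder for JSC::VM
struct Cell {}; // placeholder for a weakly-held JSCell

class StubRoutineBase {
public:
    virtual ~StubRoutineBase() = default;
    virtual bool visitWeak(VM&) { return true; } // default: nothing weak, always valid
};

class CellDependentStubRoutine : public StubRoutineBase {
public:
    explicit CellDependentStubRoutine(std::weak_ptr<Cell> cell) : m_cell(std::move(cell)) { }

    bool visitWeak(VM&) override
    {
        // Still valid only while the weakly-held cell is alive; once it dies,
        // report false and let the owner drop (destroy) this routine instead
        // of trying to clear it in place.
        return !m_cell.expired();
    }

private:
    std::weak_ptr<Cell> m_cell; // stands in for a JSC Weak<> handle
};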
diff --git a/Source/JavaScriptCore/jit/JITStubs.cpp b/Source/JavaScriptCore/jit/JITStubs.cpp
deleted file mode 100644
index 9be418e56..000000000
--- a/Source/JavaScriptCore/jit/JITStubs.cpp
+++ /dev/null
@@ -1,3576 +0,0 @@
-/*
- * Copyright (C) 2008, 2009, 2013 Apple Inc. All rights reserved.
- * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
- * Copyright (C) Research In Motion Limited 2010, 2011. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
- * its contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-
-#if ENABLE(JIT)
-#include "JITStubs.h"
-
-#include "CommonSlowPaths.h"
-#include "Arguments.h"
-#include "ArrayConstructor.h"
-#include "CallFrame.h"
-#include "CodeBlock.h"
-#include "CodeProfiling.h"
-#include "DFGOSREntry.h"
-#include "Debugger.h"
-#include "ExceptionHelpers.h"
-#include "GetterSetter.h"
-#include "Heap.h"
-#include <wtf/InlineASM.h>
-#include "JIT.h"
-#include "JITExceptions.h"
-#include "JSActivation.h"
-#include "JSArray.h"
-#include "JSFunction.h"
-#include "JSGlobalObjectFunctions.h"
-#include "JSNameScope.h"
-#include "JSNotAnObject.h"
-#include "JSPropertyNameIterator.h"
-#include "JSString.h"
-#include "JSWithScope.h"
-#include "LegacyProfiler.h"
-#include "NameInstance.h"
-#include "ObjectConstructor.h"
-#include "ObjectPrototype.h"
-#include "Operations.h"
-#include "Parser.h"
-#include "RegExpObject.h"
-#include "RegExpPrototype.h"
-#include "Register.h"
-#include "RepatchBuffer.h"
-#include "SamplingTool.h"
-#include "Strong.h"
-#include "StructureRareDataInlines.h"
-#include <wtf/StdLibExtras.h>
-#include <stdarg.h>
-#include <stdio.h>
-
-using namespace std;
-
-namespace JSC {
-
-#if USE(JSVALUE32_64)
-
-#if COMPILER(GCC) && CPU(X86)
-
-// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
-// need to change the assembly trampolines below to match.
-COMPILE_ASSERT(offsetof(struct JITStackFrame, code) % 16 == 0x0, JITStackFrame_maintains_16byte_stack_alignment);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, savedEBX) == 0x3c, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x58, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x50, JITStackFrame_code_offset_matches_ctiTrampoline);
-
-asm (
-".text\n"
-".globl " SYMBOL_STRING(ctiTrampoline) "\n"
-HIDE_SYMBOL(ctiTrampoline) "\n"
-SYMBOL_STRING(ctiTrampoline) ":" "\n"
- "pushl %ebp" "\n"
- "movl %esp, %ebp" "\n"
- "pushl %esi" "\n"
- "pushl %edi" "\n"
- "pushl %ebx" "\n"
- "subl $0x3c, %esp" "\n"
- "movw $0x02FF, %bx" "\n"
- "movw %bx, 0(%esp)" "\n"
- "fldcw 0(%esp)" "\n"
- "movl 0x58(%esp), %edi" "\n"
- "call *0x50(%esp)" "\n"
- "addl $0x3c, %esp" "\n"
- "popl %ebx" "\n"
- "popl %edi" "\n"
- "popl %esi" "\n"
- "popl %ebp" "\n"
- "ffree %st(1)" "\n"
- "ret" "\n"
-".globl " SYMBOL_STRING(ctiTrampolineEnd) "\n"
-HIDE_SYMBOL(ctiTrampolineEnd) "\n"
-SYMBOL_STRING(ctiTrampolineEnd) ":" "\n"
-);
-
-asm (
-".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
-HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
-SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
- "movl %esp, %ecx" "\n"
- "call " LOCAL_REFERENCE(cti_vm_throw) "\n"
- "int3" "\n"
-);
-
-asm (
-".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
-HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
-SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
- "addl $0x3c, %esp" "\n"
- "popl %ebx" "\n"
- "popl %edi" "\n"
- "popl %esi" "\n"
- "popl %ebp" "\n"
- "ret" "\n"
-);
-
-#elif COMPILER(GCC) && CPU(X86_64)
-
-// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
-// need to change the assembly trampolines below to match.
-COMPILE_ASSERT(offsetof(struct JITStackFrame, code) % 32 == 0x0, JITStackFrame_maintains_32byte_stack_alignment);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, savedRBX) == 0x48, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x90, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x80, JITStackFrame_code_offset_matches_ctiTrampoline);
-
-asm (
-".globl " SYMBOL_STRING(ctiTrampoline) "\n"
-HIDE_SYMBOL(ctiTrampoline) "\n"
-SYMBOL_STRING(ctiTrampoline) ":" "\n"
- "pushq %rbp" "\n"
- "movq %rsp, %rbp" "\n"
- "pushq %r12" "\n"
- "pushq %r13" "\n"
- "pushq %r14" "\n"
- "pushq %r15" "\n"
- "pushq %rbx" "\n"
- "subq $0x48, %rsp" "\n"
- "movq $512, %r12" "\n"
- "movq $0xFFFF000000000000, %r14" "\n"
- "movq $0xFFFF000000000002, %r15" "\n"
- "movq 0x90(%rsp), %r13" "\n"
- "call *0x80(%rsp)" "\n"
- "addq $0x48, %rsp" "\n"
- "popq %rbx" "\n"
- "popq %r15" "\n"
- "popq %r14" "\n"
- "popq %r13" "\n"
- "popq %r12" "\n"
- "popq %rbp" "\n"
- "ret" "\n"
-".globl " SYMBOL_STRING(ctiTrampolineEnd) "\n"
-HIDE_SYMBOL(ctiTrampolineEnd) "\n"
-SYMBOL_STRING(ctiTrampolineEnd) ":" "\n"
-);
-
-asm (
-".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
-HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
-SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
- "movq %rsp, %rdi" "\n"
- "call " LOCAL_REFERENCE(cti_vm_throw) "\n"
- "int3" "\n"
-);
-
-asm (
-".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
-HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
-SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
- "addq $0x48, %rsp" "\n"
- "popq %rbx" "\n"
- "popq %r15" "\n"
- "popq %r14" "\n"
- "popq %r13" "\n"
- "popq %r12" "\n"
- "popq %rbp" "\n"
- "ret" "\n"
-);
-
-#elif (COMPILER(GCC) || COMPILER(RVCT)) && CPU(ARM_THUMB2)
-
-#define THUNK_RETURN_ADDRESS_OFFSET 0x38
-#define PRESERVED_RETURN_ADDRESS_OFFSET 0x3C
-#define PRESERVED_R4_OFFSET 0x40
-#define PRESERVED_R5_OFFSET 0x44
-#define PRESERVED_R6_OFFSET 0x48
-#define PRESERVED_R7_OFFSET 0x4C
-#define PRESERVED_R8_OFFSET 0x50
-#define PRESERVED_R9_OFFSET 0x54
-#define PRESERVED_R10_OFFSET 0x58
-#define PRESERVED_R11_OFFSET 0x5C
-#define REGISTER_FILE_OFFSET 0x60
-#define FIRST_STACK_ARGUMENT 0x68
-
-#elif (COMPILER(GCC) || COMPILER(MSVC) || COMPILER(RVCT)) && CPU(ARM_TRADITIONAL)
-
-// Also update the MSVC section (defined at DEFINE_STUB_FUNCTION)
-// when changing one of the following values.
-#define THUNK_RETURN_ADDRESS_OFFSET 64
-#define PRESERVEDR4_OFFSET 68
-
-#elif COMPILER(MSVC) && CPU(X86)
-
-// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
-// need to change the assembly trampolines below to match.
-COMPILE_ASSERT(offsetof(struct JITStackFrame, code) % 16 == 0x0, JITStackFrame_maintains_16byte_stack_alignment);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, savedEBX) == 0x3c, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x58, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x50, JITStackFrame_code_offset_matches_ctiTrampoline);
-
-extern "C" {
-
- __declspec(naked) EncodedJSValue ctiTrampoline(void* code, JSStack*, CallFrame*, void* /*unused1*/, void* /*unused2*/, VM*)
- {
- __asm {
- push ebp;
- mov ebp, esp;
- push esi;
- push edi;
- push ebx;
- sub esp, 0x3c;
- mov ecx, esp;
- mov edi, [esp + 0x58];
- call [esp + 0x50];
- add esp, 0x3c;
- pop ebx;
- pop edi;
- pop esi;
- pop ebp;
- ret;
- }
- }
-
- __declspec(naked) void ctiVMThrowTrampoline()
- {
- __asm {
- mov ecx, esp;
- call cti_vm_throw;
- add esp, 0x3c;
- pop ebx;
- pop edi;
- pop esi;
- pop ebp;
- ret;
- }
- }
-
- __declspec(naked) void ctiOpThrowNotCaught()
- {
- __asm {
- add esp, 0x3c;
- pop ebx;
- pop edi;
- pop esi;
- pop ebp;
- ret;
- }
- }
-}
-
-#elif CPU(MIPS)
-
-#define PRESERVED_GP_OFFSET 60
-#define PRESERVED_S0_OFFSET 64
-#define PRESERVED_S1_OFFSET 68
-#define PRESERVED_S2_OFFSET 72
-#define PRESERVED_S3_OFFSET 76
-#define PRESERVED_S4_OFFSET 80
-#define PRESERVED_RETURN_ADDRESS_OFFSET 84
-#define THUNK_RETURN_ADDRESS_OFFSET 88
-#define REGISTER_FILE_OFFSET 92
-#define GLOBAL_DATA_OFFSET 108
-#define STACK_LENGTH 112
-
-#elif CPU(SH4)
-#define SYMBOL_STRING(name) #name
-/* code (r4), JSStack* (r5), CallFrame* (r6), void* unused1 (r7), void* unused2(sp), VM (sp)*/
-
-#define THUNK_RETURN_ADDRESS_OFFSET 56
-#define SAVED_R8_OFFSET 60
-
-asm volatile (
-".text\n"
-".globl " SYMBOL_STRING(ctiTrampoline) "\n"
-HIDE_SYMBOL(ctiTrampoline) "\n"
-SYMBOL_STRING(ctiTrampoline) ":" "\n"
- "mov.l r7, @-r15" "\n"
- "mov.l r6, @-r15" "\n"
- "mov.l r5, @-r15" "\n"
- "mov.l r14, @-r15" "\n"
- "sts.l pr, @-r15" "\n"
- "mov.l r13, @-r15" "\n"
- "mov.l r11, @-r15" "\n"
- "mov.l r10, @-r15" "\n"
- "mov.l r9, @-r15" "\n"
- "mov.l r8, @-r15" "\n"
- "add #-" STRINGIZE_VALUE_OF(SAVED_R8_OFFSET) ", r15" "\n"
- "mov r6, r14" "\n"
- "jsr @r4" "\n"
- "nop" "\n"
- "add #" STRINGIZE_VALUE_OF(SAVED_R8_OFFSET) ", r15" "\n"
- "mov.l @r15+,r8" "\n"
- "mov.l @r15+,r9" "\n"
- "mov.l @r15+,r10" "\n"
- "mov.l @r15+,r11" "\n"
- "mov.l @r15+,r13" "\n"
- "lds.l @r15+,pr" "\n"
- "mov.l @r15+,r14" "\n"
- "add #12, r15" "\n"
- "rts" "\n"
- "nop" "\n"
-".globl " SYMBOL_STRING(ctiTrampolineEnd) "\n"
-HIDE_SYMBOL(ctiTrampolineEnd) "\n"
-SYMBOL_STRING(ctiTrampolineEnd) ":" "\n"
-);
-
-asm volatile (
-".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
-HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
-SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
- "mov.l .L2" SYMBOL_STRING(cti_vm_throw) ",r0" "\n"
- "mov r15, r4" "\n"
- "mov.l @(r0,r12),r11" "\n"
- "jsr @r11" "\n"
- "nop" "\n"
- "add #" STRINGIZE_VALUE_OF(SAVED_R8_OFFSET) ", r15" "\n"
- "mov.l @r15+,r8" "\n"
- "mov.l @r15+,r9" "\n"
- "mov.l @r15+,r10" "\n"
- "mov.l @r15+,r11" "\n"
- "mov.l @r15+,r13" "\n"
- "lds.l @r15+,pr" "\n"
- "mov.l @r15+,r14" "\n"
- "add #12, r15" "\n"
- "rts" "\n"
- "nop" "\n"
- ".align 2" "\n"
- ".L2" SYMBOL_STRING(cti_vm_throw) ":.long " SYMBOL_STRING(cti_vm_throw) "@GOT \n"
-);
-
-asm volatile (
-".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
-HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
-SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
- "add #" STRINGIZE_VALUE_OF(SAVED_R8_OFFSET) ", r15" "\n"
- "mov.l @r15+,r8" "\n"
- "mov.l @r15+,r9" "\n"
- "mov.l @r15+,r10" "\n"
- "mov.l @r15+,r11" "\n"
- "mov.l @r15+,r13" "\n"
- "lds.l @r15+,pr" "\n"
- "mov.l @r15+,r14" "\n"
- "add #12, r15" "\n"
- "rts" "\n"
- "nop" "\n"
-);
-#else
- #error "JIT not supported on this platform."
-#endif
-
-#else // USE(JSVALUE32_64)
-
-#if COMPILER(GCC) && CPU(X86_64) && !OS(WINDOWS)
-
-// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
-// need to change the assembly trampolines below to match.
-COMPILE_ASSERT(offsetof(struct JITStackFrame, callFrame) == 0x58, JITStackFrame_callFrame_offset_matches_ctiTrampoline);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, code) == 0x48, JITStackFrame_code_offset_matches_ctiTrampoline);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, savedRBX) == 0x78, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
-
-asm (
-".text\n"
-".globl " SYMBOL_STRING(ctiTrampoline) "\n"
-HIDE_SYMBOL(ctiTrampoline) "\n"
-SYMBOL_STRING(ctiTrampoline) ":" "\n"
- "pushq %rbp" "\n"
- "movq %rsp, %rbp" "\n"
- "pushq %r12" "\n"
- "pushq %r13" "\n"
- "pushq %r14" "\n"
- "pushq %r15" "\n"
- "pushq %rbx" "\n"
- // Form the JIT stubs area
- "pushq %r9" "\n"
- "pushq %r8" "\n"
- "pushq %rcx" "\n"
- "pushq %rdx" "\n"
- "pushq %rsi" "\n"
- "pushq %rdi" "\n"
- "subq $0x48, %rsp" "\n"
- "movq $512, %r12" "\n"
- "movq $0xFFFF000000000000, %r14" "\n"
- "movq $0xFFFF000000000002, %r15" "\n"
- "movq %rdx, %r13" "\n"
- "call *%rdi" "\n"
- "addq $0x78, %rsp" "\n"
- "popq %rbx" "\n"
- "popq %r15" "\n"
- "popq %r14" "\n"
- "popq %r13" "\n"
- "popq %r12" "\n"
- "popq %rbp" "\n"
- "ret" "\n"
-".globl " SYMBOL_STRING(ctiTrampolineEnd) "\n"
-HIDE_SYMBOL(ctiTrampolineEnd) "\n"
-SYMBOL_STRING(ctiTrampolineEnd) ":" "\n"
-);
-
-asm (
-".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
-HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
-SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
- "movq %rsp, %rdi" "\n"
- "call " LOCAL_REFERENCE(cti_vm_throw) "\n"
- "int3" "\n"
-);
-
-asm (
-".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
-HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
-SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
- "addq $0x78, %rsp" "\n"
- "popq %rbx" "\n"
- "popq %r15" "\n"
- "popq %r14" "\n"
- "popq %r13" "\n"
- "popq %r12" "\n"
- "popq %rbp" "\n"
- "ret" "\n"
-);
-
-#elif COMPILER(GCC) && CPU(X86_64) && OS(WINDOWS)
-
-// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
-// need to change the assembly trampolines below to match.
-COMPILE_ASSERT(offsetof(struct JITStackFrame, code) % 16 == 0x0, JITStackFrame_maintains_16byte_stack_alignment);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, savedRBX) == 0x58, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
-
-asm (
-".text\n"
-".globl " SYMBOL_STRING(ctiTrampoline) "\n"
-HIDE_SYMBOL(ctiTrampoline) "\n"
-SYMBOL_STRING(ctiTrampoline) ":" "\n"
- // Dump register parameters to their home address
- "movq %r9, 0x20(%rsp)" "\n"
- "movq %r8, 0x18(%rsp)" "\n"
- "movq %rdx, 0x10(%rsp)" "\n"
- "movq %rcx, 0x8(%rsp)" "\n"
-
- "pushq %rbp" "\n"
- "movq %rsp, %rbp" "\n"
- "pushq %r12" "\n"
- "pushq %r13" "\n"
- "pushq %r14" "\n"
- "pushq %r15" "\n"
- "pushq %rbx" "\n"
-
- // Decrease rsp to point to the start of our JITStackFrame
- "subq $0x58, %rsp" "\n"
- "movq $512, %r12" "\n"
- "movq $0xFFFF000000000000, %r14" "\n"
- "movq $0xFFFF000000000002, %r15" "\n"
- "movq %r8, %r13" "\n"
- "call *%rcx" "\n"
- "addq $0x58, %rsp" "\n"
- "popq %rbx" "\n"
- "popq %r15" "\n"
- "popq %r14" "\n"
- "popq %r13" "\n"
- "popq %r12" "\n"
- "popq %rbp" "\n"
- "ret" "\n"
-".globl " SYMBOL_STRING(ctiTrampolineEnd) "\n"
-HIDE_SYMBOL(ctiTrampolineEnd) "\n"
-SYMBOL_STRING(ctiTrampolineEnd) ":" "\n"
-);
-
-asm (
-".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
-HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
-SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
- "movq %rsp, %rcx" "\n"
- "call " LOCAL_REFERENCE(cti_vm_throw) "\n"
- "int3" "\n"
-);
-
-asm (
-".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
-HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
-SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
- "addq $0x58, %rsp" "\n"
- "popq %rbx" "\n"
- "popq %r15" "\n"
- "popq %r14" "\n"
- "popq %r13" "\n"
- "popq %r12" "\n"
- "popq %rbp" "\n"
- "ret" "\n"
-);
-
-#elif COMPILER(MSVC) && CPU(X86_64)
-
-// These ASSERTs remind you that, if you change the layout of JITStackFrame, you
-// need to change the assembly trampolines in JITStubsMSVC64.asm to match.
-COMPILE_ASSERT(offsetof(struct JITStackFrame, code) % 16 == 0x0, JITStackFrame_maintains_16byte_stack_alignment);
-COMPILE_ASSERT(offsetof(struct JITStackFrame, savedRBX) == 0x58, JITStackFrame_stub_argument_space_matches_ctiTrampoline);
-
-#else
- #error "JIT not supported on this platform."
-#endif
-
-#endif // USE(JSVALUE32_64)
-
-#if CPU(MIPS)
-asm (
-".text" "\n"
-".align 2" "\n"
-".set noreorder" "\n"
-".set nomacro" "\n"
-".set nomips16" "\n"
-".globl " SYMBOL_STRING(ctiTrampoline) "\n"
-".ent " SYMBOL_STRING(ctiTrampoline) "\n"
-SYMBOL_STRING(ctiTrampoline) ":" "\n"
- "addiu $29,$29,-" STRINGIZE_VALUE_OF(STACK_LENGTH) "\n"
- "sw $31," STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "($29)" "\n"
- "sw $20," STRINGIZE_VALUE_OF(PRESERVED_S4_OFFSET) "($29)" "\n"
- "sw $19," STRINGIZE_VALUE_OF(PRESERVED_S3_OFFSET) "($29)" "\n"
- "sw $18," STRINGIZE_VALUE_OF(PRESERVED_S2_OFFSET) "($29)" "\n"
- "sw $17," STRINGIZE_VALUE_OF(PRESERVED_S1_OFFSET) "($29)" "\n"
- "sw $16," STRINGIZE_VALUE_OF(PRESERVED_S0_OFFSET) "($29)" "\n"
-#if WTF_MIPS_PIC
- "sw $28," STRINGIZE_VALUE_OF(PRESERVED_GP_OFFSET) "($29)" "\n"
-#endif
- "move $16,$6 # set callFrameRegister" "\n"
- "move $25,$4 # move executableAddress to t9" "\n"
- "sw $5," STRINGIZE_VALUE_OF(REGISTER_FILE_OFFSET) "($29) # store JSStack to current stack" "\n"
- "lw $9," STRINGIZE_VALUE_OF(STACK_LENGTH + 20) "($29) # load vm from previous stack" "\n"
- "jalr $25" "\n"
- "sw $9," STRINGIZE_VALUE_OF(GLOBAL_DATA_OFFSET) "($29) # store vm to current stack" "\n"
- "lw $16," STRINGIZE_VALUE_OF(PRESERVED_S0_OFFSET) "($29)" "\n"
- "lw $17," STRINGIZE_VALUE_OF(PRESERVED_S1_OFFSET) "($29)" "\n"
- "lw $18," STRINGIZE_VALUE_OF(PRESERVED_S2_OFFSET) "($29)" "\n"
- "lw $19," STRINGIZE_VALUE_OF(PRESERVED_S3_OFFSET) "($29)" "\n"
- "lw $20," STRINGIZE_VALUE_OF(PRESERVED_S4_OFFSET) "($29)" "\n"
- "lw $31," STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "($29)" "\n"
- "jr $31" "\n"
- "addiu $29,$29," STRINGIZE_VALUE_OF(STACK_LENGTH) "\n"
-".set reorder" "\n"
-".set macro" "\n"
-".end " SYMBOL_STRING(ctiTrampoline) "\n"
-".globl " SYMBOL_STRING(ctiTrampolineEnd) "\n"
-HIDE_SYMBOL(ctiTrampolineEnd) "\n"
-SYMBOL_STRING(ctiTrampolineEnd) ":" "\n"
-);
-
-asm (
-".text" "\n"
-".align 2" "\n"
-".set noreorder" "\n"
-".set nomacro" "\n"
-".set nomips16" "\n"
-".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
-".ent " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
-SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
-#if WTF_MIPS_PIC
-".set macro" "\n"
-".cpload $31" "\n"
- "la $25," SYMBOL_STRING(cti_vm_throw) "\n"
-".set nomacro" "\n"
- "bal " SYMBOL_STRING(cti_vm_throw) "\n"
- "move $4,$29" "\n"
-#else
- "jal " SYMBOL_STRING(cti_vm_throw) "\n"
- "move $4,$29" "\n"
-#endif
- "lw $16," STRINGIZE_VALUE_OF(PRESERVED_S0_OFFSET) "($29)" "\n"
- "lw $17," STRINGIZE_VALUE_OF(PRESERVED_S1_OFFSET) "($29)" "\n"
- "lw $18," STRINGIZE_VALUE_OF(PRESERVED_S2_OFFSET) "($29)" "\n"
- "lw $19," STRINGIZE_VALUE_OF(PRESERVED_S3_OFFSET) "($29)" "\n"
- "lw $20," STRINGIZE_VALUE_OF(PRESERVED_S4_OFFSET) "($29)" "\n"
- "lw $31," STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "($29)" "\n"
- "jr $31" "\n"
- "addiu $29,$29," STRINGIZE_VALUE_OF(STACK_LENGTH) "\n"
-".set reorder" "\n"
-".set macro" "\n"
-".end " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
-);
-
-asm (
-".text" "\n"
-".align 2" "\n"
-".set noreorder" "\n"
-".set nomacro" "\n"
-".set nomips16" "\n"
-".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
-".ent " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
-SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
- "lw $16," STRINGIZE_VALUE_OF(PRESERVED_S0_OFFSET) "($29)" "\n"
- "lw $17," STRINGIZE_VALUE_OF(PRESERVED_S1_OFFSET) "($29)" "\n"
- "lw $18," STRINGIZE_VALUE_OF(PRESERVED_S2_OFFSET) "($29)" "\n"
- "lw $19," STRINGIZE_VALUE_OF(PRESERVED_S3_OFFSET) "($29)" "\n"
- "lw $20," STRINGIZE_VALUE_OF(PRESERVED_S4_OFFSET) "($29)" "\n"
- "lw $31," STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "($29)" "\n"
- "jr $31" "\n"
- "addiu $29,$29," STRINGIZE_VALUE_OF(STACK_LENGTH) "\n"
-".set reorder" "\n"
-".set macro" "\n"
-".end " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
-);
-#endif
-
-#if COMPILER(GCC) && CPU(ARM_THUMB2)
-
-asm (
-".text" "\n"
-".align 2" "\n"
-".globl " SYMBOL_STRING(ctiTrampoline) "\n"
-HIDE_SYMBOL(ctiTrampoline) "\n"
-".thumb" "\n"
-".thumb_func " THUMB_FUNC_PARAM(ctiTrampoline) "\n"
-SYMBOL_STRING(ctiTrampoline) ":" "\n"
- "sub sp, sp, #" STRINGIZE_VALUE_OF(FIRST_STACK_ARGUMENT) "\n"
- "str lr, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "]" "\n"
- "str r4, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R4_OFFSET) "]" "\n"
- "str r5, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R5_OFFSET) "]" "\n"
- "str r6, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R6_OFFSET) "]" "\n"
- "str r7, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R7_OFFSET) "]" "\n"
- "str r8, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R8_OFFSET) "]" "\n"
- "str r9, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R9_OFFSET) "]" "\n"
- "str r10, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R10_OFFSET) "]" "\n"
- "str r11, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R11_OFFSET) "]" "\n"
- "str r1, [sp, #" STRINGIZE_VALUE_OF(REGISTER_FILE_OFFSET) "]" "\n"
- "mov r5, r2" "\n"
- "blx r0" "\n"
- "ldr r11, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R11_OFFSET) "]" "\n"
- "ldr r10, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R10_OFFSET) "]" "\n"
- "ldr r9, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R9_OFFSET) "]" "\n"
- "ldr r8, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R8_OFFSET) "]" "\n"
- "ldr r7, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R7_OFFSET) "]" "\n"
- "ldr r6, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R6_OFFSET) "]" "\n"
- "ldr r5, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R5_OFFSET) "]" "\n"
- "ldr r4, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R4_OFFSET) "]" "\n"
- "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "]" "\n"
- "add sp, sp, #" STRINGIZE_VALUE_OF(FIRST_STACK_ARGUMENT) "\n"
- "bx lr" "\n"
-".align 2" "\n"
-".globl " SYMBOL_STRING(ctiTrampolineEnd) "\n"
-HIDE_SYMBOL(ctiTrampolineEnd) "\n"
-".thumb" "\n"
-".thumb_func " THUMB_FUNC_PARAM(ctiTrampolineEnd) "\n"
-SYMBOL_STRING(ctiTrampolineEnd) ":" "\n"
-);
-
-asm (
-".text" "\n"
-".align 2" "\n"
-".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
-HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
-".thumb" "\n"
-".thumb_func " THUMB_FUNC_PARAM(ctiVMThrowTrampoline) "\n"
-SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
- "mov r0, sp" "\n"
- "bl " LOCAL_REFERENCE(cti_vm_throw) "\n"
- "ldr r11, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R11_OFFSET) "]" "\n"
- "ldr r10, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R10_OFFSET) "]" "\n"
- "ldr r9, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R9_OFFSET) "]" "\n"
- "ldr r8, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R8_OFFSET) "]" "\n"
- "ldr r7, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R7_OFFSET) "]" "\n"
- "ldr r6, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R6_OFFSET) "]" "\n"
- "ldr r5, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R5_OFFSET) "]" "\n"
- "ldr r4, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R4_OFFSET) "]" "\n"
- "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "]" "\n"
- "add sp, sp, #" STRINGIZE_VALUE_OF(FIRST_STACK_ARGUMENT) "\n"
- "bx lr" "\n"
-);
-
-asm (
-".text" "\n"
-".align 2" "\n"
-".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
-HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
-".thumb" "\n"
-".thumb_func " THUMB_FUNC_PARAM(ctiOpThrowNotCaught) "\n"
-SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
- "ldr r11, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R11_OFFSET) "]" "\n"
- "ldr r10, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R10_OFFSET) "]" "\n"
- "ldr r9, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R9_OFFSET) "]" "\n"
- "ldr r8, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R8_OFFSET) "]" "\n"
- "ldr r7, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R7_OFFSET) "]" "\n"
- "ldr r6, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R6_OFFSET) "]" "\n"
- "ldr r5, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R5_OFFSET) "]" "\n"
- "ldr r4, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_R4_OFFSET) "]" "\n"
- "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PRESERVED_RETURN_ADDRESS_OFFSET) "]" "\n"
- "add sp, sp, #" STRINGIZE_VALUE_OF(FIRST_STACK_ARGUMENT) "\n"
- "bx lr" "\n"
-);
-
-#elif COMPILER(GCC) && CPU(ARM_TRADITIONAL)
-
-asm (
-".text" "\n"
-".globl " SYMBOL_STRING(ctiTrampoline) "\n"
-HIDE_SYMBOL(ctiTrampoline) "\n"
-INLINE_ARM_FUNCTION(ctiTrampoline)
-SYMBOL_STRING(ctiTrampoline) ":" "\n"
- "stmdb sp!, {r1-r3}" "\n"
- "stmdb sp!, {r4-r6, r8-r11, lr}" "\n"
- "sub sp, sp, #" STRINGIZE_VALUE_OF(PRESERVEDR4_OFFSET) "\n"
- "mov r5, r2" "\n"
- // r0 contains the code
- "blx r0" "\n"
- "add sp, sp, #" STRINGIZE_VALUE_OF(PRESERVEDR4_OFFSET) "\n"
- "ldmia sp!, {r4-r6, r8-r11, lr}" "\n"
- "add sp, sp, #12" "\n"
- "bx lr" "\n"
-".globl " SYMBOL_STRING(ctiTrampolineEnd) "\n"
-HIDE_SYMBOL(ctiTrampolineEnd) "\n"
-SYMBOL_STRING(ctiTrampolineEnd) ":" "\n"
-);
-
-asm (
-".text" "\n"
-".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n"
-HIDE_SYMBOL(ctiVMThrowTrampoline) "\n"
-INLINE_ARM_FUNCTION(ctiVMThrowTrampoline)
-SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n"
- "mov r0, sp" "\n"
- "bl " SYMBOL_STRING(cti_vm_throw) "\n"
-
-// Both share the same return sequence
-".text" "\n"
-".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n"
-HIDE_SYMBOL(ctiOpThrowNotCaught) "\n"
-INLINE_ARM_FUNCTION(ctiOpThrowNotCaught)
-SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n"
- "add sp, sp, #" STRINGIZE_VALUE_OF(PRESERVEDR4_OFFSET) "\n"
- "ldmia sp!, {r4-r6, r8-r11, lr}" "\n"
- "add sp, sp, #12" "\n"
- "bx lr" "\n"
-);
-
-#elif COMPILER(RVCT) && CPU(ARM_THUMB2)
-
-__asm EncodedJSValue ctiTrampoline(void*, JSStack*, CallFrame*, void* /*unused1*/, void* /*unused2*/, VM*)
-{
- PRESERVE8
- sub sp, sp, # FIRST_STACK_ARGUMENT
- str lr, [sp, # PRESERVED_RETURN_ADDRESS_OFFSET ]
- str r4, [sp, # PRESERVED_R4_OFFSET ]
- str r5, [sp, # PRESERVED_R5_OFFSET ]
- str r6, [sp, # PRESERVED_R6_OFFSET ]
- str r7, [sp, # PRESERVED_R7_OFFSET ]
- str r8, [sp, # PRESERVED_R8_OFFSET ]
- str r9, [sp, # PRESERVED_R9_OFFSET ]
- str r10, [sp, # PRESERVED_R10_OFFSET ]
- str r11, [sp, # PRESERVED_R11_OFFSET ]
- str r1, [sp, # REGISTER_FILE_OFFSET ]
- mov r5, r2
- blx r0
- ldr r11, [sp, # PRESERVED_R11_OFFSET ]
- ldr r10, [sp, # PRESERVED_R10_OFFSET ]
- ldr r9, [sp, # PRESERVED_R9_OFFSET ]
- ldr r8, [sp, # PRESERVED_R8_OFFSET ]
- ldr r7, [sp, # PRESERVED_R7_OFFSET ]
- ldr r6, [sp, # PRESERVED_R6_OFFSET ]
- ldr r5, [sp, # PRESERVED_R5_OFFSET ]
- ldr r4, [sp, # PRESERVED_R4_OFFSET ]
- ldr lr, [sp, # PRESERVED_RETURN_ADDRESS_OFFSET ]
- add sp, sp, # FIRST_STACK_ARGUMENT
- bx lr
-}
-
-__asm void ctiVMThrowTrampoline()
-{
- PRESERVE8
- mov r0, sp
- bl cti_vm_throw
- ldr r11, [sp, # PRESERVED_R11_OFFSET ]
- ldr r10, [sp, # PRESERVED_R10_OFFSET ]
- ldr r9, [sp, # PRESERVED_R9_OFFSET ]
- ldr r8, [sp, # PRESERVED_R8_OFFSET ]
- ldr r7, [sp, # PRESERVED_R7_OFFSET ]
- ldr r6, [sp, # PRESERVED_R6_OFFSET ]
- ldr r6, [sp, # PRESERVED_R6_OFFSET ]
- ldr r5, [sp, # PRESERVED_R5_OFFSET ]
- ldr r4, [sp, # PRESERVED_R4_OFFSET ]
- ldr lr, [sp, # PRESERVED_RETURN_ADDRESS_OFFSET ]
- add sp, sp, # FIRST_STACK_ARGUMENT
- bx lr
-}
-
-__asm void ctiOpThrowNotCaught()
-{
- PRESERVE8
- ldr r11, [sp, # PRESERVED_R11_OFFSET ]
- ldr r10, [sp, # PRESERVED_R10_OFFSET ]
- ldr r9, [sp, # PRESERVED_R9_OFFSET ]
- ldr r8, [sp, # PRESERVED_R8_OFFSET ]
- ldr r7, [sp, # PRESERVED_R7_OFFSET ]
- ldr r6, [sp, # PRESERVED_R6_OFFSET ]
- ldr r6, [sp, # PRESERVED_R6_OFFSET ]
- ldr r5, [sp, # PRESERVED_R5_OFFSET ]
- ldr r4, [sp, # PRESERVED_R4_OFFSET ]
- ldr lr, [sp, # PRESERVED_RETURN_ADDRESS_OFFSET ]
- add sp, sp, # FIRST_STACK_ARGUMENT
- bx lr
-}
-
-#elif COMPILER(RVCT) && CPU(ARM_TRADITIONAL)
-
-__asm EncodedJSValue ctiTrampoline(void*, JSStack*, CallFrame*, void* /*unused1*/, void* /*unused2*/, VM*)
-{
- ARM
- stmdb sp!, {r1-r3}
- stmdb sp!, {r4-r6, r8-r11, lr}
- sub sp, sp, # PRESERVEDR4_OFFSET
- mov r5, r2
- mov lr, pc
- bx r0
- add sp, sp, # PRESERVEDR4_OFFSET
- ldmia sp!, {r4-r6, r8-r11, lr}
- add sp, sp, #12
- bx lr
-}
-__asm void ctiTrampolineEnd()
-{
-}
-
-__asm void ctiVMThrowTrampoline()
-{
- ARM
- PRESERVE8
- mov r0, sp
- bl cti_vm_throw
- add sp, sp, # PRESERVEDR4_OFFSET
- ldmia sp!, {r4-r6, r8-r11, lr}
- add sp, sp, #12
- bx lr
-}
-
-__asm void ctiOpThrowNotCaught()
-{
- ARM
- add sp, sp, # PRESERVEDR4_OFFSET
- ldmia sp!, {r4-r8, lr}
- add sp, sp, #12
- bx lr
-}
-#endif
-
-#if ENABLE(OPCODE_SAMPLING)
- #define CTI_SAMPLER stackFrame.vm->interpreter->sampler()
-#else
- #define CTI_SAMPLER 0
-#endif
-
-void performPlatformSpecificJITAssertions(VM* vm)
-{
- if (!vm->canUseJIT())
- return;
-
-#if CPU(ARM_THUMB2)
- // Unfortunately, the ARM compiler does not like the use of offsetof on JITStackFrame (since it contains non-POD types),
- // and the OBJECT_OFFSETOF macro does not appear to be enough of a constant expression for it to be happy with its use
- // in COMPILE_ASSERT macros.
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedReturnAddress) == PRESERVED_RETURN_ADDRESS_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR4) == PRESERVED_R4_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR5) == PRESERVED_R5_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR6) == PRESERVED_R6_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR7) == PRESERVED_R7_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR8) == PRESERVED_R8_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR9) == PRESERVED_R9_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR10) == PRESERVED_R10_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR11) == PRESERVED_R11_OFFSET);
-
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, stack) == REGISTER_FILE_OFFSET);
- // The fifth argument is the first item already on the stack.
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, unused1) == FIRST_STACK_ARGUMENT);
-
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, thunkReturnAddress) == THUNK_RETURN_ADDRESS_OFFSET);
-
-#elif CPU(ARM_TRADITIONAL)
-
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, thunkReturnAddress) == THUNK_RETURN_ADDRESS_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedR4) == PRESERVEDR4_OFFSET);
-
-
-#elif CPU(MIPS)
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedGP) == PRESERVED_GP_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedS0) == PRESERVED_S0_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedS1) == PRESERVED_S1_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedS2) == PRESERVED_S2_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, preservedReturnAddress) == PRESERVED_RETURN_ADDRESS_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, thunkReturnAddress) == THUNK_RETURN_ADDRESS_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, stack) == REGISTER_FILE_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, vm) == GLOBAL_DATA_OFFSET);
-
-#elif CPU(SH4)
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, thunkReturnAddress) == THUNK_RETURN_ADDRESS_OFFSET);
- ASSERT(OBJECT_OFFSETOF(struct JITStackFrame, savedR8) == SAVED_R8_OFFSET);
-#endif
-}
-
-NEVER_INLINE static void tryCachePutByID(CallFrame* callFrame, CodeBlock* codeBlock, ReturnAddressPtr returnAddress, JSValue baseValue, const PutPropertySlot& slot, StructureStubInfo* stubInfo, bool direct)
-{
- // The interpreter checks for recursion here; I do not believe this can occur in CTI.
-
- if (!baseValue.isCell())
- return;
-
- // Uncacheable: give up.
- if (!slot.isCacheable()) {
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
- return;
- }
-
- JSCell* baseCell = baseValue.asCell();
- Structure* structure = baseCell->structure();
-
- if (structure->isUncacheableDictionary() || structure->typeInfo().prohibitsPropertyCaching()) {
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
- return;
- }
-
- // If baseCell != base, then baseCell must be a proxy for another object.
- if (baseCell != slot.base()) {
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
- return;
- }
-
- // Cache hit: Specialize instruction and ref Structures.
-
- // Structure transition, cache transition info
- if (slot.type() == PutPropertySlot::NewProperty) {
- if (structure->isDictionary()) {
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
- return;
- }
-
- // put_by_id_transition checks the prototype chain for setters.
- if (normalizePrototypeChain(callFrame, baseCell) == InvalidPrototypeChain) {
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic));
- return;
- }
-
- StructureChain* prototypeChain = structure->prototypeChain(callFrame);
- ASSERT(structure->previousID()->transitionWatchpointSetHasBeenInvalidated());
- stubInfo->initPutByIdTransition(callFrame->vm(), codeBlock->ownerExecutable(), structure->previousID(), structure, prototypeChain, direct);
- JIT::compilePutByIdTransition(callFrame->scope()->vm(), codeBlock, stubInfo, structure->previousID(), structure, slot.cachedOffset(), prototypeChain, returnAddress, direct);
- return;
- }
-
- stubInfo->initPutByIdReplace(callFrame->vm(), codeBlock->ownerExecutable(), structure);
-
- JIT::patchPutByIdReplace(codeBlock, stubInfo, structure, slot.cachedOffset(), returnAddress, direct);
-}
-
-NEVER_INLINE static void tryCacheGetByID(CallFrame* callFrame, CodeBlock* codeBlock, ReturnAddressPtr returnAddress, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo* stubInfo)
-{
- // FIXME: Write a test that proves we need to check for recursion here just
- // like the interpreter does, then add a check for recursion.
-
- // FIXME: Cache property access for immediates.
- if (!baseValue.isCell()) {
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_generic));
- return;
- }
-
- VM* vm = &callFrame->vm();
-
- if (isJSArray(baseValue) && propertyName == callFrame->propertyNames().length) {
- JIT::compilePatchGetArrayLength(callFrame->scope()->vm(), codeBlock, returnAddress);
- return;
- }
-
- if (isJSString(baseValue) && propertyName == callFrame->propertyNames().length) {
- // The tradeoff of compiling a patched inline string length access routine does not seem
- // to pay off, so we currently only do this for arrays.
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, vm->getCTIStub(stringLengthTrampolineGenerator).code());
- return;
- }
-
- // Uncacheable: give up.
- if (!slot.isCacheable()) {
- stubInfo->accessType = access_get_by_id_generic;
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_generic));
- return;
- }
-
- JSCell* baseCell = baseValue.asCell();
- Structure* structure = baseCell->structure();
-
- if (structure->isUncacheableDictionary() || structure->typeInfo().prohibitsPropertyCaching()) {
- stubInfo->accessType = access_get_by_id_generic;
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_generic));
- return;
- }
-
- // Cache hit: Specialize instruction and ref Structures.
-
- if (slot.slotBase() == baseValue) {
- RELEASE_ASSERT(stubInfo->accessType == access_unset);
- if ((slot.cachedPropertyType() != PropertySlot::Value)
- || !MacroAssembler::isCompactPtrAlignedAddressOffset(maxOffsetRelativeToPatchedStorage(slot.cachedOffset())))
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_self_fail));
- else {
- JIT::patchGetByIdSelf(codeBlock, stubInfo, structure, slot.cachedOffset(), returnAddress);
- stubInfo->initGetByIdSelf(callFrame->vm(), codeBlock->ownerExecutable(), structure);
- }
- return;
- }
-
- if (structure->isDictionary()) {
- stubInfo->accessType = access_get_by_id_generic;
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_generic));
- return;
- }
-
- if (slot.slotBase() == structure->prototypeForLookup(callFrame)) {
- ASSERT(slot.slotBase().isObject());
-
- JSObject* slotBaseObject = asObject(slot.slotBase());
- size_t offset = slot.cachedOffset();
-
- if (structure->typeInfo().hasImpureGetOwnPropertySlot()) {
- stubInfo->accessType = access_get_by_id_generic;
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_generic));
- return;
- }
-
- // Since we're accessing a prototype in a loop, it's a good bet that it
- // should not be treated as a dictionary.
- if (slotBaseObject->structure()->isDictionary()) {
- slotBaseObject->flattenDictionaryObject(callFrame->vm());
- offset = slotBaseObject->structure()->get(callFrame->vm(), propertyName);
- }
-
- stubInfo->initGetByIdProto(callFrame->vm(), codeBlock->ownerExecutable(), structure, slotBaseObject->structure(), slot.cachedPropertyType() == PropertySlot::Value);
-
- ASSERT(!structure->isDictionary());
- ASSERT(!slotBaseObject->structure()->isDictionary());
- JIT::compileGetByIdProto(callFrame->scope()->vm(), callFrame, codeBlock, stubInfo, structure, slotBaseObject->structure(), propertyName, slot, offset, returnAddress);
- return;
- }
-
- PropertyOffset offset = slot.cachedOffset();
- size_t count = normalizePrototypeChainForChainAccess(callFrame, baseValue, slot.slotBase(), propertyName, offset);
- if (count == InvalidPrototypeChain) {
- stubInfo->accessType = access_get_by_id_generic;
- ctiPatchCallByReturnAddress(codeBlock, returnAddress, FunctionPtr(cti_op_get_by_id_generic));
- return;
- }
-
- StructureChain* prototypeChain = structure->prototypeChain(callFrame);
- stubInfo->initGetByIdChain(callFrame->vm(), codeBlock->ownerExecutable(), structure, prototypeChain, count, slot.cachedPropertyType() == PropertySlot::Value);
- JIT::compileGetByIdChain(callFrame->scope()->vm(), callFrame, codeBlock, stubInfo, structure, prototypeChain, count, propertyName, slot, offset, returnAddress);
-}
-
-#if !defined(NDEBUG)
-
-extern "C" {
-
-static void jscGeneratedNativeCode()
-{
- // When executing a JIT stub function (which might do an allocation), we hack the return address
- // to pretend to be executing this function, to keep stack logging tools from blowing out
- // memory.
-}
-
-}
-
-struct StackHack {
- ALWAYS_INLINE StackHack(JITStackFrame& stackFrame)
- : stackFrame(stackFrame)
- , savedReturnAddress(*stackFrame.returnAddressSlot())
- {
- if (!CodeProfiling::enabled())
- *stackFrame.returnAddressSlot() = ReturnAddressPtr(FunctionPtr(jscGeneratedNativeCode));
- }
-
- ALWAYS_INLINE ~StackHack()
- {
- *stackFrame.returnAddressSlot() = savedReturnAddress;
- }
-
- JITStackFrame& stackFrame;
- ReturnAddressPtr savedReturnAddress;
-};
-
-#define STUB_INIT_STACK_FRAME(stackFrame) JITStackFrame& stackFrame = *reinterpret_cast_ptr<JITStackFrame*>(STUB_ARGS); StackHack stackHack(stackFrame)
-#define STUB_SET_RETURN_ADDRESS(returnAddress) stackHack.savedReturnAddress = ReturnAddressPtr(returnAddress)
-#define STUB_RETURN_ADDRESS stackHack.savedReturnAddress
-
-#else
-
-#define STUB_INIT_STACK_FRAME(stackFrame) JITStackFrame& stackFrame = *reinterpret_cast_ptr<JITStackFrame*>(STUB_ARGS)
-#define STUB_SET_RETURN_ADDRESS(returnAddress) *stackFrame.returnAddressSlot() = ReturnAddressPtr(returnAddress)
-#define STUB_RETURN_ADDRESS *stackFrame.returnAddressSlot()
-
-#endif
-
-// The reason this is not inlined is to avoid having to do a PIC branch
-// to get the address of the ctiVMThrowTrampoline function. It's also
-// good to keep the code size down by leaving as much of the exception
-// handling code out of line as possible.
-static NEVER_INLINE void returnToThrowTrampoline(VM* vm, ReturnAddressPtr exceptionLocation, ReturnAddressPtr& returnAddressSlot)
-{
- RELEASE_ASSERT(vm->exception);
- vm->exceptionLocation = exceptionLocation;
- returnAddressSlot = ReturnAddressPtr(FunctionPtr(ctiVMThrowTrampoline));
-}
-
-#define VM_THROW_EXCEPTION() \
- do { \
- VM_THROW_EXCEPTION_AT_END(); \
- return 0; \
- } while (0)
-#define VM_THROW_EXCEPTION_AT_END() \
- do {\
- returnToThrowTrampoline(stackFrame.vm, STUB_RETURN_ADDRESS, STUB_RETURN_ADDRESS);\
- } while (0)
-
-#define CHECK_FOR_EXCEPTION() \
- do { \
- if (UNLIKELY(stackFrame.vm->exception)) \
- VM_THROW_EXCEPTION(); \
- } while (0)
-#define CHECK_FOR_EXCEPTION_AT_END() \
- do { \
- if (UNLIKELY(stackFrame.vm->exception)) \
- VM_THROW_EXCEPTION_AT_END(); \
- } while (0)
-#define CHECK_FOR_EXCEPTION_VOID() \
- do { \
- if (UNLIKELY(stackFrame.vm->exception)) { \
- VM_THROW_EXCEPTION_AT_END(); \
- return; \
- } \
- } while (0)
-
-class ErrorFunctor {
-public:
- virtual ~ErrorFunctor() { }
- virtual JSValue operator()(ExecState*) = 0;
-};
-
-class ErrorWithExecFunctor : public ErrorFunctor {
-public:
- typedef JSObject* (*Factory)(ExecState* exec);
-
- ErrorWithExecFunctor(Factory factory)
- : m_factory(factory)
- {
- }
- JSValue operator()(ExecState* exec)
- {
- return m_factory(exec);
- }
-
-private:
- Factory m_factory;
-};
-
-class ErrorWithExecAndCalleeFunctor : public ErrorFunctor {
-public:
- typedef JSObject* (*Factory)(ExecState* exec, JSValue callee);
-
- ErrorWithExecAndCalleeFunctor(Factory factory, JSValue callee)
- : m_factory(factory), m_callee(callee)
- {
- }
- JSValue operator()(ExecState* exec)
- {
- return m_factory(exec, m_callee);
- }
-private:
- Factory m_factory;
- JSValue m_callee;
-};
-
-class ErrorWithExceptionFunctor : public ErrorFunctor {
-public:
- ErrorWithExceptionFunctor(JSValue exception)
- : m_exception(exception)
- {
- }
- JSValue operator()(ExecState*)
- {
- return m_exception;
- }
-
-private:
- JSValue m_exception;
-};
-
-// Helper function for JIT stubs that may throw an exception in the middle of
-// processing a function call. This function rolls back the stack to
-// our caller, so exception processing can proceed from a valid state.
-template<typename T> static T throwExceptionFromOpCall(JITStackFrame& jitStackFrame, CallFrame* newCallFrame, ReturnAddressPtr& returnAddressSlot, ErrorFunctor& createError)
-{
- CallFrame* callFrame = newCallFrame->callerFrame();
- jitStackFrame.callFrame = callFrame;
- callFrame->vm().topCallFrame = callFrame;
- callFrame->vm().exception = createError(callFrame);
- ASSERT(callFrame->vm().exception);
- returnToThrowTrampoline(&callFrame->vm(), ReturnAddressPtr(newCallFrame->returnPC()), returnAddressSlot);
- return T();
-}
-
-template<typename T> static T throwExceptionFromOpCall(JITStackFrame& jitStackFrame, CallFrame* newCallFrame, ReturnAddressPtr& returnAddressSlot)
-{
- CallFrame* callFrame = newCallFrame->callerFrame();
- ASSERT(callFrame->vm().exception);
- ErrorWithExceptionFunctor functor = ErrorWithExceptionFunctor(callFrame->vm().exception);
- return throwExceptionFromOpCall<T>(jitStackFrame, newCallFrame, returnAddressSlot, functor);
-}
-
-#if CPU(ARM_THUMB2) && COMPILER(GCC)
-
-#define DEFINE_STUB_FUNCTION(rtype, op) \
- extern "C" { \
- rtype JITStubThunked_##op(STUB_ARGS_DECLARATION); \
- }; \
- asm ( \
- ".text" "\n" \
- ".align 2" "\n" \
- ".globl " SYMBOL_STRING(cti_##op) "\n" \
- HIDE_SYMBOL(cti_##op) "\n" \
- ".thumb" "\n" \
- ".thumb_func " THUMB_FUNC_PARAM(cti_##op) "\n" \
- SYMBOL_STRING(cti_##op) ":" "\n" \
- "str lr, [sp, #" STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "]" "\n" \
- "bl " SYMBOL_STRING(JITStubThunked_##op) "\n" \
- "ldr lr, [sp, #" STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "]" "\n" \
- "bx lr" "\n" \
- ); \
- rtype JITStubThunked_##op(STUB_ARGS_DECLARATION) \
-
-#elif CPU(MIPS)
-#if WTF_MIPS_PIC
-#define DEFINE_STUB_FUNCTION(rtype, op) \
- extern "C" { \
- rtype JITStubThunked_##op(STUB_ARGS_DECLARATION); \
- }; \
- asm ( \
- ".text" "\n" \
- ".align 2" "\n" \
- ".set noreorder" "\n" \
- ".set nomacro" "\n" \
- ".set nomips16" "\n" \
- ".globl " SYMBOL_STRING(cti_##op) "\n" \
- ".ent " SYMBOL_STRING(cti_##op) "\n" \
- SYMBOL_STRING(cti_##op) ":" "\n" \
- ".set macro" "\n" \
- ".cpload $25" "\n" \
- "sw $31," STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "($29)" "\n" \
- "la $25," SYMBOL_STRING(JITStubThunked_##op) "\n" \
- ".set nomacro" "\n" \
- ".reloc 1f,R_MIPS_JALR," SYMBOL_STRING(JITStubThunked_##op) "\n" \
- "1: jalr $25" "\n" \
- "nop" "\n" \
- "lw $31," STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "($29)" "\n" \
- "jr $31" "\n" \
- "nop" "\n" \
- ".set reorder" "\n" \
- ".set macro" "\n" \
- ".end " SYMBOL_STRING(cti_##op) "\n" \
- ); \
- rtype JITStubThunked_##op(STUB_ARGS_DECLARATION)
-
-#else // WTF_MIPS_PIC
-#define DEFINE_STUB_FUNCTION(rtype, op) \
- extern "C" { \
- rtype JITStubThunked_##op(STUB_ARGS_DECLARATION); \
- }; \
- asm ( \
- ".text" "\n" \
- ".align 2" "\n" \
- ".set noreorder" "\n" \
- ".set nomacro" "\n" \
- ".set nomips16" "\n" \
- ".globl " SYMBOL_STRING(cti_##op) "\n" \
- ".ent " SYMBOL_STRING(cti_##op) "\n" \
- SYMBOL_STRING(cti_##op) ":" "\n" \
- "sw $31," STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "($29)" "\n" \
- "jal " SYMBOL_STRING(JITStubThunked_##op) "\n" \
- "nop" "\n" \
- "lw $31," STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "($29)" "\n" \
- "jr $31" "\n" \
- "nop" "\n" \
- ".set reorder" "\n" \
- ".set macro" "\n" \
- ".end " SYMBOL_STRING(cti_##op) "\n" \
- ); \
- rtype JITStubThunked_##op(STUB_ARGS_DECLARATION)
-
-#endif
-
-#elif CPU(ARM_TRADITIONAL) && COMPILER(GCC)
-
-#define DEFINE_STUB_FUNCTION(rtype, op) \
- extern "C" { \
- rtype JITStubThunked_##op(STUB_ARGS_DECLARATION); \
- }; \
- asm ( \
- ".globl " SYMBOL_STRING(cti_##op) "\n" \
- INLINE_ARM_FUNCTION(cti_##op) \
- SYMBOL_STRING(cti_##op) ":" "\n" \
- "str lr, [sp, #" STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "]" "\n" \
- "bl " SYMBOL_STRING(JITStubThunked_##op) "\n" \
- "ldr lr, [sp, #" STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "]" "\n" \
- "bx lr" "\n" \
- ); \
- rtype JITStubThunked_##op(STUB_ARGS_DECLARATION)
-
-#elif (CPU(ARM_THUMB2) || CPU(ARM_TRADITIONAL)) && COMPILER(RVCT)
-
-#define DEFINE_STUB_FUNCTION(rtype, op) rtype JITStubThunked_##op(STUB_ARGS_DECLARATION)
-
-/* The following is a workaround for the RVCT toolchain; preprocessor macros are not expanded before the code is passed to the assembler */
-
-/* The following section is a template to generate code for GeneratedJITStubs_RVCT.h */
-/* The pattern "#xxx#" will be replaced with "xxx" */
-
-/*
-RVCT(extern "C" #rtype# JITStubThunked_#op#(STUB_ARGS_DECLARATION);)
-RVCT(__asm #rtype# cti_#op#(STUB_ARGS_DECLARATION))
-RVCT({)
-RVCT( PRESERVE8)
-RVCT( IMPORT JITStubThunked_#op#)
-RVCT( str lr, [sp, # THUNK_RETURN_ADDRESS_OFFSET])
-RVCT( bl JITStubThunked_#op#)
-RVCT( ldr lr, [sp, # THUNK_RETURN_ADDRESS_OFFSET])
-RVCT( bx lr)
-RVCT(})
-RVCT()
-*/
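/* For example, for the op_add stub defined later in this file (rtype EncodedJSValue),
   the "#xxx#" substitution above is expected to produce roughly the following entry in
   GeneratedJITStubs_RVCT.h. This is an illustrative sketch only; the exact formatting
   of the generated file may differ:

extern "C" EncodedJSValue JITStubThunked_op_add(STUB_ARGS_DECLARATION);
__asm EncodedJSValue cti_op_add(STUB_ARGS_DECLARATION)
{
    PRESERVE8
    IMPORT JITStubThunked_op_add
    str lr, [sp, # THUNK_RETURN_ADDRESS_OFFSET]
    bl JITStubThunked_op_add
    ldr lr, [sp, # THUNK_RETURN_ADDRESS_OFFSET]
    bx lr
}
*/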
-
-/* Include the generated file */
-#include "GeneratedJITStubs_RVCT.h"
-
-#elif CPU(ARM_TRADITIONAL) && COMPILER(MSVC)
-
-#define DEFINE_STUB_FUNCTION(rtype, op) extern "C" rtype JITStubThunked_##op(STUB_ARGS_DECLARATION)
-
-/* The following is a workaround for the MSVC toolchain; inline assembly is not supported */
-
-/* The following section is a template to generate code for GeneratedJITStubs_MSVC.asm */
-/* The pattern "#xxx#" will be replaced with "xxx" */
-
-/*
-MSVC_BEGIN( AREA Trampoline, CODE)
-MSVC_BEGIN()
-MSVC_BEGIN( EXPORT ctiTrampoline)
-MSVC_BEGIN( EXPORT ctiTrampolineEnd)
-MSVC_BEGIN( EXPORT ctiVMThrowTrampoline)
-MSVC_BEGIN( EXPORT ctiOpThrowNotCaught)
-MSVC_BEGIN()
-MSVC_BEGIN(ctiTrampoline PROC)
-MSVC_BEGIN( stmdb sp!, {r1-r3})
-MSVC_BEGIN( stmdb sp!, {r4-r6, r8-r11, lr})
-MSVC_BEGIN( sub sp, sp, #68 ; sync with PRESERVEDR4_OFFSET)
-MSVC_BEGIN( mov r5, r2)
-MSVC_BEGIN( ; r0 contains the code)
-MSVC_BEGIN( mov lr, pc)
-MSVC_BEGIN( bx r0)
-MSVC_BEGIN( add sp, sp, #68 ; sync with PRESERVEDR4_OFFSET)
-MSVC_BEGIN( ldmia sp!, {r4-r6, r8-r11, lr})
-MSVC_BEGIN( add sp, sp, #12)
-MSVC_BEGIN( bx lr)
-MSVC_BEGIN(ctiTrampolineEnd)
-MSVC_BEGIN(ctiTrampoline ENDP)
-MSVC_BEGIN()
-MSVC_BEGIN(ctiVMThrowTrampoline PROC)
-MSVC_BEGIN( mov r0, sp)
-MSVC_BEGIN( bl cti_vm_throw)
-MSVC_BEGIN(ctiOpThrowNotCaught)
-MSVC_BEGIN( add sp, sp, #68 ; sync with PRESERVEDR4_OFFSET)
-MSVC_BEGIN( ldmia sp!, {r4-r6, r8-r11, lr})
-MSVC_BEGIN( add sp, sp, #12)
-MSVC_BEGIN( bx lr)
-MSVC_BEGIN(ctiVMThrowTrampoline ENDP)
-MSVC_BEGIN()
-
-MSVC( EXPORT cti_#op#)
-MSVC( IMPORT JITStubThunked_#op#)
-MSVC(cti_#op# PROC)
-MSVC( str lr, [sp, #64] ; sync with THUNK_RETURN_ADDRESS_OFFSET)
-MSVC( bl JITStubThunked_#op#)
-MSVC( ldr lr, [sp, #64] ; sync with THUNK_RETURN_ADDRESS_OFFSET)
-MSVC( bx lr)
-MSVC(cti_#op# ENDP)
-MSVC()
-
-MSVC_END( END)
-*/
-
-#elif CPU(SH4)
-#define DEFINE_STUB_FUNCTION(rtype, op) \
- extern "C" { \
- rtype JITStubThunked_##op(STUB_ARGS_DECLARATION); \
- }; \
- asm volatile( \
- ".align 2" "\n" \
- ".globl " SYMBOL_STRING(cti_##op) "\n" \
- SYMBOL_STRING(cti_##op) ":" "\n" \
- "sts pr, r11" "\n" \
- "mov.l r11, @(" STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) ", r15)" "\n" \
- "mov.l .L2" SYMBOL_STRING(JITStubThunked_##op) ",r0" "\n" \
- "mov.l @(r0,r12),r11" "\n" \
- "jsr @r11" "\n" \
- "nop" "\n" \
- "mov.l @(" STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) ", r15), r11 " "\n" \
- "lds r11, pr " "\n" \
- "rts" "\n" \
- "nop" "\n" \
- ".align 2" "\n" \
- ".L2" SYMBOL_STRING(JITStubThunked_##op) ":.long " SYMBOL_STRING(JITStubThunked_##op)"@GOT \n" \
- ); \
- rtype JITStubThunked_##op(STUB_ARGS_DECLARATION)
-#else
-#define DEFINE_STUB_FUNCTION(rtype, op) rtype JIT_STUB cti_##op(STUB_ARGS_DECLARATION)
-#endif
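// Taken together, the machinery above gives every stub the same shape: a per-toolchain
// cti_* entry point that spills the return address into the JITStackFrame and then calls
// a plain C++ body defined with DEFINE_STUB_FUNCTION. The following is a minimal sketch
// of that shape, not one of the real stubs; the op name "op_example" is hypothetical and
// the body simply mirrors the pattern of the stubs below (compare cti_op_inc).
DEFINE_STUB_FUNCTION(EncodedJSValue, op_example)
{
    STUB_INIT_STACK_FRAME(stackFrame);

    CallFrame* callFrame = stackFrame.callFrame;

    // toNumber() can throw; CHECK_FOR_EXCEPTION_AT_END() redirects the stub's return to
    // ctiVMThrowTrampoline if vm->exception has been set.
    JSValue result = jsNumber(stackFrame.args[0].jsValue().toNumber(callFrame));
    CHECK_FOR_EXCEPTION_AT_END();
    return JSValue::encode(result);
}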
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_create_this)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = stackFrame.callFrame;
- size_t inlineCapacity = stackFrame.args[0].int32();
-
- JSFunction* constructor = jsCast<JSFunction*>(callFrame->callee());
-#if !ASSERT_DISABLED
- ConstructData constructData;
- ASSERT(constructor->methodTable()->getConstructData(constructor, constructData) == ConstructTypeJS);
-#endif
-
- Structure* structure = constructor->allocationProfile(callFrame, inlineCapacity)->structure();
- JSValue result = constructEmptyObject(callFrame, structure);
-
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_convert_this)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue v1 = stackFrame.args[0].jsValue();
- CallFrame* callFrame = stackFrame.callFrame;
-
- ASSERT(v1.isPrimitive());
-
- JSObject* result = v1.toThisObject(callFrame);
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_add)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue v1 = stackFrame.args[0].jsValue();
- JSValue v2 = stackFrame.args[1].jsValue();
- CallFrame* callFrame = stackFrame.callFrame;
-
- if (v1.isString() && !v2.isObject()) {
- JSValue result = jsString(callFrame, asString(v1), v2.toString(callFrame));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
- }
-
- if (v1.isNumber() && v2.isNumber())
- return JSValue::encode(jsNumber(v1.asNumber() + v2.asNumber()));
-
- // All other cases are pretty uncommon
- JSValue result = jsAddSlowCase(callFrame, v1, v2);
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_inc)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue v = stackFrame.args[0].jsValue();
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsNumber(v.toNumber(callFrame) + 1);
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(void, handle_watchdog_timer)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = stackFrame.callFrame;
- VM* vm = stackFrame.vm;
- if (UNLIKELY(vm->watchdog.didFire(callFrame))) {
- vm->exception = createTerminatedExecutionException(vm);
- VM_THROW_EXCEPTION_AT_END();
- return;
- }
-}
-
-DEFINE_STUB_FUNCTION(void*, stack_check)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = stackFrame.callFrame;
-
- if (UNLIKELY(!stackFrame.stack->grow(&callFrame->registers()[callFrame->codeBlock()->m_numCalleeRegisters]))) {
- ErrorWithExecFunctor functor = ErrorWithExecFunctor(createStackOverflowError);
- return throwExceptionFromOpCall<void*>(stackFrame, callFrame, STUB_RETURN_ADDRESS, functor);
- }
-
- return callFrame;
-}
-
-DEFINE_STUB_FUNCTION(JSObject*, op_new_object)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- return constructEmptyObject(stackFrame.callFrame, stackFrame.args[0].structure());
-}
-
-DEFINE_STUB_FUNCTION(void, op_put_by_id_generic)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- PutPropertySlot slot(stackFrame.callFrame->codeBlock()->isStrictMode());
- stackFrame.args[0].jsValue().put(stackFrame.callFrame, stackFrame.args[1].identifier(), stackFrame.args[2].jsValue(), slot);
- CHECK_FOR_EXCEPTION_AT_END();
-}
-
-DEFINE_STUB_FUNCTION(void, op_put_by_id_direct_generic)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- PutPropertySlot slot(stackFrame.callFrame->codeBlock()->isStrictMode());
- JSValue baseValue = stackFrame.args[0].jsValue();
- ASSERT(baseValue.isObject());
- asObject(baseValue)->putDirect(stackFrame.callFrame->vm(), stackFrame.args[1].identifier(), stackFrame.args[2].jsValue(), slot);
- CHECK_FOR_EXCEPTION_AT_END();
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_generic)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- Identifier& ident = stackFrame.args[1].identifier();
-
- JSValue baseValue = stackFrame.args[0].jsValue();
- PropertySlot slot(baseValue);
- JSValue result = baseValue.get(callFrame, ident, slot);
-
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(void, op_put_by_id)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = stackFrame.callFrame;
- Identifier& ident = stackFrame.args[1].identifier();
-
- CodeBlock* codeBlock = stackFrame.callFrame->codeBlock();
- StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
- AccessType accessType = static_cast<AccessType>(stubInfo->accessType);
-
- PutPropertySlot slot(callFrame->codeBlock()->isStrictMode());
- stackFrame.args[0].jsValue().put(callFrame, ident, stackFrame.args[2].jsValue(), slot);
-
- if (accessType == static_cast<AccessType>(stubInfo->accessType)) {
- stubInfo->setSeen();
- tryCachePutByID(callFrame, codeBlock, STUB_RETURN_ADDRESS, stackFrame.args[0].jsValue(), slot, stubInfo, false);
- }
-
- CHECK_FOR_EXCEPTION_AT_END();
-}
-
-DEFINE_STUB_FUNCTION(void, op_put_by_id_direct)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = stackFrame.callFrame;
- Identifier& ident = stackFrame.args[1].identifier();
-
- CodeBlock* codeBlock = stackFrame.callFrame->codeBlock();
- StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
- AccessType accessType = static_cast<AccessType>(stubInfo->accessType);
-
- PutPropertySlot slot(callFrame->codeBlock()->isStrictMode());
- JSValue baseValue = stackFrame.args[0].jsValue();
- ASSERT(baseValue.isObject());
-
- asObject(baseValue)->putDirect(callFrame->vm(), ident, stackFrame.args[2].jsValue(), slot);
-
- if (accessType == static_cast<AccessType>(stubInfo->accessType)) {
- stubInfo->setSeen();
- tryCachePutByID(callFrame, codeBlock, STUB_RETURN_ADDRESS, stackFrame.args[0].jsValue(), slot, stubInfo, true);
- }
-
- CHECK_FOR_EXCEPTION_AT_END();
-}
-
-DEFINE_STUB_FUNCTION(void, op_put_by_id_fail)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- Identifier& ident = stackFrame.args[1].identifier();
-
- PutPropertySlot slot(callFrame->codeBlock()->isStrictMode());
- stackFrame.args[0].jsValue().put(callFrame, ident, stackFrame.args[2].jsValue(), slot);
-
- CHECK_FOR_EXCEPTION_AT_END();
-}
-
-DEFINE_STUB_FUNCTION(void, op_put_by_id_direct_fail)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- Identifier& ident = stackFrame.args[1].identifier();
-
- PutPropertySlot slot(callFrame->codeBlock()->isStrictMode());
- JSValue baseValue = stackFrame.args[0].jsValue();
- ASSERT(baseValue.isObject());
- asObject(baseValue)->putDirect(callFrame->vm(), ident, stackFrame.args[2].jsValue(), slot);
-
- CHECK_FOR_EXCEPTION_AT_END();
-}
-
-DEFINE_STUB_FUNCTION(JSObject*, op_put_by_id_transition_realloc)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue baseValue = stackFrame.args[0].jsValue();
- int32_t oldSize = stackFrame.args[3].int32();
- Structure* newStructure = stackFrame.args[4].structure();
- int32_t newSize = newStructure->outOfLineCapacity();
-
- ASSERT(oldSize >= 0);
- ASSERT(newSize > oldSize);
-
- ASSERT(baseValue.isObject());
- JSObject* base = asObject(baseValue);
- VM& vm = *stackFrame.vm;
- Butterfly* butterfly = base->growOutOfLineStorage(vm, oldSize, newSize);
- base->setButterfly(vm, butterfly, newStructure);
-
- return base;
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = stackFrame.callFrame;
- Identifier& ident = stackFrame.args[1].identifier();
-
- CodeBlock* codeBlock = stackFrame.callFrame->codeBlock();
- StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
- AccessType accessType = static_cast<AccessType>(stubInfo->accessType);
-
- JSValue baseValue = stackFrame.args[0].jsValue();
- PropertySlot slot(baseValue);
- JSValue result = baseValue.get(callFrame, ident, slot);
-
- if (accessType != static_cast<AccessType>(stubInfo->accessType))
- return JSValue::encode(result);
-
- if (!stubInfo->seenOnce())
- stubInfo->setSeen();
- else
- tryCacheGetByID(callFrame, codeBlock, STUB_RETURN_ADDRESS, baseValue, ident, slot, stubInfo);
-
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_self_fail)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- Identifier& ident = stackFrame.args[1].identifier();
-
- CodeBlock* codeBlock = callFrame->codeBlock();
- StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
- AccessType accessType = static_cast<AccessType>(stubInfo->accessType);
-
- JSValue baseValue = stackFrame.args[0].jsValue();
- PropertySlot slot(baseValue);
- JSValue result = baseValue.get(callFrame, ident, slot);
-
- if (accessType != static_cast<AccessType>(stubInfo->accessType))
- return JSValue::encode(result);
-
- CHECK_FOR_EXCEPTION();
-
- if (baseValue.isCell()
- && slot.isCacheable()
- && !baseValue.asCell()->structure()->isUncacheableDictionary()
- && slot.slotBase() == baseValue) {
-
- ASSERT(slot.slotBase().isObject());
-
- PolymorphicAccessStructureList* polymorphicStructureList;
- int listIndex = 1;
-
- if (stubInfo->accessType == access_unset)
- stubInfo->initGetByIdSelf(callFrame->vm(), codeBlock->ownerExecutable(), baseValue.asCell()->structure());
-
- if (stubInfo->accessType == access_get_by_id_self) {
- ASSERT(!stubInfo->stubRoutine);
- polymorphicStructureList = new PolymorphicAccessStructureList(callFrame->vm(), codeBlock->ownerExecutable(), 0, stubInfo->u.getByIdSelf.baseObjectStructure.get(), true);
- stubInfo->initGetByIdSelfList(polymorphicStructureList, 1);
- } else {
- polymorphicStructureList = stubInfo->u.getByIdSelfList.structureList;
- listIndex = stubInfo->u.getByIdSelfList.listSize;
- }
- if (listIndex < POLYMORPHIC_LIST_CACHE_SIZE) {
- stubInfo->u.getByIdSelfList.listSize++;
- JIT::compileGetByIdSelfList(callFrame->scope()->vm(), codeBlock, stubInfo, polymorphicStructureList, listIndex, baseValue.asCell()->structure(), ident, slot, slot.cachedOffset());
-
- if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
- ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_generic));
- }
- } else
- ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_generic));
- return JSValue::encode(result);
-}
-
-static PolymorphicAccessStructureList* getPolymorphicAccessStructureListSlot(VM& vm, ScriptExecutable* owner, StructureStubInfo* stubInfo, int& listIndex)
-{
- PolymorphicAccessStructureList* prototypeStructureList = 0;
- listIndex = 1;
-
- switch (stubInfo->accessType) {
- case access_get_by_id_proto:
- prototypeStructureList = new PolymorphicAccessStructureList(vm, owner, stubInfo->stubRoutine, stubInfo->u.getByIdProto.baseObjectStructure.get(), stubInfo->u.getByIdProto.prototypeStructure.get(), true);
- stubInfo->stubRoutine.clear();
- stubInfo->initGetByIdProtoList(prototypeStructureList, 2);
- break;
- case access_get_by_id_chain:
- prototypeStructureList = new PolymorphicAccessStructureList(vm, owner, stubInfo->stubRoutine, stubInfo->u.getByIdChain.baseObjectStructure.get(), stubInfo->u.getByIdChain.chain.get(), true);
- stubInfo->stubRoutine.clear();
- stubInfo->initGetByIdProtoList(prototypeStructureList, 2);
- break;
- case access_get_by_id_proto_list:
- prototypeStructureList = stubInfo->u.getByIdProtoList.structureList;
- listIndex = stubInfo->u.getByIdProtoList.listSize;
- if (listIndex < POLYMORPHIC_LIST_CACHE_SIZE)
- stubInfo->u.getByIdProtoList.listSize++;
- break;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- }
-
- ASSERT(listIndex <= POLYMORPHIC_LIST_CACHE_SIZE);
- return prototypeStructureList;
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_getter_stub)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = stackFrame.callFrame;
- GetterSetter* getterSetter = asGetterSetter(stackFrame.args[0].jsObject());
- if (!getterSetter->getter())
- return JSValue::encode(jsUndefined());
- JSObject* getter = asObject(getterSetter->getter());
- CallData callData;
- CallType callType = getter->methodTable()->getCallData(getter, callData);
- JSValue result = call(callFrame, getter, callType, callData, stackFrame.args[1].jsObject(), ArgList());
- if (callFrame->hadException())
- returnToThrowTrampoline(&callFrame->vm(), stackFrame.args[2].returnAddress(), STUB_RETURN_ADDRESS);
-
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_custom_stub)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = stackFrame.callFrame;
- JSObject* slotBase = stackFrame.args[0].jsObject();
- PropertySlot::GetValueFunc getter = reinterpret_cast<PropertySlot::GetValueFunc>(stackFrame.args[1].asPointer);
- const Identifier& ident = stackFrame.args[2].identifier();
- JSValue result = getter(callFrame, slotBase, ident);
- if (callFrame->hadException())
- returnToThrowTrampoline(&callFrame->vm(), stackFrame.args[3].returnAddress(), STUB_RETURN_ADDRESS);
-
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- const Identifier& propertyName = stackFrame.args[1].identifier();
-
- CodeBlock* codeBlock = callFrame->codeBlock();
- StructureStubInfo* stubInfo = &codeBlock->getStubInfo(STUB_RETURN_ADDRESS);
- AccessType accessType = static_cast<AccessType>(stubInfo->accessType);
-
- JSValue baseValue = stackFrame.args[0].jsValue();
- PropertySlot slot(baseValue);
- JSValue result = baseValue.get(callFrame, propertyName, slot);
-
- CHECK_FOR_EXCEPTION();
-
- if (accessType != static_cast<AccessType>(stubInfo->accessType)
- || !baseValue.isCell()
- || !slot.isCacheable()
- || baseValue.asCell()->structure()->isDictionary()
- || baseValue.asCell()->structure()->typeInfo().prohibitsPropertyCaching()) {
- ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
- return JSValue::encode(result);
- }
-
- Structure* structure = baseValue.asCell()->structure();
-
- ASSERT(slot.slotBase().isObject());
- JSObject* slotBaseObject = asObject(slot.slotBase());
-
- PropertyOffset offset = slot.cachedOffset();
-
- if (slot.slotBase() == baseValue)
- ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
- else if (slot.slotBase() == baseValue.asCell()->structure()->prototypeForLookup(callFrame)) {
- ASSERT(!baseValue.asCell()->structure()->isDictionary());
-
- if (baseValue.asCell()->structure()->typeInfo().hasImpureGetOwnPropertySlot()) {
- ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
- return JSValue::encode(result);
- }
-
- // Since we're accessing a prototype in a loop, it's a good bet that it
- // should not be treated as a dictionary.
- if (slotBaseObject->structure()->isDictionary()) {
- slotBaseObject->flattenDictionaryObject(callFrame->vm());
- offset = slotBaseObject->structure()->get(callFrame->vm(), propertyName);
- }
-
- int listIndex;
- PolymorphicAccessStructureList* prototypeStructureList = getPolymorphicAccessStructureListSlot(callFrame->vm(), codeBlock->ownerExecutable(), stubInfo, listIndex);
- if (listIndex < POLYMORPHIC_LIST_CACHE_SIZE) {
- JIT::compileGetByIdProtoList(callFrame->scope()->vm(), callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, slotBaseObject->structure(), propertyName, slot, offset);
-
- if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
- ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_list_full));
- }
- } else {
- size_t count = normalizePrototypeChainForChainAccess(callFrame, baseValue, slot.slotBase(), propertyName, offset);
- if (count == InvalidPrototypeChain) {
- ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail));
- return JSValue::encode(result);
- }
-
- ASSERT(!baseValue.asCell()->structure()->isDictionary());
- int listIndex;
- PolymorphicAccessStructureList* prototypeStructureList = getPolymorphicAccessStructureListSlot(callFrame->vm(), codeBlock->ownerExecutable(), stubInfo, listIndex);
-
- if (listIndex < POLYMORPHIC_LIST_CACHE_SIZE) {
- StructureChain* protoChain = structure->prototypeChain(callFrame);
- JIT::compileGetByIdChainList(callFrame->scope()->vm(), callFrame, codeBlock, stubInfo, prototypeStructureList, listIndex, structure, protoChain, count, propertyName, slot, offset);
-
- if (listIndex == (POLYMORPHIC_LIST_CACHE_SIZE - 1))
- ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_list_full));
- }
- }
-
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list_full)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue baseValue = stackFrame.args[0].jsValue();
- PropertySlot slot(baseValue);
- JSValue result = baseValue.get(stackFrame.callFrame, stackFrame.args[1].identifier(), slot);
-
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_fail)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue baseValue = stackFrame.args[0].jsValue();
- PropertySlot slot(baseValue);
- JSValue result = baseValue.get(stackFrame.callFrame, stackFrame.args[1].identifier(), slot);
-
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_array_fail)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue baseValue = stackFrame.args[0].jsValue();
- PropertySlot slot(baseValue);
- JSValue result = baseValue.get(stackFrame.callFrame, stackFrame.args[1].identifier(), slot);
-
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_string_fail)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue baseValue = stackFrame.args[0].jsValue();
- PropertySlot slot(baseValue);
- JSValue result = baseValue.get(stackFrame.callFrame, stackFrame.args[1].identifier(), slot);
-
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_check_has_instance)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue value = stackFrame.args[0].jsValue();
- JSValue baseVal = stackFrame.args[1].jsValue();
-
- if (baseVal.isObject()) {
- JSObject* baseObject = asObject(baseVal);
- ASSERT(!baseObject->structure()->typeInfo().implementsDefaultHasInstance());
- if (baseObject->structure()->typeInfo().implementsHasInstance()) {
- bool result = baseObject->methodTable()->customHasInstance(baseObject, callFrame, value);
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(jsBoolean(result));
- }
- }
-
- stackFrame.vm->exception = createInvalidParameterError(callFrame, "instanceof", baseVal);
- VM_THROW_EXCEPTION_AT_END();
- return JSValue::encode(JSValue());
-}
-
-#if ENABLE(DFG_JIT)
-DEFINE_STUB_FUNCTION(void, optimize)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- CodeBlock* codeBlock = callFrame->codeBlock();
- unsigned bytecodeIndex = stackFrame.args[0].int32();
-
-#if ENABLE(JIT_VERBOSE_OSR)
- dataLog(
- *codeBlock, ": Entered optimize with bytecodeIndex = ", bytecodeIndex,
- ", executeCounter = ", codeBlock->jitExecuteCounter(),
- ", optimizationDelayCounter = ", codeBlock->reoptimizationRetryCounter(),
- ", exitCounter = ");
- if (codeBlock->hasOptimizedReplacement())
- dataLog(codeBlock->replacement()->osrExitCounter());
- else
- dataLog("N/A");
- dataLog("\n");
-#endif
-
- if (!codeBlock->checkIfOptimizationThresholdReached()) {
- codeBlock->updateAllPredictions();
-#if ENABLE(JIT_VERBOSE_OSR)
- dataLog("Choosing not to optimize ", *codeBlock, " yet.\n");
-#endif
- return;
- }
-
- if (codeBlock->hasOptimizedReplacement()) {
-#if ENABLE(JIT_VERBOSE_OSR)
- dataLog("Considering OSR ", *codeBlock, " -> ", *codeBlock->replacement(), ".\n");
-#endif
- // If we have an optimized replacement, then it must be the case that we entered
- // cti_optimize from a loop. That's because if there's an optimized replacement,
- // then all calls to this function will be relinked to the replacement and so
- // the prologue OSR will never fire.
-
- // This is an interesting threshold check. Consider that a function OSR exits
- // in the middle of a loop, while having a relatively low exit count. The exit
- // will reset the execution counter to some target threshold, meaning that this
- // code won't be reached until that loop heats up for >=1000 executions. But then
- // we do a second check here, to see if we should either reoptimize, or just
- // attempt OSR entry. Hence it might even be correct for
- // shouldReoptimizeFromLoopNow() to always return true. But we make it do some
- // additional checking anyway, to reduce the amount of recompilation thrashing.
- if (codeBlock->replacement()->shouldReoptimizeFromLoopNow()) {
-#if ENABLE(JIT_VERBOSE_OSR)
- dataLog("Triggering reoptimization of ", *codeBlock, "(", *codeBlock->replacement(), ") (in loop).\n");
-#endif
- codeBlock->reoptimize();
- return;
- }
- } else {
- if (!codeBlock->shouldOptimizeNow()) {
-#if ENABLE(JIT_VERBOSE_OSR)
- dataLog("Delaying optimization for ", *codeBlock, " (in loop) because of insufficient profiling.\n");
-#endif
- return;
- }
-
-#if ENABLE(JIT_VERBOSE_OSR)
- dataLog("Triggering optimized compilation of ", *codeBlock, "\n");
-#endif
-
- JSScope* scope = callFrame->scope();
- JSObject* error = codeBlock->compileOptimized(callFrame, scope, bytecodeIndex);
-#if ENABLE(JIT_VERBOSE_OSR)
- if (error)
- dataLog("WARNING: optimized compilation failed.\n");
-#else
- UNUSED_PARAM(error);
-#endif
-
- if (codeBlock->replacement() == codeBlock) {
-#if ENABLE(JIT_VERBOSE_OSR)
- dataLog("Optimizing ", *codeBlock, " failed.\n");
-#endif
-
- ASSERT(codeBlock->getJITType() == JITCode::BaselineJIT);
- codeBlock->dontOptimizeAnytimeSoon();
- return;
- }
- }
-
- CodeBlock* optimizedCodeBlock = codeBlock->replacement();
- ASSERT(optimizedCodeBlock->getJITType() == JITCode::DFGJIT);
-
- if (void* address = DFG::prepareOSREntry(callFrame, optimizedCodeBlock, bytecodeIndex)) {
- if (Options::showDFGDisassembly()) {
- dataLog(
- "Performing OSR ", *codeBlock, " -> ", *optimizedCodeBlock, ", address ",
- RawPointer((STUB_RETURN_ADDRESS).value()), " -> ", RawPointer(address), ".\n");
- }
-#if ENABLE(JIT_VERBOSE_OSR)
- dataLog("Optimizing ", *codeBlock, " succeeded, performing OSR after a delay of ", codeBlock->optimizationDelayCounter(), ".\n");
-#endif
-
- codeBlock->optimizeSoon();
- STUB_SET_RETURN_ADDRESS(address);
- return;
- }
-
-#if ENABLE(JIT_VERBOSE_OSR)
- dataLog("Optimizing ", *codeBlock, " succeeded, OSR failed, after a delay of ", codeBlock->optimizationDelayCounter(), ".\n");
-#endif
-
- // Count the OSR failure as a speculation failure. If this happens a lot, then
- // reoptimize.
- optimizedCodeBlock->countOSRExit();
-
-#if ENABLE(JIT_VERBOSE_OSR)
- dataLog("Encountered OSR failure ", *codeBlock, " -> ", *codeBlock->replacement(), ".\n");
-#endif
-
- // We are a lot more conservative about triggering reoptimization after OSR failure than
- // before it. If we enter the optimize_from_loop trigger with a bucket full of fail
- // already, then we really would like to reoptimize immediately. But this case covers
- // something else: there weren't many (or any) speculation failures before, but we just
- // failed to enter the speculative code because some variable had the wrong value or
- // because the OSR code decided for any spurious reason that it did not want to OSR
- // right now. So, we trigger reoptimization only upon the more conservative (non-loop)
- // reoptimization trigger.
- if (optimizedCodeBlock->shouldReoptimizeNow()) {
-#if ENABLE(JIT_VERBOSE_OSR)
- dataLog("Triggering reoptimization of ", *codeBlock, " -> ", *codeBlock->replacement(), " (after OSR fail).\n");
-#endif
- codeBlock->reoptimize();
- return;
- }
-
- // OSR failed this time, but it might succeed next time! Let the code run a bit
- // longer and then try again.
- codeBlock->optimizeAfterWarmUp();
-}
-#endif // ENABLE(DFG_JIT)
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_instanceof)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue value = stackFrame.args[0].jsValue();
- JSValue proto = stackFrame.args[1].jsValue();
-
- ASSERT(!value.isObject() || !proto.isObject());
-
- bool result = JSObject::defaultHasInstance(callFrame, value, proto);
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(jsBoolean(result));
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_del_by_id)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
-
- JSObject* baseObj = stackFrame.args[0].jsValue().toObject(callFrame);
-
- bool couldDelete = baseObj->methodTable()->deleteProperty(baseObj, callFrame, stackFrame.args[1].identifier());
- JSValue result = jsBoolean(couldDelete);
- if (!couldDelete && callFrame->codeBlock()->isStrictMode())
- stackFrame.vm->exception = createTypeError(stackFrame.callFrame, "Unable to delete property.");
-
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_mul)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src1 = stackFrame.args[0].jsValue();
- JSValue src2 = stackFrame.args[1].jsValue();
-
- if (src1.isNumber() && src2.isNumber())
- return JSValue::encode(jsNumber(src1.asNumber() * src2.asNumber()));
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsNumber(src1.toNumber(callFrame) * src2.toNumber(callFrame));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(JSObject*, op_new_func)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- ASSERT(stackFrame.callFrame->codeBlock()->codeType() != FunctionCode || !stackFrame.callFrame->codeBlock()->needsFullScopeChain() || stackFrame.callFrame->uncheckedR(stackFrame.callFrame->codeBlock()->activationRegister()).jsValue());
- return JSFunction::create(stackFrame.callFrame, stackFrame.args[0].function(), stackFrame.callFrame->scope());
-}
-
-inline void* jitCompileFor(CallFrame* callFrame, CodeSpecializationKind kind)
-{
- // This function is called by cti_op_call_jitCompile() and
- // cti_op_construct_jitCompile() JIT glue trampolines to compile the
- // callee function that we want to call. Both cti glue trampolines are
- // called by JIT'ed code which has pushed a frame and initialized most of
- // the frame content except for the codeBlock.
- //
- // Normally, the prologue of the callee is supposed to set the frame's cb
- // pointer to the cb of the callee. But in this case, the callee code does
- // not exist yet until it is compiled below. The compilation process will
- // allocate memory which may trigger a GC. The GC, in turn, will scan the
- // JSStack, and will expect the frame's cb to either be valid or 0. If
- // we don't initialize it, the GC will be accessing invalid memory and may
- // crash.
- //
- // Hence, we should nullify it here before proceeding with the compilation.
- callFrame->setCodeBlock(0);
-
- JSFunction* function = jsCast<JSFunction*>(callFrame->callee());
- ASSERT(!function->isHostFunction());
- FunctionExecutable* executable = function->jsExecutable();
- JSScope* callDataScopeChain = function->scope();
- JSObject* error = executable->compileFor(callFrame, callDataScopeChain, kind);
- if (!error)
- return function;
- callFrame->vm().exception = error;
- return 0;
-}
-
-DEFINE_STUB_FUNCTION(void*, op_call_jitCompile)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
-#if !ASSERT_DISABLED
- CallData callData;
- ASSERT(stackFrame.callFrame->callee()->methodTable()->getCallData(stackFrame.callFrame->callee(), callData) == CallTypeJS);
-#endif
-
- CallFrame* callFrame = stackFrame.callFrame;
- void* result = jitCompileFor(callFrame, CodeForCall);
- if (!result)
- return throwExceptionFromOpCall<void*>(stackFrame, callFrame, STUB_RETURN_ADDRESS);
-
- return result;
-}
-
-DEFINE_STUB_FUNCTION(void*, op_construct_jitCompile)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
-#if !ASSERT_DISABLED
- ConstructData constructData;
- ASSERT(jsCast<JSFunction*>(stackFrame.callFrame->callee())->methodTable()->getConstructData(stackFrame.callFrame->callee(), constructData) == ConstructTypeJS);
-#endif
-
- CallFrame* callFrame = stackFrame.callFrame;
- void* result = jitCompileFor(callFrame, CodeForConstruct);
- if (!result)
- return throwExceptionFromOpCall<void*>(stackFrame, callFrame, STUB_RETURN_ADDRESS);
-
- return result;
-}
-
-DEFINE_STUB_FUNCTION(void*, op_call_arityCheck)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
-
- CallFrame* newCallFrame = CommonSlowPaths::arityCheckFor(callFrame, stackFrame.stack, CodeForCall);
- if (!newCallFrame) {
- ErrorWithExecFunctor functor = ErrorWithExecFunctor(createStackOverflowError);
- return throwExceptionFromOpCall<void*>(stackFrame, callFrame, STUB_RETURN_ADDRESS, functor);
- }
- return newCallFrame;
-}
-
-DEFINE_STUB_FUNCTION(void*, op_construct_arityCheck)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
-
- CallFrame* newCallFrame = CommonSlowPaths::arityCheckFor(callFrame, stackFrame.stack, CodeForConstruct);
- if (!newCallFrame) {
- ErrorWithExecFunctor functor = ErrorWithExecFunctor(createStackOverflowError);
- return throwExceptionFromOpCall<void*>(stackFrame, callFrame, STUB_RETURN_ADDRESS, functor);
- }
- return newCallFrame;
-}
-
-inline void* lazyLinkFor(CallFrame* callFrame, CodeSpecializationKind kind)
-{
- JSFunction* callee = jsCast<JSFunction*>(callFrame->callee());
- ExecutableBase* executable = callee->executable();
-
- MacroAssemblerCodePtr codePtr;
- CodeBlock* codeBlock = 0;
- CallLinkInfo* callLinkInfo = &callFrame->callerFrame()->codeBlock()->getCallLinkInfo(callFrame->returnPC());
-
- // This function is called by cti_vm_lazyLinkCall() and
- // cti_vm_lazyLinkConstruct() JIT glue trampolines to link the callee function
- // that we want to call. Both cti glue trampolines are called by JIT'ed
- // code which has pushed a frame and initialized most of the frame content
- // except for the codeBlock.
- //
- // Normally, the prologue of the callee is supposed to set the frame's cb
- // field to the cb of the callee. But in this case, the callee may not
- // exist yet, and if not, it will be generated in the compilation below.
- // The compilation will allocate memory which may trigger a GC. The GC, in
- // turn, will scan the JSStack, and will expect the frame's cb to be valid
- // or 0. If we don't initialize it, the GC will be accessing invalid
- // memory and may crash.
- //
- // Hence, we should nullify it here before proceeding with the compilation.
- callFrame->setCodeBlock(0);
-
- if (executable->isHostFunction())
- codePtr = executable->generatedJITCodeFor(kind).addressForCall();
- else {
- FunctionExecutable* functionExecutable = static_cast<FunctionExecutable*>(executable);
- if (JSObject* error = functionExecutable->compileFor(callFrame, callee->scope(), kind)) {
- callFrame->vm().exception = error;
- return 0;
- }
- codeBlock = &functionExecutable->generatedBytecodeFor(kind);
- if (callFrame->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters())
- || callLinkInfo->callType == CallLinkInfo::CallVarargs)
- codePtr = functionExecutable->generatedJITCodeWithArityCheckFor(kind);
- else
- codePtr = functionExecutable->generatedJITCodeFor(kind).addressForCall();
- }
-
- if (!callLinkInfo->seenOnce())
- callLinkInfo->setSeen();
- else
- JIT::linkFor(callee, callFrame->callerFrame()->codeBlock(), codeBlock, codePtr, callLinkInfo, &callFrame->vm(), kind);
-
- return codePtr.executableAddress();
-}
-
-DEFINE_STUB_FUNCTION(void*, vm_lazyLinkCall)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- void* result = lazyLinkFor(callFrame, CodeForCall);
- if (!result)
- return throwExceptionFromOpCall<void*>(stackFrame, callFrame, STUB_RETURN_ADDRESS);
-
- return result;
-}
-
-DEFINE_STUB_FUNCTION(void*, vm_lazyLinkClosureCall)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
-
- CodeBlock* callerCodeBlock = callFrame->callerFrame()->codeBlock();
- VM* vm = callerCodeBlock->vm();
- CallLinkInfo* callLinkInfo = &callerCodeBlock->getCallLinkInfo(callFrame->returnPC());
- JSFunction* callee = jsCast<JSFunction*>(callFrame->callee());
- ExecutableBase* executable = callee->executable();
- Structure* structure = callee->structure();
-
- ASSERT(callLinkInfo->callType == CallLinkInfo::Call);
- ASSERT(callLinkInfo->isLinked());
- ASSERT(callLinkInfo->callee);
- ASSERT(callee != callLinkInfo->callee.get());
-
- bool shouldLink = false;
- CodeBlock* calleeCodeBlock = 0;
- MacroAssemblerCodePtr codePtr;
-
- if (executable == callLinkInfo->callee.get()->executable()
- && structure == callLinkInfo->callee.get()->structure()) {
-
- shouldLink = true;
-
- ASSERT(executable->hasJITCodeForCall());
- codePtr = executable->generatedJITCodeForCall().addressForCall();
- if (!callee->executable()->isHostFunction()) {
- calleeCodeBlock = &jsCast<FunctionExecutable*>(executable)->generatedBytecodeForCall();
- if (callFrame->argumentCountIncludingThis() < static_cast<size_t>(calleeCodeBlock->numParameters())) {
- shouldLink = false;
- codePtr = executable->generatedJITCodeWithArityCheckFor(CodeForCall);
- }
- }
- } else if (callee->isHostFunction())
- codePtr = executable->generatedJITCodeForCall().addressForCall();
- else {
- // Need to clear the code block before compilation, because compilation can GC.
- callFrame->setCodeBlock(0);
-
- FunctionExecutable* functionExecutable = jsCast<FunctionExecutable*>(executable);
- JSScope* scopeChain = callee->scope();
- JSObject* error = functionExecutable->compileFor(callFrame, scopeChain, CodeForCall);
- if (error) {
- callFrame->vm().exception = error;
- return 0;
- }
-
- codePtr = functionExecutable->generatedJITCodeWithArityCheckFor(CodeForCall);
- }
-
- if (shouldLink) {
- ASSERT(codePtr);
- JIT::compileClosureCall(vm, callLinkInfo, callerCodeBlock, calleeCodeBlock, structure, executable, codePtr);
- callLinkInfo->hasSeenClosure = true;
- } else
- JIT::linkSlowCall(callerCodeBlock, callLinkInfo);
-
- return codePtr.executableAddress();
-}
-
-DEFINE_STUB_FUNCTION(void*, vm_lazyLinkConstruct)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- void* result = lazyLinkFor(callFrame, CodeForConstruct);
- if (!result)
- return throwExceptionFromOpCall<void*>(stackFrame, callFrame, STUB_RETURN_ADDRESS);
-
- return result;
-}
-
-DEFINE_STUB_FUNCTION(JSObject*, op_push_activation)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSActivation* activation = JSActivation::create(stackFrame.callFrame->vm(), stackFrame.callFrame, stackFrame.callFrame->codeBlock());
- stackFrame.callFrame->setScope(activation);
- return activation;
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_call_NotJSFunction)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
-
- JSValue callee = callFrame->calleeAsValue();
-
- CallData callData;
- CallType callType = getCallData(callee, callData);
-
- ASSERT(callType != CallTypeJS);
- if (callType != CallTypeHost) {
- ASSERT(callType == CallTypeNone);
- ErrorWithExecAndCalleeFunctor functor = ErrorWithExecAndCalleeFunctor(createNotAFunctionError, callee);
- return throwExceptionFromOpCall<EncodedJSValue>(stackFrame, callFrame, STUB_RETURN_ADDRESS, functor);
- }
-
- EncodedJSValue returnValue;
- {
- SamplingTool::CallRecord callRecord(CTI_SAMPLER, true);
- returnValue = callData.native.function(callFrame);
- }
-
- if (stackFrame.vm->exception)
- return throwExceptionFromOpCall<EncodedJSValue>(stackFrame, callFrame, STUB_RETURN_ADDRESS);
-
- return returnValue;
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_create_arguments)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- Arguments* arguments = Arguments::create(*stackFrame.vm, stackFrame.callFrame);
- return JSValue::encode(JSValue(arguments));
-}
-
-DEFINE_STUB_FUNCTION(void, op_tear_off_activation)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- ASSERT(stackFrame.callFrame->codeBlock()->needsFullScopeChain());
- jsCast<JSActivation*>(stackFrame.args[0].jsValue())->tearOff(*stackFrame.vm);
-}
-
-DEFINE_STUB_FUNCTION(void, op_tear_off_arguments)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- ASSERT(callFrame->codeBlock()->usesArguments());
- Arguments* arguments = jsCast<Arguments*>(stackFrame.args[0].jsValue());
- if (JSValue activationValue = stackFrame.args[1].jsValue()) {
- arguments->didTearOffActivation(callFrame, jsCast<JSActivation*>(activationValue));
- return;
- }
- arguments->tearOff(callFrame);
-}
-
-DEFINE_STUB_FUNCTION(void, op_profile_will_call)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- if (LegacyProfiler* profiler = stackFrame.vm->enabledProfiler())
- profiler->willExecute(stackFrame.callFrame, stackFrame.args[0].jsValue());
-}
-
-DEFINE_STUB_FUNCTION(void, op_profile_did_call)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- if (LegacyProfiler* profiler = stackFrame.vm->enabledProfiler())
- profiler->didExecute(stackFrame.callFrame, stackFrame.args[0].jsValue());
-}
-
-DEFINE_STUB_FUNCTION(JSObject*, op_new_array)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- return constructArray(stackFrame.callFrame, stackFrame.args[2].arrayAllocationProfile(), reinterpret_cast<JSValue*>(&stackFrame.callFrame->registers()[stackFrame.args[0].int32()]), stackFrame.args[1].int32());
-}
-
-DEFINE_STUB_FUNCTION(JSObject*, op_new_array_with_size)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- return constructArrayWithSizeQuirk(stackFrame.callFrame, stackFrame.args[1].arrayAllocationProfile(), stackFrame.callFrame->lexicalGlobalObject(), stackFrame.args[0].jsValue());
-}
-
-DEFINE_STUB_FUNCTION(JSObject*, op_new_array_buffer)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- return constructArray(stackFrame.callFrame, stackFrame.args[2].arrayAllocationProfile(), stackFrame.callFrame->codeBlock()->constantBuffer(stackFrame.args[0].int32()), stackFrame.args[1].int32());
-}
-
-DEFINE_STUB_FUNCTION(void, op_init_global_const_check)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- CodeBlock* codeBlock = callFrame->codeBlock();
- symbolTablePut(codeBlock->globalObject(), callFrame, codeBlock->identifier(stackFrame.args[1].int32()), stackFrame.args[0].jsValue(), true);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
-
- JSValue result = JSScope::resolve(callFrame, stackFrame.args[0].identifier(), stackFrame.args[1].resolveOperations());
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(void, op_put_to_base)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue base = callFrame->r(stackFrame.args[0].int32()).jsValue();
- JSValue value = callFrame->r(stackFrame.args[2].int32()).jsValue();
- JSScope::resolvePut(callFrame, base, stackFrame.args[1].identifier(), value, stackFrame.args[3].putToBaseOperation());
- CHECK_FOR_EXCEPTION_AT_END();
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_construct_NotJSConstruct)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue callee = callFrame->calleeAsValue();
-
- ConstructData constructData;
- ConstructType constructType = getConstructData(callee, constructData);
-
- ASSERT(constructType != ConstructTypeJS);
- if (constructType != ConstructTypeHost) {
- ASSERT(constructType == ConstructTypeNone);
- ErrorWithExecAndCalleeFunctor functor = ErrorWithExecAndCalleeFunctor(createNotAConstructorError, callee);
- return throwExceptionFromOpCall<EncodedJSValue>(stackFrame, callFrame, STUB_RETURN_ADDRESS, functor);
- }
-
- EncodedJSValue returnValue;
- {
- SamplingTool::CallRecord callRecord(CTI_SAMPLER, true);
- returnValue = constructData.native.function(callFrame);
- }
-
- if (stackFrame.vm->exception)
- return throwExceptionFromOpCall<EncodedJSValue>(stackFrame, callFrame, STUB_RETURN_ADDRESS);
-
- return returnValue;
-}
-
-static JSValue getByVal(
- CallFrame* callFrame, JSValue baseValue, JSValue subscript, ReturnAddressPtr returnAddress)
-{
- if (LIKELY(baseValue.isCell() && subscript.isString())) {
- if (JSValue result = baseValue.asCell()->fastGetOwnProperty(callFrame, asString(subscript)->value(callFrame)))
- return result;
- }
-
- if (subscript.isUInt32()) {
- uint32_t i = subscript.asUInt32();
- if (isJSString(baseValue) && asString(baseValue)->canGetIndex(i)) {
- ctiPatchCallByReturnAddress(callFrame->codeBlock(), returnAddress, FunctionPtr(cti_op_get_by_val_string));
- return asString(baseValue)->getIndex(callFrame, i);
- }
- return baseValue.get(callFrame, i);
- }
-
- if (isName(subscript))
- return baseValue.get(callFrame, jsCast<NameInstance*>(subscript.asCell())->privateName());
-
- Identifier property(callFrame, subscript.toString(callFrame)->value(callFrame));
- return baseValue.get(callFrame, property);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
-
- JSValue baseValue = stackFrame.args[0].jsValue();
- JSValue subscript = stackFrame.args[1].jsValue();
-
- if (baseValue.isObject() && subscript.isInt32()) {
- // See if it's worth optimizing this at all.
- JSObject* object = asObject(baseValue);
- bool didOptimize = false;
-
- unsigned bytecodeOffset = callFrame->bytecodeOffsetForNonDFGCode();
- ASSERT(bytecodeOffset);
- ByValInfo& byValInfo = callFrame->codeBlock()->getByValInfo(bytecodeOffset - 1);
- ASSERT(!byValInfo.stubRoutine);
-
- if (hasOptimizableIndexing(object->structure())) {
- // Attempt to optimize.
- JITArrayMode arrayMode = jitArrayModeForStructure(object->structure());
- if (arrayMode != byValInfo.arrayMode) {
- JIT::compileGetByVal(&callFrame->vm(), callFrame->codeBlock(), &byValInfo, STUB_RETURN_ADDRESS, arrayMode);
- didOptimize = true;
- }
- }
-
- if (!didOptimize) {
- // If we take slow path more than 10 times without patching then make sure we
- // never make that mistake again. Or, if we failed to patch and we have some object
- // that intercepts indexed get, then don't even wait until 10 times. For cases
- // where we see non-index-intercepting objects, this gives 10 iterations worth of
- // opportunity for us to observe that the get_by_val may be polymorphic.
- if (++byValInfo.slowPathCount >= 10
- || object->structure()->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero()) {
- // Don't ever try to optimize.
- RepatchBuffer repatchBuffer(callFrame->codeBlock());
- repatchBuffer.relinkCallerToFunction(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val_generic));
- }
- }
- }
-
- JSValue result = getByVal(callFrame, baseValue, subscript, STUB_RETURN_ADDRESS);
- CHECK_FOR_EXCEPTION();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val_generic)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
-
- JSValue baseValue = stackFrame.args[0].jsValue();
- JSValue subscript = stackFrame.args[1].jsValue();
-
- JSValue result = getByVal(callFrame, baseValue, subscript, STUB_RETURN_ADDRESS);
- CHECK_FOR_EXCEPTION();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_val_string)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
-
- JSValue baseValue = stackFrame.args[0].jsValue();
- JSValue subscript = stackFrame.args[1].jsValue();
-
- JSValue result;
-
- if (LIKELY(subscript.isUInt32())) {
- uint32_t i = subscript.asUInt32();
- if (isJSString(baseValue) && asString(baseValue)->canGetIndex(i))
- result = asString(baseValue)->getIndex(callFrame, i);
- else {
- result = baseValue.get(callFrame, i);
- if (!isJSString(baseValue))
- ctiPatchCallByReturnAddress(callFrame->codeBlock(), STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_val));
- }
- } else if (isName(subscript))
- result = baseValue.get(callFrame, jsCast<NameInstance*>(subscript.asCell())->privateName());
- else {
- Identifier property(callFrame, subscript.toString(callFrame)->value(callFrame));
- result = baseValue.get(callFrame, property);
- }
-
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_sub)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src1 = stackFrame.args[0].jsValue();
- JSValue src2 = stackFrame.args[1].jsValue();
-
- if (src1.isNumber() && src2.isNumber())
- return JSValue::encode(jsNumber(src1.asNumber() - src2.asNumber()));
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsNumber(src1.toNumber(callFrame) - src2.toNumber(callFrame));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-static void putByVal(CallFrame* callFrame, JSValue baseValue, JSValue subscript, JSValue value)
-{
- if (LIKELY(subscript.isUInt32())) {
- uint32_t i = subscript.asUInt32();
- if (baseValue.isObject()) {
- JSObject* object = asObject(baseValue);
- if (object->canSetIndexQuickly(i))
- object->setIndexQuickly(callFrame->vm(), i, value);
- else
- object->methodTable()->putByIndex(object, callFrame, i, value, callFrame->codeBlock()->isStrictMode());
- } else
- baseValue.putByIndex(callFrame, i, value, callFrame->codeBlock()->isStrictMode());
- } else if (isName(subscript)) {
- PutPropertySlot slot(callFrame->codeBlock()->isStrictMode());
- baseValue.put(callFrame, jsCast<NameInstance*>(subscript.asCell())->privateName(), value, slot);
- } else {
- Identifier property(callFrame, subscript.toString(callFrame)->value(callFrame));
- if (!callFrame->vm().exception) { // Don't put to an object if toString threw an exception.
- PutPropertySlot slot(callFrame->codeBlock()->isStrictMode());
- baseValue.put(callFrame, property, value, slot);
- }
- }
-}
-
-DEFINE_STUB_FUNCTION(void, op_put_by_val)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
-
- JSValue baseValue = stackFrame.args[0].jsValue();
- JSValue subscript = stackFrame.args[1].jsValue();
- JSValue value = stackFrame.args[2].jsValue();
-
- if (baseValue.isObject() && subscript.isInt32()) {
- // See if it's worth optimizing at all.
- JSObject* object = asObject(baseValue);
- bool didOptimize = false;
-
- unsigned bytecodeOffset = callFrame->bytecodeOffsetForNonDFGCode();
- ASSERT(bytecodeOffset);
- ByValInfo& byValInfo = callFrame->codeBlock()->getByValInfo(bytecodeOffset - 1);
- ASSERT(!byValInfo.stubRoutine);
-
- if (hasOptimizableIndexing(object->structure())) {
- // Attempt to optimize.
- JITArrayMode arrayMode = jitArrayModeForStructure(object->structure());
- if (arrayMode != byValInfo.arrayMode) {
- JIT::compilePutByVal(&callFrame->vm(), callFrame->codeBlock(), &byValInfo, STUB_RETURN_ADDRESS, arrayMode);
- didOptimize = true;
- }
- }
-
- if (!didOptimize) {
- // If we take slow path more than 10 times without patching then make sure we
- // never make that mistake again. Or, if we failed to patch and we have some object
- // that intercepts indexed get, then don't even wait until 10 times. For cases
- // where we see non-index-intercepting objects, this gives 10 iterations worth of
-            // opportunity for us to observe that the put_by_val may be polymorphic.
- if (++byValInfo.slowPathCount >= 10
- || object->structure()->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero()) {
- // Don't ever try to optimize.
- RepatchBuffer repatchBuffer(callFrame->codeBlock());
- repatchBuffer.relinkCallerToFunction(STUB_RETURN_ADDRESS, FunctionPtr(cti_op_put_by_val_generic));
- }
- }
- }
-
- putByVal(callFrame, baseValue, subscript, value);
-
- CHECK_FOR_EXCEPTION_AT_END();
-}
-
-DEFINE_STUB_FUNCTION(void, op_put_by_val_generic)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
-
- JSValue baseValue = stackFrame.args[0].jsValue();
- JSValue subscript = stackFrame.args[1].jsValue();
- JSValue value = stackFrame.args[2].jsValue();
-
- putByVal(callFrame, baseValue, subscript, value);
-
- CHECK_FOR_EXCEPTION_AT_END();
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_less)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsBoolean(jsLess<true>(callFrame, stackFrame.args[0].jsValue(), stackFrame.args[1].jsValue()));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_lesseq)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsBoolean(jsLessEq<true>(callFrame, stackFrame.args[0].jsValue(), stackFrame.args[1].jsValue()));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_greater)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsBoolean(jsLess<false>(callFrame, stackFrame.args[1].jsValue(), stackFrame.args[0].jsValue()));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_greatereq)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsBoolean(jsLessEq<false>(callFrame, stackFrame.args[1].jsValue(), stackFrame.args[0].jsValue()));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(void*, op_load_varargs)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSStack* stack = stackFrame.stack;
- JSValue thisValue = stackFrame.args[0].jsValue();
- JSValue arguments = stackFrame.args[1].jsValue();
- int firstFreeRegister = stackFrame.args[2].int32();
-
- CallFrame* newCallFrame = loadVarargs(callFrame, stack, thisValue, arguments, firstFreeRegister);
- if (!newCallFrame)
- VM_THROW_EXCEPTION();
- return newCallFrame;
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_negate)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src = stackFrame.args[0].jsValue();
-
- if (src.isNumber())
- return JSValue::encode(jsNumber(-src.asNumber()));
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsNumber(-src.toNumber(callFrame));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_base)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- return JSValue::encode(JSScope::resolveBase(stackFrame.callFrame, stackFrame.args[0].identifier(), false, stackFrame.args[1].resolveOperations(), stackFrame.args[2].putToBaseOperation()));
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_base_strict_put)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- if (JSValue result = JSScope::resolveBase(stackFrame.callFrame, stackFrame.args[0].identifier(), true, stackFrame.args[1].resolveOperations(), stackFrame.args[2].putToBaseOperation()))
- return JSValue::encode(result);
- VM_THROW_EXCEPTION();
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_div)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src1 = stackFrame.args[0].jsValue();
- JSValue src2 = stackFrame.args[1].jsValue();
-
- if (src1.isNumber() && src2.isNumber())
- return JSValue::encode(jsNumber(src1.asNumber() / src2.asNumber()));
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsNumber(src1.toNumber(callFrame) / src2.toNumber(callFrame));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_dec)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue v = stackFrame.args[0].jsValue();
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsNumber(v.toNumber(callFrame) - 1);
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(int, op_jless)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src1 = stackFrame.args[0].jsValue();
- JSValue src2 = stackFrame.args[1].jsValue();
- CallFrame* callFrame = stackFrame.callFrame;
-
- bool result = jsLess<true>(callFrame, src1, src2);
- CHECK_FOR_EXCEPTION_AT_END();
- return result;
-}
-
-DEFINE_STUB_FUNCTION(int, op_jlesseq)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src1 = stackFrame.args[0].jsValue();
- JSValue src2 = stackFrame.args[1].jsValue();
- CallFrame* callFrame = stackFrame.callFrame;
-
- bool result = jsLessEq<true>(callFrame, src1, src2);
- CHECK_FOR_EXCEPTION_AT_END();
- return result;
-}
-
-DEFINE_STUB_FUNCTION(int, op_jgreater)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src1 = stackFrame.args[0].jsValue();
- JSValue src2 = stackFrame.args[1].jsValue();
- CallFrame* callFrame = stackFrame.callFrame;
-
- bool result = jsLess<false>(callFrame, src2, src1);
- CHECK_FOR_EXCEPTION_AT_END();
- return result;
-}
-
-DEFINE_STUB_FUNCTION(int, op_jgreatereq)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src1 = stackFrame.args[0].jsValue();
- JSValue src2 = stackFrame.args[1].jsValue();
- CallFrame* callFrame = stackFrame.callFrame;
-
- bool result = jsLessEq<false>(callFrame, src2, src1);
- CHECK_FOR_EXCEPTION_AT_END();
- return result;
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_not)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src = stackFrame.args[0].jsValue();
-
- JSValue result = jsBoolean(!src.toBoolean(stackFrame.callFrame));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(int, op_jtrue)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src1 = stackFrame.args[0].jsValue();
-
- bool result = src1.toBoolean(stackFrame.callFrame);
- CHECK_FOR_EXCEPTION_AT_END();
- return result;
-}
-
-DEFINE_STUB_FUNCTION(int, op_eq)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src1 = stackFrame.args[0].jsValue();
- JSValue src2 = stackFrame.args[1].jsValue();
-
-#if USE(JSVALUE32_64)
- start:
- if (src2.isUndefined()) {
- return src1.isNull() ||
- (src1.isCell() && src1.asCell()->structure()->masqueradesAsUndefined(stackFrame.callFrame->lexicalGlobalObject()))
- || src1.isUndefined();
- }
-
- if (src2.isNull()) {
- return src1.isUndefined() ||
- (src1.isCell() && src1.asCell()->structure()->masqueradesAsUndefined(stackFrame.callFrame->lexicalGlobalObject()))
- || src1.isNull();
- }
-
- if (src1.isInt32()) {
- if (src2.isDouble())
- return src1.asInt32() == src2.asDouble();
- double d = src2.toNumber(stackFrame.callFrame);
- CHECK_FOR_EXCEPTION();
- return src1.asInt32() == d;
- }
-
- if (src1.isDouble()) {
- if (src2.isInt32())
- return src1.asDouble() == src2.asInt32();
- double d = src2.toNumber(stackFrame.callFrame);
- CHECK_FOR_EXCEPTION();
- return src1.asDouble() == d;
- }
-
- if (src1.isTrue()) {
- if (src2.isFalse())
- return false;
- double d = src2.toNumber(stackFrame.callFrame);
- CHECK_FOR_EXCEPTION();
- return d == 1.0;
- }
-
- if (src1.isFalse()) {
- if (src2.isTrue())
- return false;
- double d = src2.toNumber(stackFrame.callFrame);
- CHECK_FOR_EXCEPTION();
- return d == 0.0;
- }
-
- if (src1.isUndefined())
- return src2.isCell() && src2.asCell()->structure()->masqueradesAsUndefined(stackFrame.callFrame->lexicalGlobalObject());
-
- if (src1.isNull())
- return src2.isCell() && src2.asCell()->structure()->masqueradesAsUndefined(stackFrame.callFrame->lexicalGlobalObject());
-
- JSCell* cell1 = src1.asCell();
-
- if (cell1->isString()) {
- if (src2.isInt32())
- return jsToNumber(jsCast<JSString*>(cell1)->value(stackFrame.callFrame)) == src2.asInt32();
-
- if (src2.isDouble())
- return jsToNumber(jsCast<JSString*>(cell1)->value(stackFrame.callFrame)) == src2.asDouble();
-
- if (src2.isTrue())
- return jsToNumber(jsCast<JSString*>(cell1)->value(stackFrame.callFrame)) == 1.0;
-
- if (src2.isFalse())
- return jsToNumber(jsCast<JSString*>(cell1)->value(stackFrame.callFrame)) == 0.0;
-
- JSCell* cell2 = src2.asCell();
- if (cell2->isString())
- return jsCast<JSString*>(cell1)->value(stackFrame.callFrame) == jsCast<JSString*>(cell2)->value(stackFrame.callFrame);
-
- src2 = asObject(cell2)->toPrimitive(stackFrame.callFrame);
- CHECK_FOR_EXCEPTION();
- goto start;
- }
-
- if (src2.isObject())
- return asObject(cell1) == asObject(src2);
- src1 = asObject(cell1)->toPrimitive(stackFrame.callFrame);
- CHECK_FOR_EXCEPTION();
- goto start;
-
-#else // USE(JSVALUE32_64)
- CallFrame* callFrame = stackFrame.callFrame;
-
- bool result = JSValue::equalSlowCaseInline(callFrame, src1, src2);
- CHECK_FOR_EXCEPTION_AT_END();
- return result;
-#endif // USE(JSVALUE32_64)
-}
-
-DEFINE_STUB_FUNCTION(int, op_eq_strings)
-{
-#if USE(JSVALUE32_64)
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSString* string1 = stackFrame.args[0].jsString();
- JSString* string2 = stackFrame.args[1].jsString();
-
- ASSERT(string1->isString());
- ASSERT(string2->isString());
- return string1->value(stackFrame.callFrame) == string2->value(stackFrame.callFrame);
-#else
- UNUSED_PARAM(args);
- RELEASE_ASSERT_NOT_REACHED();
- return 0;
-#endif
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_lshift)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue val = stackFrame.args[0].jsValue();
- JSValue shift = stackFrame.args[1].jsValue();
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsNumber((val.toInt32(callFrame)) << (shift.toUInt32(callFrame) & 0x1f));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitand)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src1 = stackFrame.args[0].jsValue();
- JSValue src2 = stackFrame.args[1].jsValue();
-
- ASSERT(!src1.isInt32() || !src2.isInt32());
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsNumber(src1.toInt32(callFrame) & src2.toInt32(callFrame));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_rshift)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue val = stackFrame.args[0].jsValue();
- JSValue shift = stackFrame.args[1].jsValue();
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsNumber((val.toInt32(callFrame)) >> (shift.toUInt32(callFrame) & 0x1f));
-
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_with_base)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = JSScope::resolveWithBase(callFrame, stackFrame.args[0].identifier(), &callFrame->registers()[stackFrame.args[1].int32()], stackFrame.args[2].resolveOperations(), stackFrame.args[3].putToBaseOperation());
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_resolve_with_this)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = JSScope::resolveWithThis(callFrame, stackFrame.args[0].identifier(), &callFrame->registers()[stackFrame.args[1].int32()], stackFrame.args[2].resolveOperations());
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(JSObject*, op_new_func_exp)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
- CallFrame* callFrame = stackFrame.callFrame;
-
- FunctionExecutable* function = stackFrame.args[0].function();
- JSFunction* func = JSFunction::create(callFrame, function, callFrame->scope());
- ASSERT(callFrame->codeBlock()->codeType() != FunctionCode || !callFrame->codeBlock()->needsFullScopeChain() || callFrame->uncheckedR(callFrame->codeBlock()->activationRegister()).jsValue());
-
- return func;
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_mod)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue dividendValue = stackFrame.args[0].jsValue();
- JSValue divisorValue = stackFrame.args[1].jsValue();
-
- CallFrame* callFrame = stackFrame.callFrame;
- double d = dividendValue.toNumber(callFrame);
- JSValue result = jsNumber(fmod(d, divisorValue.toNumber(callFrame)));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_urshift)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue val = stackFrame.args[0].jsValue();
- JSValue shift = stackFrame.args[1].jsValue();
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue result = jsNumber((val.toUInt32(callFrame)) >> (shift.toUInt32(callFrame) & 0x1f));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitxor)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src1 = stackFrame.args[0].jsValue();
- JSValue src2 = stackFrame.args[1].jsValue();
-
- CallFrame* callFrame = stackFrame.callFrame;
-
- JSValue result = jsNumber(src1.toInt32(callFrame) ^ src2.toInt32(callFrame));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(JSObject*, op_new_regexp)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
-
- RegExp* regExp = stackFrame.args[0].regExp();
- if (!regExp->isValid()) {
- stackFrame.vm->exception = createSyntaxError(callFrame, "Invalid flags supplied to RegExp constructor.");
- VM_THROW_EXCEPTION();
- }
-
- return RegExpObject::create(*stackFrame.vm, stackFrame.callFrame->lexicalGlobalObject(), stackFrame.callFrame->lexicalGlobalObject()->regExpStructure(), regExp);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_bitor)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src1 = stackFrame.args[0].jsValue();
- JSValue src2 = stackFrame.args[1].jsValue();
-
- CallFrame* callFrame = stackFrame.callFrame;
-
- JSValue result = jsNumber(src1.toInt32(callFrame) | src2.toInt32(callFrame));
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_call_eval)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- CallFrame* callerFrame = callFrame->callerFrame();
- ASSERT(callFrame->callerFrame()->codeBlock()->codeType() != FunctionCode
- || !callFrame->callerFrame()->codeBlock()->needsFullScopeChain()
- || callFrame->callerFrame()->uncheckedR(callFrame->callerFrame()->codeBlock()->activationRegister()).jsValue());
-
- callFrame->setScope(callerFrame->scope());
- callFrame->setReturnPC(static_cast<Instruction*>((STUB_RETURN_ADDRESS).value()));
- callFrame->setCodeBlock(0);
-
- if (!isHostFunction(callFrame->calleeAsValue(), globalFuncEval))
- return JSValue::encode(JSValue());
-
- JSValue result = eval(callFrame);
- if (stackFrame.vm->exception)
- return throwExceptionFromOpCall<EncodedJSValue>(stackFrame, callFrame, STUB_RETURN_ADDRESS);
-
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(void*, op_throw)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
- ExceptionHandler handler = jitThrow(stackFrame.vm, stackFrame.callFrame, stackFrame.args[0].jsValue(), STUB_RETURN_ADDRESS);
- STUB_SET_RETURN_ADDRESS(handler.catchRoutine);
- return handler.callFrame;
-}
-
-DEFINE_STUB_FUNCTION(JSPropertyNameIterator*, op_get_pnames)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSObject* o = stackFrame.args[0].jsObject();
- Structure* structure = o->structure();
- JSPropertyNameIterator* jsPropertyNameIterator = structure->enumerationCache();
- if (!jsPropertyNameIterator || jsPropertyNameIterator->cachedPrototypeChain() != structure->prototypeChain(callFrame))
- jsPropertyNameIterator = JSPropertyNameIterator::create(callFrame, o);
- return jsPropertyNameIterator;
-}
-
-DEFINE_STUB_FUNCTION(int, has_property)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSObject* base = stackFrame.args[0].jsObject();
- JSString* property = stackFrame.args[1].jsString();
- int result = base->hasProperty(stackFrame.callFrame, Identifier(stackFrame.callFrame, property->value(stackFrame.callFrame)));
- CHECK_FOR_EXCEPTION_AT_END();
- return result;
-}
-
-DEFINE_STUB_FUNCTION(void, op_push_with_scope)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSObject* o = stackFrame.args[0].jsValue().toObject(stackFrame.callFrame);
- CHECK_FOR_EXCEPTION_VOID();
- stackFrame.callFrame->setScope(JSWithScope::create(stackFrame.callFrame, o));
-}
-
-DEFINE_STUB_FUNCTION(void, op_pop_scope)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- stackFrame.callFrame->setScope(stackFrame.callFrame->scope()->next());
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_typeof)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- return JSValue::encode(jsTypeStringForValue(stackFrame.callFrame, stackFrame.args[0].jsValue()));
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_object)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- return JSValue::encode(jsBoolean(jsIsObjectType(stackFrame.callFrame, stackFrame.args[0].jsValue())));
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_is_function)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- return JSValue::encode(jsBoolean(jsIsFunctionType(stackFrame.args[0].jsValue())));
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_stricteq)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src1 = stackFrame.args[0].jsValue();
- JSValue src2 = stackFrame.args[1].jsValue();
-
- bool result = JSValue::strictEqual(stackFrame.callFrame, src1, src2);
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(jsBoolean(result));
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_to_primitive)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- return JSValue::encode(stackFrame.args[0].jsValue().toPrimitive(stackFrame.callFrame));
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_strcat)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue result = jsString(stackFrame.callFrame, &stackFrame.callFrame->registers()[stackFrame.args[0].int32()], stackFrame.args[1].int32());
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(result);
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_nstricteq)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src1 = stackFrame.args[0].jsValue();
- JSValue src2 = stackFrame.args[1].jsValue();
-
- bool result = !JSValue::strictEqual(stackFrame.callFrame, src1, src2);
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(jsBoolean(result));
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_to_number)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue src = stackFrame.args[0].jsValue();
- CallFrame* callFrame = stackFrame.callFrame;
-
- double number = src.toNumber(callFrame);
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(jsNumber(number));
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_in)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- JSValue baseVal = stackFrame.args[1].jsValue();
-
- if (!baseVal.isObject()) {
- stackFrame.vm->exception = createInvalidParameterError(stackFrame.callFrame, "in", baseVal);
- VM_THROW_EXCEPTION();
- }
-
- JSValue propName = stackFrame.args[0].jsValue();
- JSObject* baseObj = asObject(baseVal);
-
- uint32_t i;
- if (propName.getUInt32(i))
- return JSValue::encode(jsBoolean(baseObj->hasProperty(callFrame, i)));
-
- if (isName(propName))
- return JSValue::encode(jsBoolean(baseObj->hasProperty(callFrame, jsCast<NameInstance*>(propName.asCell())->privateName())));
-
- Identifier property(callFrame, propName.toString(callFrame)->value(callFrame));
- CHECK_FOR_EXCEPTION();
- return JSValue::encode(jsBoolean(baseObj->hasProperty(callFrame, property)));
-}
-
-DEFINE_STUB_FUNCTION(void, op_push_name_scope)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSNameScope* scope = JSNameScope::create(stackFrame.callFrame, stackFrame.args[0].identifier(), stackFrame.args[1].jsValue(), stackFrame.args[2].int32());
-
- CallFrame* callFrame = stackFrame.callFrame;
- callFrame->setScope(scope);
-}
-
-DEFINE_STUB_FUNCTION(void, op_put_by_index)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- unsigned property = stackFrame.args[1].int32();
-
- JSValue arrayValue = stackFrame.args[0].jsValue();
- ASSERT(isJSArray(arrayValue));
- asArray(arrayValue)->putDirectIndex(callFrame, property, stackFrame.args[2].jsValue());
-}
-
-DEFINE_STUB_FUNCTION(void*, op_switch_imm)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue scrutinee = stackFrame.args[0].jsValue();
- unsigned tableIndex = stackFrame.args[1].int32();
- CallFrame* callFrame = stackFrame.callFrame;
- CodeBlock* codeBlock = callFrame->codeBlock();
-
- if (scrutinee.isInt32())
- return codeBlock->immediateSwitchJumpTable(tableIndex).ctiForValue(scrutinee.asInt32()).executableAddress();
- if (scrutinee.isDouble() && scrutinee.asDouble() == static_cast<int32_t>(scrutinee.asDouble()))
- return codeBlock->immediateSwitchJumpTable(tableIndex).ctiForValue(static_cast<int32_t>(scrutinee.asDouble())).executableAddress();
- return codeBlock->immediateSwitchJumpTable(tableIndex).ctiDefault.executableAddress();
-}
-
-DEFINE_STUB_FUNCTION(void*, op_switch_char)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue scrutinee = stackFrame.args[0].jsValue();
- unsigned tableIndex = stackFrame.args[1].int32();
- CallFrame* callFrame = stackFrame.callFrame;
- CodeBlock* codeBlock = callFrame->codeBlock();
-
- void* result = codeBlock->characterSwitchJumpTable(tableIndex).ctiDefault.executableAddress();
-
- if (scrutinee.isString()) {
- StringImpl* value = asString(scrutinee)->value(callFrame).impl();
- if (value->length() == 1)
- result = codeBlock->characterSwitchJumpTable(tableIndex).ctiForValue((*value)[0]).executableAddress();
- }
-
- CHECK_FOR_EXCEPTION_AT_END();
- return result;
-}
-
-DEFINE_STUB_FUNCTION(void*, op_switch_string)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- JSValue scrutinee = stackFrame.args[0].jsValue();
- unsigned tableIndex = stackFrame.args[1].int32();
- CallFrame* callFrame = stackFrame.callFrame;
- CodeBlock* codeBlock = callFrame->codeBlock();
-
- void* result = codeBlock->stringSwitchJumpTable(tableIndex).ctiDefault.executableAddress();
-
- if (scrutinee.isString()) {
- StringImpl* value = asString(scrutinee)->value(callFrame).impl();
- result = codeBlock->stringSwitchJumpTable(tableIndex).ctiForValue(value).executableAddress();
- }
-
- CHECK_FOR_EXCEPTION_AT_END();
- return result;
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, op_del_by_val)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
-
- JSValue baseValue = stackFrame.args[0].jsValue();
- JSObject* baseObj = baseValue.toObject(callFrame); // may throw
-
- JSValue subscript = stackFrame.args[1].jsValue();
- bool result;
- uint32_t i;
- if (subscript.getUInt32(i))
- result = baseObj->methodTable()->deletePropertyByIndex(baseObj, callFrame, i);
- else if (isName(subscript))
- result = baseObj->methodTable()->deleteProperty(baseObj, callFrame, jsCast<NameInstance*>(subscript.asCell())->privateName());
- else {
- CHECK_FOR_EXCEPTION();
- Identifier property(callFrame, subscript.toString(callFrame)->value(callFrame));
- CHECK_FOR_EXCEPTION();
- result = baseObj->methodTable()->deleteProperty(baseObj, callFrame, property);
- }
-
- if (!result && callFrame->codeBlock()->isStrictMode())
- stackFrame.vm->exception = createTypeError(stackFrame.callFrame, "Unable to delete property.");
-
- CHECK_FOR_EXCEPTION_AT_END();
- return JSValue::encode(jsBoolean(result));
-}
-
-DEFINE_STUB_FUNCTION(void, op_put_getter_setter)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
-
- ASSERT(stackFrame.args[0].jsValue().isObject());
- JSObject* baseObj = asObject(stackFrame.args[0].jsValue());
-
- GetterSetter* accessor = GetterSetter::create(callFrame);
-
- JSValue getter = stackFrame.args[2].jsValue();
- JSValue setter = stackFrame.args[3].jsValue();
- ASSERT(getter.isObject() || getter.isUndefined());
- ASSERT(setter.isObject() || setter.isUndefined());
- ASSERT(getter.isObject() || setter.isObject());
-
- if (!getter.isUndefined())
- accessor->setGetter(callFrame->vm(), asObject(getter));
- if (!setter.isUndefined())
- accessor->setSetter(callFrame->vm(), asObject(setter));
- baseObj->putDirectAccessor(callFrame, stackFrame.args[1].identifier(), accessor, Accessor);
-}
-
-DEFINE_STUB_FUNCTION(void, op_throw_static_error)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- String message = errorDescriptionForValue(callFrame, stackFrame.args[0].jsValue())->value(callFrame);
- if (stackFrame.args[1].asInt32)
- stackFrame.vm->exception = createReferenceError(callFrame, message);
- else
- stackFrame.vm->exception = createTypeError(callFrame, message);
- VM_THROW_EXCEPTION_AT_END();
-}
-
-DEFINE_STUB_FUNCTION(void, op_debug)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
-
- int debugHookID = stackFrame.args[0].int32();
- int firstLine = stackFrame.args[1].int32();
- int lastLine = stackFrame.args[2].int32();
- int column = stackFrame.args[3].int32();
-
- stackFrame.vm->interpreter->debug(callFrame, static_cast<DebugHookID>(debugHookID), firstLine, lastLine, column);
-}
-
-DEFINE_STUB_FUNCTION(void*, vm_throw)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
- VM* vm = stackFrame.vm;
- ExceptionHandler handler = jitThrow(vm, stackFrame.callFrame, vm->exception, vm->exceptionLocation);
- STUB_SET_RETURN_ADDRESS(handler.catchRoutine);
- return handler.callFrame;
-}
-
-DEFINE_STUB_FUNCTION(EncodedJSValue, to_object)
-{
- STUB_INIT_STACK_FRAME(stackFrame);
-
- CallFrame* callFrame = stackFrame.callFrame;
- return JSValue::encode(stackFrame.args[0].jsValue().toObject(callFrame));
-}
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
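
The stubs removed above all receive their operands through a hand-built JITStackFrame rather than through the native calling convention. The sketch below contrasts the two shapes using made-up names (it is not JavaScriptCore API, only an illustration of the marshalling difference on a 64-bit target):

#include <cstdint>

// Hypothetical stand-ins, not JSC types: the old cti_* stubs indexed into an argument
// block that trampoline assembly laid out in memory, while the direct-call style that
// supersedes them takes operands as ordinary C parameters.
struct StubArgs { int64_t slot[6]; };

extern "C" int64_t ctiStyleSub(StubArgs* args)
{
    // Old shape: every operand is a load from the marshalled frame.
    return args->slot[0] - args->slot[1];
}

extern "C" int64_t operationStyleSub(int64_t left, int64_t right)
{
    // New shape: the platform ABI has already placed the operands in registers.
    return left - right;
}

Dropping the in-memory argument block is what makes the per-platform JITStackFrame layouts and trampolines in JITStubs.h (deleted next) unnecessary.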
diff --git a/Source/JavaScriptCore/jit/JITStubs.h b/Source/JavaScriptCore/jit/JITStubs.h
deleted file mode 100644
index ad7ecc851..000000000
--- a/Source/JavaScriptCore/jit/JITStubs.h
+++ /dev/null
@@ -1,460 +0,0 @@
-/*
- * Copyright (C) 2008, 2013 Apple Inc. All rights reserved.
- * Copyright (C) Research In Motion Limited 2010. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
- * its contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef JITStubs_h
-#define JITStubs_h
-
-#include "CallData.h"
-#include "Intrinsic.h"
-#include "LowLevelInterpreter.h"
-#include "MacroAssemblerCodeRef.h"
-#include "Register.h"
-#include "ResolveOperation.h"
-
-namespace JSC {
-
-#if ENABLE(JIT)
-
-struct StructureStubInfo;
-
-class ArrayAllocationProfile;
-class CodeBlock;
-class ExecutablePool;
-class FunctionExecutable;
-class Identifier;
-class VM;
-class JSGlobalObject;
-class JSObject;
-class JSPropertyNameIterator;
-class JSStack;
-class JSValue;
-class JSValueEncodedAsPointer;
-class LegacyProfiler;
-class NativeExecutable;
-class PropertySlot;
-class PutPropertySlot;
-class RegExp;
-class Structure;
-
-template <typename T> class Weak;
-
-union JITStubArg {
- void* asPointer;
- EncodedJSValue asEncodedJSValue;
- int32_t asInt32;
-
- JSValue jsValue() { return JSValue::decode(asEncodedJSValue); }
- JSObject* jsObject() { return static_cast<JSObject*>(asPointer); }
- Register* reg() { return static_cast<Register*>(asPointer); }
- Identifier& identifier() { return *static_cast<Identifier*>(asPointer); }
- int32_t int32() { return asInt32; }
- CodeBlock* codeBlock() { return static_cast<CodeBlock*>(asPointer); }
- FunctionExecutable* function() { return static_cast<FunctionExecutable*>(asPointer); }
- RegExp* regExp() { return static_cast<RegExp*>(asPointer); }
- JSPropertyNameIterator* propertyNameIterator() { return static_cast<JSPropertyNameIterator*>(asPointer); }
- JSGlobalObject* globalObject() { return static_cast<JSGlobalObject*>(asPointer); }
- JSString* jsString() { return static_cast<JSString*>(asPointer); }
- Structure* structure() { return static_cast<Structure*>(asPointer); }
- ReturnAddressPtr returnAddress() { return ReturnAddressPtr(asPointer); }
- ResolveOperations* resolveOperations() { return static_cast<ResolveOperations*>(asPointer); }
- PutToBaseOperation* putToBaseOperation() { return static_cast<PutToBaseOperation*>(asPointer); }
- ArrayAllocationProfile* arrayAllocationProfile() { return static_cast<ArrayAllocationProfile*>(asPointer); }
-};
-
-#if !OS(WINDOWS) && CPU(X86_64)
-struct JITStackFrame {
- void* reserved; // Unused
- JITStubArg args[6];
- void* padding[2]; // Maintain 32-byte stack alignment (possibly overkill).
-
- void* code;
- JSStack* stack;
- CallFrame* callFrame;
- void* unused1;
- void* unused2;
- VM* vm;
-
- void* savedRBX;
- void* savedR15;
- void* savedR14;
- void* savedR13;
- void* savedR12;
- void* savedRBP;
- void* savedRIP;
-
- // When JIT code makes a call, it pushes its return address just below the rest of the stack.
- ReturnAddressPtr* returnAddressSlot() { return reinterpret_cast<ReturnAddressPtr*>(this) - 1; }
-};
-#elif OS(WINDOWS) && CPU(X86_64)
-struct JITStackFrame {
- void* shadow[4]; // Shadow space reserved for a callee's parameters home addresses
- void* reserved; // Unused, also maintains the 16-bytes stack alignment
- JITStubArg args[6];
-
- void* savedRBX;
- void* savedR15;
- void* savedR14;
- void* savedR13;
- void* savedR12;
- void* savedRBP;
- void* savedRIP;
-
- // Home addresses for our register passed parameters
- // http://msdn.microsoft.com/en-us/library/ew5tede7.aspx
- void* code;
- JSStack* stack;
- CallFrame* callFrame;
- void* unused1;
-
- // Passed on the stack
- void* unused2;
- VM* vm;
-
- // When JIT code makes a call, it pushes its return address just below the rest of the stack.
- ReturnAddressPtr* returnAddressSlot() { return reinterpret_cast<ReturnAddressPtr*>(this) - 1; }
-};
-#elif CPU(X86)
-#if COMPILER(MSVC) || (OS(WINDOWS) && COMPILER(GCC)) || OS(QNX)
-#pragma pack(push)
-#pragma pack(4)
-#endif // COMPILER(MSVC) || (OS(WINDOWS) && COMPILER(GCC)) || OS(QNX)
-struct JITStackFrame {
- void* reserved; // Unused
- JITStubArg args[6];
-#if USE(JSVALUE32_64)
- void* padding[2]; // Maintain 16-byte stack alignment.
-#endif
-
- void* savedEBX;
- void* savedEDI;
- void* savedESI;
- void* savedEBP;
- void* savedEIP;
-
- void* code;
- JSStack* stack;
- CallFrame* callFrame;
- void* unused1;
- void* unused2;
- VM* vm;
-
- // When JIT code makes a call, it pushes its return address just below the rest of the stack.
- ReturnAddressPtr* returnAddressSlot() { return reinterpret_cast<ReturnAddressPtr*>(this) - 1; }
-};
-#if COMPILER(MSVC) || (OS(WINDOWS) && COMPILER(GCC)) || OS(QNX)
-#pragma pack(pop)
-#endif // COMPILER(MSVC) || (OS(WINDOWS) && COMPILER(GCC)) || OS(QNX)
-#elif CPU(ARM_THUMB2)
-struct JITStackFrame {
- JITStubArg reserved; // Unused
- JITStubArg args[6];
-
- ReturnAddressPtr thunkReturnAddress;
-
- void* preservedReturnAddress;
- void* preservedR4;
- void* preservedR5;
- void* preservedR6;
- void* preservedR7;
- void* preservedR8;
- void* preservedR9;
- void* preservedR10;
- void* preservedR11;
-
-    // These arguments are passed in r1..r3 (r0 contained the entry code pointer, which is not preserved)
- JSStack* stack;
- CallFrame* callFrame;
-
- // These arguments passed on the stack.
- void* unused1;
- VM* vm;
-
- ReturnAddressPtr* returnAddressSlot() { return &thunkReturnAddress; }
-};
-#elif CPU(ARM_TRADITIONAL)
-#if COMPILER(MSVC)
-#pragma pack(push)
-#pragma pack(4)
-#endif // COMPILER(MSVC)
-struct JITStackFrame {
- JITStubArg padding; // Unused
- JITStubArg args[7];
-
- ReturnAddressPtr thunkReturnAddress;
-
- void* preservedR4;
- void* preservedR5;
- void* preservedR6;
- void* preservedR8;
- void* preservedR9;
- void* preservedR10;
- void* preservedR11;
- void* preservedLink;
-
- JSStack* stack;
- CallFrame* callFrame;
- void* unused1;
-
- // These arguments passed on the stack.
- void* unused2;
- VM* vm;
-
- // When JIT code makes a call, it pushes its return address just below the rest of the stack.
- ReturnAddressPtr* returnAddressSlot() { return &thunkReturnAddress; }
-};
-#if COMPILER(MSVC)
-#pragma pack(pop)
-#endif // COMPILER(MSVC)
-#elif CPU(MIPS)
-struct JITStackFrame {
- JITStubArg reserved; // Unused
- JITStubArg args[6];
-
-#if USE(JSVALUE32_64)
- void* padding; // Make the overall stack length 8-byte aligned.
-#endif
-
- void* preservedGP; // store GP when using PIC code
- void* preservedS0;
- void* preservedS1;
- void* preservedS2;
- void* preservedS3;
- void* preservedS4;
- void* preservedReturnAddress;
-
- ReturnAddressPtr thunkReturnAddress;
-
-    // These arguments are passed in a1..a3 (a0 contained the entry code pointer, which is not preserved)
- JSStack* stack;
- CallFrame* callFrame;
- void* unused1;
-
- // These arguments passed on the stack.
- void* unused2;
- VM* vm;
-
- ReturnAddressPtr* returnAddressSlot() { return &thunkReturnAddress; }
-};
-#elif CPU(SH4)
-struct JITStackFrame {
- JITStubArg padding; // Unused
- JITStubArg args[6];
-
- ReturnAddressPtr thunkReturnAddress;
-
- void* savedR8;
- void* savedR9;
- void* savedR10;
- void* savedR11;
- void* savedR13;
- void* savedRPR;
- void* savedR14;
-
- // These arguments are passed in r5, r6 and r7.
- JSStack* stack;
- CallFrame* callFrame;
- JSValue* exception;
-
- // These arguments are passed on the stack.
- void* unused1;
- VM* vm;
-
- ReturnAddressPtr* returnAddressSlot() { return &thunkReturnAddress; }
-};
-#else
-#error "JITStackFrame not defined for this platform."
-#endif
-
-#define JITSTACKFRAME_ARGS_INDEX (OBJECT_OFFSETOF(JITStackFrame, args) / sizeof(void*))
-
-#define STUB_ARGS_DECLARATION void** args
-#define STUB_ARGS (args)
-
-#if CPU(X86)
-#if COMPILER(MSVC)
-#define JIT_STUB __fastcall
-#elif COMPILER(GCC)
-#define JIT_STUB __attribute__ ((fastcall))
-#elif COMPILER(SUNCC)
-#define JIT_STUB
-#else
-#error "JIT_STUB function calls require fastcall conventions on x86, add appropriate directive/attribute here for your compiler!"
-#endif
-#else
-#define JIT_STUB
-#endif
-
-extern "C" void ctiVMThrowTrampoline();
-extern "C" void ctiOpThrowNotCaught();
-extern "C" EncodedJSValue ctiTrampoline(void* code, JSStack*, CallFrame*, void* /*unused1*/, void* /*unused2*/, VM*);
-#if ENABLE(DFG_JIT)
-extern "C" void ctiTrampolineEnd();
-
-inline bool returnAddressIsInCtiTrampoline(ReturnAddressPtr returnAddress)
-{
- return returnAddress.value() >= bitwise_cast<void*>(&ctiTrampoline)
- && returnAddress.value() < bitwise_cast<void*>(&ctiTrampolineEnd);
-}
-#endif
-
-void performPlatformSpecificJITAssertions(VM*);
-
-extern "C" {
-EncodedJSValue JIT_STUB cti_op_add(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_bitand(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_bitor(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_bitxor(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_call_NotJSFunction(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_call_eval(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_construct_NotJSConstruct(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_check_has_instance(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_create_this(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_convert_this(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_create_arguments(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_del_by_id(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_del_by_val(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_div(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_get_by_id(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_get_by_id_array_fail(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_get_by_id_custom_stub(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_get_by_id_generic(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_get_by_id_getter_stub(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_get_by_id_proto_fail(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_get_by_id_proto_list(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_get_by_id_proto_list_full(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_get_by_id_self_fail(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_get_by_id_string_fail(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_get_by_val(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_get_by_val_generic(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_get_by_val_string(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_in(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_instanceof(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_is_boolean(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_is_function(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_is_number(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_is_object(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_is_string(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_is_undefined(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_less(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_lesseq(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_greater(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_greatereq(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_lshift(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_mod(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_mul(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_negate(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_not(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_nstricteq(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_dec(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_inc(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_resolve(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_resolve_base(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_resolve_base_strict_put(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_resolve_with_base(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_resolve_with_this(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void JIT_STUB cti_op_put_to_base(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_rshift(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_strcat(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_stricteq(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_sub(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_to_number(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_to_primitive(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_typeof(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_op_urshift(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-EncodedJSValue JIT_STUB cti_to_object(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-JSObject* JIT_STUB cti_op_new_array(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-JSObject* JIT_STUB cti_op_new_array_with_size(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-JSObject* JIT_STUB cti_op_new_array_buffer(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-JSObject* JIT_STUB cti_op_new_func(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-JSObject* JIT_STUB cti_op_new_func_exp(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-JSObject* JIT_STUB cti_op_new_object(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-JSObject* JIT_STUB cti_op_new_regexp(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-JSObject* JIT_STUB cti_op_push_activation(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void JIT_STUB cti_op_push_name_scope(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void JIT_STUB cti_op_push_with_scope(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-JSObject* JIT_STUB cti_op_put_by_id_transition_realloc(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-JSPropertyNameIterator* JIT_STUB cti_op_get_pnames(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-int JIT_STUB cti_op_eq(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-int JIT_STUB cti_op_eq_strings(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-int JIT_STUB cti_op_jless(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-int JIT_STUB cti_op_jlesseq(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-int JIT_STUB cti_op_jgreater(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-int JIT_STUB cti_op_jgreatereq(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-int JIT_STUB cti_op_jtrue(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void* JIT_STUB cti_op_load_varargs(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void JIT_STUB cti_handle_watchdog_timer(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-int JIT_STUB cti_has_property(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void JIT_STUB cti_op_debug(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void JIT_STUB cti_op_end(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void JIT_STUB cti_op_pop_scope(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void JIT_STUB cti_op_profile_did_call(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void JIT_STUB cti_op_profile_will_call(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void JIT_STUB cti_op_put_by_id(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void JIT_STUB cti_op_put_by_id_fail(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void JIT_STUB cti_op_put_by_id_generic(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void JIT_STUB cti_op_put_by_id_direct(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void JIT_STUB cti_op_put_by_id_direct_fail(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void JIT_STUB cti_op_put_by_id_direct_generic(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void JIT_STUB cti_op_put_by_index(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void JIT_STUB cti_op_put_by_val(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void JIT_STUB cti_op_put_by_val_generic(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void JIT_STUB cti_op_put_getter_setter(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void JIT_STUB cti_op_init_global_const_check(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void JIT_STUB cti_op_tear_off_activation(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void JIT_STUB cti_op_tear_off_arguments(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void JIT_STUB cti_op_throw_static_error(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-#if ENABLE(DFG_JIT)
-void JIT_STUB cti_optimize(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-#endif
-void* JIT_STUB cti_op_call_arityCheck(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void* JIT_STUB cti_op_construct_arityCheck(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void* JIT_STUB cti_op_call_jitCompile(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void* JIT_STUB cti_op_construct_jitCompile(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void* JIT_STUB cti_op_switch_char(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void* JIT_STUB cti_op_switch_imm(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void* JIT_STUB cti_op_switch_string(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void* JIT_STUB cti_op_throw(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void* JIT_STUB cti_stack_check(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void* JIT_STUB cti_vm_lazyLinkCall(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void* JIT_STUB cti_vm_lazyLinkClosureCall(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void* JIT_STUB cti_vm_lazyLinkConstruct(STUB_ARGS_DECLARATION) WTF_INTERNAL;
-void* JIT_STUB cti_vm_throw(STUB_ARGS_DECLARATION) REFERENCED_FROM_ASM WTF_INTERNAL;
-} // extern "C"
-
-#elif ENABLE(LLINT_C_LOOP)
-
-struct JITStackFrame {
- VM* vm;
-};
-
-#endif // ENABLE(LLINT_C_LOOP)
-
-} // namespace JSC
-
-#endif // JITStubs_h
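
Every JITStackFrame variant deleted above encodes the same contract: trampoline code saves the callee-preserved registers and reserves the argument slots, and the stub can then recover the JIT caller's return address either at a fixed offset just below the frame (the x86 flavours) or from an explicit thunkReturnAddress member (ARM, MIPS, SH4). A minimal sketch of the offset trick, with invented field names:

#include <cstddef>
#include <cstdio>

// Simplified stand-in for one frame flavour. The JIT's call instruction pushed its
// return address immediately below this struct, so the slot is one pointer before
// `this` -- the same arithmetic as `reinterpret_cast<ReturnAddressPtr*>(this) - 1`.
struct MiniJITStackFrame {
    void* reserved;
    void* args[6];
    void* savedRegisters[6];

    void** returnAddressSlot() { return reinterpret_cast<void**>(this) - 1; }
};

int main()
{
    MiniJITStackFrame frame {};
    std::ptrdiff_t offset =
        reinterpret_cast<char*>(&frame) - reinterpret_cast<char*>(frame.returnAddressSlot());
    std::printf("return address slot sits %td bytes below the frame\n", offset);
    return 0;
}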
diff --git a/Source/JavaScriptCore/jit/JITStubsMSVC64.asm b/Source/JavaScriptCore/jit/JITStubsMSVC64.asm
index 054214153..ef9cd4e0e 100644
--- a/Source/JavaScriptCore/jit/JITStubsMSVC64.asm
+++ b/Source/JavaScriptCore/jit/JITStubsMSVC64.asm
@@ -1,5 +1,5 @@
;/*
-; Copyright (C) 2015 The Qt Company Ltd
+; Copyright (C) 2014 Apple Inc. All rights reserved.
;
; Redistribution and use in source and binary forms, with or without
; modification, are permitted provided that the following conditions
@@ -23,62 +23,22 @@
; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
;*/
-EXTERN cti_vm_throw : near
-PUBLIC ctiTrampoline
-PUBLIC ctiVMThrowTrampoline
-PUBLIC ctiOpThrowNotCaught
+EXTERN getHostCallReturnValueWithExecState : near
-_TEXT SEGMENT
-
-ctiTrampoline PROC
- ; Dump register parameters to their home address
- mov qword ptr[rsp+20h], r9
- mov qword ptr[rsp+18h], r8
- mov qword ptr[rsp+10h], rdx
- mov qword ptr[rsp+8h], rcx
-
- push rbp
- mov rbp, rsp
- push r12
- push r13
- push r14
- push r15
- push rbx
+PUBLIC getHostCallReturnValue
- ; Decrease rsp to point to the start of our JITStackFrame
- sub rsp, 58h
- mov r12, 512
- mov r14, 0FFFF000000000000h
- mov r15, 0FFFF000000000002h
- mov r13, r8
- call rcx
- add rsp, 58h
- pop rbx
- pop r15
- pop r14
- pop r13
- pop r12
- pop rbp
- ret
-ctiTrampoline ENDP
-
-ctiVMThrowTrampoline PROC
- mov rcx, rsp
- call cti_vm_throw
- int 3
-ctiVMThrowTrampoline ENDP
+_TEXT SEGMENT
-ctiOpThrowNotCaught PROC
- add rsp, 58h
- pop rbx
- pop r15
- pop r14
- pop r13
- pop r12
- pop rbp
+getHostCallReturnValue PROC
+ lea rcx, [rsp - 8]
+    ; Allocate space for all 4 parameter registers, and align the stack pointer to a 16-byte boundary by allocating another 8 bytes.
+ ; The stack alignment is needed to fix a crash in the CRT library on a floating point instruction.
+ sub rsp, 40
+ call getHostCallReturnValueWithExecState
+ add rsp, 40
ret
-ctiOpThrowNotCaught ENDP
+getHostCallReturnValue ENDP
_TEXT ENDS
-END
\ No newline at end of file
+END
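
The replacement thunk's `sub rsp, 40` follows from the Windows x64 calling convention rather than from anything JIT-specific: 32 bytes of shadow space for the callee's four register parameters, plus 8 bytes of padding because the call into this thunk left rsp 8 bytes short of a 16-byte boundary. A trivial check of that arithmetic in plain C++ (not part of the tree):

#include <cstdio>

int main()
{
    constexpr int shadowSpace  = 4 * 8; // home slots for rcx, rdx, r8, r9
    constexpr int alignmentPad = 8;     // the incoming call pushed 8 bytes, breaking 16-byte alignment
    constexpr int adjustment   = shadowSpace + alignmentPad;

    std::printf("sub rsp, %d\n", adjustment); // prints 40, matching the thunk above
    return 0;
}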
diff --git a/Source/JavaScriptCore/jit/JITSubGenerator.cpp b/Source/JavaScriptCore/jit/JITSubGenerator.cpp
new file mode 100644
index 000000000..3ebaaa372
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITSubGenerator.cpp
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JITSubGenerator.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+void JITSubGenerator::generateFastPath(CCallHelpers& jit)
+{
+ ASSERT(m_scratchGPR != InvalidGPRReg);
+ ASSERT(m_scratchGPR != m_left.payloadGPR());
+ ASSERT(m_scratchGPR != m_right.payloadGPR());
+#if USE(JSVALUE32_64)
+ ASSERT(m_scratchGPR != m_left.tagGPR());
+ ASSERT(m_scratchGPR != m_right.tagGPR());
+ ASSERT(m_scratchFPR != InvalidFPRReg);
+#endif
+
+ m_didEmitFastPath = true;
+
+ CCallHelpers::Jump leftNotInt = jit.branchIfNotInt32(m_left);
+ CCallHelpers::Jump rightNotInt = jit.branchIfNotInt32(m_right);
+
+ jit.move(m_left.payloadGPR(), m_scratchGPR);
+ m_slowPathJumpList.append(jit.branchSub32(CCallHelpers::Overflow, m_right.payloadGPR(), m_scratchGPR));
+
+ jit.boxInt32(m_scratchGPR, m_result);
+
+ m_endJumpList.append(jit.jump());
+
+ if (!jit.supportsFloatingPoint()) {
+ m_slowPathJumpList.append(leftNotInt);
+ m_slowPathJumpList.append(rightNotInt);
+ return;
+ }
+
+ leftNotInt.link(&jit);
+ if (!m_leftOperand.definitelyIsNumber())
+ m_slowPathJumpList.append(jit.branchIfNotNumber(m_left, m_scratchGPR));
+ if (!m_rightOperand.definitelyIsNumber())
+ m_slowPathJumpList.append(jit.branchIfNotNumber(m_right, m_scratchGPR));
+
+ jit.unboxDoubleNonDestructive(m_left, m_leftFPR, m_scratchGPR, m_scratchFPR);
+ CCallHelpers::Jump rightIsDouble = jit.branchIfNotInt32(m_right);
+
+ jit.convertInt32ToDouble(m_right.payloadGPR(), m_rightFPR);
+ CCallHelpers::Jump rightWasInteger = jit.jump();
+
+ rightNotInt.link(&jit);
+ if (!m_rightOperand.definitelyIsNumber())
+ m_slowPathJumpList.append(jit.branchIfNotNumber(m_right, m_scratchGPR));
+
+ jit.convertInt32ToDouble(m_left.payloadGPR(), m_leftFPR);
+
+ rightIsDouble.link(&jit);
+ jit.unboxDoubleNonDestructive(m_right, m_rightFPR, m_scratchGPR, m_scratchFPR);
+
+ rightWasInteger.link(&jit);
+
+ jit.subDouble(m_rightFPR, m_leftFPR);
+ jit.boxDouble(m_leftFPR, m_result);
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
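
The fast path emitted above follows a common snippet shape: attempt an int32 subtraction with an overflow check, fall back to double arithmetic when either operand is already a double, and bail to the slow path for non-numbers or overflow. A standalone C++ sketch of that value flow (not JSC code; the Value struct is illustrative, and __builtin_sub_overflow assumes a GCC/Clang-style compiler):

    #include <cstdint>
    #include <optional>

    struct Value {
        bool isInt32;
        bool isDouble;
        int32_t i;
        double d;
    };

    // Returns nullopt where the JIT snippet would jump to its slow path
    // (a non-number operand, or an int32 subtraction that overflows).
    std::optional<Value> subFastPath(Value left, Value right)
    {
        if (left.isInt32 && right.isInt32) {
            int32_t result;
            if (__builtin_sub_overflow(left.i, right.i, &result))
                return std::nullopt;                 // branchSub32(Overflow, ...) -> slow path
            return Value { true, false, result, 0 }; // boxInt32
        }
        if (!(left.isInt32 || left.isDouble) || !(right.isInt32 || right.isDouble))
            return std::nullopt;                     // branchIfNotNumber -> slow path
        double l = left.isInt32 ? left.i : left.d;   // convertInt32ToDouble / unboxDouble
        double r = right.isInt32 ? right.i : right.d;
        return Value { false, true, 0, l - r };      // subDouble + boxDouble
    }
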
diff --git a/Source/JavaScriptCore/jit/JITSubGenerator.h b/Source/JavaScriptCore/jit/JITSubGenerator.h
new file mode 100644
index 000000000..f677e0878
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITSubGenerator.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITSubGenerator_h
+#define JITSubGenerator_h
+
+#if ENABLE(JIT)
+
+#include "CCallHelpers.h"
+#include "SnippetOperand.h"
+
+namespace JSC {
+
+class JITSubGenerator {
+public:
+ JITSubGenerator(SnippetOperand leftOperand, SnippetOperand rightOperand,
+ JSValueRegs result, JSValueRegs left, JSValueRegs right,
+ FPRReg leftFPR, FPRReg rightFPR, GPRReg scratchGPR, FPRReg scratchFPR)
+ : m_leftOperand(leftOperand)
+ , m_rightOperand(rightOperand)
+ , m_result(result)
+ , m_left(left)
+ , m_right(right)
+ , m_leftFPR(leftFPR)
+ , m_rightFPR(rightFPR)
+ , m_scratchGPR(scratchGPR)
+ , m_scratchFPR(scratchFPR)
+ { }
+
+ void generateFastPath(CCallHelpers&);
+
+ bool didEmitFastPath() const { return m_didEmitFastPath; }
+ CCallHelpers::JumpList& endJumpList() { return m_endJumpList; }
+ CCallHelpers::JumpList& slowPathJumpList() { return m_slowPathJumpList; }
+
+private:
+ SnippetOperand m_leftOperand;
+ SnippetOperand m_rightOperand;
+ JSValueRegs m_result;
+ JSValueRegs m_left;
+ JSValueRegs m_right;
+ FPRReg m_leftFPR;
+ FPRReg m_rightFPR;
+ GPRReg m_scratchGPR;
+ FPRReg m_scratchFPR;
+ bool m_didEmitFastPath { false };
+
+ CCallHelpers::JumpList m_endJumpList;
+ CCallHelpers::JumpList m_slowPathJumpList;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // JITSubGenerator_h
diff --git a/Source/JavaScriptCore/jit/JITThunks.cpp b/Source/JavaScriptCore/jit/JITThunks.cpp
index e11774be0..5d4269d61 100644
--- a/Source/JavaScriptCore/jit/JITThunks.cpp
+++ b/Source/JavaScriptCore/jit/JITThunks.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -31,12 +31,12 @@
#include "Executable.h"
#include "JIT.h"
#include "VM.h"
-#include "Operations.h"
+#include "JSCInlines.h"
namespace JSC {
JITThunks::JITThunks()
- : m_hostFunctionStubMap(adoptPtr(new HostFunctionStubMap))
+ : m_hostFunctionStubMap(std::make_unique<HostFunctionStubMap>())
{
}
@@ -46,61 +46,84 @@ JITThunks::~JITThunks()
MacroAssemblerCodePtr JITThunks::ctiNativeCall(VM* vm)
{
-#if ENABLE(LLINT)
if (!vm->canUseJIT())
return MacroAssemblerCodePtr::createLLIntCodePtr(llint_native_call_trampoline);
-#endif
return ctiStub(vm, nativeCallGenerator).code();
}
+
MacroAssemblerCodePtr JITThunks::ctiNativeConstruct(VM* vm)
{
-#if ENABLE(LLINT)
if (!vm->canUseJIT())
return MacroAssemblerCodePtr::createLLIntCodePtr(llint_native_construct_trampoline);
-#endif
return ctiStub(vm, nativeConstructGenerator).code();
}
+MacroAssemblerCodePtr JITThunks::ctiNativeTailCall(VM* vm)
+{
+ ASSERT(vm->canUseJIT());
+ return ctiStub(vm, nativeTailCallGenerator).code();
+}
+
MacroAssemblerCodeRef JITThunks::ctiStub(VM* vm, ThunkGenerator generator)
{
+ LockHolder locker(m_lock);
CTIStubMap::AddResult entry = m_ctiStubMap.add(generator, MacroAssemblerCodeRef());
- if (entry.isNewEntry)
+ if (entry.isNewEntry) {
+ // Compilation thread can only retrieve existing entries.
+ ASSERT(!isCompilationThread());
entry.iterator->value = generator(vm);
+ }
return entry.iterator->value;
}
-NativeExecutable* JITThunks::hostFunctionStub(VM* vm, NativeFunction function, NativeFunction constructor)
+void JITThunks::finalize(Handle<Unknown> handle, void*)
+{
+ auto* nativeExecutable = jsCast<NativeExecutable*>(handle.get().asCell());
+ weakRemove(*m_hostFunctionStubMap, std::make_pair(nativeExecutable->function(), nativeExecutable->constructor()), nativeExecutable);
+}
+
+NativeExecutable* JITThunks::hostFunctionStub(VM* vm, NativeFunction function, NativeFunction constructor, const String& name)
{
+ ASSERT(!isCompilationThread());
+
if (NativeExecutable* nativeExecutable = m_hostFunctionStubMap->get(std::make_pair(function, constructor)))
return nativeExecutable;
- NativeExecutable* nativeExecutable = NativeExecutable::create(*vm, JIT::compileCTINativeCall(vm, function), function, MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct(vm)), constructor, NoIntrinsic);
- weakAdd(*m_hostFunctionStubMap, std::make_pair(function, constructor), PassWeak<NativeExecutable>(nativeExecutable));
+ NativeExecutable* nativeExecutable = NativeExecutable::create(
+ *vm,
+ adoptRef(new NativeJITCode(JIT::compileCTINativeCall(vm, function), JITCode::HostCallThunk)),
+ function,
+ adoptRef(new NativeJITCode(MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct(vm)), JITCode::HostCallThunk)),
+ constructor, NoIntrinsic, name);
+ weakAdd(*m_hostFunctionStubMap, std::make_pair(function, constructor), Weak<NativeExecutable>(nativeExecutable, this));
return nativeExecutable;
}
-NativeExecutable* JITThunks::hostFunctionStub(VM* vm, NativeFunction function, ThunkGenerator generator, Intrinsic intrinsic)
+NativeExecutable* JITThunks::hostFunctionStub(VM* vm, NativeFunction function, ThunkGenerator generator, Intrinsic intrinsic, const String& name)
{
+ ASSERT(!isCompilationThread());
+ ASSERT(vm->canUseJIT());
+
if (NativeExecutable* nativeExecutable = m_hostFunctionStubMap->get(std::make_pair(function, &callHostFunctionAsConstructor)))
return nativeExecutable;
- MacroAssemblerCodeRef code;
+ RefPtr<JITCode> forCall;
if (generator) {
- if (vm->canUseJIT())
- code = generator(vm);
- else
- code = MacroAssemblerCodeRef();
+ MacroAssemblerCodeRef entry = generator(vm);
+ forCall = adoptRef(new DirectJITCode(entry, entry.code(), JITCode::HostCallThunk));
} else
- code = JIT::compileCTINativeCall(vm, function);
-
- NativeExecutable* nativeExecutable = NativeExecutable::create(*vm, code, function, MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct(vm)), callHostFunctionAsConstructor, intrinsic);
- weakAdd(*m_hostFunctionStubMap, std::make_pair(function, &callHostFunctionAsConstructor), PassWeak<NativeExecutable>(nativeExecutable));
+ forCall = adoptRef(new NativeJITCode(JIT::compileCTINativeCall(vm, function), JITCode::HostCallThunk));
+
+ RefPtr<JITCode> forConstruct = adoptRef(new NativeJITCode(MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct(vm)), JITCode::HostCallThunk));
+
+ NativeExecutable* nativeExecutable = NativeExecutable::create(*vm, forCall, function, forConstruct, callHostFunctionAsConstructor, intrinsic, name);
+ weakAdd(*m_hostFunctionStubMap, std::make_pair(function, &callHostFunctionAsConstructor), Weak<NativeExecutable>(nativeExecutable, this));
return nativeExecutable;
}
void JITThunks::clearHostFunctionStubs()
{
- m_hostFunctionStubMap.clear();
+ m_hostFunctionStubMap = nullptr;
}
} // namespace JSC
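
ctiStub() above is lock-protected memoization: each ThunkGenerator runs at most once per VM, and later callers, including compilation threads, only read the cached code. A generic sketch of that pattern using standard-library stand-ins for JSC's Lock and HashMap (names and types are illustrative):

    #include <functional>
    #include <mutex>
    #include <string>
    #include <unordered_map>

    class StubCache {
    public:
        // Generates the stub on first request; later calls return the cached copy.
        std::string stub(const std::string& name, const std::function<std::string()>& generate)
        {
            std::lock_guard<std::mutex> locker(m_lock);
            auto result = m_stubs.emplace(name, std::string());
            if (result.second)              // isNewEntry: only the first caller runs the generator
                result.first->second = generate();
            return result.first->second;
        }

    private:
        std::mutex m_lock;
        std::unordered_map<std::string, std::string> m_stubs;
    };
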
diff --git a/Source/JavaScriptCore/jit/JITThunks.h b/Source/JavaScriptCore/jit/JITThunks.h
index 769583b1d..f17f56eb6 100644
--- a/Source/JavaScriptCore/jit/JITThunks.h
+++ b/Source/JavaScriptCore/jit/JITThunks.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,8 +26,6 @@
#ifndef JITThunks_h
#define JITThunks_h
-#include <wtf/Platform.h>
-
#if ENABLE(JIT)
#include "CallData.h"
@@ -36,36 +34,42 @@
#include "MacroAssemblerCodeRef.h"
#include "ThunkGenerator.h"
#include "Weak.h"
+#include "WeakHandleOwner.h"
#include "WeakInlines.h"
#include <wtf/HashMap.h>
-#include <wtf/OwnPtr.h>
#include <wtf/RefPtr.h>
+#include <wtf/ThreadingPrimitives.h>
namespace JSC {
class VM;
class NativeExecutable;
-class JITThunks {
+class JITThunks final : private WeakHandleOwner {
+ WTF_MAKE_FAST_ALLOCATED;
public:
JITThunks();
- ~JITThunks();
+ virtual ~JITThunks();
MacroAssemblerCodePtr ctiNativeCall(VM*);
MacroAssemblerCodePtr ctiNativeConstruct(VM*);
+ MacroAssemblerCodePtr ctiNativeTailCall(VM*);
MacroAssemblerCodeRef ctiStub(VM*, ThunkGenerator);
- NativeExecutable* hostFunctionStub(VM*, NativeFunction, NativeFunction constructor);
- NativeExecutable* hostFunctionStub(VM*, NativeFunction, ThunkGenerator, Intrinsic);
+ NativeExecutable* hostFunctionStub(VM*, NativeFunction, NativeFunction constructor, const String& name);
+ NativeExecutable* hostFunctionStub(VM*, NativeFunction, ThunkGenerator, Intrinsic, const String& name);
void clearHostFunctionStubs();
private:
+ void finalize(Handle<Unknown>, void* context) override;
+
typedef HashMap<ThunkGenerator, MacroAssemblerCodeRef> CTIStubMap;
CTIStubMap m_ctiStubMap;
- typedef HashMap<std::pair<NativeFunction, NativeFunction>, Weak<NativeExecutable> > HostFunctionStubMap;
- OwnPtr<HostFunctionStubMap> m_hostFunctionStubMap;
+ typedef HashMap<std::pair<NativeFunction, NativeFunction>, Weak<NativeExecutable>> HostFunctionStubMap;
+ std::unique_ptr<HostFunctionStubMap> m_hostFunctionStubMap;
+ Lock m_lock;
};
} // namespace JSC
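
The host function stub map pairs weak values with a finalizer: JITThunks is the WeakHandleOwner, so finalize() prunes an entry once its NativeExecutable dies. A rough sketch of the same weak-value caching idea, with std::weak_ptr standing in for JSC's GC Weak handles (here expired entries are simply overwritten rather than removed by a finalizer):

    #include <map>
    #include <memory>
    #include <string>
    #include <utility>

    struct Executable { std::string name; };

    class HostStubCache {
    public:
        // Returns the cached executable if it is still alive, otherwise creates a new one.
        std::shared_ptr<Executable> stub(void* function, void* constructor)
        {
            auto key = std::make_pair(function, constructor);
            if (auto existing = m_map[key].lock())   // still alive?
                return existing;
            auto created = std::make_shared<Executable>(Executable { "stub" });
            m_map[key] = created;                    // weak: does not keep it alive
            return created;
        }

    private:
        std::map<std::pair<void*, void*>, std::weak_ptr<Executable>> m_map;
    };
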
diff --git a/Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.cpp b/Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.cpp
new file mode 100644
index 000000000..876b0401b
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.cpp
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JITToDFGDeferredCompilationCallback.h"
+
+#if ENABLE(DFG_JIT)
+
+#include "CodeBlock.h"
+#include "Executable.h"
+#include "JSCInlines.h"
+
+namespace JSC {
+
+JITToDFGDeferredCompilationCallback::JITToDFGDeferredCompilationCallback() { }
+JITToDFGDeferredCompilationCallback::~JITToDFGDeferredCompilationCallback() { }
+
+Ref<JITToDFGDeferredCompilationCallback> JITToDFGDeferredCompilationCallback::create()
+{
+ return adoptRef(*new JITToDFGDeferredCompilationCallback());
+}
+
+void JITToDFGDeferredCompilationCallback::compilationDidBecomeReadyAsynchronously(
+ CodeBlock* codeBlock, CodeBlock* profiledDFGCodeBlock)
+{
+ ASSERT_UNUSED(profiledDFGCodeBlock, !profiledDFGCodeBlock);
+ ASSERT(codeBlock->alternative()->jitType() == JITCode::BaselineJIT);
+
+ if (Options::verboseOSR())
+ dataLog("Optimizing compilation of ", *codeBlock, " did become ready.\n");
+
+ codeBlock->alternative()->forceOptimizationSlowPathConcurrently();
+}
+
+void JITToDFGDeferredCompilationCallback::compilationDidComplete(
+ CodeBlock* codeBlock, CodeBlock* profiledDFGCodeBlock, CompilationResult result)
+{
+ ASSERT(!profiledDFGCodeBlock);
+ ASSERT(codeBlock->alternative()->jitType() == JITCode::BaselineJIT);
+
+ if (Options::verboseOSR())
+ dataLog("Optimizing compilation of ", *codeBlock, " result: ", result, "\n");
+
+ if (result == CompilationSuccessful)
+ codeBlock->ownerScriptExecutable()->installCode(codeBlock);
+
+ codeBlock->alternative()->setOptimizationThresholdBasedOnCompilationResult(result);
+
+ DeferredCompilationCallback::compilationDidComplete(codeBlock, profiledDFGCodeBlock, result);
+}
+
+} // JSC
+
+#endif // ENABLE(DFG_JIT)
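
The callback above closes the baseline-to-DFG tier-up loop: on success the optimized code is installed, and either way the baseline block's optimization threshold is adjusted. A simplified, non-JSC model of that flow (names and the back-off policy are illustrative):

    #include <cstdio>

    struct BaselineBlock {
        int optimizationThreshold = 1000;
        bool hasOptimizedReplacement = false;

        void compilationDidComplete(bool successful)
        {
            if (successful) {
                hasOptimizedReplacement = true;   // installCode(): enter DFG code on the next call
                return;
            }
            optimizationThreshold *= 4;           // illustrative back-off before retrying
            std::printf("deferring re-optimization until threshold %d\n", optimizationThreshold);
        }
    };
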
diff --git a/Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.h b/Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.h
new file mode 100644
index 000000000..af2532f92
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITToDFGDeferredCompilationCallback_h
+#define JITToDFGDeferredCompilationCallback_h
+
+#if ENABLE(DFG_JIT)
+
+#include "DeferredCompilationCallback.h"
+#include <wtf/PassRefPtr.h>
+
+namespace JSC {
+
+class ScriptExecutable;
+
+class JITToDFGDeferredCompilationCallback : public DeferredCompilationCallback {
+protected:
+ JITToDFGDeferredCompilationCallback();
+
+public:
+ virtual ~JITToDFGDeferredCompilationCallback();
+
+ static Ref<JITToDFGDeferredCompilationCallback> create();
+
+ virtual void compilationDidBecomeReadyAsynchronously(CodeBlock*, CodeBlock* profiledDFGCodeBlock) override;
+ virtual void compilationDidComplete(CodeBlock*, CodeBlock* profiledDFGCodeBlock, CompilationResult) override;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // JITToDFGDeferredCompilationCallback_h
+
diff --git a/Source/JavaScriptCore/jit/JITWriteBarrier.h b/Source/JavaScriptCore/jit/JITWriteBarrier.h
index 9da1ea782..b410ecadb 100644
--- a/Source/JavaScriptCore/jit/JITWriteBarrier.h
+++ b/Source/JavaScriptCore/jit/JITWriteBarrier.h
@@ -31,6 +31,7 @@
#include "MacroAssembler.h"
#include "SlotVisitor.h"
#include "UnusedPointer.h"
+#include "VM.h"
#include "WriteBarrier.h"
namespace JSC {
@@ -42,8 +43,7 @@ class VM;
#define JITWriteBarrierFlag ((void*)2)
class JITWriteBarrierBase {
public:
- typedef void* (JITWriteBarrierBase::*UnspecifiedBoolType);
- operator UnspecifiedBoolType*() const { return get() ? reinterpret_cast<UnspecifiedBoolType*>(1) : 0; }
+ explicit operator bool() const { return get(); }
bool operator!() const { return !get(); }
void setFlagOnBarrier()
@@ -77,9 +77,9 @@ protected:
{
}
- void set(VM&, CodeLocationDataLabelPtr location, JSCell* owner, JSCell* value)
+ void set(VM& vm, CodeLocationDataLabelPtr location, JSCell* owner, JSCell* value)
{
- Heap::writeBarrier(owner, value);
+ vm.heap.writeBarrier(owner, value);
m_location = location;
ASSERT(((!!m_location) && m_location.executableAddress() != JITWriteBarrierFlag) || (location.executableAddress() == m_location.executableAddress()));
MacroAssembler::repatchPointer(m_location, value);
@@ -137,7 +137,7 @@ public:
template<typename T> inline void SlotVisitor::append(JITWriteBarrier<T>* slot)
{
- internalAppend(slot->get());
+ internalAppend(0, slot->get());
}
}
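
The JITWriteBarrierBase hunk above replaces the pre-C++11 "unspecified bool type" workaround with an explicit conversion operator. A minimal sketch of the modern idiom (illustrative Handle type):

    #include <cassert>

    class Handle {
    public:
        explicit operator bool() const { return m_ptr != nullptr; }
        bool operator!() const { return !m_ptr; }
    private:
        void* m_ptr { nullptr };
    };

    int main()
    {
        Handle h;
        if (h)              // contextual conversion to bool: allowed
            assert(false);
        // int x = h;       // would not compile: no implicit conversion to int
        assert(!h);
        return 0;
    }
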
diff --git a/Source/JavaScriptCore/jit/JSInterfaceJIT.h b/Source/JavaScriptCore/jit/JSInterfaceJIT.h
index 0cf7589dd..201e3ab2b 100644
--- a/Source/JavaScriptCore/jit/JSInterfaceJIT.h
+++ b/Source/JavaScriptCore/jit/JSInterfaceJIT.h
@@ -27,8 +27,11 @@
#define JSInterfaceJIT_h
#include "BytecodeConventions.h"
+#include "CCallHelpers.h"
+#include "FPRInfo.h"
+#include "GPRInfo.h"
#include "JITCode.h"
-#include "JITStubs.h"
+#include "JITOperations.h"
#include "JSCJSValue.h"
#include "JSStack.h"
#include "JSString.h"
@@ -38,151 +41,17 @@
#if ENABLE(JIT)
namespace JSC {
- class JSInterfaceJIT : public MacroAssembler {
+ class JSInterfaceJIT : public CCallHelpers, public GPRInfo, public FPRInfo {
public:
- // NOTES:
- //
- // regT0 has two special meanings. The return value from a stub
- // call will always be in regT0, and by default (unless
- // a register is specified) emitPutVirtualRegister() will store
- // the value from regT0.
- //
- // regT3 is required to be callee-preserved.
- //
- // tempRegister2 is has no such dependencies. It is important that
- // on x86/x86-64 it is ecx for performance reasons, since the
- // MacroAssembler will need to plant register swaps if it is not -
- // however the code will still function correctly.
-#if CPU(X86_64)
- static const RegisterID returnValueRegister = X86Registers::eax;
- static const RegisterID cachedResultRegister = X86Registers::eax;
-#if !OS(WINDOWS)
- static const RegisterID firstArgumentRegister = X86Registers::edi;
-#else
- static const RegisterID firstArgumentRegister = X86Registers::ecx;
-#endif
-
-#if ENABLE(VALUE_PROFILER)
- static const RegisterID bucketCounterRegister = X86Registers::r10;
-#endif
-
- static const RegisterID callFrameRegister = X86Registers::r13;
- static const RegisterID tagTypeNumberRegister = X86Registers::r14;
- static const RegisterID tagMaskRegister = X86Registers::r15;
-
- static const RegisterID regT0 = X86Registers::eax;
- static const RegisterID regT1 = X86Registers::edx;
- static const RegisterID regT2 = X86Registers::ecx;
- static const RegisterID regT3 = X86Registers::ebx;
-
- static const FPRegisterID fpRegT0 = X86Registers::xmm0;
- static const FPRegisterID fpRegT1 = X86Registers::xmm1;
- static const FPRegisterID fpRegT2 = X86Registers::xmm2;
- static const FPRegisterID fpRegT3 = X86Registers::xmm3;
-
- static const RegisterID nonArgGPR1 = X86Registers::eax; // regT0
-#elif CPU(X86)
- static const RegisterID returnValueRegister = X86Registers::eax;
- static const RegisterID cachedResultRegister = X86Registers::eax;
- // On x86 we always use fastcall conventions = but on
- // OS X if might make more sense to just use regparm.
- static const RegisterID firstArgumentRegister = X86Registers::ecx;
-
- static const RegisterID bucketCounterRegister = X86Registers::esi;
- static const RegisterID callFrameRegister = X86Registers::edi;
-
- static const RegisterID regT0 = X86Registers::eax;
- static const RegisterID regT1 = X86Registers::edx;
- static const RegisterID regT2 = X86Registers::ecx;
- static const RegisterID regT3 = X86Registers::ebx;
-
- static const FPRegisterID fpRegT0 = X86Registers::xmm0;
- static const FPRegisterID fpRegT1 = X86Registers::xmm1;
- static const FPRegisterID fpRegT2 = X86Registers::xmm2;
- static const FPRegisterID fpRegT3 = X86Registers::xmm3;
-#elif CPU(ARM)
- static const RegisterID returnValueRegister = ARMRegisters::r0;
- static const RegisterID cachedResultRegister = ARMRegisters::r0;
- static const RegisterID firstArgumentRegister = ARMRegisters::r0;
-
-#if ENABLE(VALUE_PROFILER)
- static const RegisterID bucketCounterRegister = ARMRegisters::r7;
-#endif
-
- static const RegisterID regT0 = ARMRegisters::r0;
- static const RegisterID regT1 = ARMRegisters::r1;
- static const RegisterID regT2 = ARMRegisters::r2;
- static const RegisterID regT3 = ARMRegisters::r4;
-
- // Update ctiTrampoline in JITStubs.cpp if these values are changed!
- static const RegisterID callFrameRegister = ARMRegisters::r5;
-
- static const FPRegisterID fpRegT0 = ARMRegisters::d0;
- static const FPRegisterID fpRegT1 = ARMRegisters::d1;
- static const FPRegisterID fpRegT2 = ARMRegisters::d2;
- static const FPRegisterID fpRegT3 = ARMRegisters::d3;
-#elif CPU(MIPS)
- static const RegisterID returnValueRegister = MIPSRegisters::v0;
- static const RegisterID cachedResultRegister = MIPSRegisters::v0;
- static const RegisterID firstArgumentRegister = MIPSRegisters::a0;
-
-#if ENABLE(VALUE_PROFILER)
- static const RegisterID bucketCounterRegister = MIPSRegisters::s3;
-#endif
-
- // regT0 must be v0 for returning a 32-bit value.
- static const RegisterID regT0 = MIPSRegisters::v0;
-
- // regT1 must be v1 for returning a pair of 32-bit value.
- static const RegisterID regT1 = MIPSRegisters::v1;
-
- static const RegisterID regT2 = MIPSRegisters::t4;
-
- // regT3 must be saved in the callee, so use an S register.
- static const RegisterID regT3 = MIPSRegisters::s2;
-
- static const RegisterID callFrameRegister = MIPSRegisters::s0;
-
- static const FPRegisterID fpRegT0 = MIPSRegisters::f4;
- static const FPRegisterID fpRegT1 = MIPSRegisters::f6;
- static const FPRegisterID fpRegT2 = MIPSRegisters::f8;
- static const FPRegisterID fpRegT3 = MIPSRegisters::f10;
-#elif CPU(SH4)
- static const RegisterID callFrameRegister = SH4Registers::fp;
-
-#if ENABLE(VALUE_PROFILER)
- static const RegisterID bucketCounterRegister = SH4Registers::r8;
-#endif
-
- static const RegisterID regT0 = SH4Registers::r0;
- static const RegisterID regT1 = SH4Registers::r1;
- static const RegisterID regT2 = SH4Registers::r2;
- static const RegisterID regT3 = SH4Registers::r10;
- static const RegisterID regT4 = SH4Registers::r4;
- static const RegisterID regT5 = SH4Registers::r5;
- static const RegisterID regT6 = SH4Registers::r6;
- static const RegisterID regT7 = SH4Registers::r7;
- static const RegisterID firstArgumentRegister = regT4;
-
- static const RegisterID returnValueRegister = SH4Registers::r0;
- static const RegisterID cachedResultRegister = SH4Registers::r0;
-
- static const FPRegisterID fpRegT0 = SH4Registers::dr0;
- static const FPRegisterID fpRegT1 = SH4Registers::dr2;
- static const FPRegisterID fpRegT2 = SH4Registers::dr4;
- static const FPRegisterID fpRegT3 = SH4Registers::dr6;
- static const FPRegisterID fpRegT4 = SH4Registers::dr8;
- static const FPRegisterID fpRegT5 = SH4Registers::dr10;
-#else
-#error "JIT not supported on this platform."
-#endif
+ JSInterfaceJIT(VM* vm, CodeBlock* codeBlock = 0)
+ : CCallHelpers(vm, codeBlock)
+ {
+ }
#if USE(JSVALUE32_64)
- // Can't just propogate JSValue::Int32Tag as visual studio doesn't like it
- static const unsigned Int32Tag = 0xffffffff;
- COMPILE_ASSERT(Int32Tag == JSValue::Int32Tag, Int32Tag_out_of_sync);
+ static const unsigned Int32Tag = static_cast<unsigned>(JSValue::Int32Tag);
#else
- static const unsigned Int32Tag = TagTypeNumber >> 32;
+ static const unsigned Int32Tag = static_cast<unsigned>(TagTypeNumber >> 32);
#endif
inline Jump emitLoadJSCell(unsigned virtualRegisterIndex, RegisterID payload);
inline Jump emitLoadInt32(unsigned virtualRegisterIndex, RegisterID dst);
@@ -195,24 +64,18 @@ namespace JSC {
#if USE(JSVALUE64)
Jump emitJumpIfNotJSCell(RegisterID);
- Jump emitJumpIfImmediateNumber(RegisterID reg);
- Jump emitJumpIfNotImmediateNumber(RegisterID reg);
- void emitFastArithImmToInt(RegisterID reg);
- void emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest);
+ Jump emitJumpIfNumber(RegisterID);
+ Jump emitJumpIfNotNumber(RegisterID);
+ void emitTagInt(RegisterID src, RegisterID dest);
#endif
- Jump emitJumpIfNotType(RegisterID baseReg, RegisterID scratchReg, JSType);
+ Jump emitJumpIfNotType(RegisterID baseReg, JSType);
void emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister);
void emitPutToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry);
- void emitPutImmediateToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry);
+ void emitPutToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry);
void emitPutCellToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry);
- void preserveReturnAddressAfterCall(RegisterID);
- void restoreReturnAddressBeforeReturn(RegisterID);
- void restoreReturnAddressBeforeReturn(Address);
- void restoreArgumentReference();
-
inline Address payloadFor(int index, RegisterID base = callFrameRegister);
inline Address intPayloadFor(int index, RegisterID base = callFrameRegister);
inline Address intTagFor(int index, RegisterID base = callFrameRegister);
@@ -289,11 +152,11 @@ namespace JSC {
return branchTest64(NonZero, reg, tagMaskRegister);
}
- ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfImmediateNumber(RegisterID reg)
+ ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfNumber(RegisterID reg)
{
return branchTest64(NonZero, reg, tagTypeNumberRegister);
}
- ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfNotImmediateNumber(RegisterID reg)
+ ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfNotNumber(RegisterID reg)
{
return branchTest64(Zero, reg, tagTypeNumberRegister);
}
@@ -314,7 +177,7 @@ namespace JSC {
inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadDouble(unsigned virtualRegisterIndex, FPRegisterID dst, RegisterID scratch)
{
load64(addressFor(virtualRegisterIndex), scratch);
- Jump notNumber = emitJumpIfNotImmediateNumber(scratch);
+ Jump notNumber = emitJumpIfNotNumber(scratch);
Jump notInt = branch64(Below, scratch, tagTypeNumberRegister);
convertInt32ToDouble(scratch, dst);
Jump done = jump();
@@ -325,12 +188,8 @@ namespace JSC {
return notNumber;
}
- ALWAYS_INLINE void JSInterfaceJIT::emitFastArithImmToInt(RegisterID)
- {
- }
-
// operand is int32_t, must have been zero-extended if register is 64-bit.
- ALWAYS_INLINE void JSInterfaceJIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest)
+ ALWAYS_INLINE void JSInterfaceJIT::emitTagInt(RegisterID src, RegisterID dest)
{
if (src != dest)
move(src, dest);
@@ -357,10 +216,9 @@ namespace JSC {
}
#endif
- ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfNotType(RegisterID baseReg, RegisterID scratchReg, JSType type)
+ ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfNotType(RegisterID baseReg, JSType type)
{
- loadPtr(Address(baseReg, JSCell::structureOffset()), scratchReg);
- return branch8(NotEqual, Address(scratchReg, Structure::typeInfoTypeOffset()), TrustedImm32(type));
+ return branch8(NotEqual, Address(baseReg, JSCell::typeInfoTypeOffset()), TrustedImm32(type));
}
ALWAYS_INLINE void JSInterfaceJIT::emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
@@ -377,7 +235,7 @@ namespace JSC {
#endif
}
- ALWAYS_INLINE void JSInterfaceJIT::emitPutImmediateToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry entry)
+ ALWAYS_INLINE void JSInterfaceJIT::emitPutToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry entry)
{
storePtr(TrustedImmPtr(value), Address(callFrameRegister, entry * sizeof(Register)));
}
@@ -398,81 +256,6 @@ namespace JSC {
return Address(base, (static_cast<unsigned>(virtualRegisterIndex) * sizeof(Register)));
}
-#if CPU(ARM)
-
- ALWAYS_INLINE void JSInterfaceJIT::preserveReturnAddressAfterCall(RegisterID reg)
- {
- move(linkRegister, reg);
- }
-
- ALWAYS_INLINE void JSInterfaceJIT::restoreReturnAddressBeforeReturn(RegisterID reg)
- {
- move(reg, linkRegister);
- }
-
- ALWAYS_INLINE void JSInterfaceJIT::restoreReturnAddressBeforeReturn(Address address)
- {
- loadPtr(address, linkRegister);
- }
-#elif CPU(SH4)
-
- ALWAYS_INLINE void JSInterfaceJIT::preserveReturnAddressAfterCall(RegisterID reg)
- {
- m_assembler.stspr(reg);
- }
-
- ALWAYS_INLINE void JSInterfaceJIT::restoreReturnAddressBeforeReturn(RegisterID reg)
- {
- m_assembler.ldspr(reg);
- }
-
- ALWAYS_INLINE void JSInterfaceJIT::restoreReturnAddressBeforeReturn(Address address)
- {
- loadPtrLinkReg(address);
- }
-
-#elif CPU(MIPS)
-
- ALWAYS_INLINE void JSInterfaceJIT::preserveReturnAddressAfterCall(RegisterID reg)
- {
- move(returnAddressRegister, reg);
- }
-
- ALWAYS_INLINE void JSInterfaceJIT::restoreReturnAddressBeforeReturn(RegisterID reg)
- {
- move(reg, returnAddressRegister);
- }
-
- ALWAYS_INLINE void JSInterfaceJIT::restoreReturnAddressBeforeReturn(Address address)
- {
- loadPtr(address, returnAddressRegister);
- }
-
-#else // CPU(X86) || CPU(X86_64)
-
- ALWAYS_INLINE void JSInterfaceJIT::preserveReturnAddressAfterCall(RegisterID reg)
- {
- pop(reg);
- }
-
- ALWAYS_INLINE void JSInterfaceJIT::restoreReturnAddressBeforeReturn(RegisterID reg)
- {
- push(reg);
- }
-
- ALWAYS_INLINE void JSInterfaceJIT::restoreReturnAddressBeforeReturn(Address address)
- {
- push(address);
- }
-
-#endif
-
- ALWAYS_INLINE void JSInterfaceJIT::restoreArgumentReference()
- {
- move(stackPointerRegister, firstArgumentRegister);
- poke(callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
- }
-
} // namespace JSC
#endif // ENABLE(JIT)
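
emitTagInt() implements the JSVALUE64 integer encoding: the int32 is zero-extended and the number tag is or'ed in, which is also where the Int32Tag constant above (TagTypeNumber >> 32) comes from. A standalone sketch using the 0xFFFF000000000000 constant visible earlier in this patch:

    #include <cassert>
    #include <cstdint>

    constexpr uint64_t TagTypeNumber = 0xFFFF000000000000ull;

    inline uint64_t tagInt32(int32_t value)
    {
        // zero-extend the int32, then set the number-tag bits
        return static_cast<uint64_t>(static_cast<uint32_t>(value)) | TagTypeNumber;
    }

    int main()
    {
        assert(static_cast<uint32_t>(tagInt32(-1)) == 0xFFFFFFFFu);         // payload preserved
        assert((tagInt32(42) >> 32) == (TagTypeNumber >> 32));              // matches Int32Tag
        return 0;
    }
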
diff --git a/Source/JavaScriptCore/jit/PCToCodeOriginMap.cpp b/Source/JavaScriptCore/jit/PCToCodeOriginMap.cpp
new file mode 100644
index 000000000..8676d2de0
--- /dev/null
+++ b/Source/JavaScriptCore/jit/PCToCodeOriginMap.cpp
@@ -0,0 +1,301 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "PCToCodeOriginMap.h"
+
+#if ENABLE(JIT)
+
+#include "B3PCToOriginMap.h"
+#include "DFGNode.h"
+#include "LinkBuffer.h"
+
+namespace JSC {
+
+namespace {
+
+class DeltaCompressionBuilder {
+public:
+ DeltaCompressionBuilder(size_t maxSize)
+ : m_offset(0)
+ , m_maxSize(maxSize)
+ {
+ m_buffer = static_cast<uint8_t*>(fastMalloc(m_maxSize));
+ }
+
+ template <typename T>
+ void write(T item)
+ {
+ RELEASE_ASSERT(m_offset + sizeof(T) <= m_maxSize);
+ static const uint8_t mask = std::numeric_limits<uint8_t>::max();
+ for (unsigned i = 0; i < sizeof(T); i++) {
+ *(m_buffer + m_offset) = static_cast<uint8_t>(item & mask);
+ item = item >> (sizeof(uint8_t) * 8);
+ m_offset += 1;
+ }
+ }
+
+ uint8_t* m_buffer;
+ size_t m_offset;
+ size_t m_maxSize;
+};
+
+class DeltaCompresseionReader {
+public:
+ DeltaCompresseionReader(uint8_t* buffer, size_t size)
+ : m_buffer(buffer)
+ , m_size(size)
+ , m_offset(0)
+ { }
+
+ template <typename T>
+ T read()
+ {
+ RELEASE_ASSERT(m_offset + sizeof(T) <= m_size);
+ T result = 0;
+ for (unsigned i = 0; i < sizeof(T); i++) {
+ uint8_t bitsAsInt8 = *(m_buffer + m_offset);
+ T bits = static_cast<T>(bitsAsInt8);
+ bits = bits << (sizeof(uint8_t) * 8 * i);
+ result |= bits;
+ m_offset += 1;
+ }
+ return result;
+ }
+
+private:
+ uint8_t* m_buffer;
+ size_t m_size;
+ size_t m_offset;
+};
+
+} // anonymous namespace
+
+PCToCodeOriginMapBuilder::PCToCodeOriginMapBuilder(VM& vm)
+ : m_vm(vm)
+ , m_shouldBuildMapping(vm.shouldBuilderPCToCodeOriginMapping())
+{ }
+
+PCToCodeOriginMapBuilder::PCToCodeOriginMapBuilder(PCToCodeOriginMapBuilder&& other)
+ : m_vm(other.m_vm)
+ , m_codeRanges(WTFMove(other.m_codeRanges))
+ , m_shouldBuildMapping(other.m_shouldBuildMapping)
+{ }
+
+#if ENABLE(FTL_JIT)
+PCToCodeOriginMapBuilder::PCToCodeOriginMapBuilder(VM& vm, B3::PCToOriginMap&& b3PCToOriginMap)
+ : m_vm(vm)
+ , m_shouldBuildMapping(vm.shouldBuilderPCToCodeOriginMapping())
+{
+ if (!m_shouldBuildMapping)
+ return;
+
+ for (const B3::PCToOriginMap::OriginRange& originRange : b3PCToOriginMap.ranges()) {
+ DFG::Node* node = bitwise_cast<DFG::Node*>(originRange.origin.data());
+ if (node)
+ appendItem(originRange.label, node->origin.semantic);
+ else
+ appendItem(originRange.label, PCToCodeOriginMapBuilder::defaultCodeOrigin());
+ }
+}
+#endif
+
+void PCToCodeOriginMapBuilder::appendItem(MacroAssembler::Label label, const CodeOrigin& codeOrigin)
+{
+ if (!m_shouldBuildMapping)
+ return;
+
+ if (m_codeRanges.size()) {
+ if (m_codeRanges.last().end == label)
+ return;
+ m_codeRanges.last().end = label;
+ if (m_codeRanges.last().codeOrigin == codeOrigin || !codeOrigin)
+ return;
+ }
+
+ m_codeRanges.append(CodeRange{label, label, codeOrigin});
+}
+
+
+static const uint8_t sentinelPCDelta = 0;
+static const int8_t sentinelBytecodeDelta = 0;
+
+PCToCodeOriginMap::PCToCodeOriginMap(PCToCodeOriginMapBuilder&& builder, LinkBuffer& linkBuffer)
+{
+ RELEASE_ASSERT(builder.didBuildMapping());
+
+ if (!builder.m_codeRanges.size()) {
+ m_pcRangeStart = std::numeric_limits<uintptr_t>::max();
+ m_pcRangeEnd = std::numeric_limits<uintptr_t>::max();
+
+ m_compressedPCBufferSize = 0;
+ m_compressedPCs = nullptr;
+
+ m_compressedCodeOriginsSize = 0;
+ m_compressedCodeOrigins = nullptr;
+
+ return;
+ }
+
+ // We do a final touch-up on the last range here because of how we generate the table.
+ // The final range (if non-empty) would be ignored if we didn't append an (arbitrary)
+ // range as the last item of the vector.
+ PCToCodeOriginMapBuilder::CodeRange& last = builder.m_codeRanges.last();
+ if (!(last.start == last.end))
+ builder.m_codeRanges.append(PCToCodeOriginMapBuilder::CodeRange{ last.end, last.end, last.codeOrigin }); // This range will never actually be found, but it ensures the real last range is found.
+
+ DeltaCompressionBuilder pcCompressor((sizeof(uintptr_t) + sizeof(uint8_t)) * builder.m_codeRanges.size());
+ void* lastPCValue = nullptr;
+ auto buildPCTable = [&] (void* pcValue) {
+ RELEASE_ASSERT(pcValue > lastPCValue);
+ uintptr_t delta = bitwise_cast<uintptr_t>(pcValue) - bitwise_cast<uintptr_t>(lastPCValue);
+ RELEASE_ASSERT(delta != sentinelPCDelta);
+ lastPCValue = pcValue;
+ if (delta > std::numeric_limits<uint8_t>::max()) {
+ pcCompressor.write<uint8_t>(sentinelPCDelta);
+ pcCompressor.write<uintptr_t>(delta);
+ return;
+ }
+
+ pcCompressor.write<uint8_t>(static_cast<uint8_t>(delta));
+ };
+
+ DeltaCompressionBuilder codeOriginCompressor((sizeof(intptr_t) + sizeof(int8_t) + sizeof(int8_t) + sizeof(InlineCallFrame*)) * builder.m_codeRanges.size());
+ CodeOrigin lastCodeOrigin(0, nullptr);
+ auto buildCodeOriginTable = [&] (const CodeOrigin& codeOrigin) {
+ intptr_t delta = static_cast<intptr_t>(codeOrigin.bytecodeIndex) - static_cast<intptr_t>(lastCodeOrigin.bytecodeIndex);
+ lastCodeOrigin = codeOrigin;
+ if (delta > std::numeric_limits<int8_t>::max() || delta < std::numeric_limits<int8_t>::min() || delta == sentinelBytecodeDelta) {
+ codeOriginCompressor.write<int8_t>(sentinelBytecodeDelta);
+ codeOriginCompressor.write<intptr_t>(delta);
+ } else
+ codeOriginCompressor.write<int8_t>(static_cast<int8_t>(delta));
+
+ int8_t hasInlineCallFrameByte = codeOrigin.inlineCallFrame ? 1 : 0;
+ codeOriginCompressor.write<int8_t>(hasInlineCallFrameByte);
+ if (hasInlineCallFrameByte)
+ codeOriginCompressor.write<uintptr_t>(bitwise_cast<uintptr_t>(codeOrigin.inlineCallFrame));
+ };
+
+ m_pcRangeStart = bitwise_cast<uintptr_t>(linkBuffer.locationOf(builder.m_codeRanges.first().start).dataLocation());
+ m_pcRangeEnd = bitwise_cast<uintptr_t>(linkBuffer.locationOf(builder.m_codeRanges.last().end).dataLocation());
+ m_pcRangeEnd -= 1;
+
+ for (unsigned i = 0; i < builder.m_codeRanges.size(); i++) {
+ PCToCodeOriginMapBuilder::CodeRange& codeRange = builder.m_codeRanges[i];
+ void* start = linkBuffer.locationOf(codeRange.start).dataLocation();
+ void* end = linkBuffer.locationOf(codeRange.end).dataLocation();
+ ASSERT(m_pcRangeStart <= bitwise_cast<uintptr_t>(start));
+ ASSERT(m_pcRangeEnd >= bitwise_cast<uintptr_t>(end) - 1);
+ if (start == end)
+ ASSERT(i == builder.m_codeRanges.size() - 1);
+ if (i > 0)
+ ASSERT(linkBuffer.locationOf(builder.m_codeRanges[i - 1].end).dataLocation() == start);
+
+ buildPCTable(start);
+ buildCodeOriginTable(codeRange.codeOrigin);
+ }
+
+ m_compressedPCBufferSize = pcCompressor.m_offset;
+ m_compressedPCs = static_cast<uint8_t*>(fastRealloc(pcCompressor.m_buffer, m_compressedPCBufferSize));
+
+ m_compressedCodeOriginsSize = codeOriginCompressor.m_offset;
+ m_compressedCodeOrigins = static_cast<uint8_t*>(fastRealloc(codeOriginCompressor.m_buffer, m_compressedCodeOriginsSize));
+}
+
+PCToCodeOriginMap::~PCToCodeOriginMap()
+{
+ if (m_compressedPCs)
+ fastFree(m_compressedPCs);
+ if (m_compressedCodeOrigins)
+ fastFree(m_compressedCodeOrigins);
+}
+
+double PCToCodeOriginMap::memorySize()
+{
+ double size = 0;
+ size += m_compressedPCBufferSize;
+ size += m_compressedCodeOriginsSize;
+ return size;
+}
+
+Optional<CodeOrigin> PCToCodeOriginMap::findPC(void* pc) const
+{
+ uintptr_t pcAsInt = bitwise_cast<uintptr_t>(pc);
+ if (!(m_pcRangeStart <= pcAsInt && pcAsInt <= m_pcRangeEnd))
+ return Nullopt;
+
+ uintptr_t currentPC = 0;
+ CodeOrigin currentCodeOrigin(0, nullptr);
+
+ DeltaCompresseionReader pcReader(m_compressedPCs, m_compressedPCBufferSize);
+ DeltaCompresseionReader codeOriginReader(m_compressedCodeOrigins, m_compressedCodeOriginsSize);
+ while (true) {
+ uintptr_t previousPC = currentPC;
+ {
+ uint8_t value = pcReader.read<uint8_t>();
+ uintptr_t delta;
+ if (value == sentinelPCDelta)
+ delta = pcReader.read<uintptr_t>();
+ else
+ delta = value;
+ currentPC += delta;
+ }
+
+ CodeOrigin previousOrigin = currentCodeOrigin;
+ {
+ int8_t value = codeOriginReader.read<int8_t>();
+ intptr_t delta;
+ if (value == sentinelBytecodeDelta)
+ delta = codeOriginReader.read<intptr_t>();
+ else
+ delta = static_cast<intptr_t>(value);
+
+ currentCodeOrigin.bytecodeIndex = static_cast<unsigned>(static_cast<intptr_t>(currentCodeOrigin.bytecodeIndex) + delta);
+
+ int8_t hasInlineFrame = codeOriginReader.read<int8_t>();
+ ASSERT(hasInlineFrame == 0 || hasInlineFrame == 1);
+ if (hasInlineFrame)
+ currentCodeOrigin.inlineCallFrame = bitwise_cast<InlineCallFrame*>(codeOriginReader.read<uintptr_t>());
+ else
+ currentCodeOrigin.inlineCallFrame = nullptr;
+ }
+
+ if (previousPC) {
+ uintptr_t startOfRange = previousPC;
+ // We subtract 1 because we generate end points inclusively in this table, even though we are interested in ranges of the form: [previousPC, currentPC)
+ uintptr_t endOfRange = currentPC - 1;
+ if (startOfRange <= pcAsInt && pcAsInt <= endOfRange)
+ return Optional<CodeOrigin>(previousOrigin); // We return previousOrigin here because CodeOrigins are mapped to the start value of their range.
+ }
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return Nullopt;
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
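
The PC and bytecode tables above are delta-compressed: each entry is normally a single byte, and a zero sentinel escapes to a full-width delta when the value does not fit. A standalone sketch of the unsigned variant (container choice is illustrative; JSC writes into a fastMalloc'd buffer instead):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    static const uint8_t sentinel = 0;

    void writeDelta(std::vector<uint8_t>& out, uintptr_t delta)
    {
        assert(delta != sentinel);
        if (delta > 0xFF) {
            out.push_back(sentinel);                                      // escape marker
            for (unsigned i = 0; i < sizeof(uintptr_t); i++)
                out.push_back(static_cast<uint8_t>(delta >> (8 * i)));    // little-endian bytes
        } else
            out.push_back(static_cast<uint8_t>(delta));
    }

    uintptr_t readDelta(const std::vector<uint8_t>& in, size_t& offset)
    {
        uint8_t first = in[offset++];
        if (first != sentinel)
            return first;
        uintptr_t delta = 0;
        for (unsigned i = 0; i < sizeof(uintptr_t); i++)
            delta |= static_cast<uintptr_t>(in[offset++]) << (8 * i);
        return delta;
    }

    int main()
    {
        std::vector<uint8_t> buffer;
        writeDelta(buffer, 7);
        writeDelta(buffer, 100000);
        size_t offset = 0;
        assert(readDelta(buffer, offset) == 7);
        assert(readDelta(buffer, offset) == 100000);
        return 0;
    }
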
diff --git a/Source/JavaScriptCore/jit/PCToCodeOriginMap.h b/Source/JavaScriptCore/jit/PCToCodeOriginMap.h
new file mode 100644
index 000000000..75b54448c
--- /dev/null
+++ b/Source/JavaScriptCore/jit/PCToCodeOriginMap.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PCToCodeOriginMap_h
+#define PCToCodeOriginMap_h
+
+#if ENABLE(JIT)
+
+#include "CodeOrigin.h"
+#include "DFGCommon.h"
+#include "MacroAssembler.h"
+#include "VM.h"
+#include <wtf/Bag.h>
+#include <wtf/Optional.h>
+#include <wtf/RedBlackTree.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+#if ENABLE(FTL_JIT)
+namespace B3 {
+class PCToOriginMap;
+}
+#endif
+
+class LinkBuffer;
+class PCToCodeOriginMapBuilder;
+
+class PCToCodeOriginMapBuilder {
+ WTF_MAKE_NONCOPYABLE(PCToCodeOriginMapBuilder);
+ friend class PCToCodeOriginMap;
+
+public:
+ PCToCodeOriginMapBuilder(VM&);
+ PCToCodeOriginMapBuilder(PCToCodeOriginMapBuilder&& other);
+
+#if ENABLE(FTL_JIT)
+ PCToCodeOriginMapBuilder(VM&, B3::PCToOriginMap&&);
+#endif
+
+ void appendItem(MacroAssembler::Label, const CodeOrigin&);
+ static CodeOrigin defaultCodeOrigin() { return CodeOrigin(0, nullptr); }
+
+ bool didBuildMapping() const { return m_shouldBuildMapping; }
+
+private:
+
+ struct CodeRange {
+ MacroAssembler::Label start;
+ MacroAssembler::Label end;
+ CodeOrigin codeOrigin;
+ };
+
+ VM& m_vm;
+ Vector<CodeRange> m_codeRanges;
+ bool m_shouldBuildMapping;
+};
+
+class PCToCodeOriginMap {
+ WTF_MAKE_NONCOPYABLE(PCToCodeOriginMap);
+public:
+ PCToCodeOriginMap(PCToCodeOriginMapBuilder&&, LinkBuffer&);
+ ~PCToCodeOriginMap();
+
+ Optional<CodeOrigin> findPC(void* pc) const;
+
+ double memorySize();
+
+private:
+ size_t m_compressedPCBufferSize;
+ size_t m_compressedCodeOriginsSize;
+ uint8_t* m_compressedPCs;
+ uint8_t* m_compressedCodeOrigins;
+ uintptr_t m_pcRangeStart;
+ uintptr_t m_pcRangeEnd;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // PCToCodeOriginMap_h
diff --git a/Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.cpp b/Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.cpp
new file mode 100644
index 000000000..b12b55299
--- /dev/null
+++ b/Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.cpp
@@ -0,0 +1,137 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "PolymorphicCallStubRoutine.h"
+
+#if ENABLE(JIT)
+
+#include "CallLinkInfo.h"
+#include "CodeBlock.h"
+#include "JSCInlines.h"
+#include "LinkBuffer.h"
+
+namespace JSC {
+
+PolymorphicCallNode::~PolymorphicCallNode()
+{
+ if (isOnList())
+ remove();
+}
+
+void PolymorphicCallNode::unlink(VM& vm)
+{
+ if (m_callLinkInfo) {
+ if (Options::dumpDisassembly())
+ dataLog("Unlinking polymorphic call at ", m_callLinkInfo->callReturnLocation(), ", ", m_callLinkInfo->codeOrigin(), "\n");
+
+ m_callLinkInfo->unlink(vm);
+ }
+
+ if (isOnList())
+ remove();
+}
+
+void PolymorphicCallNode::clearCallLinkInfo()
+{
+ if (Options::dumpDisassembly())
+ dataLog("Clearing call link info for polymorphic call at ", m_callLinkInfo->callReturnLocation(), ", ", m_callLinkInfo->codeOrigin(), "\n");
+
+ m_callLinkInfo = nullptr;
+}
+
+void PolymorphicCallCase::dump(PrintStream& out) const
+{
+ out.print("<variant = ", m_variant, ", codeBlock = ", pointerDump(m_codeBlock), ">");
+}
+
+PolymorphicCallStubRoutine::PolymorphicCallStubRoutine(
+ const MacroAssemblerCodeRef& codeRef, VM& vm, const JSCell* owner, ExecState* callerFrame,
+ CallLinkInfo& info, const Vector<PolymorphicCallCase>& cases,
+ std::unique_ptr<uint32_t[]> fastCounts)
+ : GCAwareJITStubRoutine(codeRef, vm)
+ , m_fastCounts(WTFMove(fastCounts))
+{
+ for (PolymorphicCallCase callCase : cases) {
+ m_variants.append(WriteBarrier<JSCell>(vm, owner, callCase.variant().rawCalleeCell()));
+ if (shouldDumpDisassemblyFor(callerFrame->codeBlock()))
+ dataLog("Linking polymorphic call in ", *callerFrame->codeBlock(), " at ", callerFrame->codeOrigin(), " to ", callCase.variant(), ", codeBlock = ", pointerDump(callCase.codeBlock()), "\n");
+ if (CodeBlock* codeBlock = callCase.codeBlock())
+ codeBlock->linkIncomingPolymorphicCall(callerFrame, m_callNodes.add(&info));
+ }
+ m_variants.shrinkToFit();
+ WTF::storeStoreFence();
+}
+
+PolymorphicCallStubRoutine::~PolymorphicCallStubRoutine() { }
+
+CallVariantList PolymorphicCallStubRoutine::variants() const
+{
+ CallVariantList result;
+ for (size_t i = 0; i < m_variants.size(); ++i)
+ result.append(CallVariant(m_variants[i].get()));
+ return result;
+}
+
+CallEdgeList PolymorphicCallStubRoutine::edges() const
+{
+ // We wouldn't have these if this was an FTL stub routine. We shouldn't be asking for profiling
+ // from the FTL.
+ RELEASE_ASSERT(m_fastCounts);
+
+ CallEdgeList result;
+ for (size_t i = 0; i < m_variants.size(); ++i)
+ result.append(CallEdge(CallVariant(m_variants[i].get()), m_fastCounts[i]));
+ return result;
+}
+
+void PolymorphicCallStubRoutine::clearCallNodesFor(CallLinkInfo* info)
+{
+ for (Bag<PolymorphicCallNode>::iterator iter = m_callNodes.begin(); !!iter; ++iter) {
+ PolymorphicCallNode& node = **iter;
+ // All nodes should point to info, but okay to be a little paranoid.
+ if (node.hasCallLinkInfo(info))
+ node.clearCallLinkInfo();
+ }
+}
+
+bool PolymorphicCallStubRoutine::visitWeak(VM&)
+{
+ for (auto& variant : m_variants) {
+ if (!Heap::isMarked(variant.get()))
+ return false;
+ }
+ return true;
+}
+
+void PolymorphicCallStubRoutine::markRequiredObjectsInternal(SlotVisitor& visitor)
+{
+ for (auto& variant : m_variants)
+ visitor.append(&variant);
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
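
PolymorphicCallStubRoutine is the data side of a polymorphic call cache: the generated stub compares the incoming callee against a short list of known variants and bumps a per-case count on a hit, leaving everything else to the slow path. A plain C++ sketch of that dispatch (types are illustrative; the real comparison is emitted machine code):

    #include <cstdint>
    #include <memory>
    #include <vector>

    struct Callee { void (*entry)(); };

    class PolymorphicCallCache {
    public:
        explicit PolymorphicCallCache(std::vector<Callee*> variants)
            : m_variants(std::move(variants))
            , m_fastCounts(new uint32_t[m_variants.size()]())   // zero-initialized counters
        { }

        // Returns true on a cache hit (fast path); false means "take the slow path".
        bool call(Callee* callee)
        {
            for (size_t i = 0; i < m_variants.size(); ++i) {
                if (m_variants[i] == callee) {
                    m_fastCounts[i]++;     // profiling input for later recompilation
                    callee->entry();
                    return true;
                }
            }
            return false;                  // unknown callee: relink or do a generic call
        }

    private:
        std::vector<Callee*> m_variants;
        std::unique_ptr<uint32_t[]> m_fastCounts;
    };
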
diff --git a/Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.h b/Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.h
new file mode 100644
index 000000000..9d1491ca0
--- /dev/null
+++ b/Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef PolymorphicCallStubRoutine_h
+#define PolymorphicCallStubRoutine_h
+
+#if ENABLE(JIT)
+
+#include "CallEdge.h"
+#include "CallVariant.h"
+#include "CodeOrigin.h"
+#include "GCAwareJITStubRoutine.h"
+#include <wtf/FastMalloc.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+class CallLinkInfo;
+
+class PolymorphicCallNode : public BasicRawSentinelNode<PolymorphicCallNode> {
+ WTF_MAKE_NONCOPYABLE(PolymorphicCallNode);
+public:
+ PolymorphicCallNode(CallLinkInfo* info)
+ : m_callLinkInfo(info)
+ {
+ }
+
+ ~PolymorphicCallNode();
+
+ void unlink(VM&);
+
+ bool hasCallLinkInfo(CallLinkInfo* info) { return m_callLinkInfo == info; }
+ void clearCallLinkInfo();
+
+private:
+ CallLinkInfo* m_callLinkInfo;
+};
+
+class PolymorphicCallCase {
+public:
+ PolymorphicCallCase()
+ : m_codeBlock(nullptr)
+ {
+ }
+
+ PolymorphicCallCase(CallVariant variant, CodeBlock* codeBlock)
+ : m_variant(variant)
+ , m_codeBlock(codeBlock)
+ {
+ }
+
+ CallVariant variant() const { return m_variant; }
+ CodeBlock* codeBlock() const { return m_codeBlock; }
+
+ void dump(PrintStream&) const;
+
+private:
+ CallVariant m_variant;
+ CodeBlock* m_codeBlock;
+};
+
+class PolymorphicCallStubRoutine : public GCAwareJITStubRoutine {
+public:
+ PolymorphicCallStubRoutine(
+ const MacroAssemblerCodeRef&, VM&, const JSCell* owner,
+ ExecState* callerFrame, CallLinkInfo&, const Vector<PolymorphicCallCase>&,
+ std::unique_ptr<uint32_t[]> fastCounts);
+
+ virtual ~PolymorphicCallStubRoutine();
+
+ CallVariantList variants() const;
+ CallEdgeList edges() const;
+
+ void clearCallNodesFor(CallLinkInfo*);
+
+ bool visitWeak(VM&) override;
+
+protected:
+ virtual void markRequiredObjectsInternal(SlotVisitor&) override;
+
+private:
+ Vector<WriteBarrier<JSCell>, 2> m_variants;
+ std::unique_ptr<uint32_t[]> m_fastCounts;
+ Bag<PolymorphicCallNode> m_callNodes;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // PolymorphicCallStubRoutine_h
+
diff --git a/Source/JavaScriptCore/jit/ClosureCallStubRoutine.cpp b/Source/JavaScriptCore/jit/Reg.cpp
index 1588f7fea..6c0258061 100644
--- a/Source/JavaScriptCore/jit/ClosureCallStubRoutine.cpp
+++ b/Source/JavaScriptCore/jit/Reg.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -24,37 +24,23 @@
*/
#include "config.h"
-#include "ClosureCallStubRoutine.h"
+#include "Reg.h"
#if ENABLE(JIT)
-#include "Executable.h"
-#include "Heap.h"
-#include "VM.h"
-#include "Operations.h"
-#include "SlotVisitor.h"
-#include "Structure.h"
+#include "FPRInfo.h"
+#include "GPRInfo.h"
namespace JSC {
-ClosureCallStubRoutine::ClosureCallStubRoutine(
- const MacroAssemblerCodeRef& code, VM& vm, const JSCell* owner,
- Structure* structure, ExecutableBase* executable, const CodeOrigin& codeOrigin)
- : GCAwareJITStubRoutine(code, vm, true)
- , m_structure(vm, owner, structure)
- , m_executable(vm, owner, executable)
- , m_codeOrigin(codeOrigin)
+void Reg::dump(PrintStream& out) const
{
-}
-
-ClosureCallStubRoutine::~ClosureCallStubRoutine()
-{
-}
-
-void ClosureCallStubRoutine::markRequiredObjectsInternal(SlotVisitor& visitor)
-{
- visitor.append(&m_structure);
- visitor.append(&m_executable);
+ if (!*this)
+ out.print("<none>");
+ else if (isGPR())
+ out.print(gpr());
+ else
+ out.print(fpr());
}
} // namespace JSC
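
Reg::dump() above, and the Reg class introduced in the next file, rely on a single index space in which GPR indices come first and FPR indices follow. A standalone sketch of that index arithmetic (the register counts are illustrative constants, not JSC API):

    #include <cassert>

    constexpr unsigned numberOfGPRs = 16; // illustrative; MacroAssembler::numberOfRegisters()
    constexpr unsigned numberOfFPRs = 16; // illustrative; MacroAssembler::numberOfFPRegisters()

    struct RegIndex {
        unsigned index;
        bool isGPR() const { return index < numberOfGPRs; }
        bool isFPR() const { return index - numberOfGPRs < numberOfFPRs; } // relies on unsigned wrap
        unsigned gpr() const { assert(isGPR()); return index; }
        unsigned fpr() const { assert(isFPR()); return index - numberOfGPRs; }
    };

    int main()
    {
        for (unsigned i = 0; i < numberOfGPRs + numberOfFPRs; ++i) {
            RegIndex reg { i };
            assert(reg.isGPR() != reg.isFPR()); // every index is exactly one kind of register
        }
        return 0;
    }
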
diff --git a/Source/JavaScriptCore/jit/Reg.h b/Source/JavaScriptCore/jit/Reg.h
new file mode 100644
index 000000000..4db916321
--- /dev/null
+++ b/Source/JavaScriptCore/jit/Reg.h
@@ -0,0 +1,250 @@
+/*
+ * Copyright (C) 2014, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef Reg_h
+#define Reg_h
+
+#if ENABLE(JIT)
+
+#include "MacroAssembler.h"
+
+namespace JSC {
+
+// Reg is a polymorphic register class. It can refer to either integer or float registers.
+// Here are some use cases:
+//
+// GPRReg gpr;
+// Reg reg = gpr;
+// reg.isSet() == true
+// reg.isGPR() == true
+// reg.isFPR() == false
+//
+// for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
+// if (reg.isGPR()) {
+// } else /* reg.isFPR() */ {
+// }
+// }
+//
+// The above loop could have also used !!reg or reg.isSet() as a condition.
+
+class Reg {
+public:
+ Reg()
+ : m_index(invalid())
+ {
+ }
+
+ Reg(WTF::HashTableDeletedValueType)
+ : m_index(deleted())
+ {
+ }
+
+ Reg(MacroAssembler::RegisterID reg)
+ : m_index(MacroAssembler::registerIndex(reg))
+ {
+ }
+
+ Reg(MacroAssembler::FPRegisterID reg)
+ : m_index(MacroAssembler::registerIndex(reg))
+ {
+ }
+
+ static Reg fromIndex(unsigned index)
+ {
+ Reg result;
+ result.m_index = index;
+ return result;
+ }
+
+ static Reg first()
+ {
+ Reg result;
+ result.m_index = 0;
+ return result;
+ }
+
+ static Reg last()
+ {
+ Reg result;
+ result.m_index = MacroAssembler::numberOfRegisters() + MacroAssembler::numberOfFPRegisters() - 1;
+ return result;
+ }
+
+ Reg next() const
+ {
+ ASSERT(!!*this);
+ if (*this == last())
+ return Reg();
+ Reg result;
+ result.m_index = m_index + 1;
+ return result;
+ }
+
+ unsigned index() const { return m_index; }
+
+ static unsigned maxIndex()
+ {
+ return last().index();
+ }
+
+ bool isSet() const { return m_index != invalid(); }
+ explicit operator bool() const { return isSet(); }
+
+ bool isHashTableDeletedValue() const { return m_index == deleted(); }
+
+ bool isGPR() const
+ {
+ return m_index < MacroAssembler::numberOfRegisters();
+ }
+
+ bool isFPR() const
+ {
+ return (m_index - MacroAssembler::numberOfRegisters()) < MacroAssembler::numberOfFPRegisters();
+ }
+
+ MacroAssembler::RegisterID gpr() const
+ {
+ ASSERT(isGPR());
+ return static_cast<MacroAssembler::RegisterID>(MacroAssembler::firstRegister() + m_index);
+ }
+
+ MacroAssembler::FPRegisterID fpr() const
+ {
+ ASSERT(isFPR());
+ return static_cast<MacroAssembler::FPRegisterID>(
+ MacroAssembler::firstFPRegister() + (m_index - MacroAssembler::numberOfRegisters()));
+ }
+
+ bool operator==(const Reg& other) const
+ {
+ return m_index == other.m_index;
+ }
+
+ bool operator!=(const Reg& other) const
+ {
+ return m_index != other.m_index;
+ }
+
+ bool operator<(const Reg& other) const
+ {
+ return m_index < other.m_index;
+ }
+
+ bool operator>(const Reg& other) const
+ {
+ return m_index > other.m_index;
+ }
+
+ bool operator<=(const Reg& other) const
+ {
+ return m_index <= other.m_index;
+ }
+
+ bool operator>=(const Reg& other) const
+ {
+ return m_index >= other.m_index;
+ }
+
+ unsigned hash() const
+ {
+ return m_index;
+ }
+
+ void dump(PrintStream&) const;
+
+ class AllRegsIterable {
+ public:
+
+ class iterator {
+ public:
+ iterator() { }
+
+ explicit iterator(Reg reg)
+ : m_regIndex(reg.index())
+ {
+ }
+
+ Reg operator*() const { return Reg::fromIndex(m_regIndex); }
+
+ iterator& operator++()
+ {
+ m_regIndex = Reg::fromIndex(m_regIndex).next().index();
+ return *this;
+ }
+
+ bool operator==(const iterator& other) const
+ {
+ return m_regIndex == other.m_regIndex;
+ }
+
+ bool operator!=(const iterator& other) const
+ {
+ return !(*this == other);
+ }
+
+ private:
+ unsigned m_regIndex;
+ };
+
+ iterator begin() const { return iterator(Reg::first()); }
+ iterator end() const { return iterator(Reg()); }
+ };
+
+ static AllRegsIterable all() { return AllRegsIterable(); }
+
+private:
+ static uint8_t invalid() { return 0xff; }
+
+ static uint8_t deleted() { return 0xfe; }
+
+ uint8_t m_index;
+};
+
+struct RegHash {
+ static unsigned hash(const Reg& key) { return key.hash(); }
+ static bool equal(const Reg& a, const Reg& b) { return a == b; }
+ static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} // namespace JSC
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::Reg> {
+ typedef JSC::RegHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::Reg> : SimpleClassHashTraits<JSC::Reg> {
+ static const bool emptyValueIsZero = false;
+ };
+
+} // namespace WTF
+
+#endif // ENABLE(JIT)
+
+#endif // Reg_h
+
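As a quick illustration of the Reg API defined above (a sketch, not part of the patch), the following iterates every register with Reg::all(), distinguishes GPRs from FPRs, and uses Reg as a HashMap key via the RegHash/HashTraits specializations declared in this header; summarizeRegisters is a hypothetical helper.

    #include <wtf/HashMap.h>

    static void summarizeRegisters(PrintStream& out)
    {
        HashMap<Reg, unsigned> touchCounts; // works because DefaultHash<Reg>/HashTraits<Reg> are defined above
        unsigned gprs = 0;
        unsigned fprs = 0;
        for (Reg reg : Reg::all()) {
            if (reg.isGPR())
                gprs++;
            else
                fprs++;
            touchCounts.add(reg, 0);
        }
        out.print("GPRs: ", gprs, ", FPRs: ", fprs, ", total: ", touchCounts.size(), "\n");
    }
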
diff --git a/Source/JavaScriptCore/jit/RegisterAtOffset.cpp b/Source/JavaScriptCore/jit/RegisterAtOffset.cpp
new file mode 100644
index 000000000..16a639ca8
--- /dev/null
+++ b/Source/JavaScriptCore/jit/RegisterAtOffset.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "RegisterAtOffset.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+#if !COMPILER(MSVC)
+static_assert(sizeof(RegisterAtOffset) == sizeof(ptrdiff_t), "RegisterAtOffset should be small.");
+#endif
+
+void RegisterAtOffset::dump(PrintStream& out) const
+{
+ out.print(reg(), " at ", offset());
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
diff --git a/Source/JavaScriptCore/jit/JumpReplacementWatchpoint.h b/Source/JavaScriptCore/jit/RegisterAtOffset.h
index 457cbb286..3fc177070 100644
--- a/Source/JavaScriptCore/jit/JumpReplacementWatchpoint.h
+++ b/Source/JavaScriptCore/jit/RegisterAtOffset.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,58 +23,59 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef JumpReplacementWatchpoint_h
-#define JumpReplacementWatchpoint_h
-
-#include "Watchpoint.h"
-#include <wtf/Platform.h>
+#ifndef RegisterAtOffset_h
+#define RegisterAtOffset_h
#if ENABLE(JIT)
-#include "CodeLocation.h"
-#include "MacroAssembler.h"
+#include "Reg.h"
+#include <wtf/PrintStream.h>
namespace JSC {
-class JumpReplacementWatchpoint : public Watchpoint {
+class RegisterAtOffset {
public:
- JumpReplacementWatchpoint()
- : m_source(std::numeric_limits<uintptr_t>::max())
- , m_destination(std::numeric_limits<uintptr_t>::max())
+ RegisterAtOffset()
+ : m_offset(0)
{
}
- JumpReplacementWatchpoint(MacroAssembler::Label source)
- : m_source(source.m_label.m_offset)
- , m_destination(std::numeric_limits<uintptr_t>::max())
+ RegisterAtOffset(Reg reg, ptrdiff_t offset)
+ : m_reg(reg)
+ , m_offset(offset)
{
}
- MacroAssembler::Label sourceLabel() const
+ bool operator!() const { return !m_reg; }
+
+ Reg reg() const { return m_reg; }
+ ptrdiff_t offset() const { return m_offset; }
+ int offsetAsIndex() const { return offset() / sizeof(void*); }
+
+ bool operator==(const RegisterAtOffset& other) const
{
- MacroAssembler::Label label;
- label.m_label.m_offset = m_source;
- return label;
+ return reg() == other.reg() && offset() == other.offset();
}
- void setDestination(MacroAssembler::Label destination)
+ bool operator<(const RegisterAtOffset& other) const
{
- m_destination = destination.m_label.m_offset;
+ if (reg() != other.reg())
+ return reg() < other.reg();
+ return offset() < other.offset();
}
- void correctLabels(LinkBuffer&);
-
-protected:
- void fireInternal();
+ static Reg getReg(RegisterAtOffset* value) { return value->reg(); }
+
+ void dump(PrintStream& out) const;
private:
- uintptr_t m_source;
- uintptr_t m_destination;
+ Reg m_reg;
+ ptrdiff_t m_offset : sizeof(ptrdiff_t) * 8 - sizeof(Reg) * 8;
};
} // namespace JSC
#endif // ENABLE(JIT)
-#endif // JumpReplacementWatchpoint_h
+#endif // RegisterAtOffset_h
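A RegisterAtOffset is simply a register paired with a byte offset, and offsetAsIndex() expresses that offset in machine-word slots. A tiny sketch (hypothetical, not part of the patch; GPRInfo::regT0 and dataLog are the usual names from GPRInfo.h and wtf/DataLog.h):

    #include "GPRInfo.h"
    #include "RegisterAtOffset.h"
    #include <wtf/DataLog.h>

    static void registerAtOffsetExample()
    {
        // regT0 stored two machine words above the base.
        RegisterAtOffset entry(Reg(GPRInfo::regT0), 2 * sizeof(void*));
        ASSERT(entry.offsetAsIndex() == 2);
        dataLog(entry, "\n"); // prints via dump(), e.g. "<reg> at 16" on a 64-bit target
    }
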
diff --git a/Source/JavaScriptCore/jit/RegisterAtOffsetList.cpp b/Source/JavaScriptCore/jit/RegisterAtOffsetList.cpp
new file mode 100644
index 000000000..9df5d40fc
--- /dev/null
+++ b/Source/JavaScriptCore/jit/RegisterAtOffsetList.cpp
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "RegisterAtOffsetList.h"
+
+#if ENABLE(JIT)
+
+#include <wtf/ListDump.h>
+
+namespace JSC {
+
+RegisterAtOffsetList::RegisterAtOffsetList() { }
+
+RegisterAtOffsetList::RegisterAtOffsetList(RegisterSet registerSet, OffsetBaseType offsetBaseType)
+{
+ size_t numberOfRegisters = registerSet.numberOfSetRegisters();
+ ptrdiff_t offset = 0;
+
+ if (offsetBaseType == FramePointerBased)
+ offset = -(static_cast<ptrdiff_t>(numberOfRegisters) * sizeof(void*));
+
+ for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
+ if (registerSet.get(reg)) {
+ append(RegisterAtOffset(reg, offset));
+ offset += sizeof(void*);
+ }
+ }
+
+ sort();
+}
+
+void RegisterAtOffsetList::sort()
+{
+ std::sort(m_registers.begin(), m_registers.end());
+}
+
+void RegisterAtOffsetList::dump(PrintStream& out) const
+{
+ out.print(listDump(m_registers));
+}
+
+RegisterAtOffset* RegisterAtOffsetList::find(Reg reg) const
+{
+ return tryBinarySearch<RegisterAtOffset, Reg>(m_registers, m_registers.size(), reg, RegisterAtOffset::getReg);
+}
+
+unsigned RegisterAtOffsetList::indexOf(Reg reg) const
+{
+ if (RegisterAtOffset* pointer = find(reg))
+ return pointer - m_registers.begin();
+ return UINT_MAX;
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
diff --git a/Source/JavaScriptCore/jit/RegisterAtOffsetList.h b/Source/JavaScriptCore/jit/RegisterAtOffsetList.h
new file mode 100644
index 000000000..3a771beff
--- /dev/null
+++ b/Source/JavaScriptCore/jit/RegisterAtOffsetList.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RegisterAtOffsetList_h
+#define RegisterAtOffsetList_h
+
+#if ENABLE(JIT)
+
+#include "RegisterAtOffset.h"
+#include "RegisterSet.h"
+
+namespace JSC {
+
+class RegisterAtOffsetList {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ enum OffsetBaseType { FramePointerBased, ZeroBased };
+
+ RegisterAtOffsetList();
+ RegisterAtOffsetList(RegisterSet, OffsetBaseType = FramePointerBased);
+
+ void dump(PrintStream&) const;
+
+ void clear()
+ {
+ m_registers.clear();
+ }
+
+ size_t size() const
+ {
+ return m_registers.size();
+ }
+
+ RegisterAtOffset& at(size_t index)
+ {
+ return m_registers.at(index);
+ }
+
+ void append(RegisterAtOffset registerAtOffset)
+ {
+ m_registers.append(registerAtOffset);
+ }
+
+ void sort();
+ RegisterAtOffset* find(Reg) const;
+ unsigned indexOf(Reg) const; // Returns UINT_MAX if not found.
+
+ Vector<RegisterAtOffset>::const_iterator begin() const { return m_registers.begin(); }
+ Vector<RegisterAtOffset>::const_iterator end() const { return m_registers.end(); }
+
+private:
+ Vector<RegisterAtOffset> m_registers;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // RegisterAtOffsetList_h
+
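A sketch (not part of the patch) of the intended use of RegisterAtOffsetList: construct it from a RegisterSet and walk the sorted register/offset pairs. With FramePointerBased, offsets start below the frame pointer, as the constructor above shows; dumpCalleeSaveLayout is a hypothetical helper.

    #include <wtf/DataLog.h>

    static void dumpCalleeSaveLayout()
    {
        RegisterAtOffsetList calleeSaves(RegisterSet::calleeSaveRegisters(), RegisterAtOffsetList::FramePointerBased);
        for (const RegisterAtOffset& entry : calleeSaves)
            dataLog("  ", entry.reg(), " at fp offset ", entry.offset(), "\n");
        // find() binary-searches by register; it returns null if the register is not in the list.
        if (const RegisterAtOffset* slot = calleeSaves.find(Reg(MacroAssembler::framePointerRegister)))
            dataLog("frame pointer slot index: ", slot->offsetAsIndex(), "\n");
    }
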
diff --git a/Source/JavaScriptCore/jit/RegisterMap.h b/Source/JavaScriptCore/jit/RegisterMap.h
new file mode 100644
index 000000000..2ebf09af3
--- /dev/null
+++ b/Source/JavaScriptCore/jit/RegisterMap.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RegisterMap_h
+#define RegisterMap_h
+
+#if ENABLE(JIT)
+
+#include "FPRInfo.h"
+#include "GPRInfo.h"
+#include "MacroAssembler.h"
+#include "Reg.h"
+
+namespace JSC {
+
+template<typename T>
+class RegisterMap {
+public:
+ T& operator[](Reg reg)
+ {
+ return m_map[reg.index()];
+ }
+
+ T& operator[](GPRReg gpr)
+ {
+ return m_map[MacroAssembler::registerIndex(gpr)];
+ }
+
+ T& operator[](FPRReg fpr)
+ {
+ return m_map[MacroAssembler::registerIndex(fpr)];
+ }
+
+ const T& operator[](Reg reg) const
+ {
+ return m_map[reg.index()];
+ }
+
+ const T& operator[](GPRReg gpr) const
+ {
+ return m_map[MacroAssembler::registerIndex(gpr)];
+ }
+
+ const T& operator[](FPRReg fpr) const
+ {
+ return m_map[MacroAssembler::registerIndex(fpr)];
+ }
+
+private:
+ std::array<T, MacroAssembler::totalNumberOfRegisters()> m_map { { } };
+};
+
+template<typename T>
+class GPRMap {
+public:
+ T& operator[](GPRReg gpr)
+ {
+ return m_map[MacroAssembler::registerIndex(gpr)];
+ }
+
+ const T& operator[](GPRReg gpr) const
+ {
+ return m_map[MacroAssembler::registerIndex(gpr)];
+ }
+
+private:
+ std::array<T, MacroAssembler::numberOfRegisters()> m_map { { } };
+};
+
+template<typename T>
+class FPRMap {
+public:
+ T& operator[](FPRReg fpr)
+ {
+ return m_map[MacroAssembler::fpRegisterIndex(fpr)];
+ }
+
+ const T& operator[](FPRReg fpr) const
+ {
+ return m_map[MacroAssembler::fpRegisterIndex(fpr)];
+ }
+
+private:
+ std::array<T, MacroAssembler::numberOfFPRegisters()> m_map { { } };
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // RegisterMap_h
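RegisterMap, GPRMap, and FPRMap above are thin wrappers over std::array keyed by the unified register index. A brief sketch (hypothetical, not part of the patch; the GPRInfo/FPRInfo constants are the usual ones from those headers):

    static void registerMapExample()
    {
        RegisterMap<bool> live;      // indexable by Reg, GPRReg, or FPRReg
        GPRMap<unsigned> spillCount; // indexable by GPRReg only
        FPRMap<unsigned> useCount;   // indexable by FPRReg only

        live[GPRInfo::returnValueGPR] = true;
        live[FPRInfo::returnValueFPR] = true;
        spillCount[GPRInfo::regT0]++;
        useCount[FPRInfo::fpRegT0]++;

        ASSERT(live[Reg(GPRInfo::returnValueGPR)]); // Reg and GPRReg index the same slot
    }
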
diff --git a/Source/JavaScriptCore/jit/RegisterSet.cpp b/Source/JavaScriptCore/jit/RegisterSet.cpp
new file mode 100644
index 000000000..5418400b0
--- /dev/null
+++ b/Source/JavaScriptCore/jit/RegisterSet.cpp
@@ -0,0 +1,404 @@
+/*
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "RegisterSet.h"
+
+#if ENABLE(JIT)
+
+#include "GPRInfo.h"
+#include "MacroAssembler.h"
+#include "JSCInlines.h"
+#include <wtf/CommaPrinter.h>
+
+namespace JSC {
+
+RegisterSet RegisterSet::stackRegisters()
+{
+ return RegisterSet(
+ MacroAssembler::stackPointerRegister,
+ MacroAssembler::framePointerRegister);
+}
+
+RegisterSet RegisterSet::reservedHardwareRegisters()
+{
+#if CPU(ARM64)
+#if PLATFORM(IOS)
+ return RegisterSet(ARM64Registers::x18, ARM64Registers::lr);
+#else
+ return RegisterSet(ARM64Registers::lr);
+#endif // PLATFORM(IOS)
+#else
+ return RegisterSet();
+#endif
+}
+
+RegisterSet RegisterSet::runtimeRegisters()
+{
+#if USE(JSVALUE64)
+ return RegisterSet(GPRInfo::tagTypeNumberRegister, GPRInfo::tagMaskRegister);
+#else
+ return RegisterSet();
+#endif
+}
+
+RegisterSet RegisterSet::specialRegisters()
+{
+ return RegisterSet(
+ stackRegisters(), reservedHardwareRegisters(), runtimeRegisters());
+}
+
+RegisterSet RegisterSet::volatileRegistersForJSCall()
+{
+ RegisterSet volatileRegisters = allRegisters();
+ volatileRegisters.exclude(RegisterSet::stackRegisters());
+ volatileRegisters.exclude(RegisterSet::reservedHardwareRegisters());
+ volatileRegisters.exclude(RegisterSet::vmCalleeSaveRegisters());
+ return volatileRegisters;
+}
+
+RegisterSet RegisterSet::stubUnavailableRegisters()
+{
+ return RegisterSet(specialRegisters(), vmCalleeSaveRegisters());
+}
+
+RegisterSet RegisterSet::macroScratchRegisters()
+{
+#if CPU(X86_64)
+ return RegisterSet(MacroAssembler::s_scratchRegister);
+#elif CPU(ARM64)
+ return RegisterSet(MacroAssembler::dataTempRegister, MacroAssembler::memoryTempRegister);
+#elif CPU(MIPS)
+ RegisterSet result;
+ result.set(MacroAssembler::immTempRegister);
+ result.set(MacroAssembler::dataTempRegister);
+ result.set(MacroAssembler::addrTempRegister);
+ result.set(MacroAssembler::cmpTempRegister);
+ return result;
+#else
+ return RegisterSet();
+#endif
+}
+
+RegisterSet RegisterSet::calleeSaveRegisters()
+{
+ RegisterSet result;
+#if CPU(X86)
+ result.set(X86Registers::ebx);
+ result.set(X86Registers::ebp);
+ result.set(X86Registers::edi);
+ result.set(X86Registers::esi);
+#elif CPU(X86_64)
+ result.set(X86Registers::ebx);
+ result.set(X86Registers::ebp);
+ result.set(X86Registers::r12);
+ result.set(X86Registers::r13);
+ result.set(X86Registers::r14);
+ result.set(X86Registers::r15);
+#elif CPU(ARM_THUMB2)
+ result.set(ARMRegisters::r4);
+ result.set(ARMRegisters::r5);
+ result.set(ARMRegisters::r6);
+ result.set(ARMRegisters::r8);
+#if !PLATFORM(IOS)
+ result.set(ARMRegisters::r9);
+#endif
+ result.set(ARMRegisters::r10);
+ result.set(ARMRegisters::r11);
+#elif CPU(ARM_TRADITIONAL)
+ result.set(ARMRegisters::r4);
+ result.set(ARMRegisters::r5);
+ result.set(ARMRegisters::r6);
+ result.set(ARMRegisters::r7);
+ result.set(ARMRegisters::r8);
+ result.set(ARMRegisters::r9);
+ result.set(ARMRegisters::r10);
+ result.set(ARMRegisters::r11);
+#elif CPU(ARM64)
+ // We don't include LR in the set of callee-save registers even though it technically belongs
+ // there. This set describes the registers that need to be saved beyond what the platform-agnostic
+ // "preserve return address" and "restore return address" operations in CCallHelpers already
+ // handle.
+ for (
+ ARM64Registers::RegisterID reg = ARM64Registers::x19;
+ reg <= ARM64Registers::x28;
+ reg = static_cast<ARM64Registers::RegisterID>(reg + 1))
+ result.set(reg);
+ result.set(ARM64Registers::fp);
+ for (
+ ARM64Registers::FPRegisterID reg = ARM64Registers::q8;
+ reg <= ARM64Registers::q15;
+ reg = static_cast<ARM64Registers::FPRegisterID>(reg + 1))
+ result.set(reg);
+#elif CPU(MIPS)
+#else
+ UNREACHABLE_FOR_PLATFORM();
+#endif
+ return result;
+}
+
+RegisterSet RegisterSet::vmCalleeSaveRegisters()
+{
+ RegisterSet result;
+#if CPU(X86_64)
+ result.set(GPRInfo::regCS0);
+ result.set(GPRInfo::regCS1);
+ result.set(GPRInfo::regCS2);
+ result.set(GPRInfo::regCS3);
+ result.set(GPRInfo::regCS4);
+#if OS(WINDOWS)
+ result.set(GPRInfo::regCS5);
+ result.set(GPRInfo::regCS6);
+#endif
+#elif CPU(ARM64)
+ result.set(GPRInfo::regCS0);
+ result.set(GPRInfo::regCS1);
+ result.set(GPRInfo::regCS2);
+ result.set(GPRInfo::regCS3);
+ result.set(GPRInfo::regCS4);
+ result.set(GPRInfo::regCS5);
+ result.set(GPRInfo::regCS6);
+ result.set(GPRInfo::regCS7);
+ result.set(GPRInfo::regCS8);
+ result.set(GPRInfo::regCS9);
+ result.set(FPRInfo::fpRegCS0);
+ result.set(FPRInfo::fpRegCS1);
+ result.set(FPRInfo::fpRegCS2);
+ result.set(FPRInfo::fpRegCS3);
+ result.set(FPRInfo::fpRegCS4);
+ result.set(FPRInfo::fpRegCS5);
+ result.set(FPRInfo::fpRegCS6);
+ result.set(FPRInfo::fpRegCS7);
+#endif
+ return result;
+}
+
+RegisterSet RegisterSet::llintBaselineCalleeSaveRegisters()
+{
+ RegisterSet result;
+#if CPU(X86)
+#elif CPU(X86_64)
+#if !OS(WINDOWS)
+ result.set(GPRInfo::regCS2);
+ ASSERT(GPRInfo::regCS3 == GPRInfo::tagTypeNumberRegister);
+ ASSERT(GPRInfo::regCS4 == GPRInfo::tagMaskRegister);
+ result.set(GPRInfo::regCS3);
+ result.set(GPRInfo::regCS4);
+#else
+ result.set(GPRInfo::regCS4);
+ ASSERT(GPRInfo::regCS5 == GPRInfo::tagTypeNumberRegister);
+ ASSERT(GPRInfo::regCS6 == GPRInfo::tagMaskRegister);
+ result.set(GPRInfo::regCS5);
+ result.set(GPRInfo::regCS6);
+#endif
+#elif CPU(ARM_THUMB2)
+#elif CPU(ARM_TRADITIONAL)
+#elif CPU(ARM64)
+ result.set(GPRInfo::regCS7);
+ ASSERT(GPRInfo::regCS8 == GPRInfo::tagTypeNumberRegister);
+ ASSERT(GPRInfo::regCS9 == GPRInfo::tagMaskRegister);
+ result.set(GPRInfo::regCS8);
+ result.set(GPRInfo::regCS9);
+#elif CPU(MIPS)
+#elif CPU(SH4)
+#else
+ UNREACHABLE_FOR_PLATFORM();
+#endif
+ return result;
+}
+
+RegisterSet RegisterSet::dfgCalleeSaveRegisters()
+{
+ RegisterSet result;
+#if CPU(X86)
+#elif CPU(X86_64)
+ result.set(GPRInfo::regCS0);
+ result.set(GPRInfo::regCS1);
+ result.set(GPRInfo::regCS2);
+#if !OS(WINDOWS)
+ ASSERT(GPRInfo::regCS3 == GPRInfo::tagTypeNumberRegister);
+ ASSERT(GPRInfo::regCS4 == GPRInfo::tagMaskRegister);
+ result.set(GPRInfo::regCS3);
+ result.set(GPRInfo::regCS4);
+#else
+ result.set(GPRInfo::regCS3);
+ result.set(GPRInfo::regCS4);
+ ASSERT(GPRInfo::regCS5 == GPRInfo::tagTypeNumberRegister);
+ ASSERT(GPRInfo::regCS6 == GPRInfo::tagMaskRegister);
+ result.set(GPRInfo::regCS5);
+ result.set(GPRInfo::regCS6);
+#endif
+#elif CPU(ARM_THUMB2)
+#elif CPU(ARM_TRADITIONAL)
+#elif CPU(ARM64)
+ ASSERT(GPRInfo::regCS8 == GPRInfo::tagTypeNumberRegister);
+ ASSERT(GPRInfo::regCS9 == GPRInfo::tagMaskRegister);
+ result.set(GPRInfo::regCS8);
+ result.set(GPRInfo::regCS9);
+#elif CPU(MIPS)
+#elif CPU(SH4)
+#else
+ UNREACHABLE_FOR_PLATFORM();
+#endif
+ return result;
+}
+
+RegisterSet RegisterSet::ftlCalleeSaveRegisters()
+{
+ RegisterSet result;
+#if ENABLE(FTL_JIT)
+#if CPU(X86_64) && !OS(WINDOWS)
+ result.set(GPRInfo::regCS0);
+ result.set(GPRInfo::regCS1);
+ result.set(GPRInfo::regCS2);
+ ASSERT(GPRInfo::regCS3 == GPRInfo::tagTypeNumberRegister);
+ ASSERT(GPRInfo::regCS4 == GPRInfo::tagMaskRegister);
+ result.set(GPRInfo::regCS3);
+ result.set(GPRInfo::regCS4);
+#elif CPU(ARM64)
+ // B3 might save and use all ARM64 callee saves specified in the ABI.
+ result.set(GPRInfo::regCS0);
+ result.set(GPRInfo::regCS1);
+ result.set(GPRInfo::regCS2);
+ result.set(GPRInfo::regCS3);
+ result.set(GPRInfo::regCS4);
+ result.set(GPRInfo::regCS5);
+ result.set(GPRInfo::regCS6);
+ result.set(GPRInfo::regCS7);
+ ASSERT(GPRInfo::regCS8 == GPRInfo::tagTypeNumberRegister);
+ ASSERT(GPRInfo::regCS9 == GPRInfo::tagMaskRegister);
+ result.set(GPRInfo::regCS8);
+ result.set(GPRInfo::regCS9);
+ result.set(FPRInfo::fpRegCS0);
+ result.set(FPRInfo::fpRegCS1);
+ result.set(FPRInfo::fpRegCS2);
+ result.set(FPRInfo::fpRegCS3);
+ result.set(FPRInfo::fpRegCS4);
+ result.set(FPRInfo::fpRegCS5);
+ result.set(FPRInfo::fpRegCS6);
+ result.set(FPRInfo::fpRegCS7);
+#else
+ UNREACHABLE_FOR_PLATFORM();
+#endif
+#endif
+ return result;
+}
+
+#if ENABLE(WEBASSEMBLY)
+RegisterSet RegisterSet::webAssemblyCalleeSaveRegisters()
+{
+ RegisterSet result;
+#if CPU(X86)
+#elif CPU(X86_64)
+#if !OS(WINDOWS)
+ ASSERT(GPRInfo::regCS3 == GPRInfo::tagTypeNumberRegister);
+ ASSERT(GPRInfo::regCS4 == GPRInfo::tagMaskRegister);
+ result.set(GPRInfo::regCS3);
+ result.set(GPRInfo::regCS4);
+#else
+ ASSERT(GPRInfo::regCS5 == GPRInfo::tagTypeNumberRegister);
+ ASSERT(GPRInfo::regCS6 == GPRInfo::tagMaskRegister);
+ result.set(GPRInfo::regCS5);
+ result.set(GPRInfo::regCS6);
+#endif
+#elif CPU(ARM_THUMB2)
+#elif CPU(ARM_TRADITIONAL)
+#elif CPU(ARM64)
+ ASSERT(GPRInfo::regCS8 == GPRInfo::tagTypeNumberRegister);
+ ASSERT(GPRInfo::regCS9 == GPRInfo::tagMaskRegister);
+ result.set(GPRInfo::regCS8);
+ result.set(GPRInfo::regCS9);
+#elif CPU(MIPS)
+#elif CPU(SH4)
+#else
+ UNREACHABLE_FOR_PLATFORM();
+#endif
+ return result;
+}
+#endif
+
+RegisterSet RegisterSet::registersToNotSaveForJSCall()
+{
+ return RegisterSet(RegisterSet::vmCalleeSaveRegisters(), RegisterSet::stackRegisters(), RegisterSet::reservedHardwareRegisters());
+}
+
+RegisterSet RegisterSet::registersToNotSaveForCCall()
+{
+ return RegisterSet(RegisterSet::calleeSaveRegisters(), RegisterSet::stackRegisters(), RegisterSet::reservedHardwareRegisters());
+}
+
+RegisterSet RegisterSet::allGPRs()
+{
+ RegisterSet result;
+ for (MacroAssembler::RegisterID reg = MacroAssembler::firstRegister(); reg <= MacroAssembler::lastRegister(); reg = static_cast<MacroAssembler::RegisterID>(reg + 1))
+ result.set(reg);
+ return result;
+}
+
+RegisterSet RegisterSet::allFPRs()
+{
+ RegisterSet result;
+ for (MacroAssembler::FPRegisterID reg = MacroAssembler::firstFPRegister(); reg <= MacroAssembler::lastFPRegister(); reg = static_cast<MacroAssembler::FPRegisterID>(reg + 1))
+ result.set(reg);
+ return result;
+}
+
+RegisterSet RegisterSet::allRegisters()
+{
+ RegisterSet result;
+ result.merge(allGPRs());
+ result.merge(allFPRs());
+ return result;
+}
+
+size_t RegisterSet::numberOfSetGPRs() const
+{
+ RegisterSet temp = *this;
+ temp.filter(allGPRs());
+ return temp.numberOfSetRegisters();
+}
+
+size_t RegisterSet::numberOfSetFPRs() const
+{
+ RegisterSet temp = *this;
+ temp.filter(allFPRs());
+ return temp.numberOfSetRegisters();
+}
+
+void RegisterSet::dump(PrintStream& out) const
+{
+ CommaPrinter comma;
+ out.print("[");
+ for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
+ if (get(reg))
+ out.print(comma, reg);
+ }
+ out.print("]");
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
diff --git a/Source/JavaScriptCore/jit/RegisterSet.h b/Source/JavaScriptCore/jit/RegisterSet.h
new file mode 100644
index 000000000..a95583ea8
--- /dev/null
+++ b/Source/JavaScriptCore/jit/RegisterSet.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2013-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef RegisterSet_h
+#define RegisterSet_h
+
+#if ENABLE(JIT)
+
+#include "FPRInfo.h"
+#include "GPRInfo.h"
+#include "MacroAssembler.h"
+#include "Reg.h"
+#include "TempRegisterSet.h"
+#include <wtf/BitVector.h>
+
+namespace JSC {
+
+class RegisterSet {
+public:
+ template<typename... Regs>
+ explicit RegisterSet(Regs... regs)
+ {
+ setMany(regs...);
+ }
+
+ JS_EXPORT_PRIVATE static RegisterSet stackRegisters();
+ JS_EXPORT_PRIVATE static RegisterSet reservedHardwareRegisters();
+ static RegisterSet runtimeRegisters();
+ static RegisterSet specialRegisters(); // The union of stack, reserved hardware, and runtime registers.
+ static RegisterSet calleeSaveRegisters();
+ static RegisterSet vmCalleeSaveRegisters(); // Callee save registers that might be saved and used by any tier.
+ static RegisterSet llintBaselineCalleeSaveRegisters(); // Registers saved and used by the LLInt.
+ static RegisterSet dfgCalleeSaveRegisters(); // Registers saved and used by the DFG JIT.
+ static RegisterSet ftlCalleeSaveRegisters(); // Registers that might be saved and used by the FTL JIT.
+#if ENABLE(WEBASSEMBLY)
+ static RegisterSet webAssemblyCalleeSaveRegisters(); // Registers saved and used by the WebAssembly JIT.
+#endif
+ static RegisterSet volatileRegistersForJSCall();
+ static RegisterSet stubUnavailableRegisters(); // The union of callee saves and special registers.
+ JS_EXPORT_PRIVATE static RegisterSet macroScratchRegisters();
+ JS_EXPORT_PRIVATE static RegisterSet allGPRs();
+ JS_EXPORT_PRIVATE static RegisterSet allFPRs();
+ static RegisterSet allRegisters();
+
+ static RegisterSet registersToNotSaveForJSCall();
+ static RegisterSet registersToNotSaveForCCall();
+
+ void set(Reg reg, bool value = true)
+ {
+ ASSERT(!!reg);
+ m_vector.set(reg.index(), value);
+ }
+
+ void set(JSValueRegs regs, bool value = true)
+ {
+ if (regs.tagGPR() != InvalidGPRReg)
+ set(regs.tagGPR(), value);
+ set(regs.payloadGPR(), value);
+ }
+
+ void clear(Reg reg)
+ {
+ ASSERT(!!reg);
+ set(reg, false);
+ }
+
+ bool get(Reg reg) const
+ {
+ ASSERT(!!reg);
+ return m_vector.get(reg.index());
+ }
+
+ template<typename Iterable>
+ void setAll(const Iterable& iterable)
+ {
+ for (Reg reg : iterable)
+ set(reg);
+ }
+
+ void merge(const RegisterSet& other) { m_vector.merge(other.m_vector); }
+ void filter(const RegisterSet& other) { m_vector.filter(other.m_vector); }
+ void exclude(const RegisterSet& other) { m_vector.exclude(other.m_vector); }
+
+ size_t numberOfSetGPRs() const;
+ size_t numberOfSetFPRs() const;
+ size_t numberOfSetRegisters() const { return m_vector.bitCount(); }
+
+ void dump(PrintStream&) const;
+
+ enum EmptyValueTag { EmptyValue };
+ enum DeletedValueTag { DeletedValue };
+
+ RegisterSet(EmptyValueTag)
+ : m_vector(BitVector::EmptyValue)
+ {
+ }
+
+ RegisterSet(DeletedValueTag)
+ : m_vector(BitVector::DeletedValue)
+ {
+ }
+
+ bool isEmptyValue() const { return m_vector.isEmptyValue(); }
+ bool isDeletedValue() const { return m_vector.isDeletedValue(); }
+
+ bool operator==(const RegisterSet& other) const { return m_vector == other.m_vector; }
+ unsigned hash() const { return m_vector.hash(); }
+
+ template<typename Functor>
+ void forEach(const Functor& functor) const
+ {
+ for (size_t index : m_vector)
+ functor(Reg::fromIndex(index));
+ }
+
+private:
+ void setAny(Reg reg) { set(reg); }
+ void setAny(const RegisterSet& set) { merge(set); }
+ void setMany() { }
+ template<typename RegType, typename... Regs>
+ void setMany(RegType reg, Regs... regs)
+ {
+ setAny(reg);
+ setMany(regs...);
+ }
+
+ BitVector m_vector;
+};
+
+struct RegisterSetHash {
+ static unsigned hash(const RegisterSet& set) { return set.hash(); }
+ static bool equal(const RegisterSet& a, const RegisterSet& b) { return a == b; }
+ static const bool safeToCompareToEmptyOrDeleted = false;
+};
+
+} // namespace JSC
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::RegisterSet> {
+ typedef JSC::RegisterSetHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::RegisterSet> : public CustomHashTraits<JSC::RegisterSet> { };
+
+} // namespace WTF
+
+#endif // ENABLE(JIT)
+
+#endif // RegisterSet_h
+
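To tie the register classes together, here is a sketch (not part of the patch) of typical RegisterSet arithmetic, mirroring how volatileRegistersForJSCall() is computed in RegisterSet.cpp above: start from every register, then exclude what a stub must leave alone; usableScratchRegisters is a hypothetical helper.

    static RegisterSet usableScratchRegisters()
    {
        RegisterSet result = RegisterSet::allRegisters();
        result.exclude(RegisterSet::stubUnavailableRegisters()); // callee saves + stack/reserved/runtime registers
        return result;
    }

    // Example: dataLog("scratch: ", usableScratchRegisters(), "\n") prints the set via RegisterSet::dump().
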
diff --git a/Source/JavaScriptCore/jit/Repatch.cpp b/Source/JavaScriptCore/jit/Repatch.cpp
new file mode 100644
index 000000000..bd95f665a
--- /dev/null
+++ b/Source/JavaScriptCore/jit/Repatch.cpp
@@ -0,0 +1,939 @@
+/*
+ * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "Repatch.h"
+
+#if ENABLE(JIT)
+
+#include "BinarySwitch.h"
+#include "CCallHelpers.h"
+#include "CallFrameShuffler.h"
+#include "DFGOperations.h"
+#include "DFGSpeculativeJIT.h"
+#include "FTLThunks.h"
+#include "GCAwareJITStubRoutine.h"
+#include "GetterSetter.h"
+#include "JIT.h"
+#include "JITInlines.h"
+#include "LinkBuffer.h"
+#include "JSCInlines.h"
+#include "PolymorphicAccess.h"
+#include "ScratchRegisterAllocator.h"
+#include "StackAlignment.h"
+#include "StructureRareDataInlines.h"
+#include "StructureStubClearingWatchpoint.h"
+#include "ThunkGenerators.h"
+#include <wtf/CommaPrinter.h>
+#include <wtf/ListDump.h>
+#include <wtf/StringPrintStream.h>
+
+namespace JSC {
+
+// Beware: in this code, it is not safe to assume anything about the following registers
+// that would ordinarily have well-known values:
+// - tagTypeNumberRegister
+// - tagMaskRegister
+
+static FunctionPtr readCallTarget(CodeBlock* codeBlock, CodeLocationCall call)
+{
+ FunctionPtr result = MacroAssembler::readCallTarget(call);
+#if ENABLE(FTL_JIT)
+ if (codeBlock->jitType() == JITCode::FTLJIT) {
+ return FunctionPtr(codeBlock->vm()->ftlThunks->keyForSlowPathCallThunk(
+ MacroAssemblerCodePtr::createFromExecutableAddress(
+ result.executableAddress())).callTarget());
+ }
+#else
+ UNUSED_PARAM(codeBlock);
+#endif // ENABLE(FTL_JIT)
+ return result;
+}
+
+static void repatchCall(CodeBlock* codeBlock, CodeLocationCall call, FunctionPtr newCalleeFunction)
+{
+#if ENABLE(FTL_JIT)
+ if (codeBlock->jitType() == JITCode::FTLJIT) {
+ VM& vm = *codeBlock->vm();
+ FTL::Thunks& thunks = *vm.ftlThunks;
+ FTL::SlowPathCallKey key = thunks.keyForSlowPathCallThunk(
+ MacroAssemblerCodePtr::createFromExecutableAddress(
+ MacroAssembler::readCallTarget(call).executableAddress()));
+ key = key.withCallTarget(newCalleeFunction.executableAddress());
+ newCalleeFunction = FunctionPtr(
+ thunks.getSlowPathCallThunk(vm, key).code().executableAddress());
+ }
+#else // ENABLE(FTL_JIT)
+ UNUSED_PARAM(codeBlock);
+#endif // ENABLE(FTL_JIT)
+ MacroAssembler::repatchCall(call, newCalleeFunction);
+}
+
+static void repatchByIdSelfAccess(
+ CodeBlock* codeBlock, StructureStubInfo& stubInfo, Structure* structure,
+ PropertyOffset offset, const FunctionPtr &slowPathFunction,
+ bool compact)
+{
+ // Only optimize once!
+ repatchCall(codeBlock, stubInfo.callReturnLocation, slowPathFunction);
+
+ // Patch the structure check & the offset of the load.
+ MacroAssembler::repatchInt32(
+ stubInfo.callReturnLocation.dataLabel32AtOffset(-(intptr_t)stubInfo.patch.deltaCheckImmToCall),
+ bitwise_cast<int32_t>(structure->id()));
+#if USE(JSVALUE64)
+ if (compact)
+ MacroAssembler::repatchCompact(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToLoadOrStore), offsetRelativeToBase(offset));
+ else
+ MacroAssembler::repatchInt32(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToLoadOrStore), offsetRelativeToBase(offset));
+#elif USE(JSVALUE32_64)
+ if (compact) {
+ MacroAssembler::repatchCompact(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToTagLoadOrStore), offsetRelativeToBase(offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ MacroAssembler::repatchCompact(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToPayloadLoadOrStore), offsetRelativeToBase(offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ } else {
+ MacroAssembler::repatchInt32(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToTagLoadOrStore), offsetRelativeToBase(offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ MacroAssembler::repatchInt32(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToPayloadLoadOrStore), offsetRelativeToBase(offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ }
+#endif
+}
+
+static void resetGetByIDCheckAndLoad(StructureStubInfo& stubInfo)
+{
+ CodeLocationDataLabel32 structureLabel = stubInfo.callReturnLocation.dataLabel32AtOffset(-(intptr_t)stubInfo.patch.deltaCheckImmToCall);
+ if (MacroAssembler::canJumpReplacePatchableBranch32WithPatch()) {
+ MacroAssembler::revertJumpReplacementToPatchableBranch32WithPatch(
+ MacroAssembler::startOfPatchableBranch32WithPatchOnAddress(structureLabel),
+ MacroAssembler::Address(
+ static_cast<MacroAssembler::RegisterID>(stubInfo.patch.baseGPR),
+ JSCell::structureIDOffset()),
+ static_cast<int32_t>(unusedPointer));
+ }
+ MacroAssembler::repatchInt32(structureLabel, static_cast<int32_t>(unusedPointer));
+#if USE(JSVALUE64)
+ MacroAssembler::repatchCompact(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToLoadOrStore), 0);
+#else
+ MacroAssembler::repatchCompact(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToTagLoadOrStore), 0);
+ MacroAssembler::repatchCompact(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToPayloadLoadOrStore), 0);
+#endif
+}
+
+static void resetPutByIDCheckAndLoad(StructureStubInfo& stubInfo)
+{
+ CodeLocationDataLabel32 structureLabel = stubInfo.callReturnLocation.dataLabel32AtOffset(-(intptr_t)stubInfo.patch.deltaCheckImmToCall);
+ if (MacroAssembler::canJumpReplacePatchableBranch32WithPatch()) {
+ MacroAssembler::revertJumpReplacementToPatchableBranch32WithPatch(
+ MacroAssembler::startOfPatchableBranch32WithPatchOnAddress(structureLabel),
+ MacroAssembler::Address(
+ static_cast<MacroAssembler::RegisterID>(stubInfo.patch.baseGPR),
+ JSCell::structureIDOffset()),
+ static_cast<int32_t>(unusedPointer));
+ }
+ MacroAssembler::repatchInt32(structureLabel, static_cast<int32_t>(unusedPointer));
+#if USE(JSVALUE64)
+ MacroAssembler::repatchInt32(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToLoadOrStore), 0);
+#else
+ MacroAssembler::repatchInt32(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToTagLoadOrStore), 0);
+ MacroAssembler::repatchInt32(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToPayloadLoadOrStore), 0);
+#endif
+}
+
+static void replaceWithJump(StructureStubInfo& stubInfo, const MacroAssemblerCodePtr target)
+{
+ if (MacroAssembler::canJumpReplacePatchableBranch32WithPatch()) {
+ MacroAssembler::replaceWithJump(
+ MacroAssembler::startOfPatchableBranch32WithPatchOnAddress(
+ stubInfo.callReturnLocation.dataLabel32AtOffset(
+ -(intptr_t)stubInfo.patch.deltaCheckImmToCall)),
+ CodeLocationLabel(target));
+ return;
+ }
+
+ resetGetByIDCheckAndLoad(stubInfo);
+
+ MacroAssembler::repatchJump(
+ stubInfo.callReturnLocation.jumpAtOffset(
+ stubInfo.patch.deltaCallToJump),
+ CodeLocationLabel(target));
+}
+
+enum InlineCacheAction {
+ GiveUpOnCache,
+ RetryCacheLater,
+ AttemptToCache
+};
+
+static InlineCacheAction actionForCell(VM& vm, JSCell* cell)
+{
+ Structure* structure = cell->structure(vm);
+
+ TypeInfo typeInfo = structure->typeInfo();
+ if (typeInfo.prohibitsPropertyCaching())
+ return GiveUpOnCache;
+
+ if (structure->isUncacheableDictionary()) {
+ if (structure->hasBeenFlattenedBefore())
+ return GiveUpOnCache;
+ // Flattening could have changed the offset, so return early for another try.
+ asObject(cell)->flattenDictionaryObject(vm);
+ return RetryCacheLater;
+ }
+
+ if (!structure->propertyAccessesAreCacheable())
+ return GiveUpOnCache;
+
+ return AttemptToCache;
+}
+
+static bool forceICFailure(ExecState*)
+{
+ return Options::forceICFailure();
+}
+
+static InlineCacheAction tryCacheGetByID(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo)
+{
+ if (forceICFailure(exec))
+ return GiveUpOnCache;
+
+ // FIXME: Cache property access for immediates.
+ if (!baseValue.isCell())
+ return GiveUpOnCache;
+
+ CodeBlock* codeBlock = exec->codeBlock();
+ VM& vm = exec->vm();
+
+ std::unique_ptr<AccessCase> newCase;
+
+ if (isJSArray(baseValue) && propertyName == exec->propertyNames().length)
+ newCase = AccessCase::getLength(vm, codeBlock, AccessCase::ArrayLength);
+ else if (isJSString(baseValue) && propertyName == exec->propertyNames().length)
+ newCase = AccessCase::getLength(vm, codeBlock, AccessCase::StringLength);
+ else {
+ if (!slot.isCacheable() && !slot.isUnset())
+ return GiveUpOnCache;
+
+ ObjectPropertyConditionSet conditionSet;
+ JSCell* baseCell = baseValue.asCell();
+ Structure* structure = baseCell->structure(vm);
+
+ bool loadTargetFromProxy = false;
+ if (baseCell->type() == PureForwardingProxyType) {
+ baseValue = jsCast<JSProxy*>(baseCell)->target();
+ baseCell = baseValue.asCell();
+ structure = baseCell->structure(vm);
+ loadTargetFromProxy = true;
+ }
+
+ InlineCacheAction action = actionForCell(vm, baseCell);
+ if (action != AttemptToCache)
+ return action;
+
+ // Optimize self access.
+ if (stubInfo.cacheType == CacheType::Unset
+ && slot.isCacheableValue()
+ && slot.slotBase() == baseValue
+ && !slot.watchpointSet()
+ && isInlineOffset(slot.cachedOffset())
+ && MacroAssembler::isCompactPtrAlignedAddressOffset(maxOffsetRelativeToBase(slot.cachedOffset()))
+ && action == AttemptToCache
+ && !structure->needImpurePropertyWatchpoint()
+ && !loadTargetFromProxy) {
+ structure->startWatchingPropertyForReplacements(vm, slot.cachedOffset());
+ repatchByIdSelfAccess(codeBlock, stubInfo, structure, slot.cachedOffset(), operationGetByIdOptimize, true);
+ stubInfo.initGetByIdSelf(codeBlock, structure, slot.cachedOffset());
+ return RetryCacheLater;
+ }
+
+ PropertyOffset offset = slot.isUnset() ? invalidOffset : slot.cachedOffset();
+
+ if (slot.isUnset() || slot.slotBase() != baseValue) {
+ if (structure->typeInfo().prohibitsPropertyCaching() || structure->isDictionary())
+ return GiveUpOnCache;
+
+ if (slot.isUnset() && structure->typeInfo().getOwnPropertySlotIsImpureForPropertyAbsence())
+ return GiveUpOnCache;
+
+ if (slot.isUnset()) {
+ conditionSet = generateConditionsForPropertyMiss(
+ vm, codeBlock, exec, structure, propertyName.impl());
+ } else {
+ conditionSet = generateConditionsForPrototypePropertyHit(
+ vm, codeBlock, exec, structure, slot.slotBase(),
+ propertyName.impl());
+ }
+
+ if (!conditionSet.isValid())
+ return GiveUpOnCache;
+
+ offset = slot.isUnset() ? invalidOffset : conditionSet.slotBaseCondition().offset();
+ }
+
+ JSFunction* getter = nullptr;
+ if (slot.isCacheableGetter())
+ getter = jsDynamicCast<JSFunction*>(slot.getterSetter()->getter());
+
+ if (!loadTargetFromProxy && getter && AccessCase::canEmitIntrinsicGetter(getter, structure))
+ newCase = AccessCase::getIntrinsic(vm, codeBlock, getter, slot.cachedOffset(), structure, conditionSet);
+ else {
+ AccessCase::AccessType type;
+ if (slot.isCacheableValue())
+ type = AccessCase::Load;
+ else if (slot.isUnset())
+ type = AccessCase::Miss;
+ else if (slot.isCacheableGetter())
+ type = AccessCase::Getter;
+ else if (slot.attributes() & CustomAccessor)
+ type = AccessCase::CustomAccessorGetter;
+ else
+ type = AccessCase::CustomValueGetter;
+
+ newCase = AccessCase::get(
+ vm, codeBlock, type, offset, structure, conditionSet, loadTargetFromProxy,
+ slot.watchpointSet(), slot.isCacheableCustom() ? slot.customGetter() : nullptr,
+ slot.isCacheableCustom() ? slot.slotBase() : nullptr);
+ }
+ }
+
+ MacroAssemblerCodePtr codePtr =
+ stubInfo.addAccessCase(codeBlock, propertyName, WTFMove(newCase));
+
+ if (!codePtr)
+ return GiveUpOnCache;
+
+ replaceWithJump(stubInfo, codePtr);
+
+ return RetryCacheLater;
+}
+
+void repatchGetByID(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo)
+{
+ GCSafeConcurrentJITLocker locker(exec->codeBlock()->m_lock, exec->vm().heap);
+
+ if (tryCacheGetByID(exec, baseValue, propertyName, slot, stubInfo) == GiveUpOnCache)
+ repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, operationGetById);
+}
+
+static V_JITOperation_ESsiJJI appropriateGenericPutByIdFunction(const PutPropertySlot &slot, PutKind putKind)
+{
+ if (slot.isStrictMode()) {
+ if (putKind == Direct)
+ return operationPutByIdDirectStrict;
+ return operationPutByIdStrict;
+ }
+ if (putKind == Direct)
+ return operationPutByIdDirectNonStrict;
+ return operationPutByIdNonStrict;
+}
+
+static V_JITOperation_ESsiJJI appropriateOptimizingPutByIdFunction(const PutPropertySlot &slot, PutKind putKind)
+{
+ if (slot.isStrictMode()) {
+ if (putKind == Direct)
+ return operationPutByIdDirectStrictOptimize;
+ return operationPutByIdStrictOptimize;
+ }
+ if (putKind == Direct)
+ return operationPutByIdDirectNonStrictOptimize;
+ return operationPutByIdNonStrictOptimize;
+}
+
+static InlineCacheAction tryCachePutByID(ExecState* exec, JSValue baseValue, Structure* structure, const Identifier& ident, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
+{
+ if (forceICFailure(exec))
+ return GiveUpOnCache;
+
+ CodeBlock* codeBlock = exec->codeBlock();
+ VM& vm = exec->vm();
+
+ if (!baseValue.isCell())
+ return GiveUpOnCache;
+
+ if (!slot.isCacheablePut() && !slot.isCacheableCustom() && !slot.isCacheableSetter())
+ return GiveUpOnCache;
+
+ if (!structure->propertyAccessesAreCacheable())
+ return GiveUpOnCache;
+
+ std::unique_ptr<AccessCase> newCase;
+
+ if (slot.base() == baseValue && slot.isCacheablePut()) {
+ if (slot.type() == PutPropertySlot::ExistingProperty) {
+ structure->didCachePropertyReplacement(vm, slot.cachedOffset());
+
+ if (stubInfo.cacheType == CacheType::Unset
+ && isInlineOffset(slot.cachedOffset())
+ && MacroAssembler::isPtrAlignedAddressOffset(maxOffsetRelativeToBase(slot.cachedOffset()))
+ && !structure->needImpurePropertyWatchpoint()
+ && !structure->inferredTypeFor(ident.impl())) {
+
+ repatchByIdSelfAccess(
+ codeBlock, stubInfo, structure, slot.cachedOffset(),
+ appropriateOptimizingPutByIdFunction(slot, putKind), false);
+ stubInfo.initPutByIdReplace(codeBlock, structure, slot.cachedOffset());
+ return RetryCacheLater;
+ }
+
+ newCase = AccessCase::replace(vm, codeBlock, structure, slot.cachedOffset());
+ } else {
+ ASSERT(slot.type() == PutPropertySlot::NewProperty);
+
+ if (!structure->isObject() || structure->isDictionary())
+ return GiveUpOnCache;
+
+ PropertyOffset offset;
+ Structure* newStructure =
+ Structure::addPropertyTransitionToExistingStructureConcurrently(
+ structure, ident.impl(), 0, offset);
+ if (!newStructure || !newStructure->propertyAccessesAreCacheable())
+ return GiveUpOnCache;
+
+ ASSERT(newStructure->previousID() == structure);
+ ASSERT(!newStructure->isDictionary());
+ ASSERT(newStructure->isObject());
+
+ ObjectPropertyConditionSet conditionSet;
+ if (putKind == NotDirect) {
+ conditionSet =
+ generateConditionsForPropertySetterMiss(
+ vm, codeBlock, exec, newStructure, ident.impl());
+ if (!conditionSet.isValid())
+ return GiveUpOnCache;
+ }
+
+ newCase = AccessCase::transition(vm, codeBlock, structure, newStructure, offset, conditionSet);
+ }
+ } else if (slot.isCacheableCustom() || slot.isCacheableSetter()) {
+ if (slot.isCacheableCustom()) {
+ ObjectPropertyConditionSet conditionSet;
+
+ if (slot.base() != baseValue) {
+ conditionSet =
+ generateConditionsForPrototypePropertyHitCustom(
+ vm, codeBlock, exec, structure, slot.base(), ident.impl());
+ if (!conditionSet.isValid())
+ return GiveUpOnCache;
+ }
+
+ newCase = AccessCase::setter(
+ vm, codeBlock, slot.isCustomAccessor() ? AccessCase::CustomAccessorSetter : AccessCase::CustomValueSetter, structure, invalidOffset, conditionSet,
+ slot.customSetter(), slot.base());
+ } else {
+ ObjectPropertyConditionSet conditionSet;
+ PropertyOffset offset;
+
+ if (slot.base() != baseValue) {
+ conditionSet =
+ generateConditionsForPrototypePropertyHit(
+ vm, codeBlock, exec, structure, slot.base(), ident.impl());
+ if (!conditionSet.isValid())
+ return GiveUpOnCache;
+ offset = conditionSet.slotBaseCondition().offset();
+ } else
+ offset = slot.cachedOffset();
+
+ newCase = AccessCase::setter(
+ vm, codeBlock, AccessCase::Setter, structure, offset, conditionSet);
+ }
+ }
+
+ MacroAssemblerCodePtr codePtr = stubInfo.addAccessCase(codeBlock, ident, WTFMove(newCase));
+
+ if (!codePtr)
+ return GiveUpOnCache;
+
+ resetPutByIDCheckAndLoad(stubInfo);
+ MacroAssembler::repatchJump(
+ stubInfo.callReturnLocation.jumpAtOffset(
+ stubInfo.patch.deltaCallToJump),
+ CodeLocationLabel(codePtr));
+
+ return RetryCacheLater;
+}
+
+void repatchPutByID(ExecState* exec, JSValue baseValue, Structure* structure, const Identifier& propertyName, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
+{
+ GCSafeConcurrentJITLocker locker(exec->codeBlock()->m_lock, exec->vm().heap);
+
+ if (tryCachePutByID(exec, baseValue, structure, propertyName, slot, stubInfo, putKind) == GiveUpOnCache)
+ repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, appropriateGenericPutByIdFunction(slot, putKind));
+}
+
+static InlineCacheAction tryRepatchIn(
+ ExecState* exec, JSCell* base, const Identifier& ident, bool wasFound,
+ const PropertySlot& slot, StructureStubInfo& stubInfo)
+{
+ if (forceICFailure(exec))
+ return GiveUpOnCache;
+
+ if (!base->structure()->propertyAccessesAreCacheable() || (!wasFound && !base->structure()->propertyAccessesAreCacheableForAbsence()))
+ return GiveUpOnCache;
+
+ if (wasFound) {
+ if (!slot.isCacheable())
+ return GiveUpOnCache;
+ }
+
+ CodeBlock* codeBlock = exec->codeBlock();
+ VM& vm = exec->vm();
+ Structure* structure = base->structure(vm);
+
+ ObjectPropertyConditionSet conditionSet;
+ if (wasFound) {
+ if (slot.slotBase() != base) {
+ conditionSet = generateConditionsForPrototypePropertyHit(
+ vm, codeBlock, exec, structure, slot.slotBase(), ident.impl());
+ }
+ } else {
+ conditionSet = generateConditionsForPropertyMiss(
+ vm, codeBlock, exec, structure, ident.impl());
+ }
+ if (!conditionSet.isValid())
+ return GiveUpOnCache;
+
+ std::unique_ptr<AccessCase> newCase = AccessCase::in(
+ vm, codeBlock, wasFound ? AccessCase::InHit : AccessCase::InMiss, structure, conditionSet);
+
+ MacroAssemblerCodePtr codePtr = stubInfo.addAccessCase(codeBlock, ident, WTFMove(newCase));
+ if (!codePtr)
+ return GiveUpOnCache;
+
+ MacroAssembler::repatchJump(
+ stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump),
+ CodeLocationLabel(codePtr));
+
+ return RetryCacheLater;
+}
+
+void repatchIn(
+ ExecState* exec, JSCell* base, const Identifier& ident, bool wasFound,
+ const PropertySlot& slot, StructureStubInfo& stubInfo)
+{
+ if (tryRepatchIn(exec, base, ident, wasFound, slot, stubInfo) == GiveUpOnCache)
+ repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, operationIn);
+}
+
+static void linkSlowFor(VM*, CallLinkInfo& callLinkInfo, MacroAssemblerCodeRef codeRef)
+{
+ MacroAssembler::repatchNearCall(callLinkInfo.callReturnLocation(), CodeLocationLabel(codeRef.code()));
+}
+
+static void linkSlowFor(VM* vm, CallLinkInfo& callLinkInfo, ThunkGenerator generator)
+{
+ linkSlowFor(vm, callLinkInfo, vm->getCTIStub(generator));
+}
+
+static void linkSlowFor(VM* vm, CallLinkInfo& callLinkInfo)
+{
+ MacroAssemblerCodeRef virtualThunk = virtualThunkFor(vm, callLinkInfo);
+ linkSlowFor(vm, callLinkInfo, virtualThunk);
+ callLinkInfo.setSlowStub(createJITStubRoutine(virtualThunk, *vm, nullptr, true));
+}
+
+void linkFor(
+ ExecState* exec, CallLinkInfo& callLinkInfo, CodeBlock* calleeCodeBlock,
+ JSFunction* callee, MacroAssemblerCodePtr codePtr)
+{
+ ASSERT(!callLinkInfo.stub());
+
+ CodeBlock* callerCodeBlock = exec->callerFrame()->codeBlock();
+
+ VM* vm = callerCodeBlock->vm();
+
+ ASSERT(!callLinkInfo.isLinked());
+ callLinkInfo.setCallee(exec->callerFrame()->vm(), callLinkInfo.hotPathBegin(), callerCodeBlock, callee);
+ callLinkInfo.setLastSeenCallee(exec->callerFrame()->vm(), callerCodeBlock, callee);
+ if (shouldDumpDisassemblyFor(callerCodeBlock))
+ dataLog("Linking call in ", *callerCodeBlock, " at ", callLinkInfo.codeOrigin(), " to ", pointerDump(calleeCodeBlock), ", entrypoint at ", codePtr, "\n");
+ MacroAssembler::repatchNearCall(callLinkInfo.hotPathOther(), CodeLocationLabel(codePtr));
+
+ if (calleeCodeBlock)
+ calleeCodeBlock->linkIncomingCall(exec->callerFrame(), &callLinkInfo);
+
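+    // For ordinary calls that allow stubs, wire the slow path to the polymorphic call
+    // linker so future callees can be added to a stub; otherwise fall back to the
+    // virtual call thunk.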
+ if (callLinkInfo.specializationKind() == CodeForCall && callLinkInfo.allowStubs()) {
+ linkSlowFor(vm, callLinkInfo, linkPolymorphicCallThunkGenerator);
+ return;
+ }
+
+ linkSlowFor(vm, callLinkInfo);
+}
+
+void linkSlowFor(
+ ExecState* exec, CallLinkInfo& callLinkInfo)
+{
+ CodeBlock* callerCodeBlock = exec->callerFrame()->codeBlock();
+ VM* vm = callerCodeBlock->vm();
+
+ linkSlowFor(vm, callLinkInfo);
+}
+
+static void revertCall(VM* vm, CallLinkInfo& callLinkInfo, MacroAssemblerCodeRef codeRef)
+{
+ MacroAssembler::revertJumpReplacementToBranchPtrWithPatch(
+ MacroAssembler::startOfBranchPtrWithPatchOnRegister(callLinkInfo.hotPathBegin()),
+ static_cast<MacroAssembler::RegisterID>(callLinkInfo.calleeGPR()), 0);
+ linkSlowFor(vm, callLinkInfo, codeRef);
+ callLinkInfo.clearSeen();
+ callLinkInfo.clearCallee();
+ callLinkInfo.clearStub();
+ callLinkInfo.clearSlowStub();
+ if (callLinkInfo.isOnList())
+ callLinkInfo.remove();
+}
+
+void unlinkFor(VM& vm, CallLinkInfo& callLinkInfo)
+{
+ if (Options::dumpDisassembly())
+ dataLog("Unlinking call from ", callLinkInfo.callReturnLocation(), "\n");
+
+ revertCall(&vm, callLinkInfo, vm.getCTIStub(linkCallThunkGenerator));
+}
+
+void linkVirtualFor(
+ ExecState* exec, CallLinkInfo& callLinkInfo)
+{
+ CodeBlock* callerCodeBlock = exec->callerFrame()->codeBlock();
+ VM* vm = callerCodeBlock->vm();
+
+ if (shouldDumpDisassemblyFor(callerCodeBlock))
+ dataLog("Linking virtual call at ", *callerCodeBlock, " ", exec->callerFrame()->codeOrigin(), "\n");
+
+ MacroAssemblerCodeRef virtualThunk = virtualThunkFor(vm, callLinkInfo);
+ revertCall(vm, callLinkInfo, virtualThunk);
+ callLinkInfo.setSlowStub(createJITStubRoutine(virtualThunk, *vm, nullptr, true));
+}
+
+namespace {
+struct CallToCodePtr {
+ CCallHelpers::Call call;
+ MacroAssemblerCodePtr codePtr;
+};
+} // anonymous namespace
+
+void linkPolymorphicCall(
+ ExecState* exec, CallLinkInfo& callLinkInfo, CallVariant newVariant)
+{
+ RELEASE_ASSERT(callLinkInfo.allowStubs());
+
+ // Currently we can't do anything for non-function callees.
+ // https://bugs.webkit.org/show_bug.cgi?id=140685
+ if (!newVariant || !newVariant.executable()) {
+ linkVirtualFor(exec, callLinkInfo);
+ return;
+ }
+
+ CodeBlock* callerCodeBlock = exec->callerFrame()->codeBlock();
+ VM* vm = callerCodeBlock->vm();
+
+ CallVariantList list;
+ if (PolymorphicCallStubRoutine* stub = callLinkInfo.stub())
+ list = stub->variants();
+ else if (JSFunction* oldCallee = callLinkInfo.callee())
+ list = CallVariantList{ CallVariant(oldCallee) };
+
+ list = variantListWithVariant(list, newVariant);
+
+ // If there are any closure calls then it makes sense to treat all of them as closure calls.
+ // This makes switching on callee cheaper. It also produces profiling that's easier on the DFG;
+ // the DFG doesn't really want to deal with a combination of closure and non-closure callees.
+ bool isClosureCall = false;
+ for (CallVariant variant : list) {
+ if (variant.isClosureCall()) {
+ list = despecifiedVariantList(list);
+ isClosureCall = true;
+ break;
+ }
+ }
+
+ if (isClosureCall)
+ callLinkInfo.setHasSeenClosure();
+
+ Vector<PolymorphicCallCase> callCases;
+
+ // Figure out what our cases are.
+ for (CallVariant variant : list) {
+ CodeBlock* codeBlock;
+ if (variant.executable()->isHostFunction())
+ codeBlock = nullptr;
+ else {
+ ExecutableBase* executable = variant.executable();
+#if ENABLE(WEBASSEMBLY)
+ if (executable->isWebAssemblyExecutable())
+ codeBlock = jsCast<WebAssemblyExecutable*>(executable)->codeBlockForCall();
+ else
+#endif
+ codeBlock = jsCast<FunctionExecutable*>(executable)->codeBlockForCall();
+            // If we cannot handle a callee, either because we don't have a CodeBlock or because of an arity mismatch,
+            // assume that it's better for this whole thing to be a virtual call.
+ if (!codeBlock || exec->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters()) || callLinkInfo.isVarargs()) {
+ linkVirtualFor(exec, callLinkInfo);
+ return;
+ }
+ }
+
+ callCases.append(PolymorphicCallCase(variant, codeBlock));
+ }
+
+ // If we are over the limit, just use a normal virtual call.
+ unsigned maxPolymorphicCallVariantListSize;
+ if (callerCodeBlock->jitType() == JITCode::topTierJIT())
+ maxPolymorphicCallVariantListSize = Options::maxPolymorphicCallVariantListSizeForTopTier();
+ else
+ maxPolymorphicCallVariantListSize = Options::maxPolymorphicCallVariantListSize();
+ if (list.size() > maxPolymorphicCallVariantListSize) {
+ linkVirtualFor(exec, callLinkInfo);
+ return;
+ }
+
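+    // Build a dispatch stub: compare against each known callee (or its executable,
+    // for closure calls) and jump straight to that target's code, falling back to
+    // the slow path for anything unseen.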
+ GPRReg calleeGPR = static_cast<GPRReg>(callLinkInfo.calleeGPR());
+
+ CCallHelpers stubJit(vm, callerCodeBlock);
+
+ CCallHelpers::JumpList slowPath;
+
+ std::unique_ptr<CallFrameShuffler> frameShuffler;
+ if (callLinkInfo.frameShuffleData()) {
+ ASSERT(callLinkInfo.isTailCall());
+ frameShuffler = std::make_unique<CallFrameShuffler>(stubJit, *callLinkInfo.frameShuffleData());
+#if USE(JSVALUE32_64)
+ // We would have already checked that the callee is a cell, and we can
+ // use the additional register this buys us.
+ frameShuffler->assumeCalleeIsCell();
+#endif
+ frameShuffler->lockGPR(calleeGPR);
+ }
+ GPRReg comparisonValueGPR;
+
+ if (isClosureCall) {
+ GPRReg scratchGPR;
+ if (frameShuffler)
+ scratchGPR = frameShuffler->acquireGPR();
+ else
+ scratchGPR = AssemblyHelpers::selectScratchGPR(calleeGPR);
+ // Verify that we have a function and stash the executable in scratchGPR.
+
+#if USE(JSVALUE64)
+ // We can't rely on tagMaskRegister being set, so we do this the hard
+ // way.
+ stubJit.move(MacroAssembler::TrustedImm64(TagMask), scratchGPR);
+ slowPath.append(stubJit.branchTest64(CCallHelpers::NonZero, calleeGPR, scratchGPR));
+#else
+ // We would have already checked that the callee is a cell.
+#endif
+
+ slowPath.append(
+ stubJit.branch8(
+ CCallHelpers::NotEqual,
+ CCallHelpers::Address(calleeGPR, JSCell::typeInfoTypeOffset()),
+ CCallHelpers::TrustedImm32(JSFunctionType)));
+
+ stubJit.loadPtr(
+ CCallHelpers::Address(calleeGPR, JSFunction::offsetOfExecutable()),
+ scratchGPR);
+
+ comparisonValueGPR = scratchGPR;
+ } else
+ comparisonValueGPR = calleeGPR;
+
+ Vector<int64_t> caseValues(callCases.size());
+ Vector<CallToCodePtr> calls(callCases.size());
+ std::unique_ptr<uint32_t[]> fastCounts;
+
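+    // Callers below the top tier get a per-case hit counter so that profiling for
+    // tier-up can see which call targets are actually taken.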
+ if (callerCodeBlock->jitType() != JITCode::topTierJIT())
+ fastCounts = std::make_unique<uint32_t[]>(callCases.size());
+
+ for (size_t i = 0; i < callCases.size(); ++i) {
+ if (fastCounts)
+ fastCounts[i] = 0;
+
+ CallVariant variant = callCases[i].variant();
+ int64_t newCaseValue;
+ if (isClosureCall)
+ newCaseValue = bitwise_cast<intptr_t>(variant.executable());
+ else
+ newCaseValue = bitwise_cast<intptr_t>(variant.function());
+
+ if (!ASSERT_DISABLED) {
+ for (size_t j = 0; j < i; ++j) {
+ if (caseValues[j] != newCaseValue)
+ continue;
+
+ dataLog("ERROR: Attempt to add duplicate case value.\n");
+ dataLog("Existing case values: ");
+ CommaPrinter comma;
+ for (size_t k = 0; k < i; ++k)
+ dataLog(comma, caseValues[k]);
+ dataLog("\n");
+ dataLog("Attempting to add: ", newCaseValue, "\n");
+ dataLog("Variant list: ", listDump(callCases), "\n");
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ caseValues[i] = newCaseValue;
+ }
+
+ GPRReg fastCountsBaseGPR;
+ if (frameShuffler)
+ fastCountsBaseGPR = frameShuffler->acquireGPR();
+ else {
+ fastCountsBaseGPR =
+ AssemblyHelpers::selectScratchGPR(calleeGPR, comparisonValueGPR, GPRInfo::regT3);
+ }
+ stubJit.move(CCallHelpers::TrustedImmPtr(fastCounts.get()), fastCountsBaseGPR);
+ if (!frameShuffler && callLinkInfo.isTailCall())
+ stubJit.emitRestoreCalleeSaves();
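+    // Dispatch with a binary switch over the comparison values; each case bumps its
+    // counter (if profiling) and jumps or tail-calls directly into the target's code.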
+ BinarySwitch binarySwitch(comparisonValueGPR, caseValues, BinarySwitch::IntPtr);
+ CCallHelpers::JumpList done;
+ while (binarySwitch.advance(stubJit)) {
+ size_t caseIndex = binarySwitch.caseIndex();
+
+ CallVariant variant = callCases[caseIndex].variant();
+
+ ASSERT(variant.executable()->hasJITCodeForCall());
+ MacroAssemblerCodePtr codePtr =
+ variant.executable()->generatedJITCodeForCall()->addressForCall(ArityCheckNotRequired);
+
+ if (fastCounts) {
+ stubJit.add32(
+ CCallHelpers::TrustedImm32(1),
+ CCallHelpers::Address(fastCountsBaseGPR, caseIndex * sizeof(uint32_t)));
+ }
+ if (frameShuffler) {
+ CallFrameShuffler(stubJit, frameShuffler->snapshot()).prepareForTailCall();
+ calls[caseIndex].call = stubJit.nearTailCall();
+ } else if (callLinkInfo.isTailCall()) {
+ stubJit.prepareForTailCallSlow();
+ calls[caseIndex].call = stubJit.nearTailCall();
+ } else
+ calls[caseIndex].call = stubJit.nearCall();
+ calls[caseIndex].codePtr = codePtr;
+ done.append(stubJit.jump());
+ }
+
+ slowPath.link(&stubJit);
+ binarySwitch.fallThrough().link(&stubJit);
+
+ if (frameShuffler) {
+ frameShuffler->releaseGPR(calleeGPR);
+ frameShuffler->releaseGPR(comparisonValueGPR);
+ frameShuffler->releaseGPR(fastCountsBaseGPR);
+#if USE(JSVALUE32_64)
+ frameShuffler->setCalleeJSValueRegs(JSValueRegs(GPRInfo::regT1, GPRInfo::regT0));
+#else
+ frameShuffler->setCalleeJSValueRegs(JSValueRegs(GPRInfo::regT0));
+#endif
+ frameShuffler->prepareForSlowPath();
+ } else {
+ stubJit.move(calleeGPR, GPRInfo::regT0);
+#if USE(JSVALUE32_64)
+ stubJit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::regT1);
+#endif
+ }
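+    // Slow path: stash the CallLinkInfo in regT2 (where the link-polymorphic-call
+    // thunk appears to expect it), restore the return address from regT4, and jump
+    // to the generic linking thunk.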
+ stubJit.move(CCallHelpers::TrustedImmPtr(&callLinkInfo), GPRInfo::regT2);
+ stubJit.move(CCallHelpers::TrustedImmPtr(callLinkInfo.callReturnLocation().executableAddress()), GPRInfo::regT4);
+
+ stubJit.restoreReturnAddressBeforeReturn(GPRInfo::regT4);
+ AssemblyHelpers::Jump slow = stubJit.jump();
+
+ LinkBuffer patchBuffer(*vm, stubJit, callerCodeBlock, JITCompilationCanFail);
+ if (patchBuffer.didFailToAllocate()) {
+ linkVirtualFor(exec, callLinkInfo);
+ return;
+ }
+
+ RELEASE_ASSERT(callCases.size() == calls.size());
+ for (CallToCodePtr callToCodePtr : calls) {
+ // Tail call special-casing ensures proper linking on ARM Thumb2, where a tail call jumps to an address
+ // with a non-decorated bottom bit but a normal call calls an address with a decorated bottom bit.
+ bool isTailCall = callToCodePtr.call.isFlagSet(CCallHelpers::Call::Tail);
+ patchBuffer.link(
+ callToCodePtr.call, FunctionPtr(isTailCall ? callToCodePtr.codePtr.dataLocation() : callToCodePtr.codePtr.executableAddress()));
+ }
+ if (JITCode::isOptimizingJIT(callerCodeBlock->jitType()))
+ patchBuffer.link(done, callLinkInfo.callReturnLocation().labelAtOffset(0));
+ else
+ patchBuffer.link(done, callLinkInfo.hotPathOther().labelAtOffset(0));
+ patchBuffer.link(slow, CodeLocationLabel(vm->getCTIStub(linkPolymorphicCallThunkGenerator).code()));
+
+ RefPtr<PolymorphicCallStubRoutine> stubRoutine = adoptRef(new PolymorphicCallStubRoutine(
+ FINALIZE_CODE_FOR(
+ callerCodeBlock, patchBuffer,
+ ("Polymorphic call stub for %s, return point %p, targets %s",
+ toCString(*callerCodeBlock).data(), callLinkInfo.callReturnLocation().labelAtOffset(0).executableAddress(),
+ toCString(listDump(callCases)).data())),
+ *vm, callerCodeBlock, exec->callerFrame(), callLinkInfo, callCases,
+ WTFMove(fastCounts)));
+
+ MacroAssembler::replaceWithJump(
+ MacroAssembler::startOfBranchPtrWithPatchOnRegister(callLinkInfo.hotPathBegin()),
+ CodeLocationLabel(stubRoutine->code().code()));
+    // The original slow path is unreachable on 64-bit, but still
+    // reachable on 32-bit, since a non-cell callee will always
+    // trigger the slow path.
+ linkSlowFor(vm, callLinkInfo);
+
+ // If there had been a previous stub routine, that one will die as soon as the GC runs and sees
+ // that it's no longer on stack.
+ callLinkInfo.setStub(stubRoutine.release());
+
+ // The call link info no longer has a call cache apart from the jump to the polymorphic call
+ // stub.
+ if (callLinkInfo.isOnList())
+ callLinkInfo.remove();
+}
+
+void resetGetByID(CodeBlock* codeBlock, StructureStubInfo& stubInfo)
+{
+ repatchCall(codeBlock, stubInfo.callReturnLocation, operationGetByIdOptimize);
+ resetGetByIDCheckAndLoad(stubInfo);
+ MacroAssembler::repatchJump(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
+}
+
+void resetPutByID(CodeBlock* codeBlock, StructureStubInfo& stubInfo)
+{
+ V_JITOperation_ESsiJJI unoptimizedFunction = bitwise_cast<V_JITOperation_ESsiJJI>(readCallTarget(codeBlock, stubInfo.callReturnLocation).executableAddress());
+ V_JITOperation_ESsiJJI optimizedFunction;
+ if (unoptimizedFunction == operationPutByIdStrict || unoptimizedFunction == operationPutByIdStrictOptimize)
+ optimizedFunction = operationPutByIdStrictOptimize;
+ else if (unoptimizedFunction == operationPutByIdNonStrict || unoptimizedFunction == operationPutByIdNonStrictOptimize)
+ optimizedFunction = operationPutByIdNonStrictOptimize;
+ else if (unoptimizedFunction == operationPutByIdDirectStrict || unoptimizedFunction == operationPutByIdDirectStrictOptimize)
+ optimizedFunction = operationPutByIdDirectStrictOptimize;
+ else {
+ ASSERT(unoptimizedFunction == operationPutByIdDirectNonStrict || unoptimizedFunction == operationPutByIdDirectNonStrictOptimize);
+ optimizedFunction = operationPutByIdDirectNonStrictOptimize;
+ }
+ repatchCall(codeBlock, stubInfo.callReturnLocation, optimizedFunction);
+ resetPutByIDCheckAndLoad(stubInfo);
+ MacroAssembler::repatchJump(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
+}
+
+void resetIn(CodeBlock*, StructureStubInfo& stubInfo)
+{
+ MacroAssembler::repatchJump(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
+}
+
+} // namespace JSC
+
+#endif
diff --git a/Source/JavaScriptCore/jit/Repatch.h b/Source/JavaScriptCore/jit/Repatch.h
new file mode 100644
index 000000000..443e944a3
--- /dev/null
+++ b/Source/JavaScriptCore/jit/Repatch.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2011, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef Repatch_h
+#define Repatch_h
+
+#if ENABLE(JIT)
+
+#include "CCallHelpers.h"
+#include "CallVariant.h"
+#include "JITOperations.h"
+#include "PutKind.h"
+
+namespace JSC {
+
+void repatchGetByID(ExecState*, JSValue, const Identifier&, const PropertySlot&, StructureStubInfo&);
+void buildGetByIDList(ExecState*, JSValue, const Identifier&, const PropertySlot&, StructureStubInfo&);
+void buildGetByIDProtoList(ExecState*, JSValue, const Identifier&, const PropertySlot&, StructureStubInfo&);
+void repatchPutByID(ExecState*, JSValue, Structure*, const Identifier&, const PutPropertySlot&, StructureStubInfo&, PutKind);
+void buildPutByIdList(ExecState*, JSValue, Structure*, const Identifier&, const PutPropertySlot&, StructureStubInfo&, PutKind);
+void repatchIn(ExecState*, JSCell*, const Identifier&, bool wasFound, const PropertySlot&, StructureStubInfo&);
+void linkFor(ExecState*, CallLinkInfo&, CodeBlock*, JSFunction* callee, MacroAssemblerCodePtr);
+void linkSlowFor(ExecState*, CallLinkInfo&);
+void unlinkFor(VM&, CallLinkInfo&);
+void linkVirtualFor(ExecState*, CallLinkInfo&);
+void linkPolymorphicCall(ExecState*, CallLinkInfo&, CallVariant);
+void resetGetByID(CodeBlock*, StructureStubInfo&);
+void resetPutByID(CodeBlock*, StructureStubInfo&);
+void resetIn(CodeBlock*, StructureStubInfo&);
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+#endif // Repatch_h
diff --git a/Source/JavaScriptCore/jit/ScratchRegisterAllocator.cpp b/Source/JavaScriptCore/jit/ScratchRegisterAllocator.cpp
new file mode 100644
index 000000000..93d670d6c
--- /dev/null
+++ b/Source/JavaScriptCore/jit/ScratchRegisterAllocator.cpp
@@ -0,0 +1,302 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ScratchRegisterAllocator.h"
+
+#if ENABLE(JIT)
+
+#include "JSCInlines.h"
+#include "MaxFrameExtentForSlowPathCall.h"
+#include "VM.h"
+
+namespace JSC {
+
+ScratchRegisterAllocator::ScratchRegisterAllocator(const RegisterSet& usedRegisters)
+ : m_usedRegisters(usedRegisters)
+ , m_numberOfReusedRegisters(0)
+{
+}
+
+ScratchRegisterAllocator::~ScratchRegisterAllocator() { }
+
+void ScratchRegisterAllocator::lock(GPRReg reg)
+{
+ if (reg == InvalidGPRReg)
+ return;
+ unsigned index = GPRInfo::toIndex(reg);
+ if (index == GPRInfo::InvalidIndex)
+ return;
+ m_lockedRegisters.setGPRByIndex(index);
+}
+
+void ScratchRegisterAllocator::lock(FPRReg reg)
+{
+ if (reg == InvalidFPRReg)
+ return;
+ unsigned index = FPRInfo::toIndex(reg);
+ if (index == FPRInfo::InvalidIndex)
+ return;
+ m_lockedRegisters.setFPRByIndex(index);
+}
+
+void ScratchRegisterAllocator::lock(JSValueRegs regs)
+{
+ lock(regs.tagGPR());
+ lock(regs.payloadGPR());
+}
+
+template<typename BankInfo>
+typename BankInfo::RegisterType ScratchRegisterAllocator::allocateScratch()
+{
+ // First try to allocate a register that is totally free.
+ for (unsigned i = 0; i < BankInfo::numberOfRegisters; ++i) {
+ typename BankInfo::RegisterType reg = BankInfo::toRegister(i);
+ if (!m_lockedRegisters.get(reg)
+ && !m_usedRegisters.get(reg)
+ && !m_scratchRegisters.get(reg)) {
+ m_scratchRegisters.set(reg);
+ return reg;
+ }
+ }
+
+ // Since that failed, try to allocate a register that is not yet
+ // locked or used for scratch.
+ for (unsigned i = 0; i < BankInfo::numberOfRegisters; ++i) {
+ typename BankInfo::RegisterType reg = BankInfo::toRegister(i);
+ if (!m_lockedRegisters.get(reg) && !m_scratchRegisters.get(reg)) {
+ m_scratchRegisters.set(reg);
+ m_numberOfReusedRegisters++;
+ return reg;
+ }
+ }
+
+ // We failed.
+ CRASH();
+ // Make some silly compilers happy.
+ return static_cast<typename BankInfo::RegisterType>(-1);
+}
+
+GPRReg ScratchRegisterAllocator::allocateScratchGPR() { return allocateScratch<GPRInfo>(); }
+FPRReg ScratchRegisterAllocator::allocateScratchFPR() { return allocateScratch<FPRInfo>(); }
+
+ScratchRegisterAllocator::PreservedState ScratchRegisterAllocator::preserveReusedRegistersByPushing(MacroAssembler& jit, ExtraStackSpace extraStackSpace)
+{
+ if (!didReuseRegisters())
+ return PreservedState(0, extraStackSpace);
+
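+    // Only registers that were both handed out as scratch and already live in the
+    // caller (m_usedRegisters) need to be preserved here.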
+ RegisterSet registersToSpill;
+ for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
+ FPRReg reg = FPRInfo::toRegister(i);
+ if (m_scratchRegisters.getFPRByIndex(i) && m_usedRegisters.get(reg))
+ registersToSpill.set(reg);
+ }
+ for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
+ GPRReg reg = GPRInfo::toRegister(i);
+ if (m_scratchRegisters.getGPRByIndex(i) && m_usedRegisters.get(reg))
+ registersToSpill.set(reg);
+ }
+
+ unsigned extraStackBytesAtTopOfStack = extraStackSpace == ExtraStackSpace::SpaceForCCall ? maxFrameExtentForSlowPathCall : 0;
+ unsigned stackAdjustmentSize = ScratchRegisterAllocator::preserveRegistersToStackForCall(jit, registersToSpill, extraStackBytesAtTopOfStack);
+
+ return PreservedState(stackAdjustmentSize, extraStackSpace);
+}
+
+void ScratchRegisterAllocator::restoreReusedRegistersByPopping(MacroAssembler& jit, const ScratchRegisterAllocator::PreservedState& preservedState)
+{
+ RELEASE_ASSERT(preservedState);
+ if (!didReuseRegisters())
+ return;
+
+ RegisterSet registersToFill;
+ for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
+ GPRReg reg = GPRInfo::toRegister(i);
+ if (m_scratchRegisters.getGPRByIndex(i) && m_usedRegisters.get(reg))
+ registersToFill.set(reg);
+ }
+ for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
+ FPRReg reg = FPRInfo::toRegister(i);
+ if (m_scratchRegisters.getFPRByIndex(i) && m_usedRegisters.get(reg))
+ registersToFill.set(reg);
+ }
+
+ unsigned extraStackBytesAtTopOfStack =
+ preservedState.extraStackSpaceRequirement == ExtraStackSpace::SpaceForCCall ? maxFrameExtentForSlowPathCall : 0;
+ RegisterSet dontRestore; // Empty set. We want to restore everything.
+ ScratchRegisterAllocator::restoreRegistersFromStackForCall(jit, registersToFill, dontRestore,
+ preservedState.numberOfBytesPreserved, extraStackBytesAtTopOfStack);
+}
+
+RegisterSet ScratchRegisterAllocator::usedRegistersForCall() const
+{
+ RegisterSet result = m_usedRegisters;
+ result.exclude(RegisterSet::registersToNotSaveForJSCall());
+ return result;
+}
+
+unsigned ScratchRegisterAllocator::desiredScratchBufferSizeForCall() const
+{
+ return usedRegistersForCall().numberOfSetRegisters() * sizeof(JSValue);
+}
+
+void ScratchRegisterAllocator::preserveUsedRegistersToScratchBufferForCall(MacroAssembler& jit, ScratchBuffer* scratchBuffer, GPRReg scratchGPR)
+{
+ RegisterSet usedRegisters = usedRegistersForCall();
+ if (!usedRegisters.numberOfSetRegisters())
+ return;
+
+ unsigned count = 0;
+ for (GPRReg reg = MacroAssembler::firstRegister(); reg <= MacroAssembler::lastRegister(); reg = MacroAssembler::nextRegister(reg)) {
+ if (usedRegisters.get(reg)) {
+ jit.storePtr(reg, static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + count);
+ count++;
+ }
+ if (GPRInfo::toIndex(reg) != GPRInfo::InvalidIndex
+ && scratchGPR == InvalidGPRReg
+ && !m_lockedRegisters.get(reg) && !m_scratchRegisters.get(reg))
+ scratchGPR = reg;
+ }
+ RELEASE_ASSERT(scratchGPR != InvalidGPRReg);
+ for (FPRReg reg = MacroAssembler::firstFPRegister(); reg <= MacroAssembler::lastFPRegister(); reg = MacroAssembler::nextFPRegister(reg)) {
+ if (usedRegisters.get(reg)) {
+ jit.move(MacroAssembler::TrustedImmPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + count), scratchGPR);
+ count++;
+ jit.storeDouble(reg, scratchGPR);
+ }
+ }
+ RELEASE_ASSERT(count * sizeof(JSValue) == desiredScratchBufferSizeForCall());
+
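+    // Record how many bytes of the scratch buffer are live; the heap uses this active
+    // length to treat the preserved values as roots while we are in the call.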
+ jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->activeLengthPtr()), scratchGPR);
+ jit.storePtr(MacroAssembler::TrustedImmPtr(static_cast<size_t>(count * sizeof(JSValue))), scratchGPR);
+}
+
+void ScratchRegisterAllocator::restoreUsedRegistersFromScratchBufferForCall(MacroAssembler& jit, ScratchBuffer* scratchBuffer, GPRReg scratchGPR)
+{
+ RegisterSet usedRegisters = usedRegistersForCall();
+ if (!usedRegisters.numberOfSetRegisters())
+ return;
+
+ if (scratchGPR == InvalidGPRReg) {
+ // Find a scratch register.
+ for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
+ if (m_lockedRegisters.getGPRByIndex(i) || m_scratchRegisters.getGPRByIndex(i))
+ continue;
+ scratchGPR = GPRInfo::toRegister(i);
+ break;
+ }
+ }
+ RELEASE_ASSERT(scratchGPR != InvalidGPRReg);
+
+ jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->activeLengthPtr()), scratchGPR);
+ jit.storePtr(MacroAssembler::TrustedImmPtr(0), scratchGPR);
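+    // Clear the active length first so the buffer stops being scanned as roots, then
+    // reload the saved registers.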
+
+ // Restore double registers first.
+ unsigned count = usedRegisters.numberOfSetGPRs();
+ for (FPRReg reg = MacroAssembler::firstFPRegister(); reg <= MacroAssembler::lastFPRegister(); reg = MacroAssembler::nextFPRegister(reg)) {
+ if (usedRegisters.get(reg)) {
+ jit.move(MacroAssembler::TrustedImmPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++)), scratchGPR);
+ jit.loadDouble(scratchGPR, reg);
+ }
+ }
+
+ count = 0;
+ for (GPRReg reg = MacroAssembler::firstRegister(); reg <= MacroAssembler::lastRegister(); reg = MacroAssembler::nextRegister(reg)) {
+ if (usedRegisters.get(reg))
+ jit.loadPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++), reg);
+ }
+}
+
+unsigned ScratchRegisterAllocator::preserveRegistersToStackForCall(MacroAssembler& jit, const RegisterSet& usedRegisters, unsigned extraBytesAtTopOfStack)
+{
+ RELEASE_ASSERT(extraBytesAtTopOfStack % sizeof(void*) == 0);
+ if (!usedRegisters.numberOfSetRegisters())
+ return 0;
+
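+    // Layout after the adjustment, from the new stack pointer upwards: extraBytesAtTopOfStack
+    // of space for the call, then one EncodedJSValue slot per saved register (GPRs first,
+    // then FPRs), with the total rounded up to the stack alignment.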
+ unsigned stackOffset = (usedRegisters.numberOfSetRegisters()) * sizeof(EncodedJSValue);
+ stackOffset += extraBytesAtTopOfStack;
+ stackOffset = WTF::roundUpToMultipleOf(stackAlignmentBytes(), stackOffset);
+ jit.subPtr(
+ MacroAssembler::TrustedImm32(stackOffset),
+ MacroAssembler::stackPointerRegister);
+
+ unsigned count = 0;
+ for (GPRReg reg = MacroAssembler::firstRegister(); reg <= MacroAssembler::lastRegister(); reg = MacroAssembler::nextRegister(reg)) {
+ if (usedRegisters.get(reg)) {
+ jit.storePtr(reg, MacroAssembler::Address(MacroAssembler::stackPointerRegister, extraBytesAtTopOfStack + (count * sizeof(EncodedJSValue))));
+ count++;
+ }
+ }
+ for (FPRReg reg = MacroAssembler::firstFPRegister(); reg <= MacroAssembler::lastFPRegister(); reg = MacroAssembler::nextFPRegister(reg)) {
+ if (usedRegisters.get(reg)) {
+ jit.storeDouble(reg, MacroAssembler::Address(MacroAssembler::stackPointerRegister, extraBytesAtTopOfStack + (count * sizeof(EncodedJSValue))));
+ count++;
+ }
+ }
+
+ RELEASE_ASSERT(count == usedRegisters.numberOfSetRegisters());
+
+ return stackOffset;
+}
+
+void ScratchRegisterAllocator::restoreRegistersFromStackForCall(MacroAssembler& jit, const RegisterSet& usedRegisters, const RegisterSet& ignore, unsigned numberOfStackBytesUsedForRegisterPreservation, unsigned extraBytesAtTopOfStack)
+{
+ RELEASE_ASSERT(extraBytesAtTopOfStack % sizeof(void*) == 0);
+ if (!usedRegisters.numberOfSetRegisters()) {
+ RELEASE_ASSERT(numberOfStackBytesUsedForRegisterPreservation == 0);
+ return;
+ }
+
+ unsigned count = 0;
+ for (GPRReg reg = MacroAssembler::firstRegister(); reg <= MacroAssembler::lastRegister(); reg = MacroAssembler::nextRegister(reg)) {
+ if (usedRegisters.get(reg)) {
+ if (!ignore.get(reg))
+ jit.loadPtr(MacroAssembler::Address(MacroAssembler::stackPointerRegister, extraBytesAtTopOfStack + (sizeof(EncodedJSValue) * count)), reg);
+ count++;
+ }
+ }
+ for (FPRReg reg = MacroAssembler::firstFPRegister(); reg <= MacroAssembler::lastFPRegister(); reg = MacroAssembler::nextFPRegister(reg)) {
+ if (usedRegisters.get(reg)) {
+ if (!ignore.get(reg))
+ jit.loadDouble(MacroAssembler::Address(MacroAssembler::stackPointerRegister, extraBytesAtTopOfStack + (sizeof(EncodedJSValue) * count)), reg);
+ count++;
+ }
+ }
+
+ unsigned stackOffset = (usedRegisters.numberOfSetRegisters()) * sizeof(EncodedJSValue);
+ stackOffset += extraBytesAtTopOfStack;
+ stackOffset = WTF::roundUpToMultipleOf(stackAlignmentBytes(), stackOffset);
+
+ RELEASE_ASSERT(count == usedRegisters.numberOfSetRegisters());
+ RELEASE_ASSERT(stackOffset == numberOfStackBytesUsedForRegisterPreservation);
+
+ jit.addPtr(
+ MacroAssembler::TrustedImm32(stackOffset),
+ MacroAssembler::stackPointerRegister);
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/ScratchRegisterAllocator.h b/Source/JavaScriptCore/jit/ScratchRegisterAllocator.h
new file mode 100644
index 000000000..014997eca
--- /dev/null
+++ b/Source/JavaScriptCore/jit/ScratchRegisterAllocator.h
@@ -0,0 +1,112 @@
+/*
+ * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef ScratchRegisterAllocator_h
+#define ScratchRegisterAllocator_h
+
+#if ENABLE(JIT)
+
+#include "MacroAssembler.h"
+#include "RegisterSet.h"
+#include "TempRegisterSet.h"
+
+namespace JSC {
+
+struct ScratchBuffer;
+
+// This class provides a low-level register allocator for use in stubs.
+
+class ScratchRegisterAllocator {
+public:
+ ScratchRegisterAllocator() { }
+ ScratchRegisterAllocator(const RegisterSet& usedRegisters);
+ ~ScratchRegisterAllocator();
+
+ void lock(GPRReg);
+ void lock(FPRReg);
+ void lock(JSValueRegs);
+
+ template<typename BankInfo>
+ typename BankInfo::RegisterType allocateScratch();
+
+ GPRReg allocateScratchGPR();
+ FPRReg allocateScratchFPR();
+
+ bool didReuseRegisters() const
+ {
+ return !!m_numberOfReusedRegisters;
+ }
+
+ unsigned numberOfReusedRegisters() const
+ {
+ return m_numberOfReusedRegisters;
+ }
+
+ RegisterSet usedRegisters() const { return m_usedRegisters; }
+
+ enum class ExtraStackSpace { SpaceForCCall, NoExtraSpace };
+
+ struct PreservedState {
+ PreservedState()
+ : numberOfBytesPreserved(std::numeric_limits<unsigned>::max())
+ , extraStackSpaceRequirement(ExtraStackSpace::SpaceForCCall)
+ { }
+
+ PreservedState(unsigned numberOfBytes, ExtraStackSpace extraStackSpace)
+ : numberOfBytesPreserved(numberOfBytes)
+ , extraStackSpaceRequirement(extraStackSpace)
+ { }
+
+ explicit operator bool() const { return numberOfBytesPreserved != std::numeric_limits<unsigned>::max(); }
+
+ unsigned numberOfBytesPreserved;
+ ExtraStackSpace extraStackSpaceRequirement;
+ };
+
+ PreservedState preserveReusedRegistersByPushing(MacroAssembler& jit, ExtraStackSpace);
+ void restoreReusedRegistersByPopping(MacroAssembler& jit, const PreservedState&);
+
+ RegisterSet usedRegistersForCall() const;
+
+ unsigned desiredScratchBufferSizeForCall() const;
+
+ void preserveUsedRegistersToScratchBufferForCall(MacroAssembler& jit, ScratchBuffer* scratchBuffer, GPRReg scratchGPR = InvalidGPRReg);
+ void restoreUsedRegistersFromScratchBufferForCall(MacroAssembler& jit, ScratchBuffer* scratchBuffer, GPRReg scratchGPR = InvalidGPRReg);
+
+ static unsigned preserveRegistersToStackForCall(MacroAssembler& jit, const RegisterSet& usedRegisters, unsigned extraPaddingInBytes);
+ static void restoreRegistersFromStackForCall(MacroAssembler& jit, const RegisterSet& usedRegisters, const RegisterSet& ignore, unsigned numberOfStackBytesUsedForRegisterPreservation, unsigned extraPaddingInBytes);
+
+private:
+ RegisterSet m_usedRegisters;
+ TempRegisterSet m_lockedRegisters;
+ TempRegisterSet m_scratchRegisters;
+ unsigned m_numberOfReusedRegisters;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // ScratchRegisterAllocator_h
diff --git a/Source/JavaScriptCore/jit/SetupVarargsFrame.cpp b/Source/JavaScriptCore/jit/SetupVarargsFrame.cpp
new file mode 100644
index 000000000..f43551e00
--- /dev/null
+++ b/Source/JavaScriptCore/jit/SetupVarargsFrame.cpp
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "SetupVarargsFrame.h"
+
+#if ENABLE(JIT)
+
+#include "Interpreter.h"
+#include "JSCInlines.h"
+#include "StackAlignment.h"
+
+namespace JSC {
+
+void emitSetVarargsFrame(CCallHelpers& jit, GPRReg lengthGPR, bool lengthIncludesThis, GPRReg numUsedSlotsGPR, GPRReg resultGPR)
+{
+ jit.move(numUsedSlotsGPR, resultGPR);
+    // We really want the size of the new call frame to be a multiple of
+    // stackAlignmentRegisters(); it is easier to accomplish this by rounding
+    // numUsedSlotsGPR up to the next multiple of stackAlignmentRegisters().
+    // Together with the rounding below, this ensures that the new call frame is
+    // located on a stackAlignmentRegisters() boundary and is a multiple of
+    // stackAlignmentRegisters() in size.
+ jit.addPtr(CCallHelpers::TrustedImm32(stackAlignmentRegisters() - 1), resultGPR);
+ jit.andPtr(CCallHelpers::TrustedImm32(~(stackAlignmentRegisters() - 1)), resultGPR);
+
+ jit.addPtr(lengthGPR, resultGPR);
+    jit.addPtr(CCallHelpers::TrustedImm32(JSStack::CallFrameHeaderSize + (lengthIncludesThis ? 0 : 1)), resultGPR);
+
+ // resultGPR now has the required frame size in Register units
+ // Round resultGPR to next multiple of stackAlignmentRegisters()
+ jit.addPtr(CCallHelpers::TrustedImm32(stackAlignmentRegisters() - 1), resultGPR);
+ jit.andPtr(CCallHelpers::TrustedImm32(~(stackAlignmentRegisters() - 1)), resultGPR);
+
+ // Now resultGPR has the right stack frame offset in Register units.
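+    // Negate it and scale to bytes (sizeof(Register) == 8, hence the shift by 3), then
+    // offset from the call frame register to get the new frame's address.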
+ jit.negPtr(resultGPR);
+ jit.lshiftPtr(CCallHelpers::Imm32(3), resultGPR);
+ jit.addPtr(GPRInfo::callFrameRegister, resultGPR);
+}
+
+void emitSetupVarargsFrameFastCase(CCallHelpers& jit, GPRReg numUsedSlotsGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, GPRReg scratchGPR3, ValueRecovery argCountRecovery, VirtualRegister firstArgumentReg, unsigned firstVarArgOffset, CCallHelpers::JumpList& slowCase)
+{
+ CCallHelpers::JumpList end;
+
+ if (argCountRecovery.isConstant()) {
+ // FIXME: We could constant-fold a lot of the computation below in this case.
+ // https://bugs.webkit.org/show_bug.cgi?id=141486
+ jit.move(CCallHelpers::TrustedImm32(argCountRecovery.constant().asInt32()), scratchGPR1);
+ } else
+ jit.load32(CCallHelpers::payloadFor(argCountRecovery.virtualRegister()), scratchGPR1);
+ if (firstVarArgOffset) {
+ CCallHelpers::Jump sufficientArguments = jit.branch32(CCallHelpers::GreaterThan, scratchGPR1, CCallHelpers::TrustedImm32(firstVarArgOffset + 1));
+ jit.move(CCallHelpers::TrustedImm32(1), scratchGPR1);
+ CCallHelpers::Jump endVarArgs = jit.jump();
+ sufficientArguments.link(&jit);
+ jit.sub32(CCallHelpers::TrustedImm32(firstVarArgOffset), scratchGPR1);
+ endVarArgs.link(&jit);
+ }
+ slowCase.append(jit.branch32(CCallHelpers::Above, scratchGPR1, CCallHelpers::TrustedImm32(maxArguments + 1)));
+
+ emitSetVarargsFrame(jit, scratchGPR1, true, numUsedSlotsGPR, scratchGPR2);
+
+ slowCase.append(jit.branchPtr(CCallHelpers::Above, CCallHelpers::AbsoluteAddress(jit.vm()->addressOfStackLimit()), scratchGPR2));
+
+ // Initialize ArgumentCount.
+ jit.store32(scratchGPR1, CCallHelpers::Address(scratchGPR2, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset));
+
+ // Copy arguments.
+ jit.signExtend32ToPtr(scratchGPR1, scratchGPR1);
+ CCallHelpers::Jump done = jit.branchSubPtr(CCallHelpers::Zero, CCallHelpers::TrustedImm32(1), scratchGPR1);
+ // scratchGPR1: argumentCount
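+    // The loop below copies the arguments one JSValue at a time, from the last argument
+    // down, with scratchGPR1 as the loop counter; on 32-bit, tag and payload are copied
+    // separately.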
+
+ CCallHelpers::Label copyLoop = jit.label();
+ int argOffset = (firstArgumentReg.offset() - 1 + firstVarArgOffset) * static_cast<int>(sizeof(Register));
+#if USE(JSVALUE64)
+ jit.load64(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight, argOffset), scratchGPR3);
+ jit.store64(scratchGPR3, CCallHelpers::BaseIndex(scratchGPR2, scratchGPR1, CCallHelpers::TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
+#else // USE(JSVALUE64), so this begins the 32-bit case
+ jit.load32(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight, argOffset + TagOffset), scratchGPR3);
+ jit.store32(scratchGPR3, CCallHelpers::BaseIndex(scratchGPR2, scratchGPR1, CCallHelpers::TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)) + TagOffset));
+ jit.load32(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight, argOffset + PayloadOffset), scratchGPR3);
+ jit.store32(scratchGPR3, CCallHelpers::BaseIndex(scratchGPR2, scratchGPR1, CCallHelpers::TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)) + PayloadOffset));
+#endif // USE(JSVALUE64), end of 32-bit case
+ jit.branchSubPtr(CCallHelpers::NonZero, CCallHelpers::TrustedImm32(1), scratchGPR1).linkTo(copyLoop, &jit);
+
+ done.link(&jit);
+}
+
+void emitSetupVarargsFrameFastCase(CCallHelpers& jit, GPRReg numUsedSlotsGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, GPRReg scratchGPR3, unsigned firstVarArgOffset, CCallHelpers::JumpList& slowCase)
+{
+ emitSetupVarargsFrameFastCase(jit, numUsedSlotsGPR, scratchGPR1, scratchGPR2, scratchGPR3, nullptr, firstVarArgOffset, slowCase);
+}
+
+void emitSetupVarargsFrameFastCase(CCallHelpers& jit, GPRReg numUsedSlotsGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, GPRReg scratchGPR3, InlineCallFrame* inlineCallFrame, unsigned firstVarArgOffset, CCallHelpers::JumpList& slowCase)
+{
+ ValueRecovery argumentCountRecovery;
+ VirtualRegister firstArgumentReg;
+ if (inlineCallFrame) {
+ if (inlineCallFrame->isVarargs()) {
+ argumentCountRecovery = ValueRecovery::displacedInJSStack(
+ inlineCallFrame->argumentCountRegister, DataFormatInt32);
+ } else {
+ argumentCountRecovery = ValueRecovery::constant(
+ jsNumber(inlineCallFrame->arguments.size()));
+ }
+ if (inlineCallFrame->arguments.size() > 1)
+ firstArgumentReg = inlineCallFrame->arguments[1].virtualRegister();
+ else
+ firstArgumentReg = VirtualRegister(0);
+ } else {
+ argumentCountRecovery = ValueRecovery::displacedInJSStack(
+ VirtualRegister(JSStack::ArgumentCount), DataFormatInt32);
+ firstArgumentReg = VirtualRegister(CallFrame::argumentOffset(0));
+ }
+ emitSetupVarargsFrameFastCase(jit, numUsedSlotsGPR, scratchGPR1, scratchGPR2, scratchGPR3, argumentCountRecovery, firstArgumentReg, firstVarArgOffset, slowCase);
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
diff --git a/Source/JavaScriptCore/jit/SetupVarargsFrame.h b/Source/JavaScriptCore/jit/SetupVarargsFrame.h
new file mode 100644
index 000000000..0e8933a29
--- /dev/null
+++ b/Source/JavaScriptCore/jit/SetupVarargsFrame.h
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SetupVarargsFrame_h
+#define SetupVarargsFrame_h
+
+#if ENABLE(JIT)
+
+#include "CCallHelpers.h"
+#include "VirtualRegister.h"
+
+namespace JSC {
+
+void emitSetVarargsFrame(CCallHelpers&, GPRReg lengthGPR, bool lengthIncludesThis, GPRReg numUsedSlotsGPR, GPRReg resultGPR);
+
+// Assumes that SP refers to the last in-use stack location. After this returns, SP will point to
+// the newly created frame plus the native header. scratchGPR2 may be the same as numUsedSlotsGPR.
+void emitSetupVarargsFrameFastCase(CCallHelpers&, GPRReg numUsedSlotsGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, GPRReg scratchGPR3, ValueRecovery argCountRecovery, VirtualRegister firstArgumentReg, unsigned firstVarArgOffset, CCallHelpers::JumpList& slowCase);
+
+// Variant that assumes normal stack frame.
+void emitSetupVarargsFrameFastCase(CCallHelpers&, GPRReg numUsedSlotsGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, GPRReg scratchGPR3, unsigned firstVarArgOffset, CCallHelpers::JumpList& slowCase);
+
+// Variant for potentially inlined stack frames.
+void emitSetupVarargsFrameFastCase(CCallHelpers&, GPRReg numUsedSlotsGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, GPRReg scratchGPR3, InlineCallFrame*, unsigned firstVarArgOffset, CCallHelpers::JumpList& slowCase);
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // SetupVarargsFrame_h
+
diff --git a/Source/JavaScriptCore/jit/SlowPathCall.h b/Source/JavaScriptCore/jit/SlowPathCall.h
new file mode 100644
index 000000000..55da60cd0
--- /dev/null
+++ b/Source/JavaScriptCore/jit/SlowPathCall.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SlowPathCall_h
+#define SlowPathCall_h
+
+#include "CommonSlowPaths.h"
+#include "MacroAssemblerCodeRef.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+class JITSlowPathCall {
+public:
+ JITSlowPathCall(JIT* jit, Instruction* pc, SlowPathReturnType (SLOW_PATH *stub)(ExecState* exec, Instruction* pc))
+ : m_jit(jit)
+ , m_stub(stub)
+ , m_pc(pc)
+ {
+ }
+
+ JIT::Call call()
+ {
+#if ENABLE(OPCODE_SAMPLING)
+ if (m_jit->m_bytecodeOffset != std::numeric_limits<unsigned>::max())
+ m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeOffset, true);
+#endif
+ m_jit->updateTopCallFrame();
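+        // Set up the (ExecState*, Instruction*) arguments for the slow-path stub: x86-32
+        // passes them on the stack; Windows x64 also reserves stack space, presumably for
+        // the indirectly returned SlowPathReturnType; other targets use argument registers.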
+#if CPU(X86) && USE(JSVALUE32_64)
+ m_jit->addPtr(MacroAssembler::TrustedImm32(-8), MacroAssembler::stackPointerRegister);
+ m_jit->push(JIT::TrustedImm32(JIT::TrustedImmPtr(m_pc)));
+ m_jit->push(JIT::callFrameRegister);
+#elif CPU(X86_64) && OS(WINDOWS)
+ m_jit->addPtr(MacroAssembler::TrustedImm32(-16), MacroAssembler::stackPointerRegister);
+ m_jit->move(MacroAssembler::stackPointerRegister, JIT::argumentGPR0);
+ m_jit->move(JIT::callFrameRegister, JIT::argumentGPR1);
+ m_jit->move(JIT::TrustedImmPtr(m_pc), JIT::argumentGPR2);
+#else
+ m_jit->move(JIT::callFrameRegister, JIT::argumentGPR0);
+ m_jit->move(JIT::TrustedImmPtr(m_pc), JIT::argumentGPR1);
+#endif
+ JIT::Call call = m_jit->call();
+ m_jit->m_calls.append(CallRecord(call, m_jit->m_bytecodeOffset, m_stub.value()));
+
+#if CPU(X86) && USE(JSVALUE32_64)
+ m_jit->addPtr(MacroAssembler::TrustedImm32(16), MacroAssembler::stackPointerRegister);
+#elif CPU(X86_64) && OS(WINDOWS)
+ m_jit->pop(JIT::regT0); // vPC
+ m_jit->pop(JIT::regT1); // callFrame register
+#endif
+
+#if ENABLE(OPCODE_SAMPLING)
+ if (m_jit->m_bytecodeOffset != std::numeric_limits<unsigned>::max())
+ m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeOffset, false);
+#endif
+
+ m_jit->exceptionCheck();
+ return call;
+ }
+
+private:
+ JIT* m_jit;
+ FunctionPtr m_stub;
+ Instruction* m_pc;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // SlowPathCall_h
diff --git a/Source/JavaScriptCore/jit/SnippetOperand.h b/Source/JavaScriptCore/jit/SnippetOperand.h
new file mode 100644
index 000000000..67884b3c0
--- /dev/null
+++ b/Source/JavaScriptCore/jit/SnippetOperand.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SnippetOperand_h
+#define SnippetOperand_h
+
+#if ENABLE(JIT)
+
+#include "ResultType.h"
+
+namespace JSC {
+
+class SnippetOperand {
+ enum ConstOrVarType {
+ Variable,
+ ConstInt32,
+ ConstDouble
+ };
+
+public:
+ SnippetOperand()
+ : m_resultType(ResultType::unknownType())
+ { }
+
+ SnippetOperand(ResultType resultType)
+ : m_resultType(resultType)
+ { }
+
+ bool mightBeNumber() const { return m_resultType.mightBeNumber(); }
+ bool definitelyIsNumber() const { return m_resultType.definitelyIsNumber(); }
+
+ bool isConst() const { return m_type != Variable; }
+ bool isConstInt32() const { return m_type == ConstInt32; }
+ bool isConstDouble() const { return m_type == ConstDouble; }
+ bool isPositiveConstInt32() const { return isConstInt32() && asConstInt32() > 0; }
+
+ int64_t asRawBits() const { return m_val.rawBits; }
+
+ int32_t asConstInt32() const
+ {
+ ASSERT(m_type == ConstInt32);
+ return m_val.int32Val;
+ }
+
+ double asConstDouble() const
+ {
+ ASSERT(m_type == ConstDouble);
+ return m_val.doubleVal;
+ }
+
+ void setConstInt32(int32_t value)
+ {
+ m_type = ConstInt32;
+ m_val.int32Val = value;
+ }
+
+ void setConstDouble(double value)
+ {
+ m_type = ConstDouble;
+ m_val.doubleVal = value;
+ }
+
+private:
+ ResultType m_resultType;
+ ConstOrVarType m_type { Variable };
+ union {
+ int32_t int32Val;
+ double doubleVal;
+ int64_t rawBits;
+ } m_val;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // SnippetOperand_h
+
+
diff --git a/Source/JavaScriptCore/jit/SpecializedThunkJIT.h b/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
index 6b09781a0..6a2da6ded 100644
--- a/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
+++ b/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
@@ -29,7 +29,10 @@
#if ENABLE(JIT)
#include "Executable.h"
+#include "JIT.h"
+#include "JITInlines.h"
#include "JSInterfaceJIT.h"
+#include "JSStack.h"
#include "LinkBuffer.h"
namespace JSC {
@@ -37,12 +40,22 @@ namespace JSC {
class SpecializedThunkJIT : public JSInterfaceJIT {
public:
static const int ThisArgument = -1;
- SpecializedThunkJIT(int expectedArgCount)
+ SpecializedThunkJIT(VM* vm, int expectedArgCount)
+ : JSInterfaceJIT(vm)
{
+ emitFunctionPrologue();
+ emitSaveThenMaterializeTagRegisters();
// Check that we have the expected number of arguments
m_failures.append(branch32(NotEqual, payloadFor(JSStack::ArgumentCount), TrustedImm32(expectedArgCount + 1)));
}
+ explicit SpecializedThunkJIT(VM* vm)
+ : JSInterfaceJIT(vm)
+ {
+ emitFunctionPrologue();
+ emitSaveThenMaterializeTagRegisters();
+ }
+
void loadDoubleArgument(int argument, FPRegisterID dst, RegisterID scratch)
{
unsigned src = CallFrame::argumentOffset(argument);
@@ -58,7 +71,18 @@ namespace JSC {
void loadJSStringArgument(VM& vm, int argument, RegisterID dst)
{
loadCellArgument(argument, dst);
- m_failures.append(branchPtr(NotEqual, Address(dst, JSCell::structureOffset()), TrustedImmPtr(vm.stringStructure.get())));
+ m_failures.append(branchStructure(NotEqual,
+ Address(dst, JSCell::structureIDOffset()),
+ vm.stringStructure.get()));
+ }
+
+ void loadArgumentWithSpecificClass(const ClassInfo* classInfo, int argument, RegisterID dst, RegisterID scratch)
+ {
+ loadCellArgument(argument, dst);
+ emitLoadStructure(dst, scratch, dst);
+ appendFailure(branchPtr(NotEqual, Address(scratch, Structure::classInfoOffset()), TrustedImmPtr(classInfo)));
+ // We have to reload the argument since emitLoadStructure clobbered it.
+ loadCellArgument(argument, dst);
}
void loadInt32Argument(int argument, RegisterID dst, Jump& failTarget)
@@ -78,14 +102,26 @@ namespace JSC {
{
m_failures.append(failure);
}
-
+#if USE(JSVALUE64)
void returnJSValue(RegisterID src)
{
if (src != regT0)
move(src, regT0);
- loadPtr(payloadFor(JSStack::CallerFrame, callFrameRegister), callFrameRegister);
+
+ emitRestoreSavedTagRegisters();
+ emitFunctionEpilogue();
ret();
}
+#else
+ void returnJSValue(RegisterID payload, RegisterID tag)
+ {
+ ASSERT_UNUSED(payload, payload == regT0);
+ ASSERT_UNUSED(tag, tag == regT1);
+ emitRestoreSavedTagRegisters();
+ emitFunctionEpilogue();
+ ret();
+ }
+#endif
void returnDouble(FPRegisterID src)
{
@@ -98,14 +134,7 @@ namespace JSC {
move(tagTypeNumberRegister, regT0);
done.link(this);
#else
-#if !CPU(X86)
- // The src register is not clobbered by moveDoubleToInts with ARM, MIPS and SH4 macro assemblers, so let's use it.
moveDoubleToInts(src, regT0, regT1);
-#else
- storeDouble(src, Address(stackPointerRegister, -(int)sizeof(double)));
- loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(JSValue, u.asBits.tag) - sizeof(double)), regT1);
- loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(JSValue, u.asBits.payload) - sizeof(double)), regT0);
-#endif
Jump lowNonZero = branchTestPtr(NonZero, regT1);
Jump highNonZero = branchTestPtr(NonZero, regT0);
move(TrustedImm32(0), regT0);
@@ -113,7 +142,8 @@ namespace JSC {
lowNonZero.link(this);
highNonZero.link(this);
#endif
- loadPtr(payloadFor(JSStack::CallerFrame, callFrameRegister), callFrameRegister);
+ emitRestoreSavedTagRegisters();
+ emitFunctionEpilogue();
ret();
}
@@ -122,7 +152,8 @@ namespace JSC {
if (src != regT0)
move(src, regT0);
tagReturnAsInt32();
- loadPtr(payloadFor(JSStack::CallerFrame, callFrameRegister), callFrameRegister);
+ emitRestoreSavedTagRegisters();
+ emitFunctionEpilogue();
ret();
}
@@ -131,13 +162,14 @@ namespace JSC {
if (src != regT0)
move(src, regT0);
tagReturnAsJSCell();
- loadPtr(payloadFor(JSStack::CallerFrame, callFrameRegister), callFrameRegister);
+ emitRestoreSavedTagRegisters();
+ emitFunctionEpilogue();
ret();
}
- MacroAssemblerCodeRef finalize(VM& vm, MacroAssemblerCodePtr fallback, const char* thunkKind)
+ MacroAssemblerCodeRef finalize(MacroAssemblerCodePtr fallback, const char* thunkKind)
{
- LinkBuffer patchBuffer(vm, this, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*m_vm, *this, GLOBAL_THUNK_ID);
patchBuffer.link(m_failures, CodeLocationLabel(fallback));
for (unsigned i = 0; i < m_calls.size(); i++)
patchBuffer.link(m_calls[i].first, m_calls[i].second);
@@ -161,7 +193,31 @@ namespace JSC {
}
private:
+ void emitSaveThenMaterializeTagRegisters()
+ {
+#if USE(JSVALUE64)
+#if CPU(ARM64)
+ pushPair(tagTypeNumberRegister, tagMaskRegister);
+#else
+ push(tagTypeNumberRegister);
+ push(tagMaskRegister);
+#endif
+ emitMaterializeTagCheckRegisters();
+#endif
+ }
+ void emitRestoreSavedTagRegisters()
+ {
+#if USE(JSVALUE64)
+#if CPU(ARM64)
+ popPair(tagTypeNumberRegister, tagMaskRegister);
+#else
+ pop(tagMaskRegister);
+ pop(tagTypeNumberRegister);
+#endif
+#endif
+ }
+
void tagReturnAsInt32()
{
#if USE(JSVALUE64)
@@ -179,7 +235,7 @@ namespace JSC {
}
MacroAssembler::JumpList m_failures;
- Vector<std::pair<Call, FunctionPtr> > m_calls;
+ Vector<std::pair<Call, FunctionPtr>> m_calls;
};
}
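
The emitSaveThenMaterializeTagRegisters/emitRestoreSavedTagRegisters pair added above spills the JSVALUE64 tag constants around code that clobbers them: the plain push/pop form must restore in the reverse of the save order, while the ARM64 pushPair/popPair form moves both registers as one 16-byte unit and so keeps the same operand order. A minimal stand-alone sketch of that LIFO pairing (values only, no real registers):

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main()
    {
        std::vector<uint64_t> stack;                              // stands in for the machine stack
        const uint64_t tagTypeNumber = 0xffff000000000000ull;     // JSVALUE64 TagTypeNumber
        const uint64_t tagMask       = 0xffff000000000002ull;     // JSVALUE64 TagMask

        stack.push_back(tagTypeNumber);                           // push(tagTypeNumberRegister)
        stack.push_back(tagMask);                                 // push(tagMaskRegister)

        uint64_t restoredTagMask = stack.back(); stack.pop_back();        // pop(tagMaskRegister)
        uint64_t restoredTagTypeNumber = stack.back(); stack.pop_back();  // pop(tagTypeNumberRegister)

        std::printf("restored in reverse order: %d %d\n",
            restoredTagMask == tagMask, restoredTagTypeNumber == tagTypeNumber);
        return 0;
    }
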
diff --git a/Source/JavaScriptCore/jit/SpillRegistersMode.h b/Source/JavaScriptCore/jit/SpillRegistersMode.h
new file mode 100644
index 000000000..160df2c2e
--- /dev/null
+++ b/Source/JavaScriptCore/jit/SpillRegistersMode.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef SpillRegistersMode_h
+#define SpillRegistersMode_h
+
+namespace JSC {
+
+enum SpillRegistersMode { NeedToSpill, DontSpill };
+
+}
+
+#endif
diff --git a/Source/JavaScriptCore/jit/TempRegisterSet.cpp b/Source/JavaScriptCore/jit/TempRegisterSet.cpp
new file mode 100644
index 000000000..9c2e73d43
--- /dev/null
+++ b/Source/JavaScriptCore/jit/TempRegisterSet.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "TempRegisterSet.h"
+
+#if ENABLE(JIT)
+
+#include "JSCInlines.h"
+#include "RegisterSet.h"
+
+namespace JSC {
+
+TempRegisterSet::TempRegisterSet(const RegisterSet& other)
+{
+ clearAll();
+
+ for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
+ GPRReg reg = GPRInfo::toRegister(i);
+ if (other.get(reg))
+ set(reg);
+ }
+ for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
+ FPRReg reg = FPRInfo::toRegister(i);
+ if (other.get(reg))
+ set(reg);
+ }
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/TempRegisterSet.h b/Source/JavaScriptCore/jit/TempRegisterSet.h
new file mode 100644
index 000000000..4c2102400
--- /dev/null
+++ b/Source/JavaScriptCore/jit/TempRegisterSet.h
@@ -0,0 +1,223 @@
+/*
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef TempRegisterSet_h
+#define TempRegisterSet_h
+
+#if ENABLE(JIT)
+
+#include "FPRInfo.h"
+#include "GPRInfo.h"
+
+namespace JSC {
+
+class RegisterSet;
+
+class TempRegisterSet {
+public:
+ TempRegisterSet()
+ {
+ clearAll();
+ }
+
+ TempRegisterSet(const RegisterSet&);
+
+ void set(GPRReg reg)
+ {
+ setBit(GPRInfo::toIndex(reg));
+ }
+
+ void set(JSValueRegs regs)
+ {
+ if (regs.tagGPR() != InvalidGPRReg)
+ set(regs.tagGPR());
+ set(regs.payloadGPR());
+ }
+
+ void setGPRByIndex(unsigned index)
+ {
+ ASSERT(index < GPRInfo::numberOfRegisters);
+ setBit(index);
+ }
+
+ void clear(GPRReg reg)
+ {
+ clearBit(GPRInfo::toIndex(reg));
+ }
+
+ bool get(GPRReg reg) const
+ {
+ return getBit(GPRInfo::toIndex(reg));
+ }
+
+ bool getGPRByIndex(unsigned index) const
+ {
+ ASSERT(index < GPRInfo::numberOfRegisters);
+ return getBit(index);
+ }
+
+ // Return the index'th free GPR.
+ GPRReg getFreeGPR(unsigned index = 0) const
+ {
+ for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
+ if (!getGPRByIndex(i) && !index--)
+ return GPRInfo::toRegister(i);
+ }
+ return InvalidGPRReg;
+ }
+
+ void set(FPRReg reg)
+ {
+ setBit(GPRInfo::numberOfRegisters + FPRInfo::toIndex(reg));
+ }
+
+ void setFPRByIndex(unsigned index)
+ {
+ ASSERT(index < FPRInfo::numberOfRegisters);
+ setBit(GPRInfo::numberOfRegisters + index);
+ }
+
+ void clear(FPRReg reg)
+ {
+ clearBit(GPRInfo::numberOfRegisters + FPRInfo::toIndex(reg));
+ }
+
+ bool get(FPRReg reg) const
+ {
+ return getBit(GPRInfo::numberOfRegisters + FPRInfo::toIndex(reg));
+ }
+
+ bool getFPRByIndex(unsigned index) const
+ {
+ ASSERT(index < FPRInfo::numberOfRegisters);
+ return getBit(GPRInfo::numberOfRegisters + index);
+ }
+
+ // Return the index'th free FPR.
+ FPRReg getFreeFPR(unsigned index = 0) const
+ {
+ for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
+ if (!getFPRByIndex(i) && !index--)
+ return FPRInfo::toRegister(i);
+ }
+ return InvalidFPRReg;
+ }
+
+ template<typename BankInfo>
+ void setByIndex(unsigned index)
+ {
+ set(BankInfo::toRegister(index));
+ }
+
+ template<typename BankInfo>
+ bool getByIndex(unsigned index)
+ {
+ return get(BankInfo::toRegister(index));
+ }
+
+ unsigned numberOfSetGPRs() const
+ {
+ unsigned result = 0;
+ for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
+ if (!getBit(i))
+ continue;
+ result++;
+ }
+ return result;
+ }
+
+ unsigned numberOfSetFPRs() const
+ {
+ unsigned result = 0;
+ for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
+ if (!getBit(GPRInfo::numberOfRegisters + i))
+ continue;
+ result++;
+ }
+ return result;
+ }
+
+ unsigned numberOfSetRegisters() const
+ {
+ unsigned result = 0;
+ for (unsigned i = totalNumberOfRegisters; i--;) {
+ if (!getBit(i))
+ continue;
+ result++;
+ }
+ return result;
+ }
+
+private:
+ void clearAll()
+ {
+ for (unsigned i = numberOfBytesInTempRegisterSet; i--;)
+ m_set[i] = 0;
+ }
+
+ void setBit(unsigned i)
+ {
+ ASSERT(i < totalNumberOfRegisters);
+ m_set[i >> 3] |= (1 << (i & 7));
+ }
+
+ void clearBit(unsigned i)
+ {
+ ASSERT(i < totalNumberOfRegisters);
+ m_set[i >> 3] &= ~(1 << (i & 7));
+ }
+
+ bool getBit(unsigned i) const
+ {
+ ASSERT(i < totalNumberOfRegisters);
+ return !!(m_set[i >> 3] & (1 << (i & 7)));
+ }
+
+ static const unsigned totalNumberOfRegisters =
+ GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters;
+
+ static const unsigned numberOfBytesInTempRegisterSet =
+ (totalNumberOfRegisters + 7) >> 3;
+
+ uint8_t m_set[numberOfBytesInTempRegisterSet];
+};
+
+} // namespace JSC
+
+#else // ENABLE(JIT) -> so if JIT is disabled
+
+namespace JSC {
+
+// Define TempRegisterSet to something, to make it easier to refer to this type in code that
+// make be compiled when the JIT is disabled.
+
+struct TempRegisterSet { };
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // TempRegisterSet_h
+
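
TempRegisterSet above is a plain bitset over GPRs followed by FPRs: register index i lives in byte i >> 3 at bit i & 7, and FPR indices are offset by GPRInfo::numberOfRegisters. A self-contained sketch of that packing with made-up register counts (the real class takes them from GPRInfo/FPRInfo):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Toy stand-ins for GPRInfo::numberOfRegisters / FPRInfo::numberOfRegisters.
    static const unsigned numGPRs = 11;
    static const unsigned numFPRs = 6;
    static const unsigned totalRegisters = numGPRs + numFPRs;
    static const unsigned bytesInSet = (totalRegisters + 7) >> 3;

    struct ToyTempRegisterSet {
        uint8_t bits[bytesInSet] = { };

        void setBit(unsigned i)       { assert(i < totalRegisters); bits[i >> 3] |= 1 << (i & 7); }
        bool getBit(unsigned i) const { assert(i < totalRegisters); return bits[i >> 3] & (1 << (i & 7)); }

        // FPR index 'f' lives after all GPR bits, exactly as in TempRegisterSet.
        void setFPR(unsigned f)       { setBit(numGPRs + f); }
        bool getFPR(unsigned f) const { return getBit(numGPRs + f); }
    };

    int main()
    {
        ToyTempRegisterSet set;
        set.setBit(3);     // a GPR
        set.setFPR(2);     // an FPR, stored at bit numGPRs + 2
        std::printf("gpr3=%d fpr2=%d gpr4=%d\n", set.getBit(3), set.getFPR(2), set.getBit(4));
        return 0;
    }
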
diff --git a/Source/JavaScriptCore/jit/ThunkGenerator.h b/Source/JavaScriptCore/jit/ThunkGenerator.h
index a9d7e04ee..031748cbe 100644
--- a/Source/JavaScriptCore/jit/ThunkGenerator.h
+++ b/Source/JavaScriptCore/jit/ThunkGenerator.h
@@ -26,8 +26,6 @@
#ifndef ThunkGenerator_h
#define ThunkGenerator_h
-#include <wtf/Platform.h>
-
#if ENABLE(JIT)
namespace JSC {
diff --git a/Source/JavaScriptCore/jit/ThunkGenerators.cpp b/Source/JavaScriptCore/jit/ThunkGenerators.cpp
index 9684df2d0..4a71dfeb2 100644
--- a/Source/JavaScriptCore/jit/ThunkGenerators.cpp
+++ b/Source/JavaScriptCore/jit/ThunkGenerators.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2010, 2012, 2013, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,7 +27,14 @@
#include "ThunkGenerators.h"
#include "CodeBlock.h"
-#include "Operations.h"
+#include "DFGSpeculativeJIT.h"
+#include "JITOperations.h"
+#include "JSArray.h"
+#include "JSArrayIterator.h"
+#include "JSStack.h"
+#include "MathCommon.h"
+#include "MaxFrameExtentForSlowPathCall.h"
+#include "JSCInlines.h"
#include "SpecializedThunkJIT.h"
#include <wtf/InlineASM.h>
#include <wtf/StringPrintStream.h>
@@ -37,373 +44,341 @@
namespace JSC {
-static JSInterfaceJIT::Call generateSlowCaseFor(VM* vm, JSInterfaceJIT& jit)
+inline void emitPointerValidation(CCallHelpers& jit, GPRReg pointerGPR)
{
- jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT2);
- jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT2, JSInterfaceJIT::regT2);
- jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT2, JSStack::ScopeChain);
-
- // Also initialize ReturnPC and CodeBlock, like a JS function would.
- jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3);
- jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);
- jit.emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
-
- jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
- jit.restoreArgumentReference();
- JSInterfaceJIT::Call callNotJSFunction = jit.call();
- jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::callFrameRegister);
- jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
- jit.ret();
-
- return callNotJSFunction;
+ if (ASSERT_DISABLED)
+ return;
+ CCallHelpers::Jump isNonZero = jit.branchTestPtr(CCallHelpers::NonZero, pointerGPR);
+ jit.abortWithReason(TGInvalidPointer);
+ isNonZero.link(&jit);
+ jit.pushToSave(pointerGPR);
+ jit.load8(pointerGPR, pointerGPR);
+ jit.popToRestore(pointerGPR);
}
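
emitPointerValidation above is a debug-only guard: it traps if the code pointer is null and then loads one byte through it so an unmapped address faults right here rather than at some later jump. The same idea in plain C++ (validatePointer is an invented name for this sketch):

    #include <cassert>
    #include <cstdint>

    // Debug-only probe for a code pointer: fail fast and at a known site.
    static inline void validatePointer(const void* p)
    {
    #ifndef NDEBUG
        assert(p && "null code pointer");                             // ~ abortWithReason(TGInvalidPointer)
        volatile uint8_t firstByte = *static_cast<const uint8_t*>(p); // ~ load8: fault here if unmapped
        (void)firstByte;
    #else
        (void)p;
    #endif
    }

    int main()
    {
        int x = 0;
        validatePointer(&x);    // a valid pointer passes silently
        return 0;
    }
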
-static MacroAssemblerCodeRef linkForGenerator(VM* vm, FunctionPtr lazyLink, FunctionPtr notJSFunction, const char* name)
+// We will jump here if the JIT code tries to make a call, but the
+// linking helper (C++ code) decides to throw an exception instead.
+MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(VM* vm)
{
- JSInterfaceJIT jit;
-
- JSInterfaceJIT::JumpList slowCase;
-
-#if USE(JSVALUE64)
- slowCase.append(jit.emitJumpIfNotJSCell(JSInterfaceJIT::regT0));
- slowCase.append(jit.emitJumpIfNotType(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1, JSFunctionType));
-#else // USE(JSVALUE64)
- slowCase.append(jit.branch32(JSInterfaceJIT::NotEqual, JSInterfaceJIT::regT1, JSInterfaceJIT::TrustedImm32(JSValue::CellTag)));
- slowCase.append(jit.emitJumpIfNotType(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1, JSFunctionType));
-#endif // USE(JSVALUE64)
-
- // Finish canonical initialization before JS function call.
- jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSFunction::offsetOfScopeChain()), JSInterfaceJIT::regT1);
- jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
-
- // Also initialize ReturnPC for use by lazy linking and exceptions.
- jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3);
- jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);
-
- jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
- jit.restoreArgumentReference();
- JSInterfaceJIT::Call callLazyLink = jit.call();
- jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
- jit.jump(JSInterfaceJIT::regT0);
-
- slowCase.link(&jit);
- JSInterfaceJIT::Call callNotJSFunction = generateSlowCaseFor(vm, jit);
+ CCallHelpers jit(vm);
- LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
- patchBuffer.link(callLazyLink, lazyLink);
- patchBuffer.link(callNotJSFunction, notJSFunction);
-
- return FINALIZE_CODE(patchBuffer, ("link %s trampoline", name));
-}
+ // The call pushed a return address, so we need to pop it back off to re-align the stack,
+ // even though we won't use it.
+ jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);
-MacroAssemblerCodeRef linkCallGenerator(VM* vm)
-{
- return linkForGenerator(vm, FunctionPtr(cti_vm_lazyLinkCall), FunctionPtr(cti_op_call_NotJSFunction), "call");
-}
+ jit.copyCalleeSavesToVMCalleeSavesBuffer();
-MacroAssemblerCodeRef linkConstructGenerator(VM* vm)
-{
- return linkForGenerator(vm, FunctionPtr(cti_vm_lazyLinkConstruct), FunctionPtr(cti_op_construct_NotJSConstruct), "construct");
+ jit.setupArguments(CCallHelpers::TrustedImmPtr(vm), GPRInfo::callFrameRegister);
+ jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(lookupExceptionHandler)), GPRInfo::nonArgGPR0);
+ emitPointerValidation(jit, GPRInfo::nonArgGPR0);
+ jit.call(GPRInfo::nonArgGPR0);
+ jit.jumpToExceptionHandler();
+
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
+ return FINALIZE_CODE(patchBuffer, ("Throw exception from call slow path thunk"));
}
-MacroAssemblerCodeRef linkClosureCallGenerator(VM* vm)
+static void slowPathFor(
+ CCallHelpers& jit, VM* vm, Sprt_JITOperation_ECli slowPathFunction)
{
- return linkForGenerator(vm, FunctionPtr(cti_vm_lazyLinkClosureCall), FunctionPtr(cti_op_call_NotJSFunction), "closure call");
+ jit.emitFunctionPrologue();
+ jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
+#if OS(WINDOWS) && CPU(X86_64)
+ // Windows X86_64 needs some space pointed to by arg0 for return types larger than 64 bits.
+ // Other argument values are shifted by 1. Use space on the stack for our two return values.
+ // Moving the stack down maxFrameExtentForSlowPathCall bytes gives us room for our 3 arguments
+ // and space for the 16 byte return area.
+ jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
+ jit.move(GPRInfo::regT2, GPRInfo::argumentGPR2);
+ jit.addPtr(CCallHelpers::TrustedImm32(32), CCallHelpers::stackPointerRegister, GPRInfo::argumentGPR0);
+ jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
+ jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
+ emitPointerValidation(jit, GPRInfo::nonArgGPR0);
+ jit.call(GPRInfo::nonArgGPR0);
+ jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR, 8), GPRInfo::returnValueGPR2);
+ jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR), GPRInfo::returnValueGPR);
+ jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
+#else
+ if (maxFrameExtentForSlowPathCall)
+ jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
+ jit.setupArgumentsWithExecState(GPRInfo::regT2);
+ jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
+ emitPointerValidation(jit, GPRInfo::nonArgGPR0);
+ jit.call(GPRInfo::nonArgGPR0);
+ if (maxFrameExtentForSlowPathCall)
+ jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
+#endif
+
+ // This slow call will return the address of one of the following:
+ // 1) Exception throwing thunk.
+ // 2) Host call return value returner thingy.
+ // 3) The function to call.
+ // The second return value GPR will hold a non-zero value for tail calls.
+
+ emitPointerValidation(jit, GPRInfo::returnValueGPR);
+ jit.emitFunctionEpilogue();
+
+ RELEASE_ASSERT(reinterpret_cast<void*>(KeepTheFrame) == reinterpret_cast<void*>(0));
+ CCallHelpers::Jump doNotTrash = jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::returnValueGPR2);
+
+ jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);
+ jit.prepareForTailCallSlow(GPRInfo::returnValueGPR);
+
+ doNotTrash.link(&jit);
+ jit.jump(GPRInfo::returnValueGPR);
}
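
The Windows x86-64 branch of slowPathFor exists because the MSVC x64 calling convention returns aggregates larger than 64 bits through a hidden pointer passed as the first argument, shifting every visible argument over by one register; the thunk reserves that return area on its own stack. A small illustration of the ABI rule (an illustrative example, not code from this patch):

    #include <cstdint>
    #include <cstdio>

    struct SlowPathResult {            // 16 bytes: too large to come back in RAX alone on Win64
        void* codePtr;
        uintptr_t calleeMarker;
    };

    // On Win64 the caller passes a hidden pointer to the 16-byte return slot in the
    // first argument register (RCX), so 'code' arrives in RDX and 'marker' in R8 --
    // the "arguments are shifted by 1" situation the thunk compensates for.
    static SlowPathResult makeResult(void* code, uintptr_t marker)
    {
        SlowPathResult result = { code, marker };
        return result;
    }

    int main()
    {
        SlowPathResult r = makeResult(nullptr, 42);
        std::printf("%p %llu\n", r.codePtr, static_cast<unsigned long long>(r.calleeMarker));
        return 0;
    }
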
-static MacroAssemblerCodeRef virtualForGenerator(VM* vm, FunctionPtr compile, FunctionPtr notJSFunction, const char* name, CodeSpecializationKind kind)
+MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm)
{
- JSInterfaceJIT jit;
-
- JSInterfaceJIT::JumpList slowCase;
-
-#if USE(JSVALUE64)
- slowCase.append(jit.emitJumpIfNotJSCell(JSInterfaceJIT::regT0));
-#else // USE(JSVALUE64)
- slowCase.append(jit.branch32(JSInterfaceJIT::NotEqual, JSInterfaceJIT::regT1, JSInterfaceJIT::TrustedImm32(JSValue::CellTag)));
-#endif // USE(JSVALUE64)
- slowCase.append(jit.emitJumpIfNotType(JSInterfaceJIT::regT0, JSInterfaceJIT::regT1, JSFunctionType));
-
- // Finish canonical initialization before JS function call.
- jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSFunction::offsetOfScopeChain()), JSInterfaceJIT::regT1);
- jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
-
- jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
- JSInterfaceJIT::Jump hasCodeBlock1 = jit.branch32(JSInterfaceJIT::GreaterThanOrEqual, JSInterfaceJIT::Address(JSInterfaceJIT::regT2, FunctionExecutable::offsetOfNumParametersFor(kind)), JSInterfaceJIT::TrustedImm32(0));
- jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3);
- jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
- jit.restoreArgumentReference();
- JSInterfaceJIT::Call callCompile = jit.call();
- jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
- jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
-
- hasCodeBlock1.link(&jit);
- jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, FunctionExecutable::offsetOfJITCodeWithArityCheckFor(kind)), JSInterfaceJIT::regT0);
- jit.jump(JSInterfaceJIT::regT0);
-
- slowCase.link(&jit);
- JSInterfaceJIT::Call callNotJSFunction = generateSlowCaseFor(vm, jit);
+ // The return address is on the stack or in the link register. We will hence
+ // save the return address to the call frame while we make a C++ function call
+ // to perform linking and lazy compilation if necessary. We expect the callee
+ // to be in regT0/regT1 (payload/tag), the CallFrame to have already
+ // been adjusted, and all other registers to be available for use.
+ CCallHelpers jit(vm);
- LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
- patchBuffer.link(callCompile, compile);
- patchBuffer.link(callNotJSFunction, notJSFunction);
+ slowPathFor(jit, vm, operationLinkCall);
- return FINALIZE_CODE(patchBuffer, ("virtual %s trampoline", name));
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
+ return FINALIZE_CODE(patchBuffer, ("Link call slow path thunk"));
}
-MacroAssemblerCodeRef virtualCallGenerator(VM* vm)
+// For closure optimizations, we only include calls, since if you're using closures for
+// object construction then you're going to lose big time anyway.
+MacroAssemblerCodeRef linkPolymorphicCallThunkGenerator(VM* vm)
{
- return virtualForGenerator(vm, FunctionPtr(cti_op_call_jitCompile), FunctionPtr(cti_op_call_NotJSFunction), "call", CodeForCall);
+ CCallHelpers jit(vm);
+
+ slowPathFor(jit, vm, operationLinkPolymorphicCall);
+
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
+ return FINALIZE_CODE(patchBuffer, ("Link polymorphic call slow path thunk"));
}
-MacroAssemblerCodeRef virtualConstructGenerator(VM* vm)
+// FIXME: We should distinguish between a megamorphic virtual call vs. a slow
+// path virtual call so that we can enable fast tail calls for megamorphic
+// virtual calls by using the shuffler.
+// https://bugs.webkit.org/show_bug.cgi?id=148831
+MacroAssemblerCodeRef virtualThunkFor(VM* vm, CallLinkInfo& callLinkInfo)
{
- return virtualForGenerator(vm, FunctionPtr(cti_op_construct_jitCompile), FunctionPtr(cti_op_construct_NotJSConstruct), "construct", CodeForConstruct);
-}
+ // The callee is in regT0 (for JSVALUE32_64, the tag is in regT1).
+ // The return address is on the stack, or in the link register. We will hence
+ // jump to the callee, or save the return address to the call frame while we
+ // make a C++ function call to the appropriate JIT operation.
-MacroAssemblerCodeRef stringLengthTrampolineGenerator(VM* vm)
-{
- JSInterfaceJIT jit;
+ CCallHelpers jit(vm);
+
+ CCallHelpers::JumpList slowCase;
+
+ // This is a slow path execution, and regT2 contains the CallLinkInfo. Count the
+ // slow path execution for the profiler.
+ jit.add32(
+ CCallHelpers::TrustedImm32(1),
+ CCallHelpers::Address(GPRInfo::regT2, CallLinkInfo::offsetOfSlowPathCount()));
+
+ // FIXME: we should have a story for eliminating these checks. In many cases,
+ // the DFG knows that the value is definitely a cell, or definitely a function.
#if USE(JSVALUE64)
- // Check eax is a string
- JSInterfaceJIT::Jump failureCases1 = jit.emitJumpIfNotJSCell(JSInterfaceJIT::regT0);
- JSInterfaceJIT::Jump failureCases2 = jit.branchPtr(
- JSInterfaceJIT::NotEqual, JSInterfaceJIT::Address(
- JSInterfaceJIT::regT0, JSCell::structureOffset()),
- JSInterfaceJIT::TrustedImmPtr(vm->stringStructure.get()));
-
- // Checks out okay! - get the length from the Ustring.
- jit.load32(
- JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSString::offsetOfLength()),
- JSInterfaceJIT::regT0);
-
- JSInterfaceJIT::Jump failureCases3 = jit.branch32(
- JSInterfaceJIT::LessThan, JSInterfaceJIT::regT0, JSInterfaceJIT::TrustedImm32(0));
-
- // regT0 contains a 64 bit value (is positive, is zero extended) so we don't need sign extend here.
- jit.emitFastArithIntToImmNoCheck(JSInterfaceJIT::regT0, JSInterfaceJIT::regT0);
+ jit.move(CCallHelpers::TrustedImm64(TagMask), GPRInfo::regT4);
-#else // USE(JSVALUE64)
- // regT0 holds payload, regT1 holds tag
-
- JSInterfaceJIT::Jump failureCases1 = jit.branch32(
- JSInterfaceJIT::NotEqual, JSInterfaceJIT::regT1,
- JSInterfaceJIT::TrustedImm32(JSValue::CellTag));
- JSInterfaceJIT::Jump failureCases2 = jit.branchPtr(
- JSInterfaceJIT::NotEqual,
- JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSCell::structureOffset()),
- JSInterfaceJIT::TrustedImmPtr(vm->stringStructure.get()));
-
- // Checks out okay! - get the length from the Ustring.
- jit.load32(
- JSInterfaceJIT::Address(JSInterfaceJIT::regT0, JSString::offsetOfLength()),
- JSInterfaceJIT::regT2);
-
- JSInterfaceJIT::Jump failureCases3 = jit.branch32(
- JSInterfaceJIT::Above, JSInterfaceJIT::regT2, JSInterfaceJIT::TrustedImm32(INT_MAX));
- jit.move(JSInterfaceJIT::regT2, JSInterfaceJIT::regT0);
- jit.move(JSInterfaceJIT::TrustedImm32(JSValue::Int32Tag), JSInterfaceJIT::regT1);
-#endif // USE(JSVALUE64)
-
- jit.ret();
+ slowCase.append(
+ jit.branchTest64(
+ CCallHelpers::NonZero, GPRInfo::regT0, GPRInfo::regT4));
+#else
+ slowCase.append(
+ jit.branch32(
+ CCallHelpers::NotEqual, GPRInfo::regT1,
+ CCallHelpers::TrustedImm32(JSValue::CellTag)));
+#endif
+ AssemblyHelpers::emitLoadStructure(jit, GPRInfo::regT0, GPRInfo::regT4, GPRInfo::regT1);
+ slowCase.append(
+ jit.branchPtr(
+ CCallHelpers::NotEqual,
+ CCallHelpers::Address(GPRInfo::regT4, Structure::classInfoOffset()),
+ CCallHelpers::TrustedImmPtr(JSFunction::info())));
- JSInterfaceJIT::Call failureCases1Call = jit.makeTailRecursiveCall(failureCases1);
- JSInterfaceJIT::Call failureCases2Call = jit.makeTailRecursiveCall(failureCases2);
- JSInterfaceJIT::Call failureCases3Call = jit.makeTailRecursiveCall(failureCases3);
+ // Now we know we have a JSFunction.
- LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
+ jit.loadPtr(
+ CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfExecutable()),
+ GPRInfo::regT4);
+ jit.loadPtr(
+ CCallHelpers::Address(
+ GPRInfo::regT4, ExecutableBase::offsetOfJITCodeWithArityCheckFor(
+ callLinkInfo.specializationKind())),
+ GPRInfo::regT4);
+ slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT4));
- patchBuffer.link(failureCases1Call, FunctionPtr(cti_op_get_by_id_string_fail));
- patchBuffer.link(failureCases2Call, FunctionPtr(cti_op_get_by_id_string_fail));
- patchBuffer.link(failureCases3Call, FunctionPtr(cti_op_get_by_id_string_fail));
+ // Now we know that we have a CodeBlock, and we're committed to making a fast
+ // call.
- return FINALIZE_CODE(patchBuffer, ("string length trampoline"));
+ // Make a tail call. This will return back to JIT code.
+ emitPointerValidation(jit, GPRInfo::regT4);
+ if (callLinkInfo.isTailCall()) {
+ jit.preserveReturnAddressAfterCall(GPRInfo::regT0);
+ jit.prepareForTailCallSlow(GPRInfo::regT4);
+ }
+ jit.jump(GPRInfo::regT4);
+
+ slowCase.link(&jit);
+
+ // Here we don't know anything, so revert to the full slow path.
+
+ slowPathFor(jit, vm, operationVirtualCall);
+
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
+ return FINALIZE_CODE(
+ patchBuffer,
+ ("Virtual %s slow path thunk",
+ callLinkInfo.callMode() == CallMode::Regular ? "call" : callLinkInfo.callMode() == CallMode::Tail ? "tail call" : "construct"));
}
-static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind)
+enum ThunkEntryType { EnterViaCall, EnterViaJump };
+
+static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind, ThunkEntryType entryType = EnterViaCall)
{
int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind);
- JSInterfaceJIT jit;
-
- jit.emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
- jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
+ JSInterfaceJIT jit(vm);
-#if CPU(X86)
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT0);
- jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
- jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
+ if (entryType == EnterViaCall)
+ jit.emitFunctionPrologue();
+#if USE(JSVALUE64)
+ else if (entryType == EnterViaJump) {
+ // We're coming from a specialized thunk that has saved the prior tag registers' contents.
+ // Restore them now.
+#if CPU(ARM64)
+ jit.popPair(JSInterfaceJIT::tagTypeNumberRegister, JSInterfaceJIT::tagMaskRegister);
+#else
+ jit.pop(JSInterfaceJIT::tagMaskRegister);
+ jit.pop(JSInterfaceJIT::tagTypeNumberRegister);
+#endif
+ }
+#endif
- jit.peek(JSInterfaceJIT::regT1);
- jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ReturnPC);
+ jit.emitPutToCallFrameHeader(0, JSStack::CodeBlock);
+ jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
+#if CPU(X86)
// Calling convention: f(ecx, edx, ...);
// Host function signature: f(ExecState*);
jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);
- jit.subPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(void*)), JSInterfaceJIT::stackPointerRegister); // Align stack after call.
+ jit.subPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister); // Align stack after prologue.
// call the function
jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT1);
jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1);
- jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction));
- jit.addPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(void*)), JSInterfaceJIT::stackPointerRegister);
+ jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister);
#elif CPU(X86_64)
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT0);
- jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
- jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
-
- jit.peek(JSInterfaceJIT::regT1);
- jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ReturnPC);
-
#if !OS(WINDOWS)
// Calling convention: f(edi, esi, edx, ecx, ...);
// Host function signature: f(ExecState*);
jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::edi);
- jit.subPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister); // Align stack after call.
-
jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::esi);
jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, JSFunction::offsetOfExecutable()), X86Registers::r9);
- jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));
- jit.addPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#else
// Calling convention: f(ecx, edx, r8, r9, ...);
// Host function signature: f(ExecState*);
jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);
- // Leave space for the callee parameter home addresses and align the stack.
- jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
+ // Leave space for the callee parameter home addresses.
+ // At this point the stack is aligned to 16 bytes, but if this changes at some point, we need to emit code to align it.
+ jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::edx);
jit.loadPtr(JSInterfaceJIT::Address(X86Registers::edx, JSFunction::offsetOfExecutable()), X86Registers::r9);
- jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));
- jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
+ jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif
-#elif CPU(ARM)
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT2);
- jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
- jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
-
- jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
- jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);
+#elif CPU(ARM64)
+ COMPILE_ASSERT(ARM64Registers::x0 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_0);
+ COMPILE_ASSERT(ARM64Registers::x1 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_1);
+ COMPILE_ASSERT(ARM64Registers::x2 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_2);
- // Calling convention: f(r0 == regT0, r1 == regT1, ...);
// Host function signature: f(ExecState*);
- jit.move(JSInterfaceJIT::callFrameRegister, ARMRegisters::r0);
-
- jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARMRegisters::r1);
- jit.move(JSInterfaceJIT::regT2, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
- jit.loadPtr(JSInterfaceJIT::Address(ARMRegisters::r1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
- jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));
-
- jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
-
-#elif CPU(SH4)
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT2);
- jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
- jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
-
- jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
- jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);
-
- // Calling convention: f(r0 == regT4, r1 == regT5, ...);
- // Host function signature: f(ExecState*);
- jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT4);
-
- jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT5);
- jit.move(JSInterfaceJIT::regT2, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
- jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT5, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
-
- jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction), JSInterfaceJIT::regT0);
- jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
-
-#elif CPU(MIPS)
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- jit.emitGetFromCallFrameHeaderPtr(JSStack::CallerFrame, JSInterfaceJIT::regT0);
- jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
- jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
-
- jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
- jit.emitPutToCallFrameHeader(JSInterfaceJIT::regT3, JSStack::ReturnPC);
-
- // Calling convention: f(a0, a1, a2, a3);
- // Host function signature: f(ExecState*);
-
- // Allocate stack space for 16 bytes (8-byte aligned)
- // 16 bytes (unused) for 4 arguments
+ jit.move(JSInterfaceJIT::callFrameRegister, ARM64Registers::x0);
+
+ jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARM64Registers::x1);
+ jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x1, JSFunction::offsetOfExecutable()), ARM64Registers::x2);
+ jit.call(JSInterfaceJIT::Address(ARM64Registers::x2, executableOffsetToFunction));
+#elif CPU(ARM) || CPU(SH4) || CPU(MIPS)
+#if CPU(MIPS)
+ // Allocate 16 bytes of (unused, 8-byte aligned) stack space for the 4 arguments.
jit.subPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
+#endif
- // Setup arg0
- jit.move(JSInterfaceJIT::callFrameRegister, MIPSRegisters::a0);
+ // Calling convention is f(argumentGPR0, argumentGPR1, ...).
+ // Host function signature is f(ExecState*).
+ jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);
- // Call
- jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, MIPSRegisters::a2);
- jit.loadPtr(JSInterfaceJIT::Address(MIPSRegisters::a2, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
- jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::argumentGPR1);
+ jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::argumentGPR1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));
+#if CPU(MIPS)
// Restore stack space
jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
-
- jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
+#endif
#else
#error "JIT not supported on this platform."
UNUSED_PARAM(executableOffsetToFunction);
- breakpoint();
+ abortWithReason(TGNotSupported);
#endif
// Check for an exception
#if USE(JSVALUE64)
- jit.load64(&(vm->exception), JSInterfaceJIT::regT2);
+ jit.load64(vm->addressOfException(), JSInterfaceJIT::regT2);
JSInterfaceJIT::Jump exceptionHandler = jit.branchTest64(JSInterfaceJIT::NonZero, JSInterfaceJIT::regT2);
#else
JSInterfaceJIT::Jump exceptionHandler = jit.branch32(
JSInterfaceJIT::NotEqual,
- JSInterfaceJIT::AbsoluteAddress(reinterpret_cast<char*>(&vm->exception) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)),
- JSInterfaceJIT::TrustedImm32(JSValue::EmptyValueTag));
+ JSInterfaceJIT::AbsoluteAddress(vm->addressOfException()),
+ JSInterfaceJIT::TrustedImm32(0));
#endif
+ jit.emitFunctionEpilogue();
// Return.
jit.ret();
// Handle an exception
exceptionHandler.link(&jit);
- // Grab the return address.
- jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT1);
-
- jit.move(JSInterfaceJIT::TrustedImmPtr(&vm->exceptionLocation), JSInterfaceJIT::regT2);
- jit.storePtr(JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
- jit.poke(JSInterfaceJIT::callFrameRegister, OBJECT_OFFSETOF(struct JITStackFrame, callFrame) / sizeof(void*));
-
+ jit.copyCalleeSavesToVMCalleeSavesBuffer();
jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
- // Set the return address.
- jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(ctiVMThrowTrampoline).value()), JSInterfaceJIT::regT1);
- jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT1);
- jit.ret();
+#if CPU(X86) && USE(JSVALUE32_64)
+ jit.addPtr(JSInterfaceJIT::TrustedImm32(-12), JSInterfaceJIT::stackPointerRegister);
+ jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT0);
+ jit.push(JSInterfaceJIT::regT0);
+#else
+#if OS(WINDOWS)
+ // Allocate space on stack for the 4 parameter registers.
+ jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
+#endif
+ jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);
+#endif
+ jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(operationVMHandleException).value()), JSInterfaceJIT::regT3);
+ jit.call(JSInterfaceJIT::regT3);
+#if CPU(X86) && USE(JSVALUE32_64)
+ jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
+#elif OS(WINDOWS)
+ jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
+#endif
- LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
- return FINALIZE_CODE(patchBuffer, ("native %s trampoline", toCString(kind).data()));
+ jit.jumpToExceptionHandler();
+
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
+ return FINALIZE_CODE(patchBuffer, ("native %s%s trampoline", entryType == EnterViaJump ? "Tail " : "", toCString(kind).data()));
}
MacroAssemblerCodeRef nativeCallGenerator(VM* vm)
@@ -411,11 +386,150 @@ MacroAssemblerCodeRef nativeCallGenerator(VM* vm)
return nativeForGenerator(vm, CodeForCall);
}
+MacroAssemblerCodeRef nativeTailCallGenerator(VM* vm)
+{
+ return nativeForGenerator(vm, CodeForCall, EnterViaJump);
+}
+
MacroAssemblerCodeRef nativeConstructGenerator(VM* vm)
{
return nativeForGenerator(vm, CodeForConstruct);
}
+MacroAssemblerCodeRef arityFixupGenerator(VM* vm)
+{
+ JSInterfaceJIT jit(vm);
+
+ // We enter with fixup count in argumentGPR0
+ // We have the guarantee that a0, a1, a2, t3, t4 and t5 (or t0 for Windows) are all distinct :-)
+#if USE(JSVALUE64)
+#if OS(WINDOWS)
+ const GPRReg extraTemp = JSInterfaceJIT::regT0;
+#else
+ const GPRReg extraTemp = JSInterfaceJIT::regT5;
+#endif
+# if CPU(X86_64)
+ jit.pop(JSInterfaceJIT::regT4);
+# endif
+ jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
+ jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::argumentGPR2);
+ jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::argumentGPR2);
+
+ // Check to see if we have extra slots we can use
+ jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR1);
+ jit.and32(JSInterfaceJIT::TrustedImm32(stackAlignmentRegisters() - 1), JSInterfaceJIT::argumentGPR1);
+ JSInterfaceJIT::Jump noExtraSlot = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR1);
+ jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), extraTemp);
+ JSInterfaceJIT::Label fillExtraSlots(jit.label());
+ jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight));
+ jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2);
+ jit.branchSub32(JSInterfaceJIT::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR1).linkTo(fillExtraSlots, &jit);
+ jit.and32(JSInterfaceJIT::TrustedImm32(-stackAlignmentRegisters()), JSInterfaceJIT::argumentGPR0);
+ JSInterfaceJIT::Jump done = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR0);
+ noExtraSlot.link(&jit);
+
+ jit.neg64(JSInterfaceJIT::argumentGPR0);
+
+ // Move current frame down argumentGPR0 number of slots
+ JSInterfaceJIT::Label copyLoop(jit.label());
+ jit.load64(JSInterfaceJIT::regT3, extraTemp);
+ jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight));
+ jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
+ jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(copyLoop, &jit);
+
+ // Fill in argumentGPR0 missing arg slots with undefined
+ jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR2);
+ jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), extraTemp);
+ JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
+ jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight));
+ jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
+ jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(fillUndefinedLoop, &jit);
+
+ // Adjust call frame register and stack pointer to account for missing args
+ jit.move(JSInterfaceJIT::argumentGPR0, extraTemp);
+ jit.lshift64(JSInterfaceJIT::TrustedImm32(3), extraTemp);
+ jit.addPtr(extraTemp, JSInterfaceJIT::callFrameRegister);
+ jit.addPtr(extraTemp, JSInterfaceJIT::stackPointerRegister);
+
+ done.link(&jit);
+
+# if CPU(X86_64)
+ jit.push(JSInterfaceJIT::regT4);
+# endif
+ jit.ret();
+#else
+# if CPU(X86)
+ jit.pop(JSInterfaceJIT::regT4);
+# endif
+ jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
+ jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::argumentGPR2);
+ jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::argumentGPR2);
+
+ // Check to see if we have extra slots we can use
+ jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR1);
+ jit.and32(JSInterfaceJIT::TrustedImm32(stackAlignmentRegisters() - 1), JSInterfaceJIT::argumentGPR1);
+ JSInterfaceJIT::Jump noExtraSlot = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR1);
+ JSInterfaceJIT::Label fillExtraSlots(jit.label());
+ jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT5);
+ jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight, PayloadOffset));
+ jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT5);
+ jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight, TagOffset));
+ jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2);
+ jit.branchSub32(JSInterfaceJIT::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR1).linkTo(fillExtraSlots, &jit);
+ jit.and32(JSInterfaceJIT::TrustedImm32(-stackAlignmentRegisters()), JSInterfaceJIT::argumentGPR0);
+ JSInterfaceJIT::Jump done = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR0);
+ noExtraSlot.link(&jit);
+
+ jit.neg32(JSInterfaceJIT::argumentGPR0);
+
+ // Move current frame down argumentGPR0 number of slots
+ JSInterfaceJIT::Label copyLoop(jit.label());
+ jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, PayloadOffset), JSInterfaceJIT::regT5);
+ jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, PayloadOffset));
+ jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, TagOffset), JSInterfaceJIT::regT5);
+ jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, TagOffset));
+ jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
+ jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(copyLoop, &jit);
+
+ // Fill in argumentGPR0 missing arg slots with undefined
+ jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR2);
+ JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
+ jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT5);
+ jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, PayloadOffset));
+ jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT5);
+ jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, TagOffset));
+
+ jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
+ jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(fillUndefinedLoop, &jit);
+
+ // Adjust call frame register and stack pointer to account for missing args
+ jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::regT5);
+ jit.lshift32(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT5);
+ jit.addPtr(JSInterfaceJIT::regT5, JSInterfaceJIT::callFrameRegister);
+ jit.addPtr(JSInterfaceJIT::regT5, JSInterfaceJIT::stackPointerRegister);
+
+ done.link(&jit);
+
+# if CPU(X86)
+ jit.push(JSInterfaceJIT::regT4);
+# endif
+ jit.ret();
+#endif
+
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
+ return FINALIZE_CODE(patchBuffer, ("fixup arity"));
+}
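
arityFixupGenerator rounds the fixup count to the stack alignment in two pieces: the remainder (fixup & (stackAlignmentRegisters() - 1)) is satisfied by padding slots filled with undefined just past the current frame, and the rounded-down part (fixup & -stackAlignmentRegisters()) is how far the whole frame is slid. The arithmetic on its own, with an assumed alignment of 2 registers:

    #include <cstdio>

    // Mirrors the alignment arithmetic in arityFixupGenerator:
    //   extra = missing & (stackAlignmentRegisters - 1)  -> padding slots filled in place
    //   shift = missing & -stackAlignmentRegisters       -> whole frame moved by this many slots
    int main()
    {
        const unsigned stackAlignmentRegisters = 2;   // assumption; the real value is platform-defined
        for (unsigned missing = 0; missing <= 5; ++missing) {
            unsigned extra = missing & (stackAlignmentRegisters - 1);
            unsigned shift = missing & ~(stackAlignmentRegisters - 1);
            std::printf("missing=%u -> pad %u slot(s) in place, slide frame by %u slot(s)\n",
                missing, extra, shift);
        }
        return 0;
    }
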
+
+MacroAssemblerCodeRef unreachableGenerator(VM* vm)
+{
+ JSInterfaceJIT jit(vm);
+
+ jit.breakpoint();
+
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
+ return FINALIZE_CODE(patchBuffer, ("unreachable thunk"));
+}
+
static void stringCharLoad(SpecializedThunkJIT& jit, VM* vm)
{
// load string
@@ -456,67 +570,104 @@ static void charToString(SpecializedThunkJIT& jit, VM* vm, MacroAssembler::Regis
MacroAssemblerCodeRef charCodeAtThunkGenerator(VM* vm)
{
- SpecializedThunkJIT jit(1);
+ SpecializedThunkJIT jit(vm, 1);
stringCharLoad(jit, vm);
jit.returnInt32(SpecializedThunkJIT::regT0);
- return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "charCodeAt");
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charCodeAt");
}
MacroAssemblerCodeRef charAtThunkGenerator(VM* vm)
{
- SpecializedThunkJIT jit(1);
+ SpecializedThunkJIT jit(vm, 1);
stringCharLoad(jit, vm);
charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
jit.returnJSCell(SpecializedThunkJIT::regT0);
- return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "charAt");
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charAt");
}
MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM* vm)
{
- SpecializedThunkJIT jit(1);
+ SpecializedThunkJIT jit(vm, 1);
// load char code
jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
jit.returnJSCell(SpecializedThunkJIT::regT0);
- return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "fromCharCode");
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "fromCharCode");
+}
+
+MacroAssemblerCodeRef clz32ThunkGenerator(VM* vm)
+{
+ SpecializedThunkJIT jit(vm, 1);
+ MacroAssembler::Jump nonIntArgJump;
+ jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArgJump);
+
+ SpecializedThunkJIT::Label convertedArgumentReentry(&jit);
+ jit.countLeadingZeros32(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
+ jit.returnInt32(SpecializedThunkJIT::regT1);
+
+ if (jit.supportsFloatingPointTruncate()) {
+ nonIntArgJump.link(&jit);
+ jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
+ jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(convertedArgumentReentry, &jit);
+ jit.appendFailure(jit.jump());
+ } else
+ jit.appendFailure(nonIntArgJump);
+
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "clz32");
}
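
The clz32 thunk's fast path is countLeadingZeros32 on an int32 argument, with a double-truncation retry before giving up to the slow path. Its required result for 0 is 32 (Math.clz32(0)), which the reference loop below makes explicit:

    #include <cstdint>
    #include <cstdio>

    // Reference semantics for the fast path: count leading zero bits of a uint32,
    // with clz32(0) == 32 as Math.clz32 requires.
    static uint32_t clz32(uint32_t x)
    {
        uint32_t n = 0;
        for (uint32_t mask = 0x80000000u; mask && !(x & mask); mask >>= 1)
            ++n;
        return n;
    }

    int main()
    {
        std::printf("%u %u %u\n", clz32(0), clz32(1), clz32(0x00800000u)); // 32 31 8
        return 0;
    }
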
MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm)
{
- SpecializedThunkJIT jit(1);
+ SpecializedThunkJIT jit(vm, 1);
if (!jit.supportsFloatingPointSqrt())
return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
- return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "sqrt");
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "sqrt");
}
#define UnaryDoubleOpWrapper(function) function##Wrapper
enum MathThunkCallingConvention { };
typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);
-extern "C" {
-
-double jsRound(double) REFERENCED_FROM_ASM;
-double jsRound(double d)
-{
- double integer = ceil(d);
- return integer - (integer - d > 0.5);
-}
-}
+#if CPU(X86_64) && COMPILER(GCC_OR_CLANG) && (OS(DARWIN) || OS(LINUX))
-#if CPU(X86_64) && COMPILER(GCC) && (PLATFORM(MAC) || OS(LINUX))
+#define defineUnaryDoubleOpWrapper(function) \
+ asm( \
+ ".text\n" \
+ ".globl " SYMBOL_STRING(function##Thunk) "\n" \
+ HIDE_SYMBOL(function##Thunk) "\n" \
+ SYMBOL_STRING(function##Thunk) ":" "\n" \
+ "pushq %rax\n" \
+ "call " GLOBAL_REFERENCE(function) "\n" \
+ "popq %rcx\n" \
+ "ret\n" \
+ );\
+ extern "C" { \
+ MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
+ } \
+ static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
+#elif CPU(X86) && COMPILER(GCC_OR_CLANG) && OS(LINUX) && defined(__PIC__)
#define defineUnaryDoubleOpWrapper(function) \
asm( \
".text\n" \
".globl " SYMBOL_STRING(function##Thunk) "\n" \
HIDE_SYMBOL(function##Thunk) "\n" \
SYMBOL_STRING(function##Thunk) ":" "\n" \
+ "pushl %ebx\n" \
+ "subl $20, %esp\n" \
+ "movsd %xmm0, (%esp) \n" \
+ "call __x86.get_pc_thunk.bx\n" \
+ "addl $_GLOBAL_OFFSET_TABLE_, %ebx\n" \
"call " GLOBAL_REFERENCE(function) "\n" \
+ "fstpl (%esp) \n" \
+ "movsd (%esp), %xmm0 \n" \
+ "addl $20, %esp\n" \
+ "popl %ebx\n" \
"ret\n" \
);\
extern "C" { \
@@ -524,19 +675,19 @@ double jsRound(double d)
} \
static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
-#elif CPU(X86) && COMPILER(GCC) && (PLATFORM(MAC) || OS(LINUX))
+#elif CPU(X86) && COMPILER(GCC_OR_CLANG) && (OS(DARWIN) || OS(LINUX))
#define defineUnaryDoubleOpWrapper(function) \
asm( \
".text\n" \
".globl " SYMBOL_STRING(function##Thunk) "\n" \
HIDE_SYMBOL(function##Thunk) "\n" \
SYMBOL_STRING(function##Thunk) ":" "\n" \
- "subl $8, %esp\n" \
+ "subl $20, %esp\n" \
"movsd %xmm0, (%esp) \n" \
"call " GLOBAL_REFERENCE(function) "\n" \
"fstpl (%esp) \n" \
"movsd (%esp), %xmm0 \n" \
- "addl $8, %esp\n" \
+ "addl $20, %esp\n" \
"ret\n" \
);\
extern "C" { \
@@ -544,7 +695,7 @@ double jsRound(double d)
} \
static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
-#elif CPU(ARM_THUMB2) && COMPILER(GCC) && PLATFORM(IOS)
+#elif CPU(ARM_THUMB2) && COMPILER(GCC_OR_CLANG) && PLATFORM(IOS)
#define defineUnaryDoubleOpWrapper(function) \
asm( \
@@ -566,6 +717,49 @@ double jsRound(double d)
MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
} \
static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
+
+#elif CPU(ARM64)
+
+#define defineUnaryDoubleOpWrapper(function) \
+ asm( \
+ ".text\n" \
+ ".align 2\n" \
+ ".globl " SYMBOL_STRING(function##Thunk) "\n" \
+ HIDE_SYMBOL(function##Thunk) "\n" \
+ SYMBOL_STRING(function##Thunk) ":" "\n" \
+ "b " GLOBAL_REFERENCE(function) "\n" \
+ ".previous" \
+ ); \
+ extern "C" { \
+ MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
+ } \
+ static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
+
+#elif CPU(X86) && COMPILER(MSVC) && OS(WINDOWS)
+
+// MSVC does not allow floor, etc., to be called directly from inline assembly, so we need to wrap these functions.
+static double (_cdecl *floorFunction)(double) = floor;
+static double (_cdecl *ceilFunction)(double) = ceil;
+static double (_cdecl *expFunction)(double) = exp;
+static double (_cdecl *logFunction)(double) = log;
+static double (_cdecl *jsRoundFunction)(double) = jsRound;
+
+#define defineUnaryDoubleOpWrapper(function) \
+ extern "C" __declspec(naked) MathThunkCallingConvention function##Thunk(MathThunkCallingConvention) \
+ { \
+ __asm \
+ { \
+ __asm sub esp, 20 \
+ __asm movsd mmword ptr [esp], xmm0 \
+ __asm call function##Function \
+ __asm fstp qword ptr [esp] \
+ __asm movsd xmm0, mmword ptr [esp] \
+ __asm add esp, 20 \
+ __asm ret \
+ } \
+ } \
+ static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
+
#else
#define defineUnaryDoubleOpWrapper(function) \
@@ -580,12 +774,11 @@ defineUnaryDoubleOpWrapper(ceil);
static const double oneConstant = 1.0;
static const double negativeHalfConstant = -0.5;
-static const double zeroConstant = 0.0;
static const double halfConstant = 0.5;
MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
{
- SpecializedThunkJIT jit(1);
+ SpecializedThunkJIT jit(vm, 1);
MacroAssembler::Jump nonIntJump;
if (!UnaryDoubleOpWrapper(floor) || !jit.supportsFloatingPoint())
return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
@@ -593,10 +786,18 @@ MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
jit.returnInt32(SpecializedThunkJIT::regT0);
nonIntJump.link(&jit);
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
+#if CPU(ARM64)
+ SpecializedThunkJIT::JumpList doubleResult;
+ jit.floorDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
+ jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
+ jit.returnInt32(SpecializedThunkJIT::regT0);
+ doubleResult.link(&jit);
+ jit.returnDouble(SpecializedThunkJIT::fpRegT0);
+#else
SpecializedThunkJIT::Jump intResult;
SpecializedThunkJIT::JumpList doubleResult;
if (jit.supportsFloatingPointTruncate()) {
- jit.loadDouble(&zeroConstant, SpecializedThunkJIT::fpRegT1);
+ jit.moveZeroToDouble(SpecializedThunkJIT::fpRegT1);
doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
SpecializedThunkJIT::JumpList slowPath;
// Handle the negative doubles in the slow path for now.
@@ -612,12 +813,13 @@ MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
jit.returnInt32(SpecializedThunkJIT::regT0);
doubleResult.link(&jit);
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
- return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "floor");
+#endif // CPU(ARM64)
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "floor");
}
MacroAssemblerCodeRef ceilThunkGenerator(VM* vm)
{
- SpecializedThunkJIT jit(1);
+ SpecializedThunkJIT jit(vm, 1);
if (!UnaryDoubleOpWrapper(ceil) || !jit.supportsFloatingPoint())
return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
MacroAssembler::Jump nonIntJump;
@@ -625,18 +827,22 @@ MacroAssemblerCodeRef ceilThunkGenerator(VM* vm)
jit.returnInt32(SpecializedThunkJIT::regT0);
nonIntJump.link(&jit);
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
- jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil));
+ if (jit.supportsFloatingPointRounding())
+ jit.ceilDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
+ else
+ jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil));
+
SpecializedThunkJIT::JumpList doubleResult;
jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
jit.returnInt32(SpecializedThunkJIT::regT0);
doubleResult.link(&jit);
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
- return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "ceil");
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "ceil");
}
MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
{
- SpecializedThunkJIT jit(1);
+ SpecializedThunkJIT jit(vm, 1);
if (!UnaryDoubleOpWrapper(jsRound) || !jit.supportsFloatingPoint())
return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
MacroAssembler::Jump nonIntJump;
@@ -647,12 +853,12 @@ MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
SpecializedThunkJIT::Jump intResult;
SpecializedThunkJIT::JumpList doubleResult;
if (jit.supportsFloatingPointTruncate()) {
- jit.loadDouble(&zeroConstant, SpecializedThunkJIT::fpRegT1);
+ jit.moveZeroToDouble(SpecializedThunkJIT::fpRegT1);
doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
SpecializedThunkJIT::JumpList slowPath;
// Handle the negative doubles in the slow path for now.
slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
- jit.loadDouble(&halfConstant, SpecializedThunkJIT::fpRegT1);
+ jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), SpecializedThunkJIT::fpRegT1);
jit.addDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0));
intResult = jit.jump();
@@ -665,38 +871,38 @@ MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
jit.returnInt32(SpecializedThunkJIT::regT0);
doubleResult.link(&jit);
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
- return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "round");
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "round");
}
MacroAssemblerCodeRef expThunkGenerator(VM* vm)
{
if (!UnaryDoubleOpWrapper(exp))
return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
- SpecializedThunkJIT jit(1);
+ SpecializedThunkJIT jit(vm, 1);
if (!jit.supportsFloatingPoint())
return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(exp));
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
- return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "exp");
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "exp");
}
MacroAssemblerCodeRef logThunkGenerator(VM* vm)
{
if (!UnaryDoubleOpWrapper(log))
return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
- SpecializedThunkJIT jit(1);
+ SpecializedThunkJIT jit(vm, 1);
if (!jit.supportsFloatingPoint())
return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(log));
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
- return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "log");
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "log");
}
MacroAssemblerCodeRef absThunkGenerator(VM* vm)
{
- SpecializedThunkJIT jit(1);
+ SpecializedThunkJIT jit(vm, 1);
if (!jit.supportsFloatingPointAbs())
return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
MacroAssembler::Jump nonIntJump;
@@ -704,23 +910,23 @@ MacroAssemblerCodeRef absThunkGenerator(VM* vm)
jit.rshift32(SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(31), SpecializedThunkJIT::regT1);
jit.add32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
- jit.appendFailure(jit.branch32(MacroAssembler::Equal, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1 << 31)));
+ jit.appendFailure(jit.branchTest32(MacroAssembler::Signed, SpecializedThunkJIT::regT0));
jit.returnInt32(SpecializedThunkJIT::regT0);
nonIntJump.link(&jit);
// Shame about the double int conversion here.
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
jit.absDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
jit.returnDouble(SpecializedThunkJIT::fpRegT1);
- return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "abs");
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "abs");
}
MacroAssemblerCodeRef powThunkGenerator(VM* vm)
{
- SpecializedThunkJIT jit(2);
+ SpecializedThunkJIT jit(vm, 2);
if (!jit.supportsFloatingPoint())
return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
- jit.loadDouble(&oneConstant, SpecializedThunkJIT::fpRegT1);
+ jit.loadDouble(MacroAssembler::TrustedImmPtr(&oneConstant), SpecializedThunkJIT::fpRegT1);
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
MacroAssembler::Jump nonIntExponent;
jit.loadInt32Argument(1, SpecializedThunkJIT::regT0, nonIntExponent);
@@ -748,7 +954,7 @@ MacroAssemblerCodeRef powThunkGenerator(VM* vm)
if (jit.supportsFloatingPointSqrt()) {
nonIntExponent.link(&jit);
- jit.loadDouble(&negativeHalfConstant, SpecializedThunkJIT::fpRegT3);
+ jit.loadDouble(MacroAssembler::TrustedImmPtr(&negativeHalfConstant), SpecializedThunkJIT::fpRegT3);
jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::regT0);
jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleLessThanOrEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::fpRegT3));
@@ -763,12 +969,12 @@ MacroAssemblerCodeRef powThunkGenerator(VM* vm)
} else
jit.appendFailure(nonIntExponent);
- return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "pow");
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "pow");
}
MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
{
- SpecializedThunkJIT jit(2);
+ SpecializedThunkJIT jit(vm, 2);
MacroAssembler::Jump nonIntArg0Jump;
jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArg0Jump);
SpecializedThunkJIT::Label doneLoadingArg0(&jit);
@@ -782,8 +988,7 @@ MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
nonIntArg0Jump.link(&jit);
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg0, &jit);
- jit.xor32(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0);
- jit.jump(doneLoadingArg0);
+ jit.appendFailure(jit.jump());
} else
jit.appendFailure(nonIntArg0Jump);
@@ -791,12 +996,27 @@ MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
nonIntArg1Jump.link(&jit);
jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1);
jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg1, &jit);
- jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT1);
- jit.jump(doneLoadingArg1);
+ jit.appendFailure(jit.jump());
} else
jit.appendFailure(nonIntArg1Jump);
- return jit.finalize(*vm, vm->jitStubs->ctiNativeCall(vm), "imul");
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "imul");
+}
+
+MacroAssemblerCodeRef randomThunkGenerator(VM* vm)
+{
+ SpecializedThunkJIT jit(vm, 0);
+ if (!jit.supportsFloatingPoint())
+ return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
+
+#if USE(JSVALUE64)
+ jit.emitRandomThunk(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT3, SpecializedThunkJIT::fpRegT0);
+ jit.returnDouble(SpecializedThunkJIT::fpRegT0);
+
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "random");
+#else
+ return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
+#endif
}
}
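
Editorial note (not part of the patch): the .cpp hunks above all converge on the same shape for a specialized thunk generator after this change — the VM is passed to the SpecializedThunkJIT constructor, CPUs that cannot do the work inline fall back to ctiNativeCall, and finalize() now takes the tail-call stub (ctiNativeTailCall) instead of *vm plus ctiNativeCall. The sketch below is a minimal illustration of that pattern using a hypothetical generator name; only the MacroAssembler/SpecializedThunkJIT calls that actually appear in the hunks above are used.

// Illustration only, hypothetical name; real generators live in
// Source/JavaScriptCore/jit/ThunkGenerators.cpp.
MacroAssemblerCodeRef exampleSqrtLikeThunkGenerator(VM* vm)
{
    // The VM is now passed to the constructor (old code passed only the
    // argument count).
    SpecializedThunkJIT jit(vm, 1);

    // Bail out to the generic native call stub when the target CPU cannot
    // do the operation inline.
    if (!jit.supportsFloatingPointSqrt())
        return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));

    // Load the double argument, operate on it in place, and return it.
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
    jit.returnDouble(SpecializedThunkJIT::fpRegT0);

    // finalize() no longer takes the VM by reference; the slow-path
    // fallback is now the tail-call variant of the native call stub.
    return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "exampleSqrtLike");
}
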
diff --git a/Source/JavaScriptCore/jit/ThunkGenerators.h b/Source/JavaScriptCore/jit/ThunkGenerators.h
index a4b0fc4b3..9fb8abb44 100644
--- a/Source/JavaScriptCore/jit/ThunkGenerators.h
+++ b/Source/JavaScriptCore/jit/ThunkGenerators.h
@@ -26,22 +26,32 @@
#ifndef ThunkGenerators_h
#define ThunkGenerators_h
+#include "CodeSpecializationKind.h"
#include "ThunkGenerator.h"
#if ENABLE(JIT)
namespace JSC {
-MacroAssemblerCodeRef linkCallGenerator(VM*);
-MacroAssemblerCodeRef linkConstructGenerator(VM*);
-MacroAssemblerCodeRef linkClosureCallGenerator(VM*);
-MacroAssemblerCodeRef virtualCallGenerator(VM*);
-MacroAssemblerCodeRef virtualConstructGenerator(VM*);
-MacroAssemblerCodeRef stringLengthTrampolineGenerator(VM*);
+class CallLinkInfo;
+class CCallHelpers;
+
+MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(VM*);
+
+MacroAssemblerCodeRef linkCallThunk(VM*, CallLinkInfo&, CodeSpecializationKind);
+MacroAssemblerCodeRef linkCallThunkGenerator(VM*);
+MacroAssemblerCodeRef linkPolymorphicCallThunkGenerator(VM*);
+
+MacroAssemblerCodeRef virtualThunkFor(VM*, CallLinkInfo&);
+
MacroAssemblerCodeRef nativeCallGenerator(VM*);
MacroAssemblerCodeRef nativeConstructGenerator(VM*);
+MacroAssemblerCodeRef nativeTailCallGenerator(VM*);
+MacroAssemblerCodeRef arityFixupGenerator(VM*);
+MacroAssemblerCodeRef unreachableGenerator(VM*);
MacroAssemblerCodeRef charCodeAtThunkGenerator(VM*);
MacroAssemblerCodeRef charAtThunkGenerator(VM*);
+MacroAssemblerCodeRef clz32ThunkGenerator(VM*);
MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM*);
MacroAssemblerCodeRef absThunkGenerator(VM*);
MacroAssemblerCodeRef ceilThunkGenerator(VM*);
@@ -52,6 +62,7 @@ MacroAssemblerCodeRef roundThunkGenerator(VM*);
MacroAssemblerCodeRef sqrtThunkGenerator(VM*);
MacroAssemblerCodeRef powThunkGenerator(VM*);
MacroAssemblerCodeRef imulThunkGenerator(VM*);
+MacroAssemblerCodeRef randomThunkGenerator(VM*);
}
#endif // ENABLE(JIT)
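
Editorial note (not part of the patch): the header hunk above only declares the new generators (linkCallThunkGenerator, virtualThunkFor, randomThunkGenerator, and friends); it does not show how callers obtain the generated code. The lines below are illustration only and assume JSC's usual per-VM thunk cache, VM::getCTIStub(ThunkGenerator), which lazily runs a generator once and caches the resulting MacroAssemblerCodeRef.

// Illustration only (assumed consumer-side usage, not in this patch).
MacroAssemblerCodeRef randomThunk = vm->getCTIStub(randomThunkGenerator);
void* entryPoint = randomThunk.code().executableAddress();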