Diffstat (limited to 'Source/JavaScriptCore/jit')
-rw-r--r--  Source/JavaScriptCore/jit/AssemblyHelpers.cpp | 630
-rw-r--r--  Source/JavaScriptCore/jit/AssemblyHelpers.h | 1322
-rw-r--r--  Source/JavaScriptCore/jit/BinarySwitch.cpp | 391
-rw-r--r--  Source/JavaScriptCore/jit/BinarySwitch.h | 143
-rw-r--r--  Source/JavaScriptCore/jit/CCallHelpers.cpp | 73
-rw-r--r--  Source/JavaScriptCore/jit/CCallHelpers.h | 1119
-rw-r--r--  Source/JavaScriptCore/jit/CachedRecovery.cpp | 71
-rw-r--r--  Source/JavaScriptCore/jit/CachedRecovery.h | 134
-rw-r--r--  Source/JavaScriptCore/jit/CallFrameShuffleData.cpp | 68
-rw-r--r--  Source/JavaScriptCore/jit/CallFrameShuffleData.h | 52
-rw-r--r--  Source/JavaScriptCore/jit/CallFrameShuffler.cpp | 776
-rw-r--r--  Source/JavaScriptCore/jit/CallFrameShuffler.h | 804
-rw-r--r--  Source/JavaScriptCore/jit/CallFrameShuffler32_64.cpp | 305
-rw-r--r--  Source/JavaScriptCore/jit/CallFrameShuffler64.cpp | 369
-rw-r--r--  Source/JavaScriptCore/jit/CompactJITCodeMap.h | 41
-rw-r--r--  Source/JavaScriptCore/jit/ExecutableAllocationFuzz.cpp | 73
-rw-r--r--  Source/JavaScriptCore/jit/ExecutableAllocationFuzz.h | 47
-rw-r--r--  Source/JavaScriptCore/jit/ExecutableAllocator.cpp | 463
-rw-r--r--  Source/JavaScriptCore/jit/ExecutableAllocator.h | 113
-rw-r--r--  Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp | 194
-rw-r--r--  Source/JavaScriptCore/jit/FPRInfo.h | 112
-rw-r--r--  Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp | 85
-rw-r--r--  Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h | 57
-rw-r--r--  Source/JavaScriptCore/jit/GPRInfo.cpp | 51
-rw-r--r--  Source/JavaScriptCore/jit/GPRInfo.h | 392
-rw-r--r--  Source/JavaScriptCore/jit/HostCallReturnValue.cpp | 1
-rw-r--r--  Source/JavaScriptCore/jit/HostCallReturnValue.h | 12
-rw-r--r--  Source/JavaScriptCore/jit/ICStats.cpp | 128
-rw-r--r--  Source/JavaScriptCore/jit/ICStats.h | 194
-rw-r--r--  Source/JavaScriptCore/jit/IntrinsicEmitter.cpp | 136
-rw-r--r--  Source/JavaScriptCore/jit/JIT.cpp | 655
-rw-r--r--  Source/JavaScriptCore/jit/JIT.h | 462
-rw-r--r--  Source/JavaScriptCore/jit/JITAddGenerator.cpp | 187
-rw-r--r--  Source/JavaScriptCore/jit/JITAddGenerator.h | 79
-rw-r--r--  Source/JavaScriptCore/jit/JITArithmetic.cpp | 1065
-rw-r--r--  Source/JavaScriptCore/jit/JITArithmetic32_64.cpp | 782
-rw-r--r--  Source/JavaScriptCore/jit/JITBitAndGenerator.cpp | 85
-rw-r--r--  Source/JavaScriptCore/jit/JITBitAndGenerator.h | 46
-rw-r--r--  Source/JavaScriptCore/jit/JITBitBinaryOpGenerator.h | 68
-rw-r--r--  Source/JavaScriptCore/jit/JITBitOrGenerator.cpp | 74
-rw-r--r--  Source/JavaScriptCore/jit/JITBitOrGenerator.h | 46
-rw-r--r--  Source/JavaScriptCore/jit/JITBitXorGenerator.cpp | 73
-rw-r--r--  Source/JavaScriptCore/jit/JITBitXorGenerator.h | 46
-rw-r--r--  Source/JavaScriptCore/jit/JITCall.cpp | 297
-rw-r--r--  Source/JavaScriptCore/jit/JITCall32_64.cpp | 319
-rw-r--r--  Source/JavaScriptCore/jit/JITCode.cpp | 172
-rw-r--r--  Source/JavaScriptCore/jit/JITCode.h | 82
-rw-r--r--  Source/JavaScriptCore/jit/JITCompilationEffort.h | 8
-rw-r--r--  Source/JavaScriptCore/jit/JITDisassembler.cpp | 9
-rw-r--r--  Source/JavaScriptCore/jit/JITDisassembler.h | 35
-rw-r--r--  Source/JavaScriptCore/jit/JITDivGenerator.cpp | 139
-rw-r--r--  Source/JavaScriptCore/jit/JITDivGenerator.h | 82
-rw-r--r--  Source/JavaScriptCore/jit/JITExceptions.cpp | 51
-rw-r--r--  Source/JavaScriptCore/jit/JITExceptions.h | 17
-rw-r--r--  Source/JavaScriptCore/jit/JITInlineCacheGenerator.cpp | 126
-rw-r--r--  Source/JavaScriptCore/jit/JITInlineCacheGenerator.h | 59
-rw-r--r--  Source/JavaScriptCore/jit/JITInlines.h | 619
-rw-r--r--  Source/JavaScriptCore/jit/JITLeftShiftGenerator.cpp | 84
-rw-r--r--  Source/JavaScriptCore/jit/JITLeftShiftGenerator.h | 46
-rw-r--r--  Source/JavaScriptCore/jit/JITMathIC.h | 290
-rw-r--r--  Source/JavaScriptCore/jit/JITMathICForwards.h | 46
-rw-r--r--  Source/JavaScriptCore/jit/JITMathICInlineResult.h | 40
-rw-r--r--  Source/JavaScriptCore/jit/JITMulGenerator.cpp | 254
-rw-r--r--  Source/JavaScriptCore/jit/JITMulGenerator.h | 79
-rw-r--r--  Source/JavaScriptCore/jit/JITNegGenerator.cpp | 127
-rw-r--r--  Source/JavaScriptCore/jit/JITNegGenerator.h | 57
-rw-r--r--  Source/JavaScriptCore/jit/JITOpcodes.cpp | 1283
-rw-r--r--  Source/JavaScriptCore/jit/JITOpcodes32_64.cpp | 946
-rw-r--r--  Source/JavaScriptCore/jit/JITOperationWrappers.h | 413
-rw-r--r--  Source/JavaScriptCore/jit/JITOperations.cpp | 2494
-rw-r--r--  Source/JavaScriptCore/jit/JITOperations.h | 552
-rw-r--r--  Source/JavaScriptCore/jit/JITOperationsMSVC64.cpp | 46
-rw-r--r--  Source/JavaScriptCore/jit/JITPropertyAccess.cpp | 1297
-rw-r--r--  Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp | 901
-rw-r--r--  Source/JavaScriptCore/jit/JITRightShiftGenerator.cpp | 140
-rw-r--r--  Source/JavaScriptCore/jit/JITRightShiftGenerator.h | 60
-rw-r--r--  Source/JavaScriptCore/jit/JITStubRoutine.cpp | 9
-rw-r--r--  Source/JavaScriptCore/jit/JITStubRoutine.h | 48
-rw-r--r--  Source/JavaScriptCore/jit/JITStubs.cpp | 54
-rw-r--r--  Source/JavaScriptCore/jit/JITStubs.h | 59
-rw-r--r--  Source/JavaScriptCore/jit/JITStubsARM.h | 302
-rw-r--r--  Source/JavaScriptCore/jit/JITStubsARMv7.h | 351
-rw-r--r--  Source/JavaScriptCore/jit/JITStubsMSVC64.asm | 44
-rw-r--r--  Source/JavaScriptCore/jit/JITStubsX86.h | 649
-rw-r--r--  Source/JavaScriptCore/jit/JITStubsX86Common.h | 148
-rw-r--r--  Source/JavaScriptCore/jit/JITStubsX86_64.h | 218
-rw-r--r--  Source/JavaScriptCore/jit/JITSubGenerator.cpp | 142
-rw-r--r--  Source/JavaScriptCore/jit/JITSubGenerator.h | 76
-rw-r--r--  Source/JavaScriptCore/jit/JITThunks.cpp | 72
-rw-r--r--  Source/JavaScriptCore/jit/JITThunks.h | 72
-rw-r--r--  Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.cpp | 16
-rw-r--r--  Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.h | 15
-rw-r--r--  Source/JavaScriptCore/jit/JITWorklist.cpp | 330
-rw-r--r--  Source/JavaScriptCore/jit/JITWorklist.h | 83
-rw-r--r--  Source/JavaScriptCore/jit/JITWriteBarrier.h | 147
-rw-r--r--  Source/JavaScriptCore/jit/JSInterfaceJIT.h | 56
-rw-r--r--  Source/JavaScriptCore/jit/PCToCodeOriginMap.cpp | 306
-rw-r--r--  Source/JavaScriptCore/jit/PCToCodeOriginMap.h | 101
-rw-r--r--  Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.cpp | 137
-rw-r--r--  Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.h | 111
-rw-r--r--  Source/JavaScriptCore/jit/Reg.cpp (renamed from Source/JavaScriptCore/jit/ClosureCallStubRoutine.cpp) | 39
-rw-r--r--  Source/JavaScriptCore/jit/Reg.h | 248
-rw-r--r--  Source/JavaScriptCore/jit/RegisterAtOffset.cpp | 45
-rw-r--r--  Source/JavaScriptCore/jit/RegisterAtOffset.h | 77
-rw-r--r--  Source/JavaScriptCore/jit/RegisterAtOffsetList.cpp | 72
-rw-r--r--  Source/JavaScriptCore/jit/RegisterAtOffsetList.h (renamed from Source/JavaScriptCore/jit/ClosureCallStubRoutine.h) | 58
-rw-r--r--  Source/JavaScriptCore/jit/RegisterMap.h | 110
-rw-r--r--  Source/JavaScriptCore/jit/RegisterSet.cpp | 301
-rw-r--r--  Source/JavaScriptCore/jit/RegisterSet.h | 135
-rw-r--r--  Source/JavaScriptCore/jit/Repatch.cpp | 2095
-rw-r--r--  Source/JavaScriptCore/jit/Repatch.h | 52
-rw-r--r--  Source/JavaScriptCore/jit/ScratchRegisterAllocator.cpp | 302
-rw-r--r--  Source/JavaScriptCore/jit/ScratchRegisterAllocator.h | 198
-rw-r--r--  Source/JavaScriptCore/jit/SetupVarargsFrame.cpp | 141
-rw-r--r--  Source/JavaScriptCore/jit/SetupVarargsFrame.h | 43
-rw-r--r--  Source/JavaScriptCore/jit/SlowPathCall.h | 9
-rw-r--r--  Source/JavaScriptCore/jit/SnippetOperand.h | 104
-rw-r--r--  Source/JavaScriptCore/jit/SpecializedThunkJIT.h | 51
-rw-r--r--  Source/JavaScriptCore/jit/SpillRegistersMode.h | 32
-rw-r--r--  Source/JavaScriptCore/jit/TagRegistersMode.cpp | 50
-rw-r--r--  Source/JavaScriptCore/jit/TagRegistersMode.h | 42
-rw-r--r--  Source/JavaScriptCore/jit/TempRegisterSet.cpp | 3
-rw-r--r--  Source/JavaScriptCore/jit/TempRegisterSet.h | 27
-rw-r--r--  Source/JavaScriptCore/jit/ThunkGenerator.h | 8
-rw-r--r--  Source/JavaScriptCore/jit/ThunkGenerators.cpp | 930
-rw-r--r--  Source/JavaScriptCore/jit/ThunkGenerators.h | 33
-rw-r--r--  Source/JavaScriptCore/jit/UnusedPointer.h | 5
127 files changed, 21464 insertions, 10486 deletions
diff --git a/Source/JavaScriptCore/jit/AssemblyHelpers.cpp b/Source/JavaScriptCore/jit/AssemblyHelpers.cpp
index ddf1d6359..783204792 100644
--- a/Source/JavaScriptCore/jit/AssemblyHelpers.cpp
+++ b/Source/JavaScriptCore/jit/AssemblyHelpers.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,6 +28,10 @@
#if ENABLE(JIT)
+#include "JITOperations.h"
+#include "JSCInlines.h"
+#include "LinkBuffer.h"
+
namespace JSC {
ExecutableBase* AssemblyHelpers::executableFor(const CodeOrigin& codeOrigin)
@@ -35,7 +39,7 @@ ExecutableBase* AssemblyHelpers::executableFor(const CodeOrigin& codeOrigin)
if (!codeOrigin.inlineCallFrame)
return m_codeBlock->ownerExecutable();
- return codeOrigin.inlineCallFrame->executable.get();
+ return codeOrigin.inlineCallFrame->baselineCodeBlock->ownerExecutable();
}
Vector<BytecodeAndMachineOffset>& AssemblyHelpers::decodedCodeMapFor(CodeBlock* codeBlock)
@@ -52,6 +56,117 @@ Vector<BytecodeAndMachineOffset>& AssemblyHelpers::decodedCodeMapFor(CodeBlock*
return result.iterator->value;
}
+AssemblyHelpers::JumpList AssemblyHelpers::branchIfNotType(
+ JSValueRegs regs, GPRReg tempGPR, const InferredType::Descriptor& descriptor, TagRegistersMode mode)
+{
+ AssemblyHelpers::JumpList result;
+
+ switch (descriptor.kind()) {
+ case InferredType::Bottom:
+ result.append(jump());
+ break;
+
+ case InferredType::Boolean:
+ result.append(branchIfNotBoolean(regs, tempGPR));
+ break;
+
+ case InferredType::Other:
+ result.append(branchIfNotOther(regs, tempGPR));
+ break;
+
+ case InferredType::Int32:
+ result.append(branchIfNotInt32(regs, mode));
+ break;
+
+ case InferredType::Number:
+ result.append(branchIfNotNumber(regs, tempGPR, mode));
+ break;
+
+ case InferredType::String:
+ result.append(branchIfNotCell(regs, mode));
+ result.append(branchIfNotString(regs.payloadGPR()));
+ break;
+
+ case InferredType::Symbol:
+ result.append(branchIfNotCell(regs, mode));
+ result.append(branchIfNotSymbol(regs.payloadGPR()));
+ break;
+
+ case InferredType::ObjectWithStructure:
+ result.append(branchIfNotCell(regs, mode));
+ result.append(
+ branchStructure(
+ NotEqual,
+ Address(regs.payloadGPR(), JSCell::structureIDOffset()),
+ descriptor.structure()));
+ break;
+
+ case InferredType::ObjectWithStructureOrOther: {
+ Jump ok = branchIfOther(regs, tempGPR);
+ result.append(branchIfNotCell(regs, mode));
+ result.append(
+ branchStructure(
+ NotEqual,
+ Address(regs.payloadGPR(), JSCell::structureIDOffset()),
+ descriptor.structure()));
+ ok.link(this);
+ break;
+ }
+
+ case InferredType::Object:
+ result.append(branchIfNotCell(regs, mode));
+ result.append(branchIfNotObject(regs.payloadGPR()));
+ break;
+
+ case InferredType::ObjectOrOther: {
+ Jump ok = branchIfOther(regs, tempGPR);
+ result.append(branchIfNotCell(regs, mode));
+ result.append(branchIfNotObject(regs.payloadGPR()));
+ ok.link(this);
+ break;
+ }
+
+ case InferredType::Top:
+ break;
+ }
+
+ return result;
+}
+
+AssemblyHelpers::Jump AssemblyHelpers::branchIfFastTypedArray(GPRReg baseGPR)
+{
+ return branch32(
+ Equal,
+ Address(baseGPR, JSArrayBufferView::offsetOfMode()),
+ TrustedImm32(FastTypedArray));
+}
+
+AssemblyHelpers::Jump AssemblyHelpers::branchIfNotFastTypedArray(GPRReg baseGPR)
+{
+ return branch32(
+ NotEqual,
+ Address(baseGPR, JSArrayBufferView::offsetOfMode()),
+ TrustedImm32(FastTypedArray));
+}
+
+void AssemblyHelpers::incrementSuperSamplerCount()
+{
+ add32(TrustedImm32(1), AbsoluteAddress(bitwise_cast<const void*>(&g_superSamplerCount)));
+}
+
+void AssemblyHelpers::decrementSuperSamplerCount()
+{
+ sub32(TrustedImm32(1), AbsoluteAddress(bitwise_cast<const void*>(&g_superSamplerCount)));
+}
+
+void AssemblyHelpers::purifyNaN(FPRReg fpr)
+{
+ MacroAssembler::Jump notNaN = branchDouble(DoubleEqual, fpr, fpr);
+ static const double NaN = PNaN;
+ loadDouble(TrustedImmPtr(&NaN), fpr);
+ notNaN.link(this);
+}
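
For reference, a minimal C++ sketch of the check purifyNaN() emits; purifyNaNSketch is a hypothetical helper name, and it assumes PNaN is the canonical quiet-NaN bit pattern used for boxed doubles:

#include <limits>

static inline double purifyNaNSketch(double value)
{
    // NaN is the only double that compares unequal to itself; replace any
    // impure NaN with the canonical quiet NaN before it can be re-boxed.
    return value != value ? std::numeric_limits<double>::quiet_NaN() : value;
}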
+
#if ENABLE(SAMPLING_FLAGS)
void AssemblyHelpers::setSamplingFlag(int32_t flag)
{
@@ -74,7 +189,7 @@ void AssemblyHelpers::jitAssertIsInt32(GPRReg gpr)
{
#if CPU(X86_64)
Jump checkInt32 = branch64(BelowOrEqual, gpr, TrustedImm64(static_cast<uintptr_t>(0xFFFFFFFFu)));
- breakpoint();
+ abortWithReason(AHIsNotInt32);
checkInt32.link(this);
#else
UNUSED_PARAM(gpr);
@@ -84,14 +199,14 @@ void AssemblyHelpers::jitAssertIsInt32(GPRReg gpr)
void AssemblyHelpers::jitAssertIsJSInt32(GPRReg gpr)
{
Jump checkJSInt32 = branch64(AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
- breakpoint();
+ abortWithReason(AHIsNotJSInt32);
checkJSInt32.link(this);
}
void AssemblyHelpers::jitAssertIsJSNumber(GPRReg gpr)
{
Jump checkJSNumber = branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
- breakpoint();
+ abortWithReason(AHIsNotJSNumber);
checkJSNumber.link(this);
}
@@ -100,16 +215,28 @@ void AssemblyHelpers::jitAssertIsJSDouble(GPRReg gpr)
Jump checkJSInt32 = branch64(AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
Jump checkJSNumber = branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
checkJSInt32.link(this);
- breakpoint();
+ abortWithReason(AHIsNotJSDouble);
checkJSNumber.link(this);
}
void AssemblyHelpers::jitAssertIsCell(GPRReg gpr)
{
Jump checkCell = branchTest64(MacroAssembler::Zero, gpr, GPRInfo::tagMaskRegister);
- breakpoint();
+ abortWithReason(AHIsNotCell);
checkCell.link(this);
}
+
+void AssemblyHelpers::jitAssertTagsInPlace()
+{
+ Jump ok = branch64(Equal, GPRInfo::tagTypeNumberRegister, TrustedImm64(TagTypeNumber));
+ abortWithReason(AHTagTypeNumberNotInPlace);
+ breakpoint();
+ ok.link(this);
+
+ ok = branch64(Equal, GPRInfo::tagMaskRegister, TrustedImm64(TagMask));
+ abortWithReason(AHTagMaskNotInPlace);
+ ok.link(this);
+}
#elif USE(JSVALUE32_64)
void AssemblyHelpers::jitAssertIsInt32(GPRReg gpr)
{
@@ -119,7 +246,7 @@ void AssemblyHelpers::jitAssertIsInt32(GPRReg gpr)
void AssemblyHelpers::jitAssertIsJSInt32(GPRReg gpr)
{
Jump checkJSInt32 = branch32(Equal, gpr, TrustedImm32(JSValue::Int32Tag));
- breakpoint();
+ abortWithReason(AHIsNotJSInt32);
checkJSInt32.link(this);
}
@@ -127,7 +254,7 @@ void AssemblyHelpers::jitAssertIsJSNumber(GPRReg gpr)
{
Jump checkJSInt32 = branch32(Equal, gpr, TrustedImm32(JSValue::Int32Tag));
Jump checkJSDouble = branch32(Below, gpr, TrustedImm32(JSValue::LowestTag));
- breakpoint();
+ abortWithReason(AHIsNotJSNumber);
checkJSInt32.link(this);
checkJSDouble.link(this);
}
@@ -135,33 +262,510 @@ void AssemblyHelpers::jitAssertIsJSNumber(GPRReg gpr)
void AssemblyHelpers::jitAssertIsJSDouble(GPRReg gpr)
{
Jump checkJSDouble = branch32(Below, gpr, TrustedImm32(JSValue::LowestTag));
- breakpoint();
+ abortWithReason(AHIsNotJSDouble);
checkJSDouble.link(this);
}
void AssemblyHelpers::jitAssertIsCell(GPRReg gpr)
{
Jump checkCell = branch32(Equal, gpr, TrustedImm32(JSValue::CellTag));
- breakpoint();
+ abortWithReason(AHIsNotCell);
checkCell.link(this);
}
+
+void AssemblyHelpers::jitAssertTagsInPlace()
+{
+}
#endif // USE(JSVALUE32_64)
void AssemblyHelpers::jitAssertHasValidCallFrame()
{
Jump checkCFR = branchTestPtr(Zero, GPRInfo::callFrameRegister, TrustedImm32(7));
- breakpoint();
+ abortWithReason(AHCallFrameMisaligned);
checkCFR.link(this);
}
void AssemblyHelpers::jitAssertIsNull(GPRReg gpr)
{
Jump checkNull = branchTestPtr(Zero, gpr);
- breakpoint();
+ abortWithReason(AHIsNotNull);
checkNull.link(this);
}
+
+void AssemblyHelpers::jitAssertArgumentCountSane()
+{
+ Jump ok = branch32(Below, payloadFor(CallFrameSlot::argumentCount), TrustedImm32(10000000));
+ abortWithReason(AHInsaneArgumentCount);
+ ok.link(this);
+}
+
#endif // !ASSERT_DISABLED
+void AssemblyHelpers::jitReleaseAssertNoException()
+{
+ Jump noException;
+#if USE(JSVALUE64)
+ noException = branchTest64(Zero, AbsoluteAddress(vm()->addressOfException()));
+#elif USE(JSVALUE32_64)
+ noException = branch32(Equal, AbsoluteAddress(vm()->addressOfException()), TrustedImm32(0));
+#endif
+ abortWithReason(JITUncoughtExceptionAfterCall);
+ noException.link(this);
+}
+
+void AssemblyHelpers::callExceptionFuzz()
+{
+ if (!Options::useExceptionFuzz())
+ return;
+
+ EncodedJSValue* buffer = vm()->exceptionFuzzingBuffer(sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters));
+
+ for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
+#if USE(JSVALUE64)
+ store64(GPRInfo::toRegister(i), buffer + i);
+#else
+ store32(GPRInfo::toRegister(i), buffer + i);
+#endif
+ }
+ for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
+ move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
+ storeDouble(FPRInfo::toRegister(i), Address(GPRInfo::regT0));
+ }
+
+ // Set up one argument.
+#if CPU(X86)
+ poke(GPRInfo::callFrameRegister, 0);
+#else
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+#endif
+ move(TrustedImmPtr(bitwise_cast<void*>(operationExceptionFuzz)), GPRInfo::nonPreservedNonReturnGPR);
+ call(GPRInfo::nonPreservedNonReturnGPR);
+
+ for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
+ move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
+ loadDouble(Address(GPRInfo::regT0), FPRInfo::toRegister(i));
+ }
+ for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
+#if USE(JSVALUE64)
+ load64(buffer + i, GPRInfo::toRegister(i));
+#else
+ load32(buffer + i, GPRInfo::toRegister(i));
+#endif
+ }
+}
+
+AssemblyHelpers::Jump AssemblyHelpers::emitJumpIfException()
+{
+ return emitExceptionCheck(NormalExceptionCheck);
+}
+
+AssemblyHelpers::Jump AssemblyHelpers::emitExceptionCheck(ExceptionCheckKind kind, ExceptionJumpWidth width)
+{
+ callExceptionFuzz();
+
+ if (width == FarJumpWidth)
+ kind = (kind == NormalExceptionCheck ? InvertedExceptionCheck : NormalExceptionCheck);
+
+ Jump result;
+#if USE(JSVALUE64)
+ result = branchTest64(kind == NormalExceptionCheck ? NonZero : Zero, AbsoluteAddress(vm()->addressOfException()));
+#elif USE(JSVALUE32_64)
+ result = branch32(kind == NormalExceptionCheck ? NotEqual : Equal, AbsoluteAddress(vm()->addressOfException()), TrustedImm32(0));
+#endif
+
+ if (width == NormalJumpWidth)
+ return result;
+
+ PatchableJump realJump = patchableJump();
+ result.link(this);
+
+ return realJump.m_jump;
+}
+
+AssemblyHelpers::Jump AssemblyHelpers::emitNonPatchableExceptionCheck()
+{
+ callExceptionFuzz();
+
+ Jump result;
+#if USE(JSVALUE64)
+ result = branchTest64(NonZero, AbsoluteAddress(vm()->addressOfException()));
+#elif USE(JSVALUE32_64)
+ result = branch32(NotEqual, AbsoluteAddress(vm()->addressOfException()), TrustedImm32(0));
+#endif
+
+ return result;
+}
+
+void AssemblyHelpers::emitStoreStructureWithTypeInfo(AssemblyHelpers& jit, TrustedImmPtr structure, RegisterID dest)
+{
+ const Structure* structurePtr = static_cast<const Structure*>(structure.m_value);
+#if USE(JSVALUE64)
+ jit.store64(TrustedImm64(structurePtr->idBlob()), MacroAssembler::Address(dest, JSCell::structureIDOffset()));
+ if (!ASSERT_DISABLED) {
+ Jump correctStructure = jit.branch32(Equal, MacroAssembler::Address(dest, JSCell::structureIDOffset()), TrustedImm32(structurePtr->id()));
+ jit.abortWithReason(AHStructureIDIsValid);
+ correctStructure.link(&jit);
+
+ Jump correctIndexingType = jit.branch8(Equal, MacroAssembler::Address(dest, JSCell::indexingTypeAndMiscOffset()), TrustedImm32(structurePtr->indexingTypeIncludingHistory()));
+ jit.abortWithReason(AHIndexingTypeIsValid);
+ correctIndexingType.link(&jit);
+
+ Jump correctType = jit.branch8(Equal, MacroAssembler::Address(dest, JSCell::typeInfoTypeOffset()), TrustedImm32(structurePtr->typeInfo().type()));
+ jit.abortWithReason(AHTypeInfoIsValid);
+ correctType.link(&jit);
+
+ Jump correctFlags = jit.branch8(Equal, MacroAssembler::Address(dest, JSCell::typeInfoFlagsOffset()), TrustedImm32(structurePtr->typeInfo().inlineTypeFlags()));
+ jit.abortWithReason(AHTypeInfoInlineTypeFlagsAreValid);
+ correctFlags.link(&jit);
+ }
+#else
+ // Do a 32-bit wide store to initialize the cell's fields.
+ jit.store32(TrustedImm32(structurePtr->objectInitializationBlob()), MacroAssembler::Address(dest, JSCell::indexingTypeAndMiscOffset()));
+ jit.storePtr(structure, MacroAssembler::Address(dest, JSCell::structureIDOffset()));
+#endif
+}
+
+void AssemblyHelpers::loadProperty(GPRReg object, GPRReg offset, JSValueRegs result)
+{
+ Jump isInline = branch32(LessThan, offset, TrustedImm32(firstOutOfLineOffset));
+
+ loadPtr(Address(object, JSObject::butterflyOffset()), result.payloadGPR());
+ neg32(offset);
+ signExtend32ToPtr(offset, offset);
+ Jump ready = jump();
+
+ isInline.link(this);
+ addPtr(
+ TrustedImm32(
+ static_cast<int32_t>(sizeof(JSObject)) -
+ (static_cast<int32_t>(firstOutOfLineOffset) - 2) * static_cast<int32_t>(sizeof(EncodedJSValue))),
+ object, result.payloadGPR());
+
+ ready.link(this);
+
+ loadValue(
+ BaseIndex(
+ result.payloadGPR(), offset, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)),
+ result);
+}
+
+void AssemblyHelpers::emitLoadStructure(RegisterID source, RegisterID dest, RegisterID scratch)
+{
+#if USE(JSVALUE64)
+ ASSERT(dest != scratch);
+ load32(MacroAssembler::Address(source, JSCell::structureIDOffset()), dest);
+ loadPtr(vm()->heap.structureIDTable().base(), scratch);
+ loadPtr(MacroAssembler::BaseIndex(scratch, dest, MacroAssembler::TimesEight), dest);
+#else
+ UNUSED_PARAM(scratch);
+ loadPtr(MacroAssembler::Address(source, JSCell::structureIDOffset()), dest);
+#endif
+}
+
+void AssemblyHelpers::makeSpaceOnStackForCCall()
+{
+ unsigned stackOffset = WTF::roundUpToMultipleOf(stackAlignmentBytes(), maxFrameExtentForSlowPathCall);
+ if (stackOffset)
+ subPtr(TrustedImm32(stackOffset), stackPointerRegister);
+}
+
+void AssemblyHelpers::reclaimSpaceOnStackForCCall()
+{
+ unsigned stackOffset = WTF::roundUpToMultipleOf(stackAlignmentBytes(), maxFrameExtentForSlowPathCall);
+ if (stackOffset)
+ addPtr(TrustedImm32(stackOffset), stackPointerRegister);
+}
+
+#if USE(JSVALUE64)
+template<typename LoadFromHigh, typename StoreToHigh, typename LoadFromLow, typename StoreToLow>
+void emitRandomThunkImpl(AssemblyHelpers& jit, GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, FPRReg result, const LoadFromHigh& loadFromHigh, const StoreToHigh& storeToHigh, const LoadFromLow& loadFromLow, const StoreToLow& storeToLow)
+{
+ // Inlined WeakRandom::advance().
+ // uint64_t x = m_low;
+ loadFromLow(scratch0);
+ // uint64_t y = m_high;
+ loadFromHigh(scratch1);
+ // m_low = y;
+ storeToLow(scratch1);
+
+ // x ^= x << 23;
+ jit.move(scratch0, scratch2);
+ jit.lshift64(AssemblyHelpers::TrustedImm32(23), scratch2);
+ jit.xor64(scratch2, scratch0);
+
+ // x ^= x >> 17;
+ jit.move(scratch0, scratch2);
+ jit.rshift64(AssemblyHelpers::TrustedImm32(17), scratch2);
+ jit.xor64(scratch2, scratch0);
+
+ // x ^= y ^ (y >> 26);
+ jit.move(scratch1, scratch2);
+ jit.rshift64(AssemblyHelpers::TrustedImm32(26), scratch2);
+ jit.xor64(scratch1, scratch2);
+ jit.xor64(scratch2, scratch0);
+
+ // m_high = x;
+ storeToHigh(scratch0);
+
+ // return x + y;
+ jit.add64(scratch1, scratch0);
+
+ // Extract random 53bit. [0, 53] bit is safe integer number ranges in double representation.
+ jit.move(AssemblyHelpers::TrustedImm64((1ULL << 53) - 1), scratch1);
+ jit.and64(scratch1, scratch0);
+ // Now, scratch0 is always in range of int64_t. Safe to convert it to double with cvtsi2sdq.
+ jit.convertInt64ToDouble(scratch0, result);
+
+ // Convert `(53bit double integer value) / (1 << 53)` to `(53bit double integer value) * (1.0 / (1 << 53))`.
+ // In latter case, `1.0 / (1 << 53)` will become a double value represented as (mantissa = 0 & exp = 970, it means 1e-(2**54)).
+ static const double scale = 1.0 / (1ULL << 53);
+
+ // Multiplying 1e-(2**54) with the double integer does not change anything of the mantissa part of the double integer.
+ // It just reduces the exp part of the given 53bit double integer.
+ // (Except for 0.0. This is specially handled and in this case, exp just becomes 0.)
+ // Now we get 53bit precision random double value in [0, 1).
+ jit.move(AssemblyHelpers::TrustedImmPtr(&scale), scratch1);
+ jit.mulDouble(AssemblyHelpers::Address(scratch1), result);
+}
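
As a reference for the thunk above, here is the same xorshift128+ update and 53-bit scaling written as plain C++, transcribed from the inline comments; WeakRandomSketch is a hypothetical stand-in, not the actual WeakRandom class:

#include <cstdint>

struct WeakRandomSketch {
    uint64_t low;
    uint64_t high;

    double nextDouble()
    {
        // xorshift128+ step, mirroring the commented emitter code above.
        uint64_t x = low;
        uint64_t y = high;
        low = y;
        x ^= x << 23;
        x ^= x >> 17;
        x ^= y ^ (y >> 26);
        high = x;
        uint64_t sum = x + y;
        // Keep the low 53 bits so the integer is exactly representable as a
        // double, then scale by 2^-53 to land in [0, 1).
        return static_cast<double>(sum & ((1ULL << 53) - 1)) * (1.0 / (1ULL << 53));
    }
};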
+
+void AssemblyHelpers::emitRandomThunk(JSGlobalObject* globalObject, GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, FPRReg result)
+{
+ void* lowAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::lowOffset();
+ void* highAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::highOffset();
+
+ auto loadFromHigh = [&](GPRReg high) {
+ load64(highAddress, high);
+ };
+ auto storeToHigh = [&](GPRReg high) {
+ store64(high, highAddress);
+ };
+ auto loadFromLow = [&](GPRReg low) {
+ load64(lowAddress, low);
+ };
+ auto storeToLow = [&](GPRReg low) {
+ store64(low, lowAddress);
+ };
+
+ emitRandomThunkImpl(*this, scratch0, scratch1, scratch2, result, loadFromHigh, storeToHigh, loadFromLow, storeToLow);
+}
+
+void AssemblyHelpers::emitRandomThunk(GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, GPRReg scratch3, FPRReg result)
+{
+ emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, scratch3);
+ emitLoadStructure(scratch3, scratch3, scratch0);
+ loadPtr(Address(scratch3, Structure::globalObjectOffset()), scratch3);
+ // Now, scratch3 holds JSGlobalObject*.
+
+ auto loadFromHigh = [&](GPRReg high) {
+ load64(Address(scratch3, JSGlobalObject::weakRandomOffset() + WeakRandom::highOffset()), high);
+ };
+ auto storeToHigh = [&](GPRReg high) {
+ store64(high, Address(scratch3, JSGlobalObject::weakRandomOffset() + WeakRandom::highOffset()));
+ };
+ auto loadFromLow = [&](GPRReg low) {
+ load64(Address(scratch3, JSGlobalObject::weakRandomOffset() + WeakRandom::lowOffset()), low);
+ };
+ auto storeToLow = [&](GPRReg low) {
+ store64(low, Address(scratch3, JSGlobalObject::weakRandomOffset() + WeakRandom::lowOffset()));
+ };
+
+ emitRandomThunkImpl(*this, scratch0, scratch1, scratch2, result, loadFromHigh, storeToHigh, loadFromLow, storeToLow);
+}
+#endif
+
+void AssemblyHelpers::restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer()
+{
+#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
+ RegisterAtOffsetList* allCalleeSaves = m_vm->getAllCalleeSaveRegisterOffsets();
+ RegisterSet dontRestoreRegisters = RegisterSet::stackRegisters();
+ unsigned registerCount = allCalleeSaves->size();
+
+ GPRReg scratch = InvalidGPRReg;
+ unsigned scratchGPREntryIndex = 0;
+
+ // Use the first GPR entry's register as our scratch.
+ for (unsigned i = 0; i < registerCount; i++) {
+ RegisterAtOffset entry = allCalleeSaves->at(i);
+ if (dontRestoreRegisters.get(entry.reg()))
+ continue;
+ if (entry.reg().isGPR()) {
+ scratchGPREntryIndex = i;
+ scratch = entry.reg().gpr();
+ break;
+ }
+ }
+ ASSERT(scratch != InvalidGPRReg);
+
+ loadPtr(&m_vm->topVMEntryFrame, scratch);
+ addPtr(TrustedImm32(VMEntryFrame::calleeSaveRegistersBufferOffset()), scratch);
+
+ // Restore all callee saves except for the scratch.
+ for (unsigned i = 0; i < registerCount; i++) {
+ RegisterAtOffset entry = allCalleeSaves->at(i);
+ if (dontRestoreRegisters.get(entry.reg()))
+ continue;
+ if (entry.reg().isGPR()) {
+ if (i != scratchGPREntryIndex)
+ loadPtr(Address(scratch, entry.offset()), entry.reg().gpr());
+ } else
+ loadDouble(Address(scratch, entry.offset()), entry.reg().fpr());
+ }
+
+ // Restore the callee save value of the scratch.
+ RegisterAtOffset entry = allCalleeSaves->at(scratchGPREntryIndex);
+ ASSERT(!dontRestoreRegisters.get(entry.reg()));
+ ASSERT(entry.reg().isGPR());
+ ASSERT(scratch == entry.reg().gpr());
+ loadPtr(Address(scratch, entry.offset()), scratch);
+#endif
+}
+
+void AssemblyHelpers::emitDumbVirtualCall(CallLinkInfo* info)
+{
+ move(TrustedImmPtr(info), GPRInfo::regT2);
+ Call call = nearCall();
+ addLinkTask(
+ [=] (LinkBuffer& linkBuffer) {
+ MacroAssemblerCodeRef virtualThunk = virtualThunkFor(&linkBuffer.vm(), *info);
+ info->setSlowStub(createJITStubRoutine(virtualThunk, linkBuffer.vm(), nullptr, true));
+ linkBuffer.link(call, CodeLocationLabel(virtualThunk.code()));
+ });
+}
+
+#if USE(JSVALUE64)
+void AssemblyHelpers::wangsInt64Hash(GPRReg inputAndResult, GPRReg scratch)
+{
+ GPRReg input = inputAndResult;
+ // key += ~(key << 32);
+ move(input, scratch);
+ lshift64(TrustedImm32(32), scratch);
+ not64(scratch);
+ add64(scratch, input);
+ // key ^= (key >> 22);
+ move(input, scratch);
+ urshift64(TrustedImm32(22), scratch);
+ xor64(scratch, input);
+ // key += ~(key << 13);
+ move(input, scratch);
+ lshift64(TrustedImm32(13), scratch);
+ not64(scratch);
+ add64(scratch, input);
+ // key ^= (key >> 8);
+ move(input, scratch);
+ urshift64(TrustedImm32(8), scratch);
+ xor64(scratch, input);
+ // key += (key << 3);
+ move(input, scratch);
+ lshift64(TrustedImm32(3), scratch);
+ add64(scratch, input);
+ // key ^= (key >> 15);
+ move(input, scratch);
+ urshift64(TrustedImm32(15), scratch);
+ xor64(scratch, input);
+ // key += ~(key << 27);
+ move(input, scratch);
+ lshift64(TrustedImm32(27), scratch);
+ not64(scratch);
+ add64(scratch, input);
+ // key ^= (key >> 31);
+ move(input, scratch);
+ urshift64(TrustedImm32(31), scratch);
+ xor64(scratch, input);
+
+ // return static_cast<unsigned>(result)
+ void* mask = bitwise_cast<void*>(static_cast<uintptr_t>(UINT_MAX));
+ and64(TrustedImmPtr(mask), inputAndResult);
+}
+#endif // USE(JSVALUE64)
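
For reference, the same mix written as ordinary C++, a sketch that simply transcribes the step-by-step comments above; wangsInt64HashSketch is a hypothetical name:

#include <cstdint>

static inline unsigned wangsInt64HashSketch(uint64_t key)
{
    // Thomas Wang's 64-bit-to-32-bit hash, step for step as emitted above.
    key += ~(key << 32);
    key ^= (key >> 22);
    key += ~(key << 13);
    key ^= (key >> 8);
    key += (key << 3);
    key ^= (key >> 15);
    key += ~(key << 27);
    key ^= (key >> 31);
    return static_cast<unsigned>(key);
}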
+
+void AssemblyHelpers::emitConvertValueToBoolean(JSValueRegs value, GPRReg result, GPRReg scratch, FPRReg valueAsFPR, FPRReg tempFPR, bool shouldCheckMasqueradesAsUndefined, JSGlobalObject* globalObject, bool negateResult)
+{
+ // Implements the following control flow structure:
+ // if (value is boolean) {
+ // result = value === true
+ // } else if (value is integer) {
+ // result = value !== 0
+ // } else if (value is double) {
+ // result = value !== 0.0 && !isNaN(value);
+ // } else if (value is cell) {
+ // if (value is string) {
+ // result = value.length() !== 0;
+ // } else {
+ // do crazy things for masquerades as undefined
+ // }
+ // } else {
+ // result = false;
+ // }
+ //
+ // if (negateResult)
+ // result = !result;
+
+ JumpList done;
+ auto notBoolean = branchIfNotBoolean(value, result);
+#if USE(JSVALUE64)
+ compare32(negateResult ? NotEqual : Equal, value.gpr(), TrustedImm32(ValueTrue), result);
+#else
+ compare32(negateResult ? Equal : NotEqual, value.payloadGPR(), TrustedImm32(0), result);
+#endif
+ done.append(jump());
+
+ notBoolean.link(this);
+#if USE(JSVALUE64)
+ auto isNotNumber = branchIfNotNumber(value.gpr());
+#else
+ ASSERT(scratch != InvalidGPRReg);
+ auto isNotNumber = branchIfNotNumber(value, scratch);
+#endif
+ auto isDouble = branchIfNotInt32(value);
+
+ // It's an int32.
+ compare32(negateResult ? Equal : NotEqual, value.payloadGPR(), TrustedImm32(0), result);
+ done.append(jump());
+
+ isDouble.link(this);
+#if USE(JSVALUE64)
+ unboxDouble(value.gpr(), result, valueAsFPR);
+#else
+ unboxDouble(value, valueAsFPR, tempFPR);
+#endif
+ auto isZeroOrNaN = branchDoubleZeroOrNaN(valueAsFPR, tempFPR);
+ move(negateResult ? TrustedImm32(0) : TrustedImm32(1), result);
+ done.append(jump());
+ isZeroOrNaN.link(this);
+ move(negateResult ? TrustedImm32(1) : TrustedImm32(0), result);
+ done.append(jump());
+
+ isNotNumber.link(this);
+ auto isNotCellAndIsNotNumberAndIsNotBoolean = branchIfNotCell(value);
+ auto isCellButNotString = branch8(NotEqual,
+ Address(value.payloadGPR(), JSCell::typeInfoTypeOffset()), TrustedImm32(StringType));
+ load32(Address(value.payloadGPR(), JSString::offsetOfLength()), result);
+ compare32(negateResult ? Equal : NotEqual, result, TrustedImm32(0), result);
+ done.append(jump());
+
+ isCellButNotString.link(this);
+ if (shouldCheckMasqueradesAsUndefined) {
+ ASSERT(scratch != InvalidGPRReg);
+ JumpList isNotMasqueradesAsUndefined;
+ isNotMasqueradesAsUndefined.append(branchTest8(Zero, Address(value.payloadGPR(), JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)));
+ emitLoadStructure(value.payloadGPR(), result, scratch);
+ move(TrustedImmPtr(globalObject), scratch);
+ isNotMasqueradesAsUndefined.append(branchPtr(NotEqual, Address(result, Structure::globalObjectOffset()), scratch));
+ // We act like we are "undefined" here.
+ move(negateResult ? TrustedImm32(1) : TrustedImm32(0), result);
+ done.append(jump());
+ isNotMasqueradesAsUndefined.link(this);
+ }
+ move(negateResult ? TrustedImm32(0) : TrustedImm32(1), result);
+ done.append(jump());
+
+ // null or undefined.
+ isNotCellAndIsNotNumberAndIsNotBoolean.link(this);
+ move(negateResult ? TrustedImm32(1) : TrustedImm32(0), result);
+
+ done.link(this);
+}
+
} // namespace JSC
#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/AssemblyHelpers.h b/Source/JavaScriptCore/jit/AssemblyHelpers.h
index 36d583139..49c7f9c87 100644
--- a/Source/JavaScriptCore/jit/AssemblyHelpers.h
+++ b/Source/JavaScriptCore/jit/AssemblyHelpers.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,18 +23,23 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef AssemblyHelpers_h
-#define AssemblyHelpers_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(JIT)
#include "CodeBlock.h"
#include "FPRInfo.h"
#include "GPRInfo.h"
+#include "Heap.h"
+#include "InlineCallFrame.h"
#include "JITCode.h"
#include "MacroAssembler.h"
+#include "MarkedSpace.h"
+#include "MaxFrameExtentForSlowPathCall.h"
+#include "RegisterAtOffsetList.h"
+#include "RegisterSet.h"
+#include "SuperSampler.h"
+#include "TypeofType.h"
#include "VM.h"
namespace JSC {
@@ -54,12 +59,395 @@ public:
ASSERT(m_baselineCodeBlock->jitType() == JITCode::None || JITCode::isBaselineCode(m_baselineCodeBlock->jitType()));
}
}
-
+
CodeBlock* codeBlock() { return m_codeBlock; }
VM* vm() { return m_vm; }
AssemblerType_T& assembler() { return m_assembler; }
+
+ void checkStackPointerAlignment()
+ {
+ // This check is both unneeded and harder to write correctly for ARM64
+#if !defined(NDEBUG) && !CPU(ARM64)
+ Jump stackPointerAligned = branchTestPtr(Zero, stackPointerRegister, TrustedImm32(0xf));
+ abortWithReason(AHStackPointerMisaligned);
+ stackPointerAligned.link(this);
+#endif
+ }
+
+ template<typename T>
+ void storeCell(T cell, Address address)
+ {
+#if USE(JSVALUE64)
+ store64(cell, address);
+#else
+ store32(cell, address.withOffset(PayloadOffset));
+ store32(TrustedImm32(JSValue::CellTag), address.withOffset(TagOffset));
+#endif
+ }
+
+ void loadCell(Address address, GPRReg gpr)
+ {
+#if USE(JSVALUE64)
+ load64(address, gpr);
+#else
+ load32(address.withOffset(PayloadOffset), gpr);
+#endif
+ }
+
+ void storeValue(JSValueRegs regs, Address address)
+ {
+#if USE(JSVALUE64)
+ store64(regs.gpr(), address);
+#else
+ store32(regs.payloadGPR(), address.withOffset(PayloadOffset));
+ store32(regs.tagGPR(), address.withOffset(TagOffset));
+#endif
+ }
+
+ void storeValue(JSValueRegs regs, BaseIndex address)
+ {
+#if USE(JSVALUE64)
+ store64(regs.gpr(), address);
+#else
+ store32(regs.payloadGPR(), address.withOffset(PayloadOffset));
+ store32(regs.tagGPR(), address.withOffset(TagOffset));
+#endif
+ }
+
+ void storeValue(JSValueRegs regs, void* address)
+ {
+#if USE(JSVALUE64)
+ store64(regs.gpr(), address);
+#else
+ store32(regs.payloadGPR(), bitwise_cast<void*>(bitwise_cast<uintptr_t>(address) + PayloadOffset));
+ store32(regs.tagGPR(), bitwise_cast<void*>(bitwise_cast<uintptr_t>(address) + TagOffset));
+#endif
+ }
+
+ void loadValue(Address address, JSValueRegs regs)
+ {
+#if USE(JSVALUE64)
+ load64(address, regs.gpr());
+#else
+ if (address.base == regs.payloadGPR()) {
+ load32(address.withOffset(TagOffset), regs.tagGPR());
+ load32(address.withOffset(PayloadOffset), regs.payloadGPR());
+ } else {
+ load32(address.withOffset(PayloadOffset), regs.payloadGPR());
+ load32(address.withOffset(TagOffset), regs.tagGPR());
+ }
+#endif
+ }
+
+ void loadValue(BaseIndex address, JSValueRegs regs)
+ {
+#if USE(JSVALUE64)
+ load64(address, regs.gpr());
+#else
+ if (address.base == regs.payloadGPR() || address.index == regs.payloadGPR()) {
+ // We actually could handle the case where the registers are aliased to both
+ // tag and payload, but we don't for now.
+ RELEASE_ASSERT(address.base != regs.tagGPR());
+ RELEASE_ASSERT(address.index != regs.tagGPR());
+
+ load32(address.withOffset(TagOffset), regs.tagGPR());
+ load32(address.withOffset(PayloadOffset), regs.payloadGPR());
+ } else {
+ load32(address.withOffset(PayloadOffset), regs.payloadGPR());
+ load32(address.withOffset(TagOffset), regs.tagGPR());
+ }
+#endif
+ }
+
+ void loadValue(void* address, JSValueRegs regs)
+ {
+#if USE(JSVALUE64)
+ load64(address, regs.gpr());
+#else
+ load32(bitwise_cast<void*>(bitwise_cast<uintptr_t>(address) + PayloadOffset), regs.payloadGPR());
+ load32(bitwise_cast<void*>(bitwise_cast<uintptr_t>(address) + TagOffset), regs.tagGPR());
+#endif
+ }
+ // Note that this clobbers offset.
+ void loadProperty(GPRReg object, GPRReg offset, JSValueRegs result);
+
+ void moveValueRegs(JSValueRegs srcRegs, JSValueRegs destRegs)
+ {
+#if USE(JSVALUE32_64)
+ if (destRegs.tagGPR() == srcRegs.payloadGPR()) {
+ if (destRegs.payloadGPR() == srcRegs.tagGPR()) {
+ swap(srcRegs.payloadGPR(), srcRegs.tagGPR());
+ return;
+ }
+ move(srcRegs.payloadGPR(), destRegs.payloadGPR());
+ move(srcRegs.tagGPR(), destRegs.tagGPR());
+ return;
+ }
+ move(srcRegs.tagGPR(), destRegs.tagGPR());
+ move(srcRegs.payloadGPR(), destRegs.payloadGPR());
+#else
+ move(srcRegs.gpr(), destRegs.gpr());
+#endif
+ }
+
+ void moveValue(JSValue value, JSValueRegs regs)
+ {
+#if USE(JSVALUE64)
+ move(Imm64(JSValue::encode(value)), regs.gpr());
+#else
+ move(Imm32(value.tag()), regs.tagGPR());
+ move(Imm32(value.payload()), regs.payloadGPR());
+#endif
+ }
+
+ void moveTrustedValue(JSValue value, JSValueRegs regs)
+ {
+#if USE(JSVALUE64)
+ move(TrustedImm64(JSValue::encode(value)), regs.gpr());
+#else
+ move(TrustedImm32(value.tag()), regs.tagGPR());
+ move(TrustedImm32(value.payload()), regs.payloadGPR());
+#endif
+ }
+
+ void storeTrustedValue(JSValue value, Address address)
+ {
+#if USE(JSVALUE64)
+ store64(TrustedImm64(JSValue::encode(value)), address);
+#else
+ store32(TrustedImm32(value.tag()), address.withOffset(TagOffset));
+ store32(TrustedImm32(value.payload()), address.withOffset(PayloadOffset));
+#endif
+ }
+
+ void storeTrustedValue(JSValue value, BaseIndex address)
+ {
+#if USE(JSVALUE64)
+ store64(TrustedImm64(JSValue::encode(value)), address);
+#else
+ store32(TrustedImm32(value.tag()), address.withOffset(TagOffset));
+ store32(TrustedImm32(value.payload()), address.withOffset(PayloadOffset));
+#endif
+ }
+
+ void emitSaveCalleeSavesFor(CodeBlock* codeBlock)
+ {
+ ASSERT(codeBlock);
+
+ RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters();
+ RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());
+ unsigned registerCount = calleeSaves->size();
+
+ for (unsigned i = 0; i < registerCount; i++) {
+ RegisterAtOffset entry = calleeSaves->at(i);
+ if (dontSaveRegisters.get(entry.reg()))
+ continue;
+ storePtr(entry.reg().gpr(), Address(framePointerRegister, entry.offset()));
+ }
+ }
+
+ enum RestoreTagRegisterMode { UseExistingTagRegisterContents, CopyBaselineCalleeSavedRegistersFromBaseFrame };
+
+ void emitSaveOrCopyCalleeSavesFor(CodeBlock* codeBlock, VirtualRegister offsetVirtualRegister, RestoreTagRegisterMode tagRegisterMode, GPRReg temp)
+ {
+ ASSERT(codeBlock);
+
+ RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters();
+ RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());
+ unsigned registerCount = calleeSaves->size();
+
+#if USE(JSVALUE64)
+ RegisterSet baselineCalleeSaves = RegisterSet::llintBaselineCalleeSaveRegisters();
+#endif
+
+ for (unsigned i = 0; i < registerCount; i++) {
+ RegisterAtOffset entry = calleeSaves->at(i);
+ if (dontSaveRegisters.get(entry.reg()))
+ continue;
+
+ GPRReg registerToWrite;
+
+#if USE(JSVALUE32_64)
+ UNUSED_PARAM(tagRegisterMode);
+ UNUSED_PARAM(temp);
+#else
+ if (tagRegisterMode == CopyBaselineCalleeSavedRegistersFromBaseFrame && baselineCalleeSaves.get(entry.reg())) {
+ registerToWrite = temp;
+ loadPtr(AssemblyHelpers::Address(GPRInfo::callFrameRegister, entry.offset()), registerToWrite);
+ } else
+#endif
+ registerToWrite = entry.reg().gpr();
+
+ storePtr(registerToWrite, Address(framePointerRegister, offsetVirtualRegister.offsetInBytes() + entry.offset()));
+ }
+ }
+
+ void emitRestoreCalleeSavesFor(CodeBlock* codeBlock)
+ {
+ ASSERT(codeBlock);
+
+ RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters();
+ RegisterSet dontRestoreRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());
+ unsigned registerCount = calleeSaves->size();
+
+ for (unsigned i = 0; i < registerCount; i++) {
+ RegisterAtOffset entry = calleeSaves->at(i);
+ if (dontRestoreRegisters.get(entry.reg()))
+ continue;
+ loadPtr(Address(framePointerRegister, entry.offset()), entry.reg().gpr());
+ }
+ }
+
+ void emitSaveCalleeSaves()
+ {
+ emitSaveCalleeSavesFor(codeBlock());
+ }
+
+ void emitSaveThenMaterializeTagRegisters()
+ {
+#if USE(JSVALUE64)
+#if CPU(ARM64)
+ pushPair(GPRInfo::tagTypeNumberRegister, GPRInfo::tagMaskRegister);
+#else
+ push(GPRInfo::tagTypeNumberRegister);
+ push(GPRInfo::tagMaskRegister);
+#endif
+ emitMaterializeTagCheckRegisters();
+#endif
+ }
+
+ void emitRestoreCalleeSaves()
+ {
+ emitRestoreCalleeSavesFor(codeBlock());
+ }
+
+ void emitRestoreSavedTagRegisters()
+ {
+#if USE(JSVALUE64)
+#if CPU(ARM64)
+ popPair(GPRInfo::tagTypeNumberRegister, GPRInfo::tagMaskRegister);
+#else
+ pop(GPRInfo::tagMaskRegister);
+ pop(GPRInfo::tagTypeNumberRegister);
+#endif
+#endif
+ }
+
+ void copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(const TempRegisterSet& usedRegisters = { RegisterSet::stubUnavailableRegisters() })
+ {
+#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
+ GPRReg temp1 = usedRegisters.getFreeGPR(0);
+
+ loadPtr(&m_vm->topVMEntryFrame, temp1);
+ addPtr(TrustedImm32(VMEntryFrame::calleeSaveRegistersBufferOffset()), temp1);
+
+ RegisterAtOffsetList* allCalleeSaves = m_vm->getAllCalleeSaveRegisterOffsets();
+ RegisterSet dontCopyRegisters = RegisterSet::stackRegisters();
+ unsigned registerCount = allCalleeSaves->size();
+
+ for (unsigned i = 0; i < registerCount; i++) {
+ RegisterAtOffset entry = allCalleeSaves->at(i);
+ if (dontCopyRegisters.get(entry.reg()))
+ continue;
+ if (entry.reg().isGPR())
+ storePtr(entry.reg().gpr(), Address(temp1, entry.offset()));
+ else
+ storeDouble(entry.reg().fpr(), Address(temp1, entry.offset()));
+ }
+#else
+ UNUSED_PARAM(usedRegisters);
+#endif
+ }
+
+ void restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer();
+
+ void copyCalleeSavesFromFrameOrRegisterToVMEntryFrameCalleeSavesBuffer(const TempRegisterSet& usedRegisters = { RegisterSet::stubUnavailableRegisters() })
+ {
+#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
+ GPRReg temp1 = usedRegisters.getFreeGPR(0);
+ GPRReg temp2 = usedRegisters.getFreeGPR(1);
+ FPRReg fpTemp = usedRegisters.getFreeFPR();
+ ASSERT(temp2 != InvalidGPRReg);
+
+ ASSERT(codeBlock());
+
+ // Copy saved calleeSaves on stack or unsaved calleeSaves in register to vm calleeSave buffer
+ loadPtr(&m_vm->topVMEntryFrame, temp1);
+ addPtr(TrustedImm32(VMEntryFrame::calleeSaveRegistersBufferOffset()), temp1);
+
+ RegisterAtOffsetList* allCalleeSaves = m_vm->getAllCalleeSaveRegisterOffsets();
+ RegisterAtOffsetList* currentCalleeSaves = codeBlock()->calleeSaveRegisters();
+ RegisterSet dontCopyRegisters = RegisterSet::stackRegisters();
+ unsigned registerCount = allCalleeSaves->size();
+
+ for (unsigned i = 0; i < registerCount; i++) {
+ RegisterAtOffset vmEntry = allCalleeSaves->at(i);
+ if (dontCopyRegisters.get(vmEntry.reg()))
+ continue;
+ RegisterAtOffset* currentFrameEntry = currentCalleeSaves->find(vmEntry.reg());
+
+ if (vmEntry.reg().isGPR()) {
+ GPRReg regToStore;
+ if (currentFrameEntry) {
+ // Load calleeSave from stack into temp register
+ regToStore = temp2;
+ loadPtr(Address(framePointerRegister, currentFrameEntry->offset()), regToStore);
+ } else
+ // Just store callee save directly
+ regToStore = vmEntry.reg().gpr();
+
+ storePtr(regToStore, Address(temp1, vmEntry.offset()));
+ } else {
+ FPRReg fpRegToStore;
+ if (currentFrameEntry) {
+ // Load calleeSave from stack into temp register
+ fpRegToStore = fpTemp;
+ loadDouble(Address(framePointerRegister, currentFrameEntry->offset()), fpRegToStore);
+ } else
+ // Just store callee save directly
+ fpRegToStore = vmEntry.reg().fpr();
+
+ storeDouble(fpRegToStore, Address(temp1, vmEntry.offset()));
+ }
+ }
+#else
+ UNUSED_PARAM(usedRegisters);
+#endif
+ }
+
+ void emitMaterializeTagCheckRegisters()
+ {
+#if USE(JSVALUE64)
+ move(MacroAssembler::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister);
+ orPtr(MacroAssembler::TrustedImm32(TagBitTypeOther), GPRInfo::tagTypeNumberRegister, GPRInfo::tagMaskRegister);
+#endif
+ }
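
For context, a standalone sketch of the JSVALUE64 tag constants these two registers are expected to hold (values as defined in JSCJSValue.h; the Sketch suffix marks illustrative names):

#include <cstdint>

constexpr uint64_t TagTypeNumberSketch = 0xffff000000000000ull; // all bits set => boxed int32; any bit set => number
constexpr uint64_t TagBitTypeOtherSketch = 0x2;                 // set in null, undefined, true and false
constexpr uint64_t TagMaskSketch = TagTypeNumberSketch | TagBitTypeOtherSketch; // a cell has none of these bits set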
+
#if CPU(X86_64) || CPU(X86)
+ static size_t prologueStackPointerDelta()
+ {
+ // Prologue only saves the framePointerRegister
+ return sizeof(void*);
+ }
+
+ void emitFunctionPrologue()
+ {
+ push(framePointerRegister);
+ move(stackPointerRegister, framePointerRegister);
+ }
+
+ void emitFunctionEpilogueWithEmptyFrame()
+ {
+ pop(framePointerRegister);
+ }
+
+ void emitFunctionEpilogue()
+ {
+ move(framePointerRegister, stackPointerRegister);
+ pop(framePointerRegister);
+ }
+
void preserveReturnAddressAfterCall(GPRReg reg)
{
pop(reg);
@@ -77,6 +465,29 @@ public:
#endif // CPU(X86_64) || CPU(X86)
#if CPU(ARM) || CPU(ARM64)
+ static size_t prologueStackPointerDelta()
+ {
+ // Prologue saves the framePointerRegister and linkRegister
+ return 2 * sizeof(void*);
+ }
+
+ void emitFunctionPrologue()
+ {
+ pushPair(framePointerRegister, linkRegister);
+ move(stackPointerRegister, framePointerRegister);
+ }
+
+ void emitFunctionEpilogueWithEmptyFrame()
+ {
+ popPair(framePointerRegister, linkRegister);
+ }
+
+ void emitFunctionEpilogue()
+ {
+ move(framePointerRegister, stackPointerRegister);
+ emitFunctionEpilogueWithEmptyFrame();
+ }
+
ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg)
{
move(linkRegister, reg);
@@ -94,49 +505,65 @@ public:
#endif
#if CPU(MIPS)
- ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg)
+ static size_t prologueStackPointerDelta()
{
- move(returnAddressRegister, reg);
+ // Prologue saves the framePointerRegister and returnAddressRegister
+ return 2 * sizeof(void*);
}
- ALWAYS_INLINE void restoreReturnAddressBeforeReturn(RegisterID reg)
+ void emitFunctionPrologue()
{
- move(reg, returnAddressRegister);
+ pushPair(framePointerRegister, returnAddressRegister);
+ move(stackPointerRegister, framePointerRegister);
}
- ALWAYS_INLINE void restoreReturnAddressBeforeReturn(Address address)
+ void emitFunctionEpilogueWithEmptyFrame()
{
- loadPtr(address, returnAddressRegister);
+ popPair(framePointerRegister, returnAddressRegister);
+ }
+
+ void emitFunctionEpilogue()
+ {
+ move(framePointerRegister, stackPointerRegister);
+ emitFunctionEpilogueWithEmptyFrame();
}
-#endif
-#if CPU(SH4)
ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg)
{
- m_assembler.stspr(reg);
+ move(returnAddressRegister, reg);
}
ALWAYS_INLINE void restoreReturnAddressBeforeReturn(RegisterID reg)
{
- m_assembler.ldspr(reg);
+ move(reg, returnAddressRegister);
}
ALWAYS_INLINE void restoreReturnAddressBeforeReturn(Address address)
{
- loadPtrLinkReg(address);
+ loadPtr(address, returnAddressRegister);
}
#endif
- void emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry entry, GPRReg to)
+ void emitGetFromCallFrameHeaderPtr(int entry, GPRReg to, GPRReg from = GPRInfo::callFrameRegister)
+ {
+ loadPtr(Address(from, entry * sizeof(Register)), to);
+ }
+ void emitGetFromCallFrameHeader32(int entry, GPRReg to, GPRReg from = GPRInfo::callFrameRegister)
{
- loadPtr(Address(GPRInfo::callFrameRegister, entry * sizeof(Register)), to);
+ load32(Address(from, entry * sizeof(Register)), to);
}
- void emitPutToCallFrameHeader(GPRReg from, JSStack::CallFrameHeaderEntry entry)
+#if USE(JSVALUE64)
+ void emitGetFromCallFrameHeader64(int entry, GPRReg to, GPRReg from = GPRInfo::callFrameRegister)
+ {
+ load64(Address(from, entry * sizeof(Register)), to);
+ }
+#endif // USE(JSVALUE64)
+ void emitPutToCallFrameHeader(GPRReg from, int entry)
{
storePtr(from, Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
}
- void emitPutImmediateToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry entry)
+ void emitPutToCallFrameHeader(void* value, int entry)
{
storePtr(TrustedImmPtr(value), Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
}
@@ -150,10 +577,6 @@ public:
storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::callerFrameOffset()));
}
- void emitGetReturnPCFromCallFrameHeaderPtr(RegisterID to)
- {
- loadPtr(Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()), to);
- }
void emitPutReturnPCToCallFrameHeader(RegisterID from)
{
storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()));
@@ -163,15 +586,291 @@ public:
storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()));
}
- Jump branchIfNotCell(GPRReg reg)
+ // emitPutToCallFrameHeaderBeforePrologue() and related are used to access callee frame header
+ // fields before the code from emitFunctionPrologue() has executed.
+ // First, the access is via the stack pointer. Second, the address calculation must also take
+ // into account that the stack pointer may not have been adjusted down for the return PC and/or
+ // caller's frame pointer. On some platforms, the callee is responsible for pushing the
+ // "link register" containing the return address in the function prologue.
+#if USE(JSVALUE64)
+ void emitPutToCallFrameHeaderBeforePrologue(GPRReg from, int entry)
+ {
+ storePtr(from, Address(stackPointerRegister, entry * static_cast<ptrdiff_t>(sizeof(Register)) - prologueStackPointerDelta()));
+ }
+#else
+ void emitPutPayloadToCallFrameHeaderBeforePrologue(GPRReg from, int entry)
+ {
+ storePtr(from, Address(stackPointerRegister, entry * static_cast<ptrdiff_t>(sizeof(Register)) - prologueStackPointerDelta() + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ }
+
+ void emitPutTagToCallFrameHeaderBeforePrologue(TrustedImm32 tag, int entry)
+ {
+ storePtr(tag, Address(stackPointerRegister, entry * static_cast<ptrdiff_t>(sizeof(Register)) - prologueStackPointerDelta() + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ }
+#endif
+
+ JumpList branchIfNotEqual(JSValueRegs regs, JSValue value)
+ {
+#if USE(JSVALUE64)
+ return branch64(NotEqual, regs.gpr(), TrustedImm64(JSValue::encode(value)));
+#else
+ JumpList result;
+ result.append(branch32(NotEqual, regs.tagGPR(), TrustedImm32(value.tag())));
+ if (value.isEmpty() || value.isUndefinedOrNull())
+ return result; // These don't have anything interesting in the payload.
+ result.append(branch32(NotEqual, regs.payloadGPR(), TrustedImm32(value.payload())));
+ return result;
+#endif
+ }
+
+ Jump branchIfEqual(JSValueRegs regs, JSValue value)
+ {
+#if USE(JSVALUE64)
+ return branch64(Equal, regs.gpr(), TrustedImm64(JSValue::encode(value)));
+#else
+ Jump notEqual;
+ // These don't have anything interesting in the payload.
+ if (!value.isEmpty() && !value.isUndefinedOrNull())
+ notEqual = branch32(NotEqual, regs.payloadGPR(), TrustedImm32(value.payload()));
+ Jump result = branch32(Equal, regs.tagGPR(), TrustedImm32(value.tag()));
+ if (notEqual.isSet())
+ notEqual.link(this);
+ return result;
+#endif
+ }
+
+ Jump branchIfNotCell(GPRReg reg, TagRegistersMode mode = HaveTagRegisters)
{
#if USE(JSVALUE64)
- return branchTest64(MacroAssembler::NonZero, reg, GPRInfo::tagMaskRegister);
+ if (mode == HaveTagRegisters)
+ return branchTest64(NonZero, reg, GPRInfo::tagMaskRegister);
+ return branchTest64(NonZero, reg, TrustedImm64(TagMask));
#else
+ UNUSED_PARAM(mode);
return branch32(MacroAssembler::NotEqual, reg, TrustedImm32(JSValue::CellTag));
#endif
}
+ Jump branchIfNotCell(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters)
+ {
+#if USE(JSVALUE64)
+ return branchIfNotCell(regs.gpr(), mode);
+#else
+ return branchIfNotCell(regs.tagGPR(), mode);
+#endif
+ }
+
+ Jump branchIfCell(GPRReg reg, TagRegistersMode mode = HaveTagRegisters)
+ {
+#if USE(JSVALUE64)
+ if (mode == HaveTagRegisters)
+ return branchTest64(Zero, reg, GPRInfo::tagMaskRegister);
+ return branchTest64(Zero, reg, TrustedImm64(TagMask));
+#else
+ UNUSED_PARAM(mode);
+ return branch32(MacroAssembler::Equal, reg, TrustedImm32(JSValue::CellTag));
+#endif
+ }
+ Jump branchIfCell(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters)
+ {
+#if USE(JSVALUE64)
+ return branchIfCell(regs.gpr(), mode);
+#else
+ return branchIfCell(regs.tagGPR(), mode);
+#endif
+ }
+
+ Jump branchIfOther(JSValueRegs regs, GPRReg tempGPR)
+ {
+#if USE(JSVALUE64)
+ move(regs.gpr(), tempGPR);
+ and64(TrustedImm32(~TagBitUndefined), tempGPR);
+ return branch64(Equal, tempGPR, TrustedImm64(ValueNull));
+#else
+ or32(TrustedImm32(1), regs.tagGPR(), tempGPR);
+ return branch32(Equal, tempGPR, TrustedImm32(JSValue::NullTag));
+#endif
+ }
+
+ Jump branchIfNotOther(JSValueRegs regs, GPRReg tempGPR)
+ {
+#if USE(JSVALUE64)
+ move(regs.gpr(), tempGPR);
+ and64(TrustedImm32(~TagBitUndefined), tempGPR);
+ return branch64(NotEqual, tempGPR, TrustedImm64(ValueNull));
+#else
+ or32(TrustedImm32(1), regs.tagGPR(), tempGPR);
+ return branch32(NotEqual, tempGPR, TrustedImm32(JSValue::NullTag));
+#endif
+ }
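
A small sketch of why the mask in branchIfOther()/branchIfNotOther() works on JSVALUE64, using the encoding constants from JSCJSValue.h (the Sketch names are illustrative):

#include <cstdint>

constexpr uint64_t TagBitUndefinedSketch = 0x8;
constexpr uint64_t ValueNullSketch = 0x2; // == TagBitTypeOther
constexpr uint64_t ValueUndefinedSketch = ValueNullSketch | TagBitUndefinedSketch;
static_assert((ValueUndefinedSketch & ~TagBitUndefinedSketch) == ValueNullSketch,
    "clearing TagBitUndefined folds undefined onto null, so one compare covers both 'other' values");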
+
+ Jump branchIfInt32(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters)
+ {
+#if USE(JSVALUE64)
+ if (mode == HaveTagRegisters)
+ return branch64(AboveOrEqual, regs.gpr(), GPRInfo::tagTypeNumberRegister);
+ return branch64(AboveOrEqual, regs.gpr(), TrustedImm64(TagTypeNumber));
+#else
+ UNUSED_PARAM(mode);
+ return branch32(Equal, regs.tagGPR(), TrustedImm32(JSValue::Int32Tag));
+#endif
+ }
+
+#if USE(JSVALUE64)
+ Jump branchIfNotInt32(GPRReg gpr, TagRegistersMode mode = HaveTagRegisters)
+ {
+ if (mode == HaveTagRegisters)
+ return branch64(Below, gpr, GPRInfo::tagTypeNumberRegister);
+ return branch64(Below, gpr, TrustedImm64(TagTypeNumber));
+ }
+#endif
+
+ Jump branchIfNotInt32(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters)
+ {
+#if USE(JSVALUE64)
+ return branchIfNotInt32(regs.gpr(), mode);
+#else
+ UNUSED_PARAM(mode);
+ return branch32(NotEqual, regs.tagGPR(), TrustedImm32(JSValue::Int32Tag));
+#endif
+ }
+
+ // Note that the tempGPR is not used in 64-bit mode.
+ Jump branchIfNumber(JSValueRegs regs, GPRReg tempGPR, TagRegistersMode mode = HaveTagRegisters)
+ {
+#if USE(JSVALUE64)
+ UNUSED_PARAM(tempGPR);
+ return branchIfNumber(regs.gpr(), mode);
+#else
+ UNUSED_PARAM(mode);
+ add32(TrustedImm32(1), regs.tagGPR(), tempGPR);
+ return branch32(Below, tempGPR, TrustedImm32(JSValue::LowestTag + 1));
+#endif
+ }
+
+#if USE(JSVALUE64)
+ Jump branchIfNumber(GPRReg reg, TagRegistersMode mode = HaveTagRegisters)
+ {
+ if (mode == HaveTagRegisters)
+ return branchTest64(NonZero, reg, GPRInfo::tagTypeNumberRegister);
+ return branchTest64(NonZero, reg, TrustedImm64(TagTypeNumber));
+ }
+#endif
+
+ // Note that the tempGPR is not used in 64-bit mode.
+ Jump branchIfNotNumber(JSValueRegs regs, GPRReg tempGPR, TagRegistersMode mode = HaveTagRegisters)
+ {
+#if USE(JSVALUE64)
+ UNUSED_PARAM(tempGPR);
+ return branchIfNotNumber(regs.gpr(), mode);
+#else
+ UNUSED_PARAM(mode);
+ add32(TrustedImm32(1), regs.tagGPR(), tempGPR);
+ return branch32(AboveOrEqual, tempGPR, TrustedImm32(JSValue::LowestTag + 1));
+#endif
+ }
+
+#if USE(JSVALUE64)
+ Jump branchIfNotNumber(GPRReg reg, TagRegistersMode mode = HaveTagRegisters)
+ {
+ if (mode == HaveTagRegisters)
+ return branchTest64(Zero, reg, GPRInfo::tagTypeNumberRegister);
+ return branchTest64(Zero, reg, TrustedImm64(TagTypeNumber));
+ }
+#endif
+
+ Jump branchIfNotDoubleKnownNotInt32(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters)
+ {
+#if USE(JSVALUE64)
+ if (mode == HaveTagRegisters)
+ return branchTest64(Zero, regs.gpr(), GPRInfo::tagTypeNumberRegister);
+ return branchTest64(Zero, regs.gpr(), TrustedImm64(TagTypeNumber));
+#else
+ UNUSED_PARAM(mode);
+ return branch32(AboveOrEqual, regs.tagGPR(), TrustedImm32(JSValue::LowestTag));
+#endif
+ }
+
+ // Note that the tempGPR is not used in 32-bit mode.
+ Jump branchIfBoolean(JSValueRegs regs, GPRReg tempGPR)
+ {
+#if USE(JSVALUE64)
+ move(regs.gpr(), tempGPR);
+ xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), tempGPR);
+ return branchTest64(Zero, tempGPR, TrustedImm32(static_cast<int32_t>(~1)));
+#else
+ UNUSED_PARAM(tempGPR);
+ return branch32(Equal, regs.tagGPR(), TrustedImm32(JSValue::BooleanTag));
+#endif
+ }
+
+ // Note that the tempGPR is not used in 32-bit mode.
+ Jump branchIfNotBoolean(JSValueRegs regs, GPRReg tempGPR)
+ {
+#if USE(JSVALUE64)
+ move(regs.gpr(), tempGPR);
+ xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), tempGPR);
+ return branchTest64(NonZero, tempGPR, TrustedImm32(static_cast<int32_t>(~1)));
+#else
+ UNUSED_PARAM(tempGPR);
+ return branch32(NotEqual, regs.tagGPR(), TrustedImm32(JSValue::BooleanTag));
+#endif
+ }
+
+ Jump branchIfObject(GPRReg cellGPR)
+ {
+ return branch8(
+ AboveOrEqual, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType));
+ }
+
+ Jump branchIfNotObject(GPRReg cellGPR)
+ {
+ return branch8(
+ Below, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType));
+    }
+
+    Jump branchIfType(GPRReg cellGPR, JSType type)
+ {
+ return branch8(Equal, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(type));
+ }
+
+ Jump branchIfNotType(GPRReg cellGPR, JSType type)
+ {
+ return branch8(NotEqual, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(type));
+ }
+
+ Jump branchIfString(GPRReg cellGPR) { return branchIfType(cellGPR, StringType); }
+ Jump branchIfNotString(GPRReg cellGPR) { return branchIfNotType(cellGPR, StringType); }
+ Jump branchIfSymbol(GPRReg cellGPR) { return branchIfType(cellGPR, SymbolType); }
+ Jump branchIfNotSymbol(GPRReg cellGPR) { return branchIfNotType(cellGPR, SymbolType); }
+ Jump branchIfFunction(GPRReg cellGPR) { return branchIfType(cellGPR, JSFunctionType); }
+ Jump branchIfNotFunction(GPRReg cellGPR) { return branchIfNotType(cellGPR, JSFunctionType); }
+
+ Jump branchIfEmpty(JSValueRegs regs)
+ {
+#if USE(JSVALUE64)
+ return branchTest64(Zero, regs.gpr());
+#else
+ return branch32(Equal, regs.tagGPR(), TrustedImm32(JSValue::EmptyValueTag));
+#endif
+ }
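+
+    // For example (sketch), a caller that wants an int32 fast path and a shared slow path
+    // typically combines these helpers like so; valueRegs and the jit object are whatever
+    // the caller already has in hand:
+    //
+    //     CCallHelpers::JumpList slowCases;
+    //     slowCases.append(jit.branchIfNotInt32(valueRegs));
+    //     // ... int32 fast path operating on valueRegs.payloadGPR() ...
+    //     slowCases.link(&jit);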
+
+ JumpList branchIfNotType(
+ JSValueRegs, GPRReg tempGPR, const InferredType::Descriptor&,
+ TagRegistersMode = HaveTagRegisters);
+
+ template<typename T>
+ Jump branchStructure(RelationalCondition condition, T leftHandSide, Structure* structure)
+ {
+#if USE(JSVALUE64)
+ return branch32(condition, leftHandSide, TrustedImm32(structure->id()));
+#else
+ return branchPtr(condition, leftHandSide, TrustedImmPtr(structure));
+#endif
+ }
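+
+    // Typical use (sketch): guard that a cell has the structure we optimized for, where
+    // baseGPR holds the cell and expectedStructure is a Structure* the caller already knows:
+    //
+    //     slowCases.append(jit.branchStructure(
+    //         CCallHelpers::NotEqual,
+    //         CCallHelpers::Address(baseGPR, JSCell::structureIDOffset()),
+    //         expectedStructure));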
+
+ Jump branchIfFastTypedArray(GPRReg baseGPR);
+ Jump branchIfNotFastTypedArray(GPRReg baseGPR);
+
static Address addressForByteOffset(ptrdiff_t byteOffset)
{
return Address(GPRInfo::callFrameRegister, byteOffset);
@@ -183,6 +882,10 @@ public:
}
static Address addressFor(VirtualRegister virtualRegister)
{
+        // NB. On some architectures it's tempting to sometimes use an offset from the stack
+        // register instead, because for some offsets that encodes to a smaller instruction. But
+        // we cannot do that here: this helper is used in places where the stack pointer has been
+        // moved to some unpredictable location.
ASSERT(virtualRegister.isValid());
return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register));
}
@@ -194,7 +897,7 @@ public:
static Address tagFor(VirtualRegister virtualRegister)
{
ASSERT(virtualRegister.isValid());
- return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register) + TagOffset);
}
static Address tagFor(int operand)
{
@@ -204,33 +907,69 @@ public:
static Address payloadFor(VirtualRegister virtualRegister)
{
ASSERT(virtualRegister.isValid());
- return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register) + PayloadOffset);
}
static Address payloadFor(int operand)
{
return payloadFor(static_cast<VirtualRegister>(operand));
}
- Jump branchIfNotObject(GPRReg structureReg)
+ // Access to our fixed callee CallFrame.
+ static Address calleeFrameSlot(int slot)
+ {
+ ASSERT(slot >= CallerFrameAndPC::sizeInRegisters);
+ return Address(stackPointerRegister, sizeof(Register) * (slot - CallerFrameAndPC::sizeInRegisters));
+ }
+
+ // Access to our fixed callee CallFrame.
+ static Address calleeArgumentSlot(int argument)
+ {
+ return calleeFrameSlot(virtualRegisterForArgument(argument).offset());
+ }
+
+ static Address calleeFrameTagSlot(int slot)
+ {
+ return calleeFrameSlot(slot).withOffset(TagOffset);
+ }
+
+ static Address calleeFramePayloadSlot(int slot)
+ {
+ return calleeFrameSlot(slot).withOffset(PayloadOffset);
+ }
+
+ static Address calleeArgumentTagSlot(int argument)
{
- return branch8(Below, Address(structureReg, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType));
+ return calleeArgumentSlot(argument).withOffset(TagOffset);
}
- static GPRReg selectScratchGPR(GPRReg preserve1 = InvalidGPRReg, GPRReg preserve2 = InvalidGPRReg, GPRReg preserve3 = InvalidGPRReg, GPRReg preserve4 = InvalidGPRReg)
+ static Address calleeArgumentPayloadSlot(int argument)
{
- if (preserve1 != GPRInfo::regT0 && preserve2 != GPRInfo::regT0 && preserve3 != GPRInfo::regT0 && preserve4 != GPRInfo::regT0)
+ return calleeArgumentSlot(argument).withOffset(PayloadOffset);
+ }
+
+ static Address calleeFrameCallerFrame()
+ {
+ return calleeFrameSlot(0).withOffset(CallFrame::callerFrameOffset());
+ }
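+
+    // For example (sketch, 64-bit), a call site stages the outgoing frame through these
+    // accessors before making the call; numArgs and calleeGPR are whatever the caller has:
+    //
+    //     jit.store32(CCallHelpers::TrustedImm32(numArgs),
+    //         CCallHelpers::calleeFramePayloadSlot(CallFrameSlot::argumentCount));
+    //     jit.storePtr(calleeGPR, CCallHelpers::calleeFrameSlot(CallFrameSlot::callee));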
+
+ static GPRReg selectScratchGPR(GPRReg preserve1 = InvalidGPRReg, GPRReg preserve2 = InvalidGPRReg, GPRReg preserve3 = InvalidGPRReg, GPRReg preserve4 = InvalidGPRReg, GPRReg preserve5 = InvalidGPRReg)
+ {
+ if (preserve1 != GPRInfo::regT0 && preserve2 != GPRInfo::regT0 && preserve3 != GPRInfo::regT0 && preserve4 != GPRInfo::regT0 && preserve5 != GPRInfo::regT0)
return GPRInfo::regT0;
- if (preserve1 != GPRInfo::regT1 && preserve2 != GPRInfo::regT1 && preserve3 != GPRInfo::regT1 && preserve4 != GPRInfo::regT1)
+ if (preserve1 != GPRInfo::regT1 && preserve2 != GPRInfo::regT1 && preserve3 != GPRInfo::regT1 && preserve4 != GPRInfo::regT1 && preserve5 != GPRInfo::regT1)
return GPRInfo::regT1;
- if (preserve1 != GPRInfo::regT2 && preserve2 != GPRInfo::regT2 && preserve3 != GPRInfo::regT2 && preserve4 != GPRInfo::regT2)
+ if (preserve1 != GPRInfo::regT2 && preserve2 != GPRInfo::regT2 && preserve3 != GPRInfo::regT2 && preserve4 != GPRInfo::regT2 && preserve5 != GPRInfo::regT2)
return GPRInfo::regT2;
- if (preserve1 != GPRInfo::regT3 && preserve2 != GPRInfo::regT3 && preserve3 != GPRInfo::regT3 && preserve4 != GPRInfo::regT3)
+ if (preserve1 != GPRInfo::regT3 && preserve2 != GPRInfo::regT3 && preserve3 != GPRInfo::regT3 && preserve4 != GPRInfo::regT3 && preserve5 != GPRInfo::regT3)
return GPRInfo::regT3;
- return GPRInfo::regT4;
+ if (preserve1 != GPRInfo::regT4 && preserve2 != GPRInfo::regT4 && preserve3 != GPRInfo::regT4 && preserve4 != GPRInfo::regT4 && preserve5 != GPRInfo::regT4)
+ return GPRInfo::regT4;
+
+ return GPRInfo::regT5;
}
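+
+    // Example (sketch): pick a temporary that is known to differ from the registers the
+    // caller needs to preserve (here baseGPR and resultGPR; the load is illustrative):
+    //
+    //     GPRReg scratchGPR = AssemblyHelpers::selectScratchGPR(baseGPR, resultGPR);
+    //     jit.loadPtr(CCallHelpers::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
+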
// Add a debug call. This call has no effect on JIT code execution state.
@@ -257,7 +996,7 @@ public:
move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT0);
storePtr(TrustedImmPtr(scratchSize), GPRInfo::regT0);
-#if CPU(X86_64) || CPU(ARM) || CPU(ARM64) || CPU(MIPS) || CPU(SH4)
+#if CPU(X86_64) || CPU(ARM) || CPU(ARM64) || CPU(MIPS)
move(TrustedImmPtr(buffer), GPRInfo::argumentGPR2);
move(TrustedImmPtr(argument), GPRInfo::argumentGPR1);
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
@@ -298,6 +1037,8 @@ public:
void jitAssertIsCell(GPRReg);
void jitAssertHasValidCallFrame();
void jitAssertIsNull(GPRReg);
+ void jitAssertTagsInPlace();
+ void jitAssertArgumentCountSane();
#else
void jitAssertIsInt32(GPRReg) { }
void jitAssertIsJSInt32(GPRReg) { }
@@ -306,8 +1047,17 @@ public:
void jitAssertIsCell(GPRReg) { }
void jitAssertHasValidCallFrame() { }
void jitAssertIsNull(GPRReg) { }
+ void jitAssertTagsInPlace() { }
+ void jitAssertArgumentCountSane() { }
#endif
+ void jitReleaseAssertNoException();
+
+ void incrementSuperSamplerCount();
+ void decrementSuperSamplerCount();
+
+ void purifyNaN(FPRReg);
+
// These methods convert between doubles, and doubles boxed and JSValues.
#if USE(JSVALUE64)
GPRReg boxDouble(FPRReg fpr, GPRReg gpr)
@@ -317,14 +1067,28 @@ public:
jitAssertIsJSDouble(gpr);
return gpr;
}
- FPRReg unboxDouble(GPRReg gpr, FPRReg fpr)
+ FPRReg unboxDoubleWithoutAssertions(GPRReg gpr, GPRReg resultGPR, FPRReg fpr)
{
- jitAssertIsJSDouble(gpr);
- add64(GPRInfo::tagTypeNumberRegister, gpr);
- move64ToDouble(gpr, fpr);
+ add64(GPRInfo::tagTypeNumberRegister, gpr, resultGPR);
+ move64ToDouble(resultGPR, fpr);
return fpr;
}
+ FPRReg unboxDouble(GPRReg gpr, GPRReg resultGPR, FPRReg fpr)
+ {
+ jitAssertIsJSDouble(gpr);
+ return unboxDoubleWithoutAssertions(gpr, resultGPR, fpr);
+ }
+ void boxDouble(FPRReg fpr, JSValueRegs regs)
+ {
+ boxDouble(fpr, regs.gpr());
+ }
+
+ void unboxDoubleNonDestructive(JSValueRegs regs, FPRReg destFPR, GPRReg resultGPR, FPRReg)
+ {
+ unboxDouble(regs.payloadGPR(), resultGPR, destFPR);
+ }
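+
+    // For example (sketch), a caller that has produced a double in resultFPR and wants it
+    // boxed into resultRegs, or wants to unbox a double it holds in valueRegs without
+    // clobbering the original value:
+    //
+    //     jit.boxDouble(resultFPR, resultRegs);
+    //     jit.unboxDoubleNonDestructive(valueRegs, destFPR, scratchGPR, scratchFPR);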
+
// Here are possible arrangements of source, target, scratch:
// - source, target, scratch can all be separate registers.
// - source and target can be the same but scratch is separate.
@@ -357,18 +1121,82 @@ public:
{
moveIntsToDouble(payloadGPR, tagGPR, fpr, scratchFPR);
}
+
+ void boxDouble(FPRReg fpr, JSValueRegs regs)
+ {
+ boxDouble(fpr, regs.tagGPR(), regs.payloadGPR());
+ }
+ void unboxDouble(JSValueRegs regs, FPRReg fpr, FPRReg scratchFPR)
+ {
+ unboxDouble(regs.tagGPR(), regs.payloadGPR(), fpr, scratchFPR);
+ }
+
+ void unboxDoubleNonDestructive(const JSValueRegs regs, FPRReg destFPR, GPRReg, FPRReg scratchFPR)
+ {
+ unboxDouble(regs, destFPR, scratchFPR);
+ }
#endif
- enum ExceptionCheckKind { NormalExceptionCheck, InvertedExceptionCheck };
- Jump emitExceptionCheck(ExceptionCheckKind kind = NormalExceptionCheck)
+ void boxBooleanPayload(GPRReg boolGPR, GPRReg payloadGPR)
{
#if USE(JSVALUE64)
- return branchTest64(kind == NormalExceptionCheck ? NonZero : Zero, AbsoluteAddress(vm()->addressOfException()));
-#elif USE(JSVALUE32_64)
- return branch32(kind == NormalExceptionCheck ? NotEqual : Equal, AbsoluteAddress(reinterpret_cast<char*>(vm()->addressOfException()) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
+ add32(TrustedImm32(ValueFalse), boolGPR, payloadGPR);
+#else
+ move(boolGPR, payloadGPR);
#endif
}
+ void boxBooleanPayload(bool value, GPRReg payloadGPR)
+ {
+#if USE(JSVALUE64)
+ move(TrustedImm32(ValueFalse + value), payloadGPR);
+#else
+ move(TrustedImm32(value), payloadGPR);
+#endif
+ }
+
+ void boxBoolean(GPRReg boolGPR, JSValueRegs boxedRegs)
+ {
+ boxBooleanPayload(boolGPR, boxedRegs.payloadGPR());
+#if USE(JSVALUE32_64)
+ move(TrustedImm32(JSValue::BooleanTag), boxedRegs.tagGPR());
+#endif
+ }
+
+ void boxInt32(GPRReg intGPR, JSValueRegs boxedRegs, TagRegistersMode mode = HaveTagRegisters)
+ {
+#if USE(JSVALUE64)
+ if (mode == DoNotHaveTagRegisters) {
+ move(intGPR, boxedRegs.gpr());
+ or64(TrustedImm64(TagTypeNumber), boxedRegs.gpr());
+ } else
+ or64(GPRInfo::tagTypeNumberRegister, intGPR, boxedRegs.gpr());
+#else
+ UNUSED_PARAM(mode);
+ move(intGPR, boxedRegs.payloadGPR());
+ move(TrustedImm32(JSValue::Int32Tag), boxedRegs.tagGPR());
+#endif
+ }
+
+ void boxCell(GPRReg cellGPR, JSValueRegs boxedRegs)
+ {
+#if USE(JSVALUE64)
+ move(cellGPR, boxedRegs.gpr());
+#else
+ move(cellGPR, boxedRegs.payloadGPR());
+ move(TrustedImm32(JSValue::CellTag), boxedRegs.tagGPR());
+#endif
+ }
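+
+    // Example (sketch): turn the result of a compare into a boxed JSValue ready to be
+    // stored back into a stack slot:
+    //
+    //     jit.compare32(CCallHelpers::Equal, leftGPR, rightGPR, resultGPR);
+    //     jit.boxBoolean(resultGPR, resultRegs);
+    //     jit.storeValue(resultRegs, addressFor(destVirtualRegister));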
+
+ void callExceptionFuzz();
+
+ enum ExceptionCheckKind { NormalExceptionCheck, InvertedExceptionCheck };
+ enum ExceptionJumpWidth { NormalJumpWidth, FarJumpWidth };
+ JS_EXPORT_PRIVATE Jump emitExceptionCheck(
+ ExceptionCheckKind = NormalExceptionCheck, ExceptionJumpWidth = NormalJumpWidth);
+ JS_EXPORT_PRIVATE Jump emitNonPatchableExceptionCheck();
+ Jump emitJumpIfException();
+
#if ENABLE(SAMPLING_COUNTERS)
static void emitCount(MacroAssembler& jit, AbstractSamplingCounter& counter, int32_t increment = 1)
{
@@ -394,7 +1222,7 @@ public:
{
if (!codeOrigin.inlineCallFrame)
return codeBlock()->isStrictMode();
- return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->isStrictMode();
+ return codeOrigin.inlineCallFrame->isStrictMode();
}
ECMAMode ecmaModeFor(CodeOrigin codeOrigin)
@@ -421,64 +1249,391 @@ public:
return m_baselineCodeBlock;
}
- VirtualRegister baselineArgumentsRegisterFor(InlineCallFrame* inlineCallFrame)
+ static VirtualRegister argumentsStart(InlineCallFrame* inlineCallFrame)
{
if (!inlineCallFrame)
- return baselineCodeBlock()->argumentsRegister();
-
- return VirtualRegister(baselineCodeBlockForInlineCallFrame(
- inlineCallFrame)->argumentsRegister().offset() + inlineCallFrame->stackOffset);
+ return VirtualRegister(CallFrame::argumentOffset(0));
+ if (inlineCallFrame->arguments.size() <= 1)
+ return virtualRegisterForLocal(0);
+ ValueRecovery recovery = inlineCallFrame->arguments[1];
+ RELEASE_ASSERT(recovery.technique() == DisplacedInJSStack);
+ return recovery.virtualRegister();
}
- VirtualRegister baselineArgumentsRegisterFor(const CodeOrigin& codeOrigin)
+ static VirtualRegister argumentsStart(const CodeOrigin& codeOrigin)
+ {
+ return argumentsStart(codeOrigin.inlineCallFrame);
+ }
+
+ static VirtualRegister argumentCount(InlineCallFrame* inlineCallFrame)
+ {
+ ASSERT(!inlineCallFrame || inlineCallFrame->isVarargs());
+ if (!inlineCallFrame)
+ return VirtualRegister(CallFrameSlot::argumentCount);
+ return inlineCallFrame->argumentCountRegister;
+ }
+
+ static VirtualRegister argumentCount(const CodeOrigin& codeOrigin)
{
- return baselineArgumentsRegisterFor(codeOrigin.inlineCallFrame);
+ return argumentCount(codeOrigin.inlineCallFrame);
}
- SymbolTable* symbolTableFor(const CodeOrigin& codeOrigin)
+ void emitLoadStructure(RegisterID source, RegisterID dest, RegisterID scratch);
+
+ void emitStoreStructureWithTypeInfo(TrustedImmPtr structure, RegisterID dest, RegisterID)
{
- return baselineCodeBlockFor(codeOrigin)->symbolTable();
+ emitStoreStructureWithTypeInfo(*this, structure, dest);
}
- int offsetOfLocals(const CodeOrigin& codeOrigin)
+ void emitStoreStructureWithTypeInfo(RegisterID structure, RegisterID dest, RegisterID scratch)
{
- if (!codeOrigin.inlineCallFrame)
- return 0;
- return codeOrigin.inlineCallFrame->stackOffset * sizeof(Register);
+#if USE(JSVALUE64)
+ load64(MacroAssembler::Address(structure, Structure::structureIDOffset()), scratch);
+ store64(scratch, MacroAssembler::Address(dest, JSCell::structureIDOffset()));
+#else
+ // Store all the info flags using a single 32-bit wide load and store.
+ load32(MacroAssembler::Address(structure, Structure::indexingTypeIncludingHistoryOffset()), scratch);
+ store32(scratch, MacroAssembler::Address(dest, JSCell::indexingTypeAndMiscOffset()));
+
+ // Store the StructureID
+ storePtr(structure, MacroAssembler::Address(dest, JSCell::structureIDOffset()));
+#endif
}
- int offsetOfArgumentsIncludingThis(InlineCallFrame* inlineCallFrame)
+ static void emitStoreStructureWithTypeInfo(AssemblyHelpers& jit, TrustedImmPtr structure, RegisterID dest);
+
+ Jump barrierBranchWithoutFence(GPRReg cell)
{
- if (!inlineCallFrame)
- return CallFrame::argumentOffsetIncludingThis(0) * sizeof(Register);
- if (inlineCallFrame->arguments.size() <= 1)
- return 0;
- ValueRecovery recovery = inlineCallFrame->arguments[1];
- RELEASE_ASSERT(recovery.technique() == DisplacedInJSStack);
- return (recovery.virtualRegister().offset() - 1) * sizeof(Register);
+ return branch8(Above, Address(cell, JSCell::cellStateOffset()), TrustedImm32(blackThreshold));
+ }
+
+ Jump barrierBranchWithoutFence(JSCell* cell)
+ {
+ uint8_t* address = reinterpret_cast<uint8_t*>(cell) + JSCell::cellStateOffset();
+ return branch8(Above, AbsoluteAddress(address), TrustedImm32(blackThreshold));
}
- int offsetOfArgumentsIncludingThis(const CodeOrigin& codeOrigin)
+ Jump barrierBranch(GPRReg cell, GPRReg scratchGPR)
{
- return offsetOfArgumentsIncludingThis(codeOrigin.inlineCallFrame);
+ load8(Address(cell, JSCell::cellStateOffset()), scratchGPR);
+ return branch32(Above, scratchGPR, AbsoluteAddress(vm()->heap.addressOfBarrierThreshold()));
}
- void writeBarrier(GPRReg owner, GPRReg scratch1, GPRReg scratch2, WriteBarrierUseKind useKind)
+ Jump barrierBranch(JSCell* cell, GPRReg scratchGPR)
+ {
+ uint8_t* address = reinterpret_cast<uint8_t*>(cell) + JSCell::cellStateOffset();
+ load8(address, scratchGPR);
+ return branch32(Above, scratchGPR, AbsoluteAddress(vm()->heap.addressOfBarrierThreshold()));
+ }
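+
+    // A typical write-barrier fast path (sketch): the returned jump is taken when the owner
+    // needs no barrier work, so callers emit the slow-path call and link the jump past it
+    // (use barrierBranch() with a scratch register when a fence is required):
+    //
+    //     CCallHelpers::Jump noBarrier = jit.barrierBranchWithoutFence(ownerGPR);
+    //     // ... call the write-barrier slow path for ownerGPR here ...
+    //     noBarrier.link(&jit);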
+
+ void barrierStoreLoadFence()
{
- UNUSED_PARAM(owner);
- UNUSED_PARAM(scratch1);
- UNUSED_PARAM(scratch2);
- UNUSED_PARAM(useKind);
- ASSERT(owner != scratch1);
- ASSERT(owner != scratch2);
- ASSERT(scratch1 != scratch2);
+ if (!Options::useConcurrentBarriers())
+ return;
+ Jump ok = jumpIfMutatorFenceNotNeeded();
+ memoryFence();
+ ok.link(this);
+ }
+
+ void mutatorFence()
+ {
+ if (isX86())
+ return;
+ Jump ok = jumpIfMutatorFenceNotNeeded();
+ storeFence();
+ ok.link(this);
+ }
+
+ void storeButterfly(GPRReg butterfly, GPRReg object)
+ {
+ if (isX86()) {
+ storePtr(butterfly, Address(object, JSObject::butterflyOffset()));
+ return;
+ }
-#if ENABLE(WRITE_BARRIER_PROFILING)
- emitCount(WriteBarrierCounters::jitCounterFor(useKind));
-#endif
+ Jump ok = jumpIfMutatorFenceNotNeeded();
+ storeFence();
+ storePtr(butterfly, Address(object, JSObject::butterflyOffset()));
+ storeFence();
+ Jump done = jump();
+ ok.link(this);
+ storePtr(butterfly, Address(object, JSObject::butterflyOffset()));
+ done.link(this);
+ }
+
+ void nukeStructureAndStoreButterfly(GPRReg butterfly, GPRReg object)
+ {
+ if (isX86()) {
+ or32(TrustedImm32(bitwise_cast<int32_t>(nukedStructureIDBit())), Address(object, JSCell::structureIDOffset()));
+ storePtr(butterfly, Address(object, JSObject::butterflyOffset()));
+ return;
+ }
+
+ Jump ok = jumpIfMutatorFenceNotNeeded();
+ or32(TrustedImm32(bitwise_cast<int32_t>(nukedStructureIDBit())), Address(object, JSCell::structureIDOffset()));
+ storeFence();
+ storePtr(butterfly, Address(object, JSObject::butterflyOffset()));
+ storeFence();
+ Jump done = jump();
+ ok.link(this);
+ storePtr(butterfly, Address(object, JSObject::butterflyOffset()));
+ done.link(this);
}
+
+ Jump jumpIfMutatorFenceNotNeeded()
+ {
+ return branchTest8(Zero, AbsoluteAddress(vm()->heap.addressOfMutatorShouldBeFenced()));
+ }
+
+ // Emits the branch structure for typeof. The code emitted by this doesn't fall through. The
+ // functor is called at those points where we have pinpointed a type. One way to use this is to
+ // have the functor emit the code to put the type string into an appropriate register and then
+ // jump out. A secondary functor is used for the call trap and masquerades-as-undefined slow
+ // case. It is passed the unlinked jump to the slow case.
+ template<typename Functor, typename SlowPathFunctor>
+ void emitTypeOf(
+ JSValueRegs regs, GPRReg tempGPR, const Functor& functor,
+ const SlowPathFunctor& slowPathFunctor)
+ {
+ // Implements the following branching structure:
+ //
+ // if (is cell) {
+ // if (is object) {
+ // if (is function) {
+ // return function;
+ // } else if (doesn't have call trap and doesn't masquerade as undefined) {
+ // return object
+ // } else {
+ // return slowPath();
+ // }
+ // } else if (is string) {
+ // return string
+ // } else {
+ // return symbol
+ // }
+ // } else if (is number) {
+ // return number
+ // } else if (is null) {
+ // return object
+ // } else if (is boolean) {
+ // return boolean
+ // } else {
+ // return undefined
+ // }
+
+ Jump notCell = branchIfNotCell(regs);
+
+ GPRReg cellGPR = regs.payloadGPR();
+ Jump notObject = branchIfNotObject(cellGPR);
+
+ Jump notFunction = branchIfNotFunction(cellGPR);
+ functor(TypeofType::Function, false);
+
+ notFunction.link(this);
+ slowPathFunctor(
+ branchTest8(
+ NonZero,
+ Address(cellGPR, JSCell::typeInfoFlagsOffset()),
+ TrustedImm32(MasqueradesAsUndefined | TypeOfShouldCallGetCallData)));
+ functor(TypeofType::Object, false);
+
+ notObject.link(this);
+
+ Jump notString = branchIfNotString(cellGPR);
+ functor(TypeofType::String, false);
+ notString.link(this);
+ functor(TypeofType::Symbol, false);
+
+ notCell.link(this);
+ Jump notNumber = branchIfNotNumber(regs, tempGPR);
+ functor(TypeofType::Number, false);
+ notNumber.link(this);
+
+ JumpList notNull = branchIfNotEqual(regs, jsNull());
+ functor(TypeofType::Object, false);
+ notNull.link(this);
+
+ Jump notBoolean = branchIfNotBoolean(regs, tempGPR);
+ functor(TypeofType::Boolean, false);
+ notBoolean.link(this);
+
+ functor(TypeofType::Undefined, true);
+ }
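+
+    // Example use (sketch): leave a pointer to the type string in resultGPR, collecting
+    // exits in doneJumps and deferring the hard cases to slowCases; typeStringFor() stands
+    // in for however the caller obtains the string for a TypeofType:
+    //
+    //     jit.emitTypeOf(
+    //         valueRegs, tempGPR,
+    //         [&] (TypeofType type, bool fallsThrough) {
+    //             jit.move(CCallHelpers::TrustedImmPtr(typeStringFor(type)), resultGPR);
+    //             if (!fallsThrough)
+    //                 doneJumps.append(jit.jump());
+    //         },
+    //         [&] (CCallHelpers::Jump jumpToSlowCase) {
+    //             slowCases.append(jumpToSlowCase);
+    //         });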
+
+ void emitDumbVirtualCall(CallLinkInfo*);
+
Vector<BytecodeAndMachineOffset>& decodedCodeMapFor(CodeBlock*);
+
+ void makeSpaceOnStackForCCall();
+ void reclaimSpaceOnStackForCCall();
+
+#if USE(JSVALUE64)
+ void emitRandomThunk(JSGlobalObject*, GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, FPRReg result);
+ void emitRandomThunk(GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, GPRReg scratch3, FPRReg result);
+#endif
+
+ // Call this if you know that the value held in allocatorGPR is non-null. This DOES NOT mean
+ // that allocator is non-null; allocator can be null as a signal that we don't know what the
+ // value of allocatorGPR is.
+ void emitAllocateWithNonNullAllocator(GPRReg resultGPR, MarkedAllocator* allocator, GPRReg allocatorGPR, GPRReg scratchGPR, JumpList& slowPath)
+ {
+ // NOTE: This is carefully written so that we can call it while we disallow scratch
+ // register usage.
+
+ if (Options::forceGCSlowPaths()) {
+ slowPath.append(jump());
+ return;
+ }
+
+ Jump popPath;
+ Jump done;
+
+ load32(Address(allocatorGPR, MarkedAllocator::offsetOfFreeList() + OBJECT_OFFSETOF(FreeList, remaining)), resultGPR);
+ popPath = branchTest32(Zero, resultGPR);
+ if (allocator)
+ add32(TrustedImm32(-allocator->cellSize()), resultGPR, scratchGPR);
+ else {
+ if (isX86()) {
+ move(resultGPR, scratchGPR);
+ sub32(Address(allocatorGPR, MarkedAllocator::offsetOfCellSize()), scratchGPR);
+ } else {
+ load32(Address(allocatorGPR, MarkedAllocator::offsetOfCellSize()), scratchGPR);
+ sub32(resultGPR, scratchGPR, scratchGPR);
+ }
+ }
+ negPtr(resultGPR);
+ store32(scratchGPR, Address(allocatorGPR, MarkedAllocator::offsetOfFreeList() + OBJECT_OFFSETOF(FreeList, remaining)));
+ Address payloadEndAddr = Address(allocatorGPR, MarkedAllocator::offsetOfFreeList() + OBJECT_OFFSETOF(FreeList, payloadEnd));
+ if (isX86())
+ addPtr(payloadEndAddr, resultGPR);
+ else {
+ loadPtr(payloadEndAddr, scratchGPR);
+ addPtr(scratchGPR, resultGPR);
+ }
+
+ done = jump();
+
+ popPath.link(this);
+
+ loadPtr(Address(allocatorGPR, MarkedAllocator::offsetOfFreeList() + OBJECT_OFFSETOF(FreeList, head)), resultGPR);
+ slowPath.append(branchTestPtr(Zero, resultGPR));
+
+ // The object is half-allocated: we have what we know is a fresh object, but
+ // it's still on the GC's free list.
+ loadPtr(Address(resultGPR), scratchGPR);
+ storePtr(scratchGPR, Address(allocatorGPR, MarkedAllocator::offsetOfFreeList() + OBJECT_OFFSETOF(FreeList, head)));
+
+ done.link(this);
+ }
+
+ void emitAllocate(GPRReg resultGPR, MarkedAllocator* allocator, GPRReg allocatorGPR, GPRReg scratchGPR, JumpList& slowPath)
+ {
+ if (!allocator)
+ slowPath.append(branchTestPtr(Zero, allocatorGPR));
+ emitAllocateWithNonNullAllocator(resultGPR, allocator, allocatorGPR, scratchGPR, slowPath);
+ }
+
+ template<typename StructureType>
+ void emitAllocateJSCell(GPRReg resultGPR, MarkedAllocator* allocator, GPRReg allocatorGPR, StructureType structure, GPRReg scratchGPR, JumpList& slowPath)
+ {
+ emitAllocate(resultGPR, allocator, allocatorGPR, scratchGPR, slowPath);
+ emitStoreStructureWithTypeInfo(structure, resultGPR, scratchGPR);
+ }
+
+ template<typename StructureType, typename StorageType>
+ void emitAllocateJSObject(GPRReg resultGPR, MarkedAllocator* allocator, GPRReg allocatorGPR, StructureType structure, StorageType storage, GPRReg scratchGPR, JumpList& slowPath)
+ {
+ emitAllocateJSCell(resultGPR, allocator, allocatorGPR, structure, scratchGPR, slowPath);
+ storePtr(storage, Address(resultGPR, JSObject::butterflyOffset()));
+ }
+
+ template<typename ClassType, typename StructureType, typename StorageType>
+ void emitAllocateJSObjectWithKnownSize(
+ GPRReg resultGPR, StructureType structure, StorageType storage, GPRReg scratchGPR1,
+ GPRReg scratchGPR2, JumpList& slowPath, size_t size)
+ {
+ MarkedAllocator* allocator = subspaceFor<ClassType>(*vm())->allocatorFor(size);
+ if (!allocator) {
+ slowPath.append(jump());
+ return;
+ }
+ move(TrustedImmPtr(allocator), scratchGPR1);
+ emitAllocateJSObject(resultGPR, allocator, scratchGPR1, structure, storage, scratchGPR2, slowPath);
+ }
+
+ template<typename ClassType, typename StructureType, typename StorageType>
+ void emitAllocateJSObject(GPRReg resultGPR, StructureType structure, StorageType storage, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath)
+ {
+ emitAllocateJSObjectWithKnownSize<ClassType>(resultGPR, structure, storage, scratchGPR1, scratchGPR2, slowPath, ClassType::allocationSize(0));
+ }
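+
+    // Example (sketch): inline-allocate a JSFinalObject with no out-of-line storage,
+    // bailing to slowCases when the free list is empty or allocation must take the
+    // slow path:
+    //
+    //     CCallHelpers::JumpList slowCases;
+    //     jit.emitAllocateJSObject<JSFinalObject>(
+    //         resultGPR, CCallHelpers::TrustedImmPtr(structure), CCallHelpers::TrustedImmPtr(0),
+    //         scratchGPR1, scratchGPR2, slowCases);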
+
+ // allocationSize can be aliased with any of the other input GPRs. If it's not aliased then it
+ // won't be clobbered.
+ void emitAllocateVariableSized(GPRReg resultGPR, Subspace& subspace, GPRReg allocationSize, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath)
+ {
+ static_assert(!(MarkedSpace::sizeStep & (MarkedSpace::sizeStep - 1)), "MarkedSpace::sizeStep must be a power of two.");
+
+ unsigned stepShift = getLSBSet(MarkedSpace::sizeStep);
+
+ add32(TrustedImm32(MarkedSpace::sizeStep - 1), allocationSize, scratchGPR1);
+ urshift32(TrustedImm32(stepShift), scratchGPR1);
+ slowPath.append(branch32(Above, scratchGPR1, TrustedImm32(MarkedSpace::largeCutoff >> stepShift)));
+ move(TrustedImmPtr(subspace.allocatorForSizeStep() - 1), scratchGPR2);
+ loadPtr(BaseIndex(scratchGPR2, scratchGPR1, timesPtr()), scratchGPR1);
+
+ emitAllocate(resultGPR, nullptr, scratchGPR1, scratchGPR2, slowPath);
+ }
+
+ template<typename ClassType, typename StructureType>
+ void emitAllocateVariableSizedCell(GPRReg resultGPR, StructureType structure, GPRReg allocationSize, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath)
+ {
+ Subspace& subspace = *subspaceFor<ClassType>(*vm());
+ emitAllocateVariableSized(resultGPR, subspace, allocationSize, scratchGPR1, scratchGPR2, slowPath);
+ emitStoreStructureWithTypeInfo(structure, resultGPR, scratchGPR2);
+ }
+
+ template<typename ClassType, typename StructureType>
+ void emitAllocateVariableSizedJSObject(GPRReg resultGPR, StructureType structure, GPRReg allocationSize, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath)
+ {
+ emitAllocateVariableSizedCell<ClassType>(resultGPR, structure, allocationSize, scratchGPR1, scratchGPR2, slowPath);
+ storePtr(TrustedImmPtr(0), Address(resultGPR, JSObject::butterflyOffset()));
+ }
+
+ void emitConvertValueToBoolean(JSValueRegs value, GPRReg result, GPRReg scratchIfShouldCheckMasqueradesAsUndefined, FPRReg, FPRReg, bool shouldCheckMasqueradesAsUndefined, JSGlobalObject*, bool negateResult = false);
+
+ template<typename ClassType>
+ void emitAllocateDestructibleObject(GPRReg resultGPR, Structure* structure, GPRReg scratchGPR1, GPRReg scratchGPR2, JumpList& slowPath)
+ {
+ emitAllocateJSObject<ClassType>(resultGPR, TrustedImmPtr(structure), TrustedImmPtr(0), scratchGPR1, scratchGPR2, slowPath);
+ storePtr(TrustedImmPtr(structure->classInfo()), Address(resultGPR, JSDestructibleObject::classInfoOffset()));
+ }
+
+ void emitInitializeInlineStorage(GPRReg baseGPR, unsigned inlineCapacity)
+ {
+ for (unsigned i = 0; i < inlineCapacity; ++i)
+ storeTrustedValue(JSValue(), Address(baseGPR, JSObject::offsetOfInlineStorage() + i * sizeof(EncodedJSValue)));
+ }
+
+ void emitInitializeInlineStorage(GPRReg baseGPR, GPRReg inlineCapacity)
+ {
+ Jump empty = branchTest32(Zero, inlineCapacity);
+ Label loop = label();
+ sub32(TrustedImm32(1), inlineCapacity);
+ storeTrustedValue(JSValue(), BaseIndex(baseGPR, inlineCapacity, TimesEight, JSObject::offsetOfInlineStorage()));
+ branchTest32(NonZero, inlineCapacity).linkTo(loop, this);
+ empty.link(this);
+ }
+
+ void emitInitializeOutOfLineStorage(GPRReg butterflyGPR, unsigned outOfLineCapacity)
+ {
+ for (unsigned i = 0; i < outOfLineCapacity; ++i)
+ storeTrustedValue(JSValue(), Address(butterflyGPR, -sizeof(IndexingHeader) - (i + 1) * sizeof(EncodedJSValue)));
+ }
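+
+    // Example (sketch): allocation sites clear the new object's slots before the object
+    // becomes visible to the GC, typically right after the allocate above, and then fence
+    // before publishing:
+    //
+    //     jit.emitInitializeInlineStorage(resultGPR, structure->inlineCapacity());
+    //     jit.mutatorFence();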
+
+#if USE(JSVALUE64)
+ void wangsInt64Hash(GPRReg inputAndResult, GPRReg scratch);
+#endif
protected:
VM* m_vm;
@@ -491,6 +1646,3 @@ protected:
} // namespace JSC
#endif // ENABLE(JIT)
-
-#endif // AssemblyHelpers_h
-
diff --git a/Source/JavaScriptCore/jit/BinarySwitch.cpp b/Source/JavaScriptCore/jit/BinarySwitch.cpp
new file mode 100644
index 000000000..f3ddcfca9
--- /dev/null
+++ b/Source/JavaScriptCore/jit/BinarySwitch.cpp
@@ -0,0 +1,391 @@
+/*
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "BinarySwitch.h"
+
+#if ENABLE(JIT)
+
+#include "JSCInlines.h"
+#include <wtf/ListDump.h>
+
+namespace JSC {
+
+static const bool verbose = false;
+
+static unsigned globalCounter; // We use a different seed every time we are invoked.
+
+BinarySwitch::BinarySwitch(GPRReg value, const Vector<int64_t>& cases, Type type)
+ : m_value(value)
+ , m_weakRandom(globalCounter++)
+ , m_index(0)
+ , m_caseIndex(UINT_MAX)
+ , m_type(type)
+{
+ if (cases.isEmpty())
+ return;
+
+ if (verbose)
+ dataLog("Original cases: ", listDump(cases), "\n");
+
+ for (unsigned i = 0; i < cases.size(); ++i)
+ m_cases.append(Case(cases[i], i));
+
+ std::sort(m_cases.begin(), m_cases.end());
+
+ if (verbose)
+ dataLog("Sorted cases: ", listDump(m_cases), "\n");
+
+ for (unsigned i = 1; i < m_cases.size(); ++i)
+ RELEASE_ASSERT(m_cases[i - 1] < m_cases[i]);
+
+ build(0, false, m_cases.size());
+}
+
+BinarySwitch::~BinarySwitch()
+{
+}
+
+bool BinarySwitch::advance(MacroAssembler& jit)
+{
+ if (m_cases.isEmpty()) {
+ m_fallThrough.append(jit.jump());
+ return false;
+ }
+
+ if (m_index == m_branches.size()) {
+ RELEASE_ASSERT(m_jumpStack.isEmpty());
+ return false;
+ }
+
+ for (;;) {
+ const BranchCode& code = m_branches[m_index++];
+ switch (code.kind) {
+ case NotEqualToFallThrough:
+ switch (m_type) {
+ case Int32:
+ m_fallThrough.append(jit.branch32(
+ MacroAssembler::NotEqual, m_value,
+ MacroAssembler::Imm32(static_cast<int32_t>(m_cases[code.index].value))));
+ break;
+ case IntPtr:
+ m_fallThrough.append(jit.branchPtr(
+ MacroAssembler::NotEqual, m_value,
+ MacroAssembler::ImmPtr(bitwise_cast<const void*>(static_cast<intptr_t>(m_cases[code.index].value)))));
+ break;
+ }
+ break;
+ case NotEqualToPush:
+ switch (m_type) {
+ case Int32:
+ m_jumpStack.append(jit.branch32(
+ MacroAssembler::NotEqual, m_value,
+ MacroAssembler::Imm32(static_cast<int32_t>(m_cases[code.index].value))));
+ break;
+ case IntPtr:
+ m_jumpStack.append(jit.branchPtr(
+ MacroAssembler::NotEqual, m_value,
+ MacroAssembler::ImmPtr(bitwise_cast<const void*>(static_cast<intptr_t>(m_cases[code.index].value)))));
+ break;
+ }
+ break;
+ case LessThanToPush:
+ switch (m_type) {
+ case Int32:
+ m_jumpStack.append(jit.branch32(
+ MacroAssembler::LessThan, m_value,
+ MacroAssembler::Imm32(static_cast<int32_t>(m_cases[code.index].value))));
+ break;
+ case IntPtr:
+ m_jumpStack.append(jit.branchPtr(
+ MacroAssembler::LessThan, m_value,
+ MacroAssembler::ImmPtr(bitwise_cast<const void*>(static_cast<intptr_t>(m_cases[code.index].value)))));
+ break;
+ }
+ break;
+ case Pop:
+ m_jumpStack.takeLast().link(&jit);
+ break;
+ case ExecuteCase:
+ m_caseIndex = code.index;
+ return true;
+ }
+ }
+}
+
+void BinarySwitch::build(unsigned start, bool hardStart, unsigned end)
+{
+ if (verbose)
+ dataLog("Building with start = ", start, ", hardStart = ", hardStart, ", end = ", end, "\n");
+
+ auto append = [&] (const BranchCode& code) {
+ if (verbose)
+ dataLog("==> ", code, "\n");
+ m_branches.append(code);
+ };
+
+ unsigned size = end - start;
+
+ RELEASE_ASSERT(size);
+
+ // This code uses some random numbers to keep things balanced. It's important to keep in mind
+ // that this does not improve average-case throughput under the assumption that all cases fire
+ // with equal probability. It just ensures that there will not be some switch structure that
+ // when combined with some input will always produce pathologically good or pathologically bad
+ // performance.
+
+ const unsigned leafThreshold = 3;
+
+ if (size <= leafThreshold) {
+ if (verbose)
+ dataLog("It's a leaf.\n");
+
+        // It turns out that for exactly three cases or fewer, it's better to just compare each
+ // case individually. This saves 1/6 of a branch on average, and up to 1/3 of a branch in
+ // extreme cases where the divide-and-conquer bottoms out in a lot of 3-case subswitches.
+ //
+ // This assumes that we care about the cost of hitting some case more than we care about
+ // bottoming out in a default case. I believe that in most places where we use switch
+ // statements, we are more likely to hit one of the cases than we are to fall through to
+ // default. Intuitively, if we wanted to improve the performance of default, we would
+ // reduce the value of leafThreshold to 2 or even to 1. See below for a deeper discussion.
+
+ bool allConsecutive = false;
+
+ if ((hardStart || (start && m_cases[start - 1].value == m_cases[start].value - 1))
+ && start + size < m_cases.size()
+ && m_cases[start + size - 1].value == m_cases[start + size].value - 1) {
+ allConsecutive = true;
+ for (unsigned i = 0; i < size - 1; ++i) {
+ if (m_cases[start + i].value + 1 != m_cases[start + i + 1].value) {
+ allConsecutive = false;
+ break;
+ }
+ }
+ }
+
+ if (verbose)
+ dataLog("allConsecutive = ", allConsecutive, "\n");
+
+ Vector<unsigned, 3> localCaseIndices;
+ for (unsigned i = 0; i < size; ++i)
+ localCaseIndices.append(start + i);
+
+ std::random_shuffle(
+ localCaseIndices.begin(), localCaseIndices.end(),
+ [this] (unsigned n) {
+            // We use modulo to get a random number in the range we want, fully aware that
+            // this introduces a tiny amount of bias; we are fine with such a tiny bias.
+ return m_weakRandom.getUint32() % n;
+ });
+
+ for (unsigned i = 0; i < size - 1; ++i) {
+ append(BranchCode(NotEqualToPush, localCaseIndices[i]));
+ append(BranchCode(ExecuteCase, localCaseIndices[i]));
+ append(BranchCode(Pop));
+ }
+
+ if (!allConsecutive)
+ append(BranchCode(NotEqualToFallThrough, localCaseIndices.last()));
+
+ append(BranchCode(ExecuteCase, localCaseIndices.last()));
+ return;
+ }
+
+ if (verbose)
+ dataLog("It's not a leaf.\n");
+
+ // There are two different strategies we could consider here:
+ //
+ // Isolate median and split: pick a median and check if the comparison value is equal to it;
+ // if so, execute the median case. Otherwise check if the value is less than the median, and
+ // recurse left or right based on this. This has two subvariants: we could either first test
+ // equality for the median and then do the less-than, or we could first do the less-than and
+ // then check equality on the not-less-than path.
+ //
+ // Ignore median and split: do a less-than comparison on a value that splits the cases in two
+ // equal-sized halves. Recurse left or right based on the comparison. Do not test for equality
+ // against the median (or anything else); let the recursion handle those equality comparisons
+    // once we bottom out in a list that has 3 cases or fewer (see above).
+ //
+ // I'll refer to these strategies as Isolate and Ignore. I initially believed that Isolate
+ // would be faster since it leads to less branching for some lucky cases. It turns out that
+ // Isolate is almost a total fail in the average, assuming all cases are equally likely. How
+ // bad Isolate is depends on whether you believe that doing two consecutive branches based on
+ // the same comparison is cheaper than doing the compare/branches separately. This is
+ // difficult to evaluate. For small immediates that aren't blinded, we just care about
+ // avoiding a second compare instruction. For large immediates or when blinding is in play, we
+ // also care about the instructions used to materialize the immediate a second time. Isolate
+ // can help with both costs since it involves first doing a < compare+branch on some value,
+ // followed by a == compare+branch on the same exact value (or vice-versa). Ignore will do a <
+ // compare+branch on some value, and then the == compare+branch on that same value will happen
+ // much later.
+ //
+ // To evaluate these costs, I wrote the recurrence relation for Isolate and Ignore, assuming
+ // that ComparisonCost is the cost of a compare+branch and ChainedComparisonCost is the cost
+ // of a compare+branch on some value that you've just done another compare+branch for. These
+ // recurrence relations compute the total cost incurred if you executed the switch statement
+ // on each matching value. So the average cost of hitting some case can be computed as
+ // Isolate[n]/n or Ignore[n]/n, respectively for the two relations.
+ //
+ // Isolate[1] = ComparisonCost
+ // Isolate[2] = (2 + 1) * ComparisonCost
+ // Isolate[3] = (3 + 2 + 1) * ComparisonCost
+ // Isolate[n_] := With[
+ // {medianIndex = Floor[n/2] + If[EvenQ[n], RandomInteger[], 1]},
+ // ComparisonCost + ChainedComparisonCost +
+ // (ComparisonCost * (medianIndex - 1) + Isolate[medianIndex - 1]) +
+ // (2 * ComparisonCost * (n - medianIndex) + Isolate[n - medianIndex])]
+ //
+ // Ignore[1] = ComparisonCost
+ // Ignore[2] = (2 + 1) * ComparisonCost
+ // Ignore[3] = (3 + 2 + 1) * ComparisonCost
+ // Ignore[n_] := With[
+ // {medianIndex = If[EvenQ[n], n/2, Floor[n/2] + RandomInteger[]]},
+ // (medianIndex * ComparisonCost + Ignore[medianIndex]) +
+ // ((n - medianIndex) * ComparisonCost + Ignore[n - medianIndex])]
+ //
+ // This does not account for the average cost of hitting the default case. See further below
+ // for a discussion of that.
+ //
+ // It turns out that for ComparisonCost = 1 and ChainedComparisonCost = 1, Ignore is always
+ // better than Isolate. If we assume that ChainedComparisonCost = 0, then Isolate wins for
+ // switch statements that have 20 cases or fewer, though the margin of victory is never large
+ // - it might sometimes save an average of 0.3 ComparisonCost. For larger switch statements,
+ // we see divergence between the two with Ignore winning. This is of course rather
+ // unrealistic since the chained comparison is never free. For ChainedComparisonCost = 0.5, we
+ // see Isolate winning for 10 cases or fewer, by maybe 0.2 ComparisonCost. Again we see
+ // divergence for large switches with Ignore winning, for example if a switch statement has
+ // 100 cases then Ignore saves one branch on average.
+ //
+ // Our current JIT backends don't provide for optimization for chained comparisons, except for
+ // reducing the code for materializing the immediate if the immediates are large or blinding
+ // comes into play. Probably our JIT backends live somewhere north of
+ // ChainedComparisonCost = 0.5.
+ //
+ // This implies that using the Ignore strategy is likely better. If we wanted to incorporate
+ // the Isolate strategy, we'd want to determine the switch size threshold at which the two
+ // cross over and then use Isolate for switches that are smaller than that size.
+ //
+ // The average cost of hitting the default case is similar, but involves a different cost for
+ // the base cases: you have to assume that you will always fail each branch. For the Ignore
+ // strategy we would get this recurrence relation; the same kind of thing happens to the
+ // Isolate strategy:
+ //
+ // Ignore[1] = ComparisonCost
+ // Ignore[2] = (2 + 2) * ComparisonCost
+ // Ignore[3] = (3 + 3 + 3) * ComparisonCost
+ // Ignore[n_] := With[
+ // {medianIndex = If[EvenQ[n], n/2, Floor[n/2] + RandomInteger[]]},
+ // (medianIndex * ComparisonCost + Ignore[medianIndex]) +
+ // ((n - medianIndex) * ComparisonCost + Ignore[n - medianIndex])]
+ //
+ // This means that if we cared about the default case more, we would likely reduce
+ // leafThreshold. Reducing it to 2 would reduce the average cost of the default case by 1/3
+ // in the most extreme cases (num switch cases = 3, 6, 12, 24, ...). But it would also
+ // increase the average cost of taking one of the non-default cases by 1/3. Typically the
+ // difference is 1/6 in either direction. This makes it a very simple trade-off: if we believe
+ // that the default case is more important then we would want leafThreshold to be 2, and the
+ // default case would become 1/6 faster on average. But we believe that most switch statements
+ // are more likely to take one of the cases than the default, so we use leafThreshold = 3
+ // and get a 1/6 speed-up on average for taking an explicit case.
+
+ unsigned medianIndex = (start + end) / 2;
+
+ if (verbose)
+ dataLog("medianIndex = ", medianIndex, "\n");
+
+ // We want medianIndex to point to the thing we will do a less-than compare against. We want
+ // this less-than compare to split the current sublist into equal-sized sublists, or
+ // nearly-equal-sized with some randomness if we're in the odd case. With the above
+ // calculation, in the odd case we will have medianIndex pointing at either the element we
+ // want or the element to the left of the one we want. Consider the case of five elements:
+ //
+ // 0 1 2 3 4
+ //
+ // start will be 0, end will be 5. The average is 2.5, which rounds down to 2. If we do
+ // value < 2, then we will split the list into 2 elements on the left and three on the right.
+ // That's pretty good, but in this odd case we'd like to at random choose 3 instead to ensure
+ // that we don't become unbalanced on the right. This does not improve throughput since one
+ // side will always get shafted, and that side might still be odd, in which case it will also
+ // have two sides and one of them will get shafted - and so on. We just want to avoid
+ // deterministic pathologies.
+ //
+ // In the even case, we will always end up pointing at the element we want:
+ //
+ // 0 1 2 3
+ //
+ // start will be 0, end will be 4. So, the average is 2, which is what we'd like.
+ if (size & 1) {
+ RELEASE_ASSERT(medianIndex - start + 1 == end - medianIndex);
+ medianIndex += m_weakRandom.getUint32() & 1;
+ } else
+ RELEASE_ASSERT(medianIndex - start == end - medianIndex);
+
+ RELEASE_ASSERT(medianIndex > start);
+ RELEASE_ASSERT(medianIndex + 1 < end);
+
+ if (verbose)
+ dataLog("fixed medianIndex = ", medianIndex, "\n");
+
+ append(BranchCode(LessThanToPush, medianIndex));
+ build(medianIndex, true, end);
+ append(BranchCode(Pop));
+ build(start, hardStart, medianIndex);
+}
+
+void BinarySwitch::Case::dump(PrintStream& out) const
+{
+ out.print("<value: " , value, ", index: ", index, ">");
+}
+
+void BinarySwitch::BranchCode::dump(PrintStream& out) const
+{
+ switch (kind) {
+ case NotEqualToFallThrough:
+ out.print("NotEqualToFallThrough");
+ break;
+ case NotEqualToPush:
+ out.print("NotEqualToPush");
+ break;
+ case LessThanToPush:
+ out.print("LessThanToPush");
+ break;
+ case Pop:
+ out.print("Pop");
+ break;
+ case ExecuteCase:
+ out.print("ExecuteCase");
+ break;
+ }
+
+ if (index != UINT_MAX)
+ out.print("(", index, ")");
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
diff --git a/Source/JavaScriptCore/jit/BinarySwitch.h b/Source/JavaScriptCore/jit/BinarySwitch.h
new file mode 100644
index 000000000..c2569d335
--- /dev/null
+++ b/Source/JavaScriptCore/jit/BinarySwitch.h
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "GPRInfo.h"
+#include "MacroAssembler.h"
+#include <wtf/WeakRandom.h>
+
+namespace JSC {
+
+// The BinarySwitch class makes it easy to emit a switch statement over either
+// 32-bit integers or pointers, where the switch uses a tree of branches
+// rather than a jump table. This makes it particularly useful if the case
+// values are too far apart to make a jump table practical, or if there are
+// sufficiently few cases that the total cost of log(numCases) branches is
+// less than the cost of an indirect jump.
+//
+// In an effort to simplify the logic of emitting code for each case, this
+// uses an iterator style, rather than a functor callback style. This makes
+// sense because even the iterator implementation found herein is relatively
+// simple, whereas the code it's used from is usually quite complex - one
+// example being the trie-of-trees string switch implementation, where the
+// code emitted for each case involves recursing to emit code for a sub-trie.
+//
+// Use this like so:
+//
+// BinarySwitch binarySwitch(valueReg, casesVector, BinarySwitch::Int32);
+// while (binarySwitch.advance(jit)) {
+//     int64_t value = binarySwitch.caseValue();
+//     unsigned index = binarySwitch.caseIndex(); // index into casesVector, above
+//     ... // generate code for this case
+//     ... = jit.jump(); // you have to jump out yourself; falling through causes undefined behavior
+// }
+// binarySwitch.fallThrough().link(&jit);
+
+class BinarySwitch {
+public:
+ enum Type {
+ Int32,
+ IntPtr
+ };
+
+ BinarySwitch(GPRReg value, const Vector<int64_t>& cases, Type);
+ ~BinarySwitch();
+
+ unsigned caseIndex() const { return m_cases[m_caseIndex].index; }
+ int64_t caseValue() const { return m_cases[m_caseIndex].value; }
+
+ bool advance(MacroAssembler&);
+
+ MacroAssembler::JumpList& fallThrough() { return m_fallThrough; }
+
+private:
+ void build(unsigned start, bool hardStart, unsigned end);
+
+ GPRReg m_value;
+
+ struct Case {
+ Case() { }
+
+ Case(int64_t value, unsigned index)
+ : value(value)
+ , index(index)
+ {
+ }
+
+ bool operator<(const Case& other) const
+ {
+ return value < other.value;
+ }
+
+ void dump(PrintStream& out) const;
+
+ int64_t value;
+ unsigned index;
+ };
+
+ Vector<Case> m_cases;
+
+ enum BranchKind {
+ NotEqualToFallThrough,
+ NotEqualToPush,
+ LessThanToPush,
+ Pop,
+ ExecuteCase
+ };
+
+ struct BranchCode {
+ BranchCode() { }
+
+ BranchCode(BranchKind kind, unsigned index = UINT_MAX)
+ : kind(kind)
+ , index(index)
+ {
+ }
+
+ void dump(PrintStream& out) const;
+
+ BranchKind kind;
+ unsigned index;
+ };
+
+ WeakRandom m_weakRandom;
+
+ Vector<BranchCode> m_branches;
+
+ unsigned m_index;
+ unsigned m_caseIndex;
+ Vector<MacroAssembler::Jump> m_jumpStack;
+
+ MacroAssembler::JumpList m_fallThrough;
+
+ Type m_type;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/CCallHelpers.cpp b/Source/JavaScriptCore/jit/CCallHelpers.cpp
new file mode 100644
index 000000000..3c3df618f
--- /dev/null
+++ b/Source/JavaScriptCore/jit/CCallHelpers.cpp
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "CCallHelpers.h"
+
+#if ENABLE(JIT)
+
+#include "ShadowChicken.h"
+
+namespace JSC {
+
+void CCallHelpers::logShadowChickenProloguePacket(GPRReg shadowPacket, GPRReg scratch1, GPRReg scope)
+{
+ storePtr(GPRInfo::callFrameRegister, Address(shadowPacket, OBJECT_OFFSETOF(ShadowChicken::Packet, frame)));
+ loadPtr(Address(GPRInfo::callFrameRegister, OBJECT_OFFSETOF(CallerFrameAndPC, callerFrame)), scratch1);
+ storePtr(scratch1, Address(shadowPacket, OBJECT_OFFSETOF(ShadowChicken::Packet, callerFrame)));
+ loadPtr(addressFor(CallFrameSlot::callee), scratch1);
+ storePtr(scratch1, Address(shadowPacket, OBJECT_OFFSETOF(ShadowChicken::Packet, callee)));
+ storePtr(scope, Address(shadowPacket, OBJECT_OFFSETOF(ShadowChicken::Packet, scope)));
+}
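+
+// A typical prologue-logging sequence (sketch) first reserves a packet with
+// ensureShadowChickenPacket() below and then fills it in; shadowPacketGPR, nonArgScratchGPR,
+// scratchGPR and scopeGPR are whatever registers the caller has free:
+//
+//     jit.ensureShadowChickenPacket(shadowPacketGPR, nonArgScratchGPR, scratchGPR);
+//     jit.logShadowChickenProloguePacket(shadowPacketGPR, scratchGPR, scopeGPR);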
+
+void CCallHelpers::logShadowChickenTailPacket(GPRReg shadowPacket, JSValueRegs thisRegs, GPRReg scope, CodeBlock* codeBlock, CallSiteIndex callSiteIndex)
+{
+ storePtr(GPRInfo::callFrameRegister, Address(shadowPacket, OBJECT_OFFSETOF(ShadowChicken::Packet, frame)));
+ storePtr(TrustedImmPtr(ShadowChicken::Packet::tailMarker()), Address(shadowPacket, OBJECT_OFFSETOF(ShadowChicken::Packet, callee)));
+ storeValue(thisRegs, Address(shadowPacket, OBJECT_OFFSETOF(ShadowChicken::Packet, thisValue)));
+ storePtr(scope, Address(shadowPacket, OBJECT_OFFSETOF(ShadowChicken::Packet, scope)));
+ storePtr(TrustedImmPtr(codeBlock), Address(shadowPacket, OBJECT_OFFSETOF(ShadowChicken::Packet, codeBlock)));
+ store32(TrustedImm32(callSiteIndex.bits()), Address(shadowPacket, OBJECT_OFFSETOF(ShadowChicken::Packet, callSiteIndex)));
+}
+
+void CCallHelpers::ensureShadowChickenPacket(GPRReg shadowPacket, GPRReg scratch1NonArgGPR, GPRReg scratch2)
+{
+ ASSERT(!RegisterSet::argumentGPRS().get(scratch1NonArgGPR));
+ move(TrustedImmPtr(vm()->shadowChicken().addressOfLogCursor()), scratch1NonArgGPR);
+ loadPtr(Address(scratch1NonArgGPR), shadowPacket);
+ Jump ok = branchPtr(Below, shadowPacket, TrustedImmPtr(vm()->shadowChicken().logEnd()));
+ setupArgumentsExecState();
+ move(TrustedImmPtr(bitwise_cast<void*>(operationProcessShadowChickenLog)), scratch1NonArgGPR);
+ call(scratch1NonArgGPR);
+ move(TrustedImmPtr(vm()->shadowChicken().addressOfLogCursor()), scratch1NonArgGPR);
+ loadPtr(Address(scratch1NonArgGPR), shadowPacket);
+ ok.link(this);
+ addPtr(TrustedImm32(sizeof(ShadowChicken::Packet)), shadowPacket, scratch2);
+ storePtr(scratch2, Address(scratch1NonArgGPR));
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/CCallHelpers.h b/Source/JavaScriptCore/jit/CCallHelpers.h
index afcccd1ca..8a3c90de6 100644
--- a/Source/JavaScriptCore/jit/CCallHelpers.h
+++ b/Source/JavaScriptCore/jit/CCallHelpers.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2015-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,24 +23,55 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef CCallHelpers_h
-#define CCallHelpers_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(JIT)
#include "AssemblyHelpers.h"
#include "GPRInfo.h"
+#include "RegisterMap.h"
+#include "StackAlignment.h"
namespace JSC {
+#if CPU(MIPS) || (OS(WINDOWS) && CPU(X86_64))
+#define POKE_ARGUMENT_OFFSET 4
+#else
+#define POKE_ARGUMENT_OFFSET 0
+#endif
+
+// EncodedJSValue in JSVALUE32_64 is a 64-bit integer. When compiling for ARM EABI, it must be passed starting at an even-numbered register (r0, r2) or on the stack ([sp]).
+// To keep the generated call setup from using the wrong registers, we occupy r1 or r3 with a dummy argument when necessary.
+#if (COMPILER_SUPPORTS(EABI) && CPU(ARM)) || CPU(MIPS)
+#define EABI_32BIT_DUMMY_ARG CCallHelpers::TrustedImm32(0),
+#else
+#define EABI_32BIT_DUMMY_ARG
+#endif
+
class CCallHelpers : public AssemblyHelpers {
public:
CCallHelpers(VM* vm, CodeBlock* codeBlock = 0)
: AssemblyHelpers(vm, codeBlock)
{
}
+
+ // The most general helper for setting arguments that fit in a GPR, if you can compute each
+ // argument without using any argument registers. You usually want one of the setupArguments*()
+ // methods below instead of this. This thing is most useful if you have *a lot* of arguments.
+ template<typename Functor>
+ void setupArgument(unsigned argumentIndex, const Functor& functor)
+ {
+ unsigned numberOfRegs = GPRInfo::numberOfArgumentRegisters; // Disguise the constant from clang's tautological compare warning.
+ if (argumentIndex < numberOfRegs) {
+ functor(GPRInfo::toArgumentRegister(argumentIndex));
+ return;
+ }
+
+ functor(GPRInfo::nonArgGPR0);
+ poke(GPRInfo::nonArgGPR0, POKE_ARGUMENT_OFFSET + argumentIndex - GPRInfo::numberOfArgumentRegisters);
+ }
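+
+    // For example (sketch), to pass ExecState plus two extra values when each one can be
+    // computed without touching the argument registers (somePointer is illustrative):
+    //
+    //     jit.setupArgument(2, [&] (GPRReg destGPR) { jit.move(CCallHelpers::TrustedImm32(42), destGPR); });
+    //     jit.setupArgument(1, [&] (GPRReg destGPR) { jit.move(CCallHelpers::TrustedImmPtr(somePointer), destGPR); });
+    //     jit.setupArgument(0, [&] (GPRReg destGPR) { jit.move(GPRInfo::callFrameRegister, destGPR); });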
+
+ void setupArgumentsWithExecState() { setupArgumentsExecState(); }
// These methods used to sort arguments into the correct registers.
// On X86 we use cdecl calling conventions, which pass all arguments on the
@@ -95,6 +126,13 @@ public:
addCallArgument(arg2);
}
+ ALWAYS_INLINE void setupArguments(TrustedImmPtr arg1, GPRReg arg2)
+ {
+ resetCallArguments();
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ }
+
ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2, GPRReg arg3)
{
resetCallArguments();
@@ -182,6 +220,15 @@ public:
addCallArgument(arg2);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2)
{
resetCallArguments();
@@ -239,6 +286,15 @@ public:
addCallArgument(arg3);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImm32 arg2, GPRReg arg3)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3)
{
resetCallArguments();
@@ -275,6 +331,36 @@ public:
addCallArgument(arg3);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, TrustedImm32 arg4)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImmPtr arg4)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, TrustedImmPtr arg3)
{
resetCallArguments();
@@ -284,6 +370,58 @@ public:
addCallArgument(arg3);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, TrustedImm32 arg3)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, TrustedImm32 arg5)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4, TrustedImm32 arg5)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, GPRReg arg5, TrustedImm32 arg6)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ addCallArgument(arg6);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3)
{
resetCallArguments();
@@ -347,6 +485,57 @@ public:
addCallArgument(arg3);
addCallArgument(arg4);
}
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImm32 arg2, GPRReg arg3, TrustedImmPtr arg4)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImmPtr arg4)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4, TrustedImm32 arg5)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ }
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5)
{
@@ -359,6 +548,27 @@ public:
addCallArgument(arg5);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4, TrustedImm32 arg5)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4)
{
resetCallArguments();
@@ -379,6 +589,17 @@ public:
addCallArgument(arg4);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, GPRReg arg5)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3)
{
resetCallArguments();
@@ -438,6 +659,17 @@ public:
addCallArgument(arg4);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImmPtr arg5)
{
resetCallArguments();
@@ -493,6 +725,16 @@ public:
addCallArgument(arg4);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, TrustedImmPtr arg3, GPRReg arg4)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4)
{
resetCallArguments();
@@ -503,6 +745,16 @@ public:
addCallArgument(arg4);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, GPRReg arg3, GPRReg arg4)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5)
{
resetCallArguments();
@@ -557,7 +809,7 @@ public:
addCallArgument(arg4);
addCallArgument(arg5);
}
-
+
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6)
{
resetCallArguments();
@@ -569,6 +821,44 @@ public:
addCallArgument(arg5);
addCallArgument(arg6);
}
+
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, TrustedImm32 arg6)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ addCallArgument(arg6);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, TrustedImmPtr arg5, TrustedImmPtr arg6)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ addCallArgument(arg6);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6, TrustedImmPtr arg7)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ addCallArgument(arg6);
+ addCallArgument(arg7);
+ }
ALWAYS_INLINE void setupArgumentsWithExecState(FPRReg arg1, GPRReg arg2)
{
@@ -578,6 +868,14 @@ public:
addCallArgument(arg2);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(FPRReg arg1, TrustedImm32 arg2)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, FPRReg arg3)
{
resetCallArguments();
@@ -586,6 +884,21 @@ public:
addCallArgument(arg2);
addCallArgument(arg3);
}
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6, GPRReg arg7, GPRReg arg8)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ addCallArgument(arg6);
+ addCallArgument(arg7);
+ addCallArgument(arg8);
+ }
+
#endif // !NUMBER_OF_ARGUMENT_REGISTERS
// These methods are suitable for any calling convention that provides for
// at least 4 argument registers, e.g. X86_64, ARMv7.
@@ -672,6 +985,15 @@ public:
swap(destB, destC);
}
+ void setupFourStubArgsGPR(GPRReg destA, GPRReg destB, GPRReg destC, GPRReg destD, GPRReg srcA, GPRReg srcB, GPRReg srcC, GPRReg srcD)
+ {
+ setupStubArgsGPR<4>({ { destA, destB, destC, destD } }, { { srcA, srcB, srcC, srcD } });
+ }
+ void setupFiveStubArgsGPR(GPRReg destA, GPRReg destB, GPRReg destC, GPRReg destD, GPRReg destE, GPRReg srcA, GPRReg srcB, GPRReg srcC, GPRReg srcD, GPRReg srcE)
+ {
+ setupStubArgsGPR<5>({ { destA, destB, destC, destD, destE } }, { { srcA, srcB, srcC, srcD, srcE } });
+ }
+
#if CPU(X86_64) || CPU(ARM64)
template<FPRReg destA, FPRReg destB>
void setupTwoStubArgsFPR(FPRReg srcA, FPRReg srcB)
@@ -730,12 +1052,6 @@ public:
setupThreeStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2, GPRInfo::argumentGPR3>(arg1, arg2, arg3);
}
-#if CPU(MIPS) || (OS(WINDOWS) && CPU(X86_64))
-#define POKE_ARGUMENT_OFFSET 4
-#else
-#define POKE_ARGUMENT_OFFSET 0
-#endif
-
#if CPU(X86_64) || CPU(ARM64)
ALWAYS_INLINE void setupArguments(FPRReg arg1)
{
@@ -749,14 +1065,41 @@ public:
ALWAYS_INLINE void setupArgumentsWithExecState(FPRReg arg1, GPRReg arg2)
{
+#if OS(WINDOWS) && CPU(X86_64)
+ // On Windows, arguments map to designated registers based on the argument positions, even when there are interlaced scalar and floating point arguments.
+ // See http://msdn.microsoft.com/en-us/library/zthk2dkh.aspx
+ moveDouble(arg1, FPRInfo::argumentFPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+#else
+ moveDouble(arg1, FPRInfo::argumentFPR0);
+ move(arg2, GPRInfo::argumentGPR1);
+#endif
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
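
Editorial note on the Windows path above (not part of the patch): the choice of argumentFPR1/argumentGPR2 follows from Win64's positional slot assignment, summarized here under the usual mapping of JSC's argument registers on that target:

    // Win64 assigns one slot per argument position across both register files:
    //   slot 0 -> RCX / XMM0,  slot 1 -> RDX / XMM1,
    //   slot 2 -> R8  / XMM2,  slot 3 -> R9  / XMM3.
    // ExecState occupies slot 0 (RCX), so the double takes slot 1 (XMM1 == argumentFPR1)
    // and the trailing GPR argument takes slot 2 (R8 == argumentGPR2).
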
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(FPRReg arg1, TrustedImm32 arg2)
+ {
+#if OS(WINDOWS) && CPU(X86_64)
+ // On Windows, arguments map to designated registers based on the argument positions, even when there are interlaced scalar and floating point arguments.
+ // See http://msdn.microsoft.com/en-us/library/zthk2dkh.aspx
+ moveDouble(arg1, FPRInfo::argumentFPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+#else
moveDouble(arg1, FPRInfo::argumentFPR0);
move(arg2, GPRInfo::argumentGPR1);
+#endif
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
}
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, FPRReg arg3)
{
+#if OS(WINDOWS) && CPU(X86_64)
+ // On Windows, arguments map to designated registers based on the argument positions, even when there are interlaced scalar and floating point arguments.
+ // See http://msdn.microsoft.com/en-us/library/zthk2dkh.aspx
+ moveDouble(arg3, FPRInfo::argumentFPR3);
+#else
moveDouble(arg3, FPRInfo::argumentFPR0);
+#endif
setupStubArguments(arg1, arg2);
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
}
@@ -790,6 +1133,13 @@ public:
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(FPRReg arg1, TrustedImm32 arg2)
+ {
+ moveDouble(arg1, FPRInfo::argumentFPR0);
+ move(arg2, GPRInfo::argumentGPR1);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, FPRReg arg3)
{
moveDouble(arg3, FPRInfo::argumentFPR0);
@@ -830,6 +1180,13 @@ public:
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(FPRReg arg1, TrustedImm32 arg2)
+ {
+ move(arg2, GPRInfo::argumentGPR3);
+ assembler().vmov(GPRInfo::argumentGPR1, GPRInfo::argumentGPR2, arg1);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, FPRReg arg3)
{
setupStubArguments(arg1, arg2);
@@ -883,6 +1240,13 @@ public:
poke(arg2, 4);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(FPRReg arg1, TrustedImm32 arg2)
+ {
+ assembler().vmov(GPRInfo::argumentGPR2, GPRInfo::argumentGPR3, arg1);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ poke(arg2, 4);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, FPRReg arg3)
{
setupStubArguments(arg1, arg2);
@@ -890,46 +1254,15 @@ public:
poke(arg3, 4);
}
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, FPRReg arg2, GPRReg arg3)
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32, FPRReg arg2, GPRReg arg3)
{
setupArgumentsWithExecState(arg2, arg3);
}
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, FPRReg arg4)
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32, FPRReg arg4)
{
setupArgumentsWithExecState(arg1, arg2, arg4);
}
-#elif CPU(SH4)
- ALWAYS_INLINE void setupArguments(FPRReg arg1)
- {
- moveDouble(arg1, FPRInfo::argumentFPR0);
- }
-
- ALWAYS_INLINE void setupArguments(FPRReg arg1, FPRReg arg2)
- {
- if (arg2 != FPRInfo::argumentFPR0) {
- moveDouble(arg1, FPRInfo::argumentFPR0);
- moveDouble(arg2, FPRInfo::argumentFPR1);
- } else if (arg1 != FPRInfo::argumentFPR1) {
- moveDouble(arg2, FPRInfo::argumentFPR1);
- moveDouble(arg1, FPRInfo::argumentFPR0);
- } else
- swapDouble(FPRInfo::argumentFPR0, FPRInfo::argumentFPR1);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(FPRReg arg1, GPRReg arg2)
- {
- moveDouble(arg1, FPRInfo::argumentFPR0);
- move(arg2, GPRInfo::argumentGPR1);
- move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, FPRReg arg3)
- {
- moveDouble(arg3, FPRInfo::argumentFPR0);
- setupStubArguments(arg1, arg2);
- move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- }
#else
#error "JIT not supported on this platform."
#endif
@@ -939,6 +1272,12 @@ public:
move(arg1, GPRInfo::argumentGPR0);
}
+ ALWAYS_INLINE void setupArguments(TrustedImmPtr arg1, GPRReg arg2)
+ {
+ move(arg2, GPRInfo::argumentGPR1);
+ move(arg1, GPRInfo::argumentGPR0);
+ }
+
ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2)
{
setupTwoStubArgsGPR<GPRInfo::argumentGPR0, GPRInfo::argumentGPR1>(arg1, arg2);
@@ -956,6 +1295,12 @@ public:
move(arg4, GPRInfo::argumentGPR3);
}
+ ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4)
+ {
+ setupThreeStubArgsGPR<GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, GPRInfo::argumentGPR2>(arg1, arg2, arg3);
+ move(arg4, GPRInfo::argumentGPR3);
+ }
+
ALWAYS_INLINE void setupArguments(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, TrustedImmPtr arg4)
{
setupTwoStubArgsGPR<GPRInfo::argumentGPR0, GPRInfo::argumentGPR2>(arg1, arg3);
@@ -1000,6 +1345,14 @@ public:
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
}
+#if OS(WINDOWS) && CPU(X86_64)
+ ALWAYS_INLINE void setupArgumentsWithExecStateForCallWithSlowPathReturnType(TrustedImm32 arg1)
+ {
+ move(arg1, GPRInfo::argumentGPR2);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
+ }
+#endif
+
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2)
{
setupStubArguments(arg1, arg2);
@@ -1083,6 +1436,14 @@ public:
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImm32 arg2, GPRReg arg3)
+ {
+ move(arg3, GPRInfo::argumentGPR3);
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImm32 arg2, TrustedImm32 arg3)
{
move(arg1, GPRInfo::argumentGPR1);
@@ -1133,6 +1494,14 @@ public:
move(arg3, GPRInfo::argumentGPR3);
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
}
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, TrustedImm32 arg3)
+ {
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImmPtr arg3)
{
@@ -1157,6 +1526,14 @@ public:
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImmPtr arg2, GPRReg arg3)
+ {
+ move(arg3, GPRInfo::argumentGPR3);
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3)
{
move(arg3, GPRInfo::argumentGPR3);
@@ -1165,6 +1542,14 @@ public:
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, TrustedImm32 arg3)
+ {
+ move(arg3, GPRInfo::argumentGPR3);
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, TrustedImm32 arg3)
{
move(arg2, GPRInfo::argumentGPR2);
@@ -1238,12 +1623,40 @@ public:
setupArgumentsWithExecState(arg1, arg2, arg3);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4)
+ {
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4)
{
poke(arg4, POKE_ARGUMENT_OFFSET);
setupArgumentsWithExecState(arg1, arg2, arg3);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, GPRReg arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, TrustedImmPtr arg5, TrustedImmPtr arg6)
+ {
+ poke(arg6, POKE_ARGUMENT_OFFSET + 2);
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, TrustedImmPtr arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4)
{
poke(arg4, POKE_ARGUMENT_OFFSET);
@@ -1268,12 +1681,55 @@ public:
setupArgumentsWithExecState(arg1, arg2, arg3);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImm32 arg4)
+ {
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4, TrustedImm32 arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+#if CPU(X86_64)
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm64 arg4)
+ {
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+#endif
+
ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImmPtr arg4)
{
poke(arg4, POKE_ARGUMENT_OFFSET);
setupArgumentsWithExecState(arg1, arg2, arg3);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5, TrustedImm32 arg6)
+ {
+ poke(arg6, POKE_ARGUMENT_OFFSET + 2);
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImmPtr arg5)
{
poke(arg5, POKE_ARGUMENT_OFFSET + 1);
@@ -1294,6 +1750,12 @@ public:
setupArgumentsWithExecState(arg1, arg2, arg3);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, TrustedImmPtr arg3, GPRReg arg4)
+ {
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImm32 arg4, TrustedImm32 arg5)
{
poke(arg5, POKE_ARGUMENT_OFFSET + 1);
@@ -1356,6 +1818,30 @@ public:
poke(arg4, POKE_ARGUMENT_OFFSET);
setupArgumentsWithExecState(arg1, arg2, arg3);
}
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImmPtr arg4)
+ {
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImmPtr arg4)
+ {
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4)
+ {
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, GPRReg arg3, GPRReg arg4)
+ {
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5)
{
@@ -1385,6 +1871,47 @@ public:
setupArgumentsWithExecState(arg1, arg2, arg3);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4, TrustedImm32 arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4)
+ {
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4, TrustedImm32 arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5)
{
poke(arg5, POKE_ARGUMENT_OFFSET + 1);
@@ -1392,6 +1919,13 @@ public:
setupArgumentsWithExecState(arg1, arg2, arg3);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImmPtr arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5)
{
poke(arg5, POKE_ARGUMENT_OFFSET + 1);
@@ -1399,6 +1933,13 @@ public:
setupArgumentsWithExecState(arg1, arg2, arg3);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4, TrustedImmPtr arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, GPRReg arg4, TrustedImm32 arg5)
{
poke(arg5, POKE_ARGUMENT_OFFSET + 1);
@@ -1414,6 +1955,14 @@ public:
setupArgumentsWithExecState(arg1, arg2, arg3);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, TrustedImm32 arg6)
+ {
+ poke(arg6, POKE_ARGUMENT_OFFSET + 2);
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5, TrustedImm32 arg6)
{
poke(arg6, POKE_ARGUMENT_OFFSET + 2);
@@ -1422,6 +1971,23 @@ public:
setupArgumentsWithExecState(arg1, arg2, arg3);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6)
+ {
+ poke(arg6, POKE_ARGUMENT_OFFSET + 2);
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6, TrustedImmPtr arg7)
+ {
+ poke(arg7, POKE_ARGUMENT_OFFSET + 3);
+ poke(arg6, POKE_ARGUMENT_OFFSET + 2);
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, GPRReg arg4, GPRReg arg5)
{
poke(arg5, POKE_ARGUMENT_OFFSET + 1);
@@ -1469,6 +2035,22 @@ public:
setupArgumentsWithExecState(arg1, arg2, arg3);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, GPRReg arg5, TrustedImm32 arg6)
+ {
+ poke(arg6, POKE_ARGUMENT_OFFSET + 2);
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, TrustedImmPtr arg6)
+ {
+ poke(arg6, POKE_ARGUMENT_OFFSET + 2);
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6, GPRReg arg7)
{
poke(arg7, POKE_ARGUMENT_OFFSET + 3);
@@ -1487,6 +2069,46 @@ public:
setupArgumentsWithExecState(arg1, arg2, arg3);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, TrustedImmPtr arg6, TrustedImmPtr arg7)
+ {
+ poke(arg7, POKE_ARGUMENT_OFFSET + 3);
+ poke(arg6, POKE_ARGUMENT_OFFSET + 2);
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6, GPRReg arg7, TrustedImmPtr arg8)
+ {
+ poke(arg8, POKE_ARGUMENT_OFFSET + 4);
+ poke(arg7, POKE_ARGUMENT_OFFSET + 3);
+ poke(arg6, POKE_ARGUMENT_OFFSET + 2);
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6, GPRReg arg7, GPRReg arg8)
+ {
+ poke(arg8, POKE_ARGUMENT_OFFSET + 4);
+ poke(arg7, POKE_ARGUMENT_OFFSET + 3);
+ poke(arg6, POKE_ARGUMENT_OFFSET + 2);
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6, GPRReg arg7, GPRReg arg8, GPRReg arg9)
+ {
+ poke(arg9, POKE_ARGUMENT_OFFSET + 5);
+ poke(arg8, POKE_ARGUMENT_OFFSET + 4);
+ poke(arg7, POKE_ARGUMENT_OFFSET + 3);
+ poke(arg6, POKE_ARGUMENT_OFFSET + 2);
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5, GPRReg arg6, GPRReg arg7)
{
poke(arg7, POKE_ARGUMENT_OFFSET + 3);
@@ -1496,6 +2118,15 @@ public:
setupArgumentsWithExecState(arg1, arg2, arg3);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, TrustedImm32 arg5, GPRReg arg6, GPRReg arg7)
+ {
+ poke(arg7, POKE_ARGUMENT_OFFSET + 3);
+ poke(arg6, POKE_ARGUMENT_OFFSET + 2);
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4, GPRReg arg5)
{
poke(arg5, POKE_ARGUMENT_OFFSET);
@@ -1503,14 +2134,41 @@ public:
move(arg3, GPRInfo::argumentGPR2);
move(arg4, GPRInfo::argumentGPR3);
}
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, TrustedImm32 arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImmPtr arg4, GPRReg arg5)
+ {
+ poke(arg5, POKE_ARGUMENT_OFFSET + 1);
+ poke(arg4, POKE_ARGUMENT_OFFSET);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
#endif // NUMBER_OF_ARGUMENT_REGISTERS == 4
#if NUMBER_OF_ARGUMENT_REGISTERS >= 5
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4)
+ {
+ setupFourStubArgsGPR(GPRInfo::argumentGPR1, GPRInfo::argumentGPR2, GPRInfo::argumentGPR3, GPRInfo::argumentGPR4, arg1, arg2, arg3, arg4);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
void setupStubArguments134(GPRReg arg1, GPRReg arg3, GPRReg arg4)
{
setupThreeStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR3, GPRInfo::argumentGPR4>(arg1, arg3, arg4);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4)
+ {
+ setupThreeStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2, GPRInfo::argumentGPR3>(arg1, arg2, arg3);
+ move(arg4, GPRInfo::argumentGPR4);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4)
{
setupTwoStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR4>(arg1, arg4);
@@ -1519,6 +2177,36 @@ public:
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4)
+ {
+ setupTwoStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2>(arg1, arg2);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(arg4, GPRInfo::argumentGPR4);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImmPtr arg4)
+ {
+ setupTwoStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2>(arg1, arg2);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(arg4, GPRInfo::argumentGPR4);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5)
+ {
+ setupThreeStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR4, GPRInfo::argumentGPR5>(arg1, arg4, arg5);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5)
+ {
+ setupFiveStubArgsGPR(GPRInfo::argumentGPR1, GPRInfo::argumentGPR2, GPRInfo::argumentGPR3, GPRInfo::argumentGPR4, GPRInfo::argumentGPR5, arg1, arg2, arg3, arg4, arg5);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4)
{
setupStubArguments134(arg1, arg3, arg4);
@@ -1534,6 +2222,76 @@ public:
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImm32 arg4)
+ {
+ move(arg2, GPRInfo::argumentGPR2); // In case arg2 is argumentGPR1.
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(arg4, GPRInfo::argumentGPR4);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm64 arg4)
+ {
+ move(arg2, GPRInfo::argumentGPR2); // In case arg2 is argumentGPR1.
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(arg4, GPRInfo::argumentGPR4);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4, TrustedImm32 arg5)
+ {
+ move(arg2, GPRInfo::argumentGPR2); // In case arg2 is argumentGPR1.
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(arg4, GPRInfo::argumentGPR4);
+ move(arg5, GPRInfo::argumentGPR5);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5)
+ {
+ move(arg3, GPRInfo::argumentGPR3);
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(arg4, GPRInfo::argumentGPR4);
+ move(arg5, GPRInfo::argumentGPR5);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, TrustedImm32 arg5)
+ {
+ setupTwoStubArgsGPR<GPRInfo::argumentGPR2, GPRInfo::argumentGPR4>(arg2, arg4);
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(arg5, GPRInfo::argumentGPR5);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5)
+ {
+ setupTwoStubArgsGPR<GPRInfo::argumentGPR2, GPRInfo::argumentGPR3>(arg2, arg3);
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg4, GPRInfo::argumentGPR4);
+ move(arg5, GPRInfo::argumentGPR5);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4)
+ {
+ setupThreeStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2, GPRInfo::argumentGPR3>(arg1, arg2, arg3);
+ move(arg4, GPRInfo::argumentGPR4);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4)
+ {
+ setupThreeStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2, GPRInfo::argumentGPR4>(arg1, arg2, arg4);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
ALWAYS_INLINE void setupArguments(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4, TrustedImmPtr arg5)
{
setupThreeStubArgsGPR<GPRInfo::argumentGPR0, GPRInfo::argumentGPR2, GPRInfo::argumentGPR3>(arg1, arg3, arg4);
@@ -1549,6 +2307,69 @@ public:
}
#endif
+ void setupArgumentsWithExecState(JSValueRegs arg)
+ {
+#if USE(JSVALUE64)
+ setupArgumentsWithExecState(arg.gpr());
+#else
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg.payloadGPR(), arg.tagGPR());
+#endif
+ }
+
+ void setupArgumentsWithExecState(JSValueRegs arg1, JSValueRegs arg2)
+ {
+#if USE(JSVALUE64)
+ setupArgumentsWithExecState(arg1.gpr(), arg2.gpr());
+#else
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1.payloadGPR(), arg1.tagGPR(), arg2.payloadGPR(), arg2.tagGPR());
+#endif
+ }
+
+ void setupArgumentsWithExecState(JSValueRegs arg1, TrustedImmPtr arg2)
+ {
+#if USE(JSVALUE64)
+ setupArgumentsWithExecState(arg1.gpr(), arg2);
+#else
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1.payloadGPR(), arg1.tagGPR(), arg2);
+#endif
+ }
+
+ void setupArgumentsWithExecState(JSValueRegs arg1, JSValueRegs arg2, TrustedImmPtr arg3)
+ {
+#if USE(JSVALUE64)
+ setupArgumentsWithExecState(arg1.gpr(), arg2.gpr(), arg3);
+#else
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1.payloadGPR(), arg1.tagGPR(), arg2.payloadGPR(), arg2.tagGPR(), arg3);
+#endif
+ }
+
+ void setupArgumentsWithExecState(JSValueRegs arg1, JSValueRegs arg2, TrustedImmPtr arg3, TrustedImmPtr arg4)
+ {
+#if USE(JSVALUE64)
+ setupArgumentsWithExecState(arg1.gpr(), arg2.gpr(), arg3, arg4);
+#else
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1.payloadGPR(), arg1.tagGPR(), arg2.payloadGPR(), arg2.tagGPR(), arg3, arg4);
+#endif
+ }
+
+ void setupArgumentsWithExecState(JSValueRegs arg1, TrustedImmPtr arg2, TrustedImmPtr arg3)
+ {
+#if USE(JSVALUE64)
+ setupArgumentsWithExecState(arg1.gpr(), arg2, arg3);
+#else
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1.payloadGPR(), arg1.tagGPR(), arg2, arg3);
+#endif
+ }
+
+ void setupArguments(JSValueRegs arg1)
+ {
+#if USE(JSVALUE64)
+ setupArguments(arg1.gpr());
+#else
+ setupArguments(arg1.payloadGPR(), arg1.tagGPR());
+#endif
+ }
+
void setupResults(GPRReg destA, GPRReg destB)
{
GPRReg srcA = GPRInfo::returnValueGPR;
@@ -1570,18 +2391,218 @@ public:
swap(destA, destB);
}
+ void setupResults(JSValueRegs regs)
+ {
+#if USE(JSVALUE64)
+ move(GPRInfo::returnValueGPR, regs.gpr());
+#else
+ setupResults(regs.payloadGPR(), regs.tagGPR());
+#endif
+ }
+
void jumpToExceptionHandler()
{
- // genericUnwind() leaves the handler CallFrame* in vm->callFrameForThrow,
+ // genericUnwind() leaves the handler CallFrame* in vm->callFrameForCatch,
// and the address of the handler in vm->targetMachinePCForThrow.
loadPtr(&vm()->targetMachinePCForThrow, GPRInfo::regT1);
jump(GPRInfo::regT1);
}
+
+ void prepareForTailCallSlow(GPRReg calleeGPR = InvalidGPRReg)
+ {
+ GPRReg temp1 = calleeGPR == GPRInfo::regT0 ? GPRInfo::regT3 : GPRInfo::regT0;
+ GPRReg temp2 = calleeGPR == GPRInfo::regT1 ? GPRInfo::regT3 : GPRInfo::regT1;
+ GPRReg temp3 = calleeGPR == GPRInfo::regT2 ? GPRInfo::regT3 : GPRInfo::regT2;
+
+ GPRReg newFramePointer = temp1;
+ GPRReg newFrameSizeGPR = temp2;
+ {
+ // The old frame size is its number of arguments (or number of
+ // parameters in case of arity fixup), plus the frame header size,
+ // aligned
+ GPRReg oldFrameSizeGPR = temp2;
+ {
+ GPRReg argCountGPR = oldFrameSizeGPR;
+ load32(Address(framePointerRegister, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset), argCountGPR);
+
+ {
+ GPRReg numParametersGPR = temp1;
+ {
+ GPRReg codeBlockGPR = numParametersGPR;
+ loadPtr(Address(framePointerRegister, CallFrameSlot::codeBlock * static_cast<int>(sizeof(Register))), codeBlockGPR);
+ load32(Address(codeBlockGPR, CodeBlock::offsetOfNumParameters()), numParametersGPR);
+ }
+
+ ASSERT(numParametersGPR != argCountGPR);
+ Jump argumentCountWasNotFixedUp = branch32(BelowOrEqual, numParametersGPR, argCountGPR);
+ move(numParametersGPR, argCountGPR);
+ argumentCountWasNotFixedUp.link(this);
+ }
+
+ add32(TrustedImm32(stackAlignmentRegisters() + CallFrame::headerSizeInRegisters - 1), argCountGPR, oldFrameSizeGPR);
+ and32(TrustedImm32(-stackAlignmentRegisters()), oldFrameSizeGPR);
+ // We assume < 2^28 arguments
+ mul32(TrustedImm32(sizeof(Register)), oldFrameSizeGPR, oldFrameSizeGPR);
+ }
+
+ // The new frame pointer is at framePointer + oldFrameSize - newFrameSize
+ ASSERT(newFramePointer != oldFrameSizeGPR);
+ addPtr(framePointerRegister, oldFrameSizeGPR, newFramePointer);
+
+ // The new frame size is just the number of arguments plus the
+ // frame header size, aligned
+ ASSERT(newFrameSizeGPR != newFramePointer);
+ load32(Address(stackPointerRegister, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)),
+ newFrameSizeGPR);
+ add32(TrustedImm32(stackAlignmentRegisters() + CallFrame::headerSizeInRegisters - 1), newFrameSizeGPR);
+ and32(TrustedImm32(-stackAlignmentRegisters()), newFrameSizeGPR);
+ // We assume < 2^28 arguments
+ mul32(TrustedImm32(sizeof(Register)), newFrameSizeGPR, newFrameSizeGPR);
+ }
+
+ GPRReg tempGPR = temp3;
+ ASSERT(tempGPR != newFramePointer && tempGPR != newFrameSizeGPR);
+
+ // We don't need the current frame beyond this point. Masquerade as our
+ // caller.
+#if CPU(ARM) || CPU(ARM64)
+ loadPtr(Address(framePointerRegister, sizeof(void*)), linkRegister);
+ subPtr(TrustedImm32(2 * sizeof(void*)), newFrameSizeGPR);
+#elif CPU(MIPS)
+ loadPtr(Address(framePointerRegister, sizeof(void*)), returnAddressRegister);
+ subPtr(TrustedImm32(2 * sizeof(void*)), newFrameSizeGPR);
+#elif CPU(X86) || CPU(X86_64)
+ loadPtr(Address(framePointerRegister, sizeof(void*)), tempGPR);
+ push(tempGPR);
+ subPtr(TrustedImm32(sizeof(void*)), newFrameSizeGPR);
+#else
+ UNREACHABLE_FOR_PLATFORM();
+#endif
+ subPtr(newFrameSizeGPR, newFramePointer);
+ loadPtr(Address(framePointerRegister), framePointerRegister);
+
+
+ // We need to copy the newFrameSizeGPR bytes sitting just above the stack
+ // pointer up to newFramePointer. We copy in pointer-sized chunks.
+ MacroAssembler::Label copyLoop(label());
+
+ subPtr(TrustedImm32(sizeof(void*)), newFrameSizeGPR);
+ loadPtr(BaseIndex(stackPointerRegister, newFrameSizeGPR, TimesOne), tempGPR);
+ storePtr(tempGPR, BaseIndex(newFramePointer, newFrameSizeGPR, TimesOne));
+
+ branchTest32(MacroAssembler::NonZero, newFrameSizeGPR).linkTo(copyLoop, this);
+
+ // Ready for a jump!
+ move(newFramePointer, stackPointerRegister);
+ }
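
Editorial sketch: a standalone illustration of the add32/and32/mul32 frame-size sequence emitted above. The concrete constants (8-byte Register, 2-register stack alignment, 5-register call-frame header) are assumptions for the example; the real values come from JSC's Register, StackAlignment, and CallFrame definitions.

    #include <cstdint>
    #include <cstdio>

    int main()
    {
        const uint32_t sizeofRegister = 8;        // assumed sizeof(Register)
        const uint32_t alignmentRegisters = 2;    // assumed stackAlignmentRegisters()
        const uint32_t headerSizeInRegisters = 5; // assumed CallFrame::headerSizeInRegisters

        uint32_t argumentCountIncludingThis = 3;
        // add32(header + alignment - 1) followed by and32(-alignment), in register units:
        uint32_t frameSizeInRegisters =
            (argumentCountIncludingThis + headerSizeInRegisters + alignmentRegisters - 1)
            & ~(alignmentRegisters - 1);
        // mul32 by sizeof(Register) turns the register count into a byte count:
        uint32_t frameSizeInBytes = frameSizeInRegisters * sizeofRegister;

        std::printf("%u registers, %u bytes\n", frameSizeInRegisters, frameSizeInBytes); // 8, 64
        return 0;
    }
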
+
+#if NUMBER_OF_ARGUMENT_REGISTERS >= 4
+ template<unsigned NumberOfRegisters>
+ void setupStubArgsGPR(std::array<GPRReg, NumberOfRegisters> destinations, std::array<GPRReg, NumberOfRegisters> sources)
+ {
+ if (!ASSERT_DISABLED) {
+ RegisterSet set;
+ for (GPRReg dest : destinations)
+ set.set(dest);
+ ASSERT_WITH_MESSAGE(set.numberOfSetGPRs() == NumberOfRegisters, "Destinations should not be aliased.");
+ }
+
+ typedef std::pair<GPRReg, GPRReg> RegPair;
+ Vector<RegPair, NumberOfRegisters> pairs;
+
+ for (unsigned i = 0; i < NumberOfRegisters; ++i) {
+ if (sources[i] != destinations[i])
+ pairs.append(std::make_pair(sources[i], destinations[i]));
+ }
+
+#if !ASSERT_DISABLED
+ auto numUniqueSources = [&] () -> unsigned {
+ RegisterSet set;
+ for (auto& pair : pairs) {
+ GPRReg source = pair.first;
+ set.set(source);
+ }
+ return set.numberOfSetGPRs();
+ };
+
+ auto numUniqueDests = [&] () -> unsigned {
+ RegisterSet set;
+ for (auto& pair : pairs) {
+ GPRReg dest = pair.second;
+ set.set(dest);
+ }
+ return set.numberOfSetGPRs();
+ };
+#endif
+
+ while (pairs.size()) {
+ RegisterSet freeDestinations;
+ for (auto& pair : pairs) {
+ GPRReg dest = pair.second;
+ freeDestinations.set(dest);
+ }
+ for (auto& pair : pairs) {
+ GPRReg source = pair.first;
+ freeDestinations.clear(source);
+ }
+
+ if (freeDestinations.numberOfSetGPRs()) {
+ bool madeMove = false;
+ for (unsigned i = 0; i < pairs.size(); i++) {
+ auto& pair = pairs[i];
+ GPRReg source = pair.first;
+ GPRReg dest = pair.second;
+ if (freeDestinations.get(dest)) {
+ move(source, dest);
+ pairs.remove(i);
+ madeMove = true;
+ break;
+ }
+ }
+ ASSERT_UNUSED(madeMove, madeMove);
+ continue;
+ }
+
+ ASSERT(numUniqueDests() == numUniqueSources());
+ ASSERT(numUniqueDests() == pairs.size());
+ // The set of source and destination registers are equivalent sets. This means we don't have
+ // any free destination registers that won't also clobber a source. We get around this by
+ // exchanging registers.
+
+ GPRReg source = pairs[0].first;
+ GPRReg dest = pairs[0].second;
+ swap(source, dest);
+ pairs.remove(0);
+
+ GPRReg newSource = source;
+ for (auto& pair : pairs) {
+ GPRReg source = pair.first;
+ if (source == dest) {
+ pair.first = newSource;
+ break;
+ }
+ }
+
+ // We may have introduced pairs that have the same source and destination. Remove those now.
+ for (unsigned i = 0; i < pairs.size(); i++) {
+ auto& pair = pairs[i];
+ if (pair.first == pair.second) {
+ pairs.remove(i);
+ i--;
+ }
+ }
+ }
+ }
+#endif // NUMBER_OF_ARGUMENT_REGISTERS >= 4
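
Editorial sketch (not part of the patch): when the requested assignment forms a cycle, the loop above finds no free destination and breaks the cycle with a swap. Assuming a CCallHelpers instance named jit:

    // Destinations {argumentGPR1, argumentGPR2}, sources {argumentGPR2, argumentGPR1}:
    // neither destination is free, so a single swap(argumentGPR2, argumentGPR1) is
    // emitted and both remaining pairs become no-ops.
    jit.setupStubArgsGPR<2>(
        { { GPRInfo::argumentGPR1, GPRInfo::argumentGPR2 } },
        { { GPRInfo::argumentGPR2, GPRInfo::argumentGPR1 } });
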
+
+ // These operations clobber all volatile registers. They assume that there is room on the top of
+ // stack to marshall call arguments.
+ void logShadowChickenProloguePacket(GPRReg shadowPacket, GPRReg scratch1, GPRReg scope);
+ void logShadowChickenTailPacket(GPRReg shadowPacket, JSValueRegs thisRegs, GPRReg scope, CodeBlock*, CallSiteIndex);
+ // Leaves behind a pointer to the Packet we should write to in shadowPacket.
+ void ensureShadowChickenPacket(GPRReg shadowPacket, GPRReg scratch1NonArgGPR, GPRReg scratch2);
};
} // namespace JSC
#endif // ENABLE(JIT)
-
-#endif // CCallHelpers_h
-
diff --git a/Source/JavaScriptCore/jit/CachedRecovery.cpp b/Source/JavaScriptCore/jit/CachedRecovery.cpp
new file mode 100644
index 000000000..f4aacc6c8
--- /dev/null
+++ b/Source/JavaScriptCore/jit/CachedRecovery.cpp
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "CachedRecovery.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+// We prefer loading doubles and undetermined JSValues into FPRs
+// because they would otherwise use up GPRs (two of them in JSVALUE32_64).
+bool CachedRecovery::loadsIntoFPR() const
+{
+ switch (recovery().technique()) {
+ case DoubleDisplacedInJSStack:
+ case DisplacedInJSStack:
+#if USE(JSVALUE64)
+ case CellDisplacedInJSStack:
+#endif
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+// Integers, booleans and cells can be loaded into GPRs
+bool CachedRecovery::loadsIntoGPR() const
+{
+ switch (recovery().technique()) {
+ case Int32DisplacedInJSStack:
+#if USE(JSVALUE64)
+ case Int52DisplacedInJSStack:
+ case StrictInt52DisplacedInJSStack:
+ case DisplacedInJSStack:
+#endif
+ case BooleanDisplacedInJSStack:
+ case CellDisplacedInJSStack:
+ return true;
+
+ default:
+ return false;
+ }
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/CachedRecovery.h b/Source/JavaScriptCore/jit/CachedRecovery.h
new file mode 100644
index 000000000..f627ac901
--- /dev/null
+++ b/Source/JavaScriptCore/jit/CachedRecovery.h
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "ValueRecovery.h"
+#include "VirtualRegister.h"
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+// A CachedRecovery is a wrapper around a ValueRecovery that records where said
+// value should go on the stack and/or in registers. Whenever we perform an
+// operation changing the ValueRecovery, we update the CachedRecovery's member
+// in place.
+class CachedRecovery {
+public:
+ CachedRecovery(ValueRecovery recovery)
+ : m_recovery { recovery }
+ {
+ }
+
+ CachedRecovery(CachedRecovery&) = delete;
+ CachedRecovery(CachedRecovery&&) = delete;
+ CachedRecovery& operator=(CachedRecovery&) = delete;
+ CachedRecovery& operator=(CachedRecovery&&) = delete;
+
+ const Vector<VirtualRegister, 1>& targets() const { return m_targets; }
+
+ void addTarget(VirtualRegister reg)
+ {
+ ASSERT(m_targets.isEmpty() || m_targets.last() < reg);
+ m_targets.append(reg);
+ }
+
+ void removeTarget(VirtualRegister reg)
+ {
+ ASSERT_UNUSED(reg, m_targets.last() == reg);
+ m_targets.shrink(m_targets.size() - 1);
+ }
+
+ void clearTargets()
+ {
+ m_targets.clear();
+ }
+
+ void setWantedJSValueRegs(JSValueRegs jsValueRegs)
+ {
+ ASSERT(m_wantedFPR == InvalidFPRReg);
+ m_wantedJSValueRegs = jsValueRegs;
+ }
+
+ void setWantedFPR(FPRReg fpr)
+ {
+ ASSERT(!m_wantedJSValueRegs);
+ m_wantedFPR = fpr;
+ }
+
+ // Determine whether converting this recovery into a JSValue will
+ // require additional GPRs and/or FPRs.
+ // This is guaranteed to only depend on the DataFormat, and the
+ // result of these calls will stay valid after loads and/or stores.
+ bool boxingRequiresGPR() const
+ {
+#if USE(JSVALUE64)
+ return recovery().dataFormat() == DataFormatDouble;
+#else
+ return false;
+#endif
+ }
+ bool boxingRequiresFPR() const
+ {
+#if USE(JSVALUE64)
+ switch (recovery().dataFormat()) {
+ case DataFormatInt52:
+ case DataFormatStrictInt52:
+ return true;
+
+ default:
+ return false;
+ }
+#else
+ return false;
+#endif
+ }
+
+ // This is used to determine what kind of register we need to be
+ // able to load a recovery. We only use it when a direct load is
+ // currently impossible, to determine whether we should spill a
+ // GPR or an FPR for loading this value.
+ bool loadsIntoGPR() const;
+ bool loadsIntoFPR() const;
+
+ ValueRecovery recovery() const { return m_recovery; }
+
+ void setRecovery(ValueRecovery recovery) { m_recovery = recovery; }
+
+ JSValueRegs wantedJSValueRegs() const { return m_wantedJSValueRegs; }
+
+ FPRReg wantedFPR() const { return m_wantedFPR; }
+private:
+ ValueRecovery m_recovery;
+ JSValueRegs m_wantedJSValueRegs;
+ FPRReg m_wantedFPR { InvalidFPRReg };
+ Vector<VirtualRegister, 1> m_targets;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
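
Editorial sketch (not in the patch) of the intended usage of CachedRecovery, with someRegs standing in for whatever JSValueRegs the caller has picked:

    // A value currently spilled at virtual register -3, wanted both in the stack
    // slot for virtual register 1 and in a register (pair).
    CachedRecovery cachedRecovery(
        ValueRecovery::displacedInJSStack(VirtualRegister(-3), DataFormatJS));
    cachedRecovery.addTarget(VirtualRegister(1));
    cachedRecovery.setWantedJSValueRegs(someRegs);
    if (cachedRecovery.loadsIntoFPR()) {
        // DisplacedInJSStack / DoubleDisplacedInJSStack recoveries prefer an FPR load,
        // as CachedRecovery.cpp above spells out.
    }
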
diff --git a/Source/JavaScriptCore/jit/CallFrameShuffleData.cpp b/Source/JavaScriptCore/jit/CallFrameShuffleData.cpp
new file mode 100644
index 000000000..567202c15
--- /dev/null
+++ b/Source/JavaScriptCore/jit/CallFrameShuffleData.cpp
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "CallFrameShuffleData.h"
+
+#if ENABLE(JIT)
+
+#include "CCallHelpers.h"
+#include "CodeBlock.h"
+
+namespace JSC {
+
+#if USE(JSVALUE64)
+
+void CallFrameShuffleData::setupCalleeSaveRegisters(CodeBlock* codeBlock)
+{
+ RegisterSet calleeSaveRegisters { RegisterSet::vmCalleeSaveRegisters() };
+ RegisterAtOffsetList* registerSaveLocations = codeBlock->calleeSaveRegisters();
+
+ for (size_t i = 0; i < registerSaveLocations->size(); ++i) {
+ RegisterAtOffset entry { registerSaveLocations->at(i) };
+ if (!calleeSaveRegisters.get(entry.reg()))
+ continue;
+
+ VirtualRegister saveSlot { entry.offsetAsIndex() };
+ registers[entry.reg()]
+ = ValueRecovery::displacedInJSStack(saveSlot, DataFormatJS);
+ }
+
+ for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
+ if (!calleeSaveRegisters.get(reg))
+ continue;
+
+ if (registers[reg])
+ continue;
+
+ registers[reg] = ValueRecovery::inRegister(reg, DataFormatJS);
+ }
+}
+
+#endif // USE(JSVALUE64)
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/CallFrameShuffleData.h b/Source/JavaScriptCore/jit/CallFrameShuffleData.h
new file mode 100644
index 000000000..7e3ad5f52
--- /dev/null
+++ b/Source/JavaScriptCore/jit/CallFrameShuffleData.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "RegisterMap.h"
+#include "ValueRecovery.h"
+
+namespace JSC {
+
+struct CallFrameShuffleData {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ unsigned numLocals { UINT_MAX };
+ ValueRecovery callee;
+ Vector<ValueRecovery> args;
+ unsigned numPassedArgs { UINT_MAX };
+#if USE(JSVALUE64)
+ RegisterMap<ValueRecovery> registers;
+ GPRReg tagTypeNumber { InvalidGPRReg };
+
+ void setupCalleeSaveRegisters(CodeBlock*);
+#endif
+};
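+
+// A minimal illustrative sketch of how a tier might fill this struct in for
+// a tail call with one stack argument and the callee in a GPR. The CodeBlock
+// accessor and register choice below are assumptions for illustration, not
+// taken from the actual call sites:
+//
+//     CallFrameShuffleData shuffleData;
+//     shuffleData.numLocals = codeBlock->m_numCalleeLocals;
+//     shuffleData.numPassedArgs = 1;
+//     shuffleData.callee = ValueRecovery::inGPR(GPRInfo::regT0, DataFormatJS);
+//     shuffleData.args.append(
+//         ValueRecovery::displacedInJSStack(virtualRegisterForArgument(0), DataFormatJS));
+// #if USE(JSVALUE64)
+//     shuffleData.setupCalleeSaveRegisters(codeBlock);
+// #endif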
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/CallFrameShuffler.cpp b/Source/JavaScriptCore/jit/CallFrameShuffler.cpp
new file mode 100644
index 000000000..ffbc7e6b0
--- /dev/null
+++ b/Source/JavaScriptCore/jit/CallFrameShuffler.cpp
@@ -0,0 +1,776 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "CallFrameShuffler.h"
+
+#if ENABLE(JIT)
+
+#include "CachedRecovery.h"
+#include "CCallHelpers.h"
+#include "CodeBlock.h"
+
+namespace JSC {
+
+CallFrameShuffler::CallFrameShuffler(CCallHelpers& jit, const CallFrameShuffleData& data)
+ : m_jit(jit)
+ , m_oldFrame(data.numLocals + CallerFrameAndPC::sizeInRegisters, nullptr)
+ , m_newFrame(data.args.size() + CallFrame::headerSizeInRegisters, nullptr)
+ , m_alignedOldFrameSize(CallFrame::headerSizeInRegisters
+ + roundArgumentCountToAlignFrame(jit.codeBlock()->numParameters()))
+ , m_alignedNewFrameSize(CallFrame::headerSizeInRegisters
+ + roundArgumentCountToAlignFrame(data.args.size()))
+ , m_frameDelta(m_alignedNewFrameSize - m_alignedOldFrameSize)
+ , m_lockedRegisters(RegisterSet::allRegisters())
+ , m_numPassedArgs(data.numPassedArgs)
+{
+ // We are allowed all the usual registers...
+ for (unsigned i = GPRInfo::numberOfRegisters; i--; )
+ m_lockedRegisters.clear(GPRInfo::toRegister(i));
+ for (unsigned i = FPRInfo::numberOfRegisters; i--; )
+ m_lockedRegisters.clear(FPRInfo::toRegister(i));
+ // ... as well as the runtime registers.
+ m_lockedRegisters.exclude(RegisterSet::vmCalleeSaveRegisters());
+
+ ASSERT(!data.callee.isInJSStack() || data.callee.virtualRegister().isLocal());
+ addNew(VirtualRegister(CallFrameSlot::callee), data.callee);
+
+ for (size_t i = 0; i < data.args.size(); ++i) {
+ ASSERT(!data.args[i].isInJSStack() || data.args[i].virtualRegister().isLocal());
+ addNew(virtualRegisterForArgument(i), data.args[i]);
+ }
+
+#if USE(JSVALUE64)
+ for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
+ if (!data.registers[reg].isSet())
+ continue;
+
+ if (reg.isGPR())
+ addNew(JSValueRegs(reg.gpr()), data.registers[reg]);
+ else
+ addNew(reg.fpr(), data.registers[reg]);
+ }
+
+ m_tagTypeNumber = data.tagTypeNumber;
+ if (m_tagTypeNumber != InvalidGPRReg)
+ lockGPR(m_tagTypeNumber);
+#endif
+}
+
+void CallFrameShuffler::dump(PrintStream& out) const
+{
+ static const char* delimiter = " +-------------------------------+ ";
+ static const char* dangerDelimiter = " X-------------------------------X ";
+ static const char* dangerBoundsDelimiter = " XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX ";
+ static const char* emptySpace = " ";
+ out.print(" ");
+ out.print(" Old frame ");
+ out.print(" New frame ");
+ out.print("\n");
+ int totalSize = m_alignedOldFrameSize + std::max(numLocals(), m_alignedNewFrameSize) + 3;
+ for (int i = 0; i < totalSize; ++i) {
+ VirtualRegister old { m_alignedOldFrameSize - i - 1 };
+ VirtualRegister newReg { old + m_frameDelta };
+
+ if (!isValidOld(old) && old != firstOld() - 1
+ && !isValidNew(newReg) && newReg != firstNew() - 1)
+ continue;
+
+ out.print(" ");
+ if (dangerFrontier() >= firstNew()
+ && (newReg == dangerFrontier() || newReg == firstNew() - 1))
+ out.print(dangerBoundsDelimiter);
+ else if (isValidOld(old))
+ out.print(isValidNew(newReg) && isDangerNew(newReg) ? dangerDelimiter : delimiter);
+ else if (old == firstOld() - 1)
+ out.print(delimiter);
+ else
+ out.print(emptySpace);
+ if (dangerFrontier() >= firstNew()
+ && (newReg == dangerFrontier() || newReg == firstNew() - 1))
+ out.print(dangerBoundsDelimiter);
+ else if (isValidNew(newReg) || newReg == firstNew() - 1)
+ out.print(isDangerNew(newReg) ? dangerDelimiter : delimiter);
+ else
+ out.print(emptySpace);
+ out.print("\n");
+ if (old == firstOld())
+ out.print(" sp --> ");
+ else if (!old.offset())
+ out.print(" fp --> ");
+ else
+ out.print(" ");
+ if (isValidOld(old)) {
+ if (getOld(old)) {
+ auto str = toCString(old);
+ if (isValidNew(newReg) && isDangerNew(newReg))
+ out.printf(" X %18s X ", str.data());
+ else
+ out.printf(" | %18s | ", str.data());
+ } else if (isValidNew(newReg) && isDangerNew(newReg))
+ out.printf(" X%30s X ", "");
+ else
+ out.printf(" |%30s | ", "");
+ } else
+ out.print(emptySpace);
+ if (isValidNew(newReg)) {
+ const char d = isDangerNew(newReg) ? 'X' : '|';
+ auto str = toCString(newReg);
+ if (getNew(newReg)) {
+ if (getNew(newReg)->recovery().isConstant())
+ out.printf(" %c%8s <- constant %c ", d, str.data(), d);
+ else {
+ auto recoveryStr = toCString(getNew(newReg)->recovery());
+ out.printf(" %c%8s <- %18s %c ", d, str.data(),
+ recoveryStr.data(), d);
+ }
+ } else if (newReg == VirtualRegister { CallFrameSlot::argumentCount })
+ out.printf(" %c%8s <- %18zu %c ", d, str.data(), argCount(), d);
+ else
+ out.printf(" %c%30s %c ", d, "", d);
+ } else
+ out.print(emptySpace);
+ if (newReg == firstNew() - m_newFrameOffset && !isSlowPath())
+ out.print(" <-- new sp before jump (current ", m_newFrameBase, ") ");
+ if (newReg == firstNew())
+ out.print(" <-- new fp after prologue");
+ out.print("\n");
+ }
+ out.print(" ");
+ out.print(" Live registers ");
+ out.print(" Wanted registers ");
+ out.print("\n");
+ for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
+ CachedRecovery* oldCachedRecovery { m_registers[reg] };
+ CachedRecovery* newCachedRecovery { m_newRegisters[reg] };
+ if (!oldCachedRecovery && !newCachedRecovery)
+ continue;
+ out.print(" ");
+ if (oldCachedRecovery) {
+ auto str = toCString(reg);
+ out.printf(" %8s ", str.data());
+ } else
+ out.print(emptySpace);
+#if USE(JSVALUE32_64)
+ if (newCachedRecovery) {
+ JSValueRegs wantedJSValueRegs { newCachedRecovery->wantedJSValueRegs() };
+ if (reg.isFPR())
+ out.print(reg, " <- ", newCachedRecovery->recovery());
+ else {
+ if (reg.gpr() == wantedJSValueRegs.tagGPR())
+ out.print(reg.gpr(), " <- tag(", newCachedRecovery->recovery(), ")");
+ else
+ out.print(reg.gpr(), " <- payload(", newCachedRecovery->recovery(), ")");
+ }
+ }
+#else
+ if (newCachedRecovery)
+ out.print(" ", reg, " <- ", newCachedRecovery->recovery());
+#endif
+ out.print("\n");
+ }
+ out.print(" Locked registers: ");
+ bool firstLocked { true };
+ for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
+ if (m_lockedRegisters.get(reg)) {
+ out.print(firstLocked ? "" : ", ", reg);
+ firstLocked = false;
+ }
+ }
+ out.print("\n");
+
+ if (isSlowPath())
+ out.print(" Using fp-relative addressing for slow path call\n");
+ else
+ out.print(" Using sp-relative addressing for jump (using ", m_newFrameBase, " as new sp)\n");
+ if (m_oldFrameOffset)
+ out.print(" Old frame offset is ", m_oldFrameOffset, "\n");
+ if (m_newFrameOffset)
+ out.print(" New frame offset is ", m_newFrameOffset, "\n");
+#if USE(JSVALUE64)
+ if (m_tagTypeNumber != InvalidGPRReg)
+ out.print(" TagTypeNumber is currently in ", m_tagTypeNumber, "\n");
+#endif
+}
+
+CachedRecovery* CallFrameShuffler::getCachedRecovery(ValueRecovery recovery)
+{
+ ASSERT(!recovery.isConstant());
+ if (recovery.isInGPR())
+ return m_registers[recovery.gpr()];
+ if (recovery.isInFPR())
+ return m_registers[recovery.fpr()];
+#if USE(JSVALUE32_64)
+ if (recovery.technique() == InPair) {
+ ASSERT(m_registers[recovery.tagGPR()] == m_registers[recovery.payloadGPR()]);
+ return m_registers[recovery.payloadGPR()];
+ }
+#endif
+ ASSERT(recovery.isInJSStack());
+ return getOld(recovery.virtualRegister());
+}
+
+CachedRecovery* CallFrameShuffler::setCachedRecovery(ValueRecovery recovery, CachedRecovery* cachedRecovery)
+{
+ ASSERT(!recovery.isConstant());
+ if (recovery.isInGPR())
+ return m_registers[recovery.gpr()] = cachedRecovery;
+ if (recovery.isInFPR())
+ return m_registers[recovery.fpr()] = cachedRecovery;
+#if USE(JSVALUE32_64)
+ if (recovery.technique() == InPair) {
+ m_registers[recovery.tagGPR()] = cachedRecovery;
+ return m_registers[recovery.payloadGPR()] = cachedRecovery;
+ }
+#endif
+ ASSERT(recovery.isInJSStack());
+ setOld(recovery.virtualRegister(), cachedRecovery);
+ return cachedRecovery;
+}
+
+void CallFrameShuffler::spill(CachedRecovery& cachedRecovery)
+{
+ ASSERT(!isSlowPath());
+ ASSERT(cachedRecovery.recovery().isInRegisters());
+
+ VirtualRegister spillSlot { 0 };
+ for (VirtualRegister slot = firstOld(); slot <= lastOld(); slot += 1) {
+ if (slot >= newAsOld(firstNew()))
+ break;
+
+ if (getOld(slot))
+ continue;
+
+ spillSlot = slot;
+ break;
+ }
+ // We must have enough slots to be able to fit the whole callee's
+ // frame for the slow path - unless we are in the FTL. In that
+ // case, we are allowed to extend the frame *once*, since we are
+ // guaranteed to have enough available space for that.
+ if (spillSlot >= newAsOld(firstNew()) || !spillSlot.isLocal()) {
+ RELEASE_ASSERT(!m_didExtendFrame);
+ extendFrameIfNeeded();
+ spill(cachedRecovery);
+ return;
+ }
+
+ if (verbose)
+ dataLog(" * Spilling ", cachedRecovery.recovery(), " into ", spillSlot, "\n");
+ auto format = emitStore(cachedRecovery, addressForOld(spillSlot));
+ ASSERT(format != DataFormatNone);
+ updateRecovery(cachedRecovery, ValueRecovery::displacedInJSStack(spillSlot, format));
+}
+
+void CallFrameShuffler::emitDeltaCheck()
+{
+ if (ASSERT_DISABLED)
+ return;
+
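+ // Emit a runtime check that fp - sp == numLocals() * sizeof(Register),
+ // i.e. that the actual frame layout matches the shuffler's bookkeeping;
+ // otherwise abort with JITUnexpectedCallFrameSize.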
+ GPRReg scratchGPR { getFreeGPR() };
+ if (scratchGPR != InvalidGPRReg) {
+ if (verbose)
+ dataLog(" Using ", scratchGPR, " for the fp-sp delta check\n");
+ m_jit.move(MacroAssembler::stackPointerRegister, scratchGPR);
+ m_jit.subPtr(GPRInfo::callFrameRegister, scratchGPR);
+ MacroAssembler::Jump ok = m_jit.branch32(
+ MacroAssembler::Equal, scratchGPR,
+ MacroAssembler::TrustedImm32(-numLocals() * sizeof(Register)));
+ m_jit.abortWithReason(JITUnexpectedCallFrameSize);
+ ok.link(&m_jit);
+ } else if (verbose)
+ dataLog(" Skipping the fp-sp delta check since there is too much pressure");
+}
+
+void CallFrameShuffler::extendFrameIfNeeded()
+{
+ ASSERT(!m_didExtendFrame);
+
+ VirtualRegister firstRead { firstOld() };
+ for (; firstRead <= virtualRegisterForLocal(0); firstRead += 1) {
+ if (getOld(firstRead))
+ break;
+ }
+ size_t availableSize = static_cast<size_t>(firstRead.offset() - firstOld().offset());
+ size_t wantedSize = m_newFrame.size() + m_newFrameOffset;
+
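+ // availableSize is the number of old-frame slots below the first slot we
+ // still need to read; if the new frame (plus its base offset) does not
+ // fit in that space, push the stack down by a stack-aligned number of
+ // slots and account for them as extra (empty) old-frame slots.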
+ if (availableSize < wantedSize) {
+ size_t delta = WTF::roundUpToMultipleOf(stackAlignmentRegisters(), wantedSize - availableSize);
+ m_oldFrame.grow(m_oldFrame.size() + delta);
+ for (size_t i = 0; i < delta; ++i)
+ m_oldFrame[m_oldFrame.size() - i - 1] = nullptr;
+ m_jit.subPtr(MacroAssembler::TrustedImm32(delta * sizeof(Register)), MacroAssembler::stackPointerRegister);
+
+ if (isSlowPath())
+ m_frameDelta = numLocals() + CallerFrameAndPC::sizeInRegisters;
+ else
+ m_oldFrameOffset = numLocals();
+
+ if (verbose)
+ dataLogF(" Not enough space - extending the old frame %zu slot\n", delta);
+ }
+
+ m_didExtendFrame = true;
+}
+
+void CallFrameShuffler::prepareForSlowPath()
+{
+ ASSERT(isUndecided());
+ emitDeltaCheck();
+
+ m_frameDelta = numLocals() + CallerFrameAndPC::sizeInRegisters;
+ m_newFrameBase = MacroAssembler::stackPointerRegister;
+ m_newFrameOffset = -CallerFrameAndPC::sizeInRegisters;
+
+ if (verbose)
+ dataLog("\n\nPreparing frame for slow path call:\n");
+
+ // When coming from the FTL, we need to extend the frame. In other
+ // cases, we may end up extending the frame if we previously
+ // spilled things (e.g. in a polymorphic cache).
+ extendFrameIfNeeded();
+
+ if (verbose)
+ dataLog(*this);
+
+ prepareAny();
+
+ if (verbose)
+ dataLog("Ready for slow path call!\n");
+}
+
+void CallFrameShuffler::prepareForTailCall()
+{
+ ASSERT(isUndecided());
+ emitDeltaCheck();
+
+ // We'll use sp-based indexing so that we can load the
+ // caller's frame pointer into the frame pointer register immediately.
+ m_oldFrameBase = MacroAssembler::stackPointerRegister;
+ m_oldFrameOffset = numLocals();
+ m_newFrameBase = acquireGPR();
+#if CPU(X86)
+ // We load the frame pointer manually, but we need to ask the
+ // algorithm to move the return PC for us (it'd probably
+ // require a write to the danger zone). Since it'd be awkward
+ // to ask for half a value move, we ask that the whole thing
+ // be moved for us.
+ addNew(VirtualRegister { 0 },
+ ValueRecovery::displacedInJSStack(VirtualRegister(0), DataFormatJS));
+
+ // sp will point to head0 and we will move it up half a slot
+ // manually
+ m_newFrameOffset = 0;
+#elif CPU(ARM) || CPU(MIPS)
+ // We load the frame pointer and link register
+ // manually. We could ask the algorithm to load them for us,
+ // and it would allow us to use the link register as an extra
+ // temporary - but it'd mean that the frame pointer can also
+ // be used as an extra temporary, so we keep the link register
+ // locked instead.
+
+ // sp will point to head1 since the callee's prologue pushes
+ // the call frame and link register.
+ m_newFrameOffset = -1;
+#elif CPU(ARM64)
+ // We load the frame pointer and link register manually. We
+ // could ask the algorithm to load the link register for us
+ // (which would allow for its use as an extra temporary), but
+ // since it's not in GPRInfo, we can't do it.
+
+ // sp will point to head2 since the callee's prologue pushes the
+ // call frame and link register
+ m_newFrameOffset = -2;
+#elif CPU(X86_64)
+ // We load the frame pointer manually, but we ask the
+ // algorithm to move the return PC for us (it'd probably
+ // require a write in the danger zone)
+ addNew(VirtualRegister { 1 },
+ ValueRecovery::displacedInJSStack(VirtualRegister(1), DataFormatJS));
+
+ // sp will point to head1 since the callee's prologue pushes
+ // the call frame register
+ m_newFrameOffset = -1;
+#else
+ UNREACHABLE_FOR_PLATFORM();
+#endif
+
+ if (verbose)
+ dataLog(" Emitting code for computing the new frame base\n");
+
+ // We compute the new frame base by first computing the top of the
+ // old frame (taking into account an argument count higher than
+ // the number of parameters), then subtracting from it the aligned
+ // new frame size (adjusted).
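+ //
+ // That is, roughly:
+ //   m_newFrameBase = fp
+ //       + sizeInBytes(aligned old frame, using max(actual argument count, numParameters))
+ //       - (m_alignedNewFrameSize + m_newFrameOffset) * sizeof(Register)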
+ m_jit.load32(MacroAssembler::Address(GPRInfo::callFrameRegister, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset), m_newFrameBase);
+ MacroAssembler::Jump argumentCountOK =
+ m_jit.branch32(MacroAssembler::BelowOrEqual, m_newFrameBase,
+ MacroAssembler::TrustedImm32(m_jit.codeBlock()->numParameters()));
+ m_jit.add32(MacroAssembler::TrustedImm32(stackAlignmentRegisters() - 1 + CallFrame::headerSizeInRegisters), m_newFrameBase);
+ m_jit.and32(MacroAssembler::TrustedImm32(-stackAlignmentRegisters()), m_newFrameBase);
+ m_jit.mul32(MacroAssembler::TrustedImm32(sizeof(Register)), m_newFrameBase, m_newFrameBase);
+ MacroAssembler::Jump done = m_jit.jump();
+ argumentCountOK.link(&m_jit);
+ m_jit.move(
+ MacroAssembler::TrustedImm32(m_alignedOldFrameSize * sizeof(Register)),
+ m_newFrameBase);
+ done.link(&m_jit);
+
+ m_jit.addPtr(GPRInfo::callFrameRegister, m_newFrameBase);
+ m_jit.subPtr(
+ MacroAssembler::TrustedImm32(
+ (m_alignedNewFrameSize + m_newFrameOffset) * sizeof(Register)),
+ m_newFrameBase);
+
+ // We load the link register manually for architectures that have one
+#if CPU(ARM) || CPU(ARM64)
+ m_jit.loadPtr(MacroAssembler::Address(MacroAssembler::framePointerRegister, sizeof(void*)),
+ MacroAssembler::linkRegister);
+#elif CPU(MIPS)
+ m_jit.loadPtr(MacroAssembler::Address(MacroAssembler::framePointerRegister, sizeof(void*)),
+ MacroAssembler::returnAddressRegister);
+#endif
+
+ // We want the frame pointer to always point to a valid frame, and
+ // we are going to trash the current one. Let's make it point to
+ // our caller's frame, since that's what we want to end up with.
+ m_jit.loadPtr(MacroAssembler::Address(MacroAssembler::framePointerRegister),
+ MacroAssembler::framePointerRegister);
+
+ if (verbose)
+ dataLog("Preparing frame for tail call:\n", *this);
+
+ prepareAny();
+
+#if CPU(X86)
+ if (verbose)
+ dataLog(" Simulating pop of the call frame register\n");
+ m_jit.addPtr(MacroAssembler::TrustedImm32(sizeof(void*)), MacroAssembler::stackPointerRegister);
+#endif
+
+ if (verbose)
+ dataLog("Ready for tail call!\n");
+}
+
+bool CallFrameShuffler::tryWrites(CachedRecovery& cachedRecovery)
+{
+ ASSERT(m_newFrameBase != InvalidGPRReg);
+
+ // If the value is already set up correctly, we don't have
+ // anything to do.
+ if (isSlowPath() && cachedRecovery.recovery().isInJSStack()
+ && cachedRecovery.targets().size() == 1
+ && newAsOld(cachedRecovery.targets()[0]) == cachedRecovery.recovery().virtualRegister()) {
+ cachedRecovery.clearTargets();
+ if (!cachedRecovery.wantedJSValueRegs() && cachedRecovery.wantedFPR() == InvalidFPRReg)
+ clearCachedRecovery(cachedRecovery.recovery());
+ return true;
+ }
+
+ if (!canLoadAndBox(cachedRecovery))
+ return false;
+
+ emitLoad(cachedRecovery);
+ emitBox(cachedRecovery);
+ ASSERT(cachedRecovery.recovery().isInRegisters()
+ || cachedRecovery.recovery().isConstant());
+
+ if (verbose)
+ dataLog(" * Storing ", cachedRecovery.recovery());
+ for (size_t i = 0; i < cachedRecovery.targets().size(); ++i) {
+ VirtualRegister target { cachedRecovery.targets()[i] };
+ ASSERT(!isDangerNew(target));
+ if (verbose)
+ dataLog(!i ? " into " : ", and ", "NEW ", target);
+ emitStore(cachedRecovery, addressForNew(target));
+ setNew(target, nullptr);
+ }
+ if (verbose)
+ dataLog("\n");
+ cachedRecovery.clearTargets();
+ if (!cachedRecovery.wantedJSValueRegs() && cachedRecovery.wantedFPR() == InvalidFPRReg)
+ clearCachedRecovery(cachedRecovery.recovery());
+
+ return true;
+}
+
+bool CallFrameShuffler::performSafeWrites()
+{
+ VirtualRegister firstSafe;
+ VirtualRegister end { lastNew() + 1 };
+ Vector<VirtualRegister> failures;
+
+ // For each cachedRecovery that writes to the safe zone, if it
+ // doesn't also write to the danger zone, we try to perform
+ // the writes. This may free up danger slots, so we iterate
+ // again until it doesn't happen anymore.
+ //
+ // Note that even though we have a while block, we look at
+ // each slot of the new call frame at most once since in each
+ // iteration beyond the first, we only load up the portion of
+ // the new call frame that was dangerous and became safe due
+ // to the previous iteration.
+ do {
+ firstSafe = dangerFrontier() + 1;
+ if (verbose)
+ dataLog(" Trying safe writes (between NEW ", firstSafe, " and NEW ", end - 1, ")\n");
+ bool didProgress = false;
+ for (VirtualRegister reg = firstSafe; reg < end; reg += 1) {
+ CachedRecovery* cachedRecovery = getNew(reg);
+ if (!cachedRecovery) {
+ if (verbose)
+ dataLog(" + ", reg, " is OK.\n");
+ continue;
+ }
+ if (!hasOnlySafeWrites(*cachedRecovery)) {
+ if (verbose) {
+ dataLog(" - ", cachedRecovery->recovery(), " writes to NEW ", reg,
+ " but also has dangerous writes.\n");
+ }
+ continue;
+ }
+ if (cachedRecovery->wantedJSValueRegs()) {
+ if (verbose) {
+ dataLog(" - ", cachedRecovery->recovery(), " writes to NEW ", reg,
+ " but is also needed in registers.\n");
+ }
+ continue;
+ }
+ if (cachedRecovery->wantedFPR() != InvalidFPRReg) {
+ if (verbose) {
+ dataLog(" - ", cachedRecovery->recovery(), " writes to NEW ", reg,
+ " but is also needed in an FPR.\n");
+ }
+ continue;
+ }
+ if (!tryWrites(*cachedRecovery)) {
+ if (verbose)
+ dataLog(" - Unable to write to NEW ", reg, " from ", cachedRecovery->recovery(), "\n");
+ failures.append(reg);
+ }
+ didProgress = true;
+ }
+ end = firstSafe;
+
+ // If we have cachedRecoveries that failed to write, it is
+ // because they are on the stack and we didn't have enough
+ // registers available at the time to load them into. If
+ // we have a free register, we should try again because it
+ // could free up some danger slots.
+ if (didProgress && hasFreeRegister()) {
+ Vector<VirtualRegister> stillFailing;
+ for (VirtualRegister failed : failures) {
+ CachedRecovery* cachedRecovery = getNew(failed);
+ // It could have been handled later if it had
+ // several targets
+ if (!cachedRecovery)
+ continue;
+
+ ASSERT(hasOnlySafeWrites(*cachedRecovery)
+ && !cachedRecovery->wantedJSValueRegs()
+ && cachedRecovery->wantedFPR() == InvalidFPRReg);
+ if (!tryWrites(*cachedRecovery))
+ stillFailing.append(failed);
+ }
+ failures = WTFMove(stillFailing);
+ }
+ if (verbose && firstSafe != dangerFrontier() + 1)
+ dataLog(" We freed up danger slots!\n");
+ } while (firstSafe != dangerFrontier() + 1);
+
+ return failures.isEmpty();
+}
+
+void CallFrameShuffler::prepareAny()
+{
+ ASSERT(!isUndecided());
+
+ updateDangerFrontier();
+
+ // First, we try to store any value that goes above the danger
+ // frontier. This will never use more registers since we are only
+ // loading+storing if we ensure that any register used for the load
+ // will be freed up after the stores (i.e., all stores are above
+ // the danger frontier, and there is no wanted register).
+ performSafeWrites();
+
+ // At this point, we couldn't have more available registers than
+ // we have without spilling: all values currently in registers
+ // either require a write to the danger zone, or have a wanted
+ // register, which means that in any case they will have to go
+ // through registers again.
+
+ // We now slowly free up the danger zone by first loading the old
+ // value on the danger frontier, spilling as many registers as
+ // needed to do so and ensuring that the corresponding slot in the
+ // new frame is now ready to be written. Then, we store the old
+ // value to its target location if possible (we could have failed
+ // to load it previously due to high pressure). Finally, we write
+ // to any of the newly safe slots that we can, which could free up
+ // registers (hence why we do it eagerly).
+ for (VirtualRegister reg = dangerFrontier(); reg >= firstNew(); reg -= 1) {
+ if (reg == dangerFrontier()) {
+ if (verbose)
+ dataLog(" Next slot (NEW ", reg, ") is the danger frontier\n");
+ CachedRecovery* cachedRecovery { getOld(newAsOld(dangerFrontier())) };
+ ASSERT(cachedRecovery);
+ ensureLoad(*cachedRecovery);
+ emitLoad(*cachedRecovery);
+ ensureBox(*cachedRecovery);
+ emitBox(*cachedRecovery);
+ if (hasOnlySafeWrites(*cachedRecovery))
+ tryWrites(*cachedRecovery);
+ } else if (verbose)
+ dataLog(" Next slot is NEW ", reg, "\n");
+
+ ASSERT(!isDangerNew(reg));
+ CachedRecovery* cachedRecovery = getNew(reg);
+ // This could be one of the header slots we don't care about.
+ if (!cachedRecovery) {
+ if (verbose)
+ dataLog(" + ", reg, " is OK\n");
+ continue;
+ }
+
+ if (canLoadAndBox(*cachedRecovery) && hasOnlySafeWrites(*cachedRecovery)
+ && !cachedRecovery->wantedJSValueRegs()
+ && cachedRecovery->wantedFPR() == InvalidFPRReg) {
+ emitLoad(*cachedRecovery);
+ emitBox(*cachedRecovery);
+ bool writesOK = tryWrites(*cachedRecovery);
+ ASSERT_UNUSED(writesOK, writesOK);
+ } else if (verbose)
+ dataLog(" - ", cachedRecovery->recovery(), " can't be handled just yet.\n");
+ }
+ ASSERT(dangerFrontier() < firstNew());
+
+ // Now, the danger zone is empty, but we still have a couple of
+ // things to do:
+ //
+ // 1) There could be remaining safe writes that failed earlier due
+ // to high register pressure and had nothing to do with the
+ // danger zone whatsoever.
+ //
+ // 2) Some wanted registers could have to be loaded (this could
+ //    happen either when making a call to a function that takes
+ //    fewer arguments - since up to this point we only load wanted
+ //    registers when they are at the danger frontier - or if a
+ //    wanted register got spilled).
+ //
+ // 3) Some wanted registers could have been loaded in the wrong
+ // registers
+ //
+ // 4) We have to take care of some bookkeeping - namely, storing
+ // the argument count and updating the stack pointer.
+
+ // At this point, we must have enough registers available for
+ // handling 1). None of the loads can fail because we have been
+ // eagerly freeing up registers in all the previous phases - so
+ // the only values that are in registers at this point must have
+ // wanted registers.
+ if (verbose)
+ dataLog(" Danger zone is clear, performing remaining writes.\n");
+ for (VirtualRegister reg = firstNew(); reg <= lastNew(); reg += 1) {
+ CachedRecovery* cachedRecovery { getNew(reg) };
+ if (!cachedRecovery)
+ continue;
+
+ emitLoad(*cachedRecovery);
+ emitBox(*cachedRecovery);
+ bool writesOK = tryWrites(*cachedRecovery);
+ ASSERT_UNUSED(writesOK, writesOK);
+ }
+
+#if USE(JSVALUE64)
+ if (m_tagTypeNumber != InvalidGPRReg && m_newRegisters[m_tagTypeNumber])
+ releaseGPR(m_tagTypeNumber);
+#endif
+
+ // Handle 2) by loading all registers. We don't have to do any
+ // writes, since they have been taken care of above.
+ if (verbose)
+ dataLog(" Loading wanted registers into registers\n");
+ for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
+ CachedRecovery* cachedRecovery { m_newRegisters[reg] };
+ if (!cachedRecovery)
+ continue;
+
+ emitLoad(*cachedRecovery);
+ emitBox(*cachedRecovery);
+ ASSERT(cachedRecovery->targets().isEmpty());
+ }
+
+#if USE(JSVALUE64)
+ if (m_tagTypeNumber != InvalidGPRReg)
+ releaseGPR(m_tagTypeNumber);
+#endif
+
+ // At this point, we have read everything we cared about from the
+ // stack, and written everything we had to to the stack.
+ if (verbose)
+ dataLog(" Callee frame is fully set up\n");
+ if (!ASSERT_DISABLED) {
+ for (VirtualRegister reg = firstNew(); reg <= lastNew(); reg += 1)
+ ASSERT_UNUSED(reg, !getNew(reg));
+
+ for (CachedRecovery* cachedRecovery : m_cachedRecoveries) {
+ ASSERT_UNUSED(cachedRecovery, cachedRecovery->targets().isEmpty());
+ ASSERT(!cachedRecovery->recovery().isInJSStack());
+ }
+ }
+
+ // We need to handle 4) first because it implies releasing
+ // m_newFrameBase, which could be a wanted register.
+ if (verbose)
+ dataLog(" * Storing the argument count into ", VirtualRegister { CallFrameSlot::argumentCount }, "\n");
+ m_jit.store32(MacroAssembler::TrustedImm32(0),
+ addressForNew(VirtualRegister { CallFrameSlot::argumentCount }).withOffset(TagOffset));
+ RELEASE_ASSERT(m_numPassedArgs != UINT_MAX);
+ m_jit.store32(MacroAssembler::TrustedImm32(m_numPassedArgs),
+ addressForNew(VirtualRegister { CallFrameSlot::argumentCount }).withOffset(PayloadOffset));
+
+ if (!isSlowPath()) {
+ ASSERT(m_newFrameBase != MacroAssembler::stackPointerRegister);
+ if (verbose)
+ dataLog(" Releasing the new frame base pointer\n");
+ m_jit.move(m_newFrameBase, MacroAssembler::stackPointerRegister);
+ releaseGPR(m_newFrameBase);
+ }
+
+ // Finally we handle 3)
+ if (verbose)
+ dataLog(" Ensuring wanted registers are in the right register\n");
+ for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
+ CachedRecovery* cachedRecovery { m_newRegisters[reg] };
+ if (!cachedRecovery)
+ continue;
+
+ emitDisplace(*cachedRecovery);
+ }
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/CallFrameShuffler.h b/Source/JavaScriptCore/jit/CallFrameShuffler.h
new file mode 100644
index 000000000..6c0ea33f0
--- /dev/null
+++ b/Source/JavaScriptCore/jit/CallFrameShuffler.h
@@ -0,0 +1,804 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "CachedRecovery.h"
+#include "CallFrameShuffleData.h"
+#include "MacroAssembler.h"
+#include "RegisterSet.h"
+#include "StackAlignment.h"
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+class CallFrameShuffler {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ CallFrameShuffler(CCallHelpers&, const CallFrameShuffleData&);
+
+ void dump(PrintStream&) const;
+
+ // Any register that has been locked or acquired must be released
+ // before calling prepareForTailCall() or prepareForSlowPath().
+ void lockGPR(GPRReg gpr)
+ {
+ ASSERT(!m_lockedRegisters.get(gpr));
+ m_lockedRegisters.set(gpr);
+ if (verbose)
+ dataLog(" * Locking ", gpr, "\n");
+ }
+
+ GPRReg acquireGPR()
+ {
+ ensureGPR();
+ GPRReg gpr { getFreeGPR() };
+ ASSERT(!m_registers[gpr]);
+ lockGPR(gpr);
+ return gpr;
+ }
+
+ void releaseGPR(GPRReg gpr)
+ {
+ if (verbose) {
+ if (m_lockedRegisters.get(gpr))
+ dataLog(" * Releasing ", gpr, "\n");
+ else
+ dataLog(" * ", gpr, " was not locked\n");
+ }
+ m_lockedRegisters.clear(gpr);
+ }
+
+ void restoreGPR(GPRReg gpr)
+ {
+ if (!m_newRegisters[gpr])
+ return;
+
+ ensureGPR();
+#if USE(JSVALUE32_64)
+ GPRReg tempGPR { getFreeGPR() };
+ lockGPR(tempGPR);
+ ensureGPR();
+ releaseGPR(tempGPR);
+#endif
+ emitDisplace(*m_newRegisters[gpr]);
+ }
+
+ // You can only take a snapshot if the recovery has not started
+ // yet. The only operations that are valid before taking a
+ // snapshot are lockGPR(), acquireGPR() and releaseGPR().
+ //
+ // Locking status is *NOT* preserved by the snapshot: it only
+ // contains information about where the
+ // arguments/callee/callee-save registers are by taking into
+ // account any spilling that acquireGPR() could have done.
+ CallFrameShuffleData snapshot() const
+ {
+ ASSERT(isUndecided());
+
+ CallFrameShuffleData data;
+ data.numLocals = numLocals();
+ data.numPassedArgs = m_numPassedArgs;
+ data.callee = getNew(VirtualRegister { CallFrameSlot::callee })->recovery();
+ data.args.resize(argCount());
+ for (size_t i = 0; i < argCount(); ++i)
+ data.args[i] = getNew(virtualRegisterForArgument(i))->recovery();
+ for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
+ CachedRecovery* cachedRecovery { m_newRegisters[reg] };
+ if (!cachedRecovery)
+ continue;
+
+#if USE(JSVALUE64)
+ data.registers[reg] = cachedRecovery->recovery();
+#else
+ RELEASE_ASSERT_NOT_REACHED();
+#endif
+ }
+ return data;
+ }
+
+ // Ask the shuffler to put the callee into some registers once the
+ // shuffling is done. You should call this before any of the
+ // prepare() methods, and must not take a snapshot afterwards, as
+ // this would crash on 32-bit platforms.
+ void setCalleeJSValueRegs(JSValueRegs jsValueRegs)
+ {
+ ASSERT(isUndecided());
+ ASSERT(!getNew(jsValueRegs));
+ CachedRecovery* cachedRecovery { getNew(VirtualRegister(CallFrameSlot::callee)) };
+ ASSERT(cachedRecovery);
+ addNew(jsValueRegs, cachedRecovery->recovery());
+ }
+
+ // Ask the shuffler to assume the callee has already been checked
+ // to be a cell. This is a no-op on 64-bit platforms, but allows
+ // freeing up a GPR on 32-bit platforms.
+ // You obviously must have ensured that this is the case before
+ // running any of the prepare methods.
+ void assumeCalleeIsCell()
+ {
+#if USE(JSVALUE32_64)
+ CachedRecovery& calleeCachedRecovery = *getNew(VirtualRegister(CallFrameSlot::callee));
+ switch (calleeCachedRecovery.recovery().technique()) {
+ case InPair:
+ updateRecovery(
+ calleeCachedRecovery,
+ ValueRecovery::inGPR(
+ calleeCachedRecovery.recovery().payloadGPR(),
+ DataFormatCell));
+ break;
+ case DisplacedInJSStack:
+ updateRecovery(
+ calleeCachedRecovery,
+ ValueRecovery::displacedInJSStack(
+ calleeCachedRecovery.recovery().virtualRegister(),
+ DataFormatCell));
+ break;
+ case InFPR:
+ case UnboxedCellInGPR:
+ case CellDisplacedInJSStack:
+ break;
+ case Constant:
+ ASSERT(calleeCachedRecovery.recovery().constant().isCell());
+ break;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+#endif
+ }
+
+ // This will emit code to build the new frame over the old one.
+ void prepareForTailCall();
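+
+ // A typical tail-call sequence looks roughly like this (illustrative
+ // only; the real call sites live in the JIT call emitters, and
+ // calleeRegs stands for whatever JSValueRegs the caller wants the
+ // callee to end up in):
+ //
+ //     CallFrameShuffler shuffler(jit, shuffleData);
+ //     shuffler.setCalleeJSValueRegs(calleeRegs);
+ //     shuffler.prepareForTailCall();
+ //     // ... then emit the actual jump to the callee's entry point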
+
+ // This will emit code to build the new frame as if performing a
+ // regular call. However, the callee save registers will be
+ // restored, and any locals (not the header or arguments) of the
+ // current frame can be overwritten.
+ //
+ // A frame built using prepareForSlowPath() should be used either
+ // to throw an exception in, or destroyed using
+ // CCallHelpers::prepareForTailCallSlow() followed by a tail call.
+ void prepareForSlowPath();
+
+private:
+ static const bool verbose = false;
+
+ CCallHelpers& m_jit;
+
+ void prepareAny();
+
+ void spill(CachedRecovery&);
+
+ // "box" is arguably a bad name here. The meaning is that after
+ // calling emitBox(), you ensure that subsequently calling
+ // emitStore() will be able to store the value without additional
+ // transformation. In particular, this is a no-op for constants,
+ // and is a complete no-op on 32-bit since any unboxed value can
+ // still be stored by storing the payload and a statically known
+ // tag.
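+ //
+ // For instance, on 64-bit an unboxed int32 sitting in a GPR can
+ // typically be boxed in place by or'ing in TagTypeNumber, while a
+ // double has to be moved into a GPR with boxDouble() - which is why
+ // boxing may need a spare GPR or FPR (see
+ // CachedRecovery::boxingRequiresGPR() and boxingRequiresFPR()).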
+ void emitBox(CachedRecovery&);
+
+ bool canBox(CachedRecovery& cachedRecovery)
+ {
+ if (cachedRecovery.boxingRequiresGPR() && getFreeGPR() == InvalidGPRReg)
+ return false;
+
+ if (cachedRecovery.boxingRequiresFPR() && getFreeFPR() == InvalidFPRReg)
+ return false;
+
+ return true;
+ }
+
+ void ensureBox(CachedRecovery& cachedRecovery)
+ {
+ if (canBox(cachedRecovery))
+ return;
+
+ if (cachedRecovery.boxingRequiresGPR())
+ ensureGPR();
+
+ if (cachedRecovery.boxingRequiresFPR())
+ ensureFPR();
+ }
+
+ void emitLoad(CachedRecovery&);
+
+ bool canLoad(CachedRecovery&);
+
+ void ensureLoad(CachedRecovery& cachedRecovery)
+ {
+ if (canLoad(cachedRecovery))
+ return;
+
+ ASSERT(cachedRecovery.loadsIntoGPR() || cachedRecovery.loadsIntoFPR());
+
+ if (cachedRecovery.loadsIntoFPR()) {
+ if (cachedRecovery.loadsIntoGPR())
+ ensureRegister();
+ else
+ ensureFPR();
+ } else
+ ensureGPR();
+ }
+
+ bool canLoadAndBox(CachedRecovery& cachedRecovery)
+ {
+ // We don't have interfering loads & boxes
+ ASSERT(!cachedRecovery.loadsIntoFPR() || !cachedRecovery.boxingRequiresFPR());
+ ASSERT(!cachedRecovery.loadsIntoGPR() || !cachedRecovery.boxingRequiresGPR());
+
+ return canLoad(cachedRecovery) && canBox(cachedRecovery);
+ }
+
+ DataFormat emitStore(CachedRecovery&, MacroAssembler::Address);
+
+ void emitDisplace(CachedRecovery&);
+
+ void emitDeltaCheck();
+
+ Bag<CachedRecovery> m_cachedRecoveries;
+
+ void updateRecovery(CachedRecovery& cachedRecovery, ValueRecovery recovery)
+ {
+ clearCachedRecovery(cachedRecovery.recovery());
+ cachedRecovery.setRecovery(recovery);
+ setCachedRecovery(recovery, &cachedRecovery);
+ }
+
+ CachedRecovery* getCachedRecovery(ValueRecovery);
+
+ CachedRecovery* setCachedRecovery(ValueRecovery, CachedRecovery*);
+
+ void clearCachedRecovery(ValueRecovery recovery)
+ {
+ if (!recovery.isConstant())
+ setCachedRecovery(recovery, nullptr);
+ }
+
+ CachedRecovery* addCachedRecovery(ValueRecovery recovery)
+ {
+ if (recovery.isConstant())
+ return m_cachedRecoveries.add(recovery);
+ CachedRecovery* cachedRecovery = getCachedRecovery(recovery);
+ if (!cachedRecovery)
+ return setCachedRecovery(recovery, m_cachedRecoveries.add(recovery));
+ return cachedRecovery;
+ }
+
+ // These are the current recoveries present in the old frame's
+ // slots. A null CachedRecovery means we can trash the current
+ // value as we don't care about it.
+ Vector<CachedRecovery*> m_oldFrame;
+
+ int numLocals() const
+ {
+ return m_oldFrame.size() - CallerFrameAndPC::sizeInRegisters;
+ }
+
+ CachedRecovery* getOld(VirtualRegister reg) const
+ {
+ return m_oldFrame[CallerFrameAndPC::sizeInRegisters - reg.offset() - 1];
+ }
+
+ void setOld(VirtualRegister reg, CachedRecovery* cachedRecovery)
+ {
+ m_oldFrame[CallerFrameAndPC::sizeInRegisters - reg.offset() - 1] = cachedRecovery;
+ }
+
+ VirtualRegister firstOld() const
+ {
+ return VirtualRegister { static_cast<int>(-numLocals()) };
+ }
+
+ VirtualRegister lastOld() const
+ {
+ return VirtualRegister { CallerFrameAndPC::sizeInRegisters - 1 };
+ }
+
+ bool isValidOld(VirtualRegister reg) const
+ {
+ return reg >= firstOld() && reg <= lastOld();
+ }
+
+ bool m_didExtendFrame { false };
+
+ void extendFrameIfNeeded();
+
+ // This stores, for each slot in the new frame, information about
+ // the recovery for the value that should eventually go into that
+ // slot.
+ //
+ // Once the slot has been written, the corresponding entry in
+ // m_newFrame will be empty.
+ Vector<CachedRecovery*> m_newFrame;
+
+ size_t argCount() const
+ {
+ return m_newFrame.size() - CallFrame::headerSizeInRegisters;
+ }
+
+ CachedRecovery* getNew(VirtualRegister newRegister) const
+ {
+ return m_newFrame[newRegister.offset()];
+ }
+
+ void setNew(VirtualRegister newRegister, CachedRecovery* cachedRecovery)
+ {
+ m_newFrame[newRegister.offset()] = cachedRecovery;
+ }
+
+ void addNew(VirtualRegister newRegister, ValueRecovery recovery)
+ {
+ CachedRecovery* cachedRecovery = addCachedRecovery(recovery);
+ cachedRecovery->addTarget(newRegister);
+ setNew(newRegister, cachedRecovery);
+ }
+
+ VirtualRegister firstNew() const
+ {
+ return VirtualRegister { 0 };
+ }
+
+ VirtualRegister lastNew() const
+ {
+ return VirtualRegister { static_cast<int>(m_newFrame.size()) - 1 };
+ }
+
+ bool isValidNew(VirtualRegister reg) const
+ {
+ return reg >= firstNew() && reg <= lastNew();
+ }
+
+ int m_alignedOldFrameSize;
+ int m_alignedNewFrameSize;
+
+ // This is the distance, in slots, between the base of the new
+ // frame and the base of the old frame. It could be negative when
+ // preparing for a tail call to a function with smaller argument
+ // count.
+ //
+ // We will overwrite this appropriately for slow path calls, but
+ // we initialize it as if doing a fast path for the spills we
+ // could do while undecided (typically while calling acquireGPR()
+ // for a polymorphic call).
+ int m_frameDelta;
+
+ VirtualRegister newAsOld(VirtualRegister reg) const
+ {
+ return reg - m_frameDelta;
+ }
+
+ // This stores the set of locked registers, i.e. registers for
+ // which we have an implicit requirement that they are not changed.
+ //
+ // This will usually contain the link register on architectures
+ // that have one, any scratch register used by the macro assembler
+ // (e.g. r11 on X86_64), as well as any register that we use for
+ // addressing (see m_oldFrameBase and m_newFrameBase).
+ //
+ // We also use this to lock registers temporarily, for instance to
+ // ensure that we have at least 2 available registers for loading
+ // a pair on 32bits.
+ mutable RegisterSet m_lockedRegisters;
+
+ // This stores the current recoveries present in registers. A null
+ // CachedRecovery means we can trash the current value as we don't
+ // care about it.
+ RegisterMap<CachedRecovery*> m_registers;
+
+#if USE(JSVALUE64)
+ mutable GPRReg m_tagTypeNumber;
+
+ bool tryAcquireTagTypeNumber();
+#endif
+
+ // This stores, for each register, information about the recovery
+ // for the value that should eventually go into that register. The
+ // only registers that have a target recovery will be callee-save
+ // registers, as well as possibly one JSValueRegs for holding the
+ // callee.
+ //
+ // Once the correct value has been put into the registers, and
+ // contrary to what we do with m_newFrame, we keep the entry in
+ // m_newRegisters to simplify spilling.
+ RegisterMap<CachedRecovery*> m_newRegisters;
+
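+ // Returns a register that passes the given check, is not locked, and
+ // does not currently hold a live recovery. Registers that nobody wants
+ // as a final destination are preferred; failing that, a wanted but
+ // currently empty register is returned, and on 64-bit the register
+ // caching TagTypeNumber can be given up as a last resort.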
+ template<typename CheckFunctor>
+ Reg getFreeRegister(const CheckFunctor& check) const
+ {
+ Reg nonTemp { };
+ for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
+ if (m_lockedRegisters.get(reg))
+ continue;
+
+ if (!check(reg))
+ continue;
+
+ if (!m_registers[reg]) {
+ if (!m_newRegisters[reg])
+ return reg;
+ if (!nonTemp)
+ nonTemp = reg;
+ }
+ }
+
+#if USE(JSVALUE64)
+ if (!nonTemp && m_tagTypeNumber != InvalidGPRReg && check(Reg { m_tagTypeNumber })) {
+ ASSERT(m_lockedRegisters.get(m_tagTypeNumber));
+ m_lockedRegisters.clear(m_tagTypeNumber);
+ nonTemp = Reg { m_tagTypeNumber };
+ m_tagTypeNumber = InvalidGPRReg;
+ }
+#endif
+ return nonTemp;
+ }
+
+ GPRReg getFreeTempGPR() const
+ {
+ Reg freeTempGPR { getFreeRegister([this] (Reg reg) { return reg.isGPR() && !m_newRegisters[reg]; }) };
+ if (!freeTempGPR)
+ return InvalidGPRReg;
+ return freeTempGPR.gpr();
+ }
+
+ GPRReg getFreeGPR() const
+ {
+ Reg freeGPR { getFreeRegister([] (Reg reg) { return reg.isGPR(); }) };
+ if (!freeGPR)
+ return InvalidGPRReg;
+ return freeGPR.gpr();
+ }
+
+ FPRReg getFreeFPR() const
+ {
+ Reg freeFPR { getFreeRegister([] (Reg reg) { return reg.isFPR(); }) };
+ if (!freeFPR)
+ return InvalidFPRReg;
+ return freeFPR.fpr();
+ }
+
+ bool hasFreeRegister() const
+ {
+ return static_cast<bool>(getFreeRegister([] (Reg) { return true; }));
+ }
+
+ // This frees up a register satisfying the check functor (this
+ // functor could theoretically have any kind of logic, but it must
+ // ensure that it will only return true for registers - spill
+ // assumes and asserts that it is passed a cachedRecovery stored in a
+ // register).
+ template<typename CheckFunctor>
+ void ensureRegister(const CheckFunctor& check)
+ {
+ // If we can spill a callee-save, that's best, because it will
+ // free up a register that would otherwise have been taken for the
+ // longest amount of time.
+ //
+ // We could try to bias towards those that are not in their
+ // target registers yet, but the gain is probably super
+ // small. Unless you have a huge number of arguments (at least
+ // around twice the number of available registers on your
+ // architecture), no spilling is going to take place anyway.
+ for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
+ if (m_lockedRegisters.get(reg))
+ continue;
+
+ CachedRecovery* cachedRecovery { m_newRegisters[reg] };
+ if (!cachedRecovery)
+ continue;
+
+ if (check(*cachedRecovery)) {
+ if (verbose)
+ dataLog(" ", cachedRecovery->recovery(), " looks like a good spill candidate\n");
+ spill(*cachedRecovery);
+ return;
+ }
+ }
+
+ // We use the cachedRecovery associated with the first new slot we
+ // can, because that is the one whose write will become possible
+ // the latest, i.e. the one that we would otherwise have had to
+ // retain in a register for the longest.
+ for (VirtualRegister reg = firstNew(); reg <= lastNew(); reg += 1) {
+ CachedRecovery* cachedRecovery { getNew(reg) };
+ if (!cachedRecovery)
+ continue;
+
+ if (check(*cachedRecovery)) {
+ spill(*cachedRecovery);
+ return;
+ }
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
+ void ensureRegister()
+ {
+ if (hasFreeRegister())
+ return;
+
+ if (verbose)
+ dataLog(" Finding a register to spill\n");
+ ensureRegister(
+ [this] (const CachedRecovery& cachedRecovery) {
+ if (cachedRecovery.recovery().isInGPR())
+ return !m_lockedRegisters.get(cachedRecovery.recovery().gpr());
+ if (cachedRecovery.recovery().isInFPR())
+ return !m_lockedRegisters.get(cachedRecovery.recovery().fpr());
+#if USE(JSVALUE32_64)
+ if (cachedRecovery.recovery().technique() == InPair) {
+ return !m_lockedRegisters.get(cachedRecovery.recovery().tagGPR())
+ && !m_lockedRegisters.get(cachedRecovery.recovery().payloadGPR());
+ }
+#endif
+ return false;
+ });
+ }
+
+ void ensureTempGPR()
+ {
+ if (getFreeTempGPR() != InvalidGPRReg)
+ return;
+
+ if (verbose)
+ dataLog(" Finding a temp GPR to spill\n");
+ ensureRegister(
+ [this] (const CachedRecovery& cachedRecovery) {
+ if (cachedRecovery.recovery().isInGPR()) {
+ return !m_lockedRegisters.get(cachedRecovery.recovery().gpr())
+ && !m_newRegisters[cachedRecovery.recovery().gpr()];
+ }
+#if USE(JSVALUE32_64)
+ if (cachedRecovery.recovery().technique() == InPair) {
+ return !m_lockedRegisters.get(cachedRecovery.recovery().tagGPR())
+ && !m_lockedRegisters.get(cachedRecovery.recovery().payloadGPR())
+ && !m_newRegisters[cachedRecovery.recovery().tagGPR()]
+ && !m_newRegisters[cachedRecovery.recovery().payloadGPR()];
+ }
+#endif
+ return false;
+ });
+ }
+
+ void ensureGPR()
+ {
+ if (getFreeGPR() != InvalidGPRReg)
+ return;
+
+ if (verbose)
+ dataLog(" Finding a GPR to spill\n");
+ ensureRegister(
+ [this] (const CachedRecovery& cachedRecovery) {
+ if (cachedRecovery.recovery().isInGPR())
+ return !m_lockedRegisters.get(cachedRecovery.recovery().gpr());
+#if USE(JSVALUE32_64)
+ if (cachedRecovery.recovery().technique() == InPair) {
+ return !m_lockedRegisters.get(cachedRecovery.recovery().tagGPR())
+ && !m_lockedRegisters.get(cachedRecovery.recovery().payloadGPR());
+ }
+#endif
+ return false;
+ });
+ }
+
+ void ensureFPR()
+ {
+ if (getFreeFPR() != InvalidFPRReg)
+ return;
+
+ if (verbose)
+ dataLog(" Finding an FPR to spill\n");
+ ensureRegister(
+ [this] (const CachedRecovery& cachedRecovery) {
+ if (cachedRecovery.recovery().isInFPR())
+ return !m_lockedRegisters.get(cachedRecovery.recovery().fpr());
+ return false;
+ });
+ }
+
+ CachedRecovery* getNew(JSValueRegs jsValueRegs) const
+ {
+#if USE(JSVALUE64)
+ return m_newRegisters[jsValueRegs.gpr()];
+#else
+ ASSERT(
+ jsValueRegs.tagGPR() == InvalidGPRReg || jsValueRegs.payloadGPR() == InvalidGPRReg
+ || m_newRegisters[jsValueRegs.payloadGPR()] == m_newRegisters[jsValueRegs.tagGPR()]);
+ if (jsValueRegs.payloadGPR() == InvalidGPRReg)
+ return m_newRegisters[jsValueRegs.tagGPR()];
+ return m_newRegisters[jsValueRegs.payloadGPR()];
+#endif
+ }
+
+ void addNew(JSValueRegs jsValueRegs, ValueRecovery recovery)
+ {
+ ASSERT(jsValueRegs && !getNew(jsValueRegs));
+ CachedRecovery* cachedRecovery = addCachedRecovery(recovery);
+#if USE(JSVALUE64)
+ if (cachedRecovery->wantedJSValueRegs())
+ m_newRegisters[cachedRecovery->wantedJSValueRegs().gpr()] = nullptr;
+ m_newRegisters[jsValueRegs.gpr()] = cachedRecovery;
+#else
+ if (JSValueRegs oldRegs { cachedRecovery->wantedJSValueRegs() }) {
+ if (oldRegs.payloadGPR())
+ m_newRegisters[oldRegs.payloadGPR()] = nullptr;
+ if (oldRegs.tagGPR())
+ m_newRegisters[oldRegs.tagGPR()] = nullptr;
+ }
+ if (jsValueRegs.payloadGPR() != InvalidGPRReg)
+ m_newRegisters[jsValueRegs.payloadGPR()] = cachedRecovery;
+ if (jsValueRegs.tagGPR() != InvalidGPRReg)
+ m_newRegisters[jsValueRegs.tagGPR()] = cachedRecovery;
+#endif
+ ASSERT(!cachedRecovery->wantedJSValueRegs());
+ cachedRecovery->setWantedJSValueRegs(jsValueRegs);
+ }
+
+ void addNew(FPRReg fpr, ValueRecovery recovery)
+ {
+ ASSERT(fpr != InvalidFPRReg && !m_newRegisters[fpr]);
+ CachedRecovery* cachedRecovery = addCachedRecovery(recovery);
+ m_newRegisters[fpr] = cachedRecovery;
+ ASSERT(cachedRecovery->wantedFPR() == InvalidFPRReg);
+ cachedRecovery->setWantedFPR(fpr);
+ }
+
+ // m_oldFrameBase is the register relative to which we access
+ // slots in the old call frame, with an additional offset of
+ // m_oldFrameOffset.
+ //
+ // - For an actual tail call, m_oldFrameBase is the stack
+ // pointer, and m_oldFrameOffset is the number of locals of the
+ // tail caller's frame. We use such stack pointer-based
+ // addressing because it allows us to load the tail caller's
+ // caller's frame pointer in the frame pointer register
+ // immediately instead of awkwardly keeping it around on the
+ // stack.
+ //
+ // - For a slow path call, m_oldFrameBase is just the frame
+ // pointer, and m_oldFrameOffset is 0.
+ GPRReg m_oldFrameBase { MacroAssembler::framePointerRegister };
+ int m_oldFrameOffset { 0 };
+
+ MacroAssembler::Address addressForOld(VirtualRegister reg) const
+ {
+ return MacroAssembler::Address(m_oldFrameBase,
+ (m_oldFrameOffset + reg.offset()) * sizeof(Register));
+ }
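+
+ // For example, during a tail call m_oldFrameBase is the stack pointer
+ // and m_oldFrameOffset is numLocals(), so addressForOld(firstOld())
+ // resolves to sp + 0: the lowest local of the old frame sits exactly
+ // at the stack pointer (which is what emitDeltaCheck() verifies).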
+
+ // m_newFrameBase is the register relative to which we access
+ // slots in the new call frame, and we always make it point to
+ // wherever the stack pointer will be right before making the
+ // actual call/jump. The actual base of the new frame is at offset
+ // m_newFrameOffset relative to m_newFrameBase.
+ //
+ // - For an actual tail call, m_newFrameBase is computed
+ // dynamically, and m_newFrameOffset varies between 0 and -2
+ // depending on the architecture's calling convention (see
+ // prepareForTailCall).
+ //
+ // - For a slow path call, m_newFrameBase is the actual stack
+ // pointer, and m_newFrameOffset is - CallerFrameAndPCSize,
+ // following the convention for a regular call.
+ GPRReg m_newFrameBase { InvalidGPRReg };
+ int m_newFrameOffset { 0 };
+
+ bool isUndecided() const
+ {
+ return m_newFrameBase == InvalidGPRReg;
+ }
+
+ bool isSlowPath() const
+ {
+ return m_newFrameBase == MacroAssembler::stackPointerRegister;
+ }
+
+ MacroAssembler::Address addressForNew(VirtualRegister reg) const
+ {
+ return MacroAssembler::Address(m_newFrameBase,
+ (m_newFrameOffset + reg.offset()) * sizeof(Register));
+ }
+
+ // We use a concept of "danger zone". The danger zone consists of
+ // all the writes in the new frame that could overlap with reads
+ // in the old frame.
+ //
+ // Because we could have a higher actual number of arguments than
+ // parameters, when preparing a tail call, we need to assume that
+ // writing to a slot on the new frame could overlap not only with
+ // the corresponding slot in the old frame, but also with any slot
+ // above it. Thus, the danger zone consists of all writes between
+ // the first write and what I call the "danger frontier": the
+ // highest slot in the old frame we still care about. In other
+ // words, the danger zone contains all the slots between the first
+ // slot of the new frame and the danger frontier. Because the danger
+ // frontier is related to the new frame, it is stored as a virtual
+ // register *in the new frame*.
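+ //
+ // Concretely, updateDangerFrontier() below sets the frontier to the
+ // highest slot of the new frame that both still needs to be written
+ // and whose corresponding old slot (newAsOld) still holds a recovery
+ // we have yet to read.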
+ VirtualRegister m_dangerFrontier;
+
+ VirtualRegister dangerFrontier() const
+ {
+ ASSERT(!isUndecided());
+
+ return m_dangerFrontier;
+ }
+
+ bool isDangerNew(VirtualRegister reg) const
+ {
+ ASSERT(!isUndecided() && isValidNew(reg));
+ return reg <= dangerFrontier();
+ }
+
+ void updateDangerFrontier()
+ {
+ ASSERT(!isUndecided());
+
+ m_dangerFrontier = firstNew() - 1;
+ for (VirtualRegister reg = lastNew(); reg >= firstNew(); reg -= 1) {
+ if (!getNew(reg) || !isValidOld(newAsOld(reg)) || !getOld(newAsOld(reg)))
+ continue;
+
+ m_dangerFrontier = reg;
+ if (verbose)
+ dataLog(" Danger frontier now at NEW ", m_dangerFrontier, "\n");
+ break;
+ }
+ if (verbose && m_dangerFrontier < firstNew())
+ dataLog(" All clear! Danger zone is empty.\n");
+ }
+
+ // A safe write is a write that never writes into the danger zone.
+ bool hasOnlySafeWrites(CachedRecovery& cachedRecovery) const
+ {
+ for (VirtualRegister target : cachedRecovery.targets()) {
+ if (isDangerNew(target))
+ return false;
+ }
+ return true;
+ }
+
+ // You must ensure that there are no dangerous writes left before
+ // calling this function.
+ bool tryWrites(CachedRecovery&);
+
+ // This function tries to ensure that there is no longer any
+ // possible safe write, i.e. all remaining writes are either to
+ // the danger zone or callee save restorations.
+ //
+ // It returns false if it was unable to perform some safe writes
+ // due to high register pressure.
+ bool performSafeWrites();
+
+ unsigned m_numPassedArgs { UINT_MAX };
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/CallFrameShuffler32_64.cpp b/Source/JavaScriptCore/jit/CallFrameShuffler32_64.cpp
new file mode 100644
index 000000000..5dfe96e81
--- /dev/null
+++ b/Source/JavaScriptCore/jit/CallFrameShuffler32_64.cpp
@@ -0,0 +1,305 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "CallFrameShuffler.h"
+
+#if ENABLE(JIT) && USE(JSVALUE32_64)
+
+#include "CCallHelpers.h"
+#include "DataFormat.h"
+#include "JSCInlines.h"
+
+namespace JSC {
+
+DataFormat CallFrameShuffler::emitStore(CachedRecovery& location, MacroAssembler::Address address)
+{
+ ASSERT(!location.recovery().isInJSStack());
+
+ switch (location.recovery().technique()) {
+ case UnboxedInt32InGPR:
+ m_jit.store32(MacroAssembler::TrustedImm32(JSValue::Int32Tag),
+ address.withOffset(TagOffset));
+ m_jit.store32(location.recovery().gpr(), address.withOffset(PayloadOffset));
+ return DataFormatInt32;
+ case UnboxedCellInGPR:
+ m_jit.store32(MacroAssembler::TrustedImm32(JSValue::CellTag),
+ address.withOffset(TagOffset));
+ m_jit.store32(location.recovery().gpr(), address.withOffset(PayloadOffset));
+ return DataFormatCell;
+ case Constant:
+ m_jit.storeTrustedValue(location.recovery().constant(), address);
+ return DataFormatJS;
+ case InPair:
+ m_jit.storeValue(location.recovery().jsValueRegs(), address);
+ return DataFormatJS;
+ case UnboxedBooleanInGPR:
+ m_jit.store32(MacroAssembler::TrustedImm32(JSValue::BooleanTag),
+ address.withOffset(TagOffset));
+ m_jit.store32(location.recovery().gpr(), address.withOffset(PayloadOffset));
+ return DataFormatBoolean;
+ case InFPR:
+ case UnboxedDoubleInFPR:
+ m_jit.storeDouble(location.recovery().fpr(), address);
+ return DataFormatJS;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+}
+
+void CallFrameShuffler::emitBox(CachedRecovery& location)
+{
+    // Nothing to do, we're good! JSValues and doubles can be stored
+    // immediately, and the other formats need no transformation beyond
+    // storing a constant tag separately.
+ ASSERT_UNUSED(location, canBox(location));
+}
+
+void CallFrameShuffler::emitLoad(CachedRecovery& location)
+{
+ if (!location.recovery().isInJSStack())
+ return;
+
+ if (verbose)
+ dataLog(" * Loading ", location.recovery(), " into ");
+ VirtualRegister reg { location.recovery().virtualRegister() };
+ MacroAssembler::Address address { addressForOld(reg) };
+
+ bool tryFPR { true };
+ JSValueRegs wantedJSValueRegs { location.wantedJSValueRegs() };
+ if (wantedJSValueRegs) {
+ if (wantedJSValueRegs.payloadGPR() != InvalidGPRReg
+ && !m_registers[wantedJSValueRegs.payloadGPR()]
+ && !m_lockedRegisters.get(wantedJSValueRegs.payloadGPR()))
+ tryFPR = false;
+ if (wantedJSValueRegs.tagGPR() != InvalidGPRReg
+ && !m_registers[wantedJSValueRegs.tagGPR()]
+ && !m_lockedRegisters.get(wantedJSValueRegs.tagGPR()))
+ tryFPR = false;
+ }
+
+ if (tryFPR && location.loadsIntoFPR()) {
+ FPRReg resultFPR = location.wantedFPR();
+ if (resultFPR == InvalidFPRReg || m_registers[resultFPR] || m_lockedRegisters.get(resultFPR))
+ resultFPR = getFreeFPR();
+ if (resultFPR != InvalidFPRReg) {
+ m_jit.loadDouble(address, resultFPR);
+ DataFormat dataFormat = DataFormatJS;
+ if (location.recovery().dataFormat() == DataFormatDouble)
+ dataFormat = DataFormatDouble;
+ updateRecovery(location,
+ ValueRecovery::inFPR(resultFPR, dataFormat));
+ if (verbose)
+ dataLog(location.recovery(), "\n");
+ if (reg == newAsOld(dangerFrontier()))
+ updateDangerFrontier();
+ return;
+ }
+ }
+
+ if (location.loadsIntoGPR()) {
+ GPRReg resultGPR { wantedJSValueRegs.payloadGPR() };
+ if (resultGPR == InvalidGPRReg || m_registers[resultGPR] || m_lockedRegisters.get(resultGPR))
+ resultGPR = getFreeGPR();
+ ASSERT(resultGPR != InvalidGPRReg);
+ m_jit.loadPtr(address.withOffset(PayloadOffset), resultGPR);
+ updateRecovery(location,
+ ValueRecovery::inGPR(resultGPR, location.recovery().dataFormat()));
+ if (verbose)
+ dataLog(location.recovery(), "\n");
+ if (reg == newAsOld(dangerFrontier()))
+ updateDangerFrontier();
+ return;
+ }
+
+ ASSERT(location.recovery().technique() == DisplacedInJSStack);
+ GPRReg payloadGPR { wantedJSValueRegs.payloadGPR() };
+ GPRReg tagGPR { wantedJSValueRegs.tagGPR() };
+ if (payloadGPR == InvalidGPRReg || m_registers[payloadGPR] || m_lockedRegisters.get(payloadGPR))
+ payloadGPR = getFreeGPR();
+ m_lockedRegisters.set(payloadGPR);
+ if (tagGPR == InvalidGPRReg || m_registers[tagGPR] || m_lockedRegisters.get(tagGPR))
+ tagGPR = getFreeGPR();
+ m_lockedRegisters.clear(payloadGPR);
+ ASSERT(payloadGPR != InvalidGPRReg && tagGPR != InvalidGPRReg && tagGPR != payloadGPR);
+ m_jit.loadPtr(address.withOffset(PayloadOffset), payloadGPR);
+ m_jit.loadPtr(address.withOffset(TagOffset), tagGPR);
+ updateRecovery(location,
+ ValueRecovery::inPair(tagGPR, payloadGPR));
+ if (verbose)
+ dataLog(location.recovery(), "\n");
+ if (reg == newAsOld(dangerFrontier()))
+ updateDangerFrontier();
+}
+
+bool CallFrameShuffler::canLoad(CachedRecovery& location)
+{
+ if (!location.recovery().isInJSStack())
+ return true;
+
+ if (location.loadsIntoFPR() && getFreeFPR() != InvalidFPRReg)
+ return true;
+
+ if (location.loadsIntoGPR() && getFreeGPR() != InvalidGPRReg)
+ return true;
+
+ if (location.recovery().technique() == DisplacedInJSStack) {
+ GPRReg payloadGPR { getFreeGPR() };
+ if (payloadGPR == InvalidGPRReg)
+ return false;
+ m_lockedRegisters.set(payloadGPR);
+ GPRReg tagGPR { getFreeGPR() };
+ m_lockedRegisters.clear(payloadGPR);
+ return tagGPR != InvalidGPRReg;
+ }
+
+ return false;
+}
+
+void CallFrameShuffler::emitDisplace(CachedRecovery& location)
+{
+ ASSERT(location.recovery().isInRegisters());
+ JSValueRegs wantedJSValueRegs { location.wantedJSValueRegs() };
+    ASSERT(wantedJSValueRegs); // We don't support wanted FPRs on 32-bit platforms
+
+ GPRReg wantedTagGPR { wantedJSValueRegs.tagGPR() };
+ GPRReg wantedPayloadGPR { wantedJSValueRegs.payloadGPR() };
+
+ if (wantedTagGPR != InvalidGPRReg) {
+ ASSERT(!m_lockedRegisters.get(wantedTagGPR));
+ if (CachedRecovery* currentTag { m_registers[wantedTagGPR] }) {
+ if (currentTag == &location) {
+ if (verbose)
+ dataLog(" + ", wantedTagGPR, " is OK\n");
+ } else {
+                // This can never happen on 32-bit platforms since we
+ // have at most one wanted JSValueRegs, for the
+ // callee, and no callee-save registers.
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+ }
+
+ if (wantedPayloadGPR != InvalidGPRReg) {
+ ASSERT(!m_lockedRegisters.get(wantedPayloadGPR));
+ if (CachedRecovery* currentPayload { m_registers[wantedPayloadGPR] }) {
+ if (currentPayload == &location) {
+ if (verbose)
+ dataLog(" + ", wantedPayloadGPR, " is OK\n");
+ } else {
+ // See above
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+ }
+
+ if (location.recovery().technique() == InPair
+ || location.recovery().isInGPR()) {
+ GPRReg payloadGPR;
+ if (location.recovery().technique() == InPair)
+ payloadGPR = location.recovery().payloadGPR();
+ else
+ payloadGPR = location.recovery().gpr();
+
+ if (wantedPayloadGPR == InvalidGPRReg)
+ wantedPayloadGPR = payloadGPR;
+
+ if (payloadGPR != wantedPayloadGPR) {
+ if (location.recovery().technique() == InPair
+ && wantedPayloadGPR == location.recovery().tagGPR()) {
+ if (verbose)
+ dataLog(" * Swapping ", payloadGPR, " and ", wantedPayloadGPR, "\n");
+ m_jit.swap(payloadGPR, wantedPayloadGPR);
+ updateRecovery(location,
+ ValueRecovery::inPair(payloadGPR, wantedPayloadGPR));
+ } else {
+ if (verbose)
+ dataLog(" * Moving ", payloadGPR, " into ", wantedPayloadGPR, "\n");
+ m_jit.move(payloadGPR, wantedPayloadGPR);
+ if (location.recovery().technique() == InPair) {
+ updateRecovery(location,
+ ValueRecovery::inPair(location.recovery().tagGPR(),
+ wantedPayloadGPR));
+ } else {
+ updateRecovery(location,
+ ValueRecovery::inGPR(wantedPayloadGPR, location.recovery().dataFormat()));
+ }
+ }
+ }
+
+ if (wantedTagGPR == InvalidGPRReg)
+ wantedTagGPR = getFreeGPR();
+ switch (location.recovery().dataFormat()) {
+ case DataFormatInt32:
+ if (verbose)
+ dataLog(" * Moving int32 tag into ", wantedTagGPR, "\n");
+ m_jit.move(MacroAssembler::TrustedImm32(JSValue::Int32Tag),
+ wantedTagGPR);
+ break;
+ case DataFormatCell:
+ if (verbose)
+ dataLog(" * Moving cell tag into ", wantedTagGPR, "\n");
+ m_jit.move(MacroAssembler::TrustedImm32(JSValue::CellTag),
+ wantedTagGPR);
+ break;
+ case DataFormatBoolean:
+ if (verbose)
+ dataLog(" * Moving boolean tag into ", wantedTagGPR, "\n");
+ m_jit.move(MacroAssembler::TrustedImm32(JSValue::BooleanTag),
+ wantedTagGPR);
+ break;
+ case DataFormatJS:
+ ASSERT(wantedTagGPR != location.recovery().payloadGPR());
+ if (wantedTagGPR != location.recovery().tagGPR()) {
+ if (verbose)
+ dataLog(" * Moving ", location.recovery().tagGPR(), " into ", wantedTagGPR, "\n");
+ m_jit.move(location.recovery().tagGPR(), wantedTagGPR);
+ }
+ break;
+
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ } else {
+ ASSERT(location.recovery().isInFPR());
+ if (wantedTagGPR == InvalidGPRReg) {
+ ASSERT(wantedPayloadGPR != InvalidGPRReg);
+ m_lockedRegisters.set(wantedPayloadGPR);
+ wantedTagGPR = getFreeGPR();
+ m_lockedRegisters.clear(wantedPayloadGPR);
+ }
+ if (wantedPayloadGPR == InvalidGPRReg) {
+ m_lockedRegisters.set(wantedTagGPR);
+ wantedPayloadGPR = getFreeGPR();
+ m_lockedRegisters.clear(wantedTagGPR);
+ }
+ m_jit.boxDouble(location.recovery().fpr(), wantedTagGPR, wantedPayloadGPR);
+ }
+ updateRecovery(location, ValueRecovery::inPair(wantedTagGPR, wantedPayloadGPR));
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT) && USE(JSVALUE32_64)
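A minimal sketch of the tag/payload layout that the 32-bit emitStore above relies on. The struct, function name, and tag constants here are illustrative stand-ins, not JSC's actual JSValue32_64 definitions: each value occupies two 32-bit words, and unboxed formats only need a constant tag stored next to the payload.

#include <cstdint>
#include <cstdio>

// Hypothetical tag constants for illustration only; the real values are defined by JSValue.
enum IllustrativeTag : uint32_t {
    ExampleInt32Tag = 0xffffffff,
    ExampleBooleanTag = 0xfffffffe,
    ExampleCellTag = 0xfffffffb
};

// A stack slot on a 32-bit target: the value occupies two 32-bit words.
struct Slot {
    uint32_t payload;
    uint32_t tag;
};

// Mirrors the shape of the UnboxedInt32InGPR case of emitStore:
// store the payload word and a constant tag word.
static void storeUnboxedInt32(Slot& slot, int32_t value)
{
    slot.payload = static_cast<uint32_t>(value);
    slot.tag = ExampleInt32Tag;
}

int main()
{
    Slot slot;
    storeUnboxedInt32(slot, -7);
    std::printf("tag=%08x payload=%08x\n", slot.tag, slot.payload);
    return 0;
}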
diff --git a/Source/JavaScriptCore/jit/CallFrameShuffler64.cpp b/Source/JavaScriptCore/jit/CallFrameShuffler64.cpp
new file mode 100644
index 000000000..2ef6ed111
--- /dev/null
+++ b/Source/JavaScriptCore/jit/CallFrameShuffler64.cpp
@@ -0,0 +1,369 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "CallFrameShuffler.h"
+
+#if ENABLE(JIT) && USE(JSVALUE64)
+
+#include "CCallHelpers.h"
+#include "DataFormat.h"
+#include "JSCInlines.h"
+
+namespace JSC {
+
+DataFormat CallFrameShuffler::emitStore(
+ CachedRecovery& cachedRecovery, MacroAssembler::Address address)
+{
+ ASSERT(!cachedRecovery.recovery().isInJSStack());
+
+ switch (cachedRecovery.recovery().technique()) {
+ case InGPR:
+ m_jit.storePtr(cachedRecovery.recovery().gpr(), address);
+ return DataFormatJS;
+ case UnboxedInt32InGPR:
+ m_jit.store32(cachedRecovery.recovery().gpr(), address.withOffset(PayloadOffset));
+ return DataFormatInt32;
+ case UnboxedInt52InGPR:
+ m_jit.rshift64(MacroAssembler::TrustedImm32(JSValue::int52ShiftAmount),
+ cachedRecovery.recovery().gpr());
+ FALLTHROUGH;
+ case UnboxedStrictInt52InGPR:
+ m_jit.storePtr(cachedRecovery.recovery().gpr(), address);
+ return DataFormatStrictInt52;
+ case UnboxedBooleanInGPR:
+ m_jit.storePtr(cachedRecovery.recovery().gpr(), address);
+ return DataFormatBoolean;
+ case UnboxedCellInGPR:
+ m_jit.storePtr(cachedRecovery.recovery().gpr(), address);
+ return DataFormatCell;
+ case UnboxedDoubleInFPR:
+ m_jit.storeDouble(cachedRecovery.recovery().fpr(), address);
+ return DataFormatDouble;
+ case InFPR:
+ m_jit.storeDouble(cachedRecovery.recovery().fpr(), address);
+ return DataFormatJS;
+ case Constant:
+ m_jit.storeTrustedValue(cachedRecovery.recovery().constant(), address);
+ return DataFormatJS;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+}
+
+void CallFrameShuffler::emitBox(CachedRecovery& cachedRecovery)
+{
+ ASSERT(canBox(cachedRecovery));
+ if (cachedRecovery.recovery().isConstant())
+ return;
+
+ if (cachedRecovery.recovery().isInGPR()) {
+ switch (cachedRecovery.recovery().dataFormat()) {
+ case DataFormatInt32:
+ if (verbose)
+ dataLog(" * Boxing ", cachedRecovery.recovery());
+ m_jit.zeroExtend32ToPtr(
+ cachedRecovery.recovery().gpr(),
+ cachedRecovery.recovery().gpr());
+ m_lockedRegisters.set(cachedRecovery.recovery().gpr());
+ if (tryAcquireTagTypeNumber())
+ m_jit.or64(m_tagTypeNumber, cachedRecovery.recovery().gpr());
+ else {
+ // We have to do this the hard way
+ m_jit.or64(MacroAssembler::TrustedImm64(TagTypeNumber),
+ cachedRecovery.recovery().gpr());
+ }
+ m_lockedRegisters.clear(cachedRecovery.recovery().gpr());
+ cachedRecovery.setRecovery(
+ ValueRecovery::inGPR(cachedRecovery.recovery().gpr(), DataFormatJS));
+ if (verbose)
+ dataLog(" into ", cachedRecovery.recovery(), "\n");
+ return;
+ case DataFormatInt52:
+ if (verbose)
+ dataLog(" * Boxing ", cachedRecovery.recovery());
+ m_jit.rshift64(MacroAssembler::TrustedImm32(JSValue::int52ShiftAmount),
+ cachedRecovery.recovery().gpr());
+ cachedRecovery.setRecovery(
+ ValueRecovery::inGPR(cachedRecovery.recovery().gpr(), DataFormatStrictInt52));
+ if (verbose)
+ dataLog(" into ", cachedRecovery.recovery(), "\n");
+ FALLTHROUGH;
+ case DataFormatStrictInt52: {
+ if (verbose)
+ dataLog(" * Boxing ", cachedRecovery.recovery());
+ FPRReg resultFPR = getFreeFPR();
+ ASSERT(resultFPR != InvalidFPRReg);
+ m_jit.convertInt64ToDouble(cachedRecovery.recovery().gpr(), resultFPR);
+ updateRecovery(cachedRecovery, ValueRecovery::inFPR(resultFPR, DataFormatDouble));
+ if (verbose)
+ dataLog(" into ", cachedRecovery.recovery(), "\n");
+ break;
+ }
+ case DataFormatBoolean:
+ if (verbose)
+ dataLog(" * Boxing ", cachedRecovery.recovery());
+ m_jit.add32(MacroAssembler::TrustedImm32(ValueFalse),
+ cachedRecovery.recovery().gpr());
+ cachedRecovery.setRecovery(
+ ValueRecovery::inGPR(cachedRecovery.recovery().gpr(), DataFormatJS));
+ if (verbose)
+ dataLog(" into ", cachedRecovery.recovery(), "\n");
+ return;
+ default:
+ return;
+ }
+ }
+
+ if (cachedRecovery.recovery().isInFPR()) {
+ if (cachedRecovery.recovery().dataFormat() == DataFormatDouble) {
+ if (verbose)
+ dataLog(" * Boxing ", cachedRecovery.recovery());
+ GPRReg resultGPR = cachedRecovery.wantedJSValueRegs().gpr();
+ if (resultGPR == InvalidGPRReg || m_registers[resultGPR])
+ resultGPR = getFreeGPR();
+ ASSERT(resultGPR != InvalidGPRReg);
+ m_jit.purifyNaN(cachedRecovery.recovery().fpr());
+ m_jit.moveDoubleTo64(cachedRecovery.recovery().fpr(), resultGPR);
+ m_lockedRegisters.set(resultGPR);
+ if (tryAcquireTagTypeNumber())
+ m_jit.sub64(m_tagTypeNumber, resultGPR);
+ else
+ m_jit.sub64(MacroAssembler::TrustedImm64(TagTypeNumber), resultGPR);
+ m_lockedRegisters.clear(resultGPR);
+ updateRecovery(cachedRecovery, ValueRecovery::inGPR(resultGPR, DataFormatJS));
+ if (verbose)
+ dataLog(" into ", cachedRecovery.recovery(), "\n");
+ return;
+ }
+ ASSERT(cachedRecovery.recovery().dataFormat() == DataFormatJS);
+ return;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+void CallFrameShuffler::emitLoad(CachedRecovery& cachedRecovery)
+{
+ if (!cachedRecovery.recovery().isInJSStack())
+ return;
+
+ if (verbose)
+ dataLog(" * Loading ", cachedRecovery.recovery(), " into ");
+
+ VirtualRegister reg = cachedRecovery.recovery().virtualRegister();
+ MacroAssembler::Address address { addressForOld(reg) };
+ bool tryFPR { true };
+ GPRReg resultGPR { cachedRecovery.wantedJSValueRegs().gpr() };
+
+ // If we want a GPR and it's available, that's better than loading
+ // into an FPR.
+ if (resultGPR != InvalidGPRReg && !m_registers[resultGPR]
+ && !m_lockedRegisters.get(resultGPR) && cachedRecovery.loadsIntoGPR())
+ tryFPR = false;
+
+ // Otherwise, we prefer loading into FPRs if possible
+ if (tryFPR && cachedRecovery.loadsIntoFPR()) {
+ FPRReg resultFPR { cachedRecovery.wantedFPR() };
+ if (resultFPR == InvalidFPRReg || m_registers[resultFPR] || m_lockedRegisters.get(resultFPR))
+ resultFPR = getFreeFPR();
+ if (resultFPR != InvalidFPRReg) {
+ m_jit.loadDouble(address, resultFPR);
+ DataFormat dataFormat = DataFormatJS;
+ // We could be transforming a DataFormatCell into a
+ // DataFormatJS here - but that's OK.
+ if (cachedRecovery.recovery().dataFormat() == DataFormatDouble)
+ dataFormat = DataFormatDouble;
+ updateRecovery(cachedRecovery,
+ ValueRecovery::inFPR(resultFPR, dataFormat));
+ if (verbose)
+ dataLog(cachedRecovery.recovery(), "\n");
+ if (reg == newAsOld(dangerFrontier()))
+ updateDangerFrontier();
+ return;
+ }
+ }
+
+ ASSERT(cachedRecovery.loadsIntoGPR());
+ if (resultGPR == InvalidGPRReg || m_registers[resultGPR] || m_lockedRegisters.get(resultGPR))
+ resultGPR = getFreeGPR();
+ ASSERT(resultGPR != InvalidGPRReg);
+ m_jit.loadPtr(address, resultGPR);
+ updateRecovery(cachedRecovery,
+ ValueRecovery::inGPR(resultGPR, cachedRecovery.recovery().dataFormat()));
+ if (verbose)
+ dataLog(cachedRecovery.recovery(), "\n");
+ if (reg == newAsOld(dangerFrontier()))
+ updateDangerFrontier();
+}
+
+bool CallFrameShuffler::canLoad(CachedRecovery& cachedRecovery)
+{
+ if (!cachedRecovery.recovery().isInJSStack())
+ return true;
+
+ ASSERT(cachedRecovery.loadsIntoFPR() || cachedRecovery.loadsIntoGPR());
+
+ if (cachedRecovery.loadsIntoFPR() && getFreeFPR() != InvalidFPRReg)
+ return true;
+
+ if (cachedRecovery.loadsIntoGPR() && getFreeGPR() != InvalidGPRReg)
+ return true;
+
+ return false;
+}
+
+void CallFrameShuffler::emitDisplace(CachedRecovery& cachedRecovery)
+{
+ Reg wantedReg;
+ if (!(wantedReg = Reg { cachedRecovery.wantedJSValueRegs().gpr() }))
+ wantedReg = Reg { cachedRecovery.wantedFPR() };
+ ASSERT(wantedReg);
+ ASSERT(!m_lockedRegisters.get(wantedReg));
+
+ if (CachedRecovery* current = m_registers[wantedReg]) {
+ if (current == &cachedRecovery) {
+ if (verbose)
+ dataLog(" + ", wantedReg, " is OK\n");
+ return;
+ }
+ // We could do a more complex thing by finding cycles
+ // etc. in that case.
+ // However, ending up in this situation will be super
+ // rare, and should actually be outright impossible for
+ // non-FTL tiers, since:
+ // (a) All doubles have been converted into JSValues with
+ // ValueRep nodes, so FPRs are initially free
+ //
+ // (b) The only recoveries with wanted registers are the
+ // callee (which always starts out in a register) and
+ // the callee-save registers
+ //
+ // (c) The callee-save registers are the first things we
+ // load (after the return PC), and they are loaded as JSValues
+ //
+ // (d) We prefer loading JSValues into FPRs if their
+ // wanted GPR is not available
+ //
+ // (e) If we end up spilling some registers with a
+ // target, we won't load them again before the very
+ // end of the algorithm
+ //
+ // Combined, this means that we will never load a recovery
+ // with a wanted GPR into any GPR other than its wanted
+ // GPR. The callee could however have been initially in
+ // one of the callee-save registers - but since the wanted
+ // GPR for the callee is always regT0, it will be the
+ // first one to be displaced, and we won't see it when
+ // handling any of the callee-save registers.
+ //
+ // Thus, the only way we could ever reach this path is in
+ // the FTL, when there is so much pressure that we
+ // absolutely need to load the callee-save registers into
+ // different GPRs initially but not enough pressure to
+ // then have to spill all of them. And even in that case,
+ // depending on the order in which B3 saves the
+ // callee-saves, we will probably still be safe. Anyway,
+ // the couple extra move instructions compared to an
+ // efficient cycle-based algorithm are not going to hurt
+ // us.
+ if (wantedReg.isFPR()) {
+ FPRReg tempFPR = getFreeFPR();
+ if (verbose)
+ dataLog(" * Moving ", wantedReg, " into ", tempFPR, "\n");
+ m_jit.moveDouble(wantedReg.fpr(), tempFPR);
+ updateRecovery(*current,
+ ValueRecovery::inFPR(tempFPR, current->recovery().dataFormat()));
+ } else {
+ GPRReg tempGPR = getFreeGPR();
+ if (verbose)
+ dataLog(" * Moving ", wantedReg.gpr(), " into ", tempGPR, "\n");
+ m_jit.move(wantedReg.gpr(), tempGPR);
+ updateRecovery(*current,
+ ValueRecovery::inGPR(tempGPR, current->recovery().dataFormat()));
+ }
+ }
+ ASSERT(!m_registers[wantedReg]);
+
+ if (cachedRecovery.recovery().isConstant()) {
+ // We only care about callee saves for wanted FPRs, and those are never constants
+ ASSERT(wantedReg.isGPR());
+ if (verbose)
+ dataLog(" * Loading ", cachedRecovery.recovery().constant(), " into ", wantedReg, "\n");
+ m_jit.moveTrustedValue(cachedRecovery.recovery().constant(), JSValueRegs { wantedReg.gpr() });
+ updateRecovery(
+ cachedRecovery,
+ ValueRecovery::inRegister(wantedReg, DataFormatJS));
+ } else if (cachedRecovery.recovery().isInGPR()) {
+ if (verbose)
+ dataLog(" * Moving ", cachedRecovery.recovery(), " into ", wantedReg, "\n");
+ if (wantedReg.isGPR())
+ m_jit.move(cachedRecovery.recovery().gpr(), wantedReg.gpr());
+ else
+ m_jit.move64ToDouble(cachedRecovery.recovery().gpr(), wantedReg.fpr());
+ RELEASE_ASSERT(cachedRecovery.recovery().dataFormat() == DataFormatJS);
+ updateRecovery(cachedRecovery,
+ ValueRecovery::inRegister(wantedReg, DataFormatJS));
+ } else {
+ ASSERT(cachedRecovery.recovery().isInFPR());
+ if (cachedRecovery.recovery().dataFormat() == DataFormatDouble) {
+ // We only care about callee saves for wanted FPRs, and those are always DataFormatJS
+ ASSERT(wantedReg.isGPR());
+ // This will automatically pick the wanted GPR
+ emitBox(cachedRecovery);
+ } else {
+ if (verbose)
+ dataLog(" * Moving ", cachedRecovery.recovery().fpr(), " into ", wantedReg, "\n");
+ if (wantedReg.isGPR())
+ m_jit.moveDoubleTo64(cachedRecovery.recovery().fpr(), wantedReg.gpr());
+ else
+ m_jit.moveDouble(cachedRecovery.recovery().fpr(), wantedReg.fpr());
+ RELEASE_ASSERT(cachedRecovery.recovery().dataFormat() == DataFormatJS);
+ updateRecovery(cachedRecovery,
+ ValueRecovery::inRegister(wantedReg, DataFormatJS));
+ }
+ }
+
+ ASSERT(m_registers[wantedReg] == &cachedRecovery);
+}
+
+bool CallFrameShuffler::tryAcquireTagTypeNumber()
+{
+ if (m_tagTypeNumber != InvalidGPRReg)
+ return true;
+
+ m_tagTypeNumber = getFreeGPR();
+
+ if (m_tagTypeNumber == InvalidGPRReg)
+ return false;
+
+ m_lockedRegisters.set(m_tagTypeNumber);
+ m_jit.move(MacroAssembler::TrustedImm64(TagTypeNumber), m_tagTypeNumber);
+ return true;
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT) && USE(JSVALUE64)
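The int32 and double boxing emitted above reduces to 64-bit integer arithmetic. A standalone sketch of that encoding; kTagTypeNumber and the helper names are stand-ins for illustration rather than the engine's own definitions.

#include <cstdint>
#include <cstdio>
#include <cstring>

// Numeric tag used by the 64-bit value encoding sketch: boxed integers carry these high bits.
static const uint64_t kTagTypeNumber = 0xffff000000000000ull;

// Mirrors the DataFormatInt32 path: zero-extend, then OR in the number tag.
static uint64_t boxInt32(int32_t value)
{
    return kTagTypeNumber | static_cast<uint32_t>(value);
}

// Mirrors the DataFormatDouble path: reinterpret the bits, then offset them.
// Subtracting kTagTypeNumber is the same as adding 2^48 modulo 2^64.
static uint64_t boxDouble(double value)
{
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof(bits));
    return bits - kTagTypeNumber;
}

int main()
{
    std::printf("boxInt32(42)   = %016llx\n", (unsigned long long)boxInt32(42));
    std::printf("boxDouble(1.5) = %016llx\n", (unsigned long long)boxDouble(1.5));
    return 0;
}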
diff --git a/Source/JavaScriptCore/jit/CompactJITCodeMap.h b/Source/JavaScriptCore/jit/CompactJITCodeMap.h
index b09f2f6cd..01f8faf24 100644
--- a/Source/JavaScriptCore/jit/CompactJITCodeMap.h
+++ b/Source/JavaScriptCore/jit/CompactJITCodeMap.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -26,14 +26,11 @@
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef CompactJITCodeMap_h
-#define CompactJITCodeMap_h
+#pragma once
#include <wtf/Assertions.h>
#include <wtf/FastMalloc.h>
-#include <wtf/OwnPtr.h>
-#include <wtf/PassOwnPtr.h>
#include <wtf/Vector.h>
namespace JSC {
@@ -47,7 +44,7 @@ namespace JSC {
// CompactJITCodeMap::Encoder encoder(map);
// encoder.append(a, b);
// encoder.append(c, d); // preconditions: c >= a, d >= b
-// OwnPtr<CompactJITCodeMap> map = encoder.finish();
+// auto map = encoder.finish();
//
// At some later time:
//
@@ -80,6 +77,16 @@ struct BytecodeAndMachineOffset {
class CompactJITCodeMap {
WTF_MAKE_FAST_ALLOCATED;
public:
+ CompactJITCodeMap(uint8_t* buffer, unsigned size, unsigned numberOfEntries)
+ : m_buffer(buffer)
+#if !ASSERT_DISABLED
+ , m_size(size)
+#endif
+ , m_numberOfEntries(numberOfEntries)
+ {
+ UNUSED_PARAM(size);
+ }
+
~CompactJITCodeMap()
{
if (m_buffer)
@@ -94,16 +101,6 @@ public:
void decode(Vector<BytecodeAndMachineOffset>& result) const;
private:
- CompactJITCodeMap(uint8_t* buffer, unsigned size, unsigned numberOfEntries)
- : m_buffer(buffer)
-#if !ASSERT_DISABLED
- , m_size(size)
-#endif
- , m_numberOfEntries(numberOfEntries)
- {
- UNUSED_PARAM(size);
- }
-
uint8_t at(unsigned index) const
{
ASSERT(index < m_size);
@@ -138,8 +135,8 @@ public:
void ensureCapacityFor(unsigned numberOfEntriesToAdd);
void append(unsigned bytecodeIndex, unsigned machineCodeOffset);
- PassOwnPtr<CompactJITCodeMap> finish();
-
+ std::unique_ptr<CompactJITCodeMap> finish();
+
private:
void appendByte(uint8_t value);
void encodeNumber(uint32_t value);
@@ -212,18 +209,18 @@ inline void CompactJITCodeMap::Encoder::append(unsigned bytecodeIndex, unsigned
m_numberOfEntries++;
}
-inline PassOwnPtr<CompactJITCodeMap> CompactJITCodeMap::Encoder::finish()
+inline std::unique_ptr<CompactJITCodeMap> CompactJITCodeMap::Encoder::finish()
{
m_capacity = m_size;
m_buffer = static_cast<uint8_t*>(fastRealloc(m_buffer, m_capacity));
- OwnPtr<CompactJITCodeMap> result = adoptPtr(new CompactJITCodeMap(m_buffer, m_size, m_numberOfEntries));
+ auto result = std::make_unique<CompactJITCodeMap>(m_buffer, m_size, m_numberOfEntries);
m_buffer = 0;
m_size = 0;
m_capacity = 0;
m_numberOfEntries = 0;
m_previousBytecodeIndex = 0;
m_previousMachineCodeOffset = 0;
- return result.release();
+ return result;
}
inline void CompactJITCodeMap::Encoder::appendByte(uint8_t value)
@@ -293,5 +290,3 @@ inline void CompactJITCodeMap::Decoder::read(unsigned& bytecodeIndex, unsigned&
}
} // namespace JSC
-
-#endif // CompactJITCodeMap_h
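Because Encoder::append() requires both components of each pair to be non-decreasing, the map only needs to store deltas. A self-contained sketch of that idea with a simple varint scheme; this is not the exact byte format CompactJITCodeMap uses, just the underlying compression trick.

#include <cstdint>
#include <cstdio>
#include <utility>
#include <vector>

// Append an unsigned delta as a little-endian base-128 varint.
static void appendVarint(std::vector<uint8_t>& out, uint32_t value)
{
    while (value >= 0x80) {
        out.push_back(uint8_t(value) | 0x80);
        value >>= 7;
    }
    out.push_back(uint8_t(value));
}

// Encode pairs under the same precondition as Encoder::append(): both components are non-decreasing.
static std::vector<uint8_t> encode(const std::vector<std::pair<uint32_t, uint32_t>>& entries)
{
    std::vector<uint8_t> out;
    uint32_t previousBytecodeIndex = 0;
    uint32_t previousMachineCodeOffset = 0;
    for (auto& entry : entries) {
        appendVarint(out, entry.first - previousBytecodeIndex);
        appendVarint(out, entry.second - previousMachineCodeOffset);
        previousBytecodeIndex = entry.first;
        previousMachineCodeOffset = entry.second;
    }
    return out;
}

int main()
{
    auto bytes = encode({ { 0, 0 }, { 3, 40 }, { 9, 200 } });
    std::printf("encoded 3 entries into %zu bytes\n", bytes.size());
    return 0;
}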
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.cpp b/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.cpp
new file mode 100644
index 000000000..b4f56650b
--- /dev/null
+++ b/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.cpp
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ExecutableAllocationFuzz.h"
+
+#include "TestRunnerUtils.h"
+#include <wtf/Atomics.h>
+#include <wtf/DataLog.h>
+
+namespace JSC {
+
+static Atomic<unsigned> s_numberOfExecutableAllocationFuzzChecks;
+unsigned numberOfExecutableAllocationFuzzChecks()
+{
+ return s_numberOfExecutableAllocationFuzzChecks.load();
+}
+
+ExecutableAllocationFuzzResult doExecutableAllocationFuzzing()
+{
+ ASSERT(Options::useExecutableAllocationFuzz());
+
+ unsigned oldValue;
+ unsigned newValue;
+ do {
+ oldValue = s_numberOfExecutableAllocationFuzzChecks.load();
+ newValue = oldValue + 1;
+ } while (!s_numberOfExecutableAllocationFuzzChecks.compareExchangeWeak(oldValue, newValue));
+
+ if (newValue == Options::fireExecutableAllocationFuzzAt()) {
+ if (Options::verboseExecutableAllocationFuzz()) {
+ dataLog("Will pretend to fail executable allocation.\n");
+ WTFReportBacktrace();
+ }
+ return PretendToFailExecutableAllocation;
+ }
+
+ if (Options::fireExecutableAllocationFuzzAtOrAfter()
+ && newValue >= Options::fireExecutableAllocationFuzzAtOrAfter()) {
+ if (Options::verboseExecutableAllocationFuzz()) {
+ dataLog("Will pretend to fail executable allocation.\n");
+ WTFReportBacktrace();
+ }
+ return PretendToFailExecutableAllocation;
+ }
+
+ return AllowNormalExecutableAllocation;
+}
+
+} // namespace JSC
+
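The counter above is bumped with a compare-and-swap loop so that concurrent checks each observe a distinct value. The same pattern expressed with std::atomic rather than WTF::Atomic, as an illustrative sketch only:

#include <atomic>
#include <cstdio>

static std::atomic<unsigned> s_checkCount { 0 };

// Returns the post-increment value, like the loop in doExecutableAllocationFuzzing().
static unsigned bumpCheckCount()
{
    unsigned oldValue = s_checkCount.load();
    unsigned newValue;
    do {
        newValue = oldValue + 1;
        // compare_exchange_weak reloads oldValue on failure, so the loop retries cleanly.
    } while (!s_checkCount.compare_exchange_weak(oldValue, newValue));
    return newValue;
}

int main()
{
    unsigned first = bumpCheckCount();
    unsigned second = bumpCheckCount();
    std::printf("first check = %u, second check = %u\n", first, second);
    return 0;
}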
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.h b/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.h
new file mode 100644
index 000000000..176e1727a
--- /dev/null
+++ b/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "Options.h"
+
+namespace JSC {
+
+enum ExecutableAllocationFuzzResult {
+ AllowNormalExecutableAllocation,
+ PretendToFailExecutableAllocation
+};
+
+ExecutableAllocationFuzzResult doExecutableAllocationFuzzing();
+
+inline ExecutableAllocationFuzzResult doExecutableAllocationFuzzingIfEnabled()
+{
+ if (LIKELY(!Options::useExecutableAllocationFuzz()))
+ return AllowNormalExecutableAllocation;
+
+ return doExecutableAllocationFuzzing();
+}
+
+} // namespace JSC
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocator.cpp b/Source/JavaScriptCore/jit/ExecutableAllocator.cpp
index 5ac6cc412..44f8fbae4 100644
--- a/Source/JavaScriptCore/jit/ExecutableAllocator.cpp
+++ b/Source/JavaScriptCore/jit/ExecutableAllocator.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008-2009, 2015, 2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -20,245 +20,400 @@
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include "config.h"
-
#include "ExecutableAllocator.h"
-#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
+#if ENABLE(ASSEMBLER)
+
#include "CodeProfiling.h"
-#include <wtf/HashSet.h>
+#include "ExecutableAllocationFuzz.h"
+#include "JSCInlines.h"
#include <wtf/MetaAllocator.h>
#include <wtf/PageReservation.h>
-#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
-#include <wtf/PassOwnPtr.h>
+
+#if OS(DARWIN)
+#include <sys/mman.h>
+#endif
+
+#include "LinkBuffer.h"
+#include "MacroAssembler.h"
+
+#if PLATFORM(MAC) || (PLATFORM(IOS) && __IPHONE_OS_VERSION_MIN_REQUIRED >= 100000)
+#define HAVE_REMAP_JIT 1
+#endif
+
+#if HAVE(REMAP_JIT)
+#if CPU(ARM64) && PLATFORM(IOS) && __IPHONE_OS_VERSION_MIN_REQUIRED >= 100000
+#define USE_EXECUTE_ONLY_JIT_WRITE_FUNCTION 1
#endif
-#include <wtf/ThreadingPrimitives.h>
-#include <wtf/VMTags.h>
#endif
-// Uncomment to create an artificial executable memory usage limit. This limit
-// is imperfect and is primarily useful for testing the VM's ability to handle
-// out-of-executable-memory situations.
-// #define EXECUTABLE_MEMORY_LIMIT 1000000
+#if OS(DARWIN)
+#include <mach/mach.h>
+extern "C" {
+ /* Routine mach_vm_remap */
+#ifdef mig_external
+ mig_external
+#else
+ extern
+#endif /* mig_external */
+ kern_return_t mach_vm_remap
+ (
+ vm_map_t target_task,
+ mach_vm_address_t *target_address,
+ mach_vm_size_t size,
+ mach_vm_offset_t mask,
+ int flags,
+ vm_map_t src_task,
+ mach_vm_address_t src_address,
+ boolean_t copy,
+ vm_prot_t *cur_protection,
+ vm_prot_t *max_protection,
+ vm_inherit_t inheritance
+ );
+}
-#if ENABLE(ASSEMBLER)
+#endif
using namespace WTF;
namespace JSC {
-#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
+JS_EXPORTDATA uintptr_t startOfFixedExecutableMemoryPool;
+JS_EXPORTDATA uintptr_t endOfFixedExecutableMemoryPool;
+
+JS_EXPORTDATA JITWriteFunction jitWriteFunction;
+
+#if !USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION) && HAVE(REMAP_JIT)
+static uintptr_t startOfFixedWritableMemoryPool;
+#endif
-class DemandExecutableAllocator : public MetaAllocator {
+class FixedVMPoolExecutableAllocator : public MetaAllocator {
+ WTF_MAKE_FAST_ALLOCATED;
public:
- DemandExecutableAllocator()
- : MetaAllocator(jitAllocationGranule)
- {
- MutexLocker lock(allocatorsMutex());
- allocators().add(this);
- // Don't preallocate any memory here.
- }
-
- virtual ~DemandExecutableAllocator()
+ FixedVMPoolExecutableAllocator()
+ : MetaAllocator(jitAllocationGranule) // round up all allocations to 32 bytes
{
- {
- MutexLocker lock(allocatorsMutex());
- allocators().remove(this);
+ size_t reservationSize;
+ if (Options::jitMemoryReservationSize())
+ reservationSize = Options::jitMemoryReservationSize();
+ else
+ reservationSize = fixedExecutableMemoryPoolSize;
+ reservationSize = roundUpToMultipleOf(pageSize(), reservationSize);
+ m_reservation = PageReservation::reserveWithGuardPages(reservationSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
+ if (m_reservation) {
+ ASSERT(m_reservation.size() == reservationSize);
+ void* reservationBase = m_reservation.base();
+
+ if (Options::useSeparatedWXHeap()) {
+ // First page of our JIT allocation is reserved.
+ ASSERT(reservationSize >= pageSize() * 2);
+ reservationBase = (void*)((uintptr_t)reservationBase + pageSize());
+ reservationSize -= pageSize();
+ initializeSeparatedWXHeaps(m_reservation.base(), pageSize(), reservationBase, reservationSize);
+ }
+
+ addFreshFreeSpace(reservationBase, reservationSize);
+
+ startOfFixedExecutableMemoryPool = reinterpret_cast<uintptr_t>(reservationBase);
+ endOfFixedExecutableMemoryPool = startOfFixedExecutableMemoryPool + reservationSize;
}
- for (unsigned i = 0; i < reservations.size(); ++i)
- reservations.at(i).deallocate();
}
- static size_t bytesAllocatedByAllAllocators()
+ virtual ~FixedVMPoolExecutableAllocator();
+
+protected:
+ void* allocateNewSpace(size_t&) override
{
- size_t total = 0;
- MutexLocker lock(allocatorsMutex());
- for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator)
- total += (*allocator)->bytesAllocated();
- return total;
+ // We're operating in a fixed pool, so new allocation is always prohibited.
+ return 0;
}
- static size_t bytesCommittedByAllocactors()
+ void notifyNeedPage(void* page) override
{
- size_t total = 0;
- MutexLocker lock(allocatorsMutex());
- for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator)
- total += (*allocator)->bytesCommitted();
- return total;
+#if USE(MADV_FREE_FOR_JIT_MEMORY)
+ UNUSED_PARAM(page);
+#else
+ m_reservation.commit(page, pageSize());
+#endif
}
-#if ENABLE(META_ALLOCATOR_PROFILE)
- static void dumpProfileFromAllAllocators()
+ void notifyPageIsFree(void* page) override
{
- MutexLocker lock(allocatorsMutex());
- for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator)
- (*allocator)->dumpProfile();
- }
+#if USE(MADV_FREE_FOR_JIT_MEMORY)
+ for (;;) {
+ int result = madvise(page, pageSize(), MADV_FREE);
+ if (!result)
+ return;
+ ASSERT(result == -1);
+ if (errno != EAGAIN) {
+ RELEASE_ASSERT_NOT_REACHED(); // In debug mode, this should be a hard failure.
+ break; // In release mode, we should just ignore the error - not returning memory to the OS is better than crashing, especially since we _will_ be able to reuse the memory internally anyway.
+ }
+ }
+#else
+ m_reservation.decommit(page, pageSize());
#endif
+ }
-protected:
- virtual void* allocateNewSpace(size_t& numPages)
+private:
+#if OS(DARWIN) && HAVE(REMAP_JIT)
+ void initializeSeparatedWXHeaps(void* stubBase, size_t stubSize, void* jitBase, size_t jitSize)
{
- size_t newNumPages = (((numPages * pageSize() + JIT_ALLOCATOR_LARGE_ALLOC_SIZE - 1) / JIT_ALLOCATOR_LARGE_ALLOC_SIZE * JIT_ALLOCATOR_LARGE_ALLOC_SIZE) + pageSize() - 1) / pageSize();
-
- ASSERT(newNumPages >= numPages);
-
- numPages = newNumPages;
-
-#ifdef EXECUTABLE_MEMORY_LIMIT
- if (bytesAllocatedByAllAllocators() >= EXECUTABLE_MEMORY_LIMIT)
- return 0;
+ mach_vm_address_t writableAddr = 0;
+
+ // Create a second mapping of the JIT region at a random address.
+ vm_prot_t cur, max;
+ int remapFlags = VM_FLAGS_ANYWHERE;
+#if defined(VM_FLAGS_RANDOM_ADDR)
+ remapFlags |= VM_FLAGS_RANDOM_ADDR;
+#endif
+ kern_return_t ret = mach_vm_remap(mach_task_self(), &writableAddr, jitSize, 0,
+ remapFlags,
+ mach_task_self(), (mach_vm_address_t)jitBase, FALSE,
+ &cur, &max, VM_INHERIT_DEFAULT);
+
+ bool remapSucceeded = (ret == KERN_SUCCESS);
+ if (!remapSucceeded)
+ return;
+
+ // Assemble a thunk that will serve as the means for writing into the JIT region.
+ MacroAssemblerCodeRef writeThunk = jitWriteThunkGenerator(reinterpret_cast<void*>(writableAddr), stubBase, stubSize);
+
+ int result = 0;
+
+#if USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION)
+ // Prevent reading the write thunk code.
+ result = mprotect(stubBase, stubSize, VM_PROT_EXECUTE_ONLY);
+ RELEASE_ASSERT(!result);
#endif
-
- PageReservation reservation = PageReservation::reserve(numPages * pageSize(), OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
- RELEASE_ASSERT(reservation);
-
- reservations.append(reservation);
-
- return reservation.base();
+
+ // Prevent writing into the executable JIT mapping.
+ result = mprotect(jitBase, jitSize, VM_PROT_READ | VM_PROT_EXECUTE);
+ RELEASE_ASSERT(!result);
+
+ // Prevent execution in the writable JIT mapping.
+ result = mprotect((void*)writableAddr, jitSize, VM_PROT_READ | VM_PROT_WRITE);
+ RELEASE_ASSERT(!result);
+
+ // Zero out writableAddr to avoid leaking the address of the writable mapping.
+ memset_s(&writableAddr, sizeof(writableAddr), 0, sizeof(writableAddr));
+
+ jitWriteFunction = reinterpret_cast<JITWriteFunction>(writeThunk.code().executableAddress());
}
-
- virtual void notifyNeedPage(void* page)
+
+#if CPU(ARM64) && USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION)
+ MacroAssemblerCodeRef jitWriteThunkGenerator(void* writableAddr, void* stubBase, size_t stubSize)
{
- OSAllocator::commit(page, pageSize(), EXECUTABLE_POOL_WRITABLE, true);
+ using namespace ARM64Registers;
+ using TrustedImm32 = MacroAssembler::TrustedImm32;
+
+ MacroAssembler jit;
+
+ jit.move(MacroAssembler::TrustedImmPtr(writableAddr), x7);
+ jit.addPtr(x7, x0);
+
+ jit.move(x0, x3);
+ MacroAssembler::Jump smallCopy = jit.branch64(MacroAssembler::Below, x2, MacroAssembler::TrustedImm64(64));
+
+ jit.add64(TrustedImm32(32), x3);
+ jit.and64(TrustedImm32(-32), x3);
+ jit.loadPair64(x1, x12, x13);
+ jit.loadPair64(x1, TrustedImm32(16), x14, x15);
+ jit.sub64(x3, x0, x5);
+ jit.addPtr(x5, x1);
+
+ jit.loadPair64(x1, x8, x9);
+ jit.loadPair64(x1, TrustedImm32(16), x10, x11);
+ jit.add64(TrustedImm32(32), x1);
+ jit.sub64(x5, x2);
+ jit.storePair64(x12, x13, x0);
+ jit.storePair64(x14, x15, x0, TrustedImm32(16));
+ MacroAssembler::Jump cleanup = jit.branchSub64(MacroAssembler::BelowOrEqual, TrustedImm32(64), x2);
+
+ MacroAssembler::Label copyLoop = jit.label();
+ jit.storePair64WithNonTemporalAccess(x8, x9, x3);
+ jit.storePair64WithNonTemporalAccess(x10, x11, x3, TrustedImm32(16));
+ jit.add64(TrustedImm32(32), x3);
+ jit.loadPair64WithNonTemporalAccess(x1, x8, x9);
+ jit.loadPair64WithNonTemporalAccess(x1, TrustedImm32(16), x10, x11);
+ jit.add64(TrustedImm32(32), x1);
+ jit.branchSub64(MacroAssembler::Above, TrustedImm32(32), x2).linkTo(copyLoop, &jit);
+
+ cleanup.link(&jit);
+ jit.add64(x2, x1);
+ jit.loadPair64(x1, x12, x13);
+ jit.loadPair64(x1, TrustedImm32(16), x14, x15);
+ jit.storePair64(x8, x9, x3);
+ jit.storePair64(x10, x11, x3, TrustedImm32(16));
+ jit.addPtr(x2, x3);
+ jit.storePair64(x12, x13, x3, TrustedImm32(32));
+ jit.storePair64(x14, x15, x3, TrustedImm32(48));
+ jit.ret();
+
+ MacroAssembler::Label local0 = jit.label();
+ jit.load64(x1, PostIndex(8), x6);
+ jit.store64(x6, x3, PostIndex(8));
+ smallCopy.link(&jit);
+ jit.branchSub64(MacroAssembler::AboveOrEqual, TrustedImm32(8), x2).linkTo(local0, &jit);
+ MacroAssembler::Jump local2 = jit.branchAdd64(MacroAssembler::Equal, TrustedImm32(8), x2);
+ MacroAssembler::Label local1 = jit.label();
+ jit.load8(x1, PostIndex(1), x6);
+ jit.store8(x6, x3, PostIndex(1));
+ jit.branchSub64(MacroAssembler::NotEqual, TrustedImm32(1), x2).linkTo(local1, &jit);
+ local2.link(&jit);
+ jit.ret();
+
+ LinkBuffer linkBuffer(jit, stubBase, stubSize);
+ // We don't use FINALIZE_CODE() for two reasons.
+ // The first is that we don't want the writeable address, as disassembled instructions,
+ // to appear in the console or anywhere in memory, via the PrintStream buffer.
+ // The second is we can't guarantee that the code is readable when using the
+        // The second is that we can't guarantee that the code is readable when using the
+ return linkBuffer.finalizeCodeWithoutDisassembly();
}
-
- virtual void notifyPageIsFree(void* page)
+#else // CPU(ARM64) && USE(EXECUTE_ONLY_JIT_WRITE_FUNCTION)
+ static void genericWriteToJITRegion(off_t offset, const void* data, size_t dataSize)
{
- OSAllocator::decommit(page, pageSize());
+ memcpy((void*)(startOfFixedWritableMemoryPool + offset), data, dataSize);
}
-private:
- Vector<PageReservation, 16> reservations;
- static HashSet<DemandExecutableAllocator*>& allocators()
+ MacroAssemblerCodeRef jitWriteThunkGenerator(void* address, void*, size_t)
{
- DEFINE_STATIC_LOCAL(HashSet<DemandExecutableAllocator*>, sAllocators, ());
- return sAllocators;
+ startOfFixedWritableMemoryPool = reinterpret_cast<uintptr_t>(address);
+ uintptr_t function = (uintptr_t)((void*)&genericWriteToJITRegion);
+#if CPU(ARM_THUMB2)
+ // Handle thumb offset
+ function -= 1;
+#endif
+ return MacroAssemblerCodeRef::createSelfManagedCodeRef(MacroAssemblerCodePtr((void*)function));
}
- static Mutex& allocatorsMutex()
+#endif
+
+#else // OS(DARWIN) && HAVE(REMAP_JIT)
+ void initializeSeparatedWXHeaps(void*, size_t, void*, size_t)
{
- DEFINE_STATIC_LOCAL(Mutex, mutex, ());
- return mutex;
}
-};
+#endif
-#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
-void ExecutableAllocator::initializeAllocator()
-{
-}
-#else
-static DemandExecutableAllocator* gAllocator;
+private:
+ PageReservation m_reservation;
+};
-namespace {
-static inline DemandExecutableAllocator* allocator()
-{
- return gAllocator;
-}
-}
+static FixedVMPoolExecutableAllocator* allocator;
void ExecutableAllocator::initializeAllocator()
{
- ASSERT(!gAllocator);
- gAllocator = new DemandExecutableAllocator();
- CodeProfiling::notifyAllocator(gAllocator);
+ ASSERT(!allocator);
+ allocator = new FixedVMPoolExecutableAllocator();
+ CodeProfiling::notifyAllocator(allocator);
}
-#endif
ExecutableAllocator::ExecutableAllocator(VM&)
-#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
- : m_allocator(adoptPtr(new DemandExecutableAllocator()))
-#endif
{
- ASSERT(allocator());
+ ASSERT(allocator);
}
ExecutableAllocator::~ExecutableAllocator()
{
}
+FixedVMPoolExecutableAllocator::~FixedVMPoolExecutableAllocator()
+{
+ m_reservation.deallocate();
+}
+
bool ExecutableAllocator::isValid() const
{
- return true;
+ return !!allocator->bytesReserved();
}
bool ExecutableAllocator::underMemoryPressure()
{
-#ifdef EXECUTABLE_MEMORY_LIMIT
- return DemandExecutableAllocator::bytesAllocatedByAllAllocators() > EXECUTABLE_MEMORY_LIMIT / 2;
-#else
- return false;
-#endif
+ MetaAllocator::Statistics statistics = allocator->currentStatistics();
+ return statistics.bytesAllocated > statistics.bytesReserved / 2;
}
double ExecutableAllocator::memoryPressureMultiplier(size_t addedMemoryUsage)
{
- double result;
-#ifdef EXECUTABLE_MEMORY_LIMIT
- size_t bytesAllocated = DemandExecutableAllocator::bytesAllocatedByAllAllocators() + addedMemoryUsage;
- if (bytesAllocated >= EXECUTABLE_MEMORY_LIMIT)
- bytesAllocated = EXECUTABLE_MEMORY_LIMIT;
- result = static_cast<double>(EXECUTABLE_MEMORY_LIMIT) /
- (EXECUTABLE_MEMORY_LIMIT - bytesAllocated);
-#else
- UNUSED_PARAM(addedMemoryUsage);
- result = 1.0;
-#endif
+ MetaAllocator::Statistics statistics = allocator->currentStatistics();
+ ASSERT(statistics.bytesAllocated <= statistics.bytesReserved);
+ size_t bytesAllocated = statistics.bytesAllocated + addedMemoryUsage;
+ size_t bytesAvailable = static_cast<size_t>(
+ statistics.bytesReserved * (1 - executablePoolReservationFraction));
+ if (bytesAllocated >= bytesAvailable)
+ bytesAllocated = bytesAvailable;
+ double result = 1.0;
+ size_t divisor = bytesAvailable - bytesAllocated;
+ if (divisor)
+ result = static_cast<double>(bytesAvailable) / divisor;
if (result < 1.0)
result = 1.0;
return result;
-
}
-PassRefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
+RefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
{
- RefPtr<ExecutableMemoryHandle> result = allocator()->allocate(sizeInBytes, ownerUID);
- RELEASE_ASSERT(result || effort != JITCompilationMustSucceed);
- return result.release();
+ if (Options::logExecutableAllocation()) {
+ MetaAllocator::Statistics stats = allocator->currentStatistics();
+ dataLog("Allocating ", sizeInBytes, " bytes of executable memory with ", stats.bytesAllocated, " bytes allocated, ", stats.bytesReserved, " bytes reserved, and ", stats.bytesCommitted, " committed.\n");
+ }
+
+ if (effort != JITCompilationCanFail && Options::reportMustSucceedExecutableAllocations()) {
+ dataLog("Allocating ", sizeInBytes, " bytes of executable memory with JITCompilationMustSucceed.\n");
+ WTFReportBacktrace();
+ }
+
+ if (effort == JITCompilationCanFail
+ && doExecutableAllocationFuzzingIfEnabled() == PretendToFailExecutableAllocation)
+ return nullptr;
+
+ if (effort == JITCompilationCanFail) {
+ // Don't allow allocations if we are down to reserve.
+ MetaAllocator::Statistics statistics = allocator->currentStatistics();
+ size_t bytesAllocated = statistics.bytesAllocated + sizeInBytes;
+ size_t bytesAvailable = static_cast<size_t>(
+ statistics.bytesReserved * (1 - executablePoolReservationFraction));
+ if (bytesAllocated > bytesAvailable)
+ return nullptr;
+ }
+
+ RefPtr<ExecutableMemoryHandle> result = allocator->allocate(sizeInBytes, ownerUID);
+ if (!result) {
+ if (effort != JITCompilationCanFail) {
+ dataLog("Ran out of executable memory while allocating ", sizeInBytes, " bytes.\n");
+ CRASH();
+ }
+ return nullptr;
+ }
+ return result;
}
-size_t ExecutableAllocator::committedByteCount()
+bool ExecutableAllocator::isValidExecutableMemory(const LockHolder& locker, void* address)
{
- return DemandExecutableAllocator::bytesCommittedByAllocactors();
+ return allocator->isInAllocatedMemory(locker, address);
}
-#if ENABLE(META_ALLOCATOR_PROFILE)
-void ExecutableAllocator::dumpProfile()
+Lock& ExecutableAllocator::getLock() const
{
- DemandExecutableAllocator::dumpProfileFromAllAllocators();
+ return allocator->getLock();
}
-#endif
-
-#endif // ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
-
-#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
-#if OS(WINDOWS)
-#error "ASSEMBLER_WX_EXCLUSIVE not yet suported on this platform."
-#endif
-
-void ExecutableAllocator::reprotectRegion(void* start, size_t size, ProtectionSetting setting)
+size_t ExecutableAllocator::committedByteCount()
{
- size_t pageSize = WTF::pageSize();
-
- // Calculate the start of the page containing this region,
- // and account for this extra memory within size.
- intptr_t startPtr = reinterpret_cast<intptr_t>(start);
- intptr_t pageStartPtr = startPtr & ~(pageSize - 1);
- void* pageStart = reinterpret_cast<void*>(pageStartPtr);
- size += (startPtr - pageStartPtr);
-
- // Round size up
- size += (pageSize - 1);
- size &= ~(pageSize - 1);
-
- mprotect(pageStart, size, (setting == Writable) ? PROTECTION_FLAGS_RW : PROTECTION_FLAGS_RX);
+ return allocator->bytesCommitted();
}
+#if ENABLE(META_ALLOCATOR_PROFILE)
+void ExecutableAllocator::dumpProfile()
+{
+ allocator->dumpProfile();
+}
#endif
-
+
}
-#endif // HAVE(ASSEMBLER)
+#endif // ENABLE(ASSEMBLER)
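The pressure heuristic in the new allocator boils down to arithmetic over the pool statistics. A standalone sketch of the same computation with the reservation fraction passed in explicitly; the function and parameter names are illustrative.

#include <cstddef>
#include <cstdio>

// Mirrors the arithmetic in ExecutableAllocator::memoryPressureMultiplier() above:
// only a fraction of the reservation is considered usable, and the multiplier grows
// as allocations approach that limit (guarded against division by zero).
static double pressureMultiplier(size_t bytesAllocated, size_t bytesReserved, double reservationFraction)
{
    size_t bytesAvailable = static_cast<size_t>(bytesReserved * (1 - reservationFraction));
    if (bytesAllocated >= bytesAvailable)
        bytesAllocated = bytesAvailable;
    double result = 1.0;
    size_t divisor = bytesAvailable - bytesAllocated;
    if (divisor)
        result = static_cast<double>(bytesAvailable) / divisor;
    return result < 1.0 ? 1.0 : result;
}

int main()
{
    // With a 0.25 reserve fraction, 75% of a 128 MB reservation (96 MB) is usable.
    std::printf("%.2f\n", pressureMultiplier(32u << 20, 128u << 20, 0.25)); // plenty of headroom -> 1.50
    std::printf("%.2f\n", pressureMultiplier(90u << 20, 128u << 20, 0.25)); // near the limit -> 16.00
    return 0;
}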
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocator.h b/Source/JavaScriptCore/jit/ExecutableAllocator.h
index 01be7c1aa..a686e7217 100644
--- a/Source/JavaScriptCore/jit/ExecutableAllocator.h
+++ b/Source/JavaScriptCore/jit/ExecutableAllocator.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,18 +23,16 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef ExecutableAllocator_h
-#define ExecutableAllocator_h
+#pragma once
+
#include "JITCompilationEffort.h"
#include <stddef.h> // for ptrdiff_t
#include <limits>
#include <wtf/Assertions.h>
+#include <wtf/Lock.h>
#include <wtf/MetaAllocatorHandle.h>
#include <wtf/MetaAllocator.h>
#include <wtf/PageAllocation.h>
-#include <wtf/PassRefPtr.h>
-#include <wtf/RefCounted.h>
-#include <wtf/Vector.h>
#if OS(IOS)
#include <libkern/OSCacheControl.h>
@@ -48,71 +46,57 @@
#include <sys/cachectl.h>
#endif
-#if CPU(SH4) && OS(LINUX)
-#include <asm/cachectl.h>
-#include <asm/unistd.h>
-#include <sys/syscall.h>
-#include <unistd.h>
-#endif
-
-#if OS(WINCE)
-// From pkfuncs.h (private header file from the Platform Builder)
-#define CACHE_SYNC_ALL 0x07F
-extern "C" __declspec(dllimport) void CacheRangeFlush(LPVOID pAddr, DWORD dwLength, DWORD dwFlags);
-#endif
-
#define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (pageSize() * 4)
-#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
-#define PROTECTION_FLAGS_RW (PROT_READ | PROT_WRITE)
-#define PROTECTION_FLAGS_RX (PROT_READ | PROT_EXEC)
-#define EXECUTABLE_POOL_WRITABLE false
-#else
#define EXECUTABLE_POOL_WRITABLE true
-#endif
namespace JSC {
class VM;
-void releaseExecutableMemory(VM&);
static const unsigned jitAllocationGranule = 32;
-inline size_t roundUpAllocationSize(size_t request, size_t granularity)
-{
- RELEASE_ASSERT((std::numeric_limits<size_t>::max() - granularity) > request);
-
- // Round up to next page boundary
- size_t size = request + (granularity - 1);
- size = size & ~(granularity - 1);
- ASSERT(size >= request);
- return size;
-}
-
-}
-
-namespace JSC {
-
typedef WTF::MetaAllocatorHandle ExecutableMemoryHandle;
#if ENABLE(ASSEMBLER)
-#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
-class DemandExecutableAllocator;
-#endif
-
-#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED)
-#if CPU(ARM) || CPU(ARM64)
+#if defined(FIXED_EXECUTABLE_MEMORY_POOL_SIZE_IN_MB) && FIXED_EXECUTABLE_MEMORY_POOL_SIZE_IN_MB > 0
+static const size_t fixedExecutableMemoryPoolSize = FIXED_EXECUTABLE_MEMORY_POOL_SIZE_IN_MB * 1024 * 1024;
+#elif CPU(ARM)
static const size_t fixedExecutableMemoryPoolSize = 16 * 1024 * 1024;
+#elif CPU(ARM64)
+static const size_t fixedExecutableMemoryPoolSize = 32 * 1024 * 1024;
#elif CPU(X86_64)
static const size_t fixedExecutableMemoryPoolSize = 1024 * 1024 * 1024;
#else
static const size_t fixedExecutableMemoryPoolSize = 32 * 1024 * 1024;
#endif
-
-extern uintptr_t startOfFixedExecutableMemoryPool;
+#if CPU(ARM)
+static const double executablePoolReservationFraction = 0.15;
+#else
+static const double executablePoolReservationFraction = 0.25;
#endif
+extern JS_EXPORTDATA uintptr_t startOfFixedExecutableMemoryPool;
+extern JS_EXPORTDATA uintptr_t endOfFixedExecutableMemoryPool;
+
+typedef void (*JITWriteFunction)(off_t, const void*, size_t);
+extern JS_EXPORTDATA JITWriteFunction jitWriteFunction;
+
+static inline void* performJITMemcpy(void *dst, const void *src, size_t n)
+{
+ // Use execute-only write thunk for writes inside the JIT region. This is a variant of
+ // memcpy that takes an offset into the JIT region as its destination (first) parameter.
+ if (jitWriteFunction && (uintptr_t)dst >= startOfFixedExecutableMemoryPool && (uintptr_t)dst <= endOfFixedExecutableMemoryPool) {
+ off_t offset = (off_t)((uintptr_t)dst - startOfFixedExecutableMemoryPool);
+ jitWriteFunction(offset, src, n);
+ return dst;
+ }
+
+ // Use regular memcpy for writes outside the JIT region.
+ return memcpy(dst, src, n);
+}
+
class ExecutableAllocator {
enum ProtectionSetting { Writable, Executable };
@@ -134,40 +118,15 @@ public:
static void dumpProfile() { }
#endif
- PassRefPtr<ExecutableMemoryHandle> allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort);
-
-#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
- static void makeWritable(void* start, size_t size)
- {
- reprotectRegion(start, size, Writable);
- }
+ RefPtr<ExecutableMemoryHandle> allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort);
- static void makeExecutable(void* start, size_t size)
- {
- reprotectRegion(start, size, Executable);
- }
-#else
- static void makeWritable(void*, size_t) {}
- static void makeExecutable(void*, size_t) {}
-#endif
+ bool isValidExecutableMemory(const LockHolder&, void* address);
static size_t committedByteCount();
-private:
-
-#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
- static void reprotectRegion(void*, size_t, ProtectionSetting);
-#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
- // We create a MetaAllocator for each JS global object.
- OwnPtr<DemandExecutableAllocator> m_allocator;
- DemandExecutableAllocator* allocator() { return m_allocator.get(); }
-#endif
-#endif
-
+ Lock& getLock() const;
};
#endif // ENABLE(JIT) && ENABLE(ASSEMBLER)
} // namespace JSC
-
-#endif // !defined(ExecutableAllocator)
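performJITMemcpy above is a bounds check plus an indirect call through the write thunk. A self-contained sketch of that dispatch using plain function pointers and an ordinary in-process buffer as the "pool"; nothing here sets up a real dual mapping, and all names are illustrative.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <cstring>

using WriteFunction = void (*)(std::ptrdiff_t offset, const void* data, size_t size);

// Illustrative globals standing in for the JIT pool bounds and the write thunk.
static uintptr_t s_poolStart;
static uintptr_t s_poolEnd;
static WriteFunction s_writeFunction;
static char s_fakePool[64]; // pretend this is the executable mapping

static void writeThroughThunk(std::ptrdiff_t offset, const void* data, size_t size)
{
    // In the real allocator this would go through the separate writable mapping.
    std::memcpy(s_fakePool + offset, data, size);
}

// Same shape as performJITMemcpy(): route in-pool writes through the thunk, everything else to memcpy.
static void* illustrativeJITMemcpy(void* dst, const void* src, size_t n)
{
    uintptr_t d = reinterpret_cast<uintptr_t>(dst);
    if (s_writeFunction && d >= s_poolStart && d <= s_poolEnd) {
        s_writeFunction(static_cast<std::ptrdiff_t>(d - s_poolStart), src, n);
        return dst;
    }
    return std::memcpy(dst, src, n);
}

int main()
{
    s_poolStart = reinterpret_cast<uintptr_t>(s_fakePool);
    s_poolEnd = s_poolStart + sizeof(s_fakePool);
    s_writeFunction = writeThroughThunk;

    const char payload[] = "jit";
    illustrativeJITMemcpy(s_fakePool + 8, payload, sizeof(payload));
    std::printf("pool[8..10] = %s\n", s_fakePool + 8);
    return 0;
}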
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp b/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
deleted file mode 100644
index 8e0b77cfc..000000000
--- a/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
+++ /dev/null
@@ -1,194 +0,0 @@
-/*
- * Copyright (C) 2009 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-
-#include "ExecutableAllocator.h"
-
-#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED)
-
-#include "CodeProfiling.h"
-#include <errno.h>
-#include <unistd.h>
-#include <wtf/MetaAllocator.h>
-#include <wtf/PageReservation.h>
-#include <wtf/VMTags.h>
-
-#if OS(DARWIN)
-#include <sys/mman.h>
-#endif
-
-#if OS(LINUX)
-#include <stdio.h>
-#endif
-
-#if !PLATFORM(IOS) && PLATFORM(MAC) && __MAC_OS_X_VERSION_MIN_REQUIRED < 1090
-// MADV_FREE_REUSABLE does not work for JIT memory on older OSes so use MADV_FREE in that case.
-#define WTF_USE_MADV_FREE_FOR_JIT_MEMORY 1
-#endif
-
-using namespace WTF;
-
-namespace JSC {
-
-uintptr_t startOfFixedExecutableMemoryPool;
-
-class FixedVMPoolExecutableAllocator : public MetaAllocator {
- WTF_MAKE_FAST_ALLOCATED;
-public:
- FixedVMPoolExecutableAllocator()
- : MetaAllocator(jitAllocationGranule) // round up all allocations to 32 bytes
- {
- m_reservation = PageReservation::reserveWithGuardPages(fixedExecutableMemoryPoolSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
-#if !ENABLE(LLINT)
- RELEASE_ASSERT(m_reservation);
-#endif
- if (m_reservation) {
- ASSERT(m_reservation.size() == fixedExecutableMemoryPoolSize);
- addFreshFreeSpace(m_reservation.base(), m_reservation.size());
-
- startOfFixedExecutableMemoryPool = reinterpret_cast<uintptr_t>(m_reservation.base());
- }
- }
-
- virtual ~FixedVMPoolExecutableAllocator();
-
-protected:
- virtual void* allocateNewSpace(size_t&) override
- {
- // We're operating in a fixed pool, so new allocation is always prohibited.
- return 0;
- }
-
- virtual void notifyNeedPage(void* page) override
- {
-#if USE(MADV_FREE_FOR_JIT_MEMORY)
- UNUSED_PARAM(page);
-#else
- m_reservation.commit(page, pageSize());
-#endif
- }
-
- virtual void notifyPageIsFree(void* page) override
- {
-#if USE(MADV_FREE_FOR_JIT_MEMORY)
- for (;;) {
- int result = madvise(page, pageSize(), MADV_FREE);
- if (!result)
- return;
- ASSERT(result == -1);
- if (errno != EAGAIN) {
- RELEASE_ASSERT_NOT_REACHED(); // In debug mode, this should be a hard failure.
- break; // In release mode, we should just ignore the error - not returning memory to the OS is better than crashing, especially since we _will_ be able to reuse the memory internally anyway.
- }
- }
-#else
- m_reservation.decommit(page, pageSize());
-#endif
- }
-
-private:
- PageReservation m_reservation;
-};
-
-static FixedVMPoolExecutableAllocator* allocator;
-
-void ExecutableAllocator::initializeAllocator()
-{
- ASSERT(!allocator);
- allocator = new FixedVMPoolExecutableAllocator();
- CodeProfiling::notifyAllocator(allocator);
-}
-
-ExecutableAllocator::ExecutableAllocator(VM&)
-{
- ASSERT(allocator);
-}
-
-ExecutableAllocator::~ExecutableAllocator()
-{
-}
-
-FixedVMPoolExecutableAllocator::~FixedVMPoolExecutableAllocator()
-{
- m_reservation.deallocate();
-}
-
-bool ExecutableAllocator::isValid() const
-{
- return !!allocator->bytesReserved();
-}
-
-bool ExecutableAllocator::underMemoryPressure()
-{
- MetaAllocator::Statistics statistics = allocator->currentStatistics();
- return statistics.bytesAllocated > statistics.bytesReserved / 2;
-}
-
-double ExecutableAllocator::memoryPressureMultiplier(size_t addedMemoryUsage)
-{
- MetaAllocator::Statistics statistics = allocator->currentStatistics();
- ASSERT(statistics.bytesAllocated <= statistics.bytesReserved);
- size_t bytesAllocated = statistics.bytesAllocated + addedMemoryUsage;
- if (bytesAllocated >= statistics.bytesReserved)
- bytesAllocated = statistics.bytesReserved;
- double result = 1.0;
- size_t divisor = statistics.bytesReserved - bytesAllocated;
- if (divisor)
- result = static_cast<double>(statistics.bytesReserved) / divisor;
- if (result < 1.0)
- result = 1.0;
- return result;
-}
-
-PassRefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(VM& vm, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
-{
- RefPtr<ExecutableMemoryHandle> result = allocator->allocate(sizeInBytes, ownerUID);
- if (!result) {
- if (effort == JITCompilationCanFail)
- return result;
- releaseExecutableMemory(vm);
- result = allocator->allocate(sizeInBytes, ownerUID);
- RELEASE_ASSERT(result);
- }
- return result.release();
-}
-
-size_t ExecutableAllocator::committedByteCount()
-{
- return allocator->bytesCommitted();
-}
-
-#if ENABLE(META_ALLOCATOR_PROFILE)
-void ExecutableAllocator::dumpProfile()
-{
- allocator->dumpProfile();
-}
-#endif
-
-}
-
-
-#endif // ENABLE(EXECUTABLE_ALLOCATOR_FIXED)
diff --git a/Source/JavaScriptCore/jit/FPRInfo.h b/Source/JavaScriptCore/jit/FPRInfo.h
index 5bb0e16cc..ec0ab125a 100644
--- a/Source/JavaScriptCore/jit/FPRInfo.h
+++ b/Source/JavaScriptCore/jit/FPRInfo.h
@@ -23,8 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef FPRInfo_h
-#define FPRInfo_h
+#pragma once
#include "MacroAssembler.h"
#include <wtf/PrintStream.h>
@@ -42,6 +41,7 @@ class FPRInfo {
public:
typedef FPRReg RegisterType;
static const unsigned numberOfRegisters = 6;
+ static const unsigned numberOfArgumentRegisters = 8;
// Temporary registers.
static const FPRReg fpRegT0 = X86Registers::xmm0;
@@ -56,6 +56,10 @@ public:
static const FPRReg argumentFPR1 = X86Registers::xmm1; // fpRegT1
static const FPRReg argumentFPR2 = X86Registers::xmm2; // fpRegT2
static const FPRReg argumentFPR3 = X86Registers::xmm3; // fpRegT3
+ static const FPRReg argumentFPR4 = X86Registers::xmm4; // fpRegT4
+ static const FPRReg argumentFPR5 = X86Registers::xmm5; // fpRegT5
+ static const FPRReg argumentFPR6 = X86Registers::xmm6;
+ static const FPRReg argumentFPR7 = X86Registers::xmm7;
#endif
// On X86 the return will actually be on the x87 stack,
// so we'll copy to xmm0 for sanity!
@@ -182,6 +186,7 @@ class FPRInfo {
public:
typedef FPRReg RegisterType;
static const unsigned numberOfRegisters = 23;
+ static const unsigned numberOfArgumentRegisters = 8;
// Temporary registers.
    // q8-q15 are callee saved, q31 is used by the MacroAssembler as fpTempRegister.
@@ -208,6 +213,14 @@ public:
static const FPRReg fpRegT20 = ARM64Registers::q28;
static const FPRReg fpRegT21 = ARM64Registers::q29;
static const FPRReg fpRegT22 = ARM64Registers::q30;
+ static const FPRReg fpRegCS0 = ARM64Registers::q8;
+ static const FPRReg fpRegCS1 = ARM64Registers::q9;
+ static const FPRReg fpRegCS2 = ARM64Registers::q10;
+ static const FPRReg fpRegCS3 = ARM64Registers::q11;
+ static const FPRReg fpRegCS4 = ARM64Registers::q12;
+ static const FPRReg fpRegCS5 = ARM64Registers::q13;
+ static const FPRReg fpRegCS6 = ARM64Registers::q14;
+ static const FPRReg fpRegCS7 = ARM64Registers::q15;
static const FPRReg argumentFPR0 = ARM64Registers::q0; // fpRegT0
static const FPRReg argumentFPR1 = ARM64Registers::q1; // fpRegT1
@@ -242,10 +255,15 @@ public:
16, 17, 18, 19, 20, 21, 22, InvalidIndex
};
unsigned result = indexForRegister[reg];
- ASSERT(result != InvalidIndex);
return result;
}
+ static FPRReg toArgumentRegister(unsigned index)
+ {
+ ASSERT(index < 8);
+ return static_cast<FPRReg>(index);
+ }
+
static const char* debugName(FPRReg reg)
{
ASSERT(reg != InvalidFPRReg);
@@ -269,15 +287,16 @@ public:
class FPRInfo {
public:
typedef FPRReg RegisterType;
- static const unsigned numberOfRegisters = 6;
+ static const unsigned numberOfRegisters = 7;
// Temporary registers.
static const FPRReg fpRegT0 = MIPSRegisters::f0;
- static const FPRReg fpRegT1 = MIPSRegisters::f4;
- static const FPRReg fpRegT2 = MIPSRegisters::f6;
- static const FPRReg fpRegT3 = MIPSRegisters::f8;
- static const FPRReg fpRegT4 = MIPSRegisters::f10;
- static const FPRReg fpRegT5 = MIPSRegisters::f18;
+ static const FPRReg fpRegT1 = MIPSRegisters::f2;
+ static const FPRReg fpRegT2 = MIPSRegisters::f4;
+ static const FPRReg fpRegT3 = MIPSRegisters::f6;
+ static const FPRReg fpRegT4 = MIPSRegisters::f8;
+ static const FPRReg fpRegT5 = MIPSRegisters::f10;
+ static const FPRReg fpRegT6 = MIPSRegisters::f18;
static const FPRReg returnValueFPR = MIPSRegisters::f0;
@@ -287,7 +306,7 @@ public:
static FPRReg toRegister(unsigned index)
{
static const FPRReg registerForIndex[numberOfRegisters] = {
- fpRegT0, fpRegT1, fpRegT2, fpRegT3, fpRegT4, fpRegT5 };
+ fpRegT0, fpRegT1, fpRegT2, fpRegT3, fpRegT4, fpRegT5, fpRegT6 };
ASSERT(index < numberOfRegisters);
return registerForIndex[index];
@@ -298,14 +317,13 @@ public:
ASSERT(reg != InvalidFPRReg);
ASSERT(reg < 20);
static const unsigned indexForRegister[20] = {
- 0, InvalidIndex, InvalidIndex, InvalidIndex,
- 1, InvalidIndex, 2, InvalidIndex,
- 3, InvalidIndex, 4, InvalidIndex,
+ 0, InvalidIndex, 1, InvalidIndex,
+ 2, InvalidIndex, 3, InvalidIndex,
+ 4, InvalidIndex, 5, InvalidIndex,
InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex,
- InvalidIndex, InvalidIndex, 5, InvalidIndex,
+ InvalidIndex, InvalidIndex, 6, InvalidIndex,
};
unsigned result = indexForRegister[reg];
- ASSERT(result != InvalidIndex);
return result;
}
@@ -331,68 +349,6 @@ public:
#endif // CPU(MIPS)
-#if CPU(SH4)
-
-class FPRInfo {
-public:
- typedef FPRReg RegisterType;
- static const unsigned numberOfRegisters = 6;
-
- // Temporary registers.
- static const FPRReg fpRegT0 = SH4Registers::dr0;
- static const FPRReg fpRegT1 = SH4Registers::dr2;
- static const FPRReg fpRegT2 = SH4Registers::dr4;
- static const FPRReg fpRegT3 = SH4Registers::dr6;
- static const FPRReg fpRegT4 = SH4Registers::dr8;
- static const FPRReg fpRegT5 = SH4Registers::dr10;
-
- static const FPRReg returnValueFPR = SH4Registers::dr0;
-
- static const FPRReg argumentFPR0 = SH4Registers::dr4;
- static const FPRReg argumentFPR1 = SH4Registers::dr6;
-
- static FPRReg toRegister(unsigned index)
- {
- static const FPRReg registerForIndex[numberOfRegisters] = {
- fpRegT0, fpRegT1, fpRegT2, fpRegT3, fpRegT4, fpRegT5 };
-
- ASSERT(index < numberOfRegisters);
- return registerForIndex[index];
- }
-
- static unsigned toIndex(FPRReg reg)
- {
- ASSERT(reg != InvalidFPRReg);
- ASSERT(reg < 16);
- static const unsigned indexForRegister[16] = {
- 0, InvalidIndex, 1, InvalidIndex,
- 2, InvalidIndex, 3, InvalidIndex,
- 4, InvalidIndex, 5, InvalidIndex,
- InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex
- };
- unsigned result = indexForRegister[reg];
- ASSERT(result != InvalidIndex);
- return result;
- }
-
- static const char* debugName(FPRReg reg)
- {
- ASSERT(reg != InvalidFPRReg);
- ASSERT(reg < 16);
- static const char* nameForRegister[16] = {
- "dr0", "fr1", "dr2", "fr3",
- "dr4", "fr5", "dr6", "fr7",
- "dr8", "fr9", "dr10", "fr11",
- "dr12", "fr13", "dr14", "fr15"
- };
- return nameForRegister[reg];
- }
-
- static const unsigned InvalidIndex = 0xffffffff;
-};
-
-#endif // CPU(SH4)
-
#endif // ENABLE(JIT)
} // namespace JSC
@@ -409,5 +365,3 @@ inline void printInternal(PrintStream& out, JSC::FPRReg reg)
}
} // namespace WTF
-
-#endif
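
Since the ARM64 FP argument registers are q0 through q7 and map directly onto indices 0..7, the new FPRInfo::toArgumentRegister can simply cast the index. A hedged sketch of a marshalling loop that leans on this (the jit assembler reference and the sources vector are hypothetical, not part of this patch):

    // Hypothetical: move up to eight double sources into the FP argument registers q0..q7.
    for (unsigned i = 0; i < sources.size() && i < FPRInfo::numberOfArgumentRegisters; ++i)
        jit.moveDouble(sources[i], FPRInfo::toArgumentRegister(i));
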
diff --git a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp
index f681dd847..bab6de13b 100644
--- a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp
+++ b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,24 +28,27 @@
#if ENABLE(JIT)
+#include "CodeBlock.h"
+#include "DFGCommonData.h"
#include "Heap.h"
#include "VM.h"
-#include "Operations.h"
+#include "JITStubRoutineSet.h"
+#include "JSCInlines.h"
#include "SlotVisitor.h"
#include "Structure.h"
+#include <wtf/RefPtr.h>
namespace JSC {
GCAwareJITStubRoutine::GCAwareJITStubRoutine(
- const MacroAssemblerCodeRef& code, VM& vm, bool isClosureCall)
+ const MacroAssemblerCodeRef& code, VM& vm)
: JITStubRoutine(code)
, m_mayBeExecuting(false)
, m_isJettisoned(false)
- , m_isClosureCall(isClosureCall)
{
- vm.heap.m_jitStubRoutines.add(this);
+ vm.heap.m_jitStubRoutines->add(this);
}
-
+
GCAwareJITStubRoutine::~GCAwareJITStubRoutine() { }
void GCAwareJITStubRoutine::observeZeroRefCount()
@@ -78,48 +81,78 @@ void GCAwareJITStubRoutine::markRequiredObjectsInternal(SlotVisitor&)
{
}
-MarkingGCAwareJITStubRoutineWithOneObject::MarkingGCAwareJITStubRoutineWithOneObject(
+MarkingGCAwareJITStubRoutine::MarkingGCAwareJITStubRoutine(
const MacroAssemblerCodeRef& code, VM& vm, const JSCell* owner,
- JSCell* object)
+ const Vector<JSCell*>& cells)
: GCAwareJITStubRoutine(code, vm)
- , m_object(vm, owner, object)
+ , m_cells(cells.size())
{
+ for (unsigned i = cells.size(); i--;)
+ m_cells[i].set(vm, owner, cells[i]);
}
-MarkingGCAwareJITStubRoutineWithOneObject::~MarkingGCAwareJITStubRoutineWithOneObject()
+MarkingGCAwareJITStubRoutine::~MarkingGCAwareJITStubRoutine()
{
}
-void MarkingGCAwareJITStubRoutineWithOneObject::markRequiredObjectsInternal(SlotVisitor& visitor)
+void MarkingGCAwareJITStubRoutine::markRequiredObjectsInternal(SlotVisitor& visitor)
{
- visitor.append(&m_object);
+ for (auto& entry : m_cells)
+ visitor.append(entry);
}
-PassRefPtr<JITStubRoutine> createJITStubRoutine(
- const MacroAssemblerCodeRef& code,
- VM& vm,
- const JSCell*,
- bool makesCalls)
+
+GCAwareJITStubRoutineWithExceptionHandler::GCAwareJITStubRoutineWithExceptionHandler(
+ const MacroAssemblerCodeRef& code, VM& vm, const JSCell* owner, const Vector<JSCell*>& cells,
+ CodeBlock* codeBlockForExceptionHandlers, CallSiteIndex exceptionHandlerCallSiteIndex)
+ : MarkingGCAwareJITStubRoutine(code, vm, owner, cells)
+ , m_codeBlockWithExceptionHandler(codeBlockForExceptionHandlers)
+ , m_exceptionHandlerCallSiteIndex(exceptionHandlerCallSiteIndex)
{
- if (!makesCalls)
- return adoptRef(new JITStubRoutine(code));
+ RELEASE_ASSERT(m_codeBlockWithExceptionHandler);
+ ASSERT(!!m_codeBlockWithExceptionHandler->handlerForIndex(exceptionHandlerCallSiteIndex.bits()));
+}
- return static_pointer_cast<JITStubRoutine>(
- adoptRef(new GCAwareJITStubRoutine(code, vm)));
+void GCAwareJITStubRoutineWithExceptionHandler::aboutToDie()
+{
+ m_codeBlockWithExceptionHandler = nullptr;
}
-PassRefPtr<JITStubRoutine> createJITStubRoutine(
+void GCAwareJITStubRoutineWithExceptionHandler::observeZeroRefCount()
+{
+#if ENABLE(DFG_JIT)
+ if (m_codeBlockWithExceptionHandler) {
+ m_codeBlockWithExceptionHandler->jitCode()->dfgCommon()->removeCallSiteIndex(m_exceptionHandlerCallSiteIndex);
+ m_codeBlockWithExceptionHandler->removeExceptionHandlerForCallSite(m_exceptionHandlerCallSiteIndex);
+ m_codeBlockWithExceptionHandler = nullptr;
+ }
+#endif
+
+ Base::observeZeroRefCount();
+}
+
+
+Ref<JITStubRoutine> createJITStubRoutine(
const MacroAssemblerCodeRef& code,
VM& vm,
const JSCell* owner,
bool makesCalls,
- JSCell* object)
+ const Vector<JSCell*>& cells,
+ CodeBlock* codeBlockForExceptionHandlers,
+ CallSiteIndex exceptionHandlerCallSiteIndex)
{
if (!makesCalls)
- return adoptRef(new JITStubRoutine(code));
+ return adoptRef(*new JITStubRoutine(code));
+
+ if (codeBlockForExceptionHandlers) {
+ RELEASE_ASSERT(JITCode::isOptimizingJIT(codeBlockForExceptionHandlers->jitType()));
+ return adoptRef(*new GCAwareJITStubRoutineWithExceptionHandler(code, vm, owner, cells, codeBlockForExceptionHandlers, exceptionHandlerCallSiteIndex));
+ }
+
+ if (cells.isEmpty())
+ return adoptRef(*new GCAwareJITStubRoutine(code, vm));
- return static_pointer_cast<JITStubRoutine>(
- adoptRef(new MarkingGCAwareJITStubRoutineWithOneObject(code, vm, owner, object)));
+ return adoptRef(*new MarkingGCAwareJITStubRoutine(code, vm, owner, cells));
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h
index 03045c5d1..5ee36ca46 100644
--- a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h
+++ b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2014, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,10 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef GCAwareJITStubRoutine_h
-#define GCAwareJITStubRoutine_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(JIT)
@@ -34,7 +31,6 @@
#include "JSObject.h"
#include "JSString.h"
#include "WriteBarrier.h"
-#include <wtf/RefCounted.h>
#include <wtf/Vector.h>
namespace JSC {
@@ -54,7 +50,7 @@ class JITStubRoutineSet;
// list which does not get reclaimed all at once).
class GCAwareJITStubRoutine : public JITStubRoutine {
public:
- GCAwareJITStubRoutine(const MacroAssemblerCodeRef&, VM&, bool isClosureCall = false);
+ GCAwareJITStubRoutine(const MacroAssemblerCodeRef&, VM&);
virtual ~GCAwareJITStubRoutine();
void markRequiredObjects(SlotVisitor& visitor)
@@ -64,10 +60,8 @@ public:
void deleteFromGC();
- bool isClosureCall() const { return m_isClosureCall; }
-
protected:
- virtual void observeZeroRefCount() override;
+ void observeZeroRefCount() override;
virtual void markRequiredObjectsInternal(SlotVisitor&);
@@ -76,22 +70,39 @@ private:
bool m_mayBeExecuting;
bool m_isJettisoned;
- bool m_isClosureCall;
};
// Use this if you want to mark one additional object during GC if your stub
// routine is known to be executing.
-class MarkingGCAwareJITStubRoutineWithOneObject : public GCAwareJITStubRoutine {
+class MarkingGCAwareJITStubRoutine : public GCAwareJITStubRoutine {
public:
- MarkingGCAwareJITStubRoutineWithOneObject(
- const MacroAssemblerCodeRef&, VM&, const JSCell* owner, JSCell*);
- virtual ~MarkingGCAwareJITStubRoutineWithOneObject();
+ MarkingGCAwareJITStubRoutine(
+ const MacroAssemblerCodeRef&, VM&, const JSCell* owner, const Vector<JSCell*>&);
+ virtual ~MarkingGCAwareJITStubRoutine();
protected:
- virtual void markRequiredObjectsInternal(SlotVisitor&) override;
+ void markRequiredObjectsInternal(SlotVisitor&) override;
+
+private:
+ Vector<WriteBarrier<JSCell>> m_cells;
+};
+
+
+// The stub has exception handlers in it, so it clears itself from the exception
+// handling table when it dies. It also frees its slot in the CodeOrigin table so
+// that new exception handlers can reuse the same CallSiteIndex.
+class GCAwareJITStubRoutineWithExceptionHandler : public MarkingGCAwareJITStubRoutine {
+public:
+ typedef GCAwareJITStubRoutine Base;
+
+ GCAwareJITStubRoutineWithExceptionHandler(const MacroAssemblerCodeRef&, VM&, const JSCell* owner, const Vector<JSCell*>&, CodeBlock*, CallSiteIndex);
+
+ void aboutToDie() override;
+ void observeZeroRefCount() override;
private:
- WriteBarrier<JSCell> m_object;
+ CodeBlock* m_codeBlockWithExceptionHandler;
+ CallSiteIndex m_exceptionHandlerCallSiteIndex;
};
// Helper for easily creating a GC-aware JIT stub routine. For the varargs,
@@ -100,7 +111,7 @@ private:
// appropriate. Generally you only need to pass pointers that will be used
// after the first call to C++ or JS.
//
-// PassRefPtr<JITStubRoutine> createJITStubRoutine(
+// Ref<JITStubRoutine> createJITStubRoutine(
// const MacroAssemblerCodeRef& code,
// VM& vm,
// const JSCell* owner,
@@ -113,15 +124,11 @@ private:
// this function using varargs, I ended up with more code than this simple
// way.
-PassRefPtr<JITStubRoutine> createJITStubRoutine(
- const MacroAssemblerCodeRef&, VM&, const JSCell* owner, bool makesCalls);
-PassRefPtr<JITStubRoutine> createJITStubRoutine(
+Ref<JITStubRoutine> createJITStubRoutine(
const MacroAssemblerCodeRef&, VM&, const JSCell* owner, bool makesCalls,
- JSCell*);
+ const Vector<JSCell*>& = { },
+ CodeBlock* codeBlockForExceptionHandlers = nullptr, CallSiteIndex exceptionHandlingCallSiteIndex = CallSiteIndex(std::numeric_limits<unsigned>::max()));
} // namespace JSC
#endif // ENABLE(JIT)
-
-#endif // GCAwareJITStubRoutine_h
-
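
With the consolidated signature, a single createJITStubRoutine covers all three shapes of stub: no cells, cells to keep alive, and cells plus an exception handler. Hedged example call sites, assuming code, vm, owner, cells, codeBlock, and callSiteIndex already exist in the caller:

    // Stub that makes calls but keeps nothing alive beyond the code itself.
    Ref<JITStubRoutine> plain = createJITStubRoutine(code, vm, owner, /* makesCalls */ true);

    // Stub that must keep `cells` alive while it may be executing.
    Ref<JITStubRoutine> marking = createJITStubRoutine(code, vm, owner, true, cells);

    // Stub that also owns an exception handler in an optimizing-JIT code block.
    Ref<JITStubRoutine> withHandler = createJITStubRoutine(
        code, vm, owner, true, cells, codeBlock, callSiteIndex);
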
diff --git a/Source/JavaScriptCore/jit/GPRInfo.cpp b/Source/JavaScriptCore/jit/GPRInfo.cpp
new file mode 100644
index 000000000..5a8005f9b
--- /dev/null
+++ b/Source/JavaScriptCore/jit/GPRInfo.cpp
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "GPRInfo.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+void JSValueRegs::dump(PrintStream& out) const
+{
+#if USE(JSVALUE64)
+ out.print(m_gpr);
+#else
+ out.print("(tag:", tagGPR(), ", payload:", payloadGPR(), ")");
+#endif
+}
+
+// This is in the .cpp file to work around clang issues.
+#if CPU(X86_64)
+const GPRReg GPRInfo::patchpointScratchRegister = MacroAssembler::s_scratchRegister;
+#elif CPU(ARM64)
+const GPRReg GPRInfo::patchpointScratchRegister = ARM64Registers::ip0;
+#endif
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/GPRInfo.h b/Source/JavaScriptCore/jit/GPRInfo.h
index 393a56b50..f7e4a6b2c 100644
--- a/Source/JavaScriptCore/jit/GPRInfo.h
+++ b/Source/JavaScriptCore/jit/GPRInfo.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,14 +23,21 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef GPRInfo_h
-#define GPRInfo_h
+#pragma once
#include "MacroAssembler.h"
+#include <array>
#include <wtf/PrintStream.h>
namespace JSC {
+enum NoResultTag { NoResult };
+
+// We use the same conventions in the baseline JIT as in the LLInt. If you
+// change mappings in the GPRInfo, you should change them accordingly in the
+// offlineasm compiler. The register naming conventions are described at the
+// top of the LowLevelInterpreter.asm file.
+
typedef MacroAssembler::RegisterID GPRReg;
#define InvalidGPRReg ((::JSC::GPRReg)-1)
@@ -54,12 +61,25 @@ public:
return JSValueRegs(gpr);
}
+ static JSValueRegs withTwoAvailableRegs(GPRReg gpr, GPRReg)
+ {
+ return JSValueRegs(gpr);
+ }
+
bool operator!() const { return m_gpr == InvalidGPRReg; }
+ explicit operator bool() const { return m_gpr != InvalidGPRReg; }
+
+ bool operator==(JSValueRegs other) { return m_gpr == other.m_gpr; }
+ bool operator!=(JSValueRegs other) { return !(*this == other); }
GPRReg gpr() const { return m_gpr; }
GPRReg tagGPR() const { return InvalidGPRReg; }
GPRReg payloadGPR() const { return m_gpr; }
+ bool uses(GPRReg gpr) const { return m_gpr == gpr; }
+
+ void dump(PrintStream&) const;
+
private:
GPRReg m_gpr;
};
@@ -98,6 +118,7 @@ public:
}
bool operator!() const { return m_base == InvalidGPRReg; }
+ explicit operator bool() const { return m_base != InvalidGPRReg; }
bool isAddress() const { return m_offset != notAddress(); }
@@ -119,6 +140,11 @@ public:
return m_base;
}
+ JSValueRegs regs() const
+ {
+ return JSValueRegs(gpr());
+ }
+
MacroAssembler::Address asAddress() const { return MacroAssembler::Address(base(), offset()); }
private:
@@ -144,16 +170,29 @@ public:
{
}
+ static JSValueRegs withTwoAvailableRegs(GPRReg gpr1, GPRReg gpr2)
+ {
+ return JSValueRegs(gpr1, gpr2);
+ }
+
static JSValueRegs payloadOnly(GPRReg gpr)
{
return JSValueRegs(InvalidGPRReg, gpr);
}
- bool operator!() const
+ bool operator!() const { return !static_cast<bool>(*this); }
+ explicit operator bool() const
{
- return static_cast<GPRReg>(m_tagGPR) == InvalidGPRReg
- && static_cast<GPRReg>(m_payloadGPR) == InvalidGPRReg;
+ return static_cast<GPRReg>(m_tagGPR) != InvalidGPRReg
+ || static_cast<GPRReg>(m_payloadGPR) != InvalidGPRReg;
}
+
+ bool operator==(JSValueRegs other) const
+ {
+ return m_tagGPR == other.m_tagGPR
+ && m_payloadGPR == other.m_payloadGPR;
+ }
+ bool operator!=(JSValueRegs other) const { return !(*this == other); }
GPRReg tagGPR() const { return static_cast<GPRReg>(m_tagGPR); }
GPRReg payloadGPR() const { return static_cast<GPRReg>(m_payloadGPR); }
@@ -169,6 +208,10 @@ public:
return tagGPR();
}
+ bool uses(GPRReg gpr) const { return m_tagGPR == gpr || m_payloadGPR == gpr; }
+
+ void dump(PrintStream&) const;
+
private:
int8_t m_tagGPR;
int8_t m_payloadGPR;
@@ -219,11 +262,12 @@ public:
result.m_tagType = static_cast<int8_t>(JSValue::CellTag);
return result;
}
-
- bool operator!() const
+
+ bool operator!() const { return !static_cast<bool>(*this); }
+ explicit operator bool() const
{
- return static_cast<GPRReg>(m_baseOrTag) == InvalidGPRReg
- && static_cast<GPRReg>(m_payload) == InvalidGPRReg;
+ return static_cast<GPRReg>(m_baseOrTag) != InvalidGPRReg
+ || static_cast<GPRReg>(m_payload) != InvalidGPRReg;
}
bool isAddress() const
@@ -268,6 +312,11 @@ public:
return static_cast<int32_t>(m_tagType);
}
+ JSValueRegs regs() const
+ {
+ return JSValueRegs(tagGPR(), payloadGPR());
+ }
+
MacroAssembler::Address asAddress(unsigned additionalOffset = 0) const { return MacroAssembler::Address(base(), offset() + additionalOffset); }
private:
@@ -280,10 +329,9 @@ private:
};
#endif // USE(JSVALUE32_64)
-// The baseline JIT requires that regT3 be callee-preserved.
-
#if CPU(X86)
#define NUMBER_OF_ARGUMENT_REGISTERS 0u
+#define NUMBER_OF_CALLEE_SAVES_REGISTERS 0u
class GPRInfo {
public:
@@ -291,25 +339,20 @@ public:
static const unsigned numberOfRegisters = 6;
static const unsigned numberOfArgumentRegisters = NUMBER_OF_ARGUMENT_REGISTERS;
- // Note: regT3 is required to be callee-preserved.
-
// Temporary registers.
static const GPRReg regT0 = X86Registers::eax;
static const GPRReg regT1 = X86Registers::edx;
static const GPRReg regT2 = X86Registers::ecx;
- static const GPRReg regT3 = X86Registers::ebx;
- static const GPRReg regT4 = X86Registers::edi;
- static const GPRReg regT5 = X86Registers::esi;
- // These registers match the baseline JIT.
- static const GPRReg cachedResultRegister = regT0;
- static const GPRReg cachedResultRegister2 = regT1;
+ static const GPRReg regT3 = X86Registers::ebx; // Callee-save
+ static const GPRReg regT4 = X86Registers::esi; // Callee-save
+ static const GPRReg regT5 = X86Registers::edi; // Callee-save
static const GPRReg callFrameRegister = X86Registers::ebp;
// These constants provide the names for the general purpose argument & return value registers.
static const GPRReg argumentGPR0 = X86Registers::ecx; // regT2
static const GPRReg argumentGPR1 = X86Registers::edx; // regT1
+ static const GPRReg argumentGPR2 = X86Registers::eax; // regT0
+ static const GPRReg argumentGPR3 = X86Registers::ebx; // regT3
static const GPRReg nonArgGPR0 = X86Registers::esi; // regT4
- static const GPRReg nonArgGPR1 = X86Registers::eax; // regT0
- static const GPRReg nonArgGPR2 = X86Registers::ebx; // regT3
static const GPRReg returnValueGPR = X86Registers::eax; // regT0
static const GPRReg returnValueGPR2 = X86Registers::edx; // regT1
static const GPRReg nonPreservedNonReturnGPR = X86Registers::ecx;
@@ -321,13 +364,18 @@ public:
return registerForIndex[index];
}
+ static GPRReg toArgumentRegister(unsigned)
+ {
+ UNREACHABLE_FOR_PLATFORM();
+ return InvalidGPRReg;
+ }
+
static unsigned toIndex(GPRReg reg)
{
ASSERT(reg != InvalidGPRReg);
ASSERT(static_cast<int>(reg) < 8);
- static const unsigned indexForRegister[8] = { 0, 2, 1, 3, InvalidIndex, InvalidIndex, 5, 4 };
+ static const unsigned indexForRegister[8] = { 0, 2, 1, 3, InvalidIndex, InvalidIndex, 4, 5 };
unsigned result = indexForRegister[reg];
- ASSERT(result != InvalidIndex);
return result;
}
@@ -350,8 +398,10 @@ public:
#if CPU(X86_64)
#if !OS(WINDOWS)
#define NUMBER_OF_ARGUMENT_REGISTERS 6u
+#define NUMBER_OF_CALLEE_SAVES_REGISTERS 5u
#else
#define NUMBER_OF_ARGUMENT_REGISTERS 4u
+#define NUMBER_OF_CALLEE_SAVES_REGISTERS 7u
#endif
class GPRInfo {
@@ -360,50 +410,78 @@ public:
static const unsigned numberOfRegisters = 11;
static const unsigned numberOfArgumentRegisters = NUMBER_OF_ARGUMENT_REGISTERS;
- // Note: regT3 is required to be callee-preserved.
-
// These registers match the baseline JIT.
- static const GPRReg cachedResultRegister = X86Registers::eax;
static const GPRReg callFrameRegister = X86Registers::ebp;
static const GPRReg tagTypeNumberRegister = X86Registers::r14;
static const GPRReg tagMaskRegister = X86Registers::r15;
+
// Temporary registers.
static const GPRReg regT0 = X86Registers::eax;
- static const GPRReg regT1 = X86Registers::edx;
- static const GPRReg regT2 = X86Registers::ecx;
- static const GPRReg regT3 = X86Registers::ebx;
- static const GPRReg regT4 = X86Registers::edi;
- static const GPRReg regT5 = X86Registers::esi;
- static const GPRReg regT6 = X86Registers::r8;
+#if !OS(WINDOWS)
+ static const GPRReg regT1 = X86Registers::esi;
+ static const GPRReg regT2 = X86Registers::edx;
+ static const GPRReg regT3 = X86Registers::ecx;
+ static const GPRReg regT4 = X86Registers::r8;
+ static const GPRReg regT5 = X86Registers::r10;
+ static const GPRReg regT6 = X86Registers::edi;
static const GPRReg regT7 = X86Registers::r9;
- static const GPRReg regT8 = X86Registers::r10;
- static const GPRReg regT9 = X86Registers::r12;
- static const GPRReg regT10 = X86Registers::r13;
+#else
+ static const GPRReg regT1 = X86Registers::edx;
+ static const GPRReg regT2 = X86Registers::r8;
+ static const GPRReg regT3 = X86Registers::r9;
+ static const GPRReg regT4 = X86Registers::r10;
+ static const GPRReg regT5 = X86Registers::ecx;
+#endif
+
+ static const GPRReg regCS0 = X86Registers::ebx;
+
+#if !OS(WINDOWS)
+ static const GPRReg regCS1 = X86Registers::r12;
+ static const GPRReg regCS2 = X86Registers::r13;
+ static const GPRReg regCS3 = X86Registers::r14;
+ static const GPRReg regCS4 = X86Registers::r15;
+#else
+ static const GPRReg regCS1 = X86Registers::esi;
+ static const GPRReg regCS2 = X86Registers::edi;
+ static const GPRReg regCS3 = X86Registers::r12;
+ static const GPRReg regCS4 = X86Registers::r13;
+ static const GPRReg regCS5 = X86Registers::r14;
+ static const GPRReg regCS6 = X86Registers::r15;
+#endif
+
// These constants provide the names for the general purpose argument & return value registers.
#if !OS(WINDOWS)
- static const GPRReg argumentGPR0 = X86Registers::edi; // regT4
- static const GPRReg argumentGPR1 = X86Registers::esi; // regT5
- static const GPRReg argumentGPR2 = X86Registers::edx; // regT1
- static const GPRReg argumentGPR3 = X86Registers::ecx; // regT2
- static const GPRReg argumentGPR4 = X86Registers::r8; // regT6
- static const GPRReg argumentGPR5 = X86Registers::r9; // regT7
+ static const GPRReg argumentGPR0 = X86Registers::edi; // regT6
+ static const GPRReg argumentGPR1 = X86Registers::esi; // regT1
+ static const GPRReg argumentGPR2 = X86Registers::edx; // regT2
+ static const GPRReg argumentGPR3 = X86Registers::ecx; // regT3
+ static const GPRReg argumentGPR4 = X86Registers::r8; // regT4
+ static const GPRReg argumentGPR5 = X86Registers::r9; // regT7
#else
- static const GPRReg argumentGPR0 = X86Registers::ecx;
- static const GPRReg argumentGPR1 = X86Registers::edx;
- static const GPRReg argumentGPR2 = X86Registers::r8; // regT6
- static const GPRReg argumentGPR3 = X86Registers::r9; // regT7
+ static const GPRReg argumentGPR0 = X86Registers::ecx; // regT5
+ static const GPRReg argumentGPR1 = X86Registers::edx; // regT1
+ static const GPRReg argumentGPR2 = X86Registers::r8; // regT2
+ static const GPRReg argumentGPR3 = X86Registers::r9; // regT3
#endif
- static const GPRReg nonArgGPR0 = X86Registers::r10; // regT8
- static const GPRReg nonArgGPR1 = X86Registers::ebx; // regT3
- static const GPRReg nonArgGPR2 = X86Registers::r12; // regT9
+ static const GPRReg nonArgGPR0 = X86Registers::r10; // regT5 (regT4 on Windows)
static const GPRReg returnValueGPR = X86Registers::eax; // regT0
- static const GPRReg returnValueGPR2 = X86Registers::edx; // regT1
- static const GPRReg nonPreservedNonReturnGPR = X86Registers::esi;
+ static const GPRReg returnValueGPR2 = X86Registers::edx; // regT1 or regT2
+ static const GPRReg nonPreservedNonReturnGPR = X86Registers::r10; // regT5 (regT4 on Windows)
+ static const GPRReg nonPreservedNonArgumentGPR = X86Registers::r10; // regT5 (regT4 on Windows)
+
+ // FIXME: I believe that all uses of this are dead in the sense that it just causes the scratch
+ // register allocator to select a different register and potentially spill things. It would be better
+ // if we instead had a more explicit way of saying that we don't have a scratch register.
+ static const GPRReg patchpointScratchRegister;
static GPRReg toRegister(unsigned index)
{
ASSERT(index < numberOfRegisters);
- static const GPRReg registerForIndex[numberOfRegisters] = { regT0, regT1, regT2, regT3, regT4, regT5, regT6, regT7, regT8, regT9, regT10 };
+#if !OS(WINDOWS)
+ static const GPRReg registerForIndex[numberOfRegisters] = { regT0, regT1, regT2, regT3, regT4, regT5, regT6, regT7, regCS0, regCS1, regCS2 };
+#else
+ static const GPRReg registerForIndex[numberOfRegisters] = { regT0, regT1, regT2, regT3, regT4, regT5, regCS0, regCS1, regCS2, regCS3, regCS4 };
+#endif
return registerForIndex[index];
}
@@ -422,7 +500,11 @@ public:
{
ASSERT(reg != InvalidGPRReg);
ASSERT(static_cast<int>(reg) < 16);
- static const unsigned indexForRegister[16] = { 0, 2, 1, 3, InvalidIndex, InvalidIndex, 5, 4, 6, 7, 8, InvalidIndex, 9, 10, InvalidIndex, InvalidIndex };
+#if !OS(WINDOWS)
+ static const unsigned indexForRegister[16] = { 0, 3, 2, 8, InvalidIndex, InvalidIndex, 1, 6, 4, 7, 5, InvalidIndex, 9, 10, InvalidIndex, InvalidIndex };
+#else
+ static const unsigned indexForRegister[16] = { 0, 5, 1, 6, InvalidIndex, InvalidIndex, 7, 8, 2, 3, 4, InvalidIndex, 9, 10, InvalidIndex, InvalidIndex };
+#endif
return indexForRegister[reg];
}
@@ -439,6 +521,16 @@ public:
return nameForRegister[reg];
}
+ static const std::array<GPRReg, 3>& reservedRegisters()
+ {
+ static const std::array<GPRReg, 3> reservedRegisters { {
+ MacroAssembler::s_scratchRegister,
+ tagTypeNumberRegister,
+ tagMaskRegister,
+ } };
+ return reservedRegisters;
+ }
+
static const unsigned InvalidIndex = 0xffffffff;
};
@@ -446,6 +538,7 @@ public:
#if CPU(ARM)
#define NUMBER_OF_ARGUMENT_REGISTERS 4u
+#define NUMBER_OF_CALLEE_SAVES_REGISTERS 0u
class GPRInfo {
public:
@@ -453,13 +546,11 @@ public:
static const unsigned numberOfRegisters = 9;
static const unsigned numberOfArgumentRegisters = NUMBER_OF_ARGUMENT_REGISTERS;
- // Note: regT3 is required to be callee-preserved.
-
// Temporary registers.
static const GPRReg regT0 = ARMRegisters::r0;
static const GPRReg regT1 = ARMRegisters::r1;
static const GPRReg regT2 = ARMRegisters::r2;
- static const GPRReg regT3 = ARMRegisters::r4;
+ static const GPRReg regT3 = ARMRegisters::r3;
static const GPRReg regT4 = ARMRegisters::r8;
static const GPRReg regT5 = ARMRegisters::r9;
static const GPRReg regT6 = ARMRegisters::r10;
@@ -468,22 +559,19 @@ public:
#else
static const GPRReg regT7 = ARMRegisters::r7;
#endif
- static const GPRReg regT8 = ARMRegisters::r3;
+ static const GPRReg regT8 = ARMRegisters::r4;
// These registers match the baseline JIT.
- static const GPRReg cachedResultRegister = regT0;
- static const GPRReg cachedResultRegister2 = regT1;
static const GPRReg callFrameRegister = ARMRegisters::fp;
// These constants provide the names for the general purpose argument & return value registers.
static const GPRReg argumentGPR0 = ARMRegisters::r0; // regT0
static const GPRReg argumentGPR1 = ARMRegisters::r1; // regT1
static const GPRReg argumentGPR2 = ARMRegisters::r2; // regT2
- static const GPRReg argumentGPR3 = ARMRegisters::r3; // regT8
- static const GPRReg nonArgGPR0 = ARMRegisters::r4; // regT3
+ static const GPRReg argumentGPR3 = ARMRegisters::r3; // regT3
+ static const GPRReg nonArgGPR0 = ARMRegisters::r4; // regT8
static const GPRReg nonArgGPR1 = ARMRegisters::r8; // regT4
- static const GPRReg nonArgGPR2 = ARMRegisters::r9; // regT5
static const GPRReg returnValueGPR = ARMRegisters::r0; // regT0
static const GPRReg returnValueGPR2 = ARMRegisters::r1; // regT1
- static const GPRReg nonPreservedNonReturnGPR = ARMRegisters::r5; // regT7
+ static const GPRReg nonPreservedNonReturnGPR = ARMRegisters::r5;
static GPRReg toRegister(unsigned index)
{
@@ -492,18 +580,24 @@ public:
return registerForIndex[index];
}
+ static GPRReg toArgumentRegister(unsigned index)
+ {
+ ASSERT(index < numberOfArgumentRegisters);
+ static const GPRReg registerForIndex[numberOfArgumentRegisters] = { argumentGPR0, argumentGPR1, argumentGPR2, argumentGPR3 };
+ return registerForIndex[index];
+ }
+
static unsigned toIndex(GPRReg reg)
{
ASSERT(reg != InvalidGPRReg);
ASSERT(static_cast<int>(reg) < 16);
static const unsigned indexForRegister[16] =
#if CPU(ARM_THUMB2)
- { 0, 1, 2, 8, 3, 9, InvalidIndex, InvalidIndex, 4, 5, 6, 7, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex };
+ { 0, 1, 2, 3, 8, InvalidIndex, InvalidIndex, InvalidIndex, 4, 5, 6, 7, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex };
#else
- { 0, 1, 2, 8, 3, 9, InvalidIndex, 7, 4, 5, 6, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex };
+ { 0, 1, 2, 3, 8, InvalidIndex, InvalidIndex, 7, 4, 5, 6, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex };
#endif
unsigned result = indexForRegister[reg];
- ASSERT(result != InvalidIndex);
return result;
}
@@ -527,26 +621,27 @@ public:
#if CPU(ARM64)
#define NUMBER_OF_ARGUMENT_REGISTERS 8u
+// Callee saves include x19..x28 and the FP registers q8..q15.
+#define NUMBER_OF_CALLEE_SAVES_REGISTERS 18u
class GPRInfo {
public:
typedef GPRReg RegisterType;
static const unsigned numberOfRegisters = 16;
-
- // Note: regT3 is required to be callee-preserved.
+ static const unsigned numberOfArgumentRegisters = 8;
// These registers match the baseline JIT.
- static const GPRReg cachedResultRegister = ARM64Registers::x0;
- static const GPRReg timeoutCheckRegister = ARM64Registers::x26;
static const GPRReg callFrameRegister = ARM64Registers::fp;
static const GPRReg tagTypeNumberRegister = ARM64Registers::x27;
static const GPRReg tagMaskRegister = ARM64Registers::x28;
+ static const GPRReg dataTempRegister = MacroAssembler::dataTempRegister;
+ static const GPRReg memoryTempRegister = MacroAssembler::memoryTempRegister;
// Temporary registers.
static const GPRReg regT0 = ARM64Registers::x0;
static const GPRReg regT1 = ARM64Registers::x1;
static const GPRReg regT2 = ARM64Registers::x2;
- static const GPRReg regT3 = ARM64Registers::x23;
- static const GPRReg regT4 = ARM64Registers::x24;
+ static const GPRReg regT3 = ARM64Registers::x3;
+ static const GPRReg regT4 = ARM64Registers::x4;
static const GPRReg regT5 = ARM64Registers::x5;
static const GPRReg regT6 = ARM64Registers::x6;
static const GPRReg regT7 = ARM64Registers::x7;
@@ -558,6 +653,16 @@ public:
static const GPRReg regT13 = ARM64Registers::x13;
static const GPRReg regT14 = ARM64Registers::x14;
static const GPRReg regT15 = ARM64Registers::x15;
+ static const GPRReg regCS0 = ARM64Registers::x19; // Used by FTL only
+ static const GPRReg regCS1 = ARM64Registers::x20; // Used by FTL only
+ static const GPRReg regCS2 = ARM64Registers::x21; // Used by FTL only
+ static const GPRReg regCS3 = ARM64Registers::x22; // Used by FTL only
+ static const GPRReg regCS4 = ARM64Registers::x23; // Used by FTL only
+ static const GPRReg regCS5 = ARM64Registers::x24; // Used by FTL only
+ static const GPRReg regCS6 = ARM64Registers::x25; // Used by FTL only
+ static const GPRReg regCS7 = ARM64Registers::x26;
+ static const GPRReg regCS8 = ARM64Registers::x27; // tagTypeNumber
+ static const GPRReg regCS9 = ARM64Registers::x28; // tagMask
// These constants provide the names for the general purpose argument & return value registers.
static const GPRReg argumentGPR0 = ARM64Registers::x0; // regT0
static const GPRReg argumentGPR1 = ARM64Registers::x1; // regT1
@@ -569,12 +674,13 @@ public:
static const GPRReg argumentGPR7 = ARM64Registers::x7; // regT7
static const GPRReg nonArgGPR0 = ARM64Registers::x8; // regT8
static const GPRReg nonArgGPR1 = ARM64Registers::x9; // regT9
- static const GPRReg nonArgGPR2 = ARM64Registers::x10; // regT10
static const GPRReg returnValueGPR = ARM64Registers::x0; // regT0
static const GPRReg returnValueGPR2 = ARM64Registers::x1; // regT1
static const GPRReg nonPreservedNonReturnGPR = ARM64Registers::x2;
+ static const GPRReg nonPreservedNonArgumentGPR = ARM64Registers::x8;
+ static const GPRReg patchpointScratchRegister;
- // GPRReg mapping is direct, the machine regsiter numbers can
+ // GPRReg mapping is direct, the machine register numbers can
// be used directly as indices into the GPR RegisterBank.
COMPILE_ASSERT(ARM64Registers::q0 == 0, q0_is_0);
COMPILE_ASSERT(ARM64Registers::q1 == 1, q1_is_1);
@@ -598,12 +704,20 @@ public:
}
static unsigned toIndex(GPRReg reg)
{
+ if (reg > regT15)
+ return InvalidIndex;
return (unsigned)reg;
}
+ static GPRReg toArgumentRegister(unsigned index)
+ {
+ ASSERT(index < numberOfArgumentRegisters);
+ return toRegister(index);
+ }
+
static const char* debugName(GPRReg reg)
{
- ASSERT(static_cast<unsigned>(reg) != InvalidGPRReg);
+ ASSERT(reg != InvalidGPRReg);
ASSERT(static_cast<unsigned>(reg) < 32);
static const char* nameForRegister[32] = {
"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
@@ -614,6 +728,17 @@ public:
return nameForRegister[reg];
}
+ static const std::array<GPRReg, 4>& reservedRegisters()
+ {
+ static const std::array<GPRReg, 4> reservedRegisters { {
+ dataTempRegister,
+ memoryTempRegister,
+ tagTypeNumberRegister,
+ tagMaskRegister,
+ } };
+ return reservedRegisters;
+ }
+
static const unsigned InvalidIndex = 0xffffffff;
};
@@ -621,6 +746,7 @@ public:
#if CPU(MIPS)
#define NUMBER_OF_ARGUMENT_REGISTERS 4u
+#define NUMBER_OF_CALLEE_SAVES_REGISTERS 0u
class GPRInfo {
public:
@@ -630,31 +756,26 @@ public:
// regT0 must be v0 for returning a 32-bit value.
    // regT1 must be v1 for returning a pair of 32-bit values.
- // regT3 must be saved in the callee, so use an S register.
// Temporary registers.
static const GPRReg regT0 = MIPSRegisters::v0;
static const GPRReg regT1 = MIPSRegisters::v1;
- static const GPRReg regT2 = MIPSRegisters::t4;
- static const GPRReg regT3 = MIPSRegisters::s2;
- static const GPRReg regT4 = MIPSRegisters::t5;
- static const GPRReg regT5 = MIPSRegisters::t6;
- static const GPRReg regT6 = MIPSRegisters::s0;
+ static const GPRReg regT2 = MIPSRegisters::t2;
+ static const GPRReg regT3 = MIPSRegisters::t3;
+ static const GPRReg regT4 = MIPSRegisters::t4;
+ static const GPRReg regT5 = MIPSRegisters::t5;
+ static const GPRReg regT6 = MIPSRegisters::t6;
// These registers match the baseline JIT.
- static const GPRReg cachedResultRegister = regT0;
- static const GPRReg cachedResultRegister2 = regT1;
static const GPRReg callFrameRegister = MIPSRegisters::fp;
// These constants provide the names for the general purpose argument & return value registers.
static const GPRReg argumentGPR0 = MIPSRegisters::a0;
static const GPRReg argumentGPR1 = MIPSRegisters::a1;
static const GPRReg argumentGPR2 = MIPSRegisters::a2;
static const GPRReg argumentGPR3 = MIPSRegisters::a3;
- static const GPRReg nonArgGPR0 = regT2;
- static const GPRReg nonArgGPR1 = regT3;
- static const GPRReg nonArgGPR2 = regT4;
+ static const GPRReg nonArgGPR0 = regT4;
static const GPRReg returnValueGPR = regT0;
static const GPRReg returnValueGPR2 = regT1;
- static const GPRReg nonPreservedNonReturnGPR = regT5;
+ static const GPRReg nonPreservedNonReturnGPR = regT2;
static GPRReg toRegister(unsigned index)
{
@@ -663,17 +784,24 @@ public:
return registerForIndex[index];
}
+ static GPRReg toArgumentRegister(unsigned index)
+ {
+ ASSERT(index < numberOfArgumentRegisters);
+ static const GPRReg registerForIndex[numberOfArgumentRegisters] = { argumentGPR0, argumentGPR1, argumentGPR2, argumentGPR3 };
+ return registerForIndex[index];
+ }
+
static unsigned toIndex(GPRReg reg)
{
ASSERT(reg != InvalidGPRReg);
- ASSERT(reg < 24);
- static const unsigned indexForRegister[24] = {
+ ASSERT(reg < 32);
+ static const unsigned indexForRegister[32] = {
InvalidIndex, InvalidIndex, 0, 1, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex,
- InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, 2, 4, 5, InvalidIndex,
- 6, InvalidIndex, 3, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex
+ InvalidIndex, InvalidIndex, 2, 3, 4, 5, 6, InvalidIndex,
+ InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex,
+ InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex
};
unsigned result = indexForRegister[reg];
- ASSERT(result != InvalidIndex);
return result;
}
@@ -695,76 +823,6 @@ public:
#endif // CPU(MIPS)
-#if CPU(SH4)
-#define NUMBER_OF_ARGUMENT_REGISTERS 4u
-
-class GPRInfo {
-public:
- typedef GPRReg RegisterType;
- static const unsigned numberOfRegisters = 10;
-
- // Note: regT3 is required to be callee-preserved.
-
- // Temporary registers.
- static const GPRReg regT0 = SH4Registers::r0;
- static const GPRReg regT1 = SH4Registers::r1;
- static const GPRReg regT2 = SH4Registers::r2;
- static const GPRReg regT3 = SH4Registers::r10;
- static const GPRReg regT4 = SH4Registers::r4;
- static const GPRReg regT5 = SH4Registers::r5;
- static const GPRReg regT6 = SH4Registers::r6;
- static const GPRReg regT7 = SH4Registers::r7;
- static const GPRReg regT8 = SH4Registers::r8;
- static const GPRReg regT9 = SH4Registers::r9;
- // These registers match the baseline JIT.
- static const GPRReg cachedResultRegister = regT0;
- static const GPRReg cachedResultRegister2 = regT1;
- static const GPRReg callFrameRegister = SH4Registers::fp;
- // These constants provide the names for the general purpose argument & return value registers.
- static const GPRReg argumentGPR0 = regT4;
- static const GPRReg argumentGPR1 = regT5;
- static const GPRReg argumentGPR2 = regT6;
- static const GPRReg argumentGPR3 = regT7;
- static const GPRReg nonArgGPR0 = regT3;
- static const GPRReg nonArgGPR1 = regT8;
- static const GPRReg nonArgGPR2 = regT9;
- static const GPRReg returnValueGPR = regT0;
- static const GPRReg returnValueGPR2 = regT1;
- static const GPRReg nonPreservedNonReturnGPR = regT2;
-
- static GPRReg toRegister(unsigned index)
- {
- ASSERT(index < numberOfRegisters);
- static const GPRReg registerForIndex[numberOfRegisters] = { regT0, regT1, regT2, regT3, regT4, regT5, regT6, regT7, regT8, regT9 };
- return registerForIndex[index];
- }
-
- static unsigned toIndex(GPRReg reg)
- {
- ASSERT(reg != InvalidGPRReg);
- ASSERT(reg < 14);
- static const unsigned indexForRegister[14] = { 0, 1, 2, InvalidIndex, 4, 5, 6, 7, 8, 9, 3, InvalidIndex, InvalidIndex, InvalidIndex };
- unsigned result = indexForRegister[reg];
- ASSERT(result != InvalidIndex);
- return result;
- }
-
- static const char* debugName(GPRReg reg)
- {
- ASSERT(reg != InvalidGPRReg);
- ASSERT(reg < 16);
- static const char* nameForRegister[16] = {
- "r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
- "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
- };
- return nameForRegister[reg];
- }
-
- static const unsigned InvalidIndex = 0xffffffff;
-};
-
-#endif // CPU(SH4)
-
// The baseline JIT uses "accumulator" style execution with regT0 (for 64-bit)
// and regT0 + regT1 (for 32-bit) serving as the accumulator register(s) for
// passing results of one opcode to the next. Hence:
@@ -773,6 +831,14 @@ COMPILE_ASSERT(GPRInfo::regT0 == GPRInfo::returnValueGPR, regT0_must_equal_retur
COMPILE_ASSERT(GPRInfo::regT1 == GPRInfo::returnValueGPR2, regT1_must_equal_returnValueGPR2);
#endif
+inline GPRReg extractResult(GPRReg result) { return result; }
+#if USE(JSVALUE64)
+inline GPRReg extractResult(JSValueRegs result) { return result.gpr(); }
+#else
+inline JSValueRegs extractResult(JSValueRegs result) { return result; }
+#endif
+inline NoResultTag extractResult(NoResultTag) { return NoResult; }
+
#endif // ENABLE(JIT)
} // namespace JSC
@@ -789,5 +855,3 @@ inline void printInternal(PrintStream& out, JSC::GPRReg reg)
}
} // namespace WTF
-
-#endif
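
A behavioural detail of this patch: the various toIndex() implementations no longer assert on registers outside the temporary set and instead return InvalidIndex, so callers are expected to probe. A small sketch of the kind of check that becomes possible (isAllocatable is a hypothetical helper, not part of this patch):

    // Hypothetical: treat a register as allocatable only if GPRInfo knows an index for it.
    static bool isAllocatable(GPRReg reg)
    {
        return GPRInfo::toIndex(reg) != GPRInfo::InvalidIndex;
    }
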
diff --git a/Source/JavaScriptCore/jit/HostCallReturnValue.cpp b/Source/JavaScriptCore/jit/HostCallReturnValue.cpp
index 528fb2bc4..e8d01916b 100644
--- a/Source/JavaScriptCore/jit/HostCallReturnValue.cpp
+++ b/Source/JavaScriptCore/jit/HostCallReturnValue.cpp
@@ -29,6 +29,7 @@
#include "CallFrame.h"
#include "JSCJSValueInlines.h"
#include "JSObject.h"
+#include "JSCInlines.h"
#include <wtf/InlineASM.h>
diff --git a/Source/JavaScriptCore/jit/HostCallReturnValue.h b/Source/JavaScriptCore/jit/HostCallReturnValue.h
index f4c8bc703..eb6116106 100644
--- a/Source/JavaScriptCore/jit/HostCallReturnValue.h
+++ b/Source/JavaScriptCore/jit/HostCallReturnValue.h
@@ -23,12 +23,10 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef HostCallReturnValue_h
-#define HostCallReturnValue_h
+#pragma once
#include "JSCJSValue.h"
#include "MacroAssemblerCodeRef.h"
-#include <wtf/Platform.h>
#if ENABLE(JIT)
@@ -42,7 +40,7 @@ namespace JSC {
extern "C" EncodedJSValue HOST_CALL_RETURN_VALUE_OPTION getHostCallReturnValue() REFERENCED_FROM_ASM WTF_INTERNAL;
-#if COMPILER(GCC)
+#if COMPILER(GCC_OR_CLANG)
// This is a public declaration only to convince CLANG not to elide it.
extern "C" EncodedJSValue HOST_CALL_RETURN_VALUE_OPTION getHostCallReturnValueWithExecState(ExecState*) REFERENCED_FROM_ASM WTF_INTERNAL;
@@ -52,14 +50,12 @@ inline void initializeHostCallReturnValue()
getHostCallReturnValueWithExecState(0);
}
-#else // COMPILER(GCC)
+#else // COMPILER(GCC_OR_CLANG)
inline void initializeHostCallReturnValue() { }
-#endif // COMPILER(GCC)
+#endif // COMPILER(GCC_OR_CLANG)
} // namespace JSC
#endif // ENABLE(JIT)
-
-#endif // HostCallReturnValue_h
diff --git a/Source/JavaScriptCore/jit/ICStats.cpp b/Source/JavaScriptCore/jit/ICStats.cpp
new file mode 100644
index 000000000..2080cd934
--- /dev/null
+++ b/Source/JavaScriptCore/jit/ICStats.cpp
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ICStats.h"
+
+namespace JSC {
+
+bool ICEvent::operator<(const ICEvent& other) const
+{
+ if (m_classInfo != other.m_classInfo) {
+ if (!m_classInfo)
+ return true;
+ if (!other.m_classInfo)
+ return false;
+ return strcmp(m_classInfo->className, other.m_classInfo->className) < 0;
+ }
+
+ if (m_propertyName != other.m_propertyName)
+ return codePointCompare(m_propertyName.string(), other.m_propertyName.string()) < 0;
+
+ return m_kind < other.m_kind;
+}
+
+void ICEvent::dump(PrintStream& out) const
+{
+ out.print(m_kind, "(", m_classInfo ? m_classInfo->className : "<null>", ", ", m_propertyName, ")");
+}
+
+void ICEvent::log() const
+{
+ ICStats::instance().add(*this);
+}
+
+Atomic<ICStats*> ICStats::s_instance;
+
+ICStats::ICStats()
+{
+ m_thread = createThread(
+ "JSC ICStats",
+ [this] () {
+ LockHolder locker(m_lock);
+ for (;;) {
+ m_condition.waitFor(
+ m_lock, Seconds(1), [this] () -> bool { return m_shouldStop; });
+ if (m_shouldStop)
+ break;
+
+ dataLog("ICStats:\n");
+ auto list = m_spectrum.buildList();
+ for (unsigned i = list.size(); i--;)
+ dataLog(" ", list[i].key, ": ", list[i].count, "\n");
+ }
+ });
+}
+
+ICStats::~ICStats()
+{
+ {
+ LockHolder locker(m_lock);
+ m_shouldStop = true;
+ m_condition.notifyAll();
+ }
+
+ waitForThreadCompletion(m_thread);
+}
+
+void ICStats::add(const ICEvent& event)
+{
+ m_spectrum.add(event);
+}
+
+ICStats& ICStats::instance()
+{
+ for (;;) {
+ ICStats* result = s_instance.load();
+ if (result)
+ return *result;
+
+ ICStats* newStats = new ICStats();
+ if (s_instance.compareExchangeWeak(nullptr, newStats))
+ return *newStats;
+
+ delete newStats;
+ }
+}
+
+} // namespace JSC
+
+namespace WTF {
+
+using namespace JSC;
+
+void printInternal(PrintStream& out, ICEvent::Kind kind)
+{
+ switch (kind) {
+#define ICEVENT_KIND_DUMP(name) case ICEvent::name: out.print(#name); return;
+ FOR_EACH_ICEVENT_KIND(ICEVENT_KIND_DUMP);
+#undef ICEVENT_KIND_DUMP
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
+
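
ICStats::instance() uses a racy-allocate-then-CAS pattern: each contender may build a fresh ICStats, but only the thread whose compareExchangeWeak installs its pointer wins, and the losers delete their copies and retry. A minimal sketch of the same pattern using std::atomic (Stats is a placeholder type, not the JSC class):

    #include <atomic>

    struct Stats { /* ... */ };

    static std::atomic<Stats*> s_stats { nullptr };

    Stats& statsInstance()
    {
        for (;;) {
            if (Stats* existing = s_stats.load())
                return *existing;

            Stats* fresh = new Stats();
            Stats* expected = nullptr;
            // Only one thread installs its instance; everyone else frees theirs and retries.
            if (s_stats.compare_exchange_weak(expected, fresh))
                return *fresh;
            delete fresh;
        }
    }
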
diff --git a/Source/JavaScriptCore/jit/ICStats.h b/Source/JavaScriptCore/jit/ICStats.h
new file mode 100644
index 000000000..9499c915a
--- /dev/null
+++ b/Source/JavaScriptCore/jit/ICStats.h
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include "ClassInfo.h"
+#include "Identifier.h"
+#include <wtf/Condition.h>
+#include <wtf/HashTable.h>
+#include <wtf/FastMalloc.h>
+#include <wtf/Lock.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/PrintStream.h>
+#include <wtf/Spectrum.h>
+
+namespace JSC {
+
+#define FOR_EACH_ICEVENT_KIND(macro) \
+ macro(InvalidKind) \
+ macro(GetByIdAddAccessCase) \
+ macro(GetByIdReplaceWithJump) \
+ macro(GetByIdSelfPatch) \
+ macro(InAddAccessCase) \
+ macro(InReplaceWithJump) \
+ macro(OperationGetById) \
+ macro(OperationGetByIdGeneric) \
+ macro(OperationGetByIdBuildList) \
+ macro(OperationGetByIdOptimize) \
+ macro(OperationInOptimize) \
+ macro(OperationIn) \
+ macro(OperationGenericIn) \
+ macro(OperationPutByIdStrict) \
+ macro(OperationPutByIdNonStrict) \
+ macro(OperationPutByIdDirectStrict) \
+ macro(OperationPutByIdDirectNonStrict) \
+ macro(OperationPutByIdStrictOptimize) \
+ macro(OperationPutByIdNonStrictOptimize) \
+ macro(OperationPutByIdDirectStrictOptimize) \
+ macro(OperationPutByIdDirectNonStrictOptimize) \
+ macro(OperationPutByIdStrictBuildList) \
+ macro(OperationPutByIdNonStrictBuildList) \
+ macro(OperationPutByIdDirectStrictBuildList) \
+ macro(OperationPutByIdDirectNonStrictBuildList) \
+ macro(PutByIdAddAccessCase) \
+ macro(PutByIdReplaceWithJump) \
+ macro(PutByIdSelfPatch)
+
+class ICEvent {
+public:
+ enum Kind {
+#define ICEVENT_KIND_DECLARATION(name) name,
+ FOR_EACH_ICEVENT_KIND(ICEVENT_KIND_DECLARATION)
+#undef ICEVENT_KIND_DECLARATION
+ };
+
+ ICEvent()
+ {
+ }
+
+ ICEvent(Kind kind, const ClassInfo* classInfo, const Identifier propertyName)
+ : m_kind(kind)
+ , m_classInfo(classInfo)
+ , m_propertyName(propertyName)
+ {
+ }
+
+ ICEvent(WTF::HashTableDeletedValueType)
+ : m_kind(OperationGetById)
+ {
+ }
+
+ bool operator==(const ICEvent& other) const
+ {
+ return m_kind == other.m_kind
+ && m_classInfo == other.m_classInfo
+ && m_propertyName == other.m_propertyName;
+ }
+
+ bool operator!=(const ICEvent& other) const
+ {
+ return !(*this == other);
+ }
+
+ bool operator<(const ICEvent& other) const;
+ bool operator>(const ICEvent& other) const { return other < *this; }
+ bool operator<=(const ICEvent& other) const { return !(*this > other); }
+ bool operator>=(const ICEvent& other) const { return !(*this < other); }
+
+ explicit operator bool() const
+ {
+ return *this != ICEvent();
+ }
+
+ Kind kind() const { return m_kind; }
+ const ClassInfo* classInfo() const { return m_classInfo; }
+ const Identifier& propertyName() const { return m_propertyName; }
+
+ unsigned hash() const
+ {
+ return m_kind + WTF::PtrHash<const ClassInfo*>::hash(m_classInfo) + StringHash::hash(m_propertyName.string());
+ }
+
+ bool isHashTableDeletedValue() const
+ {
+ return *this == ICEvent(WTF::HashTableDeletedValue);
+ }
+
+ void dump(PrintStream&) const;
+
+ void log() const;
+
+private:
+
+ Kind m_kind { InvalidKind };
+ const ClassInfo* m_classInfo { nullptr };
+ Identifier m_propertyName;
+};
+
+struct ICEventHash {
+ static unsigned hash(const ICEvent& key) { return key.hash(); }
+ static bool equal(const ICEvent& a, const ICEvent& b) { return a == b; }
+ static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} // namespace JSC
+
+namespace WTF {
+
+void printInternal(PrintStream&, JSC::ICEvent::Kind);
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::ICEvent> {
+ typedef JSC::ICEventHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::ICEvent> : SimpleClassHashTraits<JSC::ICEvent> {
+ static const bool emptyValueIsZero = false;
+};
+
+} // namespace WTF
+
+namespace JSC {
+
+class ICStats {
+ WTF_MAKE_NONCOPYABLE(ICStats);
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ ICStats();
+ ~ICStats();
+
+ void add(const ICEvent& event);
+
+ static ICStats& instance();
+
+private:
+
+ Spectrum<ICEvent, uint64_t> m_spectrum;
+ ThreadIdentifier m_thread;
+ Lock m_lock;
+ Condition m_condition;
+ bool m_shouldStop { false };
+
+ static Atomic<ICStats*> s_instance;
+};
+
+#define LOG_IC(arguments) do { \
+ if (Options::useICStats()) \
+ (ICEvent arguments).log(); \
+ } while (false)
+
+} // namespace JSC
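Note (not part of the patch): LOG_IC takes its argument list wrapped in an extra set of parentheses, so "ICEvent arguments" expands to a single ICEvent temporary whose log() is called only when Options::useICStats() is set. A hypothetical call site, assuming a helper that already has a ClassInfo* and an Identifier in scope:

namespace JSC {

// Illustrative helper only; not a function from this patch.
static void recordGetByIdEvent(const ClassInfo* classInfo, const Identifier& propertyName)
{
    // Expands (roughly) to:
    //     if (Options::useICStats())
    //         (ICEvent(ICEvent::OperationGetById, classInfo, propertyName)).log();
    LOG_IC((ICEvent::OperationGetById, classInfo, propertyName));
}

} // namespace JSC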
diff --git a/Source/JavaScriptCore/jit/IntrinsicEmitter.cpp b/Source/JavaScriptCore/jit/IntrinsicEmitter.cpp
new file mode 100644
index 000000000..4fb7a5909
--- /dev/null
+++ b/Source/JavaScriptCore/jit/IntrinsicEmitter.cpp
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(JIT)
+
+#include "CCallHelpers.h"
+#include "CallFrame.h"
+#include "CodeBlock.h"
+#include "IntrinsicGetterAccessCase.h"
+#include "JSArrayBufferView.h"
+#include "JSCJSValueInlines.h"
+#include "JSCellInlines.h"
+#include "PolymorphicAccess.h"
+#include "StructureStubInfo.h"
+
+namespace JSC {
+
+typedef CCallHelpers::TrustedImm32 TrustedImm32;
+typedef CCallHelpers::Imm32 Imm32;
+typedef CCallHelpers::TrustedImmPtr TrustedImmPtr;
+typedef CCallHelpers::ImmPtr ImmPtr;
+typedef CCallHelpers::TrustedImm64 TrustedImm64;
+typedef CCallHelpers::Imm64 Imm64;
+
+bool IntrinsicGetterAccessCase::canEmitIntrinsicGetter(JSFunction* getter, Structure* structure)
+{
+
+ switch (getter->intrinsic()) {
+ case TypedArrayByteOffsetIntrinsic:
+ case TypedArrayByteLengthIntrinsic:
+ case TypedArrayLengthIntrinsic: {
+ TypedArrayType type = structure->classInfo()->typedArrayStorageType;
+
+ if (!isTypedView(type))
+ return false;
+
+ return true;
+ }
+ default:
+ return false;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+void IntrinsicGetterAccessCase::emitIntrinsicGetter(AccessGenerationState& state)
+{
+ CCallHelpers& jit = *state.jit;
+ JSValueRegs valueRegs = state.valueRegs;
+ GPRReg baseGPR = state.baseGPR;
+ GPRReg valueGPR = valueRegs.payloadGPR();
+
+ switch (intrinsic()) {
+ case TypedArrayLengthIntrinsic: {
+ jit.load32(MacroAssembler::Address(state.baseGPR, JSArrayBufferView::offsetOfLength()), valueGPR);
+ jit.boxInt32(valueGPR, valueRegs);
+ state.succeed();
+ return;
+ }
+
+ case TypedArrayByteLengthIntrinsic: {
+ TypedArrayType type = structure()->classInfo()->typedArrayStorageType;
+
+ jit.load32(MacroAssembler::Address(state.baseGPR, JSArrayBufferView::offsetOfLength()), valueGPR);
+
+ if (elementSize(type) > 1) {
+ // We can use a bitshift here since TypedArrays cannot have a byteLength that overflows an int32.
+ jit.lshift32(valueGPR, Imm32(logElementSize(type)), valueGPR);
+ }
+
+ jit.boxInt32(valueGPR, valueRegs);
+ state.succeed();
+ return;
+ }
+
+ case TypedArrayByteOffsetIntrinsic: {
+ GPRReg scratchGPR = state.scratchGPR;
+
+ CCallHelpers::Jump emptyByteOffset = jit.branch32(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfMode()),
+ TrustedImm32(WastefulTypedArray));
+
+ jit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
+ jit.loadPtr(MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfVector()), valueGPR);
+ jit.loadPtr(MacroAssembler::Address(scratchGPR, Butterfly::offsetOfArrayBuffer()), scratchGPR);
+ jit.loadPtr(MacroAssembler::Address(scratchGPR, ArrayBuffer::offsetOfData()), scratchGPR);
+ jit.subPtr(scratchGPR, valueGPR);
+
+ CCallHelpers::Jump done = jit.jump();
+
+ emptyByteOffset.link(&jit);
+ jit.move(TrustedImmPtr(0), valueGPR);
+
+ done.link(&jit);
+
+ jit.boxInt32(valueGPR, valueRegs);
+ state.succeed();
+ return;
+ }
+
+ default:
+ break;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
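Note (not part of the patch): the generated getters above inline two small computations. byteLength is the element count shifted left by log2(elementSize), and byteOffset is the distance from the ArrayBuffer's data pointer to the view's vector pointer for a wasteful (buffer-backed) view, or zero otherwise. A plain C++ restatement of that arithmetic, with stand-in names and parameters:

#include <cstddef>
#include <cstdint>

// Illustrative helpers; the names and signatures are not from JSC.
uint32_t typedArrayByteLength(uint32_t length, unsigned logElementSize)
{
    // Safe as a shift: a valid typed array's byteLength cannot overflow an int32.
    return length << logElementSize;
}

ptrdiff_t typedArrayByteOffset(const char* vector, const char* bufferData, bool isWasteful)
{
    // Non-wasteful views embed their storage and report a byte offset of zero.
    return isWasteful ? vector - bufferData : 0;
}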
diff --git a/Source/JavaScriptCore/jit/JIT.cpp b/Source/JavaScriptCore/jit/JIT.cpp
index c3508b01d..e74219b62 100644
--- a/Source/JavaScriptCore/jit/JIT.cpp
+++ b/Source/JavaScriptCore/jit/JIT.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2009, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009, 2012-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,63 +26,75 @@
#include "config.h"
#if ENABLE(JIT)
-#include "JIT.h"
-// This probably does not belong here; adding here for now as a quick Windows build fix.
-#if ENABLE(ASSEMBLER) && CPU(X86) && !OS(MAC_OS_X)
-#include "MacroAssembler.h"
-JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
-#endif
+#include "JIT.h"
+#include "BytecodeGraph.h"
#include "CodeBlock.h"
+#include "CodeBlockWithJITType.h"
#include "DFGCapabilities.h"
#include "Interpreter.h"
#include "JITInlines.h"
#include "JITOperations.h"
#include "JSArray.h"
+#include "JSCInlines.h"
#include "JSFunction.h"
#include "LinkBuffer.h"
-#include "Operations.h"
-#include "RepatchBuffer.h"
+#include "MaxFrameExtentForSlowPathCall.h"
+#include "PCToCodeOriginMap.h"
+#include "ProfilerDatabase.h"
+#include "ProgramCodeBlock.h"
#include "ResultType.h"
-#include "SamplingTool.h"
#include "SlowPathCall.h"
+#include "StackAlignment.h"
+#include "TypeProfilerLog.h"
#include <wtf/CryptographicallyRandomNumber.h>
+#include <wtf/GraphNodeWorklist.h>
+#include <wtf/SimpleStats.h>
using namespace std;
namespace JSC {
-void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
-{
- RepatchBuffer repatchBuffer(codeblock);
- repatchBuffer.relinkNearCallerToTrampoline(returnAddress, newCalleeFunction);
-}
+double totalBaselineCompileTime;
+double totalDFGCompileTime;
+double totalFTLCompileTime;
+double totalFTLDFGCompileTime;
+double totalFTLB3CompileTime;
-void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
+void ctiPatchCallByReturnAddress(ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
{
- RepatchBuffer repatchBuffer(codeblock);
- repatchBuffer.relinkCallerToTrampoline(returnAddress, newCalleeFunction);
+ MacroAssembler::repatchCall(
+ CodeLocationCall(MacroAssemblerCodePtr(returnAddress)),
+ newCalleeFunction);
}
-void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
+JIT::CodeRef JIT::compileCTINativeCall(VM* vm, NativeFunction func)
{
- RepatchBuffer repatchBuffer(codeblock);
- repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
+ if (!vm->canUseJIT())
+ return CodeRef::createLLIntCodeRef(llint_native_call_trampoline);
+ JIT jit(vm, 0);
+ return jit.privateCompileCTINativeCall(vm, func);
}
-JIT::JIT(VM* vm, CodeBlock* codeBlock)
+JIT::JIT(VM* vm, CodeBlock* codeBlock, unsigned loopOSREntryBytecodeOffset)
: JSInterfaceJIT(vm, codeBlock)
, m_interpreter(vm->interpreter)
, m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0)
- , m_bytecodeOffset((unsigned)-1)
+ , m_bytecodeOffset(std::numeric_limits<unsigned>::max())
, m_getByIdIndex(UINT_MAX)
, m_putByIdIndex(UINT_MAX)
, m_byValInstructionIndex(UINT_MAX)
, m_callLinkInfoIndex(UINT_MAX)
, m_randomGenerator(cryptographicallyRandomNumber())
+ , m_pcToCodeOriginMapBuilder(*vm)
, m_canBeOptimized(false)
, m_shouldEmitProfiling(false)
+ , m_loopOSREntryBytecodeOffset(loopOSREntryBytecodeOffset)
+{
+}
+
+JIT::~JIT()
{
}
@@ -96,27 +108,61 @@ void JIT::emitEnterOptimizationCheck()
skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
ASSERT(!m_bytecodeOffset);
+
+ copyCalleeSavesFromFrameOrRegisterToVMEntryFrameCalleeSavesBuffer();
+
callOperation(operationOptimize, m_bytecodeOffset);
skipOptimize.append(branchTestPtr(Zero, returnValueGPR));
+ move(returnValueGPR2, stackPointerRegister);
jump(returnValueGPR);
skipOptimize.link(this);
}
#endif
+void JIT::emitNotifyWrite(WatchpointSet* set)
+{
+ if (!set || set->state() == IsInvalidated) {
+ addSlowCase(Jump());
+ return;
+ }
+
+ addSlowCase(branch8(NotEqual, AbsoluteAddress(set->addressOfState()), TrustedImm32(IsInvalidated)));
+}
+
+void JIT::emitNotifyWrite(GPRReg pointerToSet)
+{
+ addSlowCase(branch8(NotEqual, Address(pointerToSet, WatchpointSet::offsetOfState()), TrustedImm32(IsInvalidated)));
+}
+
+void JIT::assertStackPointerOffset()
+{
+ if (ASSERT_DISABLED)
+ return;
+
+ addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT0);
+ Jump ok = branchPtr(Equal, regT0, stackPointerRegister);
+ breakpoint();
+ ok.link(this);
+}
+
#define NEXT_OPCODE(name) \
m_bytecodeOffset += OPCODE_LENGTH(name); \
break;
#define DEFINE_SLOW_OP(name) \
case op_##name: { \
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_##name); \
- slowPathCall.call(); \
+ if (m_bytecodeOffset >= startBytecodeOffset) { \
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_##name); \
+ slowPathCall.call(); \
+ } \
NEXT_OPCODE(op_##name); \
}
#define DEFINE_OP(name) \
case name: { \
- emit_##name(currentInstruction); \
+ if (m_bytecodeOffset >= startBytecodeOffset) { \
+ emit_##name(currentInstruction); \
+ } \
NEXT_OPCODE(name); \
}
@@ -128,17 +174,60 @@ void JIT::emitEnterOptimizationCheck()
void JIT::privateCompileMainPass()
{
+ if (false)
+ dataLog("Compiling ", *m_codeBlock, "\n");
+
+ jitAssertTagsInPlace();
+ jitAssertArgumentCountSane();
+
Instruction* instructionsBegin = m_codeBlock->instructions().begin();
- unsigned instructionCount = m_codeBlock->instructions().size();
+ unsigned instructionCount = m_instructions.size();
m_callLinkInfoIndex = 0;
+ unsigned startBytecodeOffset = 0;
+ if (m_loopOSREntryBytecodeOffset && m_codeBlock->inherits(*m_codeBlock->vm(), ProgramCodeBlock::info())) {
+ // We can only do this optimization because we execute ProgramCodeBlocks exactly once.
+ // This optimization would be invalid otherwise. When the LLInt determines it wants to
+ // do OSR entry into the baseline JIT in a loop, it will pass in the bytecode offset it
+ // was executing at when it kicked off our compilation. We only need to compile code for
+ // anything reachable from that bytecode offset.
+
+ // We only bother building the bytecode graph if it could save time and executable
+ // memory. We pick an arbitrary offset where we deem this to be profitable.
+ if (m_loopOSREntryBytecodeOffset >= 200) {
+ // As a simplification, we don't find all bytecode ranges that are unreachable.
+ // Instead, we just find the minimum bytecode offset that is reachable, and
+ // compile code from that bytecode offset onwards.
+
+ BytecodeGraph<CodeBlock> graph(m_codeBlock, m_instructions);
+ BytecodeBasicBlock* block = graph.findBasicBlockForBytecodeOffset(m_loopOSREntryBytecodeOffset);
+ RELEASE_ASSERT(block);
+
+ GraphNodeWorklist<BytecodeBasicBlock*> worklist;
+ startBytecodeOffset = UINT_MAX;
+ worklist.push(block);
+ while (BytecodeBasicBlock* block = worklist.pop()) {
+ startBytecodeOffset = std::min(startBytecodeOffset, block->leaderOffset());
+ worklist.pushAll(block->successors());
+ }
+ }
+ }
+
for (m_bytecodeOffset = 0; m_bytecodeOffset < instructionCount; ) {
+ if (m_bytecodeOffset == startBytecodeOffset && startBytecodeOffset > 0) {
+ // We've proven that all bytecode instructions up to this point are unreachable.
+ // Enforce that by crashing if this code is ever reached.
+ breakpoint();
+ }
+
if (m_disassembler)
m_disassembler->setForBytecodeMainPath(m_bytecodeOffset, label());
Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);
+ m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeOffset));
+
#if ENABLE(OPCODE_SAMPLING)
if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
sampleInstruction(currentInstruction);
@@ -158,55 +247,72 @@ void JIT::privateCompileMainPass()
AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin(
m_compilation->bytecodes(), m_bytecodeOffset)))->address()));
}
+
+ if (Options::eagerlyUpdateTopCallFrame())
+ updateTopCallFrame();
+
+ unsigned bytecodeOffset = m_bytecodeOffset;
switch (opcodeID) {
- DEFINE_SLOW_OP(del_by_val)
DEFINE_SLOW_OP(in)
DEFINE_SLOW_OP(less)
DEFINE_SLOW_OP(lesseq)
DEFINE_SLOW_OP(greater)
DEFINE_SLOW_OP(greatereq)
DEFINE_SLOW_OP(is_function)
- DEFINE_SLOW_OP(is_object)
+ DEFINE_SLOW_OP(is_object_or_null)
DEFINE_SLOW_OP(typeof)
- DEFINE_OP(op_touch_entry)
DEFINE_OP(op_add)
DEFINE_OP(op_bitand)
DEFINE_OP(op_bitor)
DEFINE_OP(op_bitxor)
DEFINE_OP(op_call)
+ DEFINE_OP(op_tail_call)
DEFINE_OP(op_call_eval)
DEFINE_OP(op_call_varargs)
+ DEFINE_OP(op_tail_call_varargs)
+ DEFINE_OP(op_tail_call_forward_arguments)
+ DEFINE_OP(op_construct_varargs)
DEFINE_OP(op_catch)
DEFINE_OP(op_construct)
- DEFINE_OP(op_get_callee)
DEFINE_OP(op_create_this)
DEFINE_OP(op_to_this)
- DEFINE_OP(op_init_lazy_reg)
- DEFINE_OP(op_create_arguments)
+ DEFINE_OP(op_create_direct_arguments)
+ DEFINE_OP(op_create_scoped_arguments)
+ DEFINE_OP(op_create_cloned_arguments)
+ DEFINE_OP(op_get_argument)
+ DEFINE_OP(op_argument_count)
+ DEFINE_OP(op_create_rest)
+ DEFINE_OP(op_get_rest_length)
+ DEFINE_OP(op_check_tdz)
+ DEFINE_OP(op_assert)
DEFINE_OP(op_debug)
DEFINE_OP(op_del_by_id)
+ DEFINE_OP(op_del_by_val)
DEFINE_OP(op_div)
DEFINE_OP(op_end)
DEFINE_OP(op_enter)
- DEFINE_OP(op_create_activation)
+ DEFINE_OP(op_get_scope)
DEFINE_OP(op_eq)
DEFINE_OP(op_eq_null)
- case op_get_by_id_out_of_line:
+ DEFINE_OP(op_try_get_by_id)
case op_get_array_length:
+ case op_get_by_id_proto_load:
+ case op_get_by_id_unset:
DEFINE_OP(op_get_by_id)
- DEFINE_OP(op_get_arguments_length)
+ DEFINE_OP(op_get_by_id_with_this)
DEFINE_OP(op_get_by_val)
- DEFINE_OP(op_get_argument_by_val)
- DEFINE_OP(op_get_by_pname)
- DEFINE_OP(op_get_pnames)
- DEFINE_OP(op_check_has_instance)
+ DEFINE_OP(op_get_by_val_with_this)
+ DEFINE_OP(op_overrides_has_instance)
DEFINE_OP(op_instanceof)
+ DEFINE_OP(op_instanceof_custom)
+ DEFINE_OP(op_is_empty)
DEFINE_OP(op_is_undefined)
DEFINE_OP(op_is_boolean)
DEFINE_OP(op_is_number)
- DEFINE_OP(op_is_string)
+ DEFINE_OP(op_is_object)
+ DEFINE_OP(op_is_cell_with_type)
DEFINE_OP(op_jeq_null)
DEFINE_OP(op_jfalse)
DEFINE_OP(op_jmp)
@@ -222,9 +328,9 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_jngreatereq)
DEFINE_OP(op_jtrue)
DEFINE_OP(op_loop_hint)
+ DEFINE_OP(op_watchdog)
DEFINE_OP(op_lshift)
DEFINE_OP(op_mod)
- DEFINE_OP(op_captured_mov)
DEFINE_OP(op_mov)
DEFINE_OP(op_mul)
DEFINE_OP(op_negate)
@@ -233,80 +339,88 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_new_array)
DEFINE_OP(op_new_array_with_size)
DEFINE_OP(op_new_array_buffer)
+ DEFINE_OP(op_new_array_with_spread)
+ DEFINE_OP(op_spread)
DEFINE_OP(op_new_func)
- DEFINE_OP(op_new_captured_func)
DEFINE_OP(op_new_func_exp)
+ DEFINE_OP(op_new_generator_func)
+ DEFINE_OP(op_new_generator_func_exp)
+ DEFINE_OP(op_new_async_func)
+ DEFINE_OP(op_new_async_func_exp)
DEFINE_OP(op_new_object)
DEFINE_OP(op_new_regexp)
- DEFINE_OP(op_next_pname)
DEFINE_OP(op_not)
DEFINE_OP(op_nstricteq)
- DEFINE_OP(op_pop_scope)
DEFINE_OP(op_dec)
DEFINE_OP(op_inc)
- DEFINE_OP(op_profile_did_call)
- DEFINE_OP(op_profile_will_call)
- DEFINE_OP(op_push_name_scope)
+ DEFINE_OP(op_pow)
+ DEFINE_OP(op_profile_type)
+ DEFINE_OP(op_profile_control_flow)
DEFINE_OP(op_push_with_scope)
- case op_put_by_id_out_of_line:
- case op_put_by_id_transition_direct:
- case op_put_by_id_transition_normal:
- case op_put_by_id_transition_direct_out_of_line:
- case op_put_by_id_transition_normal_out_of_line:
+ DEFINE_OP(op_create_lexical_environment)
+ DEFINE_OP(op_get_parent_scope)
DEFINE_OP(op_put_by_id)
+ DEFINE_OP(op_put_by_id_with_this)
DEFINE_OP(op_put_by_index)
case op_put_by_val_direct:
DEFINE_OP(op_put_by_val)
- DEFINE_OP(op_put_getter_setter)
- case op_init_global_const_nop:
- NEXT_OPCODE(op_init_global_const_nop);
- DEFINE_OP(op_init_global_const)
+ DEFINE_OP(op_put_by_val_with_this)
+ DEFINE_OP(op_put_getter_by_id)
+ DEFINE_OP(op_put_setter_by_id)
+ DEFINE_OP(op_put_getter_setter_by_id)
+ DEFINE_OP(op_put_getter_by_val)
+ DEFINE_OP(op_put_setter_by_val)
+ DEFINE_OP(op_define_data_property)
+ DEFINE_OP(op_define_accessor_property)
DEFINE_OP(op_ret)
- DEFINE_OP(op_ret_object_or_this)
DEFINE_OP(op_rshift)
DEFINE_OP(op_unsigned)
DEFINE_OP(op_urshift)
+ DEFINE_OP(op_set_function_name)
DEFINE_OP(op_strcat)
DEFINE_OP(op_stricteq)
DEFINE_OP(op_sub)
DEFINE_OP(op_switch_char)
DEFINE_OP(op_switch_imm)
DEFINE_OP(op_switch_string)
- DEFINE_OP(op_tear_off_activation)
- DEFINE_OP(op_tear_off_arguments)
DEFINE_OP(op_throw)
DEFINE_OP(op_throw_static_error)
DEFINE_OP(op_to_number)
+ DEFINE_OP(op_to_string)
DEFINE_OP(op_to_primitive)
DEFINE_OP(op_resolve_scope)
DEFINE_OP(op_get_from_scope)
DEFINE_OP(op_put_to_scope)
-
- case op_get_by_id_chain:
- case op_get_by_id_generic:
- case op_get_by_id_proto:
- case op_get_by_id_self:
- case op_get_by_id_getter_chain:
- case op_get_by_id_getter_proto:
- case op_get_by_id_getter_self:
- case op_get_by_id_custom_chain:
- case op_get_by_id_custom_proto:
- case op_get_by_id_custom_self:
- case op_get_string_length:
- case op_put_by_id_generic:
- case op_put_by_id_replace:
- case op_put_by_id_transition:
+ DEFINE_OP(op_get_from_arguments)
+ DEFINE_OP(op_put_to_arguments)
+
+ DEFINE_OP(op_get_enumerable_length)
+ DEFINE_OP(op_has_generic_property)
+ DEFINE_OP(op_has_structure_property)
+ DEFINE_OP(op_has_indexed_property)
+ DEFINE_OP(op_get_direct_pname)
+ DEFINE_OP(op_get_property_enumerator)
+ DEFINE_OP(op_enumerator_structure_pname)
+ DEFINE_OP(op_enumerator_generic_pname)
+ DEFINE_OP(op_to_index_string)
+
+ DEFINE_OP(op_log_shadow_chicken_prologue)
+ DEFINE_OP(op_log_shadow_chicken_tail)
+ default:
RELEASE_ASSERT_NOT_REACHED();
}
+
+ if (false)
+ dataLog("At ", bytecodeOffset, ": ", m_slowCases.size(), "\n");
}
- RELEASE_ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());
+ RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());
#ifndef NDEBUG
// Reset this, in order to guard its use with ASSERTs.
- m_bytecodeOffset = (unsigned)-1;
+ m_bytecodeOffset = std::numeric_limits<unsigned>::max();
#endif
}
@@ -338,6 +452,8 @@ void JIT::privateCompileSlowCases()
for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
m_bytecodeOffset = iter->to;
+ m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeOffset));
+
unsigned firstTo = m_bytecodeOffset;
Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
@@ -359,25 +475,26 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_bitor)
DEFINE_SLOWCASE_OP(op_bitxor)
DEFINE_SLOWCASE_OP(op_call)
+ DEFINE_SLOWCASE_OP(op_tail_call)
DEFINE_SLOWCASE_OP(op_call_eval)
DEFINE_SLOWCASE_OP(op_call_varargs)
+ DEFINE_SLOWCASE_OP(op_tail_call_varargs)
+ DEFINE_SLOWCASE_OP(op_tail_call_forward_arguments)
+ DEFINE_SLOWCASE_OP(op_construct_varargs)
DEFINE_SLOWCASE_OP(op_construct)
DEFINE_SLOWCASE_OP(op_to_this)
+ DEFINE_SLOWCASE_OP(op_check_tdz)
DEFINE_SLOWCASE_OP(op_create_this)
- DEFINE_SLOWCASE_OP(op_captured_mov)
DEFINE_SLOWCASE_OP(op_div)
DEFINE_SLOWCASE_OP(op_eq)
- DEFINE_SLOWCASE_OP(op_get_callee)
- case op_get_by_id_out_of_line:
+ DEFINE_SLOWCASE_OP(op_try_get_by_id)
case op_get_array_length:
+ case op_get_by_id_proto_load:
+ case op_get_by_id_unset:
DEFINE_SLOWCASE_OP(op_get_by_id)
- DEFINE_SLOWCASE_OP(op_get_arguments_length)
DEFINE_SLOWCASE_OP(op_get_by_val)
- DEFINE_SLOWCASE_OP(op_get_argument_by_val)
- DEFINE_SLOWCASE_OP(op_get_by_pname)
- DEFINE_SLOWCASE_OP(op_check_has_instance)
DEFINE_SLOWCASE_OP(op_instanceof)
- DEFINE_SLOWCASE_OP(op_jfalse)
+ DEFINE_SLOWCASE_OP(op_instanceof_custom)
DEFINE_SLOWCASE_OP(op_jless)
DEFINE_SLOWCASE_OP(op_jlesseq)
DEFINE_SLOWCASE_OP(op_jgreater)
@@ -386,8 +503,8 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_jnlesseq)
DEFINE_SLOWCASE_OP(op_jngreater)
DEFINE_SLOWCASE_OP(op_jngreatereq)
- DEFINE_SLOWCASE_OP(op_jtrue)
DEFINE_SLOWCASE_OP(op_loop_hint)
+ DEFINE_SLOWCASE_OP(op_watchdog)
DEFINE_SLOWCASE_OP(op_lshift)
DEFINE_SLOWCASE_OP(op_mod)
DEFINE_SLOWCASE_OP(op_mul)
@@ -398,11 +515,6 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_nstricteq)
DEFINE_SLOWCASE_OP(op_dec)
DEFINE_SLOWCASE_OP(op_inc)
- case op_put_by_id_out_of_line:
- case op_put_by_id_transition_direct:
- case op_put_by_id_transition_normal:
- case op_put_by_id_transition_direct_out_of_line:
- case op_put_by_id_transition_normal_out_of_line:
DEFINE_SLOWCASE_OP(op_put_by_id)
case op_put_by_val_direct:
DEFINE_SLOWCASE_OP(op_put_by_val)
@@ -412,7 +524,11 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_stricteq)
DEFINE_SLOWCASE_OP(op_sub)
DEFINE_SLOWCASE_OP(op_to_number)
+ DEFINE_SLOWCASE_OP(op_to_string)
DEFINE_SLOWCASE_OP(op_to_primitive)
+ DEFINE_SLOWCASE_OP(op_has_indexed_property)
+ DEFINE_SLOWCASE_OP(op_has_structure_property)
+ DEFINE_SLOWCASE_OP(op_get_direct_pname)
DEFINE_SLOWCASE_OP(op_resolve_scope)
DEFINE_SLOWCASE_OP(op_get_from_scope)
@@ -422,6 +538,9 @@ void JIT::privateCompileSlowCases()
RELEASE_ASSERT_NOT_REACHED();
}
+ if (false)
+ dataLog("At ", firstTo, " slow: ", iter - m_slowCases.begin(), "\n");
+
RELEASE_ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
RELEASE_ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");
@@ -433,17 +552,26 @@ void JIT::privateCompileSlowCases()
RELEASE_ASSERT(m_getByIdIndex == m_getByIds.size());
RELEASE_ASSERT(m_putByIdIndex == m_putByIds.size());
- RELEASE_ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());
+ RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());
RELEASE_ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());
#ifndef NDEBUG
// Reset this, in order to guard its use with ASSERTs.
- m_bytecodeOffset = (unsigned)-1;
+ m_bytecodeOffset = std::numeric_limits<unsigned>::max();
#endif
}
-CompilationResult JIT::privateCompile(JITCompilationEffort effort)
+void JIT::compileWithoutLinking(JITCompilationEffort effort)
{
+ double before = 0;
+ if (UNLIKELY(computeCompileTimes()))
+ before = monotonicallyIncreasingTimeMS();
+
+ {
+ ConcurrentJSLocker locker(m_codeBlock->m_lock);
+ m_instructions = m_codeBlock->instructions().clone();
+ }
+
DFG::CapabilityLevel level = m_codeBlock->capabilityLevel();
switch (level) {
case DFG::CannotCompile:
@@ -451,11 +579,6 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
m_canBeOptimizedOrInlined = false;
m_shouldEmitProfiling = false;
break;
- case DFG::CanInline:
- m_canBeOptimized = false;
- m_canBeOptimizedOrInlined = true;
- m_shouldEmitProfiling = true;
- break;
case DFG::CanCompile:
case DFG::CanCompileAndInline:
m_canBeOptimized = true;
@@ -469,6 +592,7 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
switch (m_codeBlock->codeType()) {
case GlobalCode:
+ case ModuleCode:
case EvalCode:
m_codeBlock->m_shouldAlwaysBeInlined = false;
break;
@@ -478,9 +602,9 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
m_codeBlock->m_shouldAlwaysBeInlined &= canInline(level) && DFG::mightInlineFunction(m_codeBlock);
break;
}
-
- if (Options::showDisassembly() || m_vm->m_perBytecodeProfiler)
- m_disassembler = adoptPtr(new JITDisassembler(m_codeBlock));
+
+ if (Options::dumpDisassembly() || (m_vm->m_perBytecodeProfiler && Options::disassembleBaselineForProfiler()))
+ m_disassembler = std::make_unique<JITDisassembler>(m_codeBlock);
if (m_vm->m_perBytecodeProfiler) {
m_compilation = adoptRef(
new Profiler::Compilation(
@@ -489,6 +613,8 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock);
}
+ m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(0, nullptr));
+
if (m_disassembler)
m_disassembler->setStartOfCode(label());
@@ -496,9 +622,8 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
if (m_randomGenerator.getUint32() & 1)
nop();
- preserveReturnAddressAfterCall(regT2);
- emitPutReturnPCToCallFrameHeader(regT2);
- emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
+ emitFunctionPrologue();
+ emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock);
Label beginLabel(this);
@@ -507,9 +632,8 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
sampleInstruction(m_codeBlock->instructions().begin());
#endif
- Jump stackCheck;
if (m_codeBlock->codeType() == FunctionCode) {
- ASSERT(m_bytecodeOffset == (unsigned)-1);
+ ASSERT(m_bytecodeOffset == std::numeric_limits<unsigned>::max());
if (shouldEmitProfiling()) {
for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
// If this is a constructor, then we want to put in a dummy profiling site (to
@@ -526,49 +650,55 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument));
}
}
-
- addPtr(TrustedImm32(virtualRegisterForLocal(frameRegisterCountFor(m_codeBlock)).offset() * sizeof(Register)), callFrameRegister, regT1);
- stackCheck = branchPtr(Above, AbsoluteAddress(m_vm->addressOfJSStackLimit()), regT1);
}
- Label functionBody = label();
+ addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT1);
+ Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfSoftStackLimit()), regT1);
+
+ move(regT1, stackPointerRegister);
+ checkStackPointerAlignment();
+
+ emitSaveCalleeSaves();
+ emitMaterializeTagCheckRegisters();
+ RELEASE_ASSERT(!JITCode::isJIT(m_codeBlock->jitType()));
+
privateCompileMainPass();
privateCompileLinkPass();
privateCompileSlowCases();
if (m_disassembler)
m_disassembler->setEndOfSlowPath(label());
+ m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());
- Label arityCheck;
- if (m_codeBlock->codeType() == FunctionCode) {
- stackCheck.link(this);
- m_bytecodeOffset = 0;
- callOperationWithCallFrameRollbackOnException(operationStackCheck, m_codeBlock);
-#ifndef NDEBUG
- m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
-#endif
- jump(functionBody);
+ stackOverflow.link(this);
+ m_bytecodeOffset = 0;
+ if (maxFrameExtentForSlowPathCall)
+ addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
+ callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);
- arityCheck = label();
+ if (m_codeBlock->codeType() == FunctionCode) {
+ m_arityCheck = label();
store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined);
- preserveReturnAddressAfterCall(regT2);
- emitPutReturnPCToCallFrameHeader(regT2);
- emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
+ emitFunctionPrologue();
+ emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock);
- load32(payloadFor(JSStack::ArgumentCount), regT1);
+ load32(payloadFor(CallFrameSlot::argumentCount), regT1);
branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);
m_bytecodeOffset = 0;
+ if (maxFrameExtentForSlowPathCall)
+ addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck);
- if (returnValueGPR != regT0)
- move(returnValueGPR, regT0);
- branchTest32(Zero, regT0).linkTo(beginLabel, this);
- emitNakedCall(m_vm->getCTIStub(arityFixup).code());
+ if (maxFrameExtentForSlowPathCall)
+ addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
+ branchTest32(Zero, returnValueGPR).linkTo(beginLabel, this);
+ move(returnValueGPR, GPRInfo::argumentGPR0);
+ emitNakedCall(m_vm->getCTIStub(arityFixupGenerator).code());
#if !ASSERT_DISABLED
- m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
+ m_bytecodeOffset = std::numeric_limits<unsigned>::max(); // Reset this, in order to guard its use with ASSERTs.
#endif
jump(beginLabel);
@@ -580,14 +710,33 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
if (m_disassembler)
m_disassembler->setEndOfCode(label());
+ m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());
+
+ m_linkBuffer = std::unique_ptr<LinkBuffer>(new LinkBuffer(*m_vm, *this, m_codeBlock, effort));
- LinkBuffer patchBuffer(*m_vm, this, m_codeBlock, effort);
+ double after = 0;
+ if (UNLIKELY(computeCompileTimes())) {
+ after = monotonicallyIncreasingTimeMS();
+
+ if (Options::reportTotalCompileTimes())
+ totalBaselineCompileTime += after - before;
+ }
+ if (UNLIKELY(reportCompileTimes())) {
+ CString codeBlockName = toCString(*m_codeBlock);
+
+ dataLog("Optimized ", codeBlockName, " with Baseline JIT into ", m_linkBuffer->size(), " bytes in ", after - before, " ms.\n");
+ }
+}
+
+CompilationResult JIT::link()
+{
+ LinkBuffer& patchBuffer = *m_linkBuffer;
+
if (patchBuffer.didFailToAllocate())
return CompilationFailed;
// Translate vPC offsets into addresses in JIT generated code, for switch tables.
- for (unsigned i = 0; i < m_switches.size(); ++i) {
- SwitchRecord record = m_switches[i];
+ for (auto& record : m_switches) {
unsigned bytecodeOffset = record.bytecodeOffset;
if (record.type != SwitchRecord::String) {
@@ -605,10 +754,9 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
record.jumpTable.stringJumpTable->ctiDefault = patchBuffer.locationOf(m_labels[bytecodeOffset + record.defaultOffset]);
- StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
- for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
- unsigned offset = it->value.branchOffset;
- it->value.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
+ for (auto& location : record.jumpTable.stringJumpTable->offsetTable.values()) {
+ unsigned offset = location.branchOffset;
+ location.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
}
}
}
@@ -618,9 +766,9 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
handler.nativeCode = patchBuffer.locationOf(m_labels[handler.target]);
}
- for (Vector<CallRecord>::iterator iter = m_calls.begin(); iter != m_calls.end(); ++iter) {
- if (iter->to)
- patchBuffer.link(iter->from, FunctionPtr(iter->to));
+ for (auto& record : m_calls) {
+ if (record.to)
+ patchBuffer.link(record.from, FunctionPtr(record.to));
}
for (unsigned i = m_getByIds.size(); i--;)
@@ -628,68 +776,77 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
for (unsigned i = m_putByIds.size(); i--;)
m_putByIds[i].finalize(patchBuffer);
- m_codeBlock->setNumberOfByValInfos(m_byValCompilationInfo.size());
- for (unsigned i = 0; i < m_byValCompilationInfo.size(); ++i) {
- CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(m_byValCompilationInfo[i].badTypeJump));
- CodeLocationLabel doneTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].doneTarget);
- CodeLocationLabel slowPathTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].slowPathTarget);
- CodeLocationCall returnAddress = patchBuffer.locationOf(m_byValCompilationInfo[i].returnAddress);
-
- m_codeBlock->byValInfo(i) = ByValInfo(
- m_byValCompilationInfo[i].bytecodeIndex,
- badTypeJump,
- m_byValCompilationInfo[i].arrayMode,
- differenceBetweenCodePtr(badTypeJump, doneTarget),
- differenceBetweenCodePtr(returnAddress, slowPathTarget));
+ if (m_byValCompilationInfo.size()) {
+ CodeLocationLabel exceptionHandler = patchBuffer.locationOf(m_exceptionHandler);
+
+ for (const auto& byValCompilationInfo : m_byValCompilationInfo) {
+ PatchableJump patchableNotIndexJump = byValCompilationInfo.notIndexJump;
+ CodeLocationJump notIndexJump = CodeLocationJump();
+ if (Jump(patchableNotIndexJump).isSet())
+ notIndexJump = CodeLocationJump(patchBuffer.locationOf(patchableNotIndexJump));
+ CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(byValCompilationInfo.badTypeJump));
+ CodeLocationLabel doneTarget = patchBuffer.locationOf(byValCompilationInfo.doneTarget);
+ CodeLocationLabel nextHotPathTarget = patchBuffer.locationOf(byValCompilationInfo.nextHotPathTarget);
+ CodeLocationLabel slowPathTarget = patchBuffer.locationOf(byValCompilationInfo.slowPathTarget);
+ CodeLocationCall returnAddress = patchBuffer.locationOf(byValCompilationInfo.returnAddress);
+
+ *byValCompilationInfo.byValInfo = ByValInfo(
+ byValCompilationInfo.bytecodeIndex,
+ notIndexJump,
+ badTypeJump,
+ exceptionHandler,
+ byValCompilationInfo.arrayMode,
+ byValCompilationInfo.arrayProfile,
+ differenceBetweenCodePtr(badTypeJump, doneTarget),
+ differenceBetweenCodePtr(badTypeJump, nextHotPathTarget),
+ differenceBetweenCodePtr(returnAddress, slowPathTarget));
+ }
}
- m_codeBlock->setNumberOfCallLinkInfos(m_callStructureStubCompilationInfo.size());
- for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) {
- CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
- info.callType = m_callStructureStubCompilationInfo[i].callType;
- info.codeOrigin = CodeOrigin(m_callStructureStubCompilationInfo[i].bytecodeIndex);
- info.callReturnLocation = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation);
- info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin);
- info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther);
- info.calleeGPR = regT0;
+
+ for (auto& compilationInfo : m_callCompilationInfo) {
+ CallLinkInfo& info = *compilationInfo.callLinkInfo;
+ info.setCallLocations(
+ CodeLocationLabel(patchBuffer.locationOfNearCall(compilationInfo.callReturnLocation)),
+ CodeLocationLabel(patchBuffer.locationOf(compilationInfo.hotPathBegin)),
+ patchBuffer.locationOfNearCall(compilationInfo.hotPathOther));
}
-#if ENABLE(DFG_JIT) || ENABLE(LLINT)
- if (canBeOptimizedOrInlined()
-#if ENABLE(LLINT)
- || true
-#endif
- ) {
- CompactJITCodeMap::Encoder jitCodeMapEncoder;
- for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
- if (m_labels[bytecodeOffset].isSet())
- jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset]));
- }
- m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish());
+ CompactJITCodeMap::Encoder jitCodeMapEncoder;
+ for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
+ if (m_labels[bytecodeOffset].isSet())
+ jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset]));
}
-#endif
+ m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish());
MacroAssemblerCodePtr withArityCheck;
if (m_codeBlock->codeType() == FunctionCode)
- withArityCheck = patchBuffer.locationOf(arityCheck);
+ withArityCheck = patchBuffer.locationOf(m_arityCheck);
- if (Options::showDisassembly())
+ if (Options::dumpDisassembly()) {
m_disassembler->dump(patchBuffer);
+ patchBuffer.didAlreadyDisassemble();
+ }
if (m_compilation) {
- m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer);
- m_vm->m_perBytecodeProfiler->addCompilation(m_compilation);
+ if (Options::disassembleBaselineForProfiler())
+ m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer);
+ m_vm->m_perBytecodeProfiler->addCompilation(m_codeBlock, *m_compilation);
}
+
+ if (m_pcToCodeOriginMapBuilder.didBuildMapping())
+ m_codeBlock->setPCToCodeOriginMap(std::make_unique<PCToCodeOriginMap>(WTFMove(m_pcToCodeOriginMapBuilder), patchBuffer));
- CodeRef result = patchBuffer.finalizeCodeWithoutDisassembly();
+ CodeRef result = FINALIZE_CODE(
+ patchBuffer,
+ ("Baseline JIT code for %s", toCString(CodeBlockWithJITType(m_codeBlock, JITCode::BaselineJIT)).data()));
- m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.add(
+ m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT->add(
static_cast<double>(result.size()) /
- static_cast<double>(m_codeBlock->instructions().size()));
-
+ static_cast<double>(m_instructions.size()));
+
m_codeBlock->shrinkToFit(CodeBlock::LateShrink);
m_codeBlock->setJITCode(
- adoptRef(new DirectJITCode(result, JITCode::BaselineJIT)),
- withArityCheck);
-
+ adoptRef(*new DirectJITCode(result, withArityCheck, JITCode::BaselineJIT)));
+
#if ENABLE(JIT_VERBOSE)
dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end());
#endif
@@ -697,72 +854,100 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
return CompilationSuccessful;
}
-void JIT::linkFor(ExecState* exec, JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, VM* vm, CodeSpecializationKind kind)
+CompilationResult JIT::privateCompile(JITCompilationEffort effort)
{
- RepatchBuffer repatchBuffer(callerCodeBlock);
-
- ASSERT(!callLinkInfo->isLinked());
- callLinkInfo->callee.set(*vm, callLinkInfo->hotPathBegin, callerCodeBlock->ownerExecutable(), callee);
- callLinkInfo->lastSeenCallee.set(*vm, callerCodeBlock->ownerExecutable(), callee);
- repatchBuffer.relink(callLinkInfo->hotPathOther, code);
-
- if (calleeCodeBlock)
- calleeCodeBlock->linkIncomingCall(exec, callLinkInfo);
-
- // Patch the slow patch so we do not continue to try to link.
- if (kind == CodeForCall) {
- ASSERT(callLinkInfo->callType == CallLinkInfo::Call
- || callLinkInfo->callType == CallLinkInfo::CallVarargs);
- if (callLinkInfo->callType == CallLinkInfo::Call) {
- repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(linkClosureCallThunkGenerator).code());
- return;
- }
-
- repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(virtualCallThunkGenerator).code());
- return;
- }
-
- ASSERT(kind == CodeForConstruct);
- repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(virtualConstructThunkGenerator).code());
+ doMainThreadPreparationBeforeCompile();
+ compileWithoutLinking(effort);
+ return link();
}
-void JIT::linkSlowCall(CodeBlock* callerCodeBlock, CallLinkInfo* callLinkInfo)
+void JIT::privateCompileExceptionHandlers()
{
- RepatchBuffer repatchBuffer(callerCodeBlock);
+ if (!m_exceptionChecksWithCallFrameRollback.empty()) {
+ m_exceptionChecksWithCallFrameRollback.link(this);
- repatchBuffer.relink(callLinkInfo->callReturnLocation, callerCodeBlock->vm()->getCTIStub(virtualCallThunkGenerator).code());
-}
+ copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
-void JIT::privateCompileExceptionHandlers()
-{
- if (m_exceptionChecks.empty() && m_exceptionChecksWithCallFrameRollback.empty())
- return;
+ // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).
- Jump doLookup;
+ move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
- if (!m_exceptionChecksWithCallFrameRollback.empty()) {
- m_exceptionChecksWithCallFrameRollback.link(this);
- emitGetCallerFrameFromCallFrameHeaderPtr(GPRInfo::argumentGPR0);
- doLookup = jump();
+#if CPU(X86)
+ // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
+ poke(GPRInfo::argumentGPR0);
+ poke(GPRInfo::argumentGPR1, 1);
+#endif
+ m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandlerFromCallerFrame).value()));
+ jumpToExceptionHandler();
}
- if (!m_exceptionChecks.empty())
+ if (!m_exceptionChecks.empty() || m_byValCompilationInfo.size()) {
+ m_exceptionHandler = label();
m_exceptionChecks.link(this);
-
- // lookupExceptionHandler is passed one argument, the exec (the CallFrame*).
- move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- if (doLookup.isSet())
- doLookup.link(this);
+ copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
+
+ // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
+ move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
#if CPU(X86)
- // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
- poke(GPRInfo::argumentGPR0);
+ // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
+ poke(GPRInfo::argumentGPR0);
+ poke(GPRInfo::argumentGPR1, 1);
#endif
- m_calls.append(CallRecord(call(), (unsigned)-1, FunctionPtr(lookupExceptionHandler).value()));
- jumpToExceptionHandler();
+ m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandler).value()));
+ jumpToExceptionHandler();
+ }
+}
+
+void JIT::doMainThreadPreparationBeforeCompile()
+{
+ // This ensures that we have the most up-to-date type information when performing typecheck optimizations for op_profile_type.
+ if (m_vm->typeProfiler())
+ m_vm->typeProfilerLog()->processLogEntries(ASCIILiteral("Preparing for JIT compilation."));
+}
+
+unsigned JIT::frameRegisterCountFor(CodeBlock* codeBlock)
+{
+ ASSERT(static_cast<unsigned>(codeBlock->m_numCalleeLocals) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast<unsigned>(codeBlock->m_numCalleeLocals)));
+
+ return roundLocalRegisterCountForFramePointerOffset(codeBlock->m_numCalleeLocals + maxFrameExtentForSlowPathCallInRegisters);
+}
+
+int JIT::stackPointerOffsetFor(CodeBlock* codeBlock)
+{
+ return virtualRegisterForLocal(frameRegisterCountFor(codeBlock) - 1).offset();
+}
+
+bool JIT::reportCompileTimes()
+{
+ return Options::reportCompileTimes() || Options::reportBaselineCompileTimes();
}
+bool JIT::computeCompileTimes()
+{
+ return reportCompileTimes() || Options::reportTotalCompileTimes();
+}
+
+HashMap<CString, double> JIT::compileTimeStats()
+{
+ HashMap<CString, double> result;
+ if (Options::reportTotalCompileTimes()) {
+ result.add("Total Compile Time", totalBaselineCompileTime + totalDFGCompileTime + totalFTLCompileTime);
+ result.add("Baseline Compile Time", totalBaselineCompileTime);
+#if ENABLE(DFG_JIT)
+ result.add("DFG Compile Time", totalDFGCompileTime);
+#if ENABLE(FTL_JIT)
+ result.add("FTL Compile Time", totalFTLCompileTime);
+ result.add("FTL (DFG) Compile Time", totalFTLDFGCompileTime);
+ result.add("FTL (B3) Compile Time", totalFTLB3CompileTime);
+#endif // ENABLE(FTL_JIT)
+#endif // ENABLE(DFG_JIT)
+ }
+ return result;
+}
} // namespace JSC
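Note (not part of the patch): the loop-OSR-entry trimming described in privateCompileMainPass() reduces to a forward reachability walk over bytecode basic blocks that records the smallest reachable leader offset; bytecode before that offset need not be compiled. A sketch of that walk with hypothetical stand-in types (JSC's BytecodeGraph and GraphNodeWorklist handle block construction and de-duplication):

#include <algorithm>
#include <limits>
#include <unordered_set>
#include <vector>

struct Block {
    unsigned leaderOffset;
    std::vector<Block*> successors;
};

unsigned minimumReachableOffset(Block* entryBlock)
{
    unsigned startOffset = std::numeric_limits<unsigned>::max();
    std::vector<Block*> worklist { entryBlock };
    std::unordered_set<Block*> seen { entryBlock };
    while (!worklist.empty()) {
        Block* block = worklist.back();
        worklist.pop_back();
        startOffset = std::min(startOffset, block->leaderOffset);
        for (Block* successor : block->successors) {
            if (seen.insert(successor).second)
                worklist.push_back(successor);
        }
    }
    // Bytecode below this offset is provably unreachable from the OSR entry point.
    return startOffset;
}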
diff --git a/Source/JavaScriptCore/jit/JIT.h b/Source/JavaScriptCore/jit/JIT.h
index 298075706..d8e74d45a 100644
--- a/Source/JavaScriptCore/jit/JIT.h
+++ b/Source/JavaScriptCore/jit/JIT.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,14 +23,13 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef JIT_h
-#define JIT_h
+#pragma once
#if ENABLE(JIT)
// We've run into some problems where changing the size of the class JIT leads to
// performance fluctuations. Try forcing alignment in an attempt to stabilize this.
-#if COMPILER(GCC)
+#if COMPILER(GCC_OR_CLANG)
#define JIT_CLASS_ALIGNMENT __attribute__ ((aligned (32)))
#else
#define JIT_CLASS_ALIGNMENT
@@ -40,38 +39,33 @@
#include "CodeBlock.h"
#include "CompactJITCodeMap.h"
-#include "Interpreter.h"
#include "JITDisassembler.h"
#include "JITInlineCacheGenerator.h"
+#include "JITMathIC.h"
#include "JSInterfaceJIT.h"
-#include "LegacyProfiler.h"
-#include "Opcode.h"
-#include "ResultType.h"
-#include "SamplingTool.h"
+#include "PCToCodeOriginMap.h"
#include "UnusedPointer.h"
namespace JSC {
+ enum OpcodeID : unsigned;
+
class ArrayAllocationProfile;
+ class CallLinkInfo;
class CodeBlock;
class FunctionExecutable;
class JIT;
- class JSPropertyNameIterator;
class Identifier;
class Interpreter;
- class JSScope;
- class JSStack;
class MarkedAllocator;
class Register;
class StructureChain;
+ class StructureStubInfo;
- struct CallLinkInfo;
struct Instruction;
struct OperandTypes;
- struct PolymorphicAccessStructureList;
struct SimpleJumpTable;
struct StringJumpTable;
- struct StructureStubInfo;
struct CallRecord {
MacroAssembler::Call from;
@@ -104,12 +98,10 @@ namespace JSC {
struct SlowCaseEntry {
MacroAssembler::Jump from;
unsigned to;
- unsigned hint;
- SlowCaseEntry(MacroAssembler::Jump f, unsigned t, unsigned h = 0)
+ SlowCaseEntry(MacroAssembler::Jump f, unsigned t)
: from(f)
, to(t)
- , hint(h)
{
}
};
@@ -151,34 +143,38 @@ namespace JSC {
struct ByValCompilationInfo {
ByValCompilationInfo() { }
- ByValCompilationInfo(unsigned bytecodeIndex, MacroAssembler::PatchableJump badTypeJump, JITArrayMode arrayMode, MacroAssembler::Label doneTarget)
- : bytecodeIndex(bytecodeIndex)
+ ByValCompilationInfo(ByValInfo* byValInfo, unsigned bytecodeIndex, MacroAssembler::PatchableJump notIndexJump, MacroAssembler::PatchableJump badTypeJump, JITArrayMode arrayMode, ArrayProfile* arrayProfile, MacroAssembler::Label doneTarget, MacroAssembler::Label nextHotPathTarget)
+ : byValInfo(byValInfo)
+ , bytecodeIndex(bytecodeIndex)
+ , notIndexJump(notIndexJump)
, badTypeJump(badTypeJump)
, arrayMode(arrayMode)
+ , arrayProfile(arrayProfile)
, doneTarget(doneTarget)
+ , nextHotPathTarget(nextHotPathTarget)
{
}
-
+
+ ByValInfo* byValInfo;
unsigned bytecodeIndex;
+ MacroAssembler::PatchableJump notIndexJump;
MacroAssembler::PatchableJump badTypeJump;
JITArrayMode arrayMode;
+ ArrayProfile* arrayProfile;
MacroAssembler::Label doneTarget;
+ MacroAssembler::Label nextHotPathTarget;
MacroAssembler::Label slowPathTarget;
MacroAssembler::Call returnAddress;
};
- struct StructureStubCompilationInfo {
+ struct CallCompilationInfo {
MacroAssembler::DataLabelPtr hotPathBegin;
MacroAssembler::Call hotPathOther;
MacroAssembler::Call callReturnLocation;
- CallLinkInfo::CallType callType;
- unsigned bytecodeIndex;
+ CallLinkInfo* callLinkInfo;
};
- // Near calls can only be patched to other JIT code, regular calls can be patched to JIT code or relinked to stub functions.
- void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction);
- void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction);
- void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction);
+ void ctiPatchCallByReturnAddress(ReturnAddressPtr, FunctionPtr newCalleeFunction);
class JIT : private JSInterfaceJIT {
friend class JITSlowPathCall;
@@ -195,23 +191,31 @@ namespace JSC {
static const int patchPutByIdDefaultOffset = 256;
public:
- static CompilationResult compile(VM* vm, CodeBlock* codeBlock, JITCompilationEffort effort)
+ JIT(VM*, CodeBlock* = 0, unsigned loopOSREntryBytecodeOffset = 0);
+ ~JIT();
+
+ void compileWithoutLinking(JITCompilationEffort);
+ CompilationResult link();
+
+ void doMainThreadPreparationBeforeCompile();
+
+ static CompilationResult compile(VM* vm, CodeBlock* codeBlock, JITCompilationEffort effort, unsigned bytecodeOffset = 0)
{
- return JIT(vm, codeBlock).privateCompile(effort);
+ return JIT(vm, codeBlock, bytecodeOffset).privateCompile(effort);
}
- static void compileClosureCall(VM* vm, CallLinkInfo* callLinkInfo, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, Structure* expectedStructure, ExecutableBase* expectedExecutable, MacroAssemblerCodePtr codePtr)
+ static void compileGetByVal(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
{
- JIT jit(vm, callerCodeBlock);
- jit.m_bytecodeOffset = callLinkInfo->codeOrigin.bytecodeIndex;
- jit.privateCompileClosureCall(callLinkInfo, calleeCodeBlock, expectedStructure, expectedExecutable, codePtr);
+ JIT jit(vm, codeBlock);
+ jit.m_bytecodeOffset = byValInfo->bytecodeIndex;
+ jit.privateCompileGetByVal(byValInfo, returnAddress, arrayMode);
}
- static void compileGetByVal(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
+ static void compileGetByValWithCachedId(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, const Identifier& propertyName)
{
JIT jit(vm, codeBlock);
jit.m_bytecodeOffset = byValInfo->bytecodeIndex;
- jit.privateCompileGetByVal(byValInfo, returnAddress, arrayMode);
+ jit.privateCompileGetByValWithCachedId(byValInfo, returnAddress, propertyName);
}
static void compilePutByVal(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
@@ -228,39 +232,39 @@ namespace JSC {
jit.privateCompilePutByVal(byValInfo, returnAddress, arrayMode);
}
- static CodeRef compileCTINativeCall(VM* vm, NativeFunction func)
+ static void compilePutByValWithCachedId(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, PutKind putKind, const Identifier& propertyName)
{
- if (!vm->canUseJIT()) {
-#if ENABLE(LLINT)
- return CodeRef::createLLIntCodeRef(llint_native_call_trampoline);
-#else
- return CodeRef();
-#endif
- }
- JIT jit(vm, 0);
- return jit.privateCompileCTINativeCall(vm, func);
+ JIT jit(vm, codeBlock);
+ jit.m_bytecodeOffset = byValInfo->bytecodeIndex;
+ jit.privateCompilePutByValWithCachedId(byValInfo, returnAddress, putKind, propertyName);
}
- static void linkFor(ExecState*, JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, CodePtr, CallLinkInfo*, VM*, CodeSpecializationKind);
- static void linkSlowCall(CodeBlock* callerCodeBlock, CallLinkInfo*);
-
- static unsigned frameRegisterCountFor(CodeBlock* codeBlock)
+ static void compileHasIndexedProperty(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
{
- return codeBlock->m_numCalleeRegisters;
+ JIT jit(vm, codeBlock);
+ jit.m_bytecodeOffset = byValInfo->bytecodeIndex;
+ jit.privateCompileHasIndexedProperty(byValInfo, returnAddress, arrayMode);
}
- private:
- JIT(VM*, CodeBlock* = 0);
+ static CodeRef compileCTINativeCall(VM*, NativeFunction);
+
+ static unsigned frameRegisterCountFor(CodeBlock*);
+ static int stackPointerOffsetFor(CodeBlock*);
+ JS_EXPORT_PRIVATE static HashMap<CString, double> compileTimeStats();
+
+ private:
void privateCompileMainPass();
void privateCompileLinkPass();
void privateCompileSlowCases();
CompilationResult privateCompile(JITCompilationEffort);
- void privateCompileClosureCall(CallLinkInfo*, CodeBlock* calleeCodeBlock, Structure*, ExecutableBase*, MacroAssemblerCodePtr);
-
void privateCompileGetByVal(ByValInfo*, ReturnAddressPtr, JITArrayMode);
+ void privateCompileGetByValWithCachedId(ByValInfo*, ReturnAddressPtr, const Identifier&);
void privateCompilePutByVal(ByValInfo*, ReturnAddressPtr, JITArrayMode);
+ void privateCompilePutByValWithCachedId(ByValInfo*, ReturnAddressPtr, PutKind, const Identifier&);
+
+ void privateCompileHasIndexedProperty(ByValInfo*, ReturnAddressPtr, JITArrayMode);
Label privateCompileCTINativeCall(VM*, bool isConstruct = false);
CodeRef privateCompileCTINativeCall(VM*, NativeFunction);
@@ -274,6 +278,15 @@ namespace JSC {
return functionCall;
}
+#if OS(WINDOWS) && CPU(X86_64)
+ Call appendCallWithSlowPathReturnType(const FunctionPtr& function)
+ {
+ Call functionCall = callWithSlowPathReturnType();
+ m_calls.append(CallRecord(functionCall, m_bytecodeOffset, function.value()));
+ return functionCall;
+ }
+#endif
+
void exceptionCheck(Jump jumpToHandler)
{
m_exceptionChecks.append(jumpToHandler);
@@ -292,52 +305,41 @@ namespace JSC {
void privateCompileExceptionHandlers();
void addSlowCase(Jump);
- void addSlowCase(JumpList);
+ void addSlowCase(const JumpList&);
void addSlowCase();
void addJump(Jump, int);
void emitJumpSlowToHot(Jump, int);
void compileOpCall(OpcodeID, Instruction*, unsigned callLinkInfoIndex);
void compileOpCallSlowCase(OpcodeID, Instruction*, Vector<SlowCaseEntry>::iterator&, unsigned callLinkInfoIndex);
- void compileLoadVarargs(Instruction*);
+ void compileSetupVarargsFrame(OpcodeID, Instruction*, CallLinkInfo*);
void compileCallEval(Instruction*);
void compileCallEvalSlowCase(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitPutCallResult(Instruction*);
enum CompileOpStrictEqType { OpStrictEq, OpNStrictEq };
void compileOpStrictEq(Instruction* instruction, CompileOpStrictEqType type);
- bool isOperandConstantImmediateDouble(int src);
+ bool isOperandConstantDouble(int src);
void emitLoadDouble(int index, FPRegisterID value);
void emitLoadInt32ToDouble(int index, FPRegisterID value);
- Jump emitJumpIfNotObject(RegisterID structureReg);
+ Jump emitJumpIfCellObject(RegisterID cellReg);
+ Jump emitJumpIfCellNotObject(RegisterID cellReg);
- Jump addStructureTransitionCheck(JSCell*, Structure*, StructureStubInfo*, RegisterID scratch);
- void addStructureTransitionCheck(JSCell*, Structure*, StructureStubInfo*, JumpList& failureCases, RegisterID scratch);
- void testPrototype(JSValue, JumpList& failureCases, StructureStubInfo*);
-
- enum WriteBarrierMode { UnconditionalWriteBarrier, ShouldFilterValue, ShouldFilterBaseAndValue };
+ enum WriteBarrierMode { UnconditionalWriteBarrier, ShouldFilterBase, ShouldFilterValue, ShouldFilterBaseAndValue };
    // The value register in the write barrier is used before any scratch registers,
    // so it may safely be the same as either of the scratch registers.
- Jump checkMarkWord(RegisterID owner, RegisterID scratch1, RegisterID scratch2);
- Jump checkMarkWord(JSCell* owner);
void emitWriteBarrier(unsigned owner, unsigned value, WriteBarrierMode);
void emitWriteBarrier(JSCell* owner, unsigned value, WriteBarrierMode);
-/*
- void emitWriteBarrier(RegisterID owner, RegisterID valueTag, RegisterID scratch1, RegisterID scratch2, WriteBarrierMode);
- void emitWriteBarrier(JSCell* owner, RegisterID value, WriteBarrierMode);
-*/
+ void emitWriteBarrier(JSCell* owner);
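    // Editorial sketch (not part of the patch): the filter modes above let the emitter
    // skip the barrier when it is provably unnecessary. For ShouldFilterValue the
    // emitted code is conceptually:
    //
    //     Jump valueNotCell = branchIfNotCell(value);  // non-cells never need a barrier
    //     ... mark / remember the owner cell ...
    //     valueNotCell.link(this);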
- template<typename StructureType> // StructureType can be RegisterID or ImmPtr.
- void emitAllocateJSObject(RegisterID allocator, StructureType, RegisterID result, RegisterID scratch);
-
// This assumes that the value to profile is in regT0 and that regT3 is available for
// scratch.
void emitValueProfilingSite(ValueProfile*);
void emitValueProfilingSite(unsigned bytecodeOffset);
void emitValueProfilingSite();
- void emitArrayProfilingSite(RegisterID structureAndIndexingType, RegisterID scratch, ArrayProfile*);
- void emitArrayProfilingSiteForBytecodeIndex(RegisterID structureAndIndexingType, RegisterID scratch, unsigned bytecodeIndex);
+ void emitArrayProfilingSiteWithCell(RegisterID cell, RegisterID indexingType, ArrayProfile*);
+ void emitArrayProfilingSiteForBytecodeIndexWithCell(RegisterID cell, RegisterID indexingType, unsigned bytecodeIndex);
void emitArrayProfileStoreToHoleSpecialCase(ArrayProfile*);
void emitArrayProfileOutOfBoundsSpecialCase(ArrayProfile*);
@@ -347,14 +349,22 @@ namespace JSC {
// Property is int-checked and zero extended. Base is cell checked.
// Structure is already profiled. Returns the slow cases. Fall-through
// case contains result in regT0, and it is not yet profiled.
+ JumpList emitInt32Load(Instruction* instruction, PatchableJump& badType) { return emitContiguousLoad(instruction, badType, Int32Shape); }
+ JumpList emitDoubleLoad(Instruction*, PatchableJump& badType);
+ JumpList emitContiguousLoad(Instruction*, PatchableJump& badType, IndexingType expectedShape = ContiguousShape);
+ JumpList emitArrayStorageLoad(Instruction*, PatchableJump& badType);
+ JumpList emitLoadForArrayMode(Instruction*, JITArrayMode, PatchableJump& badType);
+
JumpList emitInt32GetByVal(Instruction* instruction, PatchableJump& badType) { return emitContiguousGetByVal(instruction, badType, Int32Shape); }
JumpList emitDoubleGetByVal(Instruction*, PatchableJump& badType);
JumpList emitContiguousGetByVal(Instruction*, PatchableJump& badType, IndexingType expectedShape = ContiguousShape);
JumpList emitArrayStorageGetByVal(Instruction*, PatchableJump& badType);
+ JumpList emitDirectArgumentsGetByVal(Instruction*, PatchableJump& badType);
+ JumpList emitScopedArgumentsGetByVal(Instruction*, PatchableJump& badType);
JumpList emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badType, TypedArrayType);
JumpList emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badType, TypedArrayType);
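    // Editorial sketch (not part of the patch): each emitter above follows the contract
    // described in the comment block; a contiguous load is roughly:
    //
    //     badType = patchableBranch32(NotEqual, indexingTypeReg, TrustedImm32(shape));
    //     // bounds check against the butterfly's public length -> slow cases
    //     // load the element; a hole (empty value) also appends a slow case
    //     // fall through with the result in regT0, profiling left to the caller
    //     return slowCases;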
- // Property is in regT0, base is in regT0. regT2 contains indecing type.
+    // Property is in regT1, base is in regT0. regT2 contains the indexing type.
// The value to store is not yet loaded. Property is int-checked and
// zero-extended. Base is cell checked. Structure is already profiled.
    // Returns the slow cases.
@@ -374,11 +384,23 @@ namespace JSC {
JumpList emitArrayStoragePutByVal(Instruction*, PatchableJump& badType);
JumpList emitIntTypedArrayPutByVal(Instruction*, PatchableJump& badType, TypedArrayType);
JumpList emitFloatTypedArrayPutByVal(Instruction*, PatchableJump& badType, TypedArrayType);
-
+
+ // Identifier check helper for GetByVal and PutByVal.
+ void emitByValIdentifierCheck(ByValInfo*, RegisterID cell, RegisterID scratch, const Identifier&, JumpList& slowCases);
+
+ JITGetByIdGenerator emitGetByValWithCachedId(ByValInfo*, Instruction*, const Identifier&, Jump& fastDoneCase, Jump& slowDoneCase, JumpList& slowCases);
+ JITPutByIdGenerator emitPutByValWithCachedId(ByValInfo*, Instruction*, PutKind, const Identifier&, JumpList& doneCases, JumpList& slowCases);
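    // Editorial sketch (not part of the patch): the cached-id paths handle by-val sites
    // whose key is observed to be one specific string, so they can reuse the by-id ICs.
    // Conceptually:
    //
    //     emitByValIdentifierCheck(byValInfo, propertyGPR, scratchGPR, ident, slowCases);
    //     // the key matched the cached Identifier; emit a normal get_by_id stub
    //     JITGetByIdGenerator gen = emitGetByValWithCachedId(byValInfo, currentInstruction,
    //         ident, fastDoneCase, slowDoneCase, slowCases);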
+
enum FinalObjectMode { MayBeFinal, KnownNotFinal };
+ void emitGetVirtualRegister(int src, JSValueRegs dst);
+ void emitPutVirtualRegister(int dst, JSValueRegs src);
+
+ int32_t getOperandConstantInt(int src);
+ double getOperandConstantDouble(int src);
+
#if USE(JSVALUE32_64)
- bool getOperandConstantImmediateInt(int op1, int op2, int& op, int32_t& constant);
+ bool getOperandConstantInt(int op1, int op2, int& op, int32_t& constant);
void emitLoadTag(int index, RegisterID tag);
void emitLoadPayload(int index, RegisterID payload);
@@ -399,14 +421,8 @@ namespace JSC {
void emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex, RegisterID tag);
void compileGetByIdHotPath(const Identifier*);
- void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, PropertyOffset cachedOffset);
- void compileGetDirectOffset(JSObject* base, RegisterID resultTag, RegisterID resultPayload, PropertyOffset cachedOffset);
- void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID offset, FinalObjectMode = MayBeFinal);
- void compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, PropertyOffset cachedOffset);
// Arithmetic opcode helpers
- void emitAdd32Constant(int dst, int op, int32_t constant, ResultType opType);
- void emitSub32Constant(int dst, int op, int32_t constant, ResultType opType);
void emitBinaryDoubleOp(OpcodeID, int dst, int op1, int op2, OperandTypes, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters = true, bool op2IsInRegisters = true);
#else // USE(JSVALUE32_64)
@@ -425,72 +441,80 @@ namespace JSC {
emitPutVirtualRegister(dst, payload);
}
- int32_t getConstantOperandImmediateInt(int src);
-
Jump emitJumpIfJSCell(RegisterID);
Jump emitJumpIfBothJSCells(RegisterID, RegisterID, RegisterID);
void emitJumpSlowCaseIfJSCell(RegisterID);
void emitJumpSlowCaseIfNotJSCell(RegisterID);
void emitJumpSlowCaseIfNotJSCell(RegisterID, int VReg);
- Jump emitJumpIfImmediateInteger(RegisterID);
- Jump emitJumpIfNotImmediateInteger(RegisterID);
- Jump emitJumpIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);
- void emitJumpSlowCaseIfNotImmediateInteger(RegisterID);
- void emitJumpSlowCaseIfNotImmediateNumber(RegisterID);
- void emitJumpSlowCaseIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);
-
- void emitFastArithReTagImmediate(RegisterID src, RegisterID dest);
+ Jump emitJumpIfInt(RegisterID);
+ Jump emitJumpIfNotInt(RegisterID);
+ Jump emitJumpIfNotInt(RegisterID, RegisterID, RegisterID scratch);
+ PatchableJump emitPatchableJumpIfNotInt(RegisterID);
+ void emitJumpSlowCaseIfNotInt(RegisterID);
+ void emitJumpSlowCaseIfNotNumber(RegisterID);
+ void emitJumpSlowCaseIfNotInt(RegisterID, RegisterID, RegisterID scratch);
- void emitTagAsBoolImmediate(RegisterID reg);
- void compileBinaryArithOp(OpcodeID, int dst, int src1, int src2, OperandTypes opi);
- void compileBinaryArithOpSlowCase(Instruction*, OpcodeID, Vector<SlowCaseEntry>::iterator&, int dst, int src1, int src2, OperandTypes, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase);
+ void emitTagBool(RegisterID);
void compileGetByIdHotPath(int baseVReg, const Identifier*);
- void compileGetDirectOffset(RegisterID base, RegisterID result, PropertyOffset cachedOffset);
- void compileGetDirectOffset(JSObject* base, RegisterID result, PropertyOffset cachedOffset);
- void compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch, FinalObjectMode = MayBeFinal);
- void compilePutDirectOffset(RegisterID base, RegisterID value, PropertyOffset cachedOffset);
#endif // USE(JSVALUE32_64)
void emit_compareAndJump(OpcodeID, int op1, int op2, unsigned target, RelationalCondition);
void emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondition, size_t (JIT_OPERATION *operation)(ExecState*, EncodedJSValue, EncodedJSValue), bool invert, Vector<SlowCaseEntry>::iterator&);
+
+ void assertStackPointerOffset();
- void emit_op_touch_entry(Instruction*);
void emit_op_add(Instruction*);
void emit_op_bitand(Instruction*);
void emit_op_bitor(Instruction*);
void emit_op_bitxor(Instruction*);
void emit_op_call(Instruction*);
+ void emit_op_tail_call(Instruction*);
void emit_op_call_eval(Instruction*);
void emit_op_call_varargs(Instruction*);
- void emit_op_captured_mov(Instruction*);
+ void emit_op_tail_call_varargs(Instruction*);
+ void emit_op_tail_call_forward_arguments(Instruction*);
+ void emit_op_construct_varargs(Instruction*);
void emit_op_catch(Instruction*);
void emit_op_construct(Instruction*);
- void emit_op_get_callee(Instruction*);
void emit_op_create_this(Instruction*);
void emit_op_to_this(Instruction*);
- void emit_op_create_arguments(Instruction*);
+ void emit_op_create_direct_arguments(Instruction*);
+ void emit_op_create_scoped_arguments(Instruction*);
+ void emit_op_create_cloned_arguments(Instruction*);
+ void emit_op_get_argument(Instruction*);
+ void emit_op_argument_count(Instruction*);
+ void emit_op_create_rest(Instruction*);
+ void emit_op_get_rest_length(Instruction*);
+ void emit_op_check_tdz(Instruction*);
+ void emit_op_assert(Instruction*);
void emit_op_debug(Instruction*);
void emit_op_del_by_id(Instruction*);
+ void emit_op_del_by_val(Instruction*);
void emit_op_div(Instruction*);
void emit_op_end(Instruction*);
void emit_op_enter(Instruction*);
- void emit_op_create_activation(Instruction*);
+ void emit_op_get_scope(Instruction*);
void emit_op_eq(Instruction*);
void emit_op_eq_null(Instruction*);
+ void emit_op_try_get_by_id(Instruction*);
void emit_op_get_by_id(Instruction*);
+ void emit_op_get_by_id_with_this(Instruction*);
+ void emit_op_get_by_val_with_this(Instruction*);
void emit_op_get_arguments_length(Instruction*);
void emit_op_get_by_val(Instruction*);
void emit_op_get_argument_by_val(Instruction*);
- void emit_op_get_by_pname(Instruction*);
void emit_op_init_lazy_reg(Instruction*);
- void emit_op_check_has_instance(Instruction*);
+ void emit_op_overrides_has_instance(Instruction*);
void emit_op_instanceof(Instruction*);
+ void emit_op_instanceof_custom(Instruction*);
+ void emit_op_is_empty(Instruction*);
void emit_op_is_undefined(Instruction*);
void emit_op_is_boolean(Instruction*);
void emit_op_is_number(Instruction*);
- void emit_op_is_string(Instruction*);
+ void emit_op_is_object(Instruction*);
+ void emit_op_is_cell_with_type(Instruction*);
void emit_op_jeq_null(Instruction*);
void emit_op_jfalse(Instruction*);
void emit_op_jmp(Instruction*);
@@ -506,6 +530,7 @@ namespace JSC {
void emit_op_jngreatereq(Instruction*);
void emit_op_jtrue(Instruction*);
void emit_op_loop_hint(Instruction*);
+ void emit_op_watchdog(Instruction*);
void emit_op_lshift(Instruction*);
void emit_op_mod(Instruction*);
void emit_op_mov(Instruction*);
@@ -516,68 +541,93 @@ namespace JSC {
void emit_op_new_array(Instruction*);
void emit_op_new_array_with_size(Instruction*);
void emit_op_new_array_buffer(Instruction*);
+ void emit_op_new_array_with_spread(Instruction*);
+ void emit_op_spread(Instruction*);
void emit_op_new_func(Instruction*);
- void emit_op_new_captured_func(Instruction*);
void emit_op_new_func_exp(Instruction*);
+ void emit_op_new_generator_func(Instruction*);
+ void emit_op_new_generator_func_exp(Instruction*);
+ void emit_op_new_async_func(Instruction*);
+ void emit_op_new_async_func_exp(Instruction*);
void emit_op_new_object(Instruction*);
void emit_op_new_regexp(Instruction*);
- void emit_op_get_pnames(Instruction*);
- void emit_op_next_pname(Instruction*);
void emit_op_not(Instruction*);
void emit_op_nstricteq(Instruction*);
- void emit_op_pop_scope(Instruction*);
void emit_op_dec(Instruction*);
void emit_op_inc(Instruction*);
- void emit_op_profile_did_call(Instruction*);
- void emit_op_profile_will_call(Instruction*);
- void emit_op_push_name_scope(Instruction*);
+ void emit_op_pow(Instruction*);
+ void emit_op_profile_type(Instruction*);
+ void emit_op_profile_control_flow(Instruction*);
void emit_op_push_with_scope(Instruction*);
+ void emit_op_create_lexical_environment(Instruction*);
+ void emit_op_get_parent_scope(Instruction*);
void emit_op_put_by_id(Instruction*);
+ void emit_op_put_by_id_with_this(Instruction*);
void emit_op_put_by_index(Instruction*);
void emit_op_put_by_val(Instruction*);
- void emit_op_put_getter_setter(Instruction*);
- void emit_op_init_global_const(Instruction*);
+ void emit_op_put_by_val_with_this(Instruction*);
+ void emit_op_put_getter_by_id(Instruction*);
+ void emit_op_put_setter_by_id(Instruction*);
+ void emit_op_put_getter_setter_by_id(Instruction*);
+ void emit_op_put_getter_by_val(Instruction*);
+ void emit_op_put_setter_by_val(Instruction*);
+ void emit_op_define_data_property(Instruction*);
+ void emit_op_define_accessor_property(Instruction*);
void emit_op_ret(Instruction*);
- void emit_op_ret_object_or_this(Instruction*);
void emit_op_rshift(Instruction*);
+ void emit_op_set_function_name(Instruction*);
void emit_op_strcat(Instruction*);
void emit_op_stricteq(Instruction*);
void emit_op_sub(Instruction*);
void emit_op_switch_char(Instruction*);
void emit_op_switch_imm(Instruction*);
void emit_op_switch_string(Instruction*);
- void emit_op_tear_off_activation(Instruction*);
void emit_op_tear_off_arguments(Instruction*);
void emit_op_throw(Instruction*);
void emit_op_throw_static_error(Instruction*);
void emit_op_to_number(Instruction*);
+ void emit_op_to_string(Instruction*);
void emit_op_to_primitive(Instruction*);
void emit_op_unexpected_load(Instruction*);
void emit_op_unsigned(Instruction*);
void emit_op_urshift(Instruction*);
+ void emit_op_get_enumerable_length(Instruction*);
+ void emit_op_has_generic_property(Instruction*);
+ void emit_op_has_structure_property(Instruction*);
+ void emit_op_has_indexed_property(Instruction*);
+ void emit_op_get_direct_pname(Instruction*);
+ void emit_op_get_property_enumerator(Instruction*);
+ void emit_op_enumerator_structure_pname(Instruction*);
+ void emit_op_enumerator_generic_pname(Instruction*);
+ void emit_op_to_index_string(Instruction*);
+ void emit_op_log_shadow_chicken_prologue(Instruction*);
+ void emit_op_log_shadow_chicken_tail(Instruction*);
void emitSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_bitand(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_bitor(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_bitxor(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_call(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_tail_call(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_call_eval(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_call_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_captured_mov(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_tail_call_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_tail_call_forward_arguments(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_construct_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_construct(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_to_this(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_create_this(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_check_tdz(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_div(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_eq(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_get_callee(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_try_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_get_by_id(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_get_arguments_length(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_get_argument_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_get_by_pname(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_check_has_instance(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_instanceof(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_jfalse(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_instanceof_custom(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_jless(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_jlesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_jgreater(Instruction*, Vector<SlowCaseEntry>::iterator&);
@@ -588,6 +638,7 @@ namespace JSC {
void emitSlow_op_jngreatereq(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_jtrue(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_loop_hint(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_watchdog(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_lshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&);
@@ -604,13 +655,19 @@ namespace JSC {
void emitSlow_op_stricteq(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_to_number(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_to_string(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_to_primitive(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_unsigned(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_urshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_has_indexed_property(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_has_structure_property(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_get_direct_pname(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emit_op_resolve_scope(Instruction*);
void emit_op_get_from_scope(Instruction*);
void emit_op_put_to_scope(Instruction*);
+ void emit_op_get_from_arguments(Instruction*);
+ void emit_op_put_to_arguments(Instruction*);
void emitSlow_op_resolve_scope(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_get_from_scope(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_put_to_scope(Instruction*, Vector<SlowCaseEntry>::iterator&);
@@ -618,33 +675,42 @@ namespace JSC {
void emitRightShift(Instruction*, bool isUnsigned);
void emitRightShiftSlowCase(Instruction*, Vector<SlowCaseEntry>::iterator&, bool isUnsigned);
+ void emitNewFuncCommon(Instruction*);
+ void emitNewFuncExprCommon(Instruction*);
void emitVarInjectionCheck(bool needsVarInjectionChecks);
- void emitResolveClosure(int dst, bool needsVarInjectionChecks, unsigned depth);
+ void emitResolveClosure(int dst, int scope, bool needsVarInjectionChecks, unsigned depth);
void emitLoadWithStructureCheck(int scope, Structure** structureSlot);
- void emitGetGlobalProperty(uintptr_t* operandSlot);
- void emitGetGlobalVar(uintptr_t operand);
- void emitGetClosureVar(int scope, uintptr_t operand);
- void emitPutGlobalProperty(uintptr_t* operandSlot, int value);
#if USE(JSVALUE64)
- void emitNotifyWrite(RegisterID value, RegisterID scratch, VariableWatchpointSet*);
+ void emitGetVarFromPointer(JSValue* operand, GPRReg);
+ void emitGetVarFromIndirectPointer(JSValue** operand, GPRReg);
#else
- void emitNotifyWrite(RegisterID tag, RegisterID payload, RegisterID scratch, VariableWatchpointSet*);
+ void emitGetVarFromIndirectPointer(JSValue** operand, GPRReg tag, GPRReg payload);
+ void emitGetVarFromPointer(JSValue* operand, GPRReg tag, GPRReg payload);
#endif
- void emitPutGlobalVar(uintptr_t operand, int value, VariableWatchpointSet*);
- void emitPutClosureVar(int scope, uintptr_t operand, int value);
+ void emitGetClosureVar(int scope, uintptr_t operand);
+ void emitNotifyWrite(WatchpointSet*);
+ void emitNotifyWrite(GPRReg pointerToSet);
+ void emitPutGlobalVariable(JSValue* operand, int value, WatchpointSet*);
+ void emitPutGlobalVariableIndirect(JSValue** addressOfOperand, int value, WatchpointSet**);
+ void emitPutClosureVar(int scope, uintptr_t operand, int value, WatchpointSet*);
void emitInitRegister(int dst);
- void emitPutIntToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry);
- void emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister);
- void emitGetFromCallFrameHeader32(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister);
-#if USE(JSVALUE64)
- void emitGetFromCallFrameHeader64(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister);
-#endif
+ void emitPutIntToCallFrameHeader(RegisterID from, int entry);
JSValue getConstantOperand(int src);
- bool isOperandConstantImmediateInt(int src);
- bool isOperandConstantImmediateChar(int src);
+ bool isOperandConstantInt(int src);
+ bool isOperandConstantChar(int src);
+
+ template <typename Generator, typename ProfiledFunction, typename NonProfiledFunction>
+ void emitMathICFast(JITUnaryMathIC<Generator>*, Instruction*, ProfiledFunction, NonProfiledFunction);
+ template <typename Generator, typename ProfiledFunction, typename NonProfiledFunction>
+ void emitMathICFast(JITBinaryMathIC<Generator>*, Instruction*, ProfiledFunction, NonProfiledFunction);
+
+ template <typename Generator, typename ProfiledRepatchFunction, typename ProfiledFunction, typename RepatchFunction>
+ void emitMathICSlow(JITBinaryMathIC<Generator>*, Instruction*, ProfiledRepatchFunction, ProfiledFunction, RepatchFunction);
+ template <typename Generator, typename ProfiledRepatchFunction, typename ProfiledFunction, typename RepatchFunction>
+ void emitMathICSlow(JITUnaryMathIC<Generator>*, Instruction*, ProfiledRepatchFunction, ProfiledFunction, RepatchFunction);
Jump getSlowCase(Vector<SlowCaseEntry>::iterator& iter)
{
@@ -652,7 +718,8 @@ namespace JSC {
}
void linkSlowCase(Vector<SlowCaseEntry>::iterator& iter)
{
- iter->from.link(this);
+ if (iter->from.isSet())
+ iter->from.link(this);
++iter;
}
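    // Editorial note (not part of the patch): the isSet() guard added above presumably
    // lets a fast path record a slow-case slot whose Jump was never actually emitted
    // (for example when a snippet generator proved a check unnecessary); linking an
    // unset Jump would assert, but the iterator still has to advance so the slow-case
    // entries stay in step with the fast path.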
void linkDummySlowCase(Vector<SlowCaseEntry>::iterator& iter)
@@ -661,8 +728,13 @@ namespace JSC {
++iter;
}
void linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&, int virtualRegisterIndex);
+ void linkAllSlowCasesForBytecodeOffset(Vector<SlowCaseEntry>& slowCases,
+ Vector<SlowCaseEntry>::iterator&, unsigned bytecodeOffset);
MacroAssembler::Call appendCallWithExceptionCheck(const FunctionPtr&);
+#if OS(WINDOWS) && CPU(X86_64)
+ MacroAssembler::Call appendCallWithExceptionCheckAndSlowPathReturnType(const FunctionPtr&);
+#endif
MacroAssembler::Call appendCallWithCallFrameRollbackOnException(const FunctionPtr&);
MacroAssembler::Call appendCallWithExceptionCheckSetJSValueResult(const FunctionPtr&, int);
MacroAssembler::Call appendCallWithExceptionCheckSetJSValueResultWithProfile(const FunctionPtr&, int);
@@ -671,9 +743,11 @@ namespace JSC {
MacroAssembler::Call callOperation(C_JITOperation_E);
MacroAssembler::Call callOperation(C_JITOperation_EO, GPRReg);
+ MacroAssembler::Call callOperation(C_JITOperation_EL, GPRReg);
+ MacroAssembler::Call callOperation(C_JITOperation_EL, TrustedImmPtr);
MacroAssembler::Call callOperation(C_JITOperation_ESt, Structure*);
MacroAssembler::Call callOperation(C_JITOperation_EZ, int32_t);
- MacroAssembler::Call callOperation(F_JITOperation_EJZ, GPRReg, int32_t);
+ MacroAssembler::Call callOperation(Z_JITOperation_EJZZ, GPRReg, int32_t, int32_t);
MacroAssembler::Call callOperation(J_JITOperation_E, int);
MacroAssembler::Call callOperation(J_JITOperation_EAapJ, int, ArrayAllocationProfile*, GPRReg);
MacroAssembler::Call callOperation(J_JITOperation_EAapJcpZ, int, ArrayAllocationProfile*, GPRReg, int32_t);
@@ -681,13 +755,29 @@ namespace JSC {
MacroAssembler::Call callOperation(J_JITOperation_EC, int, JSCell*);
MacroAssembler::Call callOperation(V_JITOperation_EC, JSCell*);
MacroAssembler::Call callOperation(J_JITOperation_EJ, int, GPRReg);
+ MacroAssembler::Call callOperation(J_JITOperation_EJ, JSValueRegs, JSValueRegs);
#if USE(JSVALUE64)
- MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_ESsiJI, int, StructureStubInfo*, GPRReg, StringImpl*);
+ MacroAssembler::Call callOperation(J_JITOperation_ESsiJI, int, StructureStubInfo*, GPRReg, UniquedStringImpl*);
+ MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_ESsiJI, int, StructureStubInfo*, GPRReg, UniquedStringImpl*);
#else
- MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_ESsiJI, int, StructureStubInfo*, GPRReg, GPRReg, StringImpl*);
+ MacroAssembler::Call callOperation(J_JITOperation_ESsiJI, int, StructureStubInfo*, GPRReg, GPRReg, UniquedStringImpl*);
+ MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_ESsiJI, int, StructureStubInfo*, GPRReg, GPRReg, UniquedStringImpl*);
#endif
- MacroAssembler::Call callOperation(J_JITOperation_EJIdc, int, GPRReg, const Identifier*);
+ MacroAssembler::Call callOperation(J_JITOperation_EJI, int, GPRReg, UniquedStringImpl*);
MacroAssembler::Call callOperation(J_JITOperation_EJJ, int, GPRReg, GPRReg);
+ MacroAssembler::Call callOperation(J_JITOperation_EJArp, JSValueRegs, JSValueRegs, ArithProfile*);
+ MacroAssembler::Call callOperation(J_JITOperation_EJJArp, JSValueRegs, JSValueRegs, JSValueRegs, ArithProfile*);
+ MacroAssembler::Call callOperation(J_JITOperation_EJJ, JSValueRegs, JSValueRegs, JSValueRegs);
+ MacroAssembler::Call callOperation(J_JITOperation_EJMic, JSValueRegs, JSValueRegs, TrustedImmPtr);
+ MacroAssembler::Call callOperation(J_JITOperation_EJJMic, JSValueRegs, JSValueRegs, JSValueRegs, TrustedImmPtr);
+ MacroAssembler::Call callOperation(J_JITOperation_EJJAp, int, GPRReg, GPRReg, ArrayProfile*);
+ MacroAssembler::Call callOperation(J_JITOperation_EJJBy, int, GPRReg, GPRReg, ByValInfo*);
+ MacroAssembler::Call callOperation(Z_JITOperation_EJOJ, GPRReg, GPRReg, GPRReg);
+ MacroAssembler::Call callOperation(C_JITOperation_EJsc, GPRReg);
+ MacroAssembler::Call callOperation(J_JITOperation_EJscC, int, GPRReg, JSCell*);
+ MacroAssembler::Call callOperation(J_JITOperation_EJscCJ, int, GPRReg, JSCell*, GPRReg);
+ MacroAssembler::Call callOperation(C_JITOperation_EJscZ, GPRReg, int32_t);
+ MacroAssembler::Call callOperation(C_JITOperation_EJscZ, int, GPRReg, int32_t);
#if USE(JSVALUE64)
MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_EJJ, int, GPRReg, GPRReg);
#else
@@ -695,61 +785,93 @@ namespace JSC {
#endif
MacroAssembler::Call callOperation(J_JITOperation_EP, int, void*);
MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_EPc, int, Instruction*);
+ MacroAssembler::Call callOperation(J_JITOperation_EPc, int, Instruction*);
MacroAssembler::Call callOperation(J_JITOperation_EZ, int, int32_t);
+ MacroAssembler::Call callOperation(J_JITOperation_EZZ, int, int32_t, int32_t);
+ MacroAssembler::Call callOperation(P_JITOperation_E);
MacroAssembler::Call callOperation(P_JITOperation_EJS, GPRReg, size_t);
- MacroAssembler::Call callOperation(P_JITOperation_EZ, int32_t);
MacroAssembler::Call callOperation(S_JITOperation_ECC, RegisterID, RegisterID);
MacroAssembler::Call callOperation(S_JITOperation_EJ, RegisterID);
+ MacroAssembler::Call callOperation(S_JITOperation_EJI, GPRReg, UniquedStringImpl*);
MacroAssembler::Call callOperation(S_JITOperation_EJJ, RegisterID, RegisterID);
MacroAssembler::Call callOperation(S_JITOperation_EOJss, RegisterID, RegisterID);
+ MacroAssembler::Call callOperation(Sprt_JITOperation_EZ, int32_t);
MacroAssembler::Call callOperation(V_JITOperation_E);
MacroAssembler::Call callOperation(V_JITOperation_EC, RegisterID);
MacroAssembler::Call callOperation(V_JITOperation_ECC, RegisterID, RegisterID);
- MacroAssembler::Call callOperation(V_JITOperation_ECICC, RegisterID, const Identifier*, RegisterID, RegisterID);
- MacroAssembler::Call callOperation(V_JITOperation_EIdJZ, const Identifier*, RegisterID, int32_t);
+ MacroAssembler::Call callOperation(V_JITOperation_ECIZC, RegisterID, UniquedStringImpl*, int32_t, RegisterID);
+ MacroAssembler::Call callOperation(V_JITOperation_ECIZCC, RegisterID, UniquedStringImpl*, int32_t, RegisterID, RegisterID);
+#if USE(JSVALUE64)
+ MacroAssembler::Call callOperation(V_JITOperation_ECJZC, RegisterID, RegisterID, int32_t, RegisterID);
+#else
+ MacroAssembler::Call callOperation(V_JITOperation_ECJZC, RegisterID, RegisterID, RegisterID, int32_t, RegisterID);
+#endif
+ MacroAssembler::Call callOperation(J_JITOperation_EE, RegisterID);
+ MacroAssembler::Call callOperation(V_JITOperation_EZSymtabJ, int, SymbolTable*, RegisterID);
+ MacroAssembler::Call callOperation(J_JITOperation_EZSymtabJ, int, SymbolTable*, RegisterID);
MacroAssembler::Call callOperation(V_JITOperation_EJ, RegisterID);
+ MacroAssembler::Call callOperationNoExceptionCheck(Z_JITOperation_E);
#if USE(JSVALUE64)
MacroAssembler::Call callOperationNoExceptionCheck(V_JITOperation_EJ, RegisterID);
#else
MacroAssembler::Call callOperationNoExceptionCheck(V_JITOperation_EJ, RegisterID, RegisterID);
#endif
- MacroAssembler::Call callOperation(V_JITOperation_EJIdJJ, RegisterID, const Identifier*, RegisterID, RegisterID);
#if USE(JSVALUE64)
- MacroAssembler::Call callOperation(F_JITOperation_EFJJ, RegisterID, RegisterID, RegisterID);
- MacroAssembler::Call callOperation(V_JITOperation_ESsiJJI, StructureStubInfo*, RegisterID, RegisterID, StringImpl*);
+ MacroAssembler::Call callOperation(F_JITOperation_EFJZZ, RegisterID, RegisterID, int32_t, RegisterID);
+ MacroAssembler::Call callOperation(V_JITOperation_ESsiJJI, StructureStubInfo*, RegisterID, RegisterID, UniquedStringImpl*);
+ MacroAssembler::Call callOperation(V_JITOperation_ECIZJJ, RegisterID, UniquedStringImpl*, int32_t, RegisterID, RegisterID);
+ MacroAssembler::Call callOperation(V_JITOperation_ECJ, RegisterID, RegisterID);
#else
- MacroAssembler::Call callOperation(V_JITOperation_ESsiJJI, StructureStubInfo*, RegisterID, RegisterID, RegisterID, RegisterID, StringImpl*);
+ MacroAssembler::Call callOperation(V_JITOperation_ESsiJJI, StructureStubInfo*, RegisterID, RegisterID, RegisterID, RegisterID, UniquedStringImpl*);
+ MacroAssembler::Call callOperation(V_JITOperation_ECJ, RegisterID, RegisterID, RegisterID);
#endif
MacroAssembler::Call callOperation(V_JITOperation_EJJJ, RegisterID, RegisterID, RegisterID);
+ MacroAssembler::Call callOperation(V_JITOperation_EJJJAp, RegisterID, RegisterID, RegisterID, ArrayProfile*);
+ MacroAssembler::Call callOperation(V_JITOperation_EJJJBy, RegisterID, RegisterID, RegisterID, ByValInfo*);
MacroAssembler::Call callOperation(V_JITOperation_EJZJ, RegisterID, int32_t, RegisterID);
MacroAssembler::Call callOperation(V_JITOperation_EJZ, RegisterID, int32_t);
MacroAssembler::Call callOperation(V_JITOperation_EPc, Instruction*);
MacroAssembler::Call callOperation(V_JITOperation_EZ, int32_t);
+ MacroAssembler::Call callOperation(V_JITOperation_EZJ, int, GPRReg);
MacroAssembler::Call callOperationWithCallFrameRollbackOnException(J_JITOperation_E);
MacroAssembler::Call callOperationWithCallFrameRollbackOnException(V_JITOperation_ECb, CodeBlock*);
MacroAssembler::Call callOperationWithCallFrameRollbackOnException(Z_JITOperation_E);
#if USE(JSVALUE32_64)
- MacroAssembler::Call callOperation(F_JITOperation_EFJJ, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID);
- MacroAssembler::Call callOperation(F_JITOperation_EJZ, GPRReg, GPRReg, int32_t);
+ MacroAssembler::Call callOperation(F_JITOperation_EFJZZ, RegisterID, RegisterID, RegisterID, int32_t, RegisterID);
+ MacroAssembler::Call callOperation(Z_JITOperation_EJZZ, GPRReg, GPRReg, int32_t, int32_t);
MacroAssembler::Call callOperation(J_JITOperation_EAapJ, int, ArrayAllocationProfile*, GPRReg, GPRReg);
MacroAssembler::Call callOperation(J_JITOperation_EJ, int, GPRReg, GPRReg);
- MacroAssembler::Call callOperation(J_JITOperation_EJIdc, int, GPRReg, GPRReg, const Identifier*);
+ MacroAssembler::Call callOperation(J_JITOperation_EJI, int, GPRReg, GPRReg, UniquedStringImpl*);
MacroAssembler::Call callOperation(J_JITOperation_EJJ, int, GPRReg, GPRReg, GPRReg, GPRReg);
+ MacroAssembler::Call callOperation(Z_JITOperation_EJOJ, GPRReg, GPRReg, GPRReg, GPRReg, GPRReg);
+ MacroAssembler::Call callOperation(J_JITOperation_EJJAp, int, GPRReg, GPRReg, GPRReg, GPRReg, ArrayProfile*);
+ MacroAssembler::Call callOperation(J_JITOperation_EJJBy, int, GPRReg, GPRReg, GPRReg, GPRReg, ByValInfo*);
MacroAssembler::Call callOperation(P_JITOperation_EJS, GPRReg, GPRReg, size_t);
MacroAssembler::Call callOperation(S_JITOperation_EJ, RegisterID, RegisterID);
+ MacroAssembler::Call callOperation(S_JITOperation_EJI, GPRReg, GPRReg, UniquedStringImpl*);
MacroAssembler::Call callOperation(S_JITOperation_EJJ, RegisterID, RegisterID, RegisterID, RegisterID);
- MacroAssembler::Call callOperation(V_JITOperation_EIdJZ, const Identifier*, RegisterID, RegisterID, int32_t);
+ MacroAssembler::Call callOperation(V_JITOperation_EZSymtabJ, int, SymbolTable*, RegisterID, RegisterID);
MacroAssembler::Call callOperation(V_JITOperation_EJ, RegisterID, RegisterID);
MacroAssembler::Call callOperation(V_JITOperation_EJJJ, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID);
+ MacroAssembler::Call callOperation(V_JITOperation_EJJJAp, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID, ArrayProfile*);
+ MacroAssembler::Call callOperation(V_JITOperation_EJJJBy, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID, ByValInfo*);
MacroAssembler::Call callOperation(V_JITOperation_EJZ, RegisterID, RegisterID, int32_t);
MacroAssembler::Call callOperation(V_JITOperation_EJZJ, RegisterID, RegisterID, int32_t, RegisterID, RegisterID);
+ MacroAssembler::Call callOperation(V_JITOperation_EZJ, int32_t, RegisterID, RegisterID);
+ MacroAssembler::Call callOperation(J_JITOperation_EJscCJ, int, GPRReg, JSCell*, GPRReg, GPRReg);
#endif
+ template<typename SnippetGenerator>
+ void emitBitBinaryOpFastPath(Instruction* currentInstruction);
+
+ void emitRightShiftFastPath(Instruction* currentInstruction, OpcodeID);
+
Jump checkStructure(RegisterID reg, Structure* structure);
void updateTopCallFrame();
Call emitNakedCall(CodePtr function = CodePtr());
+ Call emitNakedTailCall(CodePtr function = CodePtr());
// Loads the character value of a single character string into dst.
void emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures);
@@ -795,14 +917,24 @@ namespace JSC {
bool shouldEmitProfiling() { return false; }
#endif
+ static bool reportCompileTimes();
+ static bool computeCompileTimes();
+
+ // If you need to check the value of an instruction multiple times and the instruction is
+    // part of an LLInt inline cache, then you want to use this. It will give you the value of
+ // the instruction at the start of JITing.
+ Instruction* copiedInstruction(Instruction*);
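    // Editorial sketch (not part of the patch), illustrative use only:
    //
    //     Instruction* insn = copiedInstruction(currentInstruction);
    //     int dst = insn[1].u.operand;  // read from the snapshot taken when JITing began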
+
Interpreter* m_interpreter;
+
+ RefCountedArray<Instruction> m_instructions;
Vector<CallRecord> m_calls;
Vector<Label> m_labels;
Vector<JITGetByIdGenerator> m_getByIds;
Vector<JITPutByIdGenerator> m_putByIds;
Vector<ByValCompilationInfo> m_byValCompilationInfo;
- Vector<StructureStubCompilationInfo> m_callStructureStubCompilationInfo;
+ Vector<CallCompilationInfo> m_callCompilationInfo;
Vector<JumpTable> m_jmpTable;
unsigned m_bytecodeOffset;
@@ -811,24 +943,32 @@ namespace JSC {
JumpList m_exceptionChecks;
JumpList m_exceptionChecksWithCallFrameRollback;
+ Label m_exceptionHandler;
unsigned m_getByIdIndex;
unsigned m_putByIdIndex;
unsigned m_byValInstructionIndex;
unsigned m_callLinkInfoIndex;
+
+ Label m_arityCheck;
+ std::unique_ptr<LinkBuffer> m_linkBuffer;
- OwnPtr<JITDisassembler> m_disassembler;
+ std::unique_ptr<JITDisassembler> m_disassembler;
RefPtr<Profiler::Compilation> m_compilation;
WeakRandom m_randomGenerator;
static CodeRef stringGetByValStubGenerator(VM*);
+ PCToCodeOriginMapBuilder m_pcToCodeOriginMapBuilder;
+
+ HashMap<Instruction*, void*> m_instructionToMathIC;
+ HashMap<Instruction*, MathICGenerationState> m_instructionToMathICGenerationState;
+
bool m_canBeOptimized;
bool m_canBeOptimizedOrInlined;
bool m_shouldEmitProfiling;
+ unsigned m_loopOSREntryBytecodeOffset { 0 };
} JIT_CLASS_ALIGNMENT;
} // namespace JSC
#endif // ENABLE(JIT)
-
-#endif // JIT_h
diff --git a/Source/JavaScriptCore/jit/JITAddGenerator.cpp b/Source/JavaScriptCore/jit/JITAddGenerator.cpp
new file mode 100644
index 000000000..627af2787
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITAddGenerator.cpp
@@ -0,0 +1,187 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JITAddGenerator.h"
+
+#include "ArithProfile.h"
+#include "JITMathIC.h"
+#include "LinkBuffer.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+JITMathICInlineResult JITAddGenerator::generateInline(CCallHelpers& jit, MathICGenerationState& state, const ArithProfile* arithProfile)
+{
+ // We default to speculating int32.
+ ObservedType lhs = ObservedType().withInt32();
+ ObservedType rhs = ObservedType().withInt32();
+ if (arithProfile) {
+ lhs = arithProfile->lhsObservedType();
+ rhs = arithProfile->rhsObservedType();
+ }
+
+ if (lhs.isOnlyNonNumber() && rhs.isOnlyNonNumber())
+ return JITMathICInlineResult::DontGenerate;
+
+ if ((lhs.isOnlyInt32() || m_leftOperand.isConstInt32()) && (rhs.isOnlyInt32() || m_rightOperand.isConstInt32())) {
+ ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());
+ if (!m_leftOperand.isConstInt32())
+ state.slowPathJumps.append(jit.branchIfNotInt32(m_left));
+ if (!m_rightOperand.isConstInt32())
+ state.slowPathJumps.append(jit.branchIfNotInt32(m_right));
+
+ GPRReg scratch = m_scratchGPR;
+ if (m_leftOperand.isConstInt32() || m_rightOperand.isConstInt32()) {
+ JSValueRegs var = m_leftOperand.isConstInt32() ? m_right : m_left;
+ int32_t constValue = m_leftOperand.isConstInt32() ? m_leftOperand.asConstInt32() : m_rightOperand.asConstInt32();
+ if (var.payloadGPR() != m_result.payloadGPR())
+ scratch = m_result.payloadGPR();
+ state.slowPathJumps.append(jit.branchAdd32(CCallHelpers::Overflow, var.payloadGPR(), CCallHelpers::Imm32(constValue), scratch));
+ } else {
+ if (m_left.payloadGPR() != m_result.payloadGPR() && m_right.payloadGPR() != m_result.payloadGPR())
+ scratch = m_result.payloadGPR();
+ state.slowPathJumps.append(jit.branchAdd32(CCallHelpers::Overflow, m_right.payloadGPR(), m_left.payloadGPR(), scratch));
+ }
+ jit.boxInt32(scratch, m_result);
+ return JITMathICInlineResult::GeneratedFastPath;
+ }
+
+ return JITMathICInlineResult::GenerateFullSnippet;
+}
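// Editorial note (not part of the patch): the three results above drive the math IC.
// DontGenerate        - both operands have only ever been seen as non-numbers, so no
//                       inline code is emitted and the IC always calls out.
// GeneratedFastPath   - the int32 add (with overflow and type checks branching to the
//                       IC's slow path) was emitted inline by the code above.
// GenerateFullSnippet - the IC should instead emit generateFastPath() below, which also
//                       covers the double cases.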
+
+bool JITAddGenerator::generateFastPath(CCallHelpers& jit, CCallHelpers::JumpList& endJumpList, CCallHelpers::JumpList& slowPathJumpList, const ArithProfile* arithProfile, bool shouldEmitProfiling)
+{
+ ASSERT(m_scratchGPR != InvalidGPRReg);
+ ASSERT(m_scratchGPR != m_left.payloadGPR());
+ ASSERT(m_scratchGPR != m_right.payloadGPR());
+#if USE(JSVALUE32_64)
+ ASSERT(m_scratchGPR != m_left.tagGPR());
+ ASSERT(m_scratchGPR != m_right.tagGPR());
+ ASSERT(m_scratchFPR != InvalidFPRReg);
+#endif
+
+ ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());
+
+ if (!m_leftOperand.mightBeNumber() || !m_rightOperand.mightBeNumber())
+ return false;
+
+ if (m_leftOperand.isConstInt32() || m_rightOperand.isConstInt32()) {
+ JSValueRegs var = m_leftOperand.isConstInt32() ? m_right : m_left;
+ SnippetOperand& varOpr = m_leftOperand.isConstInt32() ? m_rightOperand : m_leftOperand;
+ SnippetOperand& constOpr = m_leftOperand.isConstInt32() ? m_leftOperand : m_rightOperand;
+
+ // Try to do intVar + intConstant.
+ CCallHelpers::Jump notInt32 = jit.branchIfNotInt32(var);
+
+ GPRReg scratch = m_scratchGPR;
+ if (var.payloadGPR() != m_result.payloadGPR())
+ scratch = m_result.payloadGPR();
+ slowPathJumpList.append(jit.branchAdd32(CCallHelpers::Overflow, var.payloadGPR(), CCallHelpers::Imm32(constOpr.asConstInt32()), scratch));
+
+ jit.boxInt32(scratch, m_result);
+ endJumpList.append(jit.jump());
+
+ if (!jit.supportsFloatingPoint()) {
+ slowPathJumpList.append(notInt32);
+ return true;
+ }
+
+ // Try to do doubleVar + double(intConstant).
+ notInt32.link(&jit);
+ if (!varOpr.definitelyIsNumber())
+ slowPathJumpList.append(jit.branchIfNotNumber(var, m_scratchGPR));
+
+ jit.unboxDoubleNonDestructive(var, m_leftFPR, m_scratchGPR, m_scratchFPR);
+
+ jit.move(CCallHelpers::Imm32(constOpr.asConstInt32()), m_scratchGPR);
+ jit.convertInt32ToDouble(m_scratchGPR, m_rightFPR);
+
+ // Fall thru to doubleVar + doubleVar.
+
+ } else {
+ ASSERT(!m_leftOperand.isConstInt32() && !m_rightOperand.isConstInt32());
+ CCallHelpers::Jump leftNotInt;
+ CCallHelpers::Jump rightNotInt;
+
+ // Try to do intVar + intVar.
+ leftNotInt = jit.branchIfNotInt32(m_left);
+ rightNotInt = jit.branchIfNotInt32(m_right);
+
+ GPRReg scratch = m_scratchGPR;
+ if (m_left.payloadGPR() != m_result.payloadGPR() && m_right.payloadGPR() != m_result.payloadGPR())
+ scratch = m_result.payloadGPR();
+ slowPathJumpList.append(jit.branchAdd32(CCallHelpers::Overflow, m_right.payloadGPR(), m_left.payloadGPR(), scratch));
+
+ jit.boxInt32(scratch, m_result);
+ endJumpList.append(jit.jump());
+
+
+ if (!jit.supportsFloatingPoint()) {
+ slowPathJumpList.append(leftNotInt);
+ slowPathJumpList.append(rightNotInt);
+ return true;
+ }
+
+ leftNotInt.link(&jit);
+ if (!m_leftOperand.definitelyIsNumber())
+ slowPathJumpList.append(jit.branchIfNotNumber(m_left, m_scratchGPR));
+ if (!m_rightOperand.definitelyIsNumber())
+ slowPathJumpList.append(jit.branchIfNotNumber(m_right, m_scratchGPR));
+
+ jit.unboxDoubleNonDestructive(m_left, m_leftFPR, m_scratchGPR, m_scratchFPR);
+ CCallHelpers::Jump rightIsDouble = jit.branchIfNotInt32(m_right);
+
+ jit.convertInt32ToDouble(m_right.payloadGPR(), m_rightFPR);
+ CCallHelpers::Jump rightWasInteger = jit.jump();
+
+ rightNotInt.link(&jit);
+ if (!m_rightOperand.definitelyIsNumber())
+ slowPathJumpList.append(jit.branchIfNotNumber(m_right, m_scratchGPR));
+
+ jit.convertInt32ToDouble(m_left.payloadGPR(), m_leftFPR);
+
+ rightIsDouble.link(&jit);
+ jit.unboxDoubleNonDestructive(m_right, m_rightFPR, m_scratchGPR, m_scratchFPR);
+
+ rightWasInteger.link(&jit);
+
+ // Fall thru to doubleVar + doubleVar.
+ }
+
+ // Do doubleVar + doubleVar.
+ jit.addDouble(m_rightFPR, m_leftFPR);
+ if (arithProfile && shouldEmitProfiling)
+ arithProfile->emitSetDouble(jit);
+
+ jit.boxDouble(m_leftFPR, m_result);
+
+ return true;
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
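As an editorial aside (not part of the patch), the integer fast path emitted above has the
following semantics when written as plain C++; the function name and the use of
__builtin_add_overflow are illustrative only:

#include <cstdint>
#include <optional>

// Succeeds only when both operands are already int32 and the sum does not overflow,
// mirroring branchAdd32(Overflow, ...); on failure the IC falls back to the double
// or fully generic slow path.
std::optional<int32_t> int32AddFastPath(int32_t left, int32_t right)
{
    int32_t result;
    if (__builtin_add_overflow(left, right, &result))
        return std::nullopt;
    return result; // the real code then boxes this with boxInt32()
}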
diff --git a/Source/JavaScriptCore/jit/JITAddGenerator.h b/Source/JavaScriptCore/jit/JITAddGenerator.h
new file mode 100644
index 000000000..48e1b167e
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITAddGenerator.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "CCallHelpers.h"
+#include "JITMathICInlineResult.h"
+#include "JITOperations.h"
+#include "SnippetOperand.h"
+
+namespace JSC {
+
+struct MathICGenerationState;
+
+class JITAddGenerator {
+public:
+ JITAddGenerator() { }
+
+ JITAddGenerator(SnippetOperand leftOperand, SnippetOperand rightOperand,
+ JSValueRegs result, JSValueRegs left, JSValueRegs right,
+ FPRReg leftFPR, FPRReg rightFPR, GPRReg scratchGPR, FPRReg scratchFPR)
+ : m_leftOperand(leftOperand)
+ , m_rightOperand(rightOperand)
+ , m_result(result)
+ , m_left(left)
+ , m_right(right)
+ , m_leftFPR(leftFPR)
+ , m_rightFPR(rightFPR)
+ , m_scratchGPR(scratchGPR)
+ , m_scratchFPR(scratchFPR)
+ {
+ ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());
+ }
+
+ JITMathICInlineResult generateInline(CCallHelpers&, MathICGenerationState&, const ArithProfile*);
+ bool generateFastPath(CCallHelpers&, CCallHelpers::JumpList& endJumpList, CCallHelpers::JumpList& slowPathJumpList, const ArithProfile*, bool shouldEmitProfiling);
+
+ static bool isLeftOperandValidConstant(SnippetOperand leftOperand) { return leftOperand.isPositiveConstInt32(); }
+ static bool isRightOperandValidConstant(SnippetOperand rightOperand) { return rightOperand.isPositiveConstInt32(); }
+
+private:
+ SnippetOperand m_leftOperand;
+ SnippetOperand m_rightOperand;
+ JSValueRegs m_result;
+ JSValueRegs m_left;
+ JSValueRegs m_right;
+ FPRReg m_leftFPR;
+ FPRReg m_rightFPR;
+ GPRReg m_scratchGPR;
+ FPRReg m_scratchFPR;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITArithmetic.cpp b/Source/JavaScriptCore/jit/JITArithmetic.cpp
index b9c70570c..58fdb10d1 100644
--- a/Source/JavaScriptCore/jit/JITArithmetic.cpp
+++ b/Source/JavaScriptCore/jit/JITArithmetic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2015-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,19 +28,29 @@
#if ENABLE(JIT)
#include "JIT.h"
+#include "ArithProfile.h"
#include "CodeBlock.h"
+#include "JITAddGenerator.h"
+#include "JITBitAndGenerator.h"
+#include "JITBitOrGenerator.h"
+#include "JITBitXorGenerator.h"
+#include "JITDivGenerator.h"
#include "JITInlines.h"
+#include "JITLeftShiftGenerator.h"
+#include "JITMathIC.h"
+#include "JITMulGenerator.h"
+#include "JITNegGenerator.h"
#include "JITOperations.h"
-#include "JITStubs.h"
+#include "JITRightShiftGenerator.h"
+#include "JITSubGenerator.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "Interpreter.h"
-#include "Operations.h"
+#include "JSCInlines.h"
+#include "LinkBuffer.h"
#include "ResultType.h"
-#include "SamplingTool.h"
#include "SlowPathCall.h"
-
namespace JSC {
void JIT::emit_op_jless(Instruction* currentInstruction)
@@ -189,187 +199,15 @@ void JIT::emitSlow_op_jngreatereq(Instruction* currentInstruction, Vector<SlowCa
#if USE(JSVALUE64)
-void JIT::emit_op_negate(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int src = currentInstruction[2].u.operand;
-
- emitGetVirtualRegister(src, regT0);
-
- Jump srcNotInt = emitJumpIfNotImmediateInteger(regT0);
- addSlowCase(branchTest32(Zero, regT0, TrustedImm32(0x7fffffff)));
- neg32(regT0);
- emitFastArithReTagImmediate(regT0, regT0);
-
- Jump end = jump();
-
- srcNotInt.link(this);
- emitJumpSlowCaseIfNotImmediateNumber(regT0);
-
- move(TrustedImm64((int64_t)0x8000000000000000ull), regT1);
- xor64(regT1, regT0);
-
- end.link(this);
- emitPutVirtualRegister(dst);
-}
-
-void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter); // 0x7fffffff check
- linkSlowCase(iter); // double check
-
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_negate);
- slowPathCall.call();
-}
-
-void JIT::emit_op_lshift(Instruction* currentInstruction)
-{
- int result = currentInstruction[1].u.operand;
- int op1 = currentInstruction[2].u.operand;
- int op2 = currentInstruction[3].u.operand;
-
- emitGetVirtualRegisters(op1, regT0, op2, regT2);
- // FIXME: would we be better using 'emitJumpSlowCaseIfNotImmediateIntegers'? - we *probably* ought to be consistent.
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT2);
- emitFastArithImmToInt(regT0);
- emitFastArithImmToInt(regT2);
- lshift32(regT2, regT0);
- emitFastArithReTagImmediate(regT0, regT0);
- emitPutVirtualRegister(result);
-}
-
-void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
- linkSlowCase(iter);
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_lshift);
- slowPathCall.call();
-}
-
-void JIT::emit_op_rshift(Instruction* currentInstruction)
-{
- int result = currentInstruction[1].u.operand;
- int op1 = currentInstruction[2].u.operand;
- int op2 = currentInstruction[3].u.operand;
-
- if (isOperandConstantImmediateInt(op2)) {
- // isOperandConstantImmediateInt(op2) => 1 SlowCase
- emitGetVirtualRegister(op1, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- // Mask with 0x1f as per ecma-262 11.7.2 step 7.
- rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
- } else {
- emitGetVirtualRegisters(op1, regT0, op2, regT2);
- if (supportsFloatingPointTruncate()) {
- Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
- // supportsFloatingPoint() && USE(JSVALUE64) => 3 SlowCases
- addSlowCase(emitJumpIfNotImmediateNumber(regT0));
- add64(tagTypeNumberRegister, regT0);
- move64ToDouble(regT0, fpRegT0);
- addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
- lhsIsInt.link(this);
- emitJumpSlowCaseIfNotImmediateInteger(regT2);
- } else {
- // !supportsFloatingPoint() => 2 SlowCases
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT2);
- }
- emitFastArithImmToInt(regT2);
- rshift32(regT2, regT0);
- }
- emitFastArithIntToImmNoCheck(regT0, regT0);
- emitPutVirtualRegister(result);
-}
-
-void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- int op2 = currentInstruction[3].u.operand;
-
- if (isOperandConstantImmediateInt(op2))
- linkSlowCase(iter);
-
- else {
- if (supportsFloatingPointTruncate()) {
- linkSlowCase(iter);
- linkSlowCase(iter);
- linkSlowCase(iter);
- } else {
- linkSlowCase(iter);
- linkSlowCase(iter);
- }
- }
-
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_rshift);
- slowPathCall.call();
-}
-
-void JIT::emit_op_urshift(Instruction* currentInstruction)
-{
- int result = currentInstruction[1].u.operand;
- int op1 = currentInstruction[2].u.operand;
- int op2 = currentInstruction[3].u.operand;
-
- if (isOperandConstantImmediateInt(op2)) {
- // isOperandConstantImmediateInt(op2) => 1 SlowCase
- emitGetVirtualRegister(op1, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- // Mask with 0x1f as per ecma-262 11.7.2 step 7.
- urshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
- } else {
- emitGetVirtualRegisters(op1, regT0, op2, regT2);
- if (supportsFloatingPointTruncate()) {
- Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
- // supportsFloatingPoint() && USE(JSVALUE64) => 3 SlowCases
- addSlowCase(emitJumpIfNotImmediateNumber(regT0));
- add64(tagTypeNumberRegister, regT0);
- move64ToDouble(regT0, fpRegT0);
- addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
- lhsIsInt.link(this);
- emitJumpSlowCaseIfNotImmediateInteger(regT2);
- } else {
- // !supportsFloatingPoint() => 2 SlowCases
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT2);
- }
- emitFastArithImmToInt(regT2);
- urshift32(regT2, regT0);
- }
- emitFastArithIntToImmNoCheck(regT0, regT0);
- emitPutVirtualRegister(result);
-}
-
-void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- int op2 = currentInstruction[3].u.operand;
-
- if (isOperandConstantImmediateInt(op2))
- linkSlowCase(iter);
-
- else {
- if (supportsFloatingPointTruncate()) {
- linkSlowCase(iter);
- linkSlowCase(iter);
- linkSlowCase(iter);
- } else {
- linkSlowCase(iter);
- linkSlowCase(iter);
- }
- }
-
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_urshift);
- slowPathCall.call();
-}
-
void JIT::emit_op_unsigned(Instruction* currentInstruction)
{
int result = currentInstruction[1].u.operand;
int op1 = currentInstruction[2].u.operand;
emitGetVirtualRegister(op1, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotInt(regT0);
addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
- emitFastArithReTagImmediate(regT0, regT0);
+ emitTagInt(regT0, regT0);
emitPutVirtualRegister(result, regT0);
}
@@ -389,7 +227,7 @@ void JIT::emit_compareAndJump(OpcodeID, int op1, int op2, unsigned target, Relat
// - constant int immediate to int immediate
// - int immediate to int immediate
- if (isOperandConstantImmediateChar(op1)) {
+ if (isOperandConstantChar(op1)) {
emitGetVirtualRegister(op2, regT0);
addSlowCase(emitJumpIfNotJSCell(regT0));
JumpList failures;
@@ -398,7 +236,7 @@ void JIT::emit_compareAndJump(OpcodeID, int op1, int op2, unsigned target, Relat
addJump(branch32(commute(condition), regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
return;
}
- if (isOperandConstantImmediateChar(op2)) {
+ if (isOperandConstantChar(op2)) {
emitGetVirtualRegister(op1, regT0);
addSlowCase(emitJumpIfNotJSCell(regT0));
JumpList failures;
@@ -407,20 +245,20 @@ void JIT::emit_compareAndJump(OpcodeID, int op1, int op2, unsigned target, Relat
addJump(branch32(condition, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
return;
}
- if (isOperandConstantImmediateInt(op2)) {
+ if (isOperandConstantInt(op2)) {
emitGetVirtualRegister(op1, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- int32_t op2imm = getConstantOperandImmediateInt(op2);
+ emitJumpSlowCaseIfNotInt(regT0);
+ int32_t op2imm = getOperandConstantInt(op2);
addJump(branch32(condition, regT0, Imm32(op2imm)), target);
- } else if (isOperandConstantImmediateInt(op1)) {
+ } else if (isOperandConstantInt(op1)) {
emitGetVirtualRegister(op2, regT1);
- emitJumpSlowCaseIfNotImmediateInteger(regT1);
- int32_t op1imm = getConstantOperandImmediateInt(op1);
+ emitJumpSlowCaseIfNotInt(regT1);
+ int32_t op1imm = getOperandConstantInt(op1);
addJump(branch32(commute(condition), regT1, Imm32(op1imm)), target);
} else {
emitGetVirtualRegisters(op1, regT0, op2, regT1);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT1);
+ emitJumpSlowCaseIfNotInt(regT0);
+ emitJumpSlowCaseIfNotInt(regT1);
addJump(branch32(condition, regT0, regT1), target);
}
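A side note on the single-character fast path above (a hedged sketch, not part of the patch): when one operand is a constant one-character string, the relational comparison reduces to comparing the other string's first UTF-16 code unit against a constant, which is what the branch32 against the constant character implements.

// Hedged illustration: single-character JS string comparison is a code-unit comparison.
#include <cstdio>

int main()
{
    char16_t lhs = u'a';
    char16_t rhs = u'b';
    std::printf("%d\n", lhs < rhs); // prints 1, matching JS 'a' < 'b'
}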
@@ -440,7 +278,7 @@ void JIT::emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondi
// - floating-point number to constant int immediate
// - constant int immediate to floating-point number
// - floating-point number to floating-point number.
- if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
+ if (isOperandConstantChar(op1) || isOperandConstantChar(op2)) {
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
@@ -453,11 +291,11 @@ void JIT::emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondi
return;
}
- if (isOperandConstantImmediateInt(op2)) {
+ if (isOperandConstantInt(op2)) {
linkSlowCase(iter);
if (supportsFloatingPoint()) {
- Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
+ Jump fail1 = emitJumpIfNotNumber(regT0);
add64(tagTypeNumberRegister, regT0);
move64ToDouble(regT0, fpRegT0);
@@ -476,11 +314,11 @@ void JIT::emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondi
emitGetVirtualRegister(op2, regT1);
callOperation(operation, regT0, regT1);
emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
- } else if (isOperandConstantImmediateInt(op1)) {
+ } else if (isOperandConstantInt(op1)) {
linkSlowCase(iter);
if (supportsFloatingPoint()) {
- Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
+ Jump fail1 = emitJumpIfNotNumber(regT1);
add64(tagTypeNumberRegister, regT1);
move64ToDouble(regT1, fpRegT1);
@@ -503,9 +341,9 @@ void JIT::emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondi
linkSlowCase(iter);
if (supportsFloatingPoint()) {
- Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
- Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
- Jump fail3 = emitJumpIfImmediateInteger(regT1);
+ Jump fail1 = emitJumpIfNotNumber(regT0);
+ Jump fail2 = emitJumpIfNotNumber(regT1);
+ Jump fail3 = emitJumpIfInt(regT1);
add64(tagTypeNumberRegister, regT0);
add64(tagTypeNumberRegister, regT1);
move64ToDouble(regT0, fpRegT0);
@@ -526,50 +364,14 @@ void JIT::emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondi
}
}
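The add64(tagTypeNumberRegister, ...) / move64ToDouble pairs above unbox a double under the JSVALUE64 encoding. The arithmetic relies on the identity stated by the COMPILE_ASSERT further down (TagTypeNumber + DoubleEncodeOffset wraps to zero). A self-contained sketch follows; the two constants are assumed from JSCJSValue.h and the code is illustration only, not part of the patch.

// Hedged sketch of the JSVALUE64 double boxing; constants assumed.
#include <cstdint>
#include <cstring>
#include <cstdio>

static constexpr uint64_t DoubleEncodeOffset = 1ull << 48;
static constexpr uint64_t TagTypeNumber = 0xffff000000000000ull;
static_assert(TagTypeNumber + DoubleEncodeOffset == 0, "adding TagTypeNumber undoes the double offset");

int main()
{
    double value = 2.5;
    uint64_t bits;
    std::memcpy(&bits, &value, sizeof bits);
    uint64_t boxed = bits + DoubleEncodeOffset; // how a double is stored in a 64-bit JSValue
    uint64_t unboxed = boxed + TagTypeNumber;   // same effect as subtracting DoubleEncodeOffset
    double back;
    std::memcpy(&back, &unboxed, sizeof back);
    std::printf("%g\n", back);                  // prints 2.5
}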
-void JIT::emit_op_bitand(Instruction* currentInstruction)
-{
- int result = currentInstruction[1].u.operand;
- int op1 = currentInstruction[2].u.operand;
- int op2 = currentInstruction[3].u.operand;
-
- if (isOperandConstantImmediateInt(op1)) {
- emitGetVirtualRegister(op2, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- int32_t imm = getConstantOperandImmediateInt(op1);
- and64(Imm32(imm), regT0);
- if (imm >= 0)
- emitFastArithIntToImmNoCheck(regT0, regT0);
- } else if (isOperandConstantImmediateInt(op2)) {
- emitGetVirtualRegister(op1, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- int32_t imm = getConstantOperandImmediateInt(op2);
- and64(Imm32(imm), regT0);
- if (imm >= 0)
- emitFastArithIntToImmNoCheck(regT0, regT0);
- } else {
- emitGetVirtualRegisters(op1, regT0, op2, regT1);
- and64(regT1, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- }
- emitPutVirtualRegister(result);
-}
-
-void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
-
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitand);
- slowPathCall.call();
-}
-
void JIT::emit_op_inc(Instruction* currentInstruction)
{
int srcDst = currentInstruction[1].u.operand;
emitGetVirtualRegister(srcDst, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotInt(regT0);
addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
- emitFastArithIntToImmNoCheck(regT0, regT0);
+ emitTagInt(regT0, regT0);
emitPutVirtualRegister(srcDst);
}
@@ -586,9 +388,9 @@ void JIT::emit_op_dec(Instruction* currentInstruction)
int srcDst = currentInstruction[1].u.operand;
emitGetVirtualRegister(srcDst, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotInt(regT0);
addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
- emitFastArithIntToImmNoCheck(regT0, regT0);
+ emitTagInt(regT0, regT0);
emitPutVirtualRegister(srcDst);
}
@@ -612,24 +414,26 @@ void JIT::emit_op_mod(Instruction* currentInstruction)
// Make sure registers are correct for x86 IDIV instructions.

ASSERT(regT0 == X86Registers::eax);
- ASSERT(regT1 == X86Registers::edx);
- ASSERT(regT2 == X86Registers::ecx);
-
- emitGetVirtualRegisters(op1, regT3, op2, regT2);
- emitJumpSlowCaseIfNotImmediateInteger(regT3);
- emitJumpSlowCaseIfNotImmediateInteger(regT2);
-
- move(regT3, regT0);
- addSlowCase(branchTest32(Zero, regT2));
- Jump denominatorNotNeg1 = branch32(NotEqual, regT2, TrustedImm32(-1));
+ auto edx = X86Registers::edx;
+ auto ecx = X86Registers::ecx;
+ ASSERT(regT4 != edx);
+ ASSERT(regT4 != ecx);
+
+ emitGetVirtualRegisters(op1, regT4, op2, ecx);
+ emitJumpSlowCaseIfNotInt(regT4);
+ emitJumpSlowCaseIfNotInt(ecx);
+
+ move(regT4, regT0);
+ addSlowCase(branchTest32(Zero, ecx));
+ Jump denominatorNotNeg1 = branch32(NotEqual, ecx, TrustedImm32(-1));
addSlowCase(branch32(Equal, regT0, TrustedImm32(-2147483647-1)));
denominatorNotNeg1.link(this);
- m_assembler.cdq();
- m_assembler.idivl_r(regT2);
- Jump numeratorPositive = branch32(GreaterThanOrEqual, regT3, TrustedImm32(0));
- addSlowCase(branchTest32(Zero, regT1));
+ x86ConvertToDoubleWord32();
+ x86Div32(ecx);
+ Jump numeratorPositive = branch32(GreaterThanOrEqual, regT4, TrustedImm32(0));
+ addSlowCase(branchTest32(Zero, edx));
numeratorPositive.link(this);
- emitFastArithReTagImmediate(regT1, regT0);
+ emitTagInt(edx, regT0);
emitPutVirtualRegister(result);
}
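The slow case above that fires when the remainder in edx is zero and the numerator was negative exists because JS `%` must then produce -0, which the int32 fast path cannot represent. A small stand-alone illustration (not part of the patch):

// Hedged illustration: integer remainder loses the sign of zero, doubles keep it.
#include <cmath>
#include <cstdio>

int main()
{
    int intRemainder = -4 % 2;                    // 0: no way to express -0 as an int32
    double jsRemainder = std::fmod(-4.0, 2.0);    // -0.0, which is what -4 % 2 evaluates to in JS
    std::printf("%d %d\n", intRemainder, std::signbit(jsRemainder)); // prints "0 1"
}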
@@ -661,342 +465,565 @@ void JIT::emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&)
/* ------------------------------ END: OP_MOD ------------------------------ */
-/* ------------------------------ BEGIN: USE(JSVALUE64) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
-
-void JIT::compileBinaryArithOp(OpcodeID opcodeID, int, int op1, int op2, OperandTypes)
-{
- emitGetVirtualRegisters(op1, regT0, op2, regT1);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT1);
- RareCaseProfile* profile = m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
- if (opcodeID == op_add)
- addSlowCase(branchAdd32(Overflow, regT1, regT0));
- else if (opcodeID == op_sub)
- addSlowCase(branchSub32(Overflow, regT1, regT0));
- else {
- ASSERT(opcodeID == op_mul);
- if (shouldEmitProfiling()) {
- // We want to be able to measure if this is taking the slow case just
- // because of negative zero. If this produces positive zero, then we
- // don't want the slow case to be taken because that will throw off
- // speculative compilation.
- move(regT0, regT2);
- addSlowCase(branchMul32(Overflow, regT1, regT2));
- JumpList done;
- done.append(branchTest32(NonZero, regT2));
- Jump negativeZero = branch32(LessThan, regT0, TrustedImm32(0));
- done.append(branch32(GreaterThanOrEqual, regT1, TrustedImm32(0)));
- negativeZero.link(this);
- // We only get here if we have a genuine negative zero. Record this,
- // so that the speculative JIT knows that we failed speculation
- // because of a negative zero.
- add32(TrustedImm32(1), AbsoluteAddress(&profile->m_counter));
- addSlowCase(jump());
- done.link(this);
- move(regT2, regT0);
- } else {
- addSlowCase(branchMul32(Overflow, regT1, regT0));
- addSlowCase(branchTest32(Zero, regT0));
- }
- }
- emitFastArithIntToImmNoCheck(regT0, regT0);
+#endif // USE(JSVALUE64)
+
+void JIT::emit_op_negate(Instruction* currentInstruction)
+{
+ ArithProfile* arithProfile = m_codeBlock->arithProfileForPC(currentInstruction);
+ JITNegIC* negateIC = m_codeBlock->addJITNegIC(arithProfile);
+ m_instructionToMathIC.add(currentInstruction, negateIC);
+ emitMathICFast(negateIC, currentInstruction, operationArithNegateProfiled, operationArithNegate);
}
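Related aside on the removed negative-zero handling above (a sketch, not part of the patch): the extra op_mul slow case and the special fast case profile exist because multiplying zero by a negative number must yield -0 in JS, and an int32 multiply cannot express that.

// Hedged illustration of why 0 * negative needs the double path.
#include <cmath>
#include <cstdio>

int main()
{
    int intProduct = 0 * -5;         // 0: the sign is lost in int32 arithmetic
    double jsProduct = 0.0 * -5.0;   // -0.0, the result JS requires for 0 * -5
    std::printf("%d %d\n", intProduct, std::signbit(jsProduct)); // prints "0 1"
}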
-void JIT::compileBinaryArithOpSlowCase(Instruction* currentInstruction, OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, int result, int op1, int op2, OperandTypes types, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase)
+void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- // We assume that subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset.
- COMPILE_ASSERT(((TagTypeNumber + DoubleEncodeOffset) == 0), TagTypeNumber_PLUS_DoubleEncodeOffset_EQUALS_0);
+ linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
- Jump notImm1;
- Jump notImm2;
- if (op1HasImmediateIntFastCase) {
- notImm2 = getSlowCase(iter);
- } else if (op2HasImmediateIntFastCase) {
- notImm1 = getSlowCase(iter);
- } else {
- notImm1 = getSlowCase(iter);
- notImm2 = getSlowCase(iter);
- }
+ JITNegIC* negIC = bitwise_cast<JITNegIC*>(m_instructionToMathIC.get(currentInstruction));
+ emitMathICSlow(negIC, currentInstruction, operationArithNegateProfiledOptimize, operationArithNegateProfiled, operationArithNegateOptimize);
+}
- linkSlowCase(iter); // Integer overflow case - we could handle this in JIT code, but this is likely rare.
- if (opcodeID == op_mul && !op1HasImmediateIntFastCase && !op2HasImmediateIntFastCase) // op_mul has an extra slow case to handle 0 * negative number.
- linkSlowCase(iter);
+template<typename SnippetGenerator>
+void JIT::emitBitBinaryOpFastPath(Instruction* currentInstruction)
+{
+ int result = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
- Label stubFunctionCall(this);
+#if USE(JSVALUE64)
+ JSValueRegs leftRegs = JSValueRegs(regT0);
+ JSValueRegs rightRegs = JSValueRegs(regT1);
+ JSValueRegs resultRegs = leftRegs;
+ GPRReg scratchGPR = regT2;
+#else
+ JSValueRegs leftRegs = JSValueRegs(regT1, regT0);
+ JSValueRegs rightRegs = JSValueRegs(regT3, regT2);
+ JSValueRegs resultRegs = leftRegs;
+ GPRReg scratchGPR = regT4;
+#endif
+
+ SnippetOperand leftOperand;
+ SnippetOperand rightOperand;
+
+ if (isOperandConstantInt(op1))
+ leftOperand.setConstInt32(getOperandConstantInt(op1));
+ else if (isOperandConstantInt(op2))
+ rightOperand.setConstInt32(getOperandConstantInt(op2));
+
+ RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
+
+ if (!leftOperand.isConst())
+ emitGetVirtualRegister(op1, leftRegs);
+ if (!rightOperand.isConst())
+ emitGetVirtualRegister(op2, rightRegs);
+
+ SnippetGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, scratchGPR);
+
+ gen.generateFastPath(*this);
+
+ ASSERT(gen.didEmitFastPath());
+ gen.endJumpList().link(this);
+ emitPutVirtualRegister(result, resultRegs);
+
+ addSlowCase(gen.slowPathJumpList());
+}
- JITSlowPathCall slowPathCall(this, currentInstruction, opcodeID == op_add ? slow_path_add : opcodeID == op_sub ? slow_path_sub : slow_path_mul);
+void JIT::emit_op_bitand(Instruction* currentInstruction)
+{
+ emitBitBinaryOpFastPath<JITBitAndGenerator>(currentInstruction);
+}
+
+void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitand);
slowPathCall.call();
- Jump end = jump();
-
- if (op1HasImmediateIntFastCase) {
- notImm2.link(this);
- if (!types.second().definitelyIsNumber())
- emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
- emitGetVirtualRegister(op1, regT1);
- convertInt32ToDouble(regT1, fpRegT1);
- add64(tagTypeNumberRegister, regT0);
- move64ToDouble(regT0, fpRegT2);
- } else if (op2HasImmediateIntFastCase) {
- notImm1.link(this);
- if (!types.first().definitelyIsNumber())
- emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
- emitGetVirtualRegister(op2, regT1);
- convertInt32ToDouble(regT1, fpRegT1);
- add64(tagTypeNumberRegister, regT0);
- move64ToDouble(regT0, fpRegT2);
- } else {
- // if we get here, eax is not an int32, edx not yet checked.
- notImm1.link(this);
- if (!types.first().definitelyIsNumber())
- emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
- if (!types.second().definitelyIsNumber())
- emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
- add64(tagTypeNumberRegister, regT0);
- move64ToDouble(regT0, fpRegT1);
- Jump op2isDouble = emitJumpIfNotImmediateInteger(regT1);
- convertInt32ToDouble(regT1, fpRegT2);
- Jump op2wasInteger = jump();
-
- // if we get here, eax IS an int32, edx is not.
- notImm2.link(this);
- if (!types.second().definitelyIsNumber())
- emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
- convertInt32ToDouble(regT0, fpRegT1);
- op2isDouble.link(this);
- add64(tagTypeNumberRegister, regT1);
- move64ToDouble(regT1, fpRegT2);
- op2wasInteger.link(this);
- }
+}
- if (opcodeID == op_add)
- addDouble(fpRegT2, fpRegT1);
- else if (opcodeID == op_sub)
- subDouble(fpRegT2, fpRegT1);
- else if (opcodeID == op_mul)
- mulDouble(fpRegT2, fpRegT1);
- else {
- ASSERT(opcodeID == op_div);
- divDouble(fpRegT2, fpRegT1);
- }
- moveDoubleTo64(fpRegT1, regT0);
- sub64(tagTypeNumberRegister, regT0);
- emitPutVirtualRegister(result, regT0);
+void JIT::emit_op_bitor(Instruction* currentInstruction)
+{
+ emitBitBinaryOpFastPath<JITBitOrGenerator>(currentInstruction);
+}
- end.link(this);
+void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitor);
+ slowPathCall.call();
}
-void JIT::emit_op_add(Instruction* currentInstruction)
+void JIT::emit_op_bitxor(Instruction* currentInstruction)
+{
+ emitBitBinaryOpFastPath<JITBitXorGenerator>(currentInstruction);
+}
+
+void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitxor);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_lshift(Instruction* currentInstruction)
+{
+ emitBitBinaryOpFastPath<JITLeftShiftGenerator>(currentInstruction);
+}
+
+void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
+ linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_lshift);
+ slowPathCall.call();
+}
+
+void JIT::emitRightShiftFastPath(Instruction* currentInstruction, OpcodeID opcodeID)
+{
+ ASSERT(opcodeID == op_rshift || opcodeID == op_urshift);
+
+ JITRightShiftGenerator::ShiftType snippetShiftType = opcodeID == op_rshift ?
+ JITRightShiftGenerator::SignedShift : JITRightShiftGenerator::UnsignedShift;
+
int result = currentInstruction[1].u.operand;
int op1 = currentInstruction[2].u.operand;
int op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
- if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
- addSlowCase();
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_add);
- slowPathCall.call();
- return;
- }
+#if USE(JSVALUE64)
+ JSValueRegs leftRegs = JSValueRegs(regT0);
+ JSValueRegs rightRegs = JSValueRegs(regT1);
+ JSValueRegs resultRegs = leftRegs;
+ GPRReg scratchGPR = regT2;
+ FPRReg scratchFPR = InvalidFPRReg;
+#else
+ JSValueRegs leftRegs = JSValueRegs(regT1, regT0);
+ JSValueRegs rightRegs = JSValueRegs(regT3, regT2);
+ JSValueRegs resultRegs = leftRegs;
+ GPRReg scratchGPR = regT4;
+ FPRReg scratchFPR = fpRegT2;
+#endif
- if (isOperandConstantImmediateInt(op1)) {
- emitGetVirtualRegister(op2, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- addSlowCase(branchAdd32(Overflow, regT0, Imm32(getConstantOperandImmediateInt(op1)), regT1));
- emitFastArithIntToImmNoCheck(regT1, regT0);
- } else if (isOperandConstantImmediateInt(op2)) {
- emitGetVirtualRegister(op1, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- addSlowCase(branchAdd32(Overflow, regT0, Imm32(getConstantOperandImmediateInt(op2)), regT1));
- emitFastArithIntToImmNoCheck(regT1, regT0);
- } else
- compileBinaryArithOp(op_add, result, op1, op2, types);
+ SnippetOperand leftOperand;
+ SnippetOperand rightOperand;
- emitPutVirtualRegister(result);
+ if (isOperandConstantInt(op1))
+ leftOperand.setConstInt32(getOperandConstantInt(op1));
+ else if (isOperandConstantInt(op2))
+ rightOperand.setConstInt32(getOperandConstantInt(op2));
+
+ RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
+
+ if (!leftOperand.isConst())
+ emitGetVirtualRegister(op1, leftRegs);
+ if (!rightOperand.isConst())
+ emitGetVirtualRegister(op2, rightRegs);
+
+ JITRightShiftGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
+ fpRegT0, scratchGPR, scratchFPR, snippetShiftType);
+
+ gen.generateFastPath(*this);
+
+ ASSERT(gen.didEmitFastPath());
+ gen.endJumpList().link(this);
+ emitPutVirtualRegister(result, resultRegs);
+
+ addSlowCase(gen.slowPathJumpList());
+}
+
+void JIT::emit_op_rshift(Instruction* currentInstruction)
+{
+ emitRightShiftFastPath(currentInstruction, op_rshift);
+}
+
+void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_rshift);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_urshift(Instruction* currentInstruction)
+{
+ emitRightShiftFastPath(currentInstruction, op_urshift);
+}
+
+void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_urshift);
+ slowPathCall.call();
+}
+
+ALWAYS_INLINE static OperandTypes getOperandTypes(Instruction* instruction)
+{
+ return OperandTypes(ArithProfile::fromInt(instruction[4].u.operand).lhsResultType(), ArithProfile::fromInt(instruction[4].u.operand).rhsResultType());
+}
+
+void JIT::emit_op_add(Instruction* currentInstruction)
+{
+ ArithProfile* arithProfile = m_codeBlock->arithProfileForPC(currentInstruction);
+ JITAddIC* addIC = m_codeBlock->addJITAddIC(arithProfile);
+ m_instructionToMathIC.add(currentInstruction, addIC);
+ emitMathICFast(addIC, currentInstruction, operationValueAddProfiled, operationValueAdd);
}
void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
+ linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
+
+ JITAddIC* addIC = bitwise_cast<JITAddIC*>(m_instructionToMathIC.get(currentInstruction));
+ emitMathICSlow(addIC, currentInstruction, operationValueAddProfiledOptimize, operationValueAddProfiled, operationValueAddOptimize);
+}
+
+template <typename Generator, typename ProfiledFunction, typename NonProfiledFunction>
+void JIT::emitMathICFast(JITUnaryMathIC<Generator>* mathIC, Instruction* currentInstruction, ProfiledFunction profiledFunction, NonProfiledFunction nonProfiledFunction)
+{
int result = currentInstruction[1].u.operand;
- int op1 = currentInstruction[2].u.operand;
- int op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+ int operand = currentInstruction[2].u.operand;
- if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
- linkDummySlowCase(iter);
- return;
- }
+#if USE(JSVALUE64)
+ // ArithNegate benefits from using the same register as src and dst.
+ // Since regT1 == argumentGPR1, using regT1 avoids shuffling registers to call the slow path.
+ JSValueRegs srcRegs = JSValueRegs(regT1);
+ JSValueRegs resultRegs = JSValueRegs(regT1);
+ GPRReg scratchGPR = regT2;
+#else
+ JSValueRegs srcRegs = JSValueRegs(regT1, regT0);
+ JSValueRegs resultRegs = JSValueRegs(regT3, regT2);
+ GPRReg scratchGPR = regT4;
+#endif
+
+#if ENABLE(MATH_IC_STATS)
+ auto inlineStart = label();
+#endif
- bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1);
- bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2);
- compileBinaryArithOpSlowCase(currentInstruction, op_add, iter, result, op1, op2, types, op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
+ mathIC->m_generator = Generator(resultRegs, srcRegs, scratchGPR);
+
+ emitGetVirtualRegister(operand, srcRegs);
+
+ MathICGenerationState& mathICGenerationState = m_instructionToMathICGenerationState.add(currentInstruction, MathICGenerationState()).iterator->value;
+
+ bool generatedInlineCode = mathIC->generateInline(*this, mathICGenerationState);
+ if (!generatedInlineCode) {
+ ArithProfile* arithProfile = mathIC->arithProfile();
+ if (arithProfile && shouldEmitProfiling())
+ callOperation(profiledFunction, resultRegs, srcRegs, arithProfile);
+ else
+ callOperation(nonProfiledFunction, resultRegs, srcRegs);
+ } else
+ addSlowCase(mathICGenerationState.slowPathJumps);
+
+#if ENABLE(MATH_IC_STATS)
+ auto inlineEnd = label();
+ addLinkTask([=] (LinkBuffer& linkBuffer) {
+ size_t size = static_cast<char*>(linkBuffer.locationOf(inlineEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(inlineStart).executableAddress());
+ mathIC->m_generatedCodeSize += size;
+ });
+#endif
+
+ emitPutVirtualRegister(result, resultRegs);
}
-void JIT::emit_op_mul(Instruction* currentInstruction)
+template <typename Generator, typename ProfiledFunction, typename NonProfiledFunction>
+void JIT::emitMathICFast(JITBinaryMathIC<Generator>* mathIC, Instruction* currentInstruction, ProfiledFunction profiledFunction, NonProfiledFunction nonProfiledFunction)
{
int result = currentInstruction[1].u.operand;
int op1 = currentInstruction[2].u.operand;
int op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
- // For now, only plant a fast int case if the constant operand is greater than zero.
- int32_t value;
- if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
- // Add a special fast case profile because the DFG JIT will expect one.
- m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
- emitGetVirtualRegister(op2, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT1));
- emitFastArithReTagImmediate(regT1, regT0);
- } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
- // Add a special fast case profile because the DFG JIT will expect one.
- m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
- emitGetVirtualRegister(op1, regT0);
- emitJumpSlowCaseIfNotImmediateInteger(regT0);
- addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT1));
- emitFastArithReTagImmediate(regT1, regT0);
+#if USE(JSVALUE64)
+ OperandTypes types = getOperandTypes(copiedInstruction(currentInstruction));
+ JSValueRegs leftRegs = JSValueRegs(regT1);
+ JSValueRegs rightRegs = JSValueRegs(regT2);
+ JSValueRegs resultRegs = JSValueRegs(regT0);
+ GPRReg scratchGPR = regT3;
+ FPRReg scratchFPR = fpRegT2;
+#else
+ OperandTypes types = getOperandTypes(currentInstruction);
+ JSValueRegs leftRegs = JSValueRegs(regT1, regT0);
+ JSValueRegs rightRegs = JSValueRegs(regT3, regT2);
+ JSValueRegs resultRegs = leftRegs;
+ GPRReg scratchGPR = regT4;
+ FPRReg scratchFPR = fpRegT2;
+#endif
+
+ SnippetOperand leftOperand(types.first());
+ SnippetOperand rightOperand(types.second());
+
+ if (isOperandConstantInt(op1))
+ leftOperand.setConstInt32(getOperandConstantInt(op1));
+ else if (isOperandConstantInt(op2))
+ rightOperand.setConstInt32(getOperandConstantInt(op2));
+
+ RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
+
+ mathIC->m_generator = Generator(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, fpRegT0, fpRegT1, scratchGPR, scratchFPR);
+
+ ASSERT(!(Generator::isLeftOperandValidConstant(leftOperand) && Generator::isRightOperandValidConstant(rightOperand)));
+
+ if (!Generator::isLeftOperandValidConstant(leftOperand))
+ emitGetVirtualRegister(op1, leftRegs);
+ if (!Generator::isRightOperandValidConstant(rightOperand))
+ emitGetVirtualRegister(op2, rightRegs);
+
+#if ENABLE(MATH_IC_STATS)
+ auto inlineStart = label();
+#endif
+
+ MathICGenerationState& mathICGenerationState = m_instructionToMathICGenerationState.add(currentInstruction, MathICGenerationState()).iterator->value;
+
+ bool generatedInlineCode = mathIC->generateInline(*this, mathICGenerationState);
+ if (!generatedInlineCode) {
+ if (leftOperand.isConst())
+ emitGetVirtualRegister(op1, leftRegs);
+ else if (rightOperand.isConst())
+ emitGetVirtualRegister(op2, rightRegs);
+ ArithProfile* arithProfile = mathIC->arithProfile();
+ if (arithProfile && shouldEmitProfiling())
+ callOperation(profiledFunction, resultRegs, leftRegs, rightRegs, arithProfile);
+ else
+ callOperation(nonProfiledFunction, resultRegs, leftRegs, rightRegs);
+ } else
+ addSlowCase(mathICGenerationState.slowPathJumps);
+
+#if ENABLE(MATH_IC_STATS)
+ auto inlineEnd = label();
+ addLinkTask([=] (LinkBuffer& linkBuffer) {
+ size_t size = static_cast<char*>(linkBuffer.locationOf(inlineEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(inlineStart).executableAddress());
+ mathIC->m_generatedCodeSize += size;
+ });
+#endif
+
+ emitPutVirtualRegister(result, resultRegs);
+}
+
+template <typename Generator, typename ProfiledRepatchFunction, typename ProfiledFunction, typename RepatchFunction>
+void JIT::emitMathICSlow(JITUnaryMathIC<Generator>* mathIC, Instruction* currentInstruction, ProfiledRepatchFunction profiledRepatchFunction, ProfiledFunction profiledFunction, RepatchFunction repatchFunction)
+{
+ MathICGenerationState& mathICGenerationState = m_instructionToMathICGenerationState.find(currentInstruction)->value;
+ mathICGenerationState.slowPathStart = label();
+
+ int result = currentInstruction[1].u.operand;
+
+#if USE(JSVALUE64)
+ JSValueRegs srcRegs = JSValueRegs(regT1);
+ JSValueRegs resultRegs = JSValueRegs(regT0);
+#else
+ JSValueRegs srcRegs = JSValueRegs(regT1, regT0);
+ JSValueRegs resultRegs = JSValueRegs(regT3, regT2);
+#endif
+
+#if ENABLE(MATH_IC_STATS)
+ auto slowPathStart = label();
+#endif
+
+ ArithProfile* arithProfile = mathIC->arithProfile();
+ if (arithProfile && shouldEmitProfiling()) {
+ if (mathICGenerationState.shouldSlowPathRepatch)
+ mathICGenerationState.slowPathCall = callOperation(reinterpret_cast<J_JITOperation_EJMic>(profiledRepatchFunction), resultRegs, srcRegs, TrustedImmPtr(mathIC));
+ else
+ mathICGenerationState.slowPathCall = callOperation(profiledFunction, resultRegs, srcRegs, arithProfile);
} else
- compileBinaryArithOp(op_mul, result, op1, op2, types);
+ mathICGenerationState.slowPathCall = callOperation(reinterpret_cast<J_JITOperation_EJMic>(repatchFunction), resultRegs, srcRegs, TrustedImmPtr(mathIC));
+
+#if ENABLE(MATH_IC_STATS)
+ auto slowPathEnd = label();
+ addLinkTask([=] (LinkBuffer& linkBuffer) {
+ size_t size = static_cast<char*>(linkBuffer.locationOf(slowPathEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(slowPathStart).executableAddress());
+ mathIC->m_generatedCodeSize += size;
+ });
+#endif
- emitPutVirtualRegister(result);
+ emitPutVirtualRegister(result, resultRegs);
+
+ addLinkTask([=] (LinkBuffer& linkBuffer) {
+ MathICGenerationState& mathICGenerationState = m_instructionToMathICGenerationState.find(currentInstruction)->value;
+ mathIC->finalizeInlineCode(mathICGenerationState, linkBuffer);
+ });
}
-void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+template <typename Generator, typename ProfiledRepatchFunction, typename ProfiledFunction, typename RepatchFunction>
+void JIT::emitMathICSlow(JITBinaryMathIC<Generator>* mathIC, Instruction* currentInstruction, ProfiledRepatchFunction profiledRepatchFunction, ProfiledFunction profiledFunction, RepatchFunction repatchFunction)
{
+ MathICGenerationState& mathICGenerationState = m_instructionToMathICGenerationState.find(currentInstruction)->value;
+ mathICGenerationState.slowPathStart = label();
+
int result = currentInstruction[1].u.operand;
int op1 = currentInstruction[2].u.operand;
int op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
- bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1) && getConstantOperandImmediateInt(op1) > 0;
- bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2) && getConstantOperandImmediateInt(op2) > 0;
- compileBinaryArithOpSlowCase(currentInstruction, op_mul, iter, result, op1, op2, types, op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
+#if USE(JSVALUE64)
+ OperandTypes types = getOperandTypes(copiedInstruction(currentInstruction));
+ JSValueRegs leftRegs = JSValueRegs(regT1);
+ JSValueRegs rightRegs = JSValueRegs(regT2);
+ JSValueRegs resultRegs = JSValueRegs(regT0);
+#else
+ OperandTypes types = getOperandTypes(currentInstruction);
+ JSValueRegs leftRegs = JSValueRegs(regT1, regT0);
+ JSValueRegs rightRegs = JSValueRegs(regT3, regT2);
+ JSValueRegs resultRegs = leftRegs;
+#endif
+
+ SnippetOperand leftOperand(types.first());
+ SnippetOperand rightOperand(types.second());
+
+ if (isOperandConstantInt(op1))
+ leftOperand.setConstInt32(getOperandConstantInt(op1));
+ else if (isOperandConstantInt(op2))
+ rightOperand.setConstInt32(getOperandConstantInt(op2));
+
+ ASSERT(!(Generator::isLeftOperandValidConstant(leftOperand) && Generator::isRightOperandValidConstant(rightOperand)));
+
+ if (Generator::isLeftOperandValidConstant(leftOperand))
+ emitGetVirtualRegister(op1, leftRegs);
+ else if (Generator::isRightOperandValidConstant(rightOperand))
+ emitGetVirtualRegister(op2, rightRegs);
+
+#if ENABLE(MATH_IC_STATS)
+ auto slowPathStart = label();
+#endif
+
+ ArithProfile* arithProfile = mathIC->arithProfile();
+ if (arithProfile && shouldEmitProfiling()) {
+ if (mathICGenerationState.shouldSlowPathRepatch)
+ mathICGenerationState.slowPathCall = callOperation(bitwise_cast<J_JITOperation_EJJMic>(profiledRepatchFunction), resultRegs, leftRegs, rightRegs, TrustedImmPtr(mathIC));
+ else
+ mathICGenerationState.slowPathCall = callOperation(profiledFunction, resultRegs, leftRegs, rightRegs, arithProfile);
+ } else
+ mathICGenerationState.slowPathCall = callOperation(bitwise_cast<J_JITOperation_EJJMic>(repatchFunction), resultRegs, leftRegs, rightRegs, TrustedImmPtr(mathIC));
+
+#if ENABLE(MATH_IC_STATS)
+ auto slowPathEnd = label();
+ addLinkTask([=] (LinkBuffer& linkBuffer) {
+ size_t size = static_cast<char*>(linkBuffer.locationOf(slowPathEnd).executableAddress()) - static_cast<char*>(linkBuffer.locationOf(slowPathStart).executableAddress());
+ mathIC->m_generatedCodeSize += size;
+ });
+#endif
+
+ emitPutVirtualRegister(result, resultRegs);
+
+ addLinkTask([=] (LinkBuffer& linkBuffer) {
+ MathICGenerationState& mathICGenerationState = m_instructionToMathICGenerationState.find(currentInstruction)->value;
+ mathIC->finalizeInlineCode(mathICGenerationState, linkBuffer);
+ });
}
void JIT::emit_op_div(Instruction* currentInstruction)
{
- int dst = currentInstruction[1].u.operand;
+ int result = currentInstruction[1].u.operand;
int op1 = currentInstruction[2].u.operand;
int op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
- if (isOperandConstantImmediateDouble(op1)) {
- emitGetVirtualRegister(op1, regT0);
- add64(tagTypeNumberRegister, regT0);
- move64ToDouble(regT0, fpRegT0);
- } else if (isOperandConstantImmediateInt(op1)) {
- emitLoadInt32ToDouble(op1, fpRegT0);
- } else {
- emitGetVirtualRegister(op1, regT0);
- if (!types.first().definitelyIsNumber())
- emitJumpSlowCaseIfNotImmediateNumber(regT0);
- Jump notInt = emitJumpIfNotImmediateInteger(regT0);
- convertInt32ToDouble(regT0, fpRegT0);
- Jump skipDoubleLoad = jump();
- notInt.link(this);
- add64(tagTypeNumberRegister, regT0);
- move64ToDouble(regT0, fpRegT0);
- skipDoubleLoad.link(this);
- }
+#if USE(JSVALUE64)
+ OperandTypes types = getOperandTypes(copiedInstruction(currentInstruction));
+ JSValueRegs leftRegs = JSValueRegs(regT0);
+ JSValueRegs rightRegs = JSValueRegs(regT1);
+ JSValueRegs resultRegs = leftRegs;
+ GPRReg scratchGPR = regT2;
+#else
+ OperandTypes types = getOperandTypes(currentInstruction);
+ JSValueRegs leftRegs = JSValueRegs(regT1, regT0);
+ JSValueRegs rightRegs = JSValueRegs(regT3, regT2);
+ JSValueRegs resultRegs = leftRegs;
+ GPRReg scratchGPR = regT4;
+#endif
+ FPRReg scratchFPR = fpRegT2;
- if (isOperandConstantImmediateDouble(op2)) {
- emitGetVirtualRegister(op2, regT1);
- add64(tagTypeNumberRegister, regT1);
- move64ToDouble(regT1, fpRegT1);
- } else if (isOperandConstantImmediateInt(op2)) {
- emitLoadInt32ToDouble(op2, fpRegT1);
+ ArithProfile* arithProfile = nullptr;
+ if (shouldEmitProfiling())
+ arithProfile = m_codeBlock->arithProfileForPC(currentInstruction);
+
+ SnippetOperand leftOperand(types.first());
+ SnippetOperand rightOperand(types.second());
+
+ if (isOperandConstantInt(op1))
+ leftOperand.setConstInt32(getOperandConstantInt(op1));
+#if USE(JSVALUE64)
+ else if (isOperandConstantDouble(op1))
+ leftOperand.setConstDouble(getOperandConstantDouble(op1));
+#endif
+ else if (isOperandConstantInt(op2))
+ rightOperand.setConstInt32(getOperandConstantInt(op2));
+#if USE(JSVALUE64)
+ else if (isOperandConstantDouble(op2))
+ rightOperand.setConstDouble(getOperandConstantDouble(op2));
+#endif
+
+ RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
+
+ if (!leftOperand.isConst())
+ emitGetVirtualRegister(op1, leftRegs);
+ if (!rightOperand.isConst())
+ emitGetVirtualRegister(op2, rightRegs);
+
+ JITDivGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
+ fpRegT0, fpRegT1, scratchGPR, scratchFPR, arithProfile);
+
+ gen.generateFastPath(*this);
+
+ if (gen.didEmitFastPath()) {
+ gen.endJumpList().link(this);
+ emitPutVirtualRegister(result, resultRegs);
+
+ addSlowCase(gen.slowPathJumpList());
} else {
- emitGetVirtualRegister(op2, regT1);
- if (!types.second().definitelyIsNumber())
- emitJumpSlowCaseIfNotImmediateNumber(regT1);
- Jump notInt = emitJumpIfNotImmediateInteger(regT1);
- convertInt32ToDouble(regT1, fpRegT1);
- Jump skipDoubleLoad = jump();
- notInt.link(this);
- add64(tagTypeNumberRegister, regT1);
- move64ToDouble(regT1, fpRegT1);
- skipDoubleLoad.link(this);
+ ASSERT(gen.endJumpList().empty());
+ ASSERT(gen.slowPathJumpList().empty());
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_div);
+ slowPathCall.call();
}
- divDouble(fpRegT1, fpRegT0);
-
- // Is the result actually an integer? The DFG JIT would really like to know. If it's
- // not an integer, we increment a count. If this together with the slow case counter
- // is below threshold then the DFG JIT will compile this division with a speculation
- // that the remainder is zero.
-
- // As well, there are cases where a double result here would cause an important field
- // in the heap to sometimes have doubles in it, resulting in double predictions getting
- // propagated to a use site where it might cause damage (such as the index to an array
- // access). So if we are DFG compiling anything in the program, we want this code to
- // ensure that it produces integers whenever possible.
-
- JumpList notInteger;
- branchConvertDoubleToInt32(fpRegT0, regT0, notInteger, fpRegT1);
- // If we've got an integer, we might as well make that the result of the division.
- emitFastArithReTagImmediate(regT0, regT0);
- Jump isInteger = jump();
- notInteger.link(this);
- moveDoubleTo64(fpRegT0, regT0);
- Jump doubleZero = branchTest64(Zero, regT0);
- add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset)->m_counter));
- sub64(tagTypeNumberRegister, regT0);
- Jump trueDouble = jump();
- doubleZero.link(this);
- move(tagTypeNumberRegister, regT0);
- trueDouble.link(this);
- isInteger.link(this);
-
- emitPutVirtualRegister(dst, regT0);
}
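For the record, the removed tail of the old emit_op_div above profiled whether the quotient was really representable as an int32 so the DFG could speculate accordingly; the new code hands an ArithProfile to JITDivGenerator, which presumably takes over that bookkeeping. The check the old code performed amounts to a convert-and-compare, roughly as below (a hedged sketch, not the actual branchConvertDoubleToInt32 implementation, and it assumes the quotient is within int32 range):

// Hedged sketch of an "is this double an int32?" test, excluding -0.
#include <cmath>
#include <cstdint>
#include <cstdio>

static bool isInt32Representable(double quotient)
{
    int32_t asInt = static_cast<int32_t>(quotient); // assumes quotient fits in int32 for this illustration
    return static_cast<double>(asInt) == quotient && !(asInt == 0 && std::signbit(quotient));
}

int main()
{
    std::printf("%d %d\n", isInt32Representable(6.0 / 2.0), isInt32Representable(7.0 / 2.0)); // prints "1 0"
}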
void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- int op1 = currentInstruction[2].u.operand;
- int op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
- if (types.first().definitelyIsNumber() && types.second().definitelyIsNumber()) {
-#ifndef NDEBUG
- breakpoint();
-#endif
- return;
- }
- if (!isOperandConstantImmediateDouble(op1) && !isOperandConstantImmediateInt(op1)) {
- if (!types.first().definitelyIsNumber())
- linkSlowCase(iter);
- }
- if (!isOperandConstantImmediateDouble(op2) && !isOperandConstantImmediateInt(op2)) {
- if (!types.second().definitelyIsNumber())
- linkSlowCase(iter);
- }
- // There is an extra slow case for (op1 * -N) or (-N * op2), to check for 0 since this should produce a result of -0.
+ linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
+
JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_div);
slowPathCall.call();
}
-void JIT::emit_op_sub(Instruction* currentInstruction)
+void JIT::emit_op_mul(Instruction* currentInstruction)
{
- int result = currentInstruction[1].u.operand;
- int op1 = currentInstruction[2].u.operand;
- int op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+ ArithProfile* arithProfile = m_codeBlock->arithProfileForPC(currentInstruction);
+ JITMulIC* mulIC = m_codeBlock->addJITMulIC(arithProfile);
+ m_instructionToMathIC.add(currentInstruction, mulIC);
+ emitMathICFast(mulIC, currentInstruction, operationValueMulProfiled, operationValueMul);
+}
- compileBinaryArithOp(op_sub, result, op1, op2, types);
- emitPutVirtualRegister(result);
+void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
+
+ JITMulIC* mulIC = bitwise_cast<JITMulIC*>(m_instructionToMathIC.get(currentInstruction));
+ emitMathICSlow(mulIC, currentInstruction, operationValueMulProfiledOptimize, operationValueMulProfiled, operationValueMulOptimize);
}
-void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emit_op_sub(Instruction* currentInstruction)
{
- int result = currentInstruction[1].u.operand;
- int op1 = currentInstruction[2].u.operand;
- int op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+ ArithProfile* arithProfile = m_codeBlock->arithProfileForPC(currentInstruction);
+ JITSubIC* subIC = m_codeBlock->addJITSubIC(arithProfile);
+ m_instructionToMathIC.add(currentInstruction, subIC);
+ emitMathICFast(subIC, currentInstruction, operationValueSubProfiled, operationValueSub);
+}
- compileBinaryArithOpSlowCase(currentInstruction, op_sub, iter, result, op1, op2, types, false, false);
+void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
+ JITSubIC* subIC = bitwise_cast<JITSubIC*>(m_instructionToMathIC.get(currentInstruction));
+ emitMathICSlow(subIC, currentInstruction, operationValueSubProfiledOptimize, operationValueSubProfiled, operationValueSubOptimize);
}
-/* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL ------------------------------ */
+void JIT::emit_op_pow(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_pow);
+ slowPathCall.call();
+}
-#endif // USE(JSVALUE64)
+/* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL, OP_POW ------------------------------ */
} // namespace JSC
diff --git a/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp b/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
index 53ac73894..d271d48a0 100644
--- a/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
@@ -1,5 +1,5 @@
/*
-* Copyright (C) 2008 Apple Inc. All rights reserved.
+* Copyright (C) 2008, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -31,59 +31,23 @@
#include "CodeBlock.h"
#include "JITInlines.h"
-#include "JITStubs.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "Interpreter.h"
-#include "Operations.h"
+#include "JSCInlines.h"
#include "ResultType.h"
-#include "SamplingTool.h"
#include "SlowPathCall.h"
namespace JSC {
-void JIT::emit_op_negate(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int src = currentInstruction[2].u.operand;
-
- emitLoad(src, regT1, regT0);
-
- Jump srcNotInt = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
- addSlowCase(branchTest32(Zero, regT0, TrustedImm32(0x7fffffff)));
- neg32(regT0);
- emitStoreInt32(dst, regT0, (dst == src));
-
- Jump end = jump();
-
- srcNotInt.link(this);
- addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
-
- xor32(TrustedImm32(1 << 31), regT1);
- store32(regT1, tagFor(dst));
- if (dst != src)
- store32(regT0, payloadFor(dst));
-
- end.link(this);
-}
-
-void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter); // 0x7fffffff check
- linkSlowCase(iter); // double check
-
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_negate);
- slowPathCall.call();
-}
-
void JIT::emit_compareAndJump(OpcodeID opcode, int op1, int op2, unsigned target, RelationalCondition condition)
{
JumpList notInt32Op1;
JumpList notInt32Op2;
// Character less.
- if (isOperandConstantImmediateChar(op1)) {
+ if (isOperandConstantChar(op1)) {
emitLoad(op2, regT1, regT0);
addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
JumpList failures;
@@ -92,7 +56,7 @@ void JIT::emit_compareAndJump(OpcodeID opcode, int op1, int op2, unsigned target
addJump(branch32(commute(condition), regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
return;
}
- if (isOperandConstantImmediateChar(op2)) {
+ if (isOperandConstantChar(op2)) {
emitLoad(op1, regT1, regT0);
addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
JumpList failures;
@@ -101,11 +65,11 @@ void JIT::emit_compareAndJump(OpcodeID opcode, int op1, int op2, unsigned target
addJump(branch32(condition, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
return;
}
- if (isOperandConstantImmediateInt(op1)) {
+ if (isOperandConstantInt(op1)) {
emitLoad(op2, regT3, regT2);
notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
addJump(branch32(commute(condition), regT2, Imm32(getConstantOperand(op1).asInt32())), target);
- } else if (isOperandConstantImmediateInt(op2)) {
+ } else if (isOperandConstantInt(op2)) {
emitLoad(op1, regT1, regT0);
notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
addJump(branch32(condition, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
@@ -124,28 +88,28 @@ void JIT::emit_compareAndJump(OpcodeID opcode, int op1, int op2, unsigned target
Jump end = jump();
// Double less.
- emitBinaryDoubleOp(opcode, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
+ emitBinaryDoubleOp(opcode, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantInt(op1), isOperandConstantInt(op1) || !isOperandConstantInt(op2));
end.link(this);
}
void JIT::emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondition, size_t (JIT_OPERATION *operation)(ExecState*, EncodedJSValue, EncodedJSValue), bool invert, Vector<SlowCaseEntry>::iterator& iter)
{
- if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
+ if (isOperandConstantChar(op1) || isOperandConstantChar(op2)) {
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
} else {
if (!supportsFloatingPoint()) {
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ if (!isOperandConstantInt(op1) && !isOperandConstantInt(op2))
linkSlowCase(iter); // int32 check
linkSlowCase(iter); // int32 check
} else {
- if (!isOperandConstantImmediateInt(op1)) {
+ if (!isOperandConstantInt(op1)) {
linkSlowCase(iter); // double check
linkSlowCase(iter); // int32 check
}
- if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
+ if (isOperandConstantInt(op1) || !isOperandConstantInt(op2))
linkSlowCase(iter); // double check
}
}
@@ -155,155 +119,6 @@ void JIT::emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondi
emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
}
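All of the branch32 checks against JSValue::Int32Tag, CellTag and LowestTag in this file assume the JSVALUE32_64 layout: a value is a 32-bit tag word plus a 32-bit payload word, and a tag word numerically below the range the real tags occupy is read as the high half of a double. A rough, hedged sketch of that layout (names and the tag value are illustrative assumptions, not the real JSValue definition):

// Hedged sketch of the 32_64 encoding assumed by the tag checks in this file.
#include <cstdint>

struct SketchJSValue32_64 {
    uint32_t payload; // int32 value, cell pointer, or the low half of a double
    uint32_t tag;     // Int32Tag, CellTag, ... ; lower values mean "high half of a double"
};

// e.g. emitLoad(op, regT1, regT0) above loads the tag into regT1 and the payload into regT0,
// and branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)) is the int32 type check.
int main()
{
    SketchJSValue32_64 v;
    v.tag = 0xffffffffu; // stand-in for JSValue::Int32Tag (value assumed, for illustration)
    v.payload = 42;
    return v.payload == 42 ? 0 : 1;
}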
-// LeftShift (<<)
-
-void JIT::emit_op_lshift(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int op1 = currentInstruction[2].u.operand;
- int op2 = currentInstruction[3].u.operand;
-
- if (isOperandConstantImmediateInt(op2)) {
- emitLoad(op1, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
- lshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
- emitStoreInt32(dst, regT0, dst == op1);
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- if (!isOperandConstantImmediateInt(op1))
- addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
- lshift32(regT2, regT0);
- emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
-}
-
-void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- int op1 = currentInstruction[2].u.operand;
- int op2 = currentInstruction[3].u.operand;
-
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
-
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_lshift);
- slowPathCall.call();
-}
-
-// RightShift (>>) and UnsignedRightShift (>>>) helper
-
-void JIT::emitRightShift(Instruction* currentInstruction, bool isUnsigned)
-{
- int dst = currentInstruction[1].u.operand;
- int op1 = currentInstruction[2].u.operand;
- int op2 = currentInstruction[3].u.operand;
-
- // Slow case of rshift makes assumptions about what registers hold the
- // shift arguments, so any changes here must be mirrored there as well.
- if (isOperandConstantImmediateInt(op2)) {
- emitLoad(op1, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
- int shift = getConstantOperand(op2).asInt32() & 0x1f;
- if (shift) {
- if (isUnsigned)
- urshift32(Imm32(shift), regT0);
- else
- rshift32(Imm32(shift), regT0);
- }
- emitStoreInt32(dst, regT0, dst == op1);
- } else {
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- if (!isOperandConstantImmediateInt(op1))
- addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
- if (isUnsigned)
- urshift32(regT2, regT0);
- else
- rshift32(regT2, regT0);
- emitStoreInt32(dst, regT0, dst == op1);
- }
-}
-
-void JIT::emitRightShiftSlowCase(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool isUnsigned)
-{
- int dst = currentInstruction[1].u.operand;
- int op1 = currentInstruction[2].u.operand;
- int op2 = currentInstruction[3].u.operand;
- if (isOperandConstantImmediateInt(op2)) {
- int shift = getConstantOperand(op2).asInt32() & 0x1f;
- // op1 = regT1:regT0
- linkSlowCase(iter); // int32 check
- if (supportsFloatingPointTruncate()) {
- JumpList failures;
- failures.append(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::LowestTag)));
- emitLoadDouble(op1, fpRegT0);
- failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
- if (shift) {
- if (isUnsigned)
- urshift32(Imm32(shift), regT0);
- else
- rshift32(Imm32(shift), regT0);
- }
- move(TrustedImm32(JSValue::Int32Tag), regT1);
- emitStoreInt32(dst, regT0, false);
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
- failures.link(this);
- }
- } else {
- // op1 = regT1:regT0
- // op2 = regT3:regT2
- if (!isOperandConstantImmediateInt(op1)) {
- linkSlowCase(iter); // int32 check -- op1 is not an int
- if (supportsFloatingPointTruncate()) {
- JumpList failures;
- failures.append(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag))); // op1 is not a double
- emitLoadDouble(op1, fpRegT0);
- failures.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag))); // op2 is not an int
- failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
- if (isUnsigned)
- urshift32(regT2, regT0);
- else
- rshift32(regT2, regT0);
- move(TrustedImm32(JSValue::Int32Tag), regT1);
- emitStoreInt32(dst, regT0, false);
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
- failures.link(this);
- }
- }
-
- linkSlowCase(iter); // int32 check - op2 is not an int
- }
-
- JITSlowPathCall slowPathCall(this, currentInstruction, isUnsigned ? slow_path_urshift : slow_path_rshift);
- slowPathCall.call();
-}
-
-// RightShift (>>)
-
-void JIT::emit_op_rshift(Instruction* currentInstruction)
-{
- emitRightShift(currentInstruction, false);
-}
-
-void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- emitRightShiftSlowCase(currentInstruction, iter, false);
-}
-
-// UnsignedRightShift (>>>)
-
-void JIT::emit_op_urshift(Instruction* currentInstruction)
-{
- emitRightShift(currentInstruction, true);
-}
-
-void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- emitRightShiftSlowCase(currentInstruction, iter, true);
-}
-
void JIT::emit_op_unsigned(Instruction* currentInstruction)
{
int result = currentInstruction[1].u.operand;
@@ -325,120 +140,6 @@ void JIT::emitSlow_op_unsigned(Instruction* currentInstruction, Vector<SlowCaseE
slowPathCall.call();
}
-// BitAnd (&)
-
-void JIT::emit_op_bitand(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int op1 = currentInstruction[2].u.operand;
- int op2 = currentInstruction[3].u.operand;
-
- int op;
- int32_t constant;
- if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
- emitLoad(op, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
- and32(Imm32(constant), regT0);
- emitStoreInt32(dst, regT0, dst == op);
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
- and32(regT2, regT0);
- emitStoreInt32(dst, regT0, op1 == dst || op2 == dst);
-}
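The getOperandConstantImmediateInt(op1, op2, op, constant) calls in these removed bodies use an overload that, when one operand is a constant int32, reports the other (non-constant) operand together with the constant, so the commutative bit ops can share a single constant-folding path. A hedged approximation of that contract follows; the names and shape are illustrative, not the real JIT interface.

// Hedged sketch only; isConstInt / constValue stand in for the CodeBlock constant queries.
#include <cstdint>
#include <cstdio>
#include <optional>

struct ConstantPick {
    int nonConstantOperand; // the operand to load into registers
    int32_t constant;       // the folded immediate
};

inline std::optional<ConstantPick> pickConstantOperand(
    int op1, int op2, bool (*isConstInt)(int), int32_t (*constValue)(int))
{
    if (isConstInt(op1))
        return ConstantPick { op2, constValue(op1) };
    if (isConstInt(op2))
        return ConstantPick { op1, constValue(op2) };
    return std::nullopt; // neither side is a constant int32: take the generic two-register path
}

int main()
{
    auto isConst = [](int op) { return op == 2; };      // pretend operand 2 is the constant
    auto value = [](int) -> int32_t { return 0x1f; };
    if (auto pick = pickConstantOperand(1, 2, isConst, value))
        std::printf("load operand %d, fold #%d\n", pick->nonConstantOperand, pick->constant);
}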
-
-void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- int op1 = currentInstruction[2].u.operand;
- int op2 = currentInstruction[3].u.operand;
-
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
-
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitand);
- slowPathCall.call();
-}
-
-// BitOr (|)
-
-void JIT::emit_op_bitor(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int op1 = currentInstruction[2].u.operand;
- int op2 = currentInstruction[3].u.operand;
-
- int op;
- int32_t constant;
- if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
- emitLoad(op, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
- or32(Imm32(constant), regT0);
- emitStoreInt32(dst, regT0, op == dst);
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
- or32(regT2, regT0);
- emitStoreInt32(dst, regT0, op1 == dst || op2 == dst);
-}
-
-void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- int op1 = currentInstruction[2].u.operand;
- int op2 = currentInstruction[3].u.operand;
-
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
-
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitor);
- slowPathCall.call();
-}
-
-// BitXor (^)
-
-void JIT::emit_op_bitxor(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int op1 = currentInstruction[2].u.operand;
- int op2 = currentInstruction[3].u.operand;
-
- int op;
- int32_t constant;
- if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
- emitLoad(op, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
- xor32(Imm32(constant), regT0);
- emitStoreInt32(dst, regT0, op == dst);
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
- addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
- xor32(regT2, regT0);
- emitStoreInt32(dst, regT0, op1 == dst || op2 == dst);
-}
-
-void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- int op1 = currentInstruction[2].u.operand;
- int op2 = currentInstruction[3].u.operand;
-
- if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
-
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitxor);
- slowPathCall.call();
-}
-
void JIT::emit_op_inc(Instruction* currentInstruction)
{
int srcDst = currentInstruction[1].u.operand;
@@ -479,218 +180,6 @@ void JIT::emitSlow_op_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>
slowPathCall.call();
}
-// Addition (+)
-
-void JIT::emit_op_add(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int op1 = currentInstruction[2].u.operand;
- int op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
- addSlowCase();
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_add);
- slowPathCall.call();
- return;
- }
-
- JumpList notInt32Op1;
- JumpList notInt32Op2;
-
- int op;
- int32_t constant;
- if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
- emitAdd32Constant(dst, op, constant, op == op1 ? types.first() : types.second());
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
-
- // Int32 case.
- addSlowCase(branchAdd32(Overflow, regT2, regT0));
- emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
-
- if (!supportsFloatingPoint()) {
- addSlowCase(notInt32Op1);
- addSlowCase(notInt32Op2);
- return;
- }
- Jump end = jump();
-
- // Double case.
- emitBinaryDoubleOp(op_add, dst, op1, op2, types, notInt32Op1, notInt32Op2);
- end.link(this);
-}
-
-void JIT::emitAdd32Constant(int dst, int op, int32_t constant, ResultType opType)
-{
- // Int32 case.
- emitLoad(op, regT1, regT2);
- Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
- addSlowCase(branchAdd32(Overflow, regT2, Imm32(constant), regT0));
- emitStoreInt32(dst, regT0, (op == dst));
-
- // Double case.
- if (!supportsFloatingPoint()) {
- addSlowCase(notInt32);
- return;
- }
- Jump end = jump();
-
- notInt32.link(this);
- if (!opType.definitelyIsNumber())
- addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
- move(Imm32(constant), regT2);
- convertInt32ToDouble(regT2, fpRegT0);
- emitLoadDouble(op, fpRegT1);
- addDouble(fpRegT1, fpRegT0);
- emitStoreDouble(dst, fpRegT0);
-
- end.link(this);
-}
-
-void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- int op1 = currentInstruction[2].u.operand;
- int op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
- linkDummySlowCase(iter);
- return;
- }
-
- int op;
- int32_t constant;
- if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
- linkSlowCase(iter); // overflow check
-
- if (!supportsFloatingPoint())
- linkSlowCase(iter); // non-sse case
- else {
- ResultType opType = op == op1 ? types.first() : types.second();
- if (!opType.definitelyIsNumber())
- linkSlowCase(iter); // double check
- }
- } else {
- linkSlowCase(iter); // overflow check
-
- if (!supportsFloatingPoint()) {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
- } else {
- if (!types.first().definitelyIsNumber())
- linkSlowCase(iter); // double check
-
- if (!types.second().definitelyIsNumber()) {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // double check
- }
- }
- }
-
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_add);
- slowPathCall.call();
-}
-
-// Subtraction (-)
-
-void JIT::emit_op_sub(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int op1 = currentInstruction[2].u.operand;
- int op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- JumpList notInt32Op1;
- JumpList notInt32Op2;
-
- if (isOperandConstantImmediateInt(op2)) {
- emitSub32Constant(dst, op1, getConstantOperand(op2).asInt32(), types.first());
- return;
- }
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
-
- // Int32 case.
- addSlowCase(branchSub32(Overflow, regT2, regT0));
- emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
-
- if (!supportsFloatingPoint()) {
- addSlowCase(notInt32Op1);
- addSlowCase(notInt32Op2);
- return;
- }
- Jump end = jump();
-
- // Double case.
- emitBinaryDoubleOp(op_sub, dst, op1, op2, types, notInt32Op1, notInt32Op2);
- end.link(this);
-}
-
-void JIT::emitSub32Constant(int dst, int op, int32_t constant, ResultType opType)
-{
- // Int32 case.
- emitLoad(op, regT1, regT0);
- Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
- addSlowCase(branchSub32(Overflow, regT0, Imm32(constant), regT2, regT3));
- emitStoreInt32(dst, regT2, (op == dst));
-
- // Double case.
- if (!supportsFloatingPoint()) {
- addSlowCase(notInt32);
- return;
- }
- Jump end = jump();
-
- notInt32.link(this);
- if (!opType.definitelyIsNumber())
- addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
- move(Imm32(constant), regT2);
- convertInt32ToDouble(regT2, fpRegT0);
- emitLoadDouble(op, fpRegT1);
- subDouble(fpRegT0, fpRegT1);
- emitStoreDouble(dst, fpRegT1);
-
- end.link(this);
-}
-
-void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- int op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- if (isOperandConstantImmediateInt(op2)) {
- linkSlowCase(iter); // overflow check
-
- if (!supportsFloatingPoint() || !types.first().definitelyIsNumber())
- linkSlowCase(iter); // int32 or double check
- } else {
- linkSlowCase(iter); // overflow check
-
- if (!supportsFloatingPoint()) {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
- } else {
- if (!types.first().definitelyIsNumber())
- linkSlowCase(iter); // double check
-
- if (!types.second().definitelyIsNumber()) {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // double check
- }
- }
- }
-
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_sub);
- slowPathCall.call();
-}
-
void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, int dst, int op1, int op2, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
{
JumpList end;
@@ -723,50 +212,6 @@ void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, int dst, int op1, int op2, Opera
// Do the math.
doTheMath.link(this);
switch (opcodeID) {
- case op_mul:
- emitLoadDouble(op1, fpRegT2);
- mulDouble(fpRegT2, fpRegT0);
- emitStoreDouble(dst, fpRegT0);
- break;
- case op_add:
- emitLoadDouble(op1, fpRegT2);
- addDouble(fpRegT2, fpRegT0);
- emitStoreDouble(dst, fpRegT0);
- break;
- case op_sub:
- emitLoadDouble(op1, fpRegT1);
- subDouble(fpRegT0, fpRegT1);
- emitStoreDouble(dst, fpRegT1);
- break;
- case op_div: {
- emitLoadDouble(op1, fpRegT1);
- divDouble(fpRegT0, fpRegT1);
-
- // Is the result actually an integer? The DFG JIT would really like to know. If it's
- // not an integer, we increment a count. If this together with the slow case counter
- // is below threshold then the DFG JIT will compile this division with a speculation
- // that the remainder is zero.
-
- // As well, there are cases where a double result here would cause an important field
- // in the heap to sometimes have doubles in it, resulting in double predictions getting
- // propagated to a use site where it might cause damage (such as the index to an array
- // access). So if we are DFG compiling anything in the program, we want this code to
- // ensure that it produces integers whenever possible.
-
- // FIXME: This will fail to convert to integer if the result is zero. We should
- // distinguish between positive zero and negative zero here.
-
- JumpList notInteger;
- branchConvertDoubleToInt32(fpRegT1, regT2, notInteger, fpRegT0);
- // If we've got an integer, we might as well make that the result of the division.
- emitStoreInt32(dst, regT2);
- Jump isInteger = jump();
- notInteger.link(this);
- add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
- emitStoreDouble(dst, fpRegT1);
- isInteger.link(this);
- break;
- }
case op_jless:
emitLoadDouble(op1, fpRegT2);
addJump(branchDouble(DoubleLessThan, fpRegT2, fpRegT0), dst);
@@ -824,49 +269,6 @@ void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, int dst, int op1, int op2, Opera
// Do the math.
switch (opcodeID) {
- case op_mul:
- emitLoadDouble(op2, fpRegT2);
- mulDouble(fpRegT2, fpRegT0);
- emitStoreDouble(dst, fpRegT0);
- break;
- case op_add:
- emitLoadDouble(op2, fpRegT2);
- addDouble(fpRegT2, fpRegT0);
- emitStoreDouble(dst, fpRegT0);
- break;
- case op_sub:
- emitLoadDouble(op2, fpRegT2);
- subDouble(fpRegT2, fpRegT0);
- emitStoreDouble(dst, fpRegT0);
- break;
- case op_div: {
- emitLoadDouble(op2, fpRegT2);
- divDouble(fpRegT2, fpRegT0);
- // Is the result actually an integer? The DFG JIT would really like to know. If it's
- // not an integer, we increment a count. If this together with the slow case counter
- // is below threshold then the DFG JIT will compile this division with a speculation
- // that the remainder is zero.
-
- // As well, there are cases where a double result here would cause an important field
- // in the heap to sometimes have doubles in it, resulting in double predictions getting
- // propagated to a use site where it might cause damage (such as the index to an array
- // access). So if we are DFG compiling anything in the program, we want this code to
- // ensure that it produces integers whenever possible.
-
- // FIXME: This will fail to convert to integer if the result is zero. We should
- // distinguish between positive zero and negative zero here.
-
- JumpList notInteger;
- branchConvertDoubleToInt32(fpRegT0, regT2, notInteger, fpRegT1);
- // If we've got an integer, we might as well make that the result of the division.
- emitStoreInt32(dst, regT2);
- Jump isInteger = jump();
- notInteger.link(this);
- add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
- emitStoreDouble(dst, fpRegT0);
- isInteger.link(this);
- break;
- }
case op_jless:
emitLoadDouble(op2, fpRegT1);
addJump(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), dst);
@@ -907,169 +309,13 @@ void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, int dst, int op1, int op2, Opera
end.link(this);
}
-// Multiplication (*)
-
-void JIT::emit_op_mul(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int op1 = currentInstruction[2].u.operand;
- int op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
-
- JumpList notInt32Op1;
- JumpList notInt32Op2;
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
- notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
-
- // Int32 case.
- move(regT0, regT3);
- addSlowCase(branchMul32(Overflow, regT2, regT0));
- addSlowCase(branchTest32(Zero, regT0));
- emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
-
- if (!supportsFloatingPoint()) {
- addSlowCase(notInt32Op1);
- addSlowCase(notInt32Op2);
- return;
- }
- Jump end = jump();
-
- // Double case.
- emitBinaryDoubleOp(op_mul, dst, op1, op2, types, notInt32Op1, notInt32Op2);
- end.link(this);
-}
-
-void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- int dst = currentInstruction[1].u.operand;
- int op1 = currentInstruction[2].u.operand;
- int op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- Jump overflow = getSlowCase(iter); // overflow check
- linkSlowCase(iter); // zero result check
-
- Jump negZero = branchOr32(Signed, regT2, regT3);
- emitStoreInt32(dst, TrustedImm32(0), (op1 == dst || op2 == dst));
-
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul));
-
- negZero.link(this);
- // We only get here if we have a genuine negative zero. Record this,
- // so that the speculative JIT knows that we failed speculation
- // because of a negative zero.
- add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
- overflow.link(this);
-
- if (!supportsFloatingPoint()) {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // int32 check
- }
-
- if (supportsFloatingPoint()) {
- if (!types.first().definitelyIsNumber())
- linkSlowCase(iter); // double check
-
- if (!types.second().definitelyIsNumber()) {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // double check
- }
- }
-
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mul);
- slowPathCall.call();
-}
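The mul slow path above recovers the one case the int32 fast path cannot express: a zero product whose sign should be negative. branchOr32(Signed, regT2, regT3) ORs the two operands and tests the sign bit, which is the check sketched below (illustrative C++, not JSC code):

    #include <cassert>
    #include <cstdint>

    // Called only when the int32 product is zero: the result must be the double
    // -0.0 exactly when one operand is negative, which the sign bit of (a | b)
    // detects in a single test.
    bool zeroProductNeedsNegativeZero(int32_t a, int32_t b)
    {
        return (a | b) < 0;
    }

    int main()
    {
        assert(zeroProductNeedsNegativeZero(-5, 0));   // -5 * 0  => -0.0
        assert(zeroProductNeedsNegativeZero(0, -7));   //  0 * -7 => -0.0
        assert(!zeroProductNeedsNegativeZero(0, 7));   //  0 * 7  => +0, stays int32
    }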
-
-// Division (/)
-
-void JIT::emit_op_div(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int op1 = currentInstruction[2].u.operand;
- int op2 = currentInstruction[3].u.operand;
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
-
- if (!supportsFloatingPoint()) {
- addSlowCase(jump());
- return;
- }
-
- // Int32 divide.
- JumpList notInt32Op1;
- JumpList notInt32Op2;
-
- JumpList end;
-
- emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
-
- notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
- notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
-
- convertInt32ToDouble(regT0, fpRegT0);
- convertInt32ToDouble(regT2, fpRegT1);
- divDouble(fpRegT1, fpRegT0);
- // Is the result actually an integer? The DFG JIT would really like to know. If it's
- // not an integer, we increment a count. If this together with the slow case counter
- // is below threshold then the DFG JIT will compile this division with a speculation
- // that the remainder is zero.
-
- // As well, there are cases where a double result here would cause an important field
- // in the heap to sometimes have doubles in it, resulting in double predictions getting
- // propagated to a use site where it might cause damage (such as the index to an array
- // access). So if we are DFG compiling anything in the program, we want this code to
- // ensure that it produces integers whenever possible.
-
- // FIXME: This will fail to convert to integer if the result is zero. We should
- // distinguish between positive zero and negative zero here.
-
- JumpList notInteger;
- branchConvertDoubleToInt32(fpRegT0, regT2, notInteger, fpRegT1);
- // If we've got an integer, we might as well make that the result of the division.
- emitStoreInt32(dst, regT2);
- end.append(jump());
- notInteger.link(this);
- add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
- emitStoreDouble(dst, fpRegT0);
- end.append(jump());
-
- // Double divide.
- emitBinaryDoubleOp(op_div, dst, op1, op2, types, notInt32Op1, notInt32Op2);
- end.link(this);
-}
-
-void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
- if (!supportsFloatingPoint())
- linkSlowCase(iter);
- else {
- if (!types.first().definitelyIsNumber())
- linkSlowCase(iter); // double check
-
- if (!types.second().definitelyIsNumber()) {
- linkSlowCase(iter); // int32 check
- linkSlowCase(iter); // double check
- }
- }
-
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_div);
- slowPathCall.call();
-}
-
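The division comment block repeated above wants the double quotient stored back as an int32 whenever that is lossless, and its FIXME notes that a zero result also needs its sign inspected. A standalone C++ sketch of the round-trip test the FIXME is asking for (not the exact behavior of branchConvertDoubleToInt32, which sends every zero to the double path):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    // Truncate, convert back, and compare; reject -0.0 so its sign survives as
    // a double. Illustrative only.
    bool quotientFitsInt32(double q, int32_t& out)
    {
        if (!(q >= INT32_MIN && q <= INT32_MAX))
            return false;                           // NaN, infinity, or out of range
        int32_t truncated = static_cast<int32_t>(q);
        if (static_cast<double>(truncated) != q)
            return false;                           // has a fractional part
        if (!truncated && std::signbit(q))
            return false;                           // -0.0 must stay a double
        out = truncated;
        return true;
    }

    int main()
    {
        int32_t i;
        std::printf("%d %d %d\n",
            quotientFitsInt32(6.0 / 3.0, i),        // 1: store int32 2
            quotientFitsInt32(1.0 / 3.0, i),        // 0: keep the double
            quotientFitsInt32(-0.0, i));            // 0: keep -0.0
    }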
// Mod (%)
/* ------------------------------ BEGIN: OP_MOD ------------------------------ */
void JIT::emit_op_mod(Instruction* currentInstruction)
{
-#if CPU(X86) || CPU(X86_64)
+#if CPU(X86)
int dst = currentInstruction[1].u.operand;
int op1 = currentInstruction[2].u.operand;
int op2 = currentInstruction[3].u.operand;
@@ -1089,8 +335,8 @@ void JIT::emit_op_mod(Instruction* currentInstruction)
Jump denominatorNotNeg1 = branch32(NotEqual, regT2, TrustedImm32(-1));
addSlowCase(branch32(Equal, regT0, TrustedImm32(-2147483647-1)));
denominatorNotNeg1.link(this);
- m_assembler.cdq();
- m_assembler.idivl_r(regT2);
+ x86ConvertToDoubleWord32();
+ x86Div32(regT2);
Jump numeratorPositive = branch32(GreaterThanOrEqual, regT3, TrustedImm32(0));
addSlowCase(branchTest32(Zero, regT1));
numeratorPositive.link(this);
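The x86 mod fast path guards two corner cases before dividing: INT32_MIN / -1 would fault in idiv, and a zero remainder with a negative numerator must become the double -0.0, so both are sent to the slow case. A standalone C++ sketch of those guards (the zero-denominator check is assumed to happen earlier in the real code; illustrative only):

    #include <cassert>
    #include <cstdint>
    #include <limits>

    // Returns false when the caller must fall back to the generic slow path.
    bool fastIntMod(int32_t numerator, int32_t denominator, int32_t& out)
    {
        if (!denominator)
            return false;                                   // result is NaN
        if (denominator == -1 && numerator == std::numeric_limits<int32_t>::min())
            return false;                                   // idiv would fault
        int32_t remainder = numerator % denominator;        // truncates, like idiv
        if (!remainder && numerator < 0)
            return false;                                   // JS wants -0.0 here
        out = remainder;
        return true;
    }

    int main()
    {
        int32_t r;
        assert(fastIntMod(7, 3, r) && r == 1);
        assert(fastIntMod(-7, 3, r) && r == -1);            // sign follows the numerator
        assert(!fastIntMod(-6, 3, r));                      // -6 % 3 is -0 in JS
        assert(!fastIntMod(std::numeric_limits<int32_t>::min(), -1, r));
    }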
diff --git a/Source/JavaScriptCore/jit/JITBitAndGenerator.cpp b/Source/JavaScriptCore/jit/JITBitAndGenerator.cpp
new file mode 100644
index 000000000..715b503d2
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITBitAndGenerator.cpp
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JITBitAndGenerator.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+void JITBitAndGenerator::generateFastPath(CCallHelpers& jit)
+{
+#if USE(JSVALUE64)
+ ASSERT(m_scratchGPR != InvalidGPRReg);
+ ASSERT(m_scratchGPR != m_left.payloadGPR());
+ ASSERT(m_scratchGPR != m_right.payloadGPR());
+#else
+ UNUSED_PARAM(m_scratchGPR);
+#endif
+
+ ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());
+
+ m_didEmitFastPath = true;
+
+ if (m_leftOperand.isConstInt32() || m_rightOperand.isConstInt32()) {
+ JSValueRegs var = m_leftOperand.isConstInt32() ? m_right : m_left;
+ SnippetOperand& constOpr = m_leftOperand.isConstInt32() ? m_leftOperand : m_rightOperand;
+
+ // Try to do intVar & intConstant.
+ m_slowPathJumpList.append(jit.branchIfNotInt32(var));
+
+ jit.moveValueRegs(var, m_result);
+ if (constOpr.asConstInt32() != static_cast<int32_t>(0xffffffff)) {
+#if USE(JSVALUE64)
+ jit.and64(CCallHelpers::Imm32(constOpr.asConstInt32()), m_result.payloadGPR());
+ if (constOpr.asConstInt32() >= 0)
+ jit.or64(GPRInfo::tagTypeNumberRegister, m_result.payloadGPR());
+#else
+ jit.and32(CCallHelpers::Imm32(constOpr.asConstInt32()), m_result.payloadGPR());
+#endif
+ }
+
+ } else {
+ ASSERT(!m_leftOperand.isConstInt32() && !m_rightOperand.isConstInt32());
+
+ // Try to do intVar & intVar.
+#if USE(JSVALUE64)
+ jit.move(m_left.payloadGPR(), m_scratchGPR);
+ jit.and64(m_right.payloadGPR(), m_scratchGPR);
+ m_slowPathJumpList.append(jit.branchIfNotInt32(m_scratchGPR));
+ jit.move(m_scratchGPR, m_result.payloadGPR());
+#else
+ m_slowPathJumpList.append(jit.branchIfNotInt32(m_left));
+ m_slowPathJumpList.append(jit.branchIfNotInt32(m_right));
+ jit.moveValueRegs(m_left, m_result);
+ jit.and32(m_right.payloadGPR(), m_result.payloadGPR());
+#endif
+ }
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
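The 64-bit branches above lean on the JSVALUE64 encoding: a boxed int32 is its 32-bit payload OR'ed with a high number tag, so AND-ing two boxed int32s preserves the tag, while AND-ing with a sign-extended non-negative immediate clears it and forces the or64 re-tag. A standalone sketch of that arithmetic, assuming the usual 0xFFFF000000000000 number tag (not JSC code):

    #include <cassert>
    #include <cstdint>

    constexpr uint64_t kNumberTag = 0xFFFF000000000000ull; // assumed JSVALUE64 tag

    uint64_t boxInt32(int32_t v) { return kNumberTag | static_cast<uint32_t>(v); }

    int main()
    {
        uint64_t a = boxInt32(0x0F0F);
        uint64_t b = boxInt32(0x00FF);

        // var & var: both inputs carry the tag, so a plain 64-bit AND keeps it.
        assert((a & b) == boxInt32(0x0F0F & 0x00FF));

        // var & constant: the immediate is sign-extended. A non-negative constant
        // has zero high bits and strips the tag, so it must be OR'ed back; a
        // negative constant keeps the high bits set and needs no fix-up.
        int32_t c = 0x00FF;
        uint64_t masked = a & static_cast<uint64_t>(static_cast<int64_t>(c));
        assert(masked == static_cast<uint32_t>(0x0F0F & c));   // tag lost
        assert((masked | kNumberTag) == boxInt32(0x0F0F & c)); // re-tagged

        int32_t d = -16;
        assert((a & static_cast<uint64_t>(static_cast<int64_t>(d))) == boxInt32(0x0F0F & d));
    }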
diff --git a/Source/JavaScriptCore/jit/JITBitAndGenerator.h b/Source/JavaScriptCore/jit/JITBitAndGenerator.h
new file mode 100644
index 000000000..7536e86c5
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITBitAndGenerator.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "JITBitBinaryOpGenerator.h"
+
+namespace JSC {
+
+class JITBitAndGenerator : public JITBitBinaryOpGenerator {
+public:
+ JITBitAndGenerator(const SnippetOperand& leftOperand, const SnippetOperand& rightOperand,
+ JSValueRegs result, JSValueRegs left, JSValueRegs right, GPRReg scratchGPR)
+ : JITBitBinaryOpGenerator(leftOperand, rightOperand, result, left, right, scratchGPR)
+ { }
+
+ void generateFastPath(CCallHelpers&);
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITBitBinaryOpGenerator.h b/Source/JavaScriptCore/jit/JITBitBinaryOpGenerator.h
new file mode 100644
index 000000000..128ae18bf
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITBitBinaryOpGenerator.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "CCallHelpers.h"
+#include "SnippetOperand.h"
+
+namespace JSC {
+
+class JITBitBinaryOpGenerator {
+public:
+ JITBitBinaryOpGenerator(const SnippetOperand& leftOperand, const SnippetOperand& rightOperand,
+ JSValueRegs result, JSValueRegs left, JSValueRegs right, GPRReg scratchGPR)
+ : m_leftOperand(leftOperand)
+ , m_rightOperand(rightOperand)
+ , m_result(result)
+ , m_left(left)
+ , m_right(right)
+ , m_scratchGPR(scratchGPR)
+ {
+ ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());
+ }
+
+ bool didEmitFastPath() const { return m_didEmitFastPath; }
+ CCallHelpers::JumpList& endJumpList() { return m_endJumpList; }
+ CCallHelpers::JumpList& slowPathJumpList() { return m_slowPathJumpList; }
+
+protected:
+ SnippetOperand m_leftOperand;
+ SnippetOperand m_rightOperand;
+ JSValueRegs m_result;
+ JSValueRegs m_left;
+ JSValueRegs m_right;
+ GPRReg m_scratchGPR;
+ bool m_didEmitFastPath { false };
+
+ CCallHelpers::JumpList m_endJumpList;
+ CCallHelpers::JumpList m_slowPathJumpList;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITBitOrGenerator.cpp b/Source/JavaScriptCore/jit/JITBitOrGenerator.cpp
new file mode 100644
index 000000000..9f843c135
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITBitOrGenerator.cpp
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JITBitOrGenerator.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+void JITBitOrGenerator::generateFastPath(CCallHelpers& jit)
+{
+ ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());
+
+ m_didEmitFastPath = true;
+
+ if (m_leftOperand.isConstInt32() || m_rightOperand.isConstInt32()) {
+ JSValueRegs var = m_leftOperand.isConstInt32() ? m_right : m_left;
+ SnippetOperand& constOpr = m_leftOperand.isConstInt32() ? m_leftOperand : m_rightOperand;
+
+ // Try to do intVar | intConstant.
+ m_slowPathJumpList.append(jit.branchIfNotInt32(var));
+
+ jit.moveValueRegs(var, m_result);
+ if (constOpr.asConstInt32()) {
+#if USE(JSVALUE64)
+ jit.or32(CCallHelpers::Imm32(constOpr.asConstInt32()), m_result.payloadGPR());
+ jit.or64(GPRInfo::tagTypeNumberRegister, m_result.payloadGPR());
+#else
+ jit.or32(CCallHelpers::Imm32(constOpr.asConstInt32()), m_result.payloadGPR());
+#endif
+ }
+
+ } else {
+ ASSERT(!m_leftOperand.isConstInt32() && !m_rightOperand.isConstInt32());
+
+ // Try to do intVar | intVar.
+ m_slowPathJumpList.append(jit.branchIfNotInt32(m_left));
+ m_slowPathJumpList.append(jit.branchIfNotInt32(m_right));
+
+ jit.moveValueRegs(m_left, m_result);
+#if USE(JSVALUE64)
+ jit.or64(m_right.payloadGPR(), m_result.payloadGPR());
+#else
+ jit.or32(m_right.payloadGPR(), m_result.payloadGPR());
+#endif
+ }
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITBitOrGenerator.h b/Source/JavaScriptCore/jit/JITBitOrGenerator.h
new file mode 100644
index 000000000..1f1d91a07
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITBitOrGenerator.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "JITBitBinaryOpGenerator.h"
+
+namespace JSC {
+
+class JITBitOrGenerator : public JITBitBinaryOpGenerator {
+public:
+ JITBitOrGenerator(const SnippetOperand& leftOperand, const SnippetOperand& rightOperand,
+ JSValueRegs result, JSValueRegs left, JSValueRegs right, GPRReg unused = InvalidGPRReg)
+ : JITBitBinaryOpGenerator(leftOperand, rightOperand, result, left, right, unused)
+ { }
+
+ void generateFastPath(CCallHelpers&);
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITBitXorGenerator.cpp b/Source/JavaScriptCore/jit/JITBitXorGenerator.cpp
new file mode 100644
index 000000000..8ccf1b5d6
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITBitXorGenerator.cpp
@@ -0,0 +1,73 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JITBitXorGenerator.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+void JITBitXorGenerator::generateFastPath(CCallHelpers& jit)
+{
+ ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());
+
+ m_didEmitFastPath = true;
+
+ if (m_leftOperand.isConstInt32() || m_rightOperand.isConstInt32()) {
+ JSValueRegs var = m_leftOperand.isConstInt32() ? m_right : m_left;
+ SnippetOperand& constOpr = m_leftOperand.isConstInt32() ? m_leftOperand : m_rightOperand;
+
+ // Try to do intVar ^ intConstant.
+ m_slowPathJumpList.append(jit.branchIfNotInt32(var));
+
+ jit.moveValueRegs(var, m_result);
+#if USE(JSVALUE64)
+ jit.xor32(CCallHelpers::Imm32(constOpr.asConstInt32()), m_result.payloadGPR());
+ jit.or64(GPRInfo::tagTypeNumberRegister, m_result.payloadGPR());
+#else
+ jit.xor32(CCallHelpers::Imm32(constOpr.asConstInt32()), m_result.payloadGPR());
+#endif
+
+ } else {
+ ASSERT(!m_leftOperand.isConstInt32() && !m_rightOperand.isConstInt32());
+
+ // Try to do intVar ^ intVar.
+ m_slowPathJumpList.append(jit.branchIfNotInt32(m_left));
+ m_slowPathJumpList.append(jit.branchIfNotInt32(m_right));
+
+ jit.moveValueRegs(m_left, m_result);
+#if USE(JSVALUE64)
+ jit.xor64(m_right.payloadGPR(), m_result.payloadGPR());
+ jit.or64(GPRInfo::tagTypeNumberRegister, m_result.payloadGPR());
+#else
+ jit.xor32(m_right.payloadGPR(), m_result.payloadGPR());
+#endif
+ }
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
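Unlike AND, XOR-ing two boxed int32s cancels the identical tag bits, which is why both 64-bit paths above finish with or64(tagTypeNumberRegister, ...). A short sketch under the same assumed 0xFFFF000000000000 tag (not JSC code):

    #include <cassert>
    #include <cstdint>

    constexpr uint64_t kNumberTag = 0xFFFF000000000000ull; // assumed JSVALUE64 tag

    uint64_t boxInt32(int32_t v) { return kNumberTag | static_cast<uint32_t>(v); }

    int main()
    {
        uint64_t a = boxInt32(0b1100);
        uint64_t b = boxInt32(0b1010);

        uint64_t raw = a ^ b;                           // tag ^ tag == 0: box is gone
        assert(raw == (0b1100 ^ 0b1010));
        assert((raw | kNumberTag) == boxInt32(0b0110)); // or64 with the tag restores it
    }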
diff --git a/Source/JavaScriptCore/jit/JITBitXorGenerator.h b/Source/JavaScriptCore/jit/JITBitXorGenerator.h
new file mode 100644
index 000000000..e5b5fb2e4
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITBitXorGenerator.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "JITBitBinaryOpGenerator.h"
+
+namespace JSC {
+
+class JITBitXorGenerator : public JITBitBinaryOpGenerator {
+public:
+ JITBitXorGenerator(const SnippetOperand& leftOperand, const SnippetOperand& rightOperand,
+ JSValueRegs result, JSValueRegs left, JSValueRegs right, GPRReg unused = InvalidGPRReg)
+ : JITBitBinaryOpGenerator(leftOperand, rightOperand, result, left, right, unused)
+ { }
+
+ void generateFastPath(CCallHelpers&);
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITCall.cpp b/Source/JavaScriptCore/jit/JITCall.cpp
index 90c2e4fb9..ff1cc6122 100644
--- a/Source/JavaScriptCore/jit/JITCall.cpp
+++ b/Source/JavaScriptCore/jit/JITCall.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2013-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,16 +29,17 @@
#if USE(JSVALUE64)
#include "JIT.h"
-#include "Arguments.h"
+#include "CallFrameShuffler.h"
#include "CodeBlock.h"
#include "JITInlines.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "Interpreter.h"
-#include "Operations.h"
-#include "RepatchBuffer.h"
+#include "JSCInlines.h"
+#include "LinkBuffer.h"
#include "ResultType.h"
-#include "SamplingTool.h"
+#include "SetupVarargsFrame.h"
+#include "StackAlignment.h"
#include "ThunkGenerators.h"
#include <wtf/StringPrintStream.h>
@@ -52,74 +53,57 @@ void JIT::emitPutCallResult(Instruction* instruction)
emitPutVirtualRegister(dst);
}
-void JIT::compileLoadVarargs(Instruction* instruction)
+void JIT::compileSetupVarargsFrame(OpcodeID opcode, Instruction* instruction, CallLinkInfo* info)
{
int thisValue = instruction[3].u.operand;
int arguments = instruction[4].u.operand;
int firstFreeRegister = instruction[5].u.operand;
-
- JumpList slowCase;
- JumpList end;
- bool canOptimize = m_codeBlock->usesArguments()
- && arguments == m_codeBlock->argumentsRegister().offset()
- && !m_codeBlock->symbolTable()->slowArguments();
-
- if (canOptimize) {
- emitGetVirtualRegister(arguments, regT0);
- slowCase.append(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(JSValue()))));
-
- emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT0);
- slowCase.append(branch32(Above, regT0, TrustedImm32(Arguments::MaxArguments + 1)));
- // regT0: argumentCountIncludingThis
-
- move(regT0, regT1);
- neg64(regT1);
- add64(TrustedImm32(firstFreeRegister - JSStack::CallFrameHeaderSize), regT1);
- lshift64(TrustedImm32(3), regT1);
- addPtr(callFrameRegister, regT1);
- // regT1: newCallFrame
-
- slowCase.append(branchPtr(Above, AbsoluteAddress(m_vm->addressOfJSStackLimit()), regT1));
-
- // Initialize ArgumentCount.
- store32(regT0, Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
-
- // Initialize 'this'.
- emitGetVirtualRegister(thisValue, regT2);
- store64(regT2, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
-
- // Copy arguments.
- signExtend32ToPtr(regT0, regT0);
- end.append(branchSub64(Zero, TrustedImm32(1), regT0));
- // regT0: argumentCount
-
- Label copyLoop = label();
- load64(BaseIndex(callFrameRegister, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT2);
- store64(regT2, BaseIndex(regT1, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
- branchSub64(NonZero, TrustedImm32(1), regT0).linkTo(copyLoop, this);
-
- end.append(jump());
- }
-
- if (canOptimize)
- slowCase.link(this);
+ int firstVarArgOffset = instruction[6].u.operand;
emitGetVirtualRegister(arguments, regT1);
- callOperation(operationSizeAndAllocFrameForVarargs, regT1, firstFreeRegister);
- emitGetVirtualRegister(thisValue, regT1);
+ Z_JITOperation_EJZZ sizeOperation;
+ if (opcode == op_tail_call_forward_arguments)
+ sizeOperation = operationSizeFrameForForwardArguments;
+ else
+ sizeOperation = operationSizeFrameForVarargs;
+ callOperation(sizeOperation, regT1, -firstFreeRegister, firstVarArgOffset);
+ move(TrustedImm32(-firstFreeRegister), regT1);
+ emitSetVarargsFrame(*this, returnValueGPR, false, regT1, regT1);
+ addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(void*)))), regT1, stackPointerRegister);
emitGetVirtualRegister(arguments, regT2);
- callOperation(operationLoadVarargs, returnValueGPR, regT1, regT2);
+ F_JITOperation_EFJZZ setupOperation;
+ if (opcode == op_tail_call_forward_arguments)
+ setupOperation = operationSetupForwardArgumentsFrame;
+ else
+ setupOperation = operationSetupVarargsFrame;
+ callOperation(setupOperation, regT1, regT2, firstVarArgOffset, regT0);
move(returnValueGPR, regT1);
- if (canOptimize)
- end.link(this);
+ // Profile the argument count.
+ load32(Address(regT1, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset), regT2);
+ load32(info->addressOfMaxNumArguments(), regT0);
+ Jump notBiggest = branch32(Above, regT0, regT2);
+ store32(regT2, info->addressOfMaxNumArguments());
+ notBiggest.link(this);
+
+ // Initialize 'this'.
+ emitGetVirtualRegister(thisValue, regT0);
+ store64(regT0, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
+
+ addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), regT1, stackPointerRegister);
}
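Before making the size and setup calls, compileSetupVarargsFrame drops the stack pointer by sizeof(CallerFrameAndPC) plus five pointer-sized scratch slots rounded up to the stack alignment. A small sketch of that rounding arithmetic (the alignment and frame-header sizes here are assumptions of the sketch, not values read from the JSC headers):

    #include <cassert>
    #include <cstddef>
    #include <cstdio>

    // Round x up to a power-of-two multiple, as used for the scratch area above.
    constexpr size_t roundUpToMultipleOf(size_t divisor, size_t x)
    {
        return (x + divisor - 1) & ~(divisor - 1);
    }

    int main()
    {
        const size_t stackAlignment = 16;                   // assumed ABI alignment
        const size_t callerFrameAndPC = 2 * sizeof(void*);  // saved frame pointer + return PC
        size_t scratch = roundUpToMultipleOf(stackAlignment, 5 * sizeof(void*));
        assert(scratch % stackAlignment == 0 && scratch >= 5 * sizeof(void*));
        std::printf("reserve %zu bytes below the new frame\n", callerFrameAndPC + scratch);
    }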
void JIT::compileCallEval(Instruction* instruction)
{
- callOperationWithCallFrameRollbackOnException(operationCallEval);
+ addPtr(TrustedImm32(-static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))), stackPointerRegister, regT1);
+ storePtr(callFrameRegister, Address(regT1, CallFrame::callerFrameOffset()));
+
+ addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
+ checkStackPointerAlignment();
+
+ callOperation(operationCallEval, regT1);
+
addSlowCase(branch64(Equal, regT0, TrustedImm64(JSValue::encode(JSValue()))));
- emitGetCallerFrameFromCallFrameHeaderPtr(callFrameRegister);
sampleCodeBlock(m_codeBlock);
@@ -128,10 +112,18 @@ void JIT::compileCallEval(Instruction* instruction)
void JIT::compileCallEvalSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
{
+ CallLinkInfo* info = m_codeBlock->addCallLinkInfo();
+ info->setUpCall(CallLinkInfo::Call, CodeOrigin(m_bytecodeOffset), regT0);
+
linkSlowCase(iter);
+ int registerOffset = -instruction[4].u.operand;
- emitGetFromCallFrameHeader64(JSStack::Callee, regT0);
- emitNakedCall(m_vm->getCTIStub(virtualCallThunkGenerator).code());
+ addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
+
+ load64(Address(stackPointerRegister, sizeof(Register) * CallFrameSlot::callee - sizeof(CallerFrameAndPC)), regT0);
+ emitDumbVirtualCall(info);
+ addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
+ checkStackPointerAlignment();
sampleCodeBlock(m_codeBlock);
@@ -147,17 +139,25 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
- Initializes ArgumentCount; CallerFrame; Callee.
For a JS call:
- - Caller initializes ScopeChain.
- Callee initializes ReturnPC; CodeBlock.
- Callee restores callFrameRegister before return.
For a non-JS call:
- - Caller initializes ScopeChain; ReturnPC; CodeBlock.
+ - Caller initializes ReturnPC; CodeBlock.
- Caller restores callFrameRegister after return.
*/
-
- if (opcodeID == op_call_varargs)
- compileLoadVarargs(instruction);
+ COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct), call_and_construct_opcodes_must_be_same_length);
+ COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_varargs), call_and_call_varargs_opcodes_must_be_same_length);
+ COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct_varargs), call_and_construct_varargs_opcodes_must_be_same_length);
+ COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call), call_and_tail_call_opcodes_must_be_same_length);
+ COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call_varargs), call_and_tail_call_varargs_opcodes_must_be_same_length);
+ COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call_forward_arguments), call_and_tail_call_forward_arguments_opcodes_must_be_same_length);
+
+ CallLinkInfo* info = nullptr;
+ if (opcodeID != op_call_eval)
+ info = m_codeBlock->addCallLinkInfo();
+ if (opcodeID == op_call_varargs || opcodeID == op_construct_varargs || opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments)
+ compileSetupVarargsFrame(opcodeID, instruction, info);
else {
int argCount = instruction[3].u.operand;
int registerOffset = -instruction[4].u.operand;
@@ -165,23 +165,21 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
if (opcodeID == op_call && shouldEmitProfiling()) {
emitGetVirtualRegister(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0);
Jump done = emitJumpIfNotJSCell(regT0);
- loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
- storePtr(regT0, instruction[6].u.arrayProfile->addressOfLastSeenStructure());
+ load32(Address(regT0, JSCell::structureIDOffset()), regT0);
+ store32(regT0, instruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile->addressOfLastSeenStructureID());
done.link(this);
}
- addPtr(TrustedImm32(registerOffset * sizeof(Register)), callFrameRegister, regT1);
- store32(TrustedImm32(argCount), Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
- } // regT1 holds newCallFrame with ArgumentCount initialized.
+ addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
+ store32(TrustedImm32(argCount), Address(stackPointerRegister, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
+ } // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized.
uint32_t bytecodeOffset = instruction - m_codeBlock->instructions().begin();
- uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(bytecodeOffset);
- store32(TrustedImm32(locationBits), Address(callFrameRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
- emitGetVirtualRegister(callee, regT0); // regT0 holds callee.
+ uint32_t locationBits = CallSiteIndex(bytecodeOffset).bits();
+ store32(TrustedImm32(locationBits), Address(callFrameRegister, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + TagOffset));
- store64(callFrameRegister, Address(regT1, CallFrame::callerFrameOffset()));
- store64(regT0, Address(regT1, JSStack::Callee * static_cast<int>(sizeof(Register))));
- move(regT1, callFrameRegister);
+ emitGetVirtualRegister(callee, regT0); // regT0 holds callee.
+ store64(regT0, Address(stackPointerRegister, CallFrameSlot::callee * static_cast<int>(sizeof(Register)) - sizeof(CallerFrameAndPC)));
if (opcodeID == op_call_eval) {
compileCallEval(instruction);
@@ -192,15 +190,45 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));
addSlowCase(slowCase);
- ASSERT(m_callStructureStubCompilationInfo.size() == callLinkInfoIndex);
- m_callStructureStubCompilationInfo.append(StructureStubCompilationInfo());
- m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
- m_callStructureStubCompilationInfo[callLinkInfoIndex].callType = CallLinkInfo::callTypeFor(opcodeID);
- m_callStructureStubCompilationInfo[callLinkInfoIndex].bytecodeIndex = m_bytecodeOffset;
+ ASSERT(m_callCompilationInfo.size() == callLinkInfoIndex);
+ info->setUpCall(CallLinkInfo::callTypeFor(opcodeID), CodeOrigin(m_bytecodeOffset), regT0);
+ m_callCompilationInfo.append(CallCompilationInfo());
+ m_callCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
+ m_callCompilationInfo[callLinkInfoIndex].callLinkInfo = info;
+
+ if (opcodeID == op_tail_call) {
+ CallFrameShuffleData shuffleData;
+ shuffleData.numPassedArgs = instruction[3].u.operand;
+ shuffleData.tagTypeNumber = GPRInfo::tagTypeNumberRegister;
+ shuffleData.numLocals =
+ instruction[4].u.operand - sizeof(CallerFrameAndPC) / sizeof(Register);
+ shuffleData.args.resize(instruction[3].u.operand);
+ for (int i = 0; i < instruction[3].u.operand; ++i) {
+ shuffleData.args[i] =
+ ValueRecovery::displacedInJSStack(
+ virtualRegisterForArgument(i) - instruction[4].u.operand,
+ DataFormatJS);
+ }
+ shuffleData.callee =
+ ValueRecovery::inGPR(regT0, DataFormatJS);
+ shuffleData.setupCalleeSaveRegisters(m_codeBlock);
+ info->setFrameShuffleData(shuffleData);
+ CallFrameShuffler(*this, shuffleData).prepareForTailCall();
+ m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedTailCall();
+ return;
+ }
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT1);
- emitPutToCallFrameHeader(regT1, JSStack::ScopeChain);
- m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
+ if (opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments) {
+ emitRestoreCalleeSaves();
+ prepareForTailCallSlow();
+ m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedTailCall();
+ return;
+ }
+
+ m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
+
+ addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
+ checkStackPointerAlignment();
sampleCodeBlock(m_codeBlock);
@@ -216,62 +244,34 @@ void JIT::compileOpCallSlowCase(OpcodeID opcodeID, Instruction* instruction, Vec
linkSlowCase(iter);
- m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? m_vm->getCTIStub(linkConstructThunkGenerator).code() : m_vm->getCTIStub(linkCallThunkGenerator).code());
+ if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments)
+ emitRestoreCalleeSaves();
+
+ move(TrustedImmPtr(m_callCompilationInfo[callLinkInfoIndex].callLinkInfo), regT2);
+
+ m_callCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_vm->getCTIStub(linkCallThunkGenerator).code());
+
+ if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs) {
+ abortWithReason(JITDidReturnFromTailCall);
+ return;
+ }
+
+ addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
+ checkStackPointerAlignment();
sampleCodeBlock(m_codeBlock);
emitPutCallResult(instruction);
}
-void JIT::privateCompileClosureCall(CallLinkInfo* callLinkInfo, CodeBlock* calleeCodeBlock, Structure* expectedStructure, ExecutableBase* expectedExecutable, MacroAssemblerCodePtr codePtr)
+void JIT::emit_op_call(Instruction* currentInstruction)
{
- JumpList slowCases;
-
- slowCases.append(branchTestPtr(NonZero, regT0, tagMaskRegister));
- slowCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(expectedStructure)));
- slowCases.append(branchPtr(NotEqual, Address(regT0, JSFunction::offsetOfExecutable()), TrustedImmPtr(expectedExecutable)));
-
- loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT1);
- emitPutToCallFrameHeader(regT1, JSStack::ScopeChain);
-
- Call call = nearCall();
- Jump done = jump();
-
- slowCases.link(this);
- move(TrustedImmPtr(callLinkInfo->callReturnLocation.executableAddress()), regT2);
- restoreReturnAddressBeforeReturn(regT2);
- Jump slow = jump();
-
- LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
-
- patchBuffer.link(call, FunctionPtr(codePtr.executableAddress()));
- patchBuffer.link(done, callLinkInfo->hotPathOther.labelAtOffset(0));
- patchBuffer.link(slow, CodeLocationLabel(m_vm->getCTIStub(virtualCallThunkGenerator).code()));
-
- RefPtr<ClosureCallStubRoutine> stubRoutine = adoptRef(new ClosureCallStubRoutine(
- FINALIZE_CODE(
- patchBuffer,
- ("Baseline closure call stub for %s, return point %p, target %p (%s)",
- toCString(*m_codeBlock).data(),
- callLinkInfo->hotPathOther.labelAtOffset(0).executableAddress(),
- codePtr.executableAddress(),
- toCString(pointerDump(calleeCodeBlock)).data())),
- *m_vm, m_codeBlock->ownerExecutable(), expectedStructure, expectedExecutable,
- callLinkInfo->codeOrigin));
-
- RepatchBuffer repatchBuffer(m_codeBlock);
-
- repatchBuffer.replaceWithJump(
- RepatchBuffer::startOfBranchPtrWithPatchOnRegister(callLinkInfo->hotPathBegin),
- CodeLocationLabel(stubRoutine->code().code()));
- repatchBuffer.relink(callLinkInfo->callReturnLocation, m_vm->getCTIStub(virtualCallThunkGenerator).code());
-
- callLinkInfo->stub = stubRoutine.release();
+ compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
}
-void JIT::emit_op_call(Instruction* currentInstruction)
+void JIT::emit_op_tail_call(Instruction* currentInstruction)
{
- compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
+ compileOpCall(op_tail_call, currentInstruction, m_callLinkInfoIndex++);
}
void JIT::emit_op_call_eval(Instruction* currentInstruction)
@@ -284,6 +284,21 @@ void JIT::emit_op_call_varargs(Instruction* currentInstruction)
compileOpCall(op_call_varargs, currentInstruction, m_callLinkInfoIndex++);
}
+void JIT::emit_op_tail_call_varargs(Instruction* currentInstruction)
+{
+ compileOpCall(op_tail_call_varargs, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_tail_call_forward_arguments(Instruction* currentInstruction)
+{
+ compileOpCall(op_tail_call_forward_arguments, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_construct_varargs(Instruction* currentInstruction)
+{
+ compileOpCall(op_construct_varargs, currentInstruction, m_callLinkInfoIndex++);
+}
+
void JIT::emit_op_construct(Instruction* currentInstruction)
{
compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
@@ -294,6 +309,11 @@ void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry
compileOpCallSlowCase(op_call, currentInstruction, iter, m_callLinkInfoIndex++);
}
+void JIT::emitSlow_op_tail_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(op_tail_call, currentInstruction, iter, m_callLinkInfoIndex++);
+}
+
void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
compileOpCallSlowCase(op_call_eval, currentInstruction, iter, m_callLinkInfoIndex);
@@ -304,6 +324,21 @@ void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowC
compileOpCallSlowCase(op_call_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
}
+void JIT::emitSlow_op_tail_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(op_tail_call_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
+}
+
+void JIT::emitSlow_op_tail_call_forward_arguments(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(op_tail_call_forward_arguments, currentInstruction, iter, m_callLinkInfoIndex++);
+}
+
+void JIT::emitSlow_op_construct_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(op_construct_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
+}
+
void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
compileOpCallSlowCase(op_construct, currentInstruction, iter, m_callLinkInfoIndex++);
diff --git a/Source/JavaScriptCore/jit/JITCall32_64.cpp b/Source/JavaScriptCore/jit/JITCall32_64.cpp
index 6086038a2..573b0623b 100644
--- a/Source/JavaScriptCore/jit/JITCall32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITCall32_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2013-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,16 +29,16 @@
#if USE(JSVALUE32_64)
#include "JIT.h"
-#include "Arguments.h"
#include "CodeBlock.h"
#include "Interpreter.h"
#include "JITInlines.h"
#include "JSArray.h"
#include "JSFunction.h"
-#include "Operations.h"
-#include "RepatchBuffer.h"
+#include "JSCInlines.h"
+#include "LinkBuffer.h"
#include "ResultType.h"
-#include "SamplingTool.h"
+#include "SetupVarargsFrame.h"
+#include "StackAlignment.h"
#include <wtf/StringPrintStream.h>
@@ -56,43 +56,21 @@ void JIT::emit_op_ret(Instruction* currentInstruction)
unsigned dst = currentInstruction[1].u.operand;
emitLoad(dst, regT1, regT0);
- emitGetReturnPCFromCallFrameHeaderPtr(regT2);
- emitGetCallerFrameFromCallFrameHeaderPtr(callFrameRegister);
- restoreReturnAddressBeforeReturn(regT2);
+ checkStackPointerAlignment();
+ emitRestoreCalleeSaves();
+ emitFunctionEpilogue();
ret();
}
-void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction)
+void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- unsigned result = currentInstruction[1].u.operand;
- unsigned thisReg = currentInstruction[2].u.operand;
-
- emitLoad(result, regT1, regT0);
- Jump notJSCell = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- Jump notObject = emitJumpIfNotObject(regT2);
-
- emitGetReturnPCFromCallFrameHeaderPtr(regT2);
- emitGetCallerFrameFromCallFrameHeaderPtr(callFrameRegister);
-
- restoreReturnAddressBeforeReturn(regT2);
- ret();
-
- notJSCell.link(this);
- notObject.link(this);
- emitLoad(thisReg, regT1, regT0);
-
- emitGetReturnPCFromCallFrameHeaderPtr(regT2);
- emitGetCallerFrameFromCallFrameHeaderPtr(callFrameRegister);
-
- restoreReturnAddressBeforeReturn(regT2);
- ret();
+ compileOpCallSlowCase(op_call, currentInstruction, iter, m_callLinkInfoIndex++);
}
-void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_tail_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- compileOpCallSlowCase(op_call, currentInstruction, iter, m_callLinkInfoIndex++);
+ compileOpCallSlowCase(op_tail_call, currentInstruction, iter, m_callLinkInfoIndex++);
}
void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -105,6 +83,21 @@ void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowC
compileOpCallSlowCase(op_call_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
}
+void JIT::emitSlow_op_tail_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(op_tail_call_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
+}
+
+void JIT::emitSlow_op_tail_call_forward_arguments(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(op_tail_call_forward_arguments, currentInstruction, iter, m_callLinkInfoIndex++);
+}
+
+void JIT::emitSlow_op_construct_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ compileOpCallSlowCase(op_construct_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
+}
+
void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
compileOpCallSlowCase(op_construct, currentInstruction, iter, m_callLinkInfoIndex++);
@@ -115,6 +108,11 @@ void JIT::emit_op_call(Instruction* currentInstruction)
compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
}
+void JIT::emit_op_tail_call(Instruction* currentInstruction)
+{
+ compileOpCall(op_tail_call, currentInstruction, m_callLinkInfoIndex++);
+}
+
void JIT::emit_op_call_eval(Instruction* currentInstruction)
{
compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex);
@@ -125,82 +123,77 @@ void JIT::emit_op_call_varargs(Instruction* currentInstruction)
compileOpCall(op_call_varargs, currentInstruction, m_callLinkInfoIndex++);
}
+void JIT::emit_op_tail_call_varargs(Instruction* currentInstruction)
+{
+ compileOpCall(op_tail_call_varargs, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_tail_call_forward_arguments(Instruction* currentInstruction)
+{
+ compileOpCall(op_tail_call_forward_arguments, currentInstruction, m_callLinkInfoIndex++);
+}
+
+void JIT::emit_op_construct_varargs(Instruction* currentInstruction)
+{
+ compileOpCall(op_construct_varargs, currentInstruction, m_callLinkInfoIndex++);
+}
+
void JIT::emit_op_construct(Instruction* currentInstruction)
{
compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
}
-void JIT::compileLoadVarargs(Instruction* instruction)
+void JIT::compileSetupVarargsFrame(OpcodeID opcode, Instruction* instruction, CallLinkInfo* info)
{
int thisValue = instruction[3].u.operand;
int arguments = instruction[4].u.operand;
int firstFreeRegister = instruction[5].u.operand;
-
- JumpList slowCase;
- JumpList end;
- bool canOptimize = m_codeBlock->usesArguments()
- && VirtualRegister(arguments) == m_codeBlock->argumentsRegister()
- && !m_codeBlock->symbolTable()->slowArguments();
-
- if (canOptimize) {
- emitLoadTag(arguments, regT1);
- slowCase.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag)));
-
- load32(payloadFor(JSStack::ArgumentCount), regT2);
- slowCase.append(branch32(Above, regT2, TrustedImm32(Arguments::MaxArguments + 1)));
- // regT2: argumentCountIncludingThis
-
- move(regT2, regT3);
- neg32(regT3);
- add32(TrustedImm32(firstFreeRegister - JSStack::CallFrameHeaderSize), regT3);
- lshift32(TrustedImm32(3), regT3);
- addPtr(callFrameRegister, regT3);
- // regT3: newCallFrame
-
- slowCase.append(branchPtr(Above, AbsoluteAddress(m_vm->addressOfJSStackLimit()), regT3));
-
- // Initialize ArgumentCount.
- store32(regT2, payloadFor(JSStack::ArgumentCount, regT3));
-
- // Initialize 'this'.
- emitLoad(thisValue, regT1, regT0);
- store32(regT0, Address(regT3, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
- store32(regT1, Address(regT3, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
-
- // Copy arguments.
- end.append(branchSub32(Zero, TrustedImm32(1), regT2));
- // regT2: argumentCount;
-
- Label copyLoop = label();
- load32(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))), regT0);
- load32(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))), regT1);
- store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
- store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) +(CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
- branchSub32(NonZero, TrustedImm32(1), regT2).linkTo(copyLoop, this);
-
- end.append(jump());
- }
-
- if (canOptimize)
- slowCase.link(this);
+ int firstVarArgOffset = instruction[6].u.operand;
emitLoad(arguments, regT1, regT0);
- callOperation(operationSizeAndAllocFrameForVarargs, regT1, regT0, firstFreeRegister);
- move(returnValueGPR, regT5);
- emitLoad(thisValue, regT1, regT0);
- emitLoad(arguments, regT3, regT2);
- callOperation(operationLoadVarargs, regT5, regT1, regT0, regT3, regT2);
- move(returnValueGPR, regT3);
-
- if (canOptimize)
- end.link(this);
+ Z_JITOperation_EJZZ sizeOperation;
+ if (opcode == op_tail_call_forward_arguments)
+ sizeOperation = operationSizeFrameForForwardArguments;
+ else
+ sizeOperation = operationSizeFrameForVarargs;
+ callOperation(sizeOperation, regT1, regT0, -firstFreeRegister, firstVarArgOffset);
+ move(TrustedImm32(-firstFreeRegister), regT1);
+ emitSetVarargsFrame(*this, returnValueGPR, false, regT1, regT1);
+ addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 6 * sizeof(void*)))), regT1, stackPointerRegister);
+ emitLoad(arguments, regT2, regT4);
+ F_JITOperation_EFJZZ setupOperation;
+ if (opcode == op_tail_call_forward_arguments)
+ setupOperation = operationSetupForwardArgumentsFrame;
+ else
+ setupOperation = operationSetupVarargsFrame;
+ callOperation(setupOperation, regT1, regT2, regT4, firstVarArgOffset, regT0);
+ move(returnValueGPR, regT1);
+
+ // Profile the argument count.
+ load32(Address(regT1, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset), regT2);
+ load32(info->addressOfMaxNumArguments(), regT0);
+ Jump notBiggest = branch32(Above, regT0, regT2);
+ store32(regT2, info->addressOfMaxNumArguments());
+ notBiggest.link(this);
+
+ // Initialize 'this'.
+ emitLoad(thisValue, regT2, regT0);
+ store32(regT0, Address(regT1, PayloadOffset + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
+ store32(regT2, Address(regT1, TagOffset + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
+
+ addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), regT1, stackPointerRegister);
}
void JIT::compileCallEval(Instruction* instruction)
{
- callOperationWithCallFrameRollbackOnException(operationCallEval);
+ addPtr(TrustedImm32(-static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))), stackPointerRegister, regT1);
+ storePtr(callFrameRegister, Address(regT1, CallFrame::callerFrameOffset()));
+
+ addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
+
+ callOperation(operationCallEval, regT1);
+
addSlowCase(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));
- emitGetCallerFrameFromCallFrameHeaderPtr(callFrameRegister);
sampleCodeBlock(m_codeBlock);
@@ -209,10 +202,23 @@ void JIT::compileCallEval(Instruction* instruction)
void JIT::compileCallEvalSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
{
+ CallLinkInfo* info = m_codeBlock->addCallLinkInfo();
+ info->setUpCall(CallLinkInfo::Call, CodeOrigin(m_bytecodeOffset), regT0);
+
linkSlowCase(iter);
- emitLoad(JSStack::Callee, regT1, regT0);
- emitNakedCall(m_vm->getCTIStub(virtualCallThunkGenerator).code());
+ int registerOffset = -instruction[4].u.operand;
+
+ addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
+
+ move(TrustedImmPtr(info), regT2);
+
+ emitLoad(CallFrameSlot::callee, regT1, regT0);
+ MacroAssemblerCodeRef virtualThunk = virtualThunkFor(m_vm, *info);
+ info->setSlowStub(createJITStubRoutine(virtualThunk, *m_vm, nullptr, true));
+ emitNakedCall(virtualThunk.code());
+ addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
+ checkStackPointerAlignment();
sampleCodeBlock(m_codeBlock);
@@ -228,17 +234,18 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
- Initializes ArgumentCount; CallerFrame; Callee.
For a JS call:
- - Caller initializes ScopeChain.
- Callee initializes ReturnPC; CodeBlock.
- Callee restores callFrameRegister before return.
For a non-JS call:
- - Caller initializes ScopeChain; ReturnPC; CodeBlock.
+ - Caller initializes ReturnPC; CodeBlock.
- Caller restores callFrameRegister after return.
*/
-
- if (opcodeID == op_call_varargs)
- compileLoadVarargs(instruction);
+ CallLinkInfo* info = nullptr;
+ if (opcodeID != op_call_eval)
+ info = m_codeBlock->addCallLinkInfo();
+ if (opcodeID == op_call_varargs || opcodeID == op_construct_varargs || opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments)
+ compileSetupVarargsFrame(opcodeID, instruction, info);
else {
int argCount = instruction[3].u.operand;
int registerOffset = -instruction[4].u.operand;
@@ -246,44 +253,55 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
if (opcodeID == op_call && shouldEmitProfiling()) {
emitLoad(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0, regT1);
Jump done = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
- loadPtr(Address(regT1, JSCell::structureOffset()), regT1);
- storePtr(regT1, instruction[6].u.arrayProfile->addressOfLastSeenStructure());
+ loadPtr(Address(regT1, JSCell::structureIDOffset()), regT1);
+ storePtr(regT1, instruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile->addressOfLastSeenStructureID());
done.link(this);
}
- addPtr(TrustedImm32(registerOffset * sizeof(Register)), callFrameRegister, regT3);
+ addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
- store32(TrustedImm32(argCount), payloadFor(JSStack::ArgumentCount, regT3));
- } // regT3 holds newCallFrame with ArgumentCount initialized.
+ store32(TrustedImm32(argCount), Address(stackPointerRegister, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
+ } // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized.
- uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
- store32(TrustedImm32(locationBits), tagFor(JSStack::ArgumentCount, callFrameRegister));
+ uint32_t locationBits = CallSiteIndex(instruction).bits();
+ store32(TrustedImm32(locationBits), tagFor(CallFrameSlot::argumentCount, callFrameRegister));
emitLoad(callee, regT1, regT0); // regT1, regT0 holds callee.
- storePtr(callFrameRegister, Address(regT3, CallFrame::callerFrameOffset()));
- emitStore(JSStack::Callee, regT1, regT0, regT3);
- move(regT3, callFrameRegister);
+ store32(regT0, Address(stackPointerRegister, CallFrameSlot::callee * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
+ store32(regT1, Address(stackPointerRegister, CallFrameSlot::callee * static_cast<int>(sizeof(Register)) + TagOffset - sizeof(CallerFrameAndPC)));
if (opcodeID == op_call_eval) {
compileCallEval(instruction);
return;
}
+ if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs)
+ emitRestoreCalleeSaves();
+
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
+
DataLabelPtr addressOfLinkedFunctionCheck;
Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));
addSlowCase(slowCase);
- addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
- ASSERT(m_callStructureStubCompilationInfo.size() == callLinkInfoIndex);
- m_callStructureStubCompilationInfo.append(StructureStubCompilationInfo());
- m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
- m_callStructureStubCompilationInfo[callLinkInfoIndex].callType = CallLinkInfo::callTypeFor(opcodeID);
- m_callStructureStubCompilationInfo[callLinkInfoIndex].bytecodeIndex = m_bytecodeOffset;
+ ASSERT(m_callCompilationInfo.size() == callLinkInfoIndex);
+ info->setUpCall(CallLinkInfo::callTypeFor(opcodeID), CodeOrigin(m_bytecodeOffset), regT0);
+ m_callCompilationInfo.append(CallCompilationInfo());
+ m_callCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
+ m_callCompilationInfo[callLinkInfoIndex].callLinkInfo = info;
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT1);
- emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
- m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
+ checkStackPointerAlignment();
+ if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs || opcodeID == op_tail_call_forward_arguments) {
+ prepareForTailCallSlow();
+ m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedTailCall();
+ return;
+ }
+
+ m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
+
+ addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
+ checkStackPointerAlignment();
sampleCodeBlock(m_codeBlock);
emitPutCallResult(instruction);
@@ -299,56 +317,23 @@ void JIT::compileOpCallSlowCase(OpcodeID opcodeID, Instruction* instruction, Vec
linkSlowCase(iter);
linkSlowCase(iter);
- m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? m_vm->getCTIStub(linkConstructThunkGenerator).code() : m_vm->getCTIStub(linkCallThunkGenerator).code());
+ move(TrustedImmPtr(m_callCompilationInfo[callLinkInfoIndex].callLinkInfo), regT2);
- sampleCodeBlock(m_codeBlock);
- emitPutCallResult(instruction);
-}
+ if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs)
+ emitRestoreCalleeSaves();
-void JIT::privateCompileClosureCall(CallLinkInfo* callLinkInfo, CodeBlock* calleeCodeBlock, Structure* expectedStructure, ExecutableBase* expectedExecutable, MacroAssemblerCodePtr codePtr)
-{
- JumpList slowCases;
+ m_callCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_vm->getCTIStub(linkCallThunkGenerator).code());
- slowCases.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
- slowCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(expectedStructure)));
- slowCases.append(branchPtr(NotEqual, Address(regT0, JSFunction::offsetOfExecutable()), TrustedImmPtr(expectedExecutable)));
-
- loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT1);
- emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
-
- Call call = nearCall();
- Jump done = jump();
-
- slowCases.link(this);
- move(TrustedImmPtr(callLinkInfo->callReturnLocation.executableAddress()), regT2);
- restoreReturnAddressBeforeReturn(regT2);
- Jump slow = jump();
-
- LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
-
- patchBuffer.link(call, FunctionPtr(codePtr.executableAddress()));
- patchBuffer.link(done, callLinkInfo->hotPathOther.labelAtOffset(0));
- patchBuffer.link(slow, CodeLocationLabel(m_vm->getCTIStub(virtualCallThunkGenerator).code()));
-
- RefPtr<ClosureCallStubRoutine> stubRoutine = adoptRef(new ClosureCallStubRoutine(
- FINALIZE_CODE(
- patchBuffer,
- ("Baseline closure call stub for %s, return point %p, target %p (%s)",
- toCString(*m_codeBlock).data(),
- callLinkInfo->hotPathOther.labelAtOffset(0).executableAddress(),
- codePtr.executableAddress(),
- toCString(pointerDump(calleeCodeBlock)).data())),
- *m_vm, m_codeBlock->ownerExecutable(), expectedStructure, expectedExecutable,
- callLinkInfo->codeOrigin));
-
- RepatchBuffer repatchBuffer(m_codeBlock);
-
- repatchBuffer.replaceWithJump(
- RepatchBuffer::startOfBranchPtrWithPatchOnRegister(callLinkInfo->hotPathBegin),
- CodeLocationLabel(stubRoutine->code().code()));
- repatchBuffer.relink(callLinkInfo->callReturnLocation, m_vm->getCTIStub(virtualCallThunkGenerator).code());
-
- callLinkInfo->stub = stubRoutine.release();
+ if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs) {
+ abortWithReason(JITDidReturnFromTailCall);
+ return;
+ }
+
+ addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
+ checkStackPointerAlignment();
+
+ sampleCodeBlock(m_codeBlock);
+ emitPutCallResult(instruction);
}
} // namespace JSC
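
The hot path above compares the loaded callee against a pointer that is patched when the call gets linked; only a mismatch falls through to the slow case and the link thunk. The following is a minimal standalone sketch of that monomorphic call-caching idea, using hypothetical stand-in types rather than the JSC machinery:

    // Standalone model of a monomorphic call cache (not the JSC implementation).
    // A call site remembers the last callee it was linked to; a hit skips the
    // generic dispatch, a miss goes through the "link" slow path and re-caches.
    #include <cstdio>
    #include <functional>

    struct Callee {
        const char* name;
        std::function<int(int)> body;
    };

    struct CallSite {
        const Callee* linkedCallee = nullptr;   // analogous to the patched pointer check

        int call(const Callee& callee, int argument)
        {
            if (&callee == linkedCallee)        // fast path: pointer identity check
                return callee.body(argument);
            return linkSlowCase(callee, argument);
        }

        int linkSlowCase(const Callee& callee, int argument)
        {
            linkedCallee = &callee;             // "repatch" the site to the new callee
            return callee.body(argument);
        }
    };

    int main()
    {
        Callee twice { "twice", [](int x) { return 2 * x; } };
        CallSite site;
        std::printf("%d\n", site.call(twice, 3)); // first call links the site
        std::printf("%d\n", site.call(twice, 4)); // second call takes the fast path
        return 0;
    }
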
diff --git a/Source/JavaScriptCore/jit/JITCode.cpp b/Source/JavaScriptCore/jit/JITCode.cpp
index 213b7000c..0001d32f1 100644
--- a/Source/JavaScriptCore/jit/JITCode.cpp
+++ b/Source/JavaScriptCore/jit/JITCode.cpp
@@ -27,7 +27,8 @@
#include "JITCode.h"
#include "LLIntThunks.h"
-#include "Operations.h"
+#include "JSCInlines.h"
+#include "ProtoCallFrame.h"
#include <wtf/PrintStream.h>
namespace JSC {
@@ -41,12 +42,44 @@ JITCode::~JITCode()
{
}
-JSValue JITCode::execute(VM* vm, ProtoCallFrame* protoCallFrame, Register* topOfStack)
+const char* JITCode::typeName(JITType jitType)
{
- ASSERT(!vm->topCallFrame || ((Register*)(vm->topCallFrame) >= topOfStack));
+ switch (jitType) {
+ case None:
+ return "None";
+ case HostCallThunk:
+ return "Host";
+ case InterpreterThunk:
+ return "LLInt";
+ case BaselineJIT:
+ return "Baseline";
+ case DFGJIT:
+ return "DFG";
+ case FTLJIT:
+ return "FTL";
+ default:
+ CRASH();
+ return "";
+ }
+}
+
+void JITCode::validateReferences(const TrackedReferences&)
+{
+}
+
+JSValue JITCode::execute(VM* vm, ProtoCallFrame* protoCallFrame)
+{
+ auto scope = DECLARE_THROW_SCOPE(*vm);
+ void* entryAddress;
+ JSFunction* function = jsDynamicCast<JSFunction*>(*vm, protoCallFrame->callee());
- JSValue result = JSValue::decode(callToJavaScript(executableAddress(), &vm->topCallFrame, protoCallFrame, topOfStack));
- return vm->exception() ? jsNull() : result;
+ if (!function || !protoCallFrame->needArityCheck()) {
+ ASSERT(!protoCallFrame->needArityCheck());
+ entryAddress = executableAddress();
+ } else
+ entryAddress = addressForCall(MustCheckArity).executableAddress();
+ JSValue result = JSValue::decode(vmEntryToJavaScript(entryAddress, vm, protoCallFrame));
+ return scope.exception() ? jsNull() : result;
}
DFG::CommonData* JITCode::dfgCommon()
@@ -73,52 +106,38 @@ FTL::ForOSREntryJITCode* JITCode::ftlForOSREntry()
return 0;
}
-PassRefPtr<JITCode> JITCode::hostFunction(JITCode::CodeRef code)
-{
- return adoptRef(new DirectJITCode(code, HostCallThunk));
-}
-
-DirectJITCode::DirectJITCode(JITType jitType)
+JITCodeWithCodeRef::JITCodeWithCodeRef(JITType jitType)
: JITCode(jitType)
{
}
-DirectJITCode::DirectJITCode(const JITCode::CodeRef ref, JITType jitType)
+JITCodeWithCodeRef::JITCodeWithCodeRef(CodeRef ref, JITType jitType)
: JITCode(jitType)
, m_ref(ref)
{
}
-DirectJITCode::~DirectJITCode()
-{
-}
-
-void DirectJITCode::initializeCodeRef(const JITCode::CodeRef ref)
+JITCodeWithCodeRef::~JITCodeWithCodeRef()
{
- RELEASE_ASSERT(!m_ref);
- m_ref = ref;
+ if ((Options::dumpDisassembly() || (isOptimizingJIT(jitType()) && Options::dumpDFGDisassembly()))
+ && m_ref.executableMemory())
+ dataLog("Destroying JIT code at ", pointerDump(m_ref.executableMemory()), "\n");
}
-JITCode::CodePtr DirectJITCode::addressForCall()
-{
- RELEASE_ASSERT(m_ref);
- return m_ref.code();
-}
-
-void* DirectJITCode::executableAddressAtOffset(size_t offset)
+void* JITCodeWithCodeRef::executableAddressAtOffset(size_t offset)
{
RELEASE_ASSERT(m_ref);
return reinterpret_cast<char*>(m_ref.code().executableAddress()) + offset;
}
-void* DirectJITCode::dataAddressAtOffset(size_t offset)
+void* JITCodeWithCodeRef::dataAddressAtOffset(size_t offset)
{
RELEASE_ASSERT(m_ref);
ASSERT(offset <= size()); // use <= instead of < because it is valid to ask for an address at the exclusive end of the code.
return reinterpret_cast<char*>(m_ref.code().dataLocation()) + offset;
}
-unsigned DirectJITCode::offsetOf(void* pointerIntoCode)
+unsigned JITCodeWithCodeRef::offsetOf(void* pointerIntoCode)
{
RELEASE_ASSERT(m_ref);
intptr_t result = reinterpret_cast<intptr_t>(pointerIntoCode) - reinterpret_cast<intptr_t>(m_ref.code().executableAddress());
@@ -126,47 +145,94 @@ unsigned DirectJITCode::offsetOf(void* pointerIntoCode)
return static_cast<unsigned>(result);
}
-size_t DirectJITCode::size()
+size_t JITCodeWithCodeRef::size()
{
RELEASE_ASSERT(m_ref);
return m_ref.size();
}
-bool DirectJITCode::contains(void* address)
+bool JITCodeWithCodeRef::contains(void* address)
{
RELEASE_ASSERT(m_ref);
return m_ref.executableMemory()->contains(address);
}
+DirectJITCode::DirectJITCode(JITType jitType)
+ : JITCodeWithCodeRef(jitType)
+{
+}
+
+DirectJITCode::DirectJITCode(JITCode::CodeRef ref, JITCode::CodePtr withArityCheck, JITType jitType)
+ : JITCodeWithCodeRef(ref, jitType)
+ , m_withArityCheck(withArityCheck)
+{
+}
+
+DirectJITCode::~DirectJITCode()
+{
+}
+
+void DirectJITCode::initializeCodeRef(JITCode::CodeRef ref, JITCode::CodePtr withArityCheck)
+{
+ RELEASE_ASSERT(!m_ref);
+ m_ref = ref;
+ m_withArityCheck = withArityCheck;
+}
+
+JITCode::CodePtr DirectJITCode::addressForCall(ArityCheckMode arity)
+{
+ switch (arity) {
+ case ArityCheckNotRequired:
+ RELEASE_ASSERT(m_ref);
+ return m_ref.code();
+ case MustCheckArity:
+ RELEASE_ASSERT(m_withArityCheck);
+ return m_withArityCheck;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ return CodePtr();
+}
+
+NativeJITCode::NativeJITCode(JITType jitType)
+ : JITCodeWithCodeRef(jitType)
+{
+}
+
+NativeJITCode::NativeJITCode(CodeRef ref, JITType jitType)
+ : JITCodeWithCodeRef(ref, jitType)
+{
+}
+
+NativeJITCode::~NativeJITCode()
+{
+}
+
+void NativeJITCode::initializeCodeRef(CodeRef ref)
+{
+ ASSERT(!m_ref);
+ m_ref = ref;
+}
+
+JITCode::CodePtr NativeJITCode::addressForCall(ArityCheckMode)
+{
+ RELEASE_ASSERT(!!m_ref);
+ return m_ref.code();
+}
+
+#if ENABLE(JIT)
+RegisterSet JITCode::liveRegistersToPreserveAtExceptionHandlingCallSite(CodeBlock*, CallSiteIndex)
+{
+ return RegisterSet();
+}
+#endif
+
} // namespace JSC
namespace WTF {
void printInternal(PrintStream& out, JSC::JITCode::JITType type)
{
- switch (type) {
- case JSC::JITCode::None:
- out.print("None");
- return;
- case JSC::JITCode::HostCallThunk:
- out.print("Host");
- return;
- case JSC::JITCode::InterpreterThunk:
- out.print("LLInt");
- return;
- case JSC::JITCode::BaselineJIT:
- out.print("Baseline");
- return;
- case JSC::JITCode::DFGJIT:
- out.print("DFG");
- return;
- case JSC::JITCode::FTLJIT:
- out.print("FTL");
- return;
- default:
- CRASH();
- return;
- }
+ out.print(JSC::JITCode::typeName(type));
}
} // namespace WTF
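
The reworked execute() picks an entry point up front: the plain executable address when no arity fix-up can be needed, otherwise the MustCheckArity entry. A hedged, stand-in sketch of that selection (the types and the "fill in missing arguments" behaviour here are illustrative only):

    // Choose between a fast entry and an arity-checking entry per call.
    #include <cstdio>

    struct ProtoFrame {
        int expectedArgumentCount;
        int passedArgumentCount;
        bool needArityCheck() const { return passedArgumentCount < expectedArgumentCount; }
    };

    using Entry = int (*)(const ProtoFrame&);

    static int fastEntry(const ProtoFrame& frame) { return frame.passedArgumentCount; }
    static int arityCheckEntry(const ProtoFrame& frame)
    {
        // Pretend the missing arguments are filled in before running the body.
        return frame.expectedArgumentCount;
    }

    static Entry entryFor(const ProtoFrame& frame)
    {
        return frame.needArityCheck() ? arityCheckEntry : fastEntry;
    }

    int main()
    {
        ProtoFrame exact { 2, 2 }, missing { 2, 1 };
        std::printf("%d %d\n", entryFor(exact)(exact), entryFor(missing)(missing));
        return 0;
    }
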
diff --git a/Source/JavaScriptCore/jit/JITCode.h b/Source/JavaScriptCore/jit/JITCode.h
index 52c78111a..75c70c7f7 100644
--- a/Source/JavaScriptCore/jit/JITCode.h
+++ b/Source/JavaScriptCore/jit/JITCode.h
@@ -23,17 +23,16 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef JITCode_h
-#define JITCode_h
+#pragma once
-#if ENABLE(JIT) || ENABLE(LLINT)
+#include "ArityCheckMode.h"
#include "CallFrame.h"
+#include "CodeOrigin.h"
#include "Disassembler.h"
-#include "JITStubs.h"
#include "JSCJSValue.h"
-#include "LegacyProfiler.h"
#include "MacroAssemblerCodeRef.h"
-#endif
+#include "RegisterSet.h"
+#include <wtf/Optional.h>
namespace JSC {
@@ -47,6 +46,7 @@ class JITCode;
}
struct ProtoCallFrame;
+class TrackedReferences;
class VM;
class JITCode : public ThreadSafeRefCounted<JITCode> {
@@ -54,8 +54,17 @@ public:
typedef MacroAssemblerCodeRef CodeRef;
typedef MacroAssemblerCodePtr CodePtr;
- enum JITType { None, HostCallThunk, InterpreterThunk, BaselineJIT, DFGJIT, FTLJIT };
+ enum JITType : uint8_t {
+ None,
+ HostCallThunk,
+ InterpreterThunk,
+ BaselineJIT,
+ DFGJIT,
+ FTLJIT
+ };
+ static const char* typeName(JITType);
+
static JITType bottomTierJIT()
{
return BaselineJIT;
@@ -112,7 +121,7 @@ public:
return false;
}
}
-
+
static bool isLowerTier(JITType expectedLower, JITType expectedHigher)
{
RELEASE_ASSERT(isExecutableScript(expectedLower));
@@ -164,7 +173,7 @@ public:
return jitCode->jitType();
}
- virtual CodePtr addressForCall() = 0;
+ virtual CodePtr addressForCall(ArityCheckMode) = 0;
virtual void* executableAddressAtOffset(size_t offset) = 0;
void* executableAddress() { return executableAddressAtOffset(0); }
virtual void* dataAddressAtOffset(size_t offset) = 0;
@@ -175,7 +184,9 @@ public:
virtual FTL::JITCode* ftl();
virtual FTL::ForOSREntryJITCode* ftlForOSREntry();
- JSValue execute(VM*, ProtoCallFrame*, Register*);
+ virtual void validateReferences(const TrackedReferences&);
+
+ JSValue execute(VM*, ProtoCallFrame*);
void* start() { return dataAddressAtOffset(0); }
virtual size_t size() = 0;
@@ -183,29 +194,56 @@ public:
virtual bool contains(void*) = 0;
- static PassRefPtr<JITCode> hostFunction(CodeRef);
+#if ENABLE(JIT)
+ virtual RegisterSet liveRegistersToPreserveAtExceptionHandlingCallSite(CodeBlock*, CallSiteIndex);
+ virtual std::optional<CodeOrigin> findPC(CodeBlock*, void* pc) { UNUSED_PARAM(pc); return std::nullopt; }
+#endif
private:
JITType m_jitType;
};
-class DirectJITCode : public JITCode {
+class JITCodeWithCodeRef : public JITCode {
+protected:
+ JITCodeWithCodeRef(JITType);
+ JITCodeWithCodeRef(CodeRef, JITType);
+
+public:
+ virtual ~JITCodeWithCodeRef();
+
+ void* executableAddressAtOffset(size_t offset) override;
+ void* dataAddressAtOffset(size_t offset) override;
+ unsigned offsetOf(void* pointerIntoCode) override;
+ size_t size() override;
+ bool contains(void*) override;
+
+protected:
+ CodeRef m_ref;
+};
+
+class DirectJITCode : public JITCodeWithCodeRef {
public:
DirectJITCode(JITType);
- DirectJITCode(const CodeRef, JITType);
+ DirectJITCode(CodeRef, CodePtr withArityCheck, JITType);
virtual ~DirectJITCode();
- void initializeCodeRef(CodeRef ref);
+ void initializeCodeRef(CodeRef, CodePtr withArityCheck);
- virtual CodePtr addressForCall() override;
- virtual void* executableAddressAtOffset(size_t offset) override;
- virtual void* dataAddressAtOffset(size_t offset) override;
- virtual unsigned offsetOf(void* pointerIntoCode) override;
- virtual size_t size() override;
- virtual bool contains(void*) override;
+ CodePtr addressForCall(ArityCheckMode) override;
private:
- CodeRef m_ref;
+ CodePtr m_withArityCheck;
+};
+
+class NativeJITCode : public JITCodeWithCodeRef {
+public:
+ NativeJITCode(JITType);
+ NativeJITCode(CodeRef, JITType);
+ virtual ~NativeJITCode();
+
+ void initializeCodeRef(CodeRef);
+
+ CodePtr addressForCall(ArityCheckMode) override;
};
} // namespace JSC
@@ -216,5 +254,3 @@ class PrintStream;
void printInternal(PrintStream&, JSC::JITCode::JITType);
} // namespace WTF
-
-#endif
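
The header change splits the old DirectJITCode into a shared base that owns the code range plus two leaves: one with a separate arity-check entry and one native variant that has only a single entry. A compilable stand-in sketch of that shape (none of these types are the JSC classes):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    enum class ArityCheckMode { ArityCheckNotRequired, MustCheckArity };

    struct CodeRange {
        const uint8_t* begin = nullptr;
        size_t length = 0;
        bool contains(const void* p) const
        {
            auto* address = static_cast<const uint8_t*>(p);
            return address >= begin && address < begin + length;
        }
    };

    // Shared base: owns the range, provides size()/contains().
    class CodeWithRange {
    public:
        explicit CodeWithRange(CodeRange range) : m_range(range) { }
        virtual ~CodeWithRange() = default;
        virtual const void* addressForCall(ArityCheckMode) const = 0;
        size_t size() const { return m_range.length; }
        bool contains(const void* p) const { return m_range.contains(p); }
    protected:
        CodeRange m_range;
    };

    // Two entry points: the fast one and the arity-checking one.
    class DirectCode final : public CodeWithRange {
    public:
        DirectCode(CodeRange range, const void* withArityCheck)
            : CodeWithRange(range), m_withArityCheck(withArityCheck) { }
        const void* addressForCall(ArityCheckMode mode) const override
        {
            if (mode == ArityCheckMode::MustCheckArity) {
                assert(m_withArityCheck);
                return m_withArityCheck;
            }
            return m_range.begin;
        }
    private:
        const void* m_withArityCheck;
    };

    // Native code ignores the requested mode: there is only one entry.
    class NativeCode final : public CodeWithRange {
    public:
        explicit NativeCode(CodeRange range) : CodeWithRange(range) { }
        const void* addressForCall(ArityCheckMode) const override { return m_range.begin; }
    };

    int main()
    {
        static const uint8_t blob[16] = { };
        DirectCode code({ blob, sizeof(blob) }, blob + 4);
        assert(code.addressForCall(ArityCheckMode::MustCheckArity) == blob + 4);
        assert(code.contains(blob + 8));
        return 0;
    }
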
diff --git a/Source/JavaScriptCore/jit/JITCompilationEffort.h b/Source/JavaScriptCore/jit/JITCompilationEffort.h
index 5eb680178..18d53d54b 100644
--- a/Source/JavaScriptCore/jit/JITCompilationEffort.h
+++ b/Source/JavaScriptCore/jit/JITCompilationEffort.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,8 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef JITCompilationEffort_h
-#define JITCompilationEffort_h
+#pragma once
namespace JSC {
@@ -34,6 +33,3 @@ enum JITCompilationEffort {
};
} // namespace JSC
-
-#endif // JITCompilationEffort_h
-
diff --git a/Source/JavaScriptCore/jit/JITDisassembler.cpp b/Source/JavaScriptCore/jit/JITDisassembler.cpp
index 2d91a6466..7aa21d06f 100644
--- a/Source/JavaScriptCore/jit/JITDisassembler.cpp
+++ b/Source/JavaScriptCore/jit/JITDisassembler.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2013, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,11 +26,14 @@
#include "config.h"
#include "JITDisassembler.h"
-#if ENABLE(JIT) && ENABLE(DISASSEMBLER)
+#if ENABLE(JIT)
#include "CodeBlock.h"
#include "CodeBlockWithJITType.h"
#include "JIT.h"
+#include "JSCInlines.h"
+#include "LinkBuffer.h"
+#include "ProfilerCompilation.h"
#include <wtf/StringPrintStream.h>
namespace JSC {
@@ -164,5 +167,5 @@ void JITDisassembler::dumpDisassembly(PrintStream& out, LinkBuffer& linkBuffer,
} // namespace JSC
-#endif // ENABLE(JIT) && ENABLE(DISASSEMBLER)
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITDisassembler.h b/Source/JavaScriptCore/jit/JITDisassembler.h
index 7ea13f47d..5e3228601 100644
--- a/Source/JavaScriptCore/jit/JITDisassembler.h
+++ b/Source/JavaScriptCore/jit/JITDisassembler.h
@@ -23,23 +23,22 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef JITDisassembler_h
-#define JITDisassembler_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(JIT)
-#include "LinkBuffer.h"
#include "MacroAssembler.h"
-#include "ProfilerDatabase.h"
#include <wtf/Vector.h>
+#include <wtf/text/CString.h>
namespace JSC {
class CodeBlock;
+class LinkBuffer;
-#if ENABLE(DISASSEMBLER)
+namespace Profiler {
+class Compilation;
+}
class JITDisassembler {
WTF_MAKE_FAST_ALLOCATED;
@@ -86,28 +85,6 @@ private:
MacroAssembler::Label m_endOfCode;
};
-#else // ENABLE(DISASSEMBLER)
-
-class JITDisassembler {
- WTF_MAKE_FAST_ALLOCATED;
-public:
- JITDisassembler(CodeBlock*) { }
-
- void setStartOfCode(MacroAssembler::Label) { }
- void setForBytecodeMainPath(unsigned, MacroAssembler::Label) { }
- void setForBytecodeSlowPath(unsigned, MacroAssembler::Label) { }
- void setEndOfSlowPath(MacroAssembler::Label) { }
- void setEndOfCode(MacroAssembler::Label) { }
-
- void dump(LinkBuffer&) { }
- void reportToProfiler(Profiler::Compilation*, LinkBuffer&) { }
-};
-
-#endif // ENABLE(DISASSEMBLER)
-
} // namespace JSC
#endif // ENABLE(JIT)
-
-#endif // JITDisassembler_h
-
diff --git a/Source/JavaScriptCore/jit/JITDivGenerator.cpp b/Source/JavaScriptCore/jit/JITDivGenerator.cpp
new file mode 100644
index 000000000..a5db7e1b2
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITDivGenerator.cpp
@@ -0,0 +1,139 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JITDivGenerator.h"
+
+#if ENABLE(JIT)
+
+#include "ArithProfile.h"
+#include "JSCJSValueInlines.h"
+#include "MathCommon.h"
+
+namespace JSC {
+
+void JITDivGenerator::loadOperand(CCallHelpers& jit, SnippetOperand& opr, JSValueRegs oprRegs, FPRReg destFPR)
+{
+ if (opr.isConstInt32()) {
+ // FIXME: this does not look right.
+ // -On x86_64, CVTSI2SD has a partial register stall on its FPR.
+ // A move or load might be a tiny bit larger but safer.
+ // -On ARM64 we also have FMOV that can load small immediates.
+ jit.move(CCallHelpers::Imm32(opr.asConstInt32()), m_scratchGPR);
+ jit.convertInt32ToDouble(m_scratchGPR, destFPR);
+#if USE(JSVALUE64)
+ } else if (opr.isConstDouble()) {
+ jit.move(CCallHelpers::Imm64(opr.asRawBits()), m_scratchGPR);
+ jit.move64ToDouble(m_scratchGPR, destFPR);
+#endif
+ } else {
+ if (!opr.definitelyIsNumber())
+ m_slowPathJumpList.append(jit.branchIfNotNumber(oprRegs, m_scratchGPR));
+ CCallHelpers::Jump notInt32 = jit.branchIfNotInt32(oprRegs);
+ jit.convertInt32ToDouble(oprRegs.payloadGPR(), destFPR);
+ CCallHelpers::Jump oprIsLoaded = jit.jump();
+ notInt32.link(&jit);
+ jit.unboxDoubleNonDestructive(oprRegs, destFPR, m_scratchGPR, m_scratchFPR);
+ oprIsLoaded.link(&jit);
+ }
+}
+
+void JITDivGenerator::generateFastPath(CCallHelpers& jit)
+{
+ ASSERT(m_scratchGPR != InvalidGPRReg);
+ ASSERT(m_scratchGPR != m_left.payloadGPR());
+ ASSERT(m_scratchGPR != m_right.payloadGPR());
+#if USE(JSVALUE32_64)
+ ASSERT(m_scratchGPR != m_left.tagGPR());
+ ASSERT(m_scratchGPR != m_right.tagGPR());
+ ASSERT(m_scratchFPR != InvalidFPRReg);
+#endif
+
+ ASSERT(!m_didEmitFastPath);
+ if (!jit.supportsFloatingPoint())
+ return;
+ if (!m_leftOperand.mightBeNumber() || !m_rightOperand.mightBeNumber())
+ return;
+
+ ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());
+ m_didEmitFastPath = true;
+ loadOperand(jit, m_leftOperand, m_left, m_leftFPR);
+
+#if USE(JSVALUE64)
+ std::optional<double> safeReciprocal;
+ if (m_rightOperand.isConst()) {
+ double constant = m_rightOperand.asConstNumber();
+ safeReciprocal = safeReciprocalForDivByConst(constant);
+ }
+
+ if (safeReciprocal) {
+ jit.move(CCallHelpers::Imm64(bitwise_cast<int64_t>(*safeReciprocal)), m_scratchGPR);
+ jit.move64ToDouble(m_scratchGPR, m_rightFPR);
+
+ jit.mulDouble(m_rightFPR, m_leftFPR);
+ } else
+#endif
+ {
+ loadOperand(jit, m_rightOperand, m_right, m_rightFPR);
+
+ jit.divDouble(m_rightFPR, m_leftFPR);
+ }
+
+ // Is the result actually an integer? The DFG JIT would really like to know. If it's
+ // not an integer, we set a bit. If this together with the slow case counter is below
+ // threshold then the DFG JIT will compile this division with a speculation that the
+ // remainder is zero.
+
+ // As well, there are cases where a double result here would cause an important field
+ // in the heap to sometimes have doubles in it, resulting in double predictions getting
+ // propagated to a use site where it might cause damage (such as the index to an array
+ // access). So if we are DFG compiling anything in the program, we want this code to
+ // ensure that it produces integers whenever possible.
+
+ CCallHelpers::JumpList notInt32;
+ jit.branchConvertDoubleToInt32(m_leftFPR, m_scratchGPR, notInt32, m_scratchFPR);
+
+ // If we've got an integer, we might as well make that the result of the division.
+ jit.boxInt32(m_scratchGPR, m_result);
+ m_endJumpList.append(jit.jump());
+
+ notInt32.link(&jit);
+#if USE(JSVALUE64)
+ jit.moveDoubleTo64(m_leftFPR, m_scratchGPR);
+ CCallHelpers::Jump notDoubleZero = jit.branchTest64(CCallHelpers::NonZero, m_scratchGPR);
+
+ jit.move(GPRInfo::tagTypeNumberRegister, m_result.payloadGPR());
+ m_endJumpList.append(jit.jump());
+
+ notDoubleZero.link(&jit);
+#endif
+ if (m_arithProfile)
+ jit.or32(CCallHelpers::TrustedImm32(ArithProfile::specialFastPathBit), CCallHelpers::AbsoluteAddress(m_arithProfile->addressOfBits()));
+ jit.boxDouble(m_leftFPR, m_result);
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
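
When the right operand is a constant, the generator above multiplies by a precomputed reciprocal instead of dividing, but only when that substitution is exact. A standalone sketch of the acceptance test, assuming the usual "power of two with a normal reciprocal" criterion; the helper name and the exact rules JSC uses (safeReciprocalForDivByConst) may differ in detail:

    #include <cmath>
    #include <cstdio>
    #include <optional>

    // Return 1/divisor only when x * (1/divisor) is guaranteed to equal
    // x / divisor bit for bit: the divisor must be a power of two and its
    // reciprocal must be a normal (non-overflowing, non-denormal) double.
    static std::optional<double> safeReciprocal(double divisor)
    {
        if (divisor == 0 || !std::isfinite(divisor))
            return std::nullopt;
        int exponent;
        // A power of two has a mantissa of exactly 0.5 after frexp.
        if (std::frexp(std::fabs(divisor), &exponent) != 0.5)
            return std::nullopt;
        double reciprocal = 1.0 / divisor;
        if (!std::isnormal(reciprocal))
            return std::nullopt;
        return reciprocal;
    }

    int main()
    {
        double dividend = 3.75;
        double divisor = 8.0;
        if (auto reciprocal = safeReciprocal(divisor))
            std::printf("%g\n", dividend * *reciprocal); // same result as dividend / divisor
        else
            std::printf("%g\n", dividend / divisor);     // fall back to a real divide
        return 0;
    }

Multiplying by an exactly representable reciprocal of a power of two rescales only the exponent, so the rounded product matches the rounded quotient; for any other divisor the reciprocal is already rounded and the shortcut is not taken.
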
diff --git a/Source/JavaScriptCore/jit/JITDivGenerator.h b/Source/JavaScriptCore/jit/JITDivGenerator.h
new file mode 100644
index 000000000..7a03cc7b7
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITDivGenerator.h
@@ -0,0 +1,82 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "CCallHelpers.h"
+#include "SnippetOperand.h"
+
+namespace JSC {
+
+class JITDivGenerator {
+public:
+ JITDivGenerator(SnippetOperand leftOperand, SnippetOperand rightOperand,
+ JSValueRegs result, JSValueRegs left, JSValueRegs right,
+ FPRReg leftFPR, FPRReg rightFPR, GPRReg scratchGPR, FPRReg scratchFPR,
+ ArithProfile* arithProfile = nullptr)
+ : m_leftOperand(leftOperand)
+ , m_rightOperand(rightOperand)
+ , m_result(result)
+ , m_left(left)
+ , m_right(right)
+ , m_leftFPR(leftFPR)
+ , m_rightFPR(rightFPR)
+ , m_scratchGPR(scratchGPR)
+ , m_scratchFPR(scratchFPR)
+ , m_arithProfile(arithProfile)
+ {
+ ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());
+ }
+
+ void generateFastPath(CCallHelpers&);
+
+ bool didEmitFastPath() const { return m_didEmitFastPath; }
+ CCallHelpers::JumpList& endJumpList() { return m_endJumpList; }
+ CCallHelpers::JumpList& slowPathJumpList() { return m_slowPathJumpList; }
+
+private:
+ void loadOperand(CCallHelpers&, SnippetOperand&, JSValueRegs opRegs, FPRReg destFPR);
+
+ SnippetOperand m_leftOperand;
+ SnippetOperand m_rightOperand;
+ JSValueRegs m_result;
+ JSValueRegs m_left;
+ JSValueRegs m_right;
+ FPRReg m_leftFPR;
+ FPRReg m_rightFPR;
+ GPRReg m_scratchGPR;
+ FPRReg m_scratchFPR;
+ ArithProfile* m_arithProfile;
+ bool m_didEmitFastPath { false };
+
+ CCallHelpers::JumpList m_endJumpList;
+ CCallHelpers::JumpList m_slowPathJumpList;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITExceptions.cpp b/Source/JavaScriptCore/jit/JITExceptions.cpp
index 8084f773b..501d48b1b 100644
--- a/Source/JavaScriptCore/jit/JITExceptions.cpp
+++ b/Source/JavaScriptCore/jit/JITExceptions.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012-2013, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,42 +27,73 @@
#include "JITExceptions.h"
#include "CallFrame.h"
-#include "CallFrameInlines.h"
#include "CodeBlock.h"
#include "Interpreter.h"
-#include "JITStubs.h"
+#include "JSCInlines.h"
#include "JSCJSValue.h"
#include "LLIntData.h"
#include "LLIntOpcode.h"
#include "LLIntThunks.h"
#include "Opcode.h"
-#include "Operations.h"
+#include "ShadowChicken.h"
#include "VM.h"
namespace JSC {
-void genericUnwind(VM* vm, ExecState* callFrame, JSValue exceptionValue)
+void genericUnwind(VM* vm, ExecState* callFrame, UnwindStart unwindStart)
{
- RELEASE_ASSERT(exceptionValue);
- HandlerInfo* handler = vm->interpreter->unwind(callFrame, exceptionValue); // This may update callFrame.
+ auto scope = DECLARE_CATCH_SCOPE(*vm);
+ if (Options::breakOnThrow()) {
+ CodeBlock* codeBlock = callFrame->codeBlock();
+ if (codeBlock)
+ dataLog("In call frame ", RawPointer(callFrame), " for code block ", *codeBlock, "\n");
+ else
+ dataLog("In call frame ", RawPointer(callFrame), " with null CodeBlock\n");
+ CRASH();
+ }
+
+ ExecState* shadowChickenTopFrame = callFrame;
+ if (unwindStart == UnwindFromCallerFrame) {
+ VMEntryFrame* topVMEntryFrame = vm->topVMEntryFrame;
+ shadowChickenTopFrame = callFrame->callerFrame(topVMEntryFrame);
+ }
+ vm->shadowChicken().log(*vm, shadowChickenTopFrame, ShadowChicken::Packet::throwPacket());
+
+ Exception* exception = scope.exception();
+ RELEASE_ASSERT(exception);
+ HandlerInfo* handler = vm->interpreter->unwind(*vm, callFrame, exception, unwindStart); // This may update callFrame.
void* catchRoutine;
Instruction* catchPCForInterpreter = 0;
if (handler) {
- catchPCForInterpreter = &callFrame->codeBlock()->instructions()[handler->target];
+ // handler->target is meaningless for getting a code offset when catching
+ // the exception in a DFG/FTL frame. This bytecode target offset could be
+ // something that's in an inlined frame, which means an array access
+ // with this bytecode offset in the machine frame is utterly meaningless
+ // and can cause an overflow. OSR exit properly exits to handler->target
+ // in the proper frame.
+ if (!JITCode::isOptimizingJIT(callFrame->codeBlock()->jitType()))
+ catchPCForInterpreter = &callFrame->codeBlock()->instructions()[handler->target];
#if ENABLE(JIT)
catchRoutine = handler->nativeCode.executableAddress();
#else
catchRoutine = catchPCForInterpreter->u.pointer;
#endif
} else
- catchRoutine = LLInt::getCodePtr(returnFromJavaScript);
+ catchRoutine = LLInt::getCodePtr(handleUncaughtException);
- vm->callFrameForThrow = callFrame;
+ ASSERT(bitwise_cast<uintptr_t>(callFrame) < bitwise_cast<uintptr_t>(vm->topVMEntryFrame));
+
+ vm->callFrameForCatch = callFrame;
vm->targetMachinePCForThrow = catchRoutine;
vm->targetInterpreterPCForThrow = catchPCForInterpreter;
RELEASE_ASSERT(catchRoutine);
}
+void genericUnwind(VM* vm, ExecState* callFrame)
+{
+ genericUnwind(vm, callFrame, UnwindFromCurrentFrame);
+}
+
} // namespace JSC
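
genericUnwind now takes an UnwindStart, finds the nearest handler from either the current or the caller frame, and records where the throw path should resume (callFrameForCatch, targetMachinePCForThrow). A standalone model of that bookkeeping with made-up types, not JSC's:

    #include <cstdio>
    #include <vector>

    enum class UnwindStart { FromCurrentFrame, FromCallerFrame };

    struct Frame {
        const char* name;
        bool hasHandler;
    };

    struct UnwindResult {
        const Frame* catchFrame = nullptr;  // analogous to vm->callFrameForCatch
        const char* target = nullptr;       // analogous to targetMachinePCForThrow
    };

    static UnwindResult unwind(const std::vector<Frame>& stack, size_t top, UnwindStart start)
    {
        size_t frame = top;
        if (start == UnwindStart::FromCallerFrame && frame > 0)
            --frame; // skip the frame that is already being torn down
        for (;; --frame) {
            if (stack[frame].hasHandler)
                return { &stack[frame], "catch handler" };
            if (!frame)
                break;
        }
        return { nullptr, "uncaught exception handler" };
    }

    int main()
    {
        std::vector<Frame> stack = { { "outer", true }, { "middle", false }, { "inner", false } };
        UnwindResult result = unwind(stack, stack.size() - 1, UnwindStart::FromCurrentFrame);
        std::printf("unwind to %s (%s)\n",
            result.catchFrame ? result.catchFrame->name : "entry", result.target);
        return 0;
    }
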
diff --git a/Source/JavaScriptCore/jit/JITExceptions.h b/Source/JavaScriptCore/jit/JITExceptions.h
index 376e269f1..8549d949f 100644
--- a/Source/JavaScriptCore/jit/JITExceptions.h
+++ b/Source/JavaScriptCore/jit/JITExceptions.h
@@ -23,23 +23,16 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef JITExceptions_h
-#define JITExceptions_h
-
-#include "JSCJSValue.h"
-
-#if ENABLE(JIT) || ENABLE(LLINT)
+#pragma once
namespace JSC {
+enum UnwindStart : uint8_t;
+
class ExecState;
class VM;
-void genericUnwind(VM*, ExecState*, JSValue exceptionValue);
+void genericUnwind(VM*, ExecState*, UnwindStart);
+void genericUnwind(VM*, ExecState*);
} // namespace JSC
-
-#endif // ENABLE(JIT) || ENABLE(LLINT)
-
-#endif // JITExceptions_h
-
diff --git a/Source/JavaScriptCore/jit/JITInlineCacheGenerator.cpp b/Source/JavaScriptCore/jit/JITInlineCacheGenerator.cpp
index 74b086a7c..04179394a 100644
--- a/Source/JavaScriptCore/jit/JITInlineCacheGenerator.cpp
+++ b/Source/JavaScriptCore/jit/JITInlineCacheGenerator.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,70 +29,60 @@
#if ENABLE(JIT)
#include "CodeBlock.h"
+#include "InlineAccess.h"
+#include "JSCInlines.h"
#include "LinkBuffer.h"
-#include "Operations.h"
+#include "StructureStubInfo.h"
namespace JSC {
static StructureStubInfo* garbageStubInfo()
{
- static StructureStubInfo* stubInfo = new StructureStubInfo();
+ static StructureStubInfo* stubInfo = new StructureStubInfo(AccessType::Get);
return stubInfo;
}
-JITInlineCacheGenerator::JITInlineCacheGenerator(CodeBlock* codeBlock, CodeOrigin codeOrigin)
+JITInlineCacheGenerator::JITInlineCacheGenerator(
+ CodeBlock* codeBlock, CodeOrigin codeOrigin, CallSiteIndex callSite, AccessType accessType)
: m_codeBlock(codeBlock)
{
- m_stubInfo = m_codeBlock ? m_codeBlock->addStubInfo() : garbageStubInfo();
+ m_stubInfo = m_codeBlock ? m_codeBlock->addStubInfo(accessType) : garbageStubInfo();
m_stubInfo->codeOrigin = codeOrigin;
+ m_stubInfo->callSiteIndex = callSite;
}
JITByIdGenerator::JITByIdGenerator(
- CodeBlock* codeBlock, CodeOrigin codeOrigin, const RegisterSet& usedRegisters,
- GPRReg callFrameRegister, JSValueRegs base, JSValueRegs value, bool registersFlushed)
- : JITInlineCacheGenerator(codeBlock, codeOrigin)
+ CodeBlock* codeBlock, CodeOrigin codeOrigin, CallSiteIndex callSite, AccessType accessType,
+ const RegisterSet& usedRegisters, JSValueRegs base, JSValueRegs value)
+ : JITInlineCacheGenerator(codeBlock, codeOrigin, callSite, accessType)
, m_base(base)
, m_value(value)
{
- m_stubInfo->patch.registersFlushed = registersFlushed;
m_stubInfo->patch.usedRegisters = usedRegisters;
- // This is a convenience - in cases where the only registers you're using are base/value,
- // it allows you to pass RegisterSet() as the usedRegisters argument.
- m_stubInfo->patch.usedRegisters.set(base);
- m_stubInfo->patch.usedRegisters.set(value);
-
- m_stubInfo->patch.callFrameRegister = static_cast<int8_t>(callFrameRegister);
m_stubInfo->patch.baseGPR = static_cast<int8_t>(base.payloadGPR());
m_stubInfo->patch.valueGPR = static_cast<int8_t>(value.payloadGPR());
#if USE(JSVALUE32_64)
+ m_stubInfo->patch.baseTagGPR = static_cast<int8_t>(base.tagGPR());
m_stubInfo->patch.valueTagGPR = static_cast<int8_t>(value.tagGPR());
#endif
}
void JITByIdGenerator::finalize(LinkBuffer& fastPath, LinkBuffer& slowPath)
{
- CodeLocationCall callReturnLocation = slowPath.locationOf(m_call);
- m_stubInfo->callReturnLocation = callReturnLocation;
- m_stubInfo->patch.deltaCheckImmToCall = MacroAssembler::differenceBetweenCodePtr(
- fastPath.locationOf(m_structureImm), callReturnLocation);
- m_stubInfo->patch.deltaCallToJump = MacroAssembler::differenceBetweenCodePtr(
- callReturnLocation, fastPath.locationOf(m_structureCheck));
-#if USE(JSVALUE64)
- m_stubInfo->patch.deltaCallToLoadOrStore = MacroAssembler::differenceBetweenCodePtr(
- callReturnLocation, fastPath.locationOf(m_loadOrStore));
-#else
- m_stubInfo->patch.deltaCallToTagLoadOrStore = MacroAssembler::differenceBetweenCodePtr(
- callReturnLocation, fastPath.locationOf(m_tagLoadOrStore));
- m_stubInfo->patch.deltaCallToPayloadLoadOrStore = MacroAssembler::differenceBetweenCodePtr(
- callReturnLocation, fastPath.locationOf(m_loadOrStore));
-#endif
- m_stubInfo->patch.deltaCallToSlowCase = MacroAssembler::differenceBetweenCodePtr(
- callReturnLocation, slowPath.locationOf(m_slowPathBegin));
- m_stubInfo->patch.deltaCallToDone = MacroAssembler::differenceBetweenCodePtr(
- callReturnLocation, fastPath.locationOf(m_done));
- m_stubInfo->patch.deltaCallToStorageLoad = MacroAssembler::differenceBetweenCodePtr(
- callReturnLocation, fastPath.locationOf(m_propertyStorageLoad));
+ ASSERT(m_start.isSet());
+ CodeLocationLabel start = fastPath.locationOf(m_start);
+ m_stubInfo->patch.start = start;
+
+ int32_t inlineSize = MacroAssembler::differenceBetweenCodePtr(
+ start, fastPath.locationOf(m_done));
+ ASSERT(inlineSize > 0);
+ m_stubInfo->patch.inlineSize = inlineSize;
+
+ m_stubInfo->patch.deltaFromStartToSlowPathCallLocation = MacroAssembler::differenceBetweenCodePtr(
+ start, slowPath.locationOf(m_slowPathCall));
+ m_stubInfo->patch.deltaFromStartToSlowPathStart = MacroAssembler::differenceBetweenCodePtr(
+ start, slowPath.locationOf(m_slowPathBegin));
}
void JITByIdGenerator::finalize(LinkBuffer& linkBuffer)
@@ -100,42 +90,38 @@ void JITByIdGenerator::finalize(LinkBuffer& linkBuffer)
finalize(linkBuffer, linkBuffer);
}
-void JITByIdGenerator::generateFastPathChecks(MacroAssembler& jit, GPRReg butterfly)
+void JITByIdGenerator::generateFastCommon(MacroAssembler& jit, size_t inlineICSize)
{
- m_structureCheck = jit.patchableBranchPtrWithPatch(
- MacroAssembler::NotEqual,
- MacroAssembler::Address(m_base.payloadGPR(), JSCell::structureOffset()),
- m_structureImm, MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(unusedPointer)));
-
- m_propertyStorageLoad = jit.convertibleLoadPtr(
- MacroAssembler::Address(m_base.payloadGPR(), JSObject::butterflyOffset()), butterfly);
+ m_start = jit.label();
+ size_t startSize = jit.m_assembler.buffer().codeSize();
+ m_slowPathJump = jit.jump();
+ size_t jumpSize = jit.m_assembler.buffer().codeSize() - startSize;
+ size_t nopsToEmitInBytes = inlineICSize - jumpSize;
+ jit.emitNops(nopsToEmitInBytes);
+ ASSERT(jit.m_assembler.buffer().codeSize() - startSize == inlineICSize);
+ m_done = jit.label();
+}
+
+JITGetByIdGenerator::JITGetByIdGenerator(
+ CodeBlock* codeBlock, CodeOrigin codeOrigin, CallSiteIndex callSite, const RegisterSet& usedRegisters,
+ UniquedStringImpl* propertyName, JSValueRegs base, JSValueRegs value, AccessType accessType)
+ : JITByIdGenerator(codeBlock, codeOrigin, callSite, accessType, usedRegisters, base, value)
+ , m_isLengthAccess(propertyName == codeBlock->vm()->propertyNames->length.impl())
+{
+ RELEASE_ASSERT(base.payloadGPR() != value.tagGPR());
}
void JITGetByIdGenerator::generateFastPath(MacroAssembler& jit)
{
- generateFastPathChecks(jit, m_value.payloadGPR());
-
-#if USE(JSVALUE64)
- m_loadOrStore = jit.load64WithCompactAddressOffsetPatch(
- MacroAssembler::Address(m_value.payloadGPR(), 0), m_value.payloadGPR()).label();
-#else
- m_tagLoadOrStore = jit.load32WithCompactAddressOffsetPatch(
- MacroAssembler::Address(m_value.payloadGPR(), 0), m_value.tagGPR()).label();
- m_loadOrStore = jit.load32WithCompactAddressOffsetPatch(
- MacroAssembler::Address(m_value.payloadGPR(), 0), m_value.payloadGPR()).label();
-#endif
-
- m_done = jit.label();
+ generateFastCommon(jit, m_isLengthAccess ? InlineAccess::sizeForLengthAccess() : InlineAccess::sizeForPropertyAccess());
}
JITPutByIdGenerator::JITPutByIdGenerator(
- CodeBlock* codeBlock, CodeOrigin codeOrigin, const RegisterSet& usedRegisters,
- GPRReg callFrameRegister, JSValueRegs base, JSValueRegs value, GPRReg scratch,
- bool registersFlushed, ECMAMode ecmaMode, PutKind putKind)
+ CodeBlock* codeBlock, CodeOrigin codeOrigin, CallSiteIndex callSite, const RegisterSet& usedRegisters,
+ JSValueRegs base, JSValueRegs value, GPRReg scratch,
+ ECMAMode ecmaMode, PutKind putKind)
: JITByIdGenerator(
- codeBlock, codeOrigin, usedRegisters, callFrameRegister, base, value,
- registersFlushed)
- , m_scratch(scratch)
+ codeBlock, codeOrigin, callSite, AccessType::Put, usedRegisters, base, value)
, m_ecmaMode(ecmaMode)
, m_putKind(putKind)
{
@@ -144,19 +130,7 @@ JITPutByIdGenerator::JITPutByIdGenerator(
void JITPutByIdGenerator::generateFastPath(MacroAssembler& jit)
{
- generateFastPathChecks(jit, m_scratch);
-
-#if USE(JSVALUE64)
- m_loadOrStore = jit.store64WithAddressOffsetPatch(
- m_value.payloadGPR(), MacroAssembler::Address(m_scratch, 0)).label();
-#else
- m_tagLoadOrStore = jit.store32WithAddressOffsetPatch(
- m_value.tagGPR(), MacroAssembler::Address(m_scratch, 0)).label();
- m_loadOrStore = jit.store32WithAddressOffsetPatch(
- m_value.payloadGPR(), MacroAssembler::Address(m_scratch, 0)).label();
-#endif
-
- m_done = jit.label();
+ generateFastCommon(jit, InlineAccess::sizeForPropertyReplace());
}
V_JITOperation_ESsiJJI JITPutByIdGenerator::slowPathFunction()
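
Instead of patchable structure checks and offset-patched loads, the by-id generators now reserve a fixed-size inline region that starts out as a jump to the slow path padded with nops; later repatching overwrites the region in place with the real access. A byte-buffer sketch of reserving such a region, mirroring the size accounting in generateFastCommon (the encodings below are made up for illustration):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>
    #include <iterator>
    #include <vector>

    struct InlineRegion {
        size_t start;
        size_t size;
    };

    static InlineRegion emitInlineRegion(std::vector<uint8_t>& buffer, size_t inlineICSize)
    {
        const uint8_t jumpToSlowPath[] = { 0xE9, 0, 0, 0, 0 }; // placeholder 5-byte jump
        const uint8_t nop = 0x90;

        size_t start = buffer.size();
        buffer.insert(buffer.end(), std::begin(jumpToSlowPath), std::end(jumpToSlowPath));
        assert(buffer.size() - start <= inlineICSize);
        while (buffer.size() - start < inlineICSize)
            buffer.push_back(nop);                      // pad so the region has a known, fixed size
        assert(buffer.size() - start == inlineICSize);  // mirrors the size check in generateFastCommon
        return { start, inlineICSize };
    }

    int main()
    {
        std::vector<uint8_t> buffer;
        InlineRegion region = emitInlineRegion(buffer, 23);
        std::printf("inline IC at %zu, %zu bytes\n", region.start, region.size);
        return 0;
    }

The fixed size is what makes later in-place rewriting safe: any fast path that fits in the reserved bytes can replace the initial jump-plus-nops without moving surrounding code.
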
diff --git a/Source/JavaScriptCore/jit/JITInlineCacheGenerator.h b/Source/JavaScriptCore/jit/JITInlineCacheGenerator.h
index 6ff0c09b5..469290db5 100644
--- a/Source/JavaScriptCore/jit/JITInlineCacheGenerator.h
+++ b/Source/JavaScriptCore/jit/JITInlineCacheGenerator.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,8 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef JITInlineCacheGenerator_h
-#define JITInlineCacheGenerator_h
+#pragma once
#if ENABLE(JIT)
@@ -37,11 +36,16 @@
namespace JSC {
class CodeBlock;
+class StructureStubInfo;
+
+struct CallSiteIndex;
+
+enum class AccessType : int8_t;
class JITInlineCacheGenerator {
protected:
JITInlineCacheGenerator() { }
- JITInlineCacheGenerator(CodeBlock*, CodeOrigin);
+ JITInlineCacheGenerator(CodeBlock*, CodeOrigin, CallSiteIndex, AccessType);
public:
StructureStubInfo* stubInfo() const { return m_stubInfo; }
@@ -56,38 +60,37 @@ protected:
JITByIdGenerator() { }
JITByIdGenerator(
- CodeBlock*, CodeOrigin, const RegisterSet&, GPRReg callFrameRegister,
- JSValueRegs base, JSValueRegs value, bool registersFlushed);
+ CodeBlock*, CodeOrigin, CallSiteIndex, AccessType, const RegisterSet&, JSValueRegs base,
+ JSValueRegs value);
public:
void reportSlowPathCall(MacroAssembler::Label slowPathBegin, MacroAssembler::Call call)
{
m_slowPathBegin = slowPathBegin;
- m_call = call;
+ m_slowPathCall = call;
}
MacroAssembler::Label slowPathBegin() const { return m_slowPathBegin; }
- MacroAssembler::Jump slowPathJump() const { return m_structureCheck.m_jump; }
+ MacroAssembler::Jump slowPathJump() const
+ {
+ ASSERT(m_slowPathJump.isSet());
+ return m_slowPathJump;
+ }
void finalize(LinkBuffer& fastPathLinkBuffer, LinkBuffer& slowPathLinkBuffer);
void finalize(LinkBuffer&);
protected:
- void generateFastPathChecks(MacroAssembler&, GPRReg butterfly);
+ void generateFastCommon(MacroAssembler&, size_t size);
JSValueRegs m_base;
JSValueRegs m_value;
- MacroAssembler::DataLabelPtr m_structureImm;
- MacroAssembler::PatchableJump m_structureCheck;
- MacroAssembler::ConvertibleLoadLabel m_propertyStorageLoad;
- AssemblerLabel m_loadOrStore;
-#if USE(JSVALUE32_64)
- AssemblerLabel m_tagLoadOrStore;
-#endif
+ MacroAssembler::Label m_start;
MacroAssembler::Label m_done;
MacroAssembler::Label m_slowPathBegin;
- MacroAssembler::Call m_call;
+ MacroAssembler::Call m_slowPathCall;
+ MacroAssembler::Jump m_slowPathJump;
};
class JITGetByIdGenerator : public JITByIdGenerator {
@@ -95,16 +98,13 @@ public:
JITGetByIdGenerator() { }
JITGetByIdGenerator(
- CodeBlock* codeBlock, CodeOrigin codeOrigin, const RegisterSet& usedRegisters,
- GPRReg callFrameRegister, JSValueRegs base, JSValueRegs value,
- bool registersFlushed)
- : JITByIdGenerator(
- codeBlock, codeOrigin, usedRegisters, callFrameRegister, base, value,
- registersFlushed)
- {
- }
+ CodeBlock*, CodeOrigin, CallSiteIndex, const RegisterSet& usedRegisters, UniquedStringImpl* propertyName,
+ JSValueRegs base, JSValueRegs value, AccessType);
void generateFastPath(MacroAssembler&);
+
+private:
+ bool m_isLengthAccess;
};
class JITPutByIdGenerator : public JITByIdGenerator {
@@ -112,16 +112,14 @@ public:
JITPutByIdGenerator() { }
JITPutByIdGenerator(
- CodeBlock*, CodeOrigin, const RegisterSet& usedRegisters, GPRReg callFrameRegister,
- JSValueRegs base, JSValueRegs value, GPRReg scratch, bool registersFlushed,
- ECMAMode, PutKind);
+ CodeBlock*, CodeOrigin, CallSiteIndex, const RegisterSet& usedRegisters, JSValueRegs base,
+ JSValueRegs, GPRReg scratch, ECMAMode, PutKind);
void generateFastPath(MacroAssembler&);
V_JITOperation_ESsiJJI slowPathFunction();
private:
- GPRReg m_scratch;
ECMAMode m_ecmaMode;
PutKind m_putKind;
};
@@ -129,6 +127,3 @@ private:
} // namespace JSC
#endif // ENABLE(JIT)
-
-#endif // JITInlineCacheGenerator_h
-
diff --git a/Source/JavaScriptCore/jit/JITInlines.h b/Source/JavaScriptCore/jit/JITInlines.h
index 9330e773e..5d2d6882e 100644
--- a/Source/JavaScriptCore/jit/JITInlines.h
+++ b/Source/JavaScriptCore/jit/JITInlines.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012-2013, 2015-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,17 +23,60 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef JITInlines_h
-#define JITInlines_h
-
+#pragma once
#if ENABLE(JIT)
-#include "CallFrameInlines.h"
+#include "JSCInlines.h"
namespace JSC {
-ALWAYS_INLINE bool JIT::isOperandConstantImmediateDouble(int src)
+#if USE(JSVALUE64)
+inline MacroAssembler::JumpList JIT::emitDoubleGetByVal(Instruction* instruction, PatchableJump& badType)
+{
+ JumpList slowCases = emitDoubleLoad(instruction, badType);
+ moveDoubleTo64(fpRegT0, regT0);
+ sub64(tagTypeNumberRegister, regT0);
+ return slowCases;
+}
+#else
+inline MacroAssembler::JumpList JIT::emitDoubleGetByVal(Instruction* instruction, PatchableJump& badType)
+{
+ JumpList slowCases = emitDoubleLoad(instruction, badType);
+ moveDoubleToInts(fpRegT0, regT0, regT1);
+ return slowCases;
+}
+#endif // USE(JSVALUE64)
+
+ALWAYS_INLINE MacroAssembler::JumpList JIT::emitLoadForArrayMode(Instruction* currentInstruction, JITArrayMode arrayMode, PatchableJump& badType)
+{
+ switch (arrayMode) {
+ case JITInt32:
+ return emitInt32Load(currentInstruction, badType);
+ case JITDouble:
+ return emitDoubleLoad(currentInstruction, badType);
+ case JITContiguous:
+ return emitContiguousLoad(currentInstruction, badType);
+ case JITArrayStorage:
+ return emitArrayStorageLoad(currentInstruction, badType);
+ default:
+ break;
+ }
+ RELEASE_ASSERT_NOT_REACHED();
+ return MacroAssembler::JumpList();
+}
+
+inline MacroAssembler::JumpList JIT::emitContiguousGetByVal(Instruction* instruction, PatchableJump& badType, IndexingType expectedShape)
+{
+ return emitContiguousLoad(instruction, badType, expectedShape);
+}
+
+inline MacroAssembler::JumpList JIT::emitArrayStorageGetByVal(Instruction* instruction, PatchableJump& badType)
+{
+ return emitArrayStorageLoad(instruction, badType);
+}
+
+ALWAYS_INLINE bool JIT::isOperandConstantDouble(int src)
{
return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble();
}
@@ -44,7 +87,7 @@ ALWAYS_INLINE JSValue JIT::getConstantOperand(int src)
return m_codeBlock->getConstant(src);
}
-ALWAYS_INLINE void JIT::emitPutIntToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry entry)
+ALWAYS_INLINE void JIT::emitPutIntToCallFrameHeader(RegisterID from, int entry)
{
#if USE(JSVALUE32_64)
store32(TrustedImm32(Int32Tag), intTagFor(entry, callFrameRegister));
@@ -54,26 +97,9 @@ ALWAYS_INLINE void JIT::emitPutIntToCallFrameHeader(RegisterID from, JSStack::Ca
#endif
}
-ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
-{
- loadPtr(Address(from, entry * sizeof(Register)), to);
-}
-
-ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
-{
- load32(Address(from, entry * sizeof(Register)), to);
-}
-
-#if USE(JSVALUE64)
-ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader64(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
-{
- load64(Address(from, entry * sizeof(Register)), to);
-}
-#endif
-
ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures)
{
- failures.append(branchPtr(NotEqual, Address(src, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
+ failures.append(branchStructure(NotEqual, Address(src, JSCell::structureIDOffset()), m_vm->stringStructure.get()));
failures.append(branch32(NotEqual, MacroAssembler::Address(src, ThunkHelpers::jsStringLengthOffset()), TrustedImm32(1)));
loadPtr(MacroAssembler::Address(src, ThunkHelpers::jsStringValueOffset()), dst);
failures.append(branchTest32(Zero, dst));
@@ -92,23 +118,34 @@ ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst,
ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
{
- ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
-
+ ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
Call nakedCall = nearCall();
m_calls.append(CallRecord(nakedCall, m_bytecodeOffset, function.executableAddress()));
return nakedCall;
}
+ALWAYS_INLINE JIT::Call JIT::emitNakedTailCall(CodePtr function)
+{
+ ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+ Call nakedCall = nearTailCall();
+ m_calls.append(CallRecord(nakedCall, m_bytecodeOffset, function.executableAddress()));
+ return nakedCall;
+}
+
ALWAYS_INLINE void JIT::updateTopCallFrame()
{
ASSERT(static_cast<int>(m_bytecodeOffset) >= 0);
#if USE(JSVALUE32_64)
- Instruction* instruction = m_codeBlock->instructions().begin() + m_bytecodeOffset + 1;
- uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
+ Instruction* instruction = m_codeBlock->instructions().begin() + m_bytecodeOffset;
+ uint32_t locationBits = CallSiteIndex(instruction).bits();
#else
- uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(m_bytecodeOffset + 1);
+ uint32_t locationBits = CallSiteIndex(m_bytecodeOffset).bits();
#endif
- store32(TrustedImm32(locationBits), intTagFor(JSStack::ArgumentCount));
+ store32(TrustedImm32(locationBits), intTagFor(CallFrameSlot::argumentCount));
+
+ // FIXME: It's not clear that this is needed. JITOperations tend to update the top call frame on
+ // the C++ side.
+ // https://bugs.webkit.org/show_bug.cgi?id=155693
storePtr(callFrameRegister, &m_vm->topCallFrame);
}
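Editor's note: updateTopCallFrame now writes the CallSiteIndex bits into the tag half of the argument-count call-frame slot, leaving the payload half (the actual argument count) untouched. A minimal standalone model of that slot layout — illustration only, not JSC code:

```cpp
#include <cstdint>
#include <cstdio>

// The 64-bit argument-count slot: payload (low 32 bits) holds the argument
// count, tag (high 32 bits) holds the call-site bits stored above.
struct ArgumentCountSlot {
    uint64_t bits { 0 };

    void setArgumentCount(uint32_t count) { bits = (bits & 0xffffffff00000000ull) | count; }
    void setCallSiteBits(uint32_t locationBits) { bits = (bits & 0x00000000ffffffffull) | (uint64_t(locationBits) << 32); }

    uint32_t argumentCount() const { return uint32_t(bits); }
    uint32_t callSiteBits() const { return uint32_t(bits >> 32); }
};

int main()
{
    ArgumentCountSlot slot;
    slot.setArgumentCount(3);
    slot.setCallSiteBits(42); // e.g. a bytecode offset wrapped in a CallSiteIndex
    std::printf("argumentCount=%u callSiteBits=%u\n", slot.argumentCount(), slot.callSiteBits());
    return 0;
}
```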
@@ -120,6 +157,16 @@ ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithExceptionCheck(const Funct
return call;
}
+#if OS(WINDOWS) && CPU(X86_64)
+ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithExceptionCheckAndSlowPathReturnType(const FunctionPtr& function)
+{
+ updateTopCallFrame();
+ MacroAssembler::Call call = appendCallWithSlowPathReturnType(function);
+ exceptionCheck();
+ return call;
+}
+#endif
+
ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithCallFrameRollbackOnException(const FunctionPtr& function)
{
updateTopCallFrame(); // The callee is responsible for setting topCallFrame to their caller
@@ -151,12 +198,49 @@ ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithExceptionCheckSetJSValueRe
return call;
}
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(P_JITOperation_E operation)
+{
+ setupArgumentsExecState();
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperationNoExceptionCheck(Z_JITOperation_E operation)
+{
+ setupArgumentsExecState();
+ updateTopCallFrame();
+ return appendCall(operation);
+}
+
ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(C_JITOperation_E operation)
{
setupArgumentsExecState();
return appendCallWithExceptionCheck(operation);
}
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(C_JITOperation_EJsc operation, GPRReg arg1)
+{
+ setupArgumentsWithExecState(arg1);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(C_JITOperation_EJscZ operation, GPRReg arg1, int32_t arg2)
+{
+ setupArgumentsWithExecState(arg1, TrustedImm32(arg2));
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(C_JITOperation_EL operation, GPRReg arg1)
+{
+ setupArgumentsWithExecState(arg1);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(C_JITOperation_EL operation, TrustedImmPtr arg1)
+{
+ setupArgumentsWithExecState(arg1);
+ return appendCallWithExceptionCheck(operation);
+}
+
ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(C_JITOperation_EO operation, GPRReg arg)
{
setupArgumentsWithExecState(arg);
@@ -205,6 +289,18 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EC operatio
return appendCallWithExceptionCheck(operation);
}
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJscC operation, int dst, GPRReg arg1, JSCell* cell)
+{
+ setupArgumentsWithExecState(arg1, TrustedImmPtr(cell));
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJscCJ operation, int dst, GPRReg arg1, JSCell* cell, GPRReg arg2)
+{
+ setupArgumentsWithExecState(arg1, TrustedImmPtr(cell), arg2);
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EP operation, int dst, void* pointer)
{
setupArgumentsWithExecState(TrustedImmPtr(pointer));
@@ -217,16 +313,22 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(WithProfileTag, J_JITOpera
return appendCallWithExceptionCheckSetJSValueResultWithProfile(operation, dst);
}
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EPc operation, int dst, Instruction* bytecodePC)
+{
+ setupArgumentsWithExecState(TrustedImmPtr(bytecodePC));
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EZ operation, int dst, int32_t arg)
{
setupArgumentsWithExecState(TrustedImm32(arg));
return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(P_JITOperation_EZ operation, int32_t op)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EZZ operation, int dst, int32_t arg1, int32_t arg2)
{
- setupArgumentsWithExecState(TrustedImm32(op));
- return appendCallWithExceptionCheck(operation);
+ setupArgumentsWithExecState(TrustedImm32(arg1), TrustedImm32(arg2));
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
}
ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(S_JITOperation_ECC operation, RegisterID regOp1, RegisterID regOp2)
@@ -241,6 +343,17 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(S_JITOperation_EOJss opera
return appendCallWithExceptionCheck(operation);
}
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(Sprt_JITOperation_EZ operation, int32_t op)
+{
+#if OS(WINDOWS) && CPU(X86_64)
+ setupArgumentsWithExecStateForCallWithSlowPathReturnType(TrustedImm32(op));
+ return appendCallWithExceptionCheckAndSlowPathReturnType(operation);
+#else
+ setupArgumentsWithExecState(TrustedImm32(op));
+ return appendCallWithExceptionCheck(operation);
+#endif
+}
+
ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_E operation)
{
setupArgumentsExecState();
@@ -259,6 +372,13 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECC operati
return appendCallWithExceptionCheck(operation);
}
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EE operation, RegisterID regOp)
+{
+ setupArgumentsWithExecState(regOp);
+ updateTopCallFrame();
+ return appendCallWithExceptionCheck(operation);
+}
+
ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EPc operation, Instruction* bytecodePC)
{
setupArgumentsWithExecState(TrustedImmPtr(bytecodePC));
@@ -289,21 +409,74 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperationWithCallFrameRollbackOnExce
return appendCallWithCallFrameRollbackOnException(operation);
}
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECIZC operation, RegisterID regOp1, UniquedStringImpl* identOp2, int32_t op3, RegisterID regOp4)
+{
+ setupArgumentsWithExecState(regOp1, TrustedImmPtr(identOp2), TrustedImm32(op3), regOp4);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJ operation, JSValueRegs result, JSValueRegs arg)
+{
+ setupArgumentsWithExecState(arg);
+ Call call = appendCallWithExceptionCheck(operation);
+ setupResults(result);
+ return call;
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJ operation, JSValueRegs result, JSValueRegs arg1, JSValueRegs arg2)
+{
+ setupArgumentsWithExecState(arg1, arg2);
+ Call call = appendCallWithExceptionCheck(operation);
+ setupResults(result);
+ return call;
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJArp operation, JSValueRegs result, JSValueRegs operand, ArithProfile* arithProfile)
+{
+ setupArgumentsWithExecState(operand, TrustedImmPtr(arithProfile));
+ Call call = appendCallWithExceptionCheck(operation);
+ setupResults(result);
+ return call;
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJArp operation, JSValueRegs result, JSValueRegs arg1, JSValueRegs arg2, ArithProfile* arithProfile)
+{
+ setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(arithProfile));
+ Call call = appendCallWithExceptionCheck(operation);
+ setupResults(result);
+ return call;
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJMic operation, JSValueRegs result, JSValueRegs arg, TrustedImmPtr mathIC)
+{
+ setupArgumentsWithExecState(arg, mathIC);
+ Call call = appendCallWithExceptionCheck(operation);
+ setupResults(result);
+ return call;
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJMic operation, JSValueRegs result, JSValueRegs arg1, JSValueRegs arg2, TrustedImmPtr mathIC)
+{
+ setupArgumentsWithExecState(arg1, arg2, mathIC);
+ Call call = appendCallWithExceptionCheck(operation);
+ setupResults(result);
+ return call;
+}
#if USE(JSVALUE64)
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(F_JITOperation_EJZ operation, GPRReg arg1, int32_t arg3)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(Z_JITOperation_EJZZ operation, GPRReg arg1, int32_t arg2, int32_t arg3)
{
- setupArgumentsWithExecState(arg1, TrustedImm32(arg3));
+ setupArgumentsWithExecState(arg1, TrustedImm32(arg2), TrustedImm32(arg3));
return appendCallWithExceptionCheck(operation);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(F_JITOperation_EFJJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(F_JITOperation_EFJZZ operation, GPRReg arg1, GPRReg arg2, int32_t arg3, GPRReg arg4)
{
- setupArgumentsWithExecState(arg1, arg2, arg3);
+ setupArgumentsWithExecState(arg1, arg2, TrustedImm32(arg3), arg4);
return appendCallWithExceptionCheck(operation);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ESsiJJI operation, StructureStubInfo* stubInfo, RegisterID regOp1, RegisterID regOp2, StringImpl* uid)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ESsiJJI operation, StructureStubInfo* stubInfo, RegisterID regOp1, RegisterID regOp2, UniquedStringImpl* uid)
{
setupArgumentsWithExecState(TrustedImmPtr(stubInfo), regOp1, regOp2, TrustedImmPtr(uid));
return appendCallWithExceptionCheck(operation);
@@ -315,7 +488,31 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJJJ operat
return appendCallWithExceptionCheck(operation);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(JIT::WithProfileTag, J_JITOperation_ESsiJI operation, int dst, StructureStubInfo* stubInfo, GPRReg arg1, StringImpl* uid)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJJJAp operation, RegisterID regOp1, RegisterID regOp2, RegisterID regOp3, ArrayProfile* arrayProfile)
+{
+ setupArgumentsWithExecState(regOp1, regOp2, regOp3, TrustedImmPtr(arrayProfile));
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJJJBy operation, RegisterID regOp1, RegisterID regOp2, RegisterID regOp3, ByValInfo* byValInfo)
+{
+ setupArgumentsWithExecState(regOp1, regOp2, regOp3, TrustedImmPtr(byValInfo));
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EZJ operation, int dst, GPRReg arg)
+{
+ setupArgumentsWithExecState(TrustedImm32(dst), arg);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_ESsiJI operation, int dst, StructureStubInfo* stubInfo, GPRReg arg1, UniquedStringImpl* uid)
+{
+ setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1, TrustedImmPtr(uid));
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(JIT::WithProfileTag, J_JITOperation_ESsiJI operation, int dst, StructureStubInfo* stubInfo, GPRReg arg1, UniquedStringImpl* uid)
{
setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1, TrustedImmPtr(uid));
return appendCallWithExceptionCheckSetJSValueResultWithProfile(operation, dst);
@@ -339,7 +536,7 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJ operatio
return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJIdc operation, int dst, GPRReg arg1, const Identifier* arg2)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJI operation, int dst, GPRReg arg1, UniquedStringImpl* arg2)
{
setupArgumentsWithExecState(arg1, TrustedImmPtr(arg2));
return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
@@ -351,6 +548,24 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJ operati
return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
}
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJAp operation, int dst, GPRReg arg1, GPRReg arg2, ArrayProfile* arrayProfile)
+{
+ setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(arrayProfile));
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJBy operation, int dst, GPRReg arg1, GPRReg arg2, ByValInfo* byValInfo)
+{
+ setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(byValInfo));
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(Z_JITOperation_EJOJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
+{
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ return appendCallWithExceptionCheck(operation);
+}
+
ALWAYS_INLINE MacroAssembler::Call JIT::callOperationNoExceptionCheck(V_JITOperation_EJ operation, GPRReg arg1)
{
setupArgumentsWithExecState(arg1);
@@ -370,15 +585,29 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(S_JITOperation_EJ operatio
return appendCallWithExceptionCheck(operation);
}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(S_JITOperation_EJI operation, GPRReg arg1, UniquedStringImpl* arg2)
+{
+ setupArgumentsWithExecState(arg1, TrustedImmPtr(arg2));
+ return appendCallWithExceptionCheck(operation);
+}
+
+
ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(S_JITOperation_EJJ operation, RegisterID regOp1, RegisterID regOp2)
{
setupArgumentsWithExecState(regOp1, regOp2);
return appendCallWithExceptionCheck(operation);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EIdJZ operation, const Identifier* identOp1, RegisterID regOp2, int32_t op3)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EZSymtabJ operation, int op1, SymbolTable* symbolTable, RegisterID regOp3)
{
- setupArgumentsWithExecState(TrustedImmPtr(identOp1), regOp2, TrustedImm32(op3));
+ setupArgumentsWithExecState(TrustedImm32(op1), TrustedImmPtr(symbolTable), regOp3);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EZSymtabJ operation, int op1, SymbolTable* symbolTable, RegisterID regOp3)
+{
+ setupArgumentsWithExecState(TrustedImm32(op1), TrustedImmPtr(symbolTable), regOp3);
return appendCallWithExceptionCheck(operation);
}
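Editor's note: every callOperation overload in this block follows the same skeleton — marshal the operands behind an implicit ExecState argument, emit the call, emit an exception check, and, for the result-returning J_* variants, store the returned JSValue into the destination virtual register. A compressed illustrative sketch of that skeleton, with toy stand-ins rather than the real helpers:

```cpp
#include <cstdio>

// Toy stand-ins for the assembler helpers; only the ordering of steps matters.
struct ToyJIT {
    void setupArgumentsWithExecState(int argCount) { std::printf("marshal ExecState + %d operand(s)\n", argCount); }
    void updateTopCallFrame() { std::printf("publish callFrameRegister to vm.topCallFrame\n"); }
    int appendCall(const char* operation) { std::printf("call %s\n", operation); return 0; }
    void exceptionCheck() { std::printf("branch to the handler if the VM has an exception\n"); }
    void emitPutVirtualRegister(int dst) { std::printf("store the JSValue result into vreg %d\n", dst); }

    // Shape of a J_JITOperation_* overload: exception-checked, result-producing.
    int callOperationLike(const char* operation, int dst, int argCount)
    {
        setupArgumentsWithExecState(argCount);
        updateTopCallFrame();
        int call = appendCall(operation);
        exceptionCheck();
        emitPutVirtualRegister(dst);
        return call;
    }
};

int main()
{
    ToyJIT jit;
    jit.callOperationLike("someOperation", /* dst */ 3, /* argCount */ 2);
    return 0;
}
```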
@@ -388,9 +617,21 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJ operatio
return appendCallWithExceptionCheck(operation);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJIdJJ operation, RegisterID regOp1, const Identifier* identOp2, RegisterID regOp3, RegisterID regOp4)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECIZJJ operation, RegisterID regOp1, UniquedStringImpl* identOp2, int32_t op3, RegisterID regOp4, RegisterID regOp5)
+{
+ setupArgumentsWithExecState(regOp1, TrustedImmPtr(identOp2), TrustedImm32(op3), regOp4, regOp5);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECJ operation, RegisterID arg1, RegisterID arg2)
+{
+ setupArgumentsWithExecState(arg1, arg2);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECJZC operation, RegisterID regOp1, RegisterID regOp2, int32_t op3, RegisterID regOp4)
{
- setupArgumentsWithExecState(regOp1, TrustedImmPtr(identOp2), regOp3, regOp4);
+ setupArgumentsWithExecState(regOp1, regOp2, TrustedImm32(op3), regOp4);
return appendCallWithExceptionCheck(operation);
}
@@ -408,23 +649,6 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJZJ operat
#else // USE(JSVALUE32_64)
-// EncodedJSValue in JSVALUE32_64 is a 64-bit integer. When being compiled in ARM EABI, it must be aligned even-numbered register (r0, r2 or [sp]).
-// To avoid assemblies from using wrong registers, let's occupy r1 or r3 with a dummy argument when necessary.
-#if (COMPILER_SUPPORTS(EABI) && CPU(ARM)) || CPU(MIPS)
-#define EABI_32BIT_DUMMY_ARG TrustedImm32(0),
-#else
-#define EABI_32BIT_DUMMY_ARG
-#endif
-
-// JSVALUE32_64 is a 64-bit integer that cannot be put half in an argument register and half on stack when using SH4 architecture.
-// To avoid this, let's occupy the 4th argument register (r7) with a dummy argument when necessary. This must only be done when there
-// is no other 32-bit value argument behind this 64-bit JSValue.
-#if CPU(SH4)
-#define SH4_32BIT_DUMMY_ARG TrustedImm32(0),
-#else
-#define SH4_32BIT_DUMMY_ARG
-#endif
-
ALWAYS_INLINE MacroAssembler::Call JIT::callOperationNoExceptionCheck(V_JITOperation_EJ operation, GPRReg arg1Tag, GPRReg arg1Payload)
{
setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag);
@@ -432,20 +656,21 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperationNoExceptionCheck(V_JITOpera
return appendCall(operation);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(F_JITOperation_EJZ operation, GPRReg arg1Tag, GPRReg arg1Payload, int32_t arg2)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(Z_JITOperation_EJOJ operation, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2, GPRReg arg3Tag, GPRReg arg3Payload)
{
-#if CPU(SH4)
- // We have to put arg3 in the 4th argument register (r7) as 64-bit value arg2 will be put on stack for sh4 architecure.
- setupArgumentsWithExecState(arg1Payload, arg1Tag, TrustedImm32(arg2));
-#else
- setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, TrustedImm32(arg2));
-#endif
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2, EABI_32BIT_DUMMY_ARG arg3Payload, arg3Tag);
return appendCallWithExceptionCheck(operation);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(F_JITOperation_EFJJ operation, GPRReg arg1, GPRReg arg2Tag, GPRReg arg2Payload, GPRReg arg3Tag, GPRReg arg3Payload)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(Z_JITOperation_EJZZ operation, GPRReg arg1Tag, GPRReg arg1Payload, int32_t arg2, int32_t arg3)
{
- setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag, arg3Payload, arg3Tag);
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, TrustedImm32(arg2), TrustedImm32(arg3));
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(F_JITOperation_EFJZZ operation, GPRReg arg1, GPRReg arg2Tag, GPRReg arg2Payload, int32_t arg3, GPRReg arg4)
+{
+ setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag, TrustedImm32(arg3), arg4);
return appendCallWithExceptionCheck(operation);
}
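Editor's note: the EABI_32BIT_DUMMY_ARG used throughout these JSVALUE32_64 overloads pads the argument list because ARM EABI (and the MIPS convention gated by the same macro) requires a 64-bit EncodedJSValue to start in an even-numbered 32-bit argument slot. A standalone model of that re-alignment, with hypothetical slot numbering — illustration only:

```cpp
#include <cstdio>

int main()
{
    // 32-bit argument slots: slot 0 (r0) already holds the ExecState.
    int nextSlot = 1;
    auto place64 = [&](const char* name) {
        if (nextSlot & 1)
            std::printf("slot %d: EABI_32BIT_DUMMY_ARG padding\n", nextSlot++);
        std::printf("slots %d-%d: %s (payload+tag)\n", nextSlot, nextSlot + 1, name);
        nextSlot += 2;
    };
    auto place32 = [&](const char* name) { std::printf("slot %d: %s\n", nextSlot++, name); };

    place64("arg1 JSValue"); // slot 1 is odd, so a dummy pads it out to slot 2
    place64("arg2 JSValue"); // already even-aligned, no dummy needed
    place32("arg3 int32");
    return 0;
}
```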
@@ -461,13 +686,19 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJ operatio
return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(JIT::WithProfileTag, J_JITOperation_ESsiJI operation, int dst, StructureStubInfo* stubInfo, GPRReg arg1Tag, GPRReg arg1Payload, StringImpl* uid)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_ESsiJI operation, int dst, StructureStubInfo* stubInfo, GPRReg arg1Tag, GPRReg arg1Payload, UniquedStringImpl* uid)
+{
+ setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1Payload, arg1Tag, TrustedImmPtr(uid));
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(JIT::WithProfileTag, J_JITOperation_ESsiJI operation, int dst, StructureStubInfo* stubInfo, GPRReg arg1Tag, GPRReg arg1Payload, UniquedStringImpl* uid)
{
setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1Payload, arg1Tag, TrustedImmPtr(uid));
return appendCallWithExceptionCheckSetJSValueResultWithProfile(operation, dst);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJIdc operation, int dst, GPRReg arg1Tag, GPRReg arg1Payload, const Identifier* arg2)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJI operation, int dst, GPRReg arg1Tag, GPRReg arg1Payload, UniquedStringImpl* arg2)
{
setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, TrustedImmPtr(arg2));
return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
@@ -475,13 +706,25 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJIdc opera
ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJ operation, int dst, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload)
{
- setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, SH4_32BIT_DUMMY_ARG arg2Payload, arg2Tag);
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2Payload, arg2Tag);
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJAp operation, int dst, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload, ArrayProfile* arrayProfile)
+{
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2Payload, arg2Tag, TrustedImmPtr(arrayProfile));
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJBy operation, int dst, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload, ByValInfo* byValInfo)
+{
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2Payload, arg2Tag, TrustedImmPtr(byValInfo));
return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
}
ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(JIT::WithProfileTag, J_JITOperation_EJJ operation, int dst, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload)
{
- setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, SH4_32BIT_DUMMY_ARG arg2Payload, arg2Tag);
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2Payload, arg2Tag);
return appendCallWithExceptionCheckSetJSValueResultWithProfile(operation, dst);
}
@@ -497,15 +740,33 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(S_JITOperation_EJ operatio
return appendCallWithExceptionCheck(operation);
}
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(S_JITOperation_EJI operation, GPRReg arg1Tag, GPRReg arg1Payload, UniquedStringImpl* arg2)
+{
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, TrustedImmPtr(arg2));
+ return appendCallWithExceptionCheck(operation);
+}
+
ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(S_JITOperation_EJJ operation, RegisterID arg1Tag, RegisterID arg1Payload, RegisterID arg2Tag, RegisterID arg2Payload)
{
- setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, SH4_32BIT_DUMMY_ARG arg2Payload, arg2Tag);
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2Payload, arg2Tag);
return appendCallWithExceptionCheck(operation);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECICC operation, RegisterID regOp1, const Identifier* identOp2, RegisterID regOp3, RegisterID regOp4)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECIZCC operation, RegisterID regOp1, UniquedStringImpl* identOp2, int32_t op3, RegisterID regOp4, RegisterID regOp5)
{
- setupArgumentsWithExecState(regOp1, TrustedImmPtr(identOp2), regOp3, regOp4);
+ setupArgumentsWithExecState(regOp1, TrustedImmPtr(identOp2), TrustedImm32(op3), regOp4, regOp5);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECJ operation, RegisterID arg1, RegisterID arg2Tag, RegisterID arg2Payload)
+{
+ setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECJZC operation, RegisterID arg1, RegisterID arg2Tag, RegisterID arg2Payload, int32_t arg3, RegisterID arg4)
+{
+ setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag, TrustedImm32(arg3), arg4);
return appendCallWithExceptionCheck(operation);
}
@@ -515,13 +776,13 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJ operatio
return appendCallWithExceptionCheck(operation);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EIdJZ operation, const Identifier* identOp1, RegisterID regOp2Tag, RegisterID regOp2Payload, int32_t op3)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EZSymtabJ operation, int32_t op1, SymbolTable* symbolTable, RegisterID regOp3Tag, RegisterID regOp3Payload)
{
- setupArgumentsWithExecState(TrustedImmPtr(identOp1), regOp2Payload, regOp2Tag, TrustedImm32(op3));
+ setupArgumentsWithExecState(TrustedImm32(op1), TrustedImmPtr(symbolTable), EABI_32BIT_DUMMY_ARG regOp3Payload, regOp3Tag);
return appendCallWithExceptionCheck(operation);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ESsiJJI operation, StructureStubInfo* stubInfo, RegisterID regOp1Tag, RegisterID regOp1Payload, RegisterID regOp2Tag, RegisterID regOp2Payload, StringImpl* uid)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ESsiJJI operation, StructureStubInfo* stubInfo, RegisterID regOp1Tag, RegisterID regOp1Payload, RegisterID regOp2Tag, RegisterID regOp2Payload, UniquedStringImpl* uid)
{
setupArgumentsWithExecState(TrustedImmPtr(stubInfo), regOp1Payload, regOp1Tag, regOp2Payload, regOp2Tag, TrustedImmPtr(uid));
return appendCallWithExceptionCheck(operation);
@@ -529,7 +790,25 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ESsiJJI ope
ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJJJ operation, RegisterID regOp1Tag, RegisterID regOp1Payload, RegisterID regOp2Tag, RegisterID regOp2Payload, RegisterID regOp3Tag, RegisterID regOp3Payload)
{
- setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG regOp1Payload, regOp1Tag, SH4_32BIT_DUMMY_ARG regOp2Payload, regOp2Tag, regOp3Payload, regOp3Tag);
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG regOp1Payload, regOp1Tag, regOp2Payload, regOp2Tag, regOp3Payload, regOp3Tag);
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJJJAp operation, RegisterID regOp1Tag, RegisterID regOp1Payload, RegisterID regOp2Tag, RegisterID regOp2Payload, RegisterID regOp3Tag, RegisterID regOp3Payload, ArrayProfile* arrayProfile)
+{
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG regOp1Payload, regOp1Tag, regOp2Payload, regOp2Tag, regOp3Payload, regOp3Tag, TrustedImmPtr(arrayProfile));
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJJJBy operation, RegisterID regOp1Tag, RegisterID regOp1Payload, RegisterID regOp2Tag, RegisterID regOp2Payload, RegisterID regOp3Tag, RegisterID regOp3Payload, ByValInfo* byValInfo)
+{
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG regOp1Payload, regOp1Tag, regOp2Payload, regOp2Tag, regOp3Payload, regOp3Tag, TrustedImmPtr(byValInfo));
+ return appendCallWithExceptionCheck(operation);
+}
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EZJ operation, int dst, RegisterID regOp1Tag, RegisterID regOp1Payload)
+{
+ setupArgumentsWithExecState(TrustedImm32(dst), regOp1Payload, regOp1Tag);
return appendCallWithExceptionCheck(operation);
}
@@ -544,15 +823,18 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJZJ operat
setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG regOp1Payload, regOp1Tag, TrustedImm32(op2), EABI_32BIT_DUMMY_ARG regOp3Payload, regOp3Tag);
return appendCallWithExceptionCheck(operation);
}
-
-#undef EABI_32BIT_DUMMY_ARG
-#undef SH4_32BIT_DUMMY_ARG
+
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJscCJ operation, int dst, GPRReg arg1, JSCell* cell, GPRReg arg2Tag, GPRReg arg2Payload)
+{
+ setupArgumentsWithExecState(arg1, TrustedImmPtr(cell), EABI_32BIT_DUMMY_ARG arg2Payload, arg2Tag);
+ return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+}
#endif // USE(JSVALUE32_64)
ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
{
- return branchPtr(NotEqual, Address(reg, JSCell::structureOffset()), TrustedImmPtr(structure));
+ return branchStructure(NotEqual, Address(reg, JSCell::structureIDOffset()), structure);
}
ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
@@ -561,26 +843,32 @@ ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&
linkSlowCase(iter);
}
+ALWAYS_INLINE void JIT::linkAllSlowCasesForBytecodeOffset(Vector<SlowCaseEntry>& slowCases, Vector<SlowCaseEntry>::iterator& iter, unsigned bytecodeOffset)
+{
+ while (iter != slowCases.end() && iter->to == bytecodeOffset) {
+ iter->from.link(this);
+ ++iter;
+ }
+}
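Editor's note: linkAllSlowCasesForBytecodeOffset relies on m_slowCases being appended in bytecode order, so every entry for a given offset is contiguous and a single forward iterator can drain them. A minimal standalone model of that loop, not JSC code:

```cpp
#include <cstdio>
#include <vector>

struct SlowCaseEntry { int from; unsigned to; }; // 'from' stands in for the Jump

int main()
{
    std::vector<SlowCaseEntry> slowCases { { 1, 4 }, { 2, 4 }, { 3, 9 } };
    auto iter = slowCases.begin();

    unsigned bytecodeOffset = 4;
    while (iter != slowCases.end() && iter->to == bytecodeOffset) {
        std::printf("link slow-case jump %d for bytecode offset %u\n", iter->from, iter->to);
        ++iter;
    }
    std::printf("iterator now rests on the entry for offset %u\n", iter->to);
    return 0;
}
```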
+
ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
{
- ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+ ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
m_slowCases.append(SlowCaseEntry(jump, m_bytecodeOffset));
}
-ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)
+ALWAYS_INLINE void JIT::addSlowCase(const JumpList& jumpList)
{
- ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+ ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
- const JumpList::JumpVector& jumpVector = jumpList.jumps();
- size_t size = jumpVector.size();
- for (size_t i = 0; i < size; ++i)
- m_slowCases.append(SlowCaseEntry(jumpVector[i], m_bytecodeOffset));
+ for (const Jump& jump : jumpList.jumps())
+ m_slowCases.append(SlowCaseEntry(jump, m_bytecodeOffset));
}
ALWAYS_INLINE void JIT::addSlowCase()
{
- ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+ ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
Jump emptyJump; // Doing it this way to make Windows happy.
m_slowCases.append(SlowCaseEntry(emptyJump, m_bytecodeOffset));
@@ -588,21 +876,26 @@ ALWAYS_INLINE void JIT::addSlowCase()
ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
{
- ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+ ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
m_jmpTable.append(JumpTable(jump, m_bytecodeOffset + relativeOffset));
}
ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
{
- ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+ ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
jump.linkTo(m_labels[m_bytecodeOffset + relativeOffset], this);
}
-ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotObject(RegisterID structureReg)
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfCellObject(RegisterID cellReg)
{
- return branch8(Below, Address(structureReg, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType));
+ return branch8(AboveOrEqual, Address(cellReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType));
+}
+
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfCellNotObject(RegisterID cellReg)
+{
+ return branch8(Below, Address(cellReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType));
}
#if ENABLE(SAMPLING_FLAGS)
@@ -658,28 +951,11 @@ ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
#endif
#endif
-ALWAYS_INLINE bool JIT::isOperandConstantImmediateChar(int src)
+ALWAYS_INLINE bool JIT::isOperandConstantChar(int src)
{
return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isString() && asString(getConstantOperand(src).asCell())->length() == 1;
}
-template<typename StructureType>
-inline void JIT::emitAllocateJSObject(RegisterID allocator, StructureType structure, RegisterID result, RegisterID scratch)
-{
- loadPtr(Address(allocator, MarkedAllocator::offsetOfFreeListHead()), result);
- addSlowCase(branchTestPtr(Zero, result));
-
- // remove the object from the free list
- loadPtr(Address(result), scratch);
- storePtr(scratch, Address(allocator, MarkedAllocator::offsetOfFreeListHead()));
-
- // initialize the object's structure
- storePtr(structure, Address(result, JSCell::structureOffset()));
-
- // initialize the object's property storage pointer
- storePtr(TrustedImmPtr(0), Address(result, JSObject::butterflyOffset()));
-}
-
inline void JIT::emitValueProfilingSite(ValueProfile* valueProfile)
{
ASSERT(shouldEmitProfiling());
@@ -713,22 +989,19 @@ inline void JIT::emitValueProfilingSite()
emitValueProfilingSite(m_bytecodeOffset);
}
-inline void JIT::emitArrayProfilingSite(RegisterID structureAndIndexingType, RegisterID scratch, ArrayProfile* arrayProfile)
+inline void JIT::emitArrayProfilingSiteWithCell(RegisterID cell, RegisterID indexingType, ArrayProfile* arrayProfile)
{
- UNUSED_PARAM(scratch); // We had found this scratch register useful here before, so I will keep it for now.
-
- RegisterID structure = structureAndIndexingType;
- RegisterID indexingType = structureAndIndexingType;
-
- if (shouldEmitProfiling())
- storePtr(structure, arrayProfile->addressOfLastSeenStructure());
+ if (shouldEmitProfiling()) {
+ load32(MacroAssembler::Address(cell, JSCell::structureIDOffset()), indexingType);
+ store32(indexingType, arrayProfile->addressOfLastSeenStructureID());
+ }
- load8(Address(structure, Structure::indexingTypeOffset()), indexingType);
+ load8(Address(cell, JSCell::indexingTypeAndMiscOffset()), indexingType);
}
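Editor's note: the reworked profiling site takes the cell directly — when profiling is enabled it records the cell's structure ID in the ArrayProfile, and either way it leaves the indexing type in the second register for the caller. A standalone sketch of that data flow, not JSC code:

```cpp
#include <cstdint>
#include <cstdio>

struct ToyCell { uint32_t structureID; uint8_t indexingType; };
struct ToyArrayProfile { uint32_t lastSeenStructureID { 0 }; };

// Mirrors the shape above: record the structure ID only when profiling,
// but always hand the indexing type back to the caller.
uint8_t profileAndLoadIndexingType(const ToyCell& cell, ToyArrayProfile& profile, bool shouldEmitProfiling)
{
    if (shouldEmitProfiling)
        profile.lastSeenStructureID = cell.structureID; // store32 to addressOfLastSeenStructureID()
    return cell.indexingType;                           // load8 of indexingTypeAndMiscOffset()
}

int main()
{
    ToyArrayProfile profile;
    ToyCell cell { 7, 2 };
    uint8_t indexingType = profileAndLoadIndexingType(cell, profile, true);
    std::printf("indexingType=%u lastSeenStructureID=%u\n",
        static_cast<unsigned>(indexingType), profile.lastSeenStructureID);
    return 0;
}
```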
-inline void JIT::emitArrayProfilingSiteForBytecodeIndex(RegisterID structureAndIndexingType, RegisterID scratch, unsigned bytecodeIndex)
+inline void JIT::emitArrayProfilingSiteForBytecodeIndexWithCell(RegisterID cell, RegisterID indexingType, unsigned bytecodeIndex)
{
- emitArrayProfilingSite(structureAndIndexingType, scratch, m_codeBlock->getOrAddArrayProfile(bytecodeIndex));
+ emitArrayProfilingSiteWithCell(cell, indexingType, m_codeBlock->getOrAddArrayProfile(bytecodeIndex));
}
inline void JIT::emitArrayProfileStoreToHoleSpecialCase(ArrayProfile* arrayProfile)
@@ -748,7 +1021,7 @@ static inline bool arrayProfileSaw(ArrayModes arrayModes, IndexingType capabilit
inline JITArrayMode JIT::chooseArrayMode(ArrayProfile* profile)
{
- ConcurrentJITLocker locker(m_codeBlock->m_lock);
+ ConcurrentJSLocker locker(m_codeBlock->m_lock);
profile->computeUpdatedPrediction(locker, m_codeBlock);
ArrayModes arrayModes = profile->observedArrayModes(locker);
if (arrayProfileSaw(arrayModes, DoubleShape))
@@ -760,6 +1033,16 @@ inline JITArrayMode JIT::chooseArrayMode(ArrayProfile* profile)
return JITContiguous;
}
+ALWAYS_INLINE int32_t JIT::getOperandConstantInt(int src)
+{
+ return getConstantOperand(src).asInt32();
+}
+
+ALWAYS_INLINE double JIT::getOperandConstantDouble(int src)
+{
+ return getConstantOperand(src).asDouble();
+}
+
#if USE(JSVALUE32_64)
inline void JIT::emitLoadTag(int index, RegisterID tag)
@@ -788,6 +1071,16 @@ inline void JIT::emitLoad(const JSValue& v, RegisterID tag, RegisterID payload)
move(Imm32(v.tag()), tag);
}
+ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, JSValueRegs dst)
+{
+ emitLoad(src, dst.tagGPR(), dst.payloadGPR());
+}
+
+ALWAYS_INLINE void JIT::emitPutVirtualRegister(int dst, JSValueRegs from)
+{
+ emitStore(dst, from.tagGPR(), from.payloadGPR());
+}
+
inline void JIT::emitLoad(int index, RegisterID tag, RegisterID payload, RegisterID base)
{
RELEASE_ASSERT(tag != payload);
@@ -819,7 +1112,7 @@ inline void JIT::emitLoadDouble(int index, FPRegisterID value)
{
if (m_codeBlock->isConstantRegisterIndex(index)) {
WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
- loadDouble(&inConstantPool, value);
+ loadDouble(TrustedImmPtr(&inConstantPool), value);
} else
loadDouble(addressFor(index), value);
}
@@ -904,20 +1197,20 @@ inline void JIT::emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex, RegisterI
}
}
-ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(int src)
+ALWAYS_INLINE bool JIT::isOperandConstantInt(int src)
{
return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}
-ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(int op1, int op2, int& op, int32_t& constant)
+ALWAYS_INLINE bool JIT::getOperandConstantInt(int op1, int op2, int& op, int32_t& constant)
{
- if (isOperandConstantImmediateInt(op1)) {
+ if (isOperandConstantInt(op1)) {
constant = getConstantOperand(op1).asInt32();
op = op2;
return true;
}
- if (isOperandConstantImmediateInt(op2)) {
+ if (isOperandConstantInt(op2)) {
constant = getConstantOperand(op2).asInt32();
op = op1;
return true;
@@ -931,7 +1224,7 @@ ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(int op1, int op2, int& op
// get arg puts an arg from the SF register array into a h/w register
ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
{
- ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+ ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
// TODO: we want to reuse values that are already in registers if we can - add a register allocator!
if (m_codeBlock->isConstantRegisterIndex(src)) {
@@ -946,6 +1239,11 @@ ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
load64(Address(callFrameRegister, src * sizeof(Register)), dst);
}
+ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, JSValueRegs dst)
+{
+ emitGetVirtualRegister(src, dst.payloadGPR());
+}
+
ALWAYS_INLINE void JIT::emitGetVirtualRegister(VirtualRegister src, RegisterID dst)
{
emitGetVirtualRegister(src.offset(), dst);
@@ -962,12 +1260,7 @@ ALWAYS_INLINE void JIT::emitGetVirtualRegisters(VirtualRegister src1, RegisterID
emitGetVirtualRegisters(src1.offset(), dst1, src2.offset(), dst2);
}
-ALWAYS_INLINE int32_t JIT::getConstantOperandImmediateInt(int src)
-{
- return getConstantOperand(src).asInt32();
-}
-
-ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(int src)
+ALWAYS_INLINE bool JIT::isOperandConstantInt(int src)
{
return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}
@@ -977,6 +1270,11 @@ ALWAYS_INLINE void JIT::emitPutVirtualRegister(int dst, RegisterID from)
store64(from, Address(callFrameRegister, dst * sizeof(Register)));
}
+ALWAYS_INLINE void JIT::emitPutVirtualRegister(int dst, JSValueRegs from)
+{
+ emitPutVirtualRegister(dst, from.payloadGPR());
+}
+
ALWAYS_INLINE void JIT::emitPutVirtualRegister(VirtualRegister dst, RegisterID from)
{
emitPutVirtualRegister(dst.offset(), from);
@@ -1019,7 +1317,7 @@ inline void JIT::emitLoadDouble(int index, FPRegisterID value)
{
if (m_codeBlock->isConstantRegisterIndex(index)) {
WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
- loadDouble(&inConstantPool, value);
+ loadDouble(TrustedImmPtr(&inConstantPool), value);
} else
loadDouble(addressFor(index), value);
}
@@ -1027,52 +1325,58 @@ inline void JIT::emitLoadDouble(int index, FPRegisterID value)
inline void JIT::emitLoadInt32ToDouble(int index, FPRegisterID value)
{
if (m_codeBlock->isConstantRegisterIndex(index)) {
- ASSERT(isOperandConstantImmediateInt(index));
+ ASSERT(isOperandConstantInt(index));
convertInt32ToDouble(Imm32(getConstantOperand(index).asInt32()), value);
} else
convertInt32ToDouble(addressFor(index), value);
}
-ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfInt(RegisterID reg)
{
return branch64(AboveOrEqual, reg, tagTypeNumberRegister);
}
-ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg)
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotInt(RegisterID reg)
{
return branch64(Below, reg, tagTypeNumberRegister);
}
-ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
+ALWAYS_INLINE JIT::PatchableJump JIT::emitPatchableJumpIfNotInt(RegisterID reg)
+{
+ return patchableBranch64(Below, reg, tagTypeNumberRegister);
+}
+
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotInt(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
move(reg1, scratch);
and64(reg2, scratch);
- return emitJumpIfNotImmediateInteger(scratch);
+ return emitJumpIfNotInt(scratch);
}
-ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateInteger(RegisterID reg)
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotInt(RegisterID reg)
{
- addSlowCase(emitJumpIfNotImmediateInteger(reg));
+ addSlowCase(emitJumpIfNotInt(reg));
}
-ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotInt(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
- addSlowCase(emitJumpIfNotImmediateIntegers(reg1, reg2, scratch));
+ addSlowCase(emitJumpIfNotInt(reg1, reg2, scratch));
}
-ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateNumber(RegisterID reg)
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotNumber(RegisterID reg)
{
- addSlowCase(emitJumpIfNotImmediateNumber(reg));
+ addSlowCase(emitJumpIfNotNumber(reg));
}
-ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID dest)
+ALWAYS_INLINE void JIT::emitTagBool(RegisterID reg)
{
- emitFastArithIntToImmNoCheck(src, dest);
+ or32(TrustedImm32(static_cast<int32_t>(ValueFalse)), reg);
}
-ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
+inline Instruction* JIT::copiedInstruction(Instruction* inst)
{
- or32(TrustedImm32(static_cast<int32_t>(ValueFalse)), reg);
+ ASSERT(inst >= m_codeBlock->instructions().begin() && inst < m_codeBlock->instructions().end());
+ return m_instructions.begin() + (inst - m_codeBlock->instructions().begin());
}
#endif // USE(JSVALUE32_64)
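Editor's note: the renamed JSVALUE64 helpers above (emitJumpIfInt, emitJumpIfNotInt, emitTagBool) all lean on the 64-bit value encoding — int32s are stored as TagTypeNumber | int32, so an unsigned comparison against tagTypeNumberRegister is the whole integer check, and a 0/1 result becomes a boolean by OR-ing in ValueFalse. A standalone model using the constants as I understand them for this vintage of the code; illustration only:

```cpp
#include <cstdint>
#include <cstdio>

// Assumed constants of the 64-bit value encoding.
constexpr uint64_t TagTypeNumber = 0xffff000000000000ull;
constexpr uint64_t ValueFalse    = 0x06;

// emitJumpIfInt: branch64(AboveOrEqual, reg, tagTypeNumberRegister).
bool isInt32Encoded(uint64_t bits) { return bits >= TagTypeNumber; }

uint64_t encodeInt32(int32_t i) { return TagTypeNumber | static_cast<uint32_t>(i); }

// emitTagBool: or32(ValueFalse, reg) applied to a 0/1 comparison result.
uint64_t tagBool(uint64_t bit) { return ValueFalse | bit; }

int main()
{
    std::printf("encodeInt32(42) is int32? %d\n", isInt32Encoded(encodeInt32(42)));
    std::printf("cell-like pointer is int32? %d\n", isInt32Encoded(0x0000700000001000ull));
    std::printf("tagBool(1) = 0x%llx\n", static_cast<unsigned long long>(tagBool(1)));
    return 0;
}
```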
@@ -1080,6 +1384,3 @@ ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
} // namespace JSC
#endif // ENABLE(JIT)
-
-#endif // JITInlines_h
-
diff --git a/Source/JavaScriptCore/jit/JITLeftShiftGenerator.cpp b/Source/JavaScriptCore/jit/JITLeftShiftGenerator.cpp
new file mode 100644
index 000000000..1ddaa6ab1
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITLeftShiftGenerator.cpp
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JITLeftShiftGenerator.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+void JITLeftShiftGenerator::generateFastPath(CCallHelpers& jit)
+{
+ ASSERT(m_scratchGPR != InvalidGPRReg);
+ ASSERT(m_scratchGPR != m_left.payloadGPR());
+ ASSERT(m_scratchGPR != m_right.payloadGPR());
+#if USE(JSVALUE32_64)
+ ASSERT(m_scratchGPR != m_left.tagGPR());
+ ASSERT(m_scratchGPR != m_right.tagGPR());
+#endif
+
+ ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());
+
+ m_didEmitFastPath = true;
+
+ if (m_rightOperand.isConstInt32()) {
+ // Try to do (intVar << intConstant).
+ m_slowPathJumpList.append(jit.branchIfNotInt32(m_left));
+
+ jit.moveValueRegs(m_left, m_result);
+ jit.lshift32(CCallHelpers::Imm32(m_rightOperand.asConstInt32()), m_result.payloadGPR());
+
+ } else {
+ // Try to do (intConstant << intVar) or (intVar << intVar).
+ m_slowPathJumpList.append(jit.branchIfNotInt32(m_right));
+
+ GPRReg rightOperandGPR = m_right.payloadGPR();
+ if (rightOperandGPR == m_result.payloadGPR()) {
+ jit.move(rightOperandGPR, m_scratchGPR);
+ rightOperandGPR = m_scratchGPR;
+ }
+
+ if (m_leftOperand.isConstInt32()) {
+#if USE(JSVALUE32_64)
+ jit.move(m_right.tagGPR(), m_result.tagGPR());
+#endif
+ jit.move(CCallHelpers::Imm32(m_leftOperand.asConstInt32()), m_result.payloadGPR());
+ } else {
+ m_slowPathJumpList.append(jit.branchIfNotInt32(m_left));
+ jit.moveValueRegs(m_left, m_result);
+ }
+
+ jit.lshift32(rightOperandGPR, m_result.payloadGPR());
+ }
+
+#if USE(JSVALUE64)
+ jit.or64(GPRInfo::tagTypeNumberRegister, m_result.payloadGPR());
+#endif
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
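Editor's note: the snippet above stays on the fast path only when both operands are (or fold to) int32. For reference, a standalone sketch of the value the fast path must compute in that case — plain C++ mirroring the ECMAScript rule that the shift count is masked to its low five bits; not JSC code:

```cpp
#include <cstdint>
#include <cstdio>

// ECMAScript left shift on int32 operands: the shift count is taken mod 32.
int32_t jsLeftShiftInt32(int32_t left, int32_t right)
{
    uint32_t shift = static_cast<uint32_t>(right) & 31;
    return static_cast<int32_t>(static_cast<uint32_t>(left) << shift);
}

int main()
{
    std::printf("%d\n", jsLeftShiftInt32(1, 3));   // 8
    std::printf("%d\n", jsLeftShiftInt32(1, 35));  // also 8: 35 & 31 == 3
    std::printf("%d\n", jsLeftShiftInt32(3, 30));  // wraps to a negative int32
    return 0;
}
```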
diff --git a/Source/JavaScriptCore/jit/JITLeftShiftGenerator.h b/Source/JavaScriptCore/jit/JITLeftShiftGenerator.h
new file mode 100644
index 000000000..07f97a538
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITLeftShiftGenerator.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "JITBitBinaryOpGenerator.h"
+
+namespace JSC {
+
+class JITLeftShiftGenerator : public JITBitBinaryOpGenerator {
+public:
+ JITLeftShiftGenerator(const SnippetOperand& leftOperand, const SnippetOperand& rightOperand,
+ JSValueRegs result, JSValueRegs left, JSValueRegs right, GPRReg scratchGPR)
+ : JITBitBinaryOpGenerator(leftOperand, rightOperand, result, left, right, scratchGPR)
+ { }
+
+ void generateFastPath(CCallHelpers&);
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITMathIC.h b/Source/JavaScriptCore/jit/JITMathIC.h
new file mode 100644
index 000000000..3806f3a4f
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITMathIC.h
@@ -0,0 +1,290 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "ArithProfile.h"
+#include "CCallHelpers.h"
+#include "JITAddGenerator.h"
+#include "JITMathICInlineResult.h"
+#include "JITMulGenerator.h"
+#include "JITNegGenerator.h"
+#include "JITSubGenerator.h"
+#include "LinkBuffer.h"
+#include "Repatch.h"
+#include "SnippetOperand.h"
+
+namespace JSC {
+
+class LinkBuffer;
+
+struct MathICGenerationState {
+ MacroAssembler::Label fastPathStart;
+ MacroAssembler::Label fastPathEnd;
+ MacroAssembler::Label slowPathStart;
+ MacroAssembler::Call slowPathCall;
+ MacroAssembler::JumpList slowPathJumps;
+ bool shouldSlowPathRepatch;
+};
+
+#define ENABLE_MATH_IC_STATS 0
+
+template <typename GeneratorType, bool(*isProfileEmpty)(ArithProfile&)>
+class JITMathIC {
+public:
+ JITMathIC(ArithProfile* arithProfile)
+ : m_arithProfile(arithProfile)
+ {
+ }
+
+ CodeLocationLabel doneLocation() { return m_inlineStart.labelAtOffset(m_inlineSize); }
+ CodeLocationLabel slowPathStartLocation() { return m_inlineStart.labelAtOffset(m_deltaFromStartToSlowPathStart); }
+ CodeLocationCall slowPathCallLocation() { return m_inlineStart.callAtOffset(m_deltaFromStartToSlowPathCallLocation); }
+
+ bool generateInline(CCallHelpers& jit, MathICGenerationState& state, bool shouldEmitProfiling = true)
+ {
+#if CPU(ARM_TRADITIONAL)
+ // FIXME: Remove this workaround once the proper fixes are landed.
+ // [ARM] Disable Inline Caching on ARMv7 traditional until proper fix
+ // https://bugs.webkit.org/show_bug.cgi?id=159759
+ return false;
+#endif
+
+ state.fastPathStart = jit.label();
+ size_t startSize = jit.m_assembler.buffer().codeSize();
+
+ if (m_arithProfile) {
+ if (isProfileEmpty(*m_arithProfile)) {
+ // It looks like the MathIC has yet to execute. We don't want to emit code in this
+ // case for a couple reasons. First, the operation may never execute, so if we don't emit
+ // code, it's a win. Second, if the operation does execute, we can emit better code
+ // once we have an idea about the types.
+ state.slowPathJumps.append(jit.patchableJump());
+ size_t inlineSize = jit.m_assembler.buffer().codeSize() - startSize;
+ ASSERT_UNUSED(inlineSize, static_cast<ptrdiff_t>(inlineSize) <= MacroAssembler::patchableJumpSize());
+ state.shouldSlowPathRepatch = true;
+ state.fastPathEnd = jit.label();
+ ASSERT(!m_generateFastPathOnRepatch); // We should have gathered some observed type info about the types before trying to regenerate again.
+ m_generateFastPathOnRepatch = true;
+ return true;
+ }
+ }
+
+ JITMathICInlineResult result = m_generator.generateInline(jit, state, m_arithProfile);
+
+ switch (result) {
+ case JITMathICInlineResult::GeneratedFastPath: {
+ size_t inlineSize = jit.m_assembler.buffer().codeSize() - startSize;
+ if (static_cast<ptrdiff_t>(inlineSize) < MacroAssembler::patchableJumpSize()) {
+ size_t nopsToEmitInBytes = MacroAssembler::patchableJumpSize() - inlineSize;
+ jit.emitNops(nopsToEmitInBytes);
+ }
+ state.shouldSlowPathRepatch = true;
+ state.fastPathEnd = jit.label();
+ return true;
+ }
+ case JITMathICInlineResult::GenerateFullSnippet: {
+ MacroAssembler::JumpList endJumpList;
+ bool result = m_generator.generateFastPath(jit, endJumpList, state.slowPathJumps, m_arithProfile, shouldEmitProfiling);
+ if (result) {
+ state.fastPathEnd = jit.label();
+ state.shouldSlowPathRepatch = false;
+ endJumpList.link(&jit);
+ return true;
+ }
+ return false;
+ }
+ case JITMathICInlineResult::DontGenerate: {
+ return false;
+ }
+ default:
+ ASSERT_NOT_REACHED();
+ }
+
+ return false;
+ }
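Editor's note: generateInline above has two outcomes worth calling out — if the ArithProfile is still empty it only reserves a patchable jump and arms m_generateFastPathOnRepatch; otherwise it asks the generator for an inline fast path or a full snippet. A toy state-machine model of the reserve-then-regenerate flow; illustration only, not the real control flow in every detail:

```cpp
#include <cstdio>

struct ToyMathIC {
    bool profileEmpty = true;            // stands in for isProfileEmpty(*m_arithProfile)
    bool generateFastPathOnRepatch = false;

    void generateInline()
    {
        if (profileEmpty) {
            std::printf("inline: reserve a patchable jump, no typed code yet\n");
            generateFastPathOnRepatch = true;
            return;
        }
        std::printf("inline: emit a typed fast path\n");
    }

    void slowPathObservedTypes() { profileEmpty = false; }

    void generateOutOfLine()
    {
        if (generateFastPathOnRepatch) {
            generateFastPathOnRepatch = false;
            std::printf("repatch: emit an out-of-line fast path and link the reserved jump to it\n");
            return;
        }
        std::printf("repatch: emit the full snippet\n");
    }
};

int main()
{
    ToyMathIC ic;
    ic.generateInline();        // profile empty: space reserved only
    ic.slowPathObservedTypes(); // the slow path runs and fills the ArithProfile
    ic.generateOutOfLine();     // now a specialized fast path can be generated
    return 0;
}
```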
+
+ void generateOutOfLine(VM& vm, CodeBlock* codeBlock, FunctionPtr callReplacement)
+ {
+ auto linkJumpToOutOfLineSnippet = [&] () {
+ CCallHelpers jit(&vm, codeBlock);
+ auto jump = jit.jump();
+ // We don't need a nop sled here because nobody should be jumping into the middle of an IC.
+ bool needsBranchCompaction = false;
+ RELEASE_ASSERT(jit.m_assembler.buffer().codeSize() <= static_cast<size_t>(m_inlineSize));
+ LinkBuffer linkBuffer(jit, m_inlineStart.dataLocation(), jit.m_assembler.buffer().codeSize(), JITCompilationMustSucceed, needsBranchCompaction);
+ RELEASE_ASSERT(linkBuffer.isValid());
+ linkBuffer.link(jump, CodeLocationLabel(m_code.code()));
+ FINALIZE_CODE(linkBuffer, ("JITMathIC: linking constant jump to out of line stub"));
+ };
+
+ auto replaceCall = [&] () {
+ ftlThunkAwareRepatchCall(codeBlock, slowPathCallLocation(), callReplacement);
+ };
+
+ bool shouldEmitProfiling = !JITCode::isOptimizingJIT(codeBlock->jitType());
+
+ if (m_generateFastPathOnRepatch) {
+
+ CCallHelpers jit(&vm, codeBlock);
+ MathICGenerationState generationState;
+ bool generatedInline = generateInline(jit, generationState, shouldEmitProfiling);
+
+ // We no longer want to try to regenerate the fast path.
+ m_generateFastPathOnRepatch = false;
+
+ if (generatedInline) {
+ auto jumpToDone = jit.jump();
+
+ LinkBuffer linkBuffer(vm, jit, codeBlock, JITCompilationCanFail);
+ if (!linkBuffer.didFailToAllocate()) {
+ linkBuffer.link(generationState.slowPathJumps, slowPathStartLocation());
+ linkBuffer.link(jumpToDone, doneLocation());
+
+ m_code = FINALIZE_CODE_FOR(
+ codeBlock, linkBuffer, ("JITMathIC: generating out of line fast IC snippet"));
+
+ if (!generationState.shouldSlowPathRepatch) {
+ // We won't need to regenerate, so we can wire the slow path call
+                        // to a non-repatching variant.
+ replaceCall();
+ }
+
+ linkJumpToOutOfLineSnippet();
+
+ return;
+ }
+ }
+
+            // We weren't able to generate an out-of-line fast path, so we just generate
+            // the snippet in its full generality below.
+ }
+
+        // We rewire the slow path call to the alternate (non-repatching) variant regardless of whether
+        // or not we can allocate the out-of-line path, because if that allocation fails we don't want
+        // to waste time trying to allocate it again in the future.
+ replaceCall();
+
+ {
+ CCallHelpers jit(&vm, codeBlock);
+
+ MacroAssembler::JumpList endJumpList;
+ MacroAssembler::JumpList slowPathJumpList;
+
+ bool emittedFastPath = m_generator.generateFastPath(jit, endJumpList, slowPathJumpList, m_arithProfile, shouldEmitProfiling);
+ if (!emittedFastPath)
+ return;
+ endJumpList.append(jit.jump());
+
+ LinkBuffer linkBuffer(vm, jit, codeBlock, JITCompilationCanFail);
+ if (linkBuffer.didFailToAllocate())
+ return;
+
+ linkBuffer.link(endJumpList, doneLocation());
+ linkBuffer.link(slowPathJumpList, slowPathStartLocation());
+
+ m_code = FINALIZE_CODE_FOR(
+ codeBlock, linkBuffer, ("JITMathIC: generating out of line IC snippet"));
+ }
+
+ linkJumpToOutOfLineSnippet();
+ }
+
+ void finalizeInlineCode(const MathICGenerationState& state, LinkBuffer& linkBuffer)
+ {
+ CodeLocationLabel start = linkBuffer.locationOf(state.fastPathStart);
+ m_inlineStart = start;
+
+ m_inlineSize = MacroAssembler::differenceBetweenCodePtr(
+ start, linkBuffer.locationOf(state.fastPathEnd));
+ ASSERT(m_inlineSize > 0);
+
+ m_deltaFromStartToSlowPathCallLocation = MacroAssembler::differenceBetweenCodePtr(
+ start, linkBuffer.locationOf(state.slowPathCall));
+ m_deltaFromStartToSlowPathStart = MacroAssembler::differenceBetweenCodePtr(
+ start, linkBuffer.locationOf(state.slowPathStart));
+ }
+
+ ArithProfile* arithProfile() const { return m_arithProfile; }
+
+#if ENABLE(MATH_IC_STATS)
+ size_t m_generatedCodeSize { 0 };
+ size_t codeSize() const
+ {
+ size_t result = m_generatedCodeSize;
+ if (m_code)
+ result += m_code.size();
+ return result;
+ }
+#endif
+
+ ArithProfile* m_arithProfile;
+ MacroAssemblerCodeRef m_code;
+ CodeLocationLabel m_inlineStart;
+ int32_t m_inlineSize;
+ int32_t m_deltaFromStartToSlowPathCallLocation;
+ int32_t m_deltaFromStartToSlowPathStart;
+ bool m_generateFastPathOnRepatch { false };
+ GeneratorType m_generator;
+};
+
+inline bool isBinaryProfileEmpty(ArithProfile& arithProfile)
+{
+ return arithProfile.lhsObservedType().isEmpty() || arithProfile.rhsObservedType().isEmpty();
+}
+template <typename GeneratorType>
+class JITBinaryMathIC : public JITMathIC<GeneratorType, isBinaryProfileEmpty> {
+public:
+ JITBinaryMathIC(ArithProfile* arithProfile)
+ : JITMathIC<GeneratorType, isBinaryProfileEmpty>(arithProfile)
+ {
+ }
+};
+
+typedef JITBinaryMathIC<JITAddGenerator> JITAddIC;
+typedef JITBinaryMathIC<JITMulGenerator> JITMulIC;
+typedef JITBinaryMathIC<JITSubGenerator> JITSubIC;
+
+
+inline bool isUnaryProfileEmpty(ArithProfile& arithProfile)
+{
+ return arithProfile.lhsObservedType().isEmpty();
+}
+template <typename GeneratorType>
+class JITUnaryMathIC : public JITMathIC<GeneratorType, isUnaryProfileEmpty> {
+public:
+ JITUnaryMathIC(ArithProfile* arithProfile)
+ : JITMathIC<GeneratorType, isUnaryProfileEmpty>(arithProfile)
+ {
+ }
+};
+
+typedef JITUnaryMathIC<JITNegGenerator> JITNegIC;
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
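// A minimal, standalone sketch (not JSC API) of the tiering decision implemented by
// JITMathIC::generateInline() above, folded together with the per-operation choice made by the
// snippet generators. IcPlan, ObservedKinds and planInlineCode are hypothetical names, the
// constant-operand specializations of the real generators are ignored, and the real code drives
// a MacroAssembler rather than returning a plan.
#include <cstdio>

enum class IcPlan {
    PatchableJumpOnly, // profile empty: emit only a patchable jump and regenerate once types are seen
    InlineFastPath,    // narrow types: emit the fast path, padded with nops up to patchableJumpSize()
    FullSnippet,       // mixed numeric types: emit the full snippet (GenerateFullSnippet)
    SlowPathOnly       // only non-numbers seen: don't generate, always take the slow path call
};

struct ObservedKinds {
    bool sawInt32 = false;
    bool sawDouble = false;
    bool sawNonNumber = false;
    bool empty() const { return !sawInt32 && !sawDouble && !sawNonNumber; }
    bool onlyInt32() const { return sawInt32 && !sawDouble && !sawNonNumber; }
    bool onlyDouble() const { return sawDouble && !sawInt32 && !sawNonNumber; }
    bool onlyNonNumber() const { return sawNonNumber && !sawInt32 && !sawDouble; }
};

IcPlan planInlineCode(const ObservedKinds& lhs, const ObservedKinds& rhs)
{
    if (lhs.empty() || rhs.empty())
        return IcPlan::PatchableJumpOnly; // the isProfileEmpty() early-out above
    if (lhs.onlyNonNumber() && rhs.onlyNonNumber())
        return IcPlan::SlowPathOnly; // JITMathICInlineResult::DontGenerate
    if (lhs.onlyInt32() && rhs.onlyInt32())
        return IcPlan::InlineFastPath; // GeneratedFastPath, speculating int32
    if (lhs.onlyDouble() && rhs.onlyDouble())
        return IcPlan::InlineFastPath; // GeneratedFastPath, speculating double
    return IcPlan::FullSnippet;
}

int main()
{
    ObservedKinds ints; ints.sawInt32 = true;
    ObservedKinds mixed; mixed.sawInt32 = true; mixed.sawNonNumber = true;
    std::printf("%d %d\n", static_cast<int>(planInlineCode(ints, ints)),
        static_cast<int>(planInlineCode(ints, mixed)));
    return 0;
}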
diff --git a/Source/JavaScriptCore/jit/JITMathICForwards.h b/Source/JavaScriptCore/jit/JITMathICForwards.h
new file mode 100644
index 000000000..08bfd1301
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITMathICForwards.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+template <typename Generator> class JITBinaryMathIC;
+template <typename Generator> class JITUnaryMathIC;
+class JITAddGenerator;
+class JITMulGenerator;
+class JITNegGenerator;
+class JITSubGenerator;
+
+typedef JITBinaryMathIC<JITAddGenerator> JITAddIC;
+typedef JITBinaryMathIC<JITMulGenerator> JITMulIC;
+typedef JITUnaryMathIC<JITNegGenerator> JITNegIC;
+typedef JITBinaryMathIC<JITSubGenerator> JITSubIC;
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITMathICInlineResult.h b/Source/JavaScriptCore/jit/JITMathICInlineResult.h
new file mode 100644
index 000000000..0189c1388
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITMathICInlineResult.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+enum class JITMathICInlineResult {
+ GeneratedFastPath,
+ GenerateFullSnippet,
+ DontGenerate
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITMulGenerator.cpp b/Source/JavaScriptCore/jit/JITMulGenerator.cpp
new file mode 100644
index 000000000..93c69e297
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITMulGenerator.cpp
@@ -0,0 +1,254 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JITMulGenerator.h"
+
+#if ENABLE(JIT)
+
+#include "ArithProfile.h"
+#include "JITMathIC.h"
+
+namespace JSC {
+
+JITMathICInlineResult JITMulGenerator::generateInline(CCallHelpers& jit, MathICGenerationState& state, const ArithProfile* arithProfile)
+{
+ // We default to speculating int32.
+ ObservedType lhs = ObservedType().withInt32();
+ ObservedType rhs = ObservedType().withInt32();
+ if (arithProfile) {
+ lhs = arithProfile->lhsObservedType();
+ rhs = arithProfile->rhsObservedType();
+ }
+
+ if (lhs.isOnlyNonNumber() && rhs.isOnlyNonNumber())
+ return JITMathICInlineResult::DontGenerate;
+
+ if (lhs.isOnlyNumber() && rhs.isOnlyNumber()) {
+ if (!jit.supportsFloatingPoint())
+ return JITMathICInlineResult::DontGenerate;
+
+ if (!m_leftOperand.definitelyIsNumber())
+ state.slowPathJumps.append(jit.branchIfNotNumber(m_left, m_scratchGPR));
+ if (!m_rightOperand.definitelyIsNumber())
+ state.slowPathJumps.append(jit.branchIfNotNumber(m_right, m_scratchGPR));
+ state.slowPathJumps.append(jit.branchIfInt32(m_left));
+ state.slowPathJumps.append(jit.branchIfInt32(m_right));
+ jit.unboxDoubleNonDestructive(m_left, m_leftFPR, m_scratchGPR, m_scratchFPR);
+ jit.unboxDoubleNonDestructive(m_right, m_rightFPR, m_scratchGPR, m_scratchFPR);
+ jit.mulDouble(m_rightFPR, m_leftFPR);
+ jit.boxDouble(m_leftFPR, m_result);
+
+ return JITMathICInlineResult::GeneratedFastPath;
+ }
+
+ if ((lhs.isOnlyInt32() || m_leftOperand.isPositiveConstInt32()) && (rhs.isOnlyInt32() || m_rightOperand.isPositiveConstInt32())) {
+ ASSERT(!m_leftOperand.isPositiveConstInt32() || !m_rightOperand.isPositiveConstInt32());
+ if (!m_leftOperand.isPositiveConstInt32())
+ state.slowPathJumps.append(jit.branchIfNotInt32(m_left));
+ if (!m_rightOperand.isPositiveConstInt32())
+ state.slowPathJumps.append(jit.branchIfNotInt32(m_right));
+
+ if (m_leftOperand.isPositiveConstInt32() || m_rightOperand.isPositiveConstInt32()) {
+ JSValueRegs var = m_leftOperand.isPositiveConstInt32() ? m_right : m_left;
+ int32_t constValue = m_leftOperand.isPositiveConstInt32() ? m_leftOperand.asConstInt32() : m_rightOperand.asConstInt32();
+ state.slowPathJumps.append(jit.branchMul32(CCallHelpers::Overflow, var.payloadGPR(), CCallHelpers::Imm32(constValue), m_scratchGPR));
+ } else {
+ state.slowPathJumps.append(jit.branchMul32(CCallHelpers::Overflow, m_right.payloadGPR(), m_left.payloadGPR(), m_scratchGPR));
+ state.slowPathJumps.append(jit.branchTest32(CCallHelpers::Zero, m_scratchGPR)); // Go slow if potential negative zero.
+ }
+ jit.boxInt32(m_scratchGPR, m_result);
+
+ return JITMathICInlineResult::GeneratedFastPath;
+ }
+
+ return JITMathICInlineResult::GenerateFullSnippet;
+}
+
+bool JITMulGenerator::generateFastPath(CCallHelpers& jit, CCallHelpers::JumpList& endJumpList, CCallHelpers::JumpList& slowPathJumpList, const ArithProfile* arithProfile, bool shouldEmitProfiling)
+{
+ ASSERT(m_scratchGPR != InvalidGPRReg);
+ ASSERT(m_scratchGPR != m_left.payloadGPR());
+ ASSERT(m_scratchGPR != m_right.payloadGPR());
+#if USE(JSVALUE64)
+ ASSERT(m_scratchGPR != m_result.payloadGPR());
+#else
+ ASSERT(m_scratchGPR != m_left.tagGPR());
+ ASSERT(m_scratchGPR != m_right.tagGPR());
+ ASSERT(m_scratchFPR != InvalidFPRReg);
+#endif
+
+ ASSERT(!m_leftOperand.isPositiveConstInt32() || !m_rightOperand.isPositiveConstInt32());
+
+ if (!m_leftOperand.mightBeNumber() || !m_rightOperand.mightBeNumber())
+ return false;
+
+ if (m_leftOperand.isPositiveConstInt32() || m_rightOperand.isPositiveConstInt32()) {
+ JSValueRegs var = m_leftOperand.isPositiveConstInt32() ? m_right : m_left;
+ SnippetOperand& varOpr = m_leftOperand.isPositiveConstInt32() ? m_rightOperand : m_leftOperand;
+ SnippetOperand& constOpr = m_leftOperand.isPositiveConstInt32() ? m_leftOperand : m_rightOperand;
+
+ // Try to do intVar * intConstant.
+ CCallHelpers::Jump notInt32 = jit.branchIfNotInt32(var);
+
+ GPRReg multiplyResultGPR = m_result.payloadGPR();
+ if (multiplyResultGPR == var.payloadGPR())
+ multiplyResultGPR = m_scratchGPR;
+
+ slowPathJumpList.append(jit.branchMul32(CCallHelpers::Overflow, var.payloadGPR(), CCallHelpers::Imm32(constOpr.asConstInt32()), multiplyResultGPR));
+
+ jit.boxInt32(multiplyResultGPR, m_result);
+ endJumpList.append(jit.jump());
+
+ if (!jit.supportsFloatingPoint()) {
+ slowPathJumpList.append(notInt32);
+ return true;
+ }
+
+ // Try to do doubleVar * double(intConstant).
+ notInt32.link(&jit);
+ if (!varOpr.definitelyIsNumber())
+ slowPathJumpList.append(jit.branchIfNotNumber(var, m_scratchGPR));
+
+ jit.unboxDoubleNonDestructive(var, m_leftFPR, m_scratchGPR, m_scratchFPR);
+
+ jit.move(CCallHelpers::Imm32(constOpr.asConstInt32()), m_scratchGPR);
+ jit.convertInt32ToDouble(m_scratchGPR, m_rightFPR);
+
+ // Fall thru to doubleVar * doubleVar.
+
+ } else {
+ ASSERT(!m_leftOperand.isPositiveConstInt32() && !m_rightOperand.isPositiveConstInt32());
+
+ CCallHelpers::Jump leftNotInt;
+ CCallHelpers::Jump rightNotInt;
+
+ // Try to do intVar * intVar.
+ leftNotInt = jit.branchIfNotInt32(m_left);
+ rightNotInt = jit.branchIfNotInt32(m_right);
+
+ slowPathJumpList.append(jit.branchMul32(CCallHelpers::Overflow, m_right.payloadGPR(), m_left.payloadGPR(), m_scratchGPR));
+ slowPathJumpList.append(jit.branchTest32(CCallHelpers::Zero, m_scratchGPR)); // Go slow if potential negative zero.
+
+ jit.boxInt32(m_scratchGPR, m_result);
+ endJumpList.append(jit.jump());
+
+ if (!jit.supportsFloatingPoint()) {
+ slowPathJumpList.append(leftNotInt);
+ slowPathJumpList.append(rightNotInt);
+ return true;
+ }
+
+ leftNotInt.link(&jit);
+ if (!m_leftOperand.definitelyIsNumber())
+ slowPathJumpList.append(jit.branchIfNotNumber(m_left, m_scratchGPR));
+ if (!m_rightOperand.definitelyIsNumber())
+ slowPathJumpList.append(jit.branchIfNotNumber(m_right, m_scratchGPR));
+
+ jit.unboxDoubleNonDestructive(m_left, m_leftFPR, m_scratchGPR, m_scratchFPR);
+ CCallHelpers::Jump rightIsDouble = jit.branchIfNotInt32(m_right);
+
+ jit.convertInt32ToDouble(m_right.payloadGPR(), m_rightFPR);
+ CCallHelpers::Jump rightWasInteger = jit.jump();
+
+ rightNotInt.link(&jit);
+ if (!m_rightOperand.definitelyIsNumber())
+ slowPathJumpList.append(jit.branchIfNotNumber(m_right, m_scratchGPR));
+
+ jit.convertInt32ToDouble(m_left.payloadGPR(), m_leftFPR);
+
+ rightIsDouble.link(&jit);
+ jit.unboxDoubleNonDestructive(m_right, m_rightFPR, m_scratchGPR, m_scratchFPR);
+
+ rightWasInteger.link(&jit);
+
+ // Fall thru to doubleVar * doubleVar.
+ }
+
+ // Do doubleVar * doubleVar.
+ jit.mulDouble(m_rightFPR, m_leftFPR);
+
+ if (!arithProfile || !shouldEmitProfiling)
+ jit.boxDouble(m_leftFPR, m_result);
+ else {
+        // The Int52 overflow check below intentionally omits -(1ll << 51), which is a valid negative
+        // Int52 value. Therefore, we will get a false positive if the result is that value. This is
+        // done intentionally to simplify the checking algorithm.
+
+ const int64_t negativeZeroBits = 1ll << 63;
+#if USE(JSVALUE64)
+ jit.moveDoubleTo64(m_leftFPR, m_result.payloadGPR());
+
+ CCallHelpers::Jump notNegativeZero = jit.branch64(CCallHelpers::NotEqual, m_result.payloadGPR(), CCallHelpers::TrustedImm64(negativeZeroBits));
+
+ jit.or32(CCallHelpers::TrustedImm32(ArithProfile::NegZeroDouble), CCallHelpers::AbsoluteAddress(arithProfile->addressOfBits()));
+ CCallHelpers::Jump done = jit.jump();
+
+ notNegativeZero.link(&jit);
+ jit.or32(CCallHelpers::TrustedImm32(ArithProfile::NonNegZeroDouble), CCallHelpers::AbsoluteAddress(arithProfile->addressOfBits()));
+
+ jit.move(m_result.payloadGPR(), m_scratchGPR);
+ jit.urshiftPtr(CCallHelpers::Imm32(52), m_scratchGPR);
+ jit.and32(CCallHelpers::Imm32(0x7ff), m_scratchGPR);
+ CCallHelpers::Jump noInt52Overflow = jit.branch32(CCallHelpers::LessThanOrEqual, m_scratchGPR, CCallHelpers::TrustedImm32(0x431));
+
+ jit.or32(CCallHelpers::TrustedImm32(ArithProfile::Int52Overflow), CCallHelpers::AbsoluteAddress(arithProfile->addressOfBits()));
+ noInt52Overflow.link(&jit);
+
+ done.link(&jit);
+ jit.sub64(GPRInfo::tagTypeNumberRegister, m_result.payloadGPR()); // Box the double.
+#else
+ jit.boxDouble(m_leftFPR, m_result);
+ CCallHelpers::JumpList notNegativeZero;
+ notNegativeZero.append(jit.branch32(CCallHelpers::NotEqual, m_result.payloadGPR(), CCallHelpers::TrustedImm32(0)));
+ notNegativeZero.append(jit.branch32(CCallHelpers::NotEqual, m_result.tagGPR(), CCallHelpers::TrustedImm32(negativeZeroBits >> 32)));
+
+ jit.or32(CCallHelpers::TrustedImm32(ArithProfile::NegZeroDouble), CCallHelpers::AbsoluteAddress(arithProfile->addressOfBits()));
+ CCallHelpers::Jump done = jit.jump();
+
+ notNegativeZero.link(&jit);
+ jit.or32(CCallHelpers::TrustedImm32(ArithProfile::NonNegZeroDouble), CCallHelpers::AbsoluteAddress(arithProfile->addressOfBits()));
+
+ jit.move(m_result.tagGPR(), m_scratchGPR);
+ jit.urshiftPtr(CCallHelpers::Imm32(52 - 32), m_scratchGPR);
+ jit.and32(CCallHelpers::Imm32(0x7ff), m_scratchGPR);
+ CCallHelpers::Jump noInt52Overflow = jit.branch32(CCallHelpers::LessThanOrEqual, m_scratchGPR, CCallHelpers::TrustedImm32(0x431));
+
+ jit.or32(CCallHelpers::TrustedImm32(ArithProfile::Int52Overflow), CCallHelpers::AbsoluteAddress(arithProfile->addressOfBits()));
+
+ endJumpList.append(noInt52Overflow);
+ if (m_scratchGPR == m_result.tagGPR() || m_scratchGPR == m_result.payloadGPR())
+ jit.boxDouble(m_leftFPR, m_result);
+
+ endJumpList.append(done);
+#endif
+ }
+
+ return true;
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
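// A standalone sketch (not JSC API) of the two bit tricks used above when profiling the double
// result of op_mul. resultMayBeNegativeZero() explains the extra branchTest32 on the int32 fast
// path, and the other helpers (hypothetical names) mirror the raw-bit checks emitted after
// mulDouble.
#include <bit>
#include <cassert>
#include <cstdint>

// An int32 product of 0 is the only case where the exact result could have been -0.0
// (e.g. -1 * 0), which is not representable as an int32, so the JIT takes the slow path.
bool resultMayBeNegativeZero(int32_t product) { return product == 0; }

bool isNegativeZero(double d)
{
    // -0.0 is the unique double whose bit pattern is exactly the sign bit.
    return std::bit_cast<uint64_t>(d) == (1ull << 63);
}

bool mayOverflowInt52(double d)
{
    // Mirror of the emitted check: shift out the mantissa, mask the 11-bit exponent field, and
    // compare against 0x431 (biased exponent for 2^50). Larger exponents mean |d| >= 2^51, which
    // may not fit in an Int52; -(2^51) is deliberately treated as an overflow too, keeping the
    // check to a single compare.
    uint64_t exponent = (std::bit_cast<uint64_t>(d) >> 52) & 0x7ff;
    return exponent > 0x431;
}

int main()
{
    assert(isNegativeZero(-0.0) && !isNegativeZero(0.0));
    assert(!mayOverflowInt52(3.0) && mayOverflowInt52(static_cast<double>(1ll << 52)));
    assert(resultMayBeNegativeZero(-1 * 0));
    return 0;
}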
diff --git a/Source/JavaScriptCore/jit/JITMulGenerator.h b/Source/JavaScriptCore/jit/JITMulGenerator.h
new file mode 100644
index 000000000..36d26ea12
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITMulGenerator.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "CCallHelpers.h"
+#include "JITMathICInlineResult.h"
+#include "SnippetOperand.h"
+
+namespace JSC {
+
+struct MathICGenerationState;
+
+class JITMulGenerator {
+public:
+ JITMulGenerator()
+ { }
+
+ JITMulGenerator(SnippetOperand leftOperand, SnippetOperand rightOperand,
+ JSValueRegs result, JSValueRegs left, JSValueRegs right,
+ FPRReg leftFPR, FPRReg rightFPR, GPRReg scratchGPR, FPRReg scratchFPR)
+ : m_leftOperand(leftOperand)
+ , m_rightOperand(rightOperand)
+ , m_result(result)
+ , m_left(left)
+ , m_right(right)
+ , m_leftFPR(leftFPR)
+ , m_rightFPR(rightFPR)
+ , m_scratchGPR(scratchGPR)
+ , m_scratchFPR(scratchFPR)
+ {
+ ASSERT(!m_leftOperand.isPositiveConstInt32() || !m_rightOperand.isPositiveConstInt32());
+ }
+
+ JITMathICInlineResult generateInline(CCallHelpers&, MathICGenerationState&, const ArithProfile*);
+ bool generateFastPath(CCallHelpers&, CCallHelpers::JumpList& endJumpList, CCallHelpers::JumpList& slowJumpList, const ArithProfile*, bool shouldEmitProfiling);
+
+ static bool isLeftOperandValidConstant(SnippetOperand leftOperand) { return leftOperand.isPositiveConstInt32(); }
+ static bool isRightOperandValidConstant(SnippetOperand rightOperand) { return rightOperand.isPositiveConstInt32(); }
+
+private:
+ SnippetOperand m_leftOperand;
+ SnippetOperand m_rightOperand;
+ JSValueRegs m_result;
+ JSValueRegs m_left;
+ JSValueRegs m_right;
+ FPRReg m_leftFPR;
+ FPRReg m_rightFPR;
+ GPRReg m_scratchGPR;
+ FPRReg m_scratchFPR;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITNegGenerator.cpp b/Source/JavaScriptCore/jit/JITNegGenerator.cpp
new file mode 100644
index 000000000..92c29dd63
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITNegGenerator.cpp
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2015, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JITNegGenerator.h"
+
+#include "ArithProfile.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+JITMathICInlineResult JITNegGenerator::generateInline(CCallHelpers& jit, MathICGenerationState& state, const ArithProfile* arithProfile)
+{
+ ASSERT(m_scratchGPR != InvalidGPRReg);
+ ASSERT(m_scratchGPR != m_src.payloadGPR());
+ ASSERT(m_scratchGPR != m_result.payloadGPR());
+#if USE(JSVALUE32_64)
+ ASSERT(m_scratchGPR != m_src.tagGPR());
+ ASSERT(m_scratchGPR != m_result.tagGPR());
+#endif
+
+ // We default to speculating int32.
+ ObservedType observedTypes = ObservedType().withInt32();
+ if (arithProfile)
+ observedTypes = arithProfile->lhsObservedType();
+ ASSERT_WITH_MESSAGE(!observedTypes.isEmpty(), "We should not attempt to generate anything if we do not have a profile.");
+
+ if (observedTypes.isOnlyNonNumber())
+ return JITMathICInlineResult::DontGenerate;
+
+ if (observedTypes.isOnlyInt32()) {
+ jit.moveValueRegs(m_src, m_result);
+ state.slowPathJumps.append(jit.branchIfNotInt32(m_src));
+ state.slowPathJumps.append(jit.branchTest32(CCallHelpers::Zero, m_src.payloadGPR(), CCallHelpers::TrustedImm32(0x7fffffff)));
+ jit.neg32(m_result.payloadGPR());
+#if USE(JSVALUE64)
+ jit.boxInt32(m_result.payloadGPR(), m_result);
+#endif
+
+ return JITMathICInlineResult::GeneratedFastPath;
+ }
+ if (observedTypes.isOnlyNumber()) {
+ state.slowPathJumps.append(jit.branchIfInt32(m_src));
+ state.slowPathJumps.append(jit.branchIfNotNumber(m_src, m_scratchGPR));
+#if USE(JSVALUE64)
+ if (m_src.payloadGPR() != m_result.payloadGPR()) {
+ jit.move(CCallHelpers::TrustedImm64(static_cast<int64_t>(1ull << 63)), m_result.payloadGPR());
+ jit.xor64(m_src.payloadGPR(), m_result.payloadGPR());
+ } else {
+ jit.move(CCallHelpers::TrustedImm64(static_cast<int64_t>(1ull << 63)), m_scratchGPR);
+ jit.xor64(m_scratchGPR, m_result.payloadGPR());
+ }
+#else
+ jit.moveValueRegs(m_src, m_result);
+ jit.xor32(CCallHelpers::TrustedImm32(1 << 31), m_result.tagGPR());
+#endif
+ return JITMathICInlineResult::GeneratedFastPath;
+ }
+ return JITMathICInlineResult::GenerateFullSnippet;
+}
+
+bool JITNegGenerator::generateFastPath(CCallHelpers& jit, CCallHelpers::JumpList& endJumpList, CCallHelpers::JumpList& slowPathJumpList, const ArithProfile* arithProfile, bool shouldEmitProfiling)
+{
+ ASSERT(m_scratchGPR != m_src.payloadGPR());
+ ASSERT(m_scratchGPR != m_result.payloadGPR());
+ ASSERT(m_scratchGPR != InvalidGPRReg);
+#if USE(JSVALUE32_64)
+ ASSERT(m_scratchGPR != m_src.tagGPR());
+ ASSERT(m_scratchGPR != m_result.tagGPR());
+#endif
+
+ jit.moveValueRegs(m_src, m_result);
+ CCallHelpers::Jump srcNotInt = jit.branchIfNotInt32(m_src);
+
+ // -0 should produce a double, and hence cannot be negated as an int.
+ // The negative int32 0x80000000 doesn't have a positive int32 representation, and hence cannot be negated as an int.
+ slowPathJumpList.append(jit.branchTest32(CCallHelpers::Zero, m_src.payloadGPR(), CCallHelpers::TrustedImm32(0x7fffffff)));
+
+ jit.neg32(m_result.payloadGPR());
+#if USE(JSVALUE64)
+ jit.boxInt32(m_result.payloadGPR(), m_result);
+#endif
+ endJumpList.append(jit.jump());
+
+ srcNotInt.link(&jit);
+ slowPathJumpList.append(jit.branchIfNotNumber(m_src, m_scratchGPR));
+
+ // For a double, all we need to do is to invert the sign bit.
+#if USE(JSVALUE64)
+ jit.move(CCallHelpers::TrustedImm64((int64_t)(1ull << 63)), m_scratchGPR);
+ jit.xor64(m_scratchGPR, m_result.payloadGPR());
+#else
+ jit.xor32(CCallHelpers::TrustedImm32(1 << 31), m_result.tagGPR());
+#endif
+    // The flags of ArithNegate are basic in the DFG: all we need to record here is
+    // whether we ever produced a double.
+ if (shouldEmitProfiling && arithProfile && !arithProfile->lhsObservedType().sawNumber() && !arithProfile->didObserveDouble())
+ arithProfile->emitSetDouble(jit);
+ return true;
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
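// A standalone sketch (not JSC API) of the op_negate fast-path checks above. The helper names
// are hypothetical; the real code emits branches instead of returning values.
#include <bit>
#include <cassert>
#include <cmath>
#include <cstdint>

// The emitted guard is branchTest32(Zero, src, 0x7fffffff): it sends the value to the slow path
// when src & 0x7fffffff == 0, which is true exactly for 0 (negating it should yield the double
// -0.0) and for INT32_MIN = 0x80000000 (whose negation does not fit in an int32).
bool canNegateAsInt32(int32_t value) { return (value & 0x7fffffff) != 0; }

// For a double, negation is just flipping the sign bit, which is what the xor64 with 1ull << 63
// (or the xor32 on the tag word in the 32-bit value representation) does.
double negateDouble(double d)
{
    return std::bit_cast<double>(std::bit_cast<uint64_t>(d) ^ (1ull << 63));
}

int main()
{
    assert(canNegateAsInt32(7) && canNegateAsInt32(-7));
    assert(!canNegateAsInt32(0) && !canNegateAsInt32(INT32_MIN));
    assert(negateDouble(1.5) == -1.5 && std::signbit(negateDouble(0.0)));
    return 0;
}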
diff --git a/Source/JavaScriptCore/jit/JITNegGenerator.h b/Source/JavaScriptCore/jit/JITNegGenerator.h
new file mode 100644
index 000000000..8a0c2d517
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITNegGenerator.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2015, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "CCallHelpers.h"
+#include "JITMathIC.h"
+#include "JITMathICInlineResult.h"
+
+namespace JSC {
+
+class JITNegGenerator {
+public:
+ JITNegGenerator() = default;
+
+ JITNegGenerator(JSValueRegs result, JSValueRegs src, GPRReg scratchGPR)
+ : m_result(result)
+ , m_src(src)
+ , m_scratchGPR(scratchGPR)
+ { }
+
+ JITMathICInlineResult generateInline(CCallHelpers&, MathICGenerationState&, const ArithProfile*);
+ bool generateFastPath(CCallHelpers&, CCallHelpers::JumpList& endJumpList, CCallHelpers::JumpList& slowPathJumpList, const ArithProfile*, bool shouldEmitProfiling);
+
+private:
+ JSValueRegs m_result;
+ JSValueRegs m_src;
+ GPRReg m_scratchGPR;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITOpcodes.cpp b/Source/JavaScriptCore/jit/JITOpcodes.cpp
index 2bdae1914..337e0b7c0 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2012-2016 Apple Inc. All rights reserved.
* Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com>
*
* Redistribution and use in source and binary forms, with or without
@@ -28,18 +28,22 @@
#if ENABLE(JIT)
#include "JIT.h"
-#include "Arguments.h"
-#include "CopiedSpaceInlines.h"
-#include "Debugger.h"
+#include "BasicBlockLocation.h"
+#include "Exception.h"
#include "Heap.h"
+#include "Interpreter.h"
#include "JITInlines.h"
#include "JSArray.h"
#include "JSCell.h"
#include "JSFunction.h"
-#include "JSPropertyNameIterator.h"
+#include "JSPropertyNameEnumerator.h"
#include "LinkBuffer.h"
+#include "MaxFrameExtentForSlowPathCall.h"
#include "SlowPathCall.h"
+#include "TypeLocation.h"
+#include "TypeProfilerLog.h"
#include "VirtualRegister.h"
+#include "Watchdog.h"
namespace JSC {
@@ -59,21 +63,13 @@ void JIT::emit_op_mov(Instruction* currentInstruction)
emitPutVirtualRegister(dst);
}
-void JIT::emit_op_captured_mov(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int src = currentInstruction[2].u.operand;
-
- emitGetVirtualRegister(src, regT0);
- emitNotifyWrite(regT0, regT1, currentInstruction[3].u.watchpointSet);
- emitPutVirtualRegister(dst);
-}
void JIT::emit_op_end(Instruction* currentInstruction)
{
RELEASE_ASSERT(returnValueGPR != callFrameRegister);
emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueGPR);
- restoreReturnAddressBeforeReturn(Address(callFrameRegister, CallFrame::returnPCOffset()));
+ emitRestoreCalleeSaves();
+ emitFunctionEpilogue();
ret();
}
@@ -87,38 +83,55 @@ void JIT::emit_op_new_object(Instruction* currentInstruction)
{
Structure* structure = currentInstruction[3].u.objectAllocationProfile->structure();
size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity());
- MarkedAllocator* allocator = &m_vm->heap.allocatorForObjectWithoutDestructor(allocationSize);
+ MarkedAllocator* allocator = subspaceFor<JSFinalObject>(*m_vm)->allocatorFor(allocationSize);
RegisterID resultReg = regT0;
RegisterID allocatorReg = regT1;
RegisterID scratchReg = regT2;
move(TrustedImmPtr(allocator), allocatorReg);
- emitAllocateJSObject(allocatorReg, TrustedImmPtr(structure), resultReg, scratchReg);
+ if (allocator)
+ addSlowCase(Jump());
+ JumpList slowCases;
+ emitAllocateJSObject(resultReg, allocator, allocatorReg, TrustedImmPtr(structure), TrustedImmPtr(0), scratchReg, slowCases);
+ emitInitializeInlineStorage(resultReg, structure->inlineCapacity());
+ addSlowCase(slowCases);
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
void JIT::emitSlow_op_new_object(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
+ linkSlowCase(iter);
int dst = currentInstruction[1].u.operand;
Structure* structure = currentInstruction[3].u.objectAllocationProfile->structure();
callOperation(operationNewObject, structure);
emitStoreCell(dst, returnValueGPR);
}
-void JIT::emit_op_check_has_instance(Instruction* currentInstruction)
+void JIT::emit_op_overrides_has_instance(Instruction* currentInstruction)
{
- int baseVal = currentInstruction[3].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int constructor = currentInstruction[2].u.operand;
+ int hasInstanceValue = currentInstruction[3].u.operand;
+
+ emitGetVirtualRegister(hasInstanceValue, regT0);
+
+ // We don't jump if we know what Symbol.hasInstance would do.
+ Jump customhasInstanceValue = branchPtr(NotEqual, regT0, TrustedImmPtr(m_codeBlock->globalObject()->functionProtoHasInstanceSymbolFunction()));
+
+ emitGetVirtualRegister(constructor, regT0);
- emitGetVirtualRegister(baseVal, regT0);
+    // Check that the constructor has the 'ImplementsDefaultHasInstance' type-info flag, i.e. that it is not a C-API user object nor a bound function.
+ test8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance), regT0);
+ emitTagBool(regT0);
+ Jump done = jump();
- // Check that baseVal is a cell.
- emitJumpSlowCaseIfNotJSCell(regT0, baseVal);
+ customhasInstanceValue.link(this);
+ move(TrustedImm32(ValueTrue), regT0);
- // Check that baseVal 'ImplementsHasInstance'.
- loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
- addSlowCase(branchTest8(Zero, Address(regT0, Structure::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance)));
+ done.link(this);
+ emitPutVirtualRegister(dst);
}
void JIT::emit_op_instanceof(Instruction* currentInstruction)
@@ -132,13 +145,12 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
emitGetVirtualRegister(value, regT2);
emitGetVirtualRegister(proto, regT1);
- // Check that proto are cells. baseVal must be a cell - this is checked by op_check_has_instance.
+    // Check that value and proto are cells. baseVal must be a cell - this is checked by the get_by_id for Symbol.hasInstance.
emitJumpSlowCaseIfNotJSCell(regT2, value);
emitJumpSlowCaseIfNotJSCell(regT1, proto);
// Check that prototype is an object
- loadPtr(Address(regT1, JSCell::structureOffset()), regT3);
- addSlowCase(emitJumpIfNotObject(regT3));
+ addSlowCase(emitJumpIfCellNotObject(regT1));
// Optimistically load the result true, and start looping.
// Initially, regT1 still contains proto and regT2 still contains value.
@@ -146,9 +158,11 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0);
Label loop(this);
+ addSlowCase(branch8(Equal, Address(regT2, JSCell::typeInfoTypeOffset()), TrustedImm32(ProxyObjectType)));
+
// Load the prototype of the object in regT2. If this is equal to regT1 - WIN!
// Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
- loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
+ emitLoadStructure(regT2, regT2, regT3);
load64(Address(regT2, Structure::prototypeOffset()), regT2);
Jump isInstance = branchPtr(Equal, regT2, regT1);
emitJumpIfJSCell(regT2).linkTo(loop, this);
@@ -161,6 +175,24 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
emitPutVirtualRegister(dst);
}
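// A conceptual, standalone model (not JSC API) of the inline instanceof loop emitted above:
// optimistically load "true", then walk the prototype chain until we hit the target prototype or
// fall off the end. MockCell and instanceOfModel are hypothetical; the real code also bails to
// the slow path for proxies and for prototypes that are not cells.
struct MockCell { const MockCell* prototype = nullptr; };

bool instanceOfModel(const MockCell& value, const MockCell& protoToMatch)
{
    for (const MockCell* p = value.prototype; p; p = p->prototype) {
        if (p == &protoToMatch)
            return true; // the emitted code jumps out with the result register already holding true
    }
    return false; // chain ended at null: overwrite the optimistic true with false
}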
+void JIT::emit_op_instanceof_custom(Instruction*)
+{
+    // This always goes to the slow path since we expect it to be rare.
+ addSlowCase(jump());
+}
+
+void JIT::emit_op_is_empty(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(value, regT0);
+ compare64(Equal, regT0, TrustedImm32(JSValue::encode(JSValue())), regT0);
+
+ emitTagBool(regT0);
+ emitPutVirtualRegister(dst);
+}
+
void JIT::emit_op_is_undefined(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
@@ -173,19 +205,19 @@ void JIT::emit_op_is_undefined(Instruction* currentInstruction)
Jump done = jump();
isCell.link(this);
- loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
- Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT1, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
move(TrustedImm32(0), regT0);
Jump notMasqueradesAsUndefined = jump();
isMasqueradesAsUndefined.link(this);
+ emitLoadStructure(regT0, regT1, regT2);
move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
loadPtr(Address(regT1, Structure::globalObjectOffset()), regT1);
comparePtr(Equal, regT0, regT1, regT0);
notMasqueradesAsUndefined.link(this);
done.link(this);
- emitTagAsBoolImmediate(regT0);
+ emitTagBool(regT0);
emitPutVirtualRegister(dst);
}
@@ -197,7 +229,7 @@ void JIT::emit_op_is_boolean(Instruction* currentInstruction)
emitGetVirtualRegister(value, regT0);
xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0);
test64(Zero, regT0, TrustedImm32(static_cast<int32_t>(~1)), regT0);
- emitTagAsBoolImmediate(regT0);
+ emitTagBool(regT0);
emitPutVirtualRegister(dst);
}
@@ -208,72 +240,50 @@ void JIT::emit_op_is_number(Instruction* currentInstruction)
emitGetVirtualRegister(value, regT0);
test64(NonZero, regT0, tagTypeNumberRegister, regT0);
- emitTagAsBoolImmediate(regT0);
+ emitTagBool(regT0);
emitPutVirtualRegister(dst);
}
-void JIT::emit_op_is_string(Instruction* currentInstruction)
+void JIT::emit_op_is_cell_with_type(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
int value = currentInstruction[2].u.operand;
-
+ int type = currentInstruction[3].u.operand;
+
emitGetVirtualRegister(value, regT0);
Jump isNotCell = emitJumpIfNotJSCell(regT0);
-
- loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
- compare8(Equal, Address(regT1, Structure::typeInfoTypeOffset()), TrustedImm32(StringType), regT0);
- emitTagAsBoolImmediate(regT0);
+
+ compare8(Equal, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(type), regT0);
+ emitTagBool(regT0);
Jump done = jump();
-
+
isNotCell.link(this);
move(TrustedImm32(ValueFalse), regT0);
-
+
done.link(this);
emitPutVirtualRegister(dst);
}
-void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
-{
- int activation = currentInstruction[1].u.operand;
- Jump activationNotCreated = branchTest64(Zero, addressFor(activation));
- emitGetVirtualRegister(activation, regT0);
- callOperation(operationTearOffActivation, regT0);
- activationNotCreated.link(this);
-}
-
-void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction)
-{
- int arguments = currentInstruction[1].u.operand;
- int activation = currentInstruction[2].u.operand;
-
- Jump argsNotCreated = branchTest64(Zero, Address(callFrameRegister, sizeof(Register) * (unmodifiedArgumentsRegister(VirtualRegister(arguments)).offset())));
- emitGetVirtualRegister(unmodifiedArgumentsRegister(VirtualRegister(arguments)).offset(), regT0);
- emitGetVirtualRegister(activation, regT1);
- callOperation(operationTearOffArguments, regT0, regT1);
- argsNotCreated.link(this);
-}
-
-void JIT::emit_op_ret(Instruction* currentInstruction)
+void JIT::emit_op_is_object(Instruction* currentInstruction)
{
- ASSERT(callFrameRegister != regT1);
- ASSERT(regT1 != returnValueGPR);
- ASSERT(returnValueGPR != callFrameRegister);
+ int dst = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
- // Return the result in %eax.
- emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueGPR);
+ emitGetVirtualRegister(value, regT0);
+ Jump isNotCell = emitJumpIfNotJSCell(regT0);
- // Grab the return address.
- emitGetReturnPCFromCallFrameHeaderPtr(regT1);
+ compare8(AboveOrEqual, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType), regT0);
+ emitTagBool(regT0);
+ Jump done = jump();
- // Restore our caller's "r".
- emitGetCallerFrameFromCallFrameHeaderPtr(callFrameRegister);
+ isNotCell.link(this);
+ move(TrustedImm32(ValueFalse), regT0);
- // Return.
- restoreReturnAddressBeforeReturn(regT1);
- ret();
+ done.link(this);
+ emitPutVirtualRegister(dst);
}
-void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction)
+void JIT::emit_op_ret(Instruction* currentInstruction)
{
ASSERT(callFrameRegister != regT1);
ASSERT(regT1 != returnValueGPR);
@@ -281,33 +291,10 @@ void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction)
// Return the result in %eax.
emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueGPR);
- Jump notJSCell = emitJumpIfNotJSCell(returnValueGPR);
- loadPtr(Address(returnValueGPR, JSCell::structureOffset()), regT2);
- Jump notObject = emitJumpIfNotObject(regT2);
-
- // Grab the return address.
- emitGetReturnPCFromCallFrameHeaderPtr(regT1);
-
- // Restore our caller's "r".
- emitGetCallerFrameFromCallFrameHeaderPtr(callFrameRegister);
-
- // Return.
- restoreReturnAddressBeforeReturn(regT1);
- ret();
- // Return 'this' in %eax.
- notJSCell.link(this);
- notObject.link(this);
- emitGetVirtualRegister(currentInstruction[2].u.operand, returnValueGPR);
-
- // Grab the return address.
- emitGetReturnPCFromCallFrameHeaderPtr(regT1);
-
- // Restore our caller's "r".
- emitGetCallerFrameFromCallFrameHeaderPtr(callFrameRegister);
-
- // Return.
- restoreReturnAddressBeforeReturn(regT1);
+ checkStackPointerAlignment();
+ emitRestoreCalleeSaves();
+ emitFunctionEpilogue();
ret();
}
@@ -319,7 +306,7 @@ void JIT::emit_op_to_primitive(Instruction* currentInstruction)
emitGetVirtualRegister(src, regT0);
Jump isImm = emitJumpIfNotJSCell(regT0);
- addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
+ addSlowCase(emitJumpIfCellObject(regT0));
isImm.link(this);
if (dst != src)
@@ -327,6 +314,13 @@ void JIT::emit_op_to_primitive(Instruction* currentInstruction)
}
+void JIT::emit_op_set_function_name(Instruction* currentInstruction)
+{
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT1);
+ callOperation(operationSetFunctionName, regT0, regT1);
+}
+
void JIT::emit_op_strcat(Instruction* currentInstruction)
{
JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_strcat);
@@ -350,15 +344,16 @@ void JIT::emit_op_not(Instruction* currentInstruction)
void JIT::emit_op_jfalse(Instruction* currentInstruction)
{
unsigned target = currentInstruction[2].u.operand;
- emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
- addJump(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsNumber(0)))), target);
- Jump isNonZero = emitJumpIfImmediateInteger(regT0);
+ GPRReg value = regT0;
+ GPRReg result = regT1;
+ GPRReg scratch = regT2;
+ bool shouldCheckMasqueradesAsUndefined = true;
- addJump(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsBoolean(false)))), target);
- addSlowCase(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(jsBoolean(true)))));
+ emitGetVirtualRegister(currentInstruction[1].u.operand, value);
+ emitConvertValueToBoolean(JSValueRegs(value), result, scratch, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject());
- isNonZero.link(this);
+ addJump(branchTest32(Zero, result), target);
}
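// A standalone sketch of the ECMAScript ToBoolean conversion that the new
// emitConvertValueToBoolean() path above performs for op_jfalse/op_jtrue, replacing the old
// hand-rolled constant comparisons. toBooleanModel and MockValue are hypothetical; objects are
// omitted (they are always truthy apart from the MasqueradesAsUndefined corner case, such as
// document.all, which the JIT handles via the shouldCheckMasqueradesAsUndefined flag).
#include <cassert>
#include <cmath>
#include <cstddef>
#include <string>
#include <variant>

using MockValue = std::variant<std::monostate, std::nullptr_t, bool, double, std::string>;

bool toBooleanModel(const MockValue& v)
{
    if (std::holds_alternative<std::monostate>(v) || std::holds_alternative<std::nullptr_t>(v))
        return false;                        // undefined and null are falsy
    if (auto* b = std::get_if<bool>(&v))
        return *b;                           // booleans convert to themselves
    if (auto* d = std::get_if<double>(&v))
        return *d != 0.0 && !std::isnan(*d); // +0, -0 and NaN are falsy
    return !std::get<std::string>(v).empty(); // the empty string is falsy
}

int main()
{
    assert(!toBooleanModel(MockValue{}));                                          // undefined
    assert(!toBooleanModel(MockValue{0.0}) && !toBooleanModel(MockValue{std::string()}));
    assert(toBooleanModel(MockValue{42.0}) && toBooleanModel(MockValue{std::string("x")}));
    return 0;
}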
void JIT::emit_op_jeq_null(Instruction* currentInstruction)
@@ -370,8 +365,8 @@ void JIT::emit_op_jeq_null(Instruction* currentInstruction)
Jump isImmediate = emitJumpIfNotJSCell(regT0);
// First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- Jump isNotMasqueradesAsUndefined = branchTest8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ Jump isNotMasqueradesAsUndefined = branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ emitLoadStructure(regT0, regT2, regT1);
move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
addJump(branchPtr(Equal, Address(regT2, Structure::globalObjectOffset()), regT0), target);
Jump masqueradesGlobalObjectIsForeign = jump();
@@ -393,8 +388,8 @@ void JIT::emit_op_jneq_null(Instruction* currentInstruction)
Jump isImmediate = emitJumpIfNotJSCell(regT0);
// First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- addJump(branchTest8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
+ addJump(branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
+ emitLoadStructure(regT0, regT2, regT1);
move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
addJump(branchPtr(NotEqual, Address(regT2, Structure::globalObjectOffset()), regT0), target);
Jump wasNotImmediate = jump();
@@ -414,179 +409,58 @@ void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
unsigned target = currentInstruction[3].u.operand;
emitGetVirtualRegister(src, regT0);
- addJump(branchPtr(NotEqual, regT0, TrustedImmPtr(actualPointerFor(m_codeBlock, ptr))), target);
+ CCallHelpers::Jump equal = branchPtr(Equal, regT0, TrustedImmPtr(actualPointerFor(m_codeBlock, ptr)));
+ store32(TrustedImm32(1), &currentInstruction[4].u.operand);
+ addJump(jump(), target);
+ equal.link(this);
}
void JIT::emit_op_eq(Instruction* currentInstruction)
{
emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
- emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
+ emitJumpSlowCaseIfNotInt(regT0, regT1, regT2);
compare32(Equal, regT1, regT0, regT0);
- emitTagAsBoolImmediate(regT0);
+ emitTagBool(regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
void JIT::emit_op_jtrue(Instruction* currentInstruction)
{
unsigned target = currentInstruction[2].u.operand;
- emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
-
- Jump isZero = branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsNumber(0))));
- addJump(emitJumpIfImmediateInteger(regT0), target);
-
- addJump(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsBoolean(true)))), target);
- addSlowCase(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(jsBoolean(false)))));
- isZero.link(this);
+ GPRReg value = regT0;
+ GPRReg result = regT1;
+ GPRReg scratch = regT2;
+ bool shouldCheckMasqueradesAsUndefined = true;
+ emitGetVirtualRegister(currentInstruction[1].u.operand, value);
+ emitConvertValueToBoolean(JSValueRegs(value), result, scratch, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject());
+ addJump(branchTest32(NonZero, result), target);
}
void JIT::emit_op_neq(Instruction* currentInstruction)
{
emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
- emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
+ emitJumpSlowCaseIfNotInt(regT0, regT1, regT2);
compare32(NotEqual, regT1, regT0, regT0);
- emitTagAsBoolImmediate(regT0);
-
- emitPutVirtualRegister(currentInstruction[1].u.operand);
-
-}
+ emitTagBool(regT0);
-void JIT::emit_op_bitxor(Instruction* currentInstruction)
-{
- emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
- emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
- xor64(regT1, regT0);
- emitFastArithReTagImmediate(regT0, regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
-}
-void JIT::emit_op_bitor(Instruction* currentInstruction)
-{
- emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
- emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
- or64(regT1, regT0);
- emitPutVirtualRegister(currentInstruction[1].u.operand);
}
void JIT::emit_op_throw(Instruction* currentInstruction)
{
ASSERT(regT0 == returnValueGPR);
+ copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
callOperationNoExceptionCheck(operationThrow, regT0);
jumpToExceptionHandler();
}
-void JIT::emit_op_get_pnames(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int i = currentInstruction[3].u.operand;
- int size = currentInstruction[4].u.operand;
- int breakTarget = currentInstruction[5].u.operand;
-
- JumpList isNotObject;
-
- emitGetVirtualRegister(base, regT0);
- if (!m_codeBlock->isKnownNotImmediate(base))
- isNotObject.append(emitJumpIfNotJSCell(regT0));
- if (base != m_codeBlock->thisRegister().offset() || m_codeBlock->isStrictMode()) {
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- isNotObject.append(emitJumpIfNotObject(regT2));
- }
-
- // We could inline the case where you have a valid cache, but
- // this call doesn't seem to be hot.
- Label isObject(this);
- callOperation(operationGetPNames, regT0);
- emitStoreCell(dst, returnValueGPR);
- load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3);
- store64(tagTypeNumberRegister, addressFor(i));
- store32(TrustedImm32(Int32Tag), intTagFor(size));
- store32(regT3, intPayloadFor(size));
- Jump end = jump();
-
- isNotObject.link(this);
- move(regT0, regT1);
- and32(TrustedImm32(~TagBitUndefined), regT1);
- addJump(branch32(Equal, regT1, TrustedImm32(ValueNull)), breakTarget);
- callOperation(operationToObject, base, regT0);
- jump().linkTo(isObject, this);
-
- end.link(this);
-}
-
-void JIT::emit_op_next_pname(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int i = currentInstruction[3].u.operand;
- int size = currentInstruction[4].u.operand;
- int it = currentInstruction[5].u.operand;
- int target = currentInstruction[6].u.operand;
-
- JumpList callHasProperty;
-
- Label begin(this);
- load32(intPayloadFor(i), regT0);
- Jump end = branch32(Equal, regT0, intPayloadFor(size));
-
- // Grab key @ i
- loadPtr(addressFor(it), regT1);
- loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2);
-
- load64(BaseIndex(regT2, regT0, TimesEight), regT2);
-
- emitPutVirtualRegister(dst, regT2);
-
- // Increment i
- add32(TrustedImm32(1), regT0);
- store32(regT0, intPayloadFor(i));
-
- // Verify that i is valid:
- emitGetVirtualRegister(base, regT0);
-
- // Test base's structure
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- callHasProperty.append(branchPtr(NotEqual, regT2, Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure)))));
-
- // Test base's prototype chain
- loadPtr(Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedPrototypeChain))), regT3);
- loadPtr(Address(regT3, OBJECT_OFFSETOF(StructureChain, m_vector)), regT3);
- addJump(branchTestPtr(Zero, Address(regT3)), target);
-
- Label checkPrototype(this);
- load64(Address(regT2, Structure::prototypeOffset()), regT2);
- callHasProperty.append(emitJumpIfNotJSCell(regT2));
- loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
- callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3)));
- addPtr(TrustedImm32(sizeof(Structure*)), regT3);
- branchTestPtr(NonZero, Address(regT3)).linkTo(checkPrototype, this);
-
- // Continue loop.
- addJump(jump(), target);
-
- // Slow case: Ask the object if i is valid.
- callHasProperty.link(this);
- emitGetVirtualRegister(dst, regT1);
- callOperation(operationHasProperty, regT0, regT1);
-
- // Test for valid key.
- addJump(branchTest32(NonZero, regT0), target);
- jump().linkTo(begin, this);
-
- // End of loop.
- end.link(this);
-}
-
void JIT::emit_op_push_with_scope(Instruction* currentInstruction)
{
- emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
- callOperation(operationPushWithScope, regT0);
-}
-
-void JIT::emit_op_pop_scope(Instruction*)
-{
- callOperation(operationPopScope);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_push_with_scope);
+ slowPathCall.call();
}
void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
@@ -604,18 +478,18 @@ void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqTy
// Jump slow if either is a double. First test if it's an integer, which is fine, and then test
// if it's a double.
- Jump leftOK = emitJumpIfImmediateInteger(regT0);
- addSlowCase(emitJumpIfImmediateNumber(regT0));
+ Jump leftOK = emitJumpIfInt(regT0);
+ addSlowCase(emitJumpIfNumber(regT0));
leftOK.link(this);
- Jump rightOK = emitJumpIfImmediateInteger(regT1);
- addSlowCase(emitJumpIfImmediateNumber(regT1));
+ Jump rightOK = emitJumpIfInt(regT1);
+ addSlowCase(emitJumpIfNumber(regT1));
rightOK.link(this);
if (type == OpStrictEq)
compare64(Equal, regT1, regT0, regT0);
else
compare64(NotEqual, regT1, regT0, regT0);
- emitTagAsBoolImmediate(regT0);
+ emitTagBool(regT0);
emitPutVirtualRegister(dst);
}
@@ -632,27 +506,70 @@ void JIT::emit_op_nstricteq(Instruction* currentInstruction)
void JIT::emit_op_to_number(Instruction* currentInstruction)
{
+ int dstVReg = currentInstruction[1].u.operand;
int srcVReg = currentInstruction[2].u.operand;
emitGetVirtualRegister(srcVReg, regT0);
- addSlowCase(emitJumpIfNotImmediateNumber(regT0));
+ addSlowCase(emitJumpIfNotNumber(regT0));
- emitPutVirtualRegister(currentInstruction[1].u.operand);
+ emitValueProfilingSite();
+ if (srcVReg != dstVReg)
+ emitPutVirtualRegister(dstVReg);
}
-void JIT::emit_op_push_name_scope(Instruction* currentInstruction)
+void JIT::emit_op_to_string(Instruction* currentInstruction)
{
- emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
- callOperation(operationPushNameScope, &m_codeBlock->identifier(currentInstruction[1].u.operand), regT0, currentInstruction[3].u.operand);
+ int srcVReg = currentInstruction[2].u.operand;
+ emitGetVirtualRegister(srcVReg, regT0);
+
+ addSlowCase(emitJumpIfNotJSCell(regT0));
+ addSlowCase(branch8(NotEqual, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType)));
+
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
}
void JIT::emit_op_catch(Instruction* currentInstruction)
{
+ restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer();
+
+ move(TrustedImmPtr(m_vm), regT3);
+ load64(Address(regT3, VM::callFrameForCatchOffset()), callFrameRegister);
+ storePtr(TrustedImmPtr(nullptr), Address(regT3, VM::callFrameForCatchOffset()));
+
+ addPtr(TrustedImm32(stackPointerOffsetFor(codeBlock()) * sizeof(Register)), callFrameRegister, stackPointerRegister);
+
+ callOperationNoExceptionCheck(operationCheckIfExceptionIsUncatchableAndNotifyProfiler);
+ Jump isCatchableException = branchTest32(Zero, returnValueGPR);
+ jumpToExceptionHandler();
+ isCatchableException.link(this);
+
move(TrustedImmPtr(m_vm), regT3);
- load64(Address(regT3, VM::callFrameForThrowOffset()), callFrameRegister);
load64(Address(regT3, VM::exceptionOffset()), regT0);
store64(TrustedImm64(JSValue::encode(JSValue())), Address(regT3, VM::exceptionOffset()));
emitPutVirtualRegister(currentInstruction[1].u.operand);
+
+ load64(Address(regT0, Exception::valueOffset()), regT0);
+ emitPutVirtualRegister(currentInstruction[2].u.operand);
+}
+
+void JIT::emit_op_assert(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_assert);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_create_lexical_environment(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_lexical_environment);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_get_parent_scope(Instruction* currentInstruction)
+{
+ int currentScope = currentInstruction[2].u.operand;
+ emitGetVirtualRegister(currentScope, regT0);
+ loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
+ emitStoreCell(currentInstruction[1].u.operand, regT0);
}
void JIT::emit_op_switch_imm(Instruction* currentInstruction)
@@ -664,7 +581,7 @@ void JIT::emit_op_switch_imm(Instruction* currentInstruction)
// create jump table for switch destinations, track this switch statement.
SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex);
m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Immediate));
- jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+ jumpTable->ensureCTITable();
emitGetVirtualRegister(scrutinee, regT0);
callOperation(operationSwitchImmWithUnknownKeyType, regT0, tableIndex);
@@ -680,7 +597,7 @@ void JIT::emit_op_switch_char(Instruction* currentInstruction)
// create jump table for switch destinations, track this switch statement.
SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex);
m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Character));
- jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+ jumpTable->ensureCTITable();
emitGetVirtualRegister(scrutinee, regT0);
callOperation(operationSwitchCharWithUnknownKeyType, regT0, tableIndex);
@@ -702,12 +619,6 @@ void JIT::emit_op_switch_string(Instruction* currentInstruction)
jump(returnValueGPR);
}
-void JIT::emit_op_throw_static_error(Instruction* currentInstruction)
-{
- move(TrustedImm64(JSValue::encode(m_codeBlock->getConstant(currentInstruction[1].u.operand))), regT0);
- callOperation(operationThrowStaticError, regT0, currentInstruction[2].u.operand);
-}
-
void JIT::emit_op_debug(Instruction* currentInstruction)
{
load32(codeBlock()->debuggerRequestsAddress(), regT0);
@@ -724,12 +635,12 @@ void JIT::emit_op_eq_null(Instruction* currentInstruction)
emitGetVirtualRegister(src1, regT0);
Jump isImmediate = emitJumpIfNotJSCell(regT0);
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
move(TrustedImm32(0), regT0);
Jump wasNotMasqueradesAsUndefined = jump();
isMasqueradesAsUndefined.link(this);
+ emitLoadStructure(regT0, regT2, regT1);
move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2);
comparePtr(Equal, regT0, regT2, regT0);
@@ -743,7 +654,7 @@ void JIT::emit_op_eq_null(Instruction* currentInstruction)
wasNotImmediate.link(this);
wasNotMasqueradesAsUndefined.link(this);
- emitTagAsBoolImmediate(regT0);
+ emitTagBool(regT0);
emitPutVirtualRegister(dst);
}
@@ -756,12 +667,12 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction)
emitGetVirtualRegister(src1, regT0);
Jump isImmediate = emitJumpIfNotJSCell(regT0);
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
move(TrustedImm32(1), regT0);
Jump wasNotMasqueradesAsUndefined = jump();
isMasqueradesAsUndefined.link(this);
+ emitLoadStructure(regT0, regT2, regT1);
move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2);
comparePtr(NotEqual, regT0, regT2, regT0);
@@ -775,53 +686,30 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction)
wasNotImmediate.link(this);
wasNotMasqueradesAsUndefined.link(this);
- emitTagAsBoolImmediate(regT0);
+ emitTagBool(regT0);
emitPutVirtualRegister(dst);
}
-void JIT::emit_op_enter(Instruction* currentInstruction)
+void JIT::emit_op_enter(Instruction*)
{
- emitEnterOptimizationCheck();
-
// Even though CTI doesn't use them, we initialize our constant
// registers to zap stale pointers, to avoid unnecessarily prolonging
// object lifetime and increasing GC pressure.
size_t count = m_codeBlock->m_numVars;
- for (size_t j = 0; j < count; ++j)
+ for (size_t j = CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters(); j < count; ++j)
emitInitRegister(virtualRegisterForLocal(j).offset());
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_enter);
- slowPathCall.call();
-}
-
-void JIT::emit_op_create_activation(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
-
- Jump activationCreated = branchTest64(NonZero, Address(callFrameRegister, sizeof(Register) * dst));
- callOperation(operationCreateActivation, 0);
- emitStoreCell(dst, returnValueGPR);
- activationCreated.link(this);
-}
-
-void JIT::emit_op_create_arguments(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
+ emitWriteBarrier(m_codeBlock);
- Jump argsCreated = branchTest64(NonZero, Address(callFrameRegister, sizeof(Register) * dst));
-
- callOperation(operationCreateArguments);
- emitStoreCell(dst, returnValueGPR);
- emitStoreCell(unmodifiedArgumentsRegister(VirtualRegister(dst)), returnValueGPR);
-
- argsCreated.link(this);
+ emitEnterOptimizationCheck();
}
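// Illustrative sketch of the zapping loop above - emitInitRegister() stores
// jsUndefined() into a local slot, and the first locals are reserved for the
// LLInt/baseline callee saves, so they are skipped:
//   for (size_t j = CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters(); j < m_numVars; ++j)
//       callFrame->uncheckedR(virtualRegisterForLocal(j)) = jsUndefined();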
-void JIT::emit_op_init_lazy_reg(Instruction* currentInstruction)
+void JIT::emit_op_get_scope(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
-
- store64(TrustedImm64((int64_t)0), Address(callFrameRegister, sizeof(Register) * dst));
+ emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, regT0);
+ loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT0);
+ emitStoreCell(dst, regT0);
}
void JIT::emit_op_to_this(Instruction* currentInstruction)
@@ -830,74 +718,73 @@ void JIT::emit_op_to_this(Instruction* currentInstruction)
emitGetVirtualRegister(currentInstruction[1].u.operand, regT1);
emitJumpSlowCaseIfNotJSCell(regT1);
- loadPtr(Address(regT1, JSCell::structureOffset()), regT0);
- addSlowCase(branch8(NotEqual, Address(regT0, Structure::typeInfoTypeOffset()), TrustedImm32(FinalObjectType)));
+ addSlowCase(branch8(NotEqual, Address(regT1, JSCell::typeInfoTypeOffset()), TrustedImm32(FinalObjectType)));
loadPtr(cachedStructure, regT2);
- addSlowCase(branchPtr(NotEqual, regT0, regT2));
-}
-
-void JIT::emit_op_get_callee(Instruction* currentInstruction)
-{
- int result = currentInstruction[1].u.operand;
- WriteBarrierBase<JSCell>* cachedFunction = &currentInstruction[2].u.jsCell;
- emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT0);
-
- loadPtr(cachedFunction, regT2);
- addSlowCase(branchPtr(NotEqual, regT0, regT2));
-
- emitPutVirtualRegister(result);
-}
-
-void JIT::emitSlow_op_get_callee(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
-
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_callee);
- slowPathCall.call();
+ addSlowCase(branchTestPtr(Zero, regT2));
+ load32(Address(regT2, Structure::structureIDOffset()), regT2);
+ addSlowCase(branch32(NotEqual, Address(regT1, JSCell::structureIDOffset()), regT2));
}
void JIT::emit_op_create_this(Instruction* currentInstruction)
{
int callee = currentInstruction[2].u.operand;
+ WriteBarrierBase<JSCell>* cachedFunction = &currentInstruction[4].u.jsCell;
RegisterID calleeReg = regT0;
+ RegisterID rareDataReg = regT4;
RegisterID resultReg = regT0;
RegisterID allocatorReg = regT1;
RegisterID structureReg = regT2;
+ RegisterID cachedFunctionReg = regT4;
RegisterID scratchReg = regT3;
emitGetVirtualRegister(callee, calleeReg);
- loadPtr(Address(calleeReg, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg);
- loadPtr(Address(calleeReg, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureReg);
+ addSlowCase(branch8(NotEqual, Address(calleeReg, JSCell::typeInfoTypeOffset()), TrustedImm32(JSFunctionType)));
+ loadPtr(Address(calleeReg, JSFunction::offsetOfRareData()), rareDataReg);
+ addSlowCase(branchTestPtr(Zero, rareDataReg));
+ loadPtr(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg);
+ loadPtr(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureReg);
addSlowCase(branchTestPtr(Zero, allocatorReg));
- emitAllocateJSObject(allocatorReg, structureReg, resultReg, scratchReg);
+ loadPtr(cachedFunction, cachedFunctionReg);
+ Jump hasSeenMultipleCallees = branchPtr(Equal, cachedFunctionReg, TrustedImmPtr(JSCell::seenMultipleCalleeObjects()));
+ addSlowCase(branchPtr(NotEqual, calleeReg, cachedFunctionReg));
+ hasSeenMultipleCallees.link(this);
+
+ JumpList slowCases;
+ emitAllocateJSObject(resultReg, nullptr, allocatorReg, structureReg, TrustedImmPtr(0), scratchReg, slowCases);
+ emitGetVirtualRegister(callee, scratchReg);
+ loadPtr(Address(scratchReg, JSFunction::offsetOfRareData()), scratchReg);
+ load32(Address(scratchReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfInlineCapacity()), scratchReg);
+ emitInitializeInlineStorage(resultReg, scratchReg);
+ addSlowCase(slowCases);
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
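// Summary of the fast path above (illustrative; any failed check falls through to
// slow_path_create_this):
//   fast = callee->type() == JSFunctionType
//       && callee->rareData() != nullptr
//       && callee->rareData()->objectAllocationProfile()->allocator() != nullptr
//       && (cachedFunction == seenMultipleCalleeObjects() || cachedFunction == callee);
//   if (fast) { result = inline-allocate with the profiled Structure;
//               initialize its inlineCapacity inline-storage slots; }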
void JIT::emitSlow_op_create_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
+ linkSlowCase(iter); // Callee::m_type != JSFunctionType.
+ linkSlowCase(iter); // doesn't have rare data
linkSlowCase(iter); // doesn't have an allocation profile
- linkSlowCase(iter); // allocation failed
+ linkSlowCase(iter); // allocation failed (no allocator)
+ linkSlowCase(iter); // allocation failed (allocator empty)
+ linkSlowCase(iter); // cached function didn't match
JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_this);
slowPathCall.call();
}
-void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
+void JIT::emit_op_check_tdz(Instruction* currentInstruction)
{
- Jump profilerDone = branchTestPtr(Zero, AbsoluteAddress(m_vm->enabledProfilerAddress()));
emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
- callOperation(operationProfileWillCall, regT0);
- profilerDone.link(this);
+ addSlowCase(branchTest64(Zero, regT0));
}
-void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
+void JIT::emitSlow_op_check_tdz(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- Jump profilerDone = branchTestPtr(Zero, AbsoluteAddress(m_vm->enabledProfilerAddress()));
- emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
- callOperation(operationProfileDidCall, regT0);
- profilerDone.link(this);
+ linkSlowCase(iter);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_throw_tdz_error);
+ slowPathCall.call();
}
@@ -908,6 +795,7 @@ void JIT::emitSlow_op_to_this(Instruction* currentInstruction, Vector<SlowCaseEn
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
+ linkSlowCase(iter);
JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_this);
slowPathCall.call();
@@ -929,39 +817,11 @@ void JIT::emitSlow_op_not(Instruction* currentInstruction, Vector<SlowCaseEntry>
slowPathCall.call();
}
-void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
- callOperation(operationConvertJSValueToBoolean, regT0);
- emitJumpSlowToHot(branchTest32(Zero, returnValueGPR), currentInstruction[2].u.operand); // inverted!
-}
-
-void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
- callOperation(operationConvertJSValueToBoolean, regT0);
- emitJumpSlowToHot(branchTest32(NonZero, returnValueGPR), currentInstruction[2].u.operand);
-}
-
-void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitxor);
- slowPathCall.call();
-}
-
-void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitor);
- slowPathCall.call();
-}
-
void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
callOperation(operationCompareEq, regT0, regT1);
- emitTagAsBoolImmediate(returnValueGPR);
+ emitTagBool(returnValueGPR);
emitPutVirtualRegister(currentInstruction[1].u.operand, returnValueGPR);
}
@@ -970,7 +830,7 @@ void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>
linkSlowCase(iter);
callOperation(operationCompareEq, regT0, regT1);
xor32(TrustedImm32(0x1), regT0);
- emitTagAsBoolImmediate(returnValueGPR);
+ emitTagBool(returnValueGPR);
emitPutVirtualRegister(currentInstruction[1].u.operand, returnValueGPR);
}
@@ -992,33 +852,35 @@ void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCase
slowPathCall.call();
}
-void JIT::emitSlow_op_check_has_instance(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
int dst = currentInstruction[1].u.operand;
int value = currentInstruction[2].u.operand;
- int baseVal = currentInstruction[3].u.operand;
+ int proto = currentInstruction[3].u.operand;
- linkSlowCaseIfNotJSCell(iter, baseVal);
+ linkSlowCaseIfNotJSCell(iter, value);
+ linkSlowCaseIfNotJSCell(iter, proto);
+ linkSlowCase(iter);
linkSlowCase(iter);
emitGetVirtualRegister(value, regT0);
- emitGetVirtualRegister(baseVal, regT1);
- callOperation(operationCheckHasInstance, dst, regT0, regT1);
-
- emitJumpSlowToHot(jump(), currentInstruction[4].u.operand);
+ emitGetVirtualRegister(proto, regT1);
+ callOperation(operationInstanceOf, dst, regT0, regT1);
}
-void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_instanceof_custom(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
int dst = currentInstruction[1].u.operand;
int value = currentInstruction[2].u.operand;
- int proto = currentInstruction[3].u.operand;
+ int constructor = currentInstruction[3].u.operand;
+ int hasInstanceValue = currentInstruction[4].u.operand;
- linkSlowCaseIfNotJSCell(iter, value);
- linkSlowCaseIfNotJSCell(iter, proto);
linkSlowCase(iter);
emitGetVirtualRegister(value, regT0);
- emitGetVirtualRegister(proto, regT1);
- callOperation(operationInstanceOf, dst, regT0, regT1);
+ emitGetVirtualRegister(constructor, regT1);
+ emitGetVirtualRegister(hasInstanceValue, regT2);
+ callOperation(operationInstanceOfCustom, regT0, regT1, regT2);
+ emitTagBool(returnValueGPR);
+ emitPutVirtualRegister(dst, returnValueGPR);
}
void JIT::emitSlow_op_to_number(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -1029,125 +891,69 @@ void JIT::emitSlow_op_to_number(Instruction* currentInstruction, Vector<SlowCase
slowPathCall.call();
}
-void JIT::emit_op_get_arguments_length(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int argumentsRegister = currentInstruction[2].u.operand;
- addSlowCase(branchTest64(NonZero, addressFor(argumentsRegister)));
- emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT0);
- sub32(TrustedImm32(1), regT0);
- emitFastArithReTagImmediate(regT0, regT0);
- emitPutVirtualRegister(dst, regT0);
-}
-
-void JIT::emitSlow_op_get_arguments_length(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- callOperation(operationGetArgumentsLength, dst, base);
-}
-
-void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction)
+void JIT::emitSlow_op_to_string(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- int dst = currentInstruction[1].u.operand;
- int argumentsRegister = currentInstruction[2].u.operand;
- int property = currentInstruction[3].u.operand;
- addSlowCase(branchTest64(NonZero, addressFor(argumentsRegister)));
- emitGetVirtualRegister(property, regT1);
- addSlowCase(emitJumpIfNotImmediateInteger(regT1));
- add32(TrustedImm32(1), regT1);
- // regT1 now contains the integer index of the argument we want, including this
- emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT2);
- addSlowCase(branch32(AboveOrEqual, regT1, regT2));
-
- signExtend32ToPtr(regT1, regT1);
- load64(BaseIndex(callFrameRegister, regT1, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT0);
- emitValueProfilingSite();
- emitPutVirtualRegister(dst, regT0);
-}
+ linkSlowCase(iter); // Not JSCell.
+ linkSlowCase(iter); // Not JSString.
-void JIT::emitSlow_op_get_argument_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- int dst = currentInstruction[1].u.operand;
- int arguments = currentInstruction[2].u.operand;
- int property = currentInstruction[3].u.operand;
-
- linkSlowCase(iter);
- Jump skipArgumentsCreation = jump();
-
- linkSlowCase(iter);
- linkSlowCase(iter);
- callOperation(operationCreateArguments);
- emitStoreCell(arguments, returnValueGPR);
- emitStoreCell(unmodifiedArgumentsRegister(VirtualRegister(arguments)), returnValueGPR);
-
- skipArgumentsCreation.link(this);
- emitGetVirtualRegister(arguments, regT0);
- emitGetVirtualRegister(property, regT1);
- callOperation(WithProfile, operationGetByValGeneric, dst, regT0, regT1);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_string);
+ slowPathCall.call();
}
#endif // USE(JSVALUE64)
-void JIT::emit_op_touch_entry(Instruction* currentInstruction)
-{
- if (m_codeBlock->symbolTable()->m_functionEnteredOnce.hasBeenInvalidated())
- return;
-
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_touch_entry);
- slowPathCall.call();
-}
-
void JIT::emit_op_loop_hint(Instruction*)
{
// Emit the JIT optimization check:
if (canBeOptimized()) {
- if (Options::enableOSREntryInLoops()) {
- addSlowCase(branchAdd32(PositiveOrZero, TrustedImm32(Options::executionCounterIncrementForLoop()),
- AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
- } else {
- // Add with saturation.
- move(TrustedImmPtr(m_codeBlock->addressOfJITExecuteCounter()), regT3);
- load32(regT3, regT2);
- Jump dontAdd = branch32(
- GreaterThan, regT2,
- TrustedImm32(std::numeric_limits<int32_t>::max() - Options::executionCounterIncrementForLoop()));
- add32(TrustedImm32(Options::executionCounterIncrementForLoop()), regT2);
- store32(regT2, regT3);
- dontAdd.link(this);
- }
+ addSlowCase(branchAdd32(PositiveOrZero, TrustedImm32(Options::executionCounterIncrementForLoop()),
+ AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
}
-
- // Emit the watchdog timer check:
- if (m_vm->watchdog.isEnabled())
- addSlowCase(branchTest8(NonZero, AbsoluteAddress(m_vm->watchdog.timerDidFireAddress())));
}
void JIT::emitSlow_op_loop_hint(Instruction*, Vector<SlowCaseEntry>::iterator& iter)
{
#if ENABLE(DFG_JIT)
// Emit the slow path for the JIT optimization check:
- if (canBeOptimized() && Options::enableOSREntryInLoops()) {
+ if (canBeOptimized()) {
linkSlowCase(iter);
-
+
+ copyCalleeSavesFromFrameOrRegisterToVMEntryFrameCalleeSavesBuffer();
+
callOperation(operationOptimize, m_bytecodeOffset);
Jump noOptimizedEntry = branchTestPtr(Zero, returnValueGPR);
+ if (!ASSERT_DISABLED) {
+ Jump ok = branchPtr(MacroAssembler::Above, returnValueGPR, TrustedImmPtr(bitwise_cast<void*>(static_cast<intptr_t>(1000))));
+ abortWithReason(JITUnreasonableLoopHintJumpTarget);
+ ok.link(this);
+ }
jump(returnValueGPR);
noOptimizedEntry.link(this);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_loop_hint));
}
+#else
+ UNUSED_PARAM(iter);
#endif
+}
- // Emit the slow path of the watchdog timer check:
- if (m_vm->watchdog.isEnabled()) {
- linkSlowCase(iter);
- callOperation(operationHandleWatchdogTimer);
+void JIT::emit_op_throw_static_error(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_throw_static_error);
+ slowPathCall.call();
+}
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_loop_hint));
- }
+void JIT::emit_op_watchdog(Instruction*)
+{
+ ASSERT(m_vm->watchdog());
+ addSlowCase(branchTest8(NonZero, AbsoluteAddress(m_vm->watchdog()->timerDidFireAddress())));
+}
+void JIT::emitSlow_op_watchdog(Instruction*, Vector<SlowCaseEntry>::iterator& iter)
+{
+ ASSERT(m_vm->watchdog());
+ linkSlowCase(iter);
+ callOperation(operationHandleWatchdogTimer);
}
void JIT::emit_op_new_regexp(Instruction* currentInstruction)
@@ -1155,36 +961,88 @@ void JIT::emit_op_new_regexp(Instruction* currentInstruction)
callOperation(operationNewRegexp, currentInstruction[1].u.operand, m_codeBlock->regexp(currentInstruction[2].u.operand));
}
-void JIT::emit_op_new_func(Instruction* currentInstruction)
+void JIT::emitNewFuncCommon(Instruction* currentInstruction)
{
Jump lazyJump;
int dst = currentInstruction[1].u.operand;
- if (currentInstruction[3].u.operand) {
-#if USE(JSVALUE32_64)
- lazyJump = branch32(NotEqual, tagFor(dst), TrustedImm32(JSValue::EmptyValueTag));
+
+#if USE(JSVALUE64)
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
#else
- lazyJump = branchTest64(NonZero, addressFor(dst));
+ emitLoadPayload(currentInstruction[2].u.operand, regT0);
#endif
+ FunctionExecutable* funcExec = m_codeBlock->functionDecl(currentInstruction[3].u.operand);
+
+ OpcodeID opcodeID = m_vm->interpreter->getOpcodeID(currentInstruction->u.opcode);
+ if (opcodeID == op_new_func)
+ callOperation(operationNewFunction, dst, regT0, funcExec);
+ else if (opcodeID == op_new_generator_func)
+ callOperation(operationNewGeneratorFunction, dst, regT0, funcExec);
+ else {
+ ASSERT(opcodeID == op_new_async_func);
+ callOperation(operationNewAsyncFunction, dst, regT0, funcExec);
}
+}
- FunctionExecutable* funcExec = m_codeBlock->functionDecl(currentInstruction[2].u.operand);
- callOperation(operationNewFunction, dst, funcExec);
+void JIT::emit_op_new_func(Instruction* currentInstruction)
+{
+ emitNewFuncCommon(currentInstruction);
+}
- if (currentInstruction[3].u.operand)
- lazyJump.link(this);
+void JIT::emit_op_new_generator_func(Instruction* currentInstruction)
+{
+ emitNewFuncCommon(currentInstruction);
}
-void JIT::emit_op_new_captured_func(Instruction* currentInstruction)
+void JIT::emit_op_new_async_func(Instruction* currentInstruction)
{
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_new_captured_func);
- slowPathCall.call();
+ emitNewFuncCommon(currentInstruction);
}
-void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
+void JIT::emitNewFuncExprCommon(Instruction* currentInstruction)
{
+ Jump notUndefinedScope;
int dst = currentInstruction[1].u.operand;
- FunctionExecutable* funcExpr = m_codeBlock->functionExpr(currentInstruction[2].u.operand);
- callOperation(operationNewFunction, dst, funcExpr);
+#if USE(JSVALUE64)
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
+ notUndefinedScope = branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(jsUndefined())));
+ store64(TrustedImm64(JSValue::encode(jsUndefined())), Address(callFrameRegister, sizeof(Register) * dst));
+#else
+ emitLoadPayload(currentInstruction[2].u.operand, regT0);
+ notUndefinedScope = branch32(NotEqual, tagFor(currentInstruction[2].u.operand), TrustedImm32(JSValue::UndefinedTag));
+ emitStore(dst, jsUndefined());
+#endif
+ Jump done = jump();
+ notUndefinedScope.link(this);
+
+ FunctionExecutable* function = m_codeBlock->functionExpr(currentInstruction[3].u.operand);
+ OpcodeID opcodeID = m_vm->interpreter->getOpcodeID(currentInstruction->u.opcode);
+
+ if (opcodeID == op_new_func_exp)
+ callOperation(operationNewFunction, dst, regT0, function);
+ else if (opcodeID == op_new_generator_func_exp)
+ callOperation(operationNewGeneratorFunction, dst, regT0, function);
+ else {
+ ASSERT(opcodeID == op_new_async_func_exp);
+ callOperation(operationNewAsyncFunction, dst, regT0, function);
+ }
+
+ done.link(this);
+}
+
+void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
+{
+ emitNewFuncExprCommon(currentInstruction);
+}
+
+void JIT::emit_op_new_generator_func_exp(Instruction* currentInstruction)
+{
+ emitNewFuncExprCommon(currentInstruction);
+}
+
+void JIT::emit_op_new_async_func_exp(Instruction* currentInstruction)
+{
+ emitNewFuncExprCommon(currentInstruction);
}
void JIT::emit_op_new_array(Instruction* currentInstruction)
@@ -1221,16 +1079,445 @@ void JIT::emit_op_new_array_buffer(Instruction* currentInstruction)
callOperation(operationNewArrayBufferWithProfile, dst, currentInstruction[4].u.arrayAllocationProfile, values, size);
}
-void JIT::emitSlow_op_captured_mov(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emit_op_new_array_with_spread(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_new_array_with_spread);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_spread(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_spread);
+ slowPathCall.call();
+}
+
+#if USE(JSVALUE64)
+void JIT::emit_op_has_structure_property(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int enumerator = currentInstruction[4].u.operand;
+
+ emitGetVirtualRegister(base, regT0);
+ emitGetVirtualRegister(enumerator, regT1);
+ emitJumpSlowCaseIfNotJSCell(regT0, base);
+
+ load32(Address(regT0, JSCell::structureIDOffset()), regT0);
+ addSlowCase(branch32(NotEqual, regT0, Address(regT1, JSPropertyNameEnumerator::cachedStructureIDOffset())));
+
+ move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0);
+ emitPutVirtualRegister(dst);
+}
+
+void JIT::privateCompileHasIndexedProperty(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
+{
+ Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;
+
+ PatchableJump badType;
+
+ // FIXME: Add support for other types like TypedArrays and Arguments.
+ // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034.
+ JumpList slowCases = emitLoadForArrayMode(currentInstruction, arrayMode, badType);
+ move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0);
+ Jump done = jump();
+
+ LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
+
+ patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
+ patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
+
+ patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
+
+ byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
+ m_codeBlock, patchBuffer,
+ ("Baseline has_indexed_property stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
+
+ MacroAssembler::repatchJump(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
+ MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(operationHasIndexedPropertyGeneric));
+}
+
+void JIT::emit_op_has_indexed_property(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int property = currentInstruction[3].u.operand;
+ ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+ ByValInfo* byValInfo = m_codeBlock->addByValInfo();
+
+ emitGetVirtualRegisters(base, regT0, property, regT1);
+
+ // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
+ // We check the value as if it were a uint32 against m_vectorLength - which will always fail if the
+ // number was negative, since m_vectorLength is always less than INT_MAX (the total allocation size
+ // is always less than 4GB). As such, zero-extending will have been correct (and extending the value
+ // to 64 bits is necessary since it's used in the address calculation). We zero-extend rather than
+ // sign-extend since it makes it easier to re-tag the value in the slow case.
+ zeroExtend32ToPtr(regT1, regT1);
+
+ emitJumpSlowCaseIfNotJSCell(regT0, base);
+ emitArrayProfilingSiteWithCell(regT0, regT2, profile);
+ and32(TrustedImm32(IndexingShapeMask), regT2);
+
+ JITArrayMode mode = chooseArrayMode(profile);
+ PatchableJump badType;
+
+ // FIXME: Add support for other types like TypedArrays and Arguments.
+ // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034.
+ JumpList slowCases = emitLoadForArrayMode(currentInstruction, mode, badType);
+
+ move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0);
+
+ addSlowCase(badType);
+ addSlowCase(slowCases);
+
+ Label done = label();
+
+ emitPutVirtualRegister(dst);
+
+ Label nextHotPath = label();
+
+ m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, PatchableJump(), badType, mode, profile, done, nextHotPath));
+}
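// Concrete example of the unsigned-compare trick described above: an index of -1
// zero-extends to 0xFFFFFFFF, which always compares >= m_vectorLength (the vector
// length is well below INT_MAX), so negative indices reliably take the slow path.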
+
+void JIT::emitSlow_op_has_indexed_property(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int property = currentInstruction[3].u.operand;
+ ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;
+
+ linkSlowCaseIfNotJSCell(iter, base); // base cell check
+ linkSlowCase(iter); // base array check
+ linkSlowCase(iter); // vector length check
+ linkSlowCase(iter); // empty value
+
+ Label slowPath = label();
+
+ emitGetVirtualRegister(base, regT0);
+ emitGetVirtualRegister(property, regT1);
+ Call call = callOperation(operationHasIndexedPropertyDefault, dst, regT0, regT1, byValInfo);
+
+ m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
+ m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
+ m_byValInstructionIndex++;
+}
+
+void JIT::emit_op_get_direct_pname(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int index = currentInstruction[4].u.operand;
+ int enumerator = currentInstruction[5].u.operand;
+
+ // Check that base is a cell
+ emitGetVirtualRegister(base, regT0);
+ emitJumpSlowCaseIfNotJSCell(regT0, base);
+
+ // Check the structure
+ emitGetVirtualRegister(enumerator, regT2);
+ load32(Address(regT0, JSCell::structureIDOffset()), regT1);
+ addSlowCase(branch32(NotEqual, regT1, Address(regT2, JSPropertyNameEnumerator::cachedStructureIDOffset())));
+
+ // Compute the offset
+ emitGetVirtualRegister(index, regT1);
+ // If index is less than the enumerator's cached inline storage, then it's an inline access
+ Jump outOfLineAccess = branch32(AboveOrEqual, regT1, Address(regT2, JSPropertyNameEnumerator::cachedInlineCapacityOffset()));
+ addPtr(TrustedImm32(JSObject::offsetOfInlineStorage()), regT0);
+ signExtend32ToPtr(regT1, regT1);
+ load64(BaseIndex(regT0, regT1, TimesEight), regT0);
+
+ Jump done = jump();
+
+ // Otherwise it's out of line
+ outOfLineAccess.link(this);
+ loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
+ sub32(Address(regT2, JSPropertyNameEnumerator::cachedInlineCapacityOffset()), regT1);
+ neg32(regT1);
+ signExtend32ToPtr(regT1, regT1);
+ int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue);
+ load64(BaseIndex(regT0, regT1, TimesEight, offsetOfFirstProperty), regT0);
+
+ done.link(this);
+ emitValueProfilingSite();
+ emitPutVirtualRegister(dst, regT0);
+}
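// Rough picture of the out-of-line branch above (illustrative, not a precise formula):
// out-of-line properties live at negative offsets from the butterfly pointer, so the
// code computes (cachedInlineCapacity - index), scales it by sizeof(EncodedJSValue),
// and biases it by offsetOfFirstProperty to walk backwards into that storage, giving
// a single indexed load for property number `index`.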
+
+void JIT::emitSlow_op_get_direct_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- VariableWatchpointSet* set = currentInstruction[3].u.watchpointSet;
- if (!set || set->state() == IsInvalidated)
- return;
+ int base = currentInstruction[2].u.operand;
+ linkSlowCaseIfNotJSCell(iter, base);
linkSlowCase(iter);
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_captured_mov);
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_direct_pname);
slowPathCall.call();
}
+void JIT::emit_op_enumerator_structure_pname(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int enumerator = currentInstruction[2].u.operand;
+ int index = currentInstruction[3].u.operand;
+
+ emitGetVirtualRegister(index, regT0);
+ emitGetVirtualRegister(enumerator, regT1);
+ Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endStructurePropertyIndexOffset()));
+
+ move(TrustedImm64(JSValue::encode(jsNull())), regT0);
+
+ Jump done = jump();
+ inBounds.link(this);
+
+ loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1);
+ signExtend32ToPtr(regT0, regT0);
+ load64(BaseIndex(regT1, regT0, TimesEight), regT0);
+
+ done.link(this);
+ emitPutVirtualRegister(dst);
+}
+
+void JIT::emit_op_enumerator_generic_pname(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int enumerator = currentInstruction[2].u.operand;
+ int index = currentInstruction[3].u.operand;
+
+ emitGetVirtualRegister(index, regT0);
+ emitGetVirtualRegister(enumerator, regT1);
+ Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endGenericPropertyIndexOffset()));
+
+ move(TrustedImm64(JSValue::encode(jsNull())), regT0);
+
+ Jump done = jump();
+ inBounds.link(this);
+
+ loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1);
+ signExtend32ToPtr(regT0, regT0);
+ load64(BaseIndex(regT1, regT0, TimesEight), regT0);
+
+ done.link(this);
+ emitPutVirtualRegister(dst);
+}
+
+void JIT::emit_op_profile_type(Instruction* currentInstruction)
+{
+ TypeLocation* cachedTypeLocation = currentInstruction[2].u.location;
+ int valueToProfile = currentInstruction[1].u.operand;
+
+ emitGetVirtualRegister(valueToProfile, regT0);
+
+ JumpList jumpToEnd;
+
+ jumpToEnd.append(branchTest64(Zero, regT0));
+
+ // Compile in a predictive type check, if possible, to see if we can skip writing to the log.
+ // These typechecks are inlined to match those of the 64-bit JSValue type checks.
+ if (cachedTypeLocation->m_lastSeenType == TypeUndefined)
+ jumpToEnd.append(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsUndefined()))));
+ else if (cachedTypeLocation->m_lastSeenType == TypeNull)
+ jumpToEnd.append(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsNull()))));
+ else if (cachedTypeLocation->m_lastSeenType == TypeBoolean) {
+ move(regT0, regT1);
+ and64(TrustedImm32(~1), regT1);
+ jumpToEnd.append(branch64(Equal, regT1, TrustedImm64(ValueFalse)));
+ } else if (cachedTypeLocation->m_lastSeenType == TypeAnyInt)
+ jumpToEnd.append(emitJumpIfInt(regT0));
+ else if (cachedTypeLocation->m_lastSeenType == TypeNumber)
+ jumpToEnd.append(emitJumpIfNumber(regT0));
+ else if (cachedTypeLocation->m_lastSeenType == TypeString) {
+ Jump isNotCell = emitJumpIfNotJSCell(regT0);
+ jumpToEnd.append(branch8(Equal, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType)));
+ isNotCell.link(this);
+ }
+
+ // Load the type profiling log into T2.
+ TypeProfilerLog* cachedTypeProfilerLog = m_vm->typeProfilerLog();
+ move(TrustedImmPtr(cachedTypeProfilerLog), regT2);
+ // Load the next log entry into T1.
+ loadPtr(Address(regT2, TypeProfilerLog::currentLogEntryOffset()), regT1);
+
+ // Store the JSValue onto the log entry.
+ store64(regT0, Address(regT1, TypeProfilerLog::LogEntry::valueOffset()));
+
+ // Store the structureID of the cell if T0 is a cell, otherwise, store 0 on the log entry.
+ Jump notCell = emitJumpIfNotJSCell(regT0);
+ load32(Address(regT0, JSCell::structureIDOffset()), regT0);
+ store32(regT0, Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset()));
+ Jump skipIsCell = jump();
+ notCell.link(this);
+ store32(TrustedImm32(0), Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset()));
+ skipIsCell.link(this);
+
+ // Store the typeLocation on the log entry.
+ move(TrustedImmPtr(cachedTypeLocation), regT0);
+ store64(regT0, Address(regT1, TypeProfilerLog::LogEntry::locationOffset()));
+
+ // Increment the current log entry.
+ addPtr(TrustedImm32(sizeof(TypeProfilerLog::LogEntry)), regT1);
+ store64(regT1, Address(regT2, TypeProfilerLog::currentLogEntryOffset()));
+ Jump skipClearLog = branchPtr(NotEqual, regT1, TrustedImmPtr(cachedTypeProfilerLog->logEndPtr()));
+ // Clear the log if we're at the end of the log.
+ callOperation(operationProcessTypeProfilerLog);
+ skipClearLog.link(this);
+
+ jumpToEnd.link(this);
+}
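// Note on the boolean fast check above: in the JSVALUE64 encoding, true is false with
// the low bit set, so and64(~1) folds both onto ValueFalse for a single compare. The
// remainder of the function appends (value, structureID or 0, typeLocation) to the
// TypeProfilerLog and calls operationProcessTypeProfilerLog once the cursor reaches
// logEndPtr(), i.e. the log is flushed lazily when it fills up.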
+
+void JIT::emit_op_log_shadow_chicken_prologue(Instruction* currentInstruction)
+{
+ updateTopCallFrame();
+ static_assert(nonArgGPR0 != regT0 && nonArgGPR0 != regT2, "we will have problems if this is true.");
+ GPRReg shadowPacketReg = regT0;
+ GPRReg scratch1Reg = nonArgGPR0; // This must be a non-argument register.
+ GPRReg scratch2Reg = regT2;
+ ensureShadowChickenPacket(shadowPacketReg, scratch1Reg, scratch2Reg);
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT3);
+ logShadowChickenProloguePacket(shadowPacketReg, scratch1Reg, regT3);
+}
+
+void JIT::emit_op_log_shadow_chicken_tail(Instruction* currentInstruction)
+{
+ updateTopCallFrame();
+ static_assert(nonArgGPR0 != regT0 && nonArgGPR0 != regT2, "we will have problems if this is true.");
+ GPRReg shadowPacketReg = regT0;
+ GPRReg scratch1Reg = nonArgGPR0; // This must be a non-argument register.
+ GPRReg scratch2Reg = regT2;
+ ensureShadowChickenPacket(shadowPacketReg, scratch1Reg, scratch2Reg);
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT2);
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT3);
+ logShadowChickenTailPacket(shadowPacketReg, JSValueRegs(regT2), regT3, m_codeBlock, CallSiteIndex(m_bytecodeOffset));
+}
+
+#endif // USE(JSVALUE64)
+
+void JIT::emit_op_get_enumerable_length(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_enumerable_length);
+ slowPathCall.call();
+}
+
+void JIT::emitSlow_op_has_structure_property(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_has_structure_property);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_has_generic_property(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_has_generic_property);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_get_property_enumerator(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_property_enumerator);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_to_index_string(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_index_string);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_profile_control_flow(Instruction* currentInstruction)
+{
+ BasicBlockLocation* basicBlockLocation = currentInstruction[1].u.basicBlockLocation;
+#if USE(JSVALUE64)
+ basicBlockLocation->emitExecuteCode(*this);
+#else
+ basicBlockLocation->emitExecuteCode(*this, regT0);
+#endif
+}
+
+void JIT::emit_op_create_direct_arguments(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_direct_arguments);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_create_scoped_arguments(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_scoped_arguments);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_create_cloned_arguments(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_cloned_arguments);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_argument_count(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ load32(payloadFor(CallFrameSlot::argumentCount), regT0);
+ sub32(TrustedImm32(1), regT0);
+ JSValueRegs result = JSValueRegs::withTwoAvailableRegs(regT0, regT1);
+ boxInt32(regT0, result);
+ emitPutVirtualRegister(dst, result);
+}
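// Illustrative equivalent of the code above (the argument-count slot includes `this`,
// hence the subtraction):
//   int32_t count = callFrame->argumentCountIncludingThis() - 1;
//   r[dst] = jsNumber(count);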
+
+void JIT::emit_op_create_rest(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_rest);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_get_rest_length(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ unsigned numParamsToSkip = currentInstruction[2].u.unsignedValue;
+ load32(payloadFor(CallFrameSlot::argumentCount), regT0);
+ sub32(TrustedImm32(1), regT0);
+ Jump zeroLength = branch32(LessThanOrEqual, regT0, Imm32(numParamsToSkip));
+ sub32(Imm32(numParamsToSkip), regT0);
+#if USE(JSVALUE64)
+ boxInt32(regT0, JSValueRegs(regT0));
+#endif
+ Jump done = jump();
+
+ zeroLength.link(this);
+#if USE(JSVALUE64)
+ move(TrustedImm64(JSValue::encode(jsNumber(0))), regT0);
+#else
+ move(TrustedImm32(0), regT0);
+#endif
+
+ done.link(this);
+#if USE(JSVALUE64)
+ emitPutVirtualRegister(dst, regT0);
+#else
+ move(TrustedImm32(JSValue::Int32Tag), regT1);
+ emitPutVirtualRegister(dst, JSValueRegs(regT1, regT0));
+#endif
+}
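// Roughly what the code above computes (illustrative sketch):
//   int32_t count = callFrame->argumentCountIncludingThis() - 1;
//   int32_t restLength = count > (int32_t)numParamsToSkip ? count - numParamsToSkip : 0;
//   r[dst] = jsNumber(restLength);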
+
+void JIT::emit_op_get_argument(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int index = currentInstruction[2].u.operand;
+#if USE(JSVALUE64)
+ JSValueRegs resultRegs(regT0);
+#else
+ JSValueRegs resultRegs(regT1, regT0);
+#endif
+
+ load32(payloadFor(CallFrameSlot::argumentCount), regT2);
+ Jump argumentOutOfBounds = branch32(LessThanOrEqual, regT2, TrustedImm32(index));
+ loadValue(addressFor(CallFrameSlot::thisArgument + index), resultRegs);
+ Jump done = jump();
+
+ argumentOutOfBounds.link(this);
+ moveValue(jsUndefined(), resultRegs);
+
+ done.link(this);
+ emitValueProfilingSite();
+ emitPutVirtualRegister(dst, resultRegs);
+}
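// Illustrative sketch of the fast path above; `index` is a compile-time constant, and
// the slot loaded is thisArgument + index, so the check and load reduce to:
//   r[dst] = index < callFrame->argumentCountIncludingThis()
//       ? callFrame->uncheckedArgument(index - 1)
//       : jsUndefined();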
+
} // namespace JSC
#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
index 29e8880aa..b53b208cc 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2012-2016 Apple Inc. All rights reserved.
* Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com>
*
* Redistribution and use in source and binary forms, with or without
@@ -31,58 +31,46 @@
#include "JIT.h"
#include "CCallHelpers.h"
-#include "Debugger.h"
+#include "Exception.h"
#include "JITInlines.h"
#include "JSArray.h"
#include "JSCell.h"
#include "JSFunction.h"
-#include "JSPropertyNameIterator.h"
-#include "JSVariableObject.h"
+#include "JSPropertyNameEnumerator.h"
#include "LinkBuffer.h"
+#include "MaxFrameExtentForSlowPathCall.h"
+#include "Opcode.h"
#include "SlowPathCall.h"
+#include "TypeProfilerLog.h"
#include "VirtualRegister.h"
namespace JSC {
JIT::CodeRef JIT::privateCompileCTINativeCall(VM* vm, NativeFunction func)
{
+ // FIXME: This should be able to log ShadowChicken prologue packets.
+ // https://bugs.webkit.org/show_bug.cgi?id=155689
+
Call nativeCall;
- emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
+ emitFunctionPrologue();
+ emitPutToCallFrameHeader(0, CallFrameSlot::codeBlock);
storePtr(callFrameRegister, &m_vm->topCallFrame);
#if CPU(X86)
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- emitGetCallerFrameFromCallFrameHeaderPtr(regT0);
- emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1, regT0);
- emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
-
- peek(regT1);
- emitPutReturnPCToCallFrameHeader(regT1);
-
// Calling convention: f(ecx, edx, ...);
// Host function signature: f(ExecState*);
move(callFrameRegister, X86Registers::ecx);
- subPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister); // Align stack after call.
-
- move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ subPtr(TrustedImm32(8), stackPointerRegister); // Align stack for call.
+ storePtr(X86Registers::ecx, Address(stackPointerRegister));
// call the function
nativeCall = call();
- addPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister);
-
-#elif CPU(ARM) || CPU(SH4) || CPU(MIPS)
- // Load caller frame's scope chain into this callframe so that whatever we call can get to its global data.
- emitGetCallerFrameFromCallFrameHeaderPtr(regT2);
- emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1, regT2);
- emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
-
- preserveReturnAddressAfterCall(regT3); // Callee preserved
- emitPutReturnPCToCallFrameHeader(regT3);
+ addPtr(TrustedImm32(8), stackPointerRegister);
+#elif CPU(ARM) || CPU(MIPS)
#if CPU(MIPS)
// Allocate stack space for (unused) 16 bytes (8-byte aligned) for 4 arguments.
subPtr(TrustedImm32(16), stackPointerRegister);
@@ -92,8 +80,7 @@ JIT::CodeRef JIT::privateCompileCTINativeCall(VM* vm, NativeFunction func)
// Host function signature is f(ExecState*).
move(callFrameRegister, argumentGPR0);
- emitGetFromCallFrameHeaderPtr(JSStack::Callee, argumentGPR1);
- move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, argumentGPR1);
loadPtr(Address(argumentGPR1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
// call the function
@@ -107,28 +94,25 @@ JIT::CodeRef JIT::privateCompileCTINativeCall(VM* vm, NativeFunction func)
restoreReturnAddressBeforeReturn(regT3);
#else
#error "JIT not supported on this platform."
- breakpoint();
+ abortWithReason(JITNotSupported);
#endif // CPU(X86)
// Check for an exception
- Jump sawException = branch32(NotEqual, AbsoluteAddress(reinterpret_cast<char*>(vm->addressOfException()) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
+ Jump sawException = branch32(NotEqual, AbsoluteAddress(vm->addressOfException()), TrustedImm32(0));
+ emitFunctionEpilogue();
// Return.
ret();
// Handle an exception
sawException.link(this);
- // Grab the return address.
- preserveReturnAddressAfterCall(regT1);
-
- move(TrustedImmPtr(&vm->exceptionLocation), regT2);
- storePtr(regT1, regT2);
storePtr(callFrameRegister, &m_vm->topCallFrame);
#if CPU(X86)
- addPtr(TrustedImm32(-12), stackPointerRegister);
- push(callFrameRegister);
+ addPtr(TrustedImm32(-4), stackPointerRegister);
+ move(callFrameRegister, X86Registers::ecx);
+ push(X86Registers::ecx);
#else
move(callFrameRegister, argumentGPR0);
#endif
@@ -136,13 +120,13 @@ JIT::CodeRef JIT::privateCompileCTINativeCall(VM* vm, NativeFunction func)
call(regT3);
#if CPU(X86)
- addPtr(TrustedImm32(16), stackPointerRegister);
+ addPtr(TrustedImm32(8), stackPointerRegister);
#endif
jumpToExceptionHandler();
// All trampolines constructed! copy the code, link up calls, and set the pointers on the Machine object.
- LinkBuffer patchBuffer(*m_vm, this, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*m_vm, *this, GLOBAL_THUNK_ID);
patchBuffer.link(nativeCall, FunctionPtr(func));
return FINALIZE_CODE(patchBuffer, ("JIT CTI native call"));
@@ -161,21 +145,12 @@ void JIT::emit_op_mov(Instruction* currentInstruction)
}
}
-void JIT::emit_op_captured_mov(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int src = currentInstruction[2].u.operand;
-
- emitLoad(src, regT1, regT0);
- emitNotifyWrite(regT1, regT0, regT2, currentInstruction[3].u.watchpointSet);
- emitStore(dst, regT1, regT0);
-}
-
void JIT::emit_op_end(Instruction* currentInstruction)
{
ASSERT(returnValueGPR != callFrameRegister);
- emitLoad(currentInstruction[1].u.operand, regT1, regT0);
- restoreReturnAddressBeforeReturn(Address(callFrameRegister, CallFrame::returnPCOffset()));
+ emitLoad(currentInstruction[1].u.operand, regT1, returnValueGPR);
+ emitRestoreCalleeSaves();
+ emitFunctionEpilogue();
ret();
}
@@ -189,38 +164,57 @@ void JIT::emit_op_new_object(Instruction* currentInstruction)
{
Structure* structure = currentInstruction[3].u.objectAllocationProfile->structure();
size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity());
- MarkedAllocator* allocator = &m_vm->heap.allocatorForObjectWithoutDestructor(allocationSize);
+ MarkedAllocator* allocator = subspaceFor<JSFinalObject>(*m_vm)->allocatorFor(allocationSize);
- RegisterID resultReg = regT0;
+ RegisterID resultReg = returnValueGPR;
RegisterID allocatorReg = regT1;
- RegisterID scratchReg = regT2;
+ RegisterID scratchReg = regT3;
move(TrustedImmPtr(allocator), allocatorReg);
- emitAllocateJSObject(allocatorReg, TrustedImmPtr(structure), resultReg, scratchReg);
+ if (allocator)
+ addSlowCase(Jump());
+ JumpList slowCases;
+ emitAllocateJSObject(resultReg, allocator, allocatorReg, TrustedImmPtr(structure), TrustedImmPtr(0), scratchReg, slowCases);
+ emitInitializeInlineStorage(resultReg, structure->inlineCapacity());
+ addSlowCase(slowCases);
emitStoreCell(currentInstruction[1].u.operand, resultReg);
}
void JIT::emitSlow_op_new_object(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
+ linkSlowCase(iter);
int dst = currentInstruction[1].u.operand;
Structure* structure = currentInstruction[3].u.objectAllocationProfile->structure();
callOperation(operationNewObject, structure);
emitStoreCell(dst, returnValueGPR);
}
-void JIT::emit_op_check_has_instance(Instruction* currentInstruction)
+void JIT::emit_op_overrides_has_instance(Instruction* currentInstruction)
{
- int baseVal = currentInstruction[3].u.operand;
+ int dst = currentInstruction[1].u.operand;
+ int constructor = currentInstruction[2].u.operand;
+ int hasInstanceValue = currentInstruction[3].u.operand;
- emitLoadPayload(baseVal, regT0);
+ emitLoadPayload(hasInstanceValue, regT0);
+ // We don't jump if we know what Symbol.hasInstance would do.
+ Jump hasInstanceValueNotCell = emitJumpIfNotJSCell(hasInstanceValue);
+ Jump customhasInstanceValue = branchPtr(NotEqual, regT0, TrustedImmPtr(m_codeBlock->globalObject()->functionProtoHasInstanceSymbolFunction()));
+
+ // We know that constructor is an object from the way bytecode is emitted for instanceof expressions.
+ emitLoadPayload(constructor, regT0);
+
+ // Check whether the constructor 'ImplementsDefaultHasInstance', i.e. that the object is not a C-API user nor a bound function.
+ test8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance), regT0);
+ Jump done = jump();
+
+ hasInstanceValueNotCell.link(this);
+ customhasInstanceValue.link(this);
+ move(TrustedImm32(1), regT0);
+
+ done.link(this);
+ emitStoreBool(dst, regT0);
- // Check that baseVal is a cell.
- emitJumpSlowCaseIfNotJSCell(baseVal);
-
- // Check that baseVal 'ImplementsHasInstance'.
- loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
- addSlowCase(branchTest8(Zero, Address(regT0, Structure::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance)));
}
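// In effect (illustrative): dst is false only when hasInstanceValue is the default
// Function.prototype[Symbol.hasInstance] function and the constructor's type-info flags
// include ImplementsDefaultHasInstance; every other combination reports true, which
// makes the instanceof expression take the generic path.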
void JIT::emit_op_instanceof(Instruction* currentInstruction)
@@ -234,13 +228,12 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
emitLoadPayload(value, regT2);
emitLoadPayload(proto, regT1);
- // Check that proto are cells. baseVal must be a cell - this is checked by op_check_has_instance.
+ // Check that value and proto are cells. baseVal must be a cell - this is checked by the get_by_id for Symbol.hasInstance.
emitJumpSlowCaseIfNotJSCell(value);
emitJumpSlowCaseIfNotJSCell(proto);
// Check that prototype is an object
- loadPtr(Address(regT1, JSCell::structureOffset()), regT3);
- addSlowCase(emitJumpIfNotObject(regT3));
+ addSlowCase(emitJumpIfCellNotObject(regT1));
// Optimistically load the result true, and start looping.
// Initially, regT1 still contains proto and regT2 still contains value.
@@ -248,9 +241,11 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
move(TrustedImm32(1), regT0);
Label loop(this);
+ addSlowCase(branch8(Equal, Address(regT2, JSCell::typeInfoTypeOffset()), TrustedImm32(ProxyObjectType)));
+
// Load the prototype of the cell in regT2. If this is equal to regT1 - WIN!
// Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
- loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
+ loadPtr(Address(regT2, JSCell::structureIDOffset()), regT2);
load32(Address(regT2, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
Jump isInstance = branchPtr(Equal, regT2, regT1);
branchTest32(NonZero, regT2).linkTo(loop, this);
@@ -263,20 +258,10 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
emitStoreBool(dst, regT0);
}
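// Conceptually, the emitted loop above performs (illustrative only):
//   for (JSCell* cell = value; ;) {
//       if (cell->type() == ProxyObjectType) goto slowCase;  // proxies can trap getPrototypeOf
//       cell = prototypeOf(cell);          // via the Structure's prototype slot
//       if (cell == proto) { result = true; break; }
//       if (!cell)         { result = false; break; }
//   }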
-void JIT::emitSlow_op_check_has_instance(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emit_op_instanceof_custom(Instruction*)
{
- int dst = currentInstruction[1].u.operand;
- int value = currentInstruction[2].u.operand;
- int baseVal = currentInstruction[3].u.operand;
-
- linkSlowCaseIfNotJSCell(iter, baseVal);
- linkSlowCase(iter);
-
- emitLoad(value, regT1, regT0);
- emitLoad(baseVal, regT3, regT2);
- callOperation(operationCheckHasInstance, dst, regT1, regT0, regT3, regT2);
-
- emitJumpSlowToHot(jump(), currentInstruction[4].u.operand);
+ // This always goes to slow path since we expect it to be rare.
+ addSlowCase(jump());
}
void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -288,12 +273,40 @@ void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCas
linkSlowCaseIfNotJSCell(iter, value);
linkSlowCaseIfNotJSCell(iter, proto);
linkSlowCase(iter);
+ linkSlowCase(iter);
emitLoad(value, regT1, regT0);
emitLoad(proto, regT3, regT2);
callOperation(operationInstanceOf, dst, regT1, regT0, regT3, regT2);
}
+void JIT::emitSlow_op_instanceof_custom(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int dst = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
+ int constructor = currentInstruction[3].u.operand;
+ int hasInstanceValue = currentInstruction[4].u.operand;
+
+ linkSlowCase(iter);
+
+ emitLoad(value, regT1, regT0);
+ emitLoadPayload(constructor, regT2);
+ emitLoad(hasInstanceValue, regT4, regT3);
+ callOperation(operationInstanceOfCustom, regT1, regT0, regT2, regT4, regT3);
+ emitStoreBool(dst, returnValueGPR);
+}
+
+void JIT::emit_op_is_empty(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
+
+ emitLoad(value, regT1, regT0);
+ compare32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag), regT0);
+
+ emitStoreBool(dst, regT0);
+}
+
void JIT::emit_op_is_undefined(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
@@ -306,12 +319,12 @@ void JIT::emit_op_is_undefined(Instruction* currentInstruction)
Jump done = jump();
isCell.link(this);
- loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
- Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT1, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
move(TrustedImm32(0), regT0);
Jump notMasqueradesAsUndefined = jump();
isMasqueradesAsUndefined.link(this);
+ loadPtr(Address(regT0, JSCell::structureIDOffset()), regT1);
move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
loadPtr(Address(regT1, Structure::globalObjectOffset()), regT1);
compare32(Equal, regT0, regT1, regT0);
@@ -342,44 +355,41 @@ void JIT::emit_op_is_number(Instruction* currentInstruction)
emitStoreBool(dst, regT0);
}
-void JIT::emit_op_is_string(Instruction* currentInstruction)
+void JIT::emit_op_is_cell_with_type(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
int value = currentInstruction[2].u.operand;
-
+ int type = currentInstruction[3].u.operand;
+
emitLoad(value, regT1, regT0);
Jump isNotCell = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
-
- loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
- compare8(Equal, Address(regT1, Structure::typeInfoTypeOffset()), TrustedImm32(StringType), regT0);
+
+ compare8(Equal, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(type), regT0);
Jump done = jump();
-
+
isNotCell.link(this);
move(TrustedImm32(0), regT0);
-
+
done.link(this);
emitStoreBool(dst, regT0);
}
-void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
+void JIT::emit_op_is_object(Instruction* currentInstruction)
{
- int activation = currentInstruction[1].u.operand;
- Jump activationNotCreated = branch32(Equal, tagFor(activation), TrustedImm32(JSValue::EmptyValueTag));
- emitLoadPayload(activation, regT0);
- callOperation(operationTearOffActivation, regT0);
- activationNotCreated.link(this);
-}
+ int dst = currentInstruction[1].u.operand;
+ int value = currentInstruction[2].u.operand;
-void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction)
-{
- VirtualRegister arguments = VirtualRegister(currentInstruction[1].u.operand);
- int activation = currentInstruction[2].u.operand;
+ emitLoad(value, regT1, regT0);
+ Jump isNotCell = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
+
+ compare8(AboveOrEqual, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType), regT0);
+ Jump done = jump();
+
+ isNotCell.link(this);
+ move(TrustedImm32(0), regT0);
- Jump argsNotCreated = branch32(Equal, tagFor(unmodifiedArgumentsRegister(arguments).offset()), TrustedImm32(JSValue::EmptyValueTag));
- emitLoadPayload(unmodifiedArgumentsRegister(VirtualRegister(arguments)).offset(), regT0);
- emitLoadPayload(activation, regT1);
- callOperation(operationTearOffArguments, regT0, regT1);
- argsNotCreated.link(this);
+ done.link(this);
+ emitStoreBool(dst, regT0);
}
void JIT::emit_op_to_primitive(Instruction* currentInstruction)
@@ -390,7 +400,7 @@ void JIT::emit_op_to_primitive(Instruction* currentInstruction)
emitLoad(src, regT1, regT0);
Jump isImm = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
- addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
+ addSlowCase(emitJumpIfCellObject(regT0));
isImm.link(this);
if (dst != src)
@@ -405,6 +415,15 @@ void JIT::emitSlow_op_to_primitive(Instruction* currentInstruction, Vector<SlowC
slowPathCall.call();
}
+void JIT::emit_op_set_function_name(Instruction* currentInstruction)
+{
+ int func = currentInstruction[1].u.operand;
+ int name = currentInstruction[2].u.operand;
+ emitLoadPayload(func, regT1);
+ emitLoad(name, regT3, regT2);
+ callOperation(operationSetFunctionName, regT1, regT3, regT2);
+}
+
void JIT::emit_op_strcat(Instruction* currentInstruction)
{
JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_strcat);
@@ -440,31 +459,13 @@ void JIT::emit_op_jfalse(Instruction* currentInstruction)
emitLoad(cond, regT1, regT0);
- ASSERT((JSValue::BooleanTag + 1 == JSValue::Int32Tag) && !(JSValue::Int32Tag + 1));
- addSlowCase(branch32(Below, regT1, TrustedImm32(JSValue::BooleanTag)));
- addJump(branchTest32(Zero, regT0), target);
-}
-
-void JIT::emitSlow_op_jfalse(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- int cond = currentInstruction[1].u.operand;
- unsigned target = currentInstruction[2].u.operand;
-
- linkSlowCase(iter);
-
- if (supportsFloatingPoint()) {
- // regT1 contains the tag from the hot path.
- Jump notNumber = branch32(Above, regT1, TrustedImm32(JSValue::LowestTag));
-
- emitLoadDouble(cond, fpRegT0);
- emitJumpSlowToHot(branchDoubleZeroOrNaN(fpRegT0, fpRegT1), target);
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jfalse));
+ JSValueRegs value(regT1, regT0);
+ GPRReg scratch = regT2;
+ GPRReg result = regT3;
+ bool shouldCheckMasqueradesAsUndefined = true;
+ emitConvertValueToBoolean(value, result, scratch, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject());
- notNumber.link(this);
- }
-
- callOperation(operationConvertJSValueToBoolean, regT1, regT0);
- emitJumpSlowToHot(branchTest32(Zero, returnValueGPR), target); // Inverted.
+ addJump(branchTest32(Zero, result), target);
}
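// emitConvertValueToBoolean above folds the old slow-path ToBoolean call into the hot path for
// jfalse/jtrue. A rough standalone sketch of the conversion it implements (simplified: symbols and
// the masquerades-as-undefined case are elided, and the tagged type below is hypothetical, not a
// JSC type):
#include <cstdint>
#include <string>

struct SketchTaggedValue {
    enum class Kind { Undefined, Null, Boolean, Int32, Double, String, Object } kind;
    bool b = false;
    int32_t i = 0;
    double d = 0;
    std::string s;
};

inline bool sketchToBoolean(const SketchTaggedValue& v)
{
    switch (v.kind) {
    case SketchTaggedValue::Kind::Undefined:
    case SketchTaggedValue::Kind::Null:    return false;
    case SketchTaggedValue::Kind::Boolean: return v.b;
    case SketchTaggedValue::Kind::Int32:   return v.i != 0;
    case SketchTaggedValue::Kind::Double:  return v.d == v.d && v.d != 0; // false for NaN and +/-0
    case SketchTaggedValue::Kind::String:  return !v.s.empty();
    case SketchTaggedValue::Kind::Object:  return true; // unless it masquerades as undefined
    }
    return false;
}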
void JIT::emit_op_jtrue(Instruction* currentInstruction)
@@ -473,32 +474,13 @@ void JIT::emit_op_jtrue(Instruction* currentInstruction)
unsigned target = currentInstruction[2].u.operand;
emitLoad(cond, regT1, regT0);
+ bool shouldCheckMasqueradesAsUndefined = true;
+ JSValueRegs value(regT1, regT0);
+ GPRReg scratch = regT2;
+ GPRReg result = regT3;
+ emitConvertValueToBoolean(value, result, scratch, fpRegT0, fpRegT1, shouldCheckMasqueradesAsUndefined, m_codeBlock->globalObject());
- ASSERT((JSValue::BooleanTag + 1 == JSValue::Int32Tag) && !(JSValue::Int32Tag + 1));
- addSlowCase(branch32(Below, regT1, TrustedImm32(JSValue::BooleanTag)));
- addJump(branchTest32(NonZero, regT0), target);
-}
-
-void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- int cond = currentInstruction[1].u.operand;
- unsigned target = currentInstruction[2].u.operand;
-
- linkSlowCase(iter);
-
- if (supportsFloatingPoint()) {
- // regT1 contains the tag from the hot path.
- Jump notNumber = branch32(Above, regT1, TrustedImm32(JSValue::LowestTag));
-
- emitLoadDouble(cond, fpRegT0);
- emitJumpSlowToHot(branchDoubleNonZero(fpRegT0, fpRegT1), target);
- emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_jtrue));
-
- notNumber.link(this);
- }
-
- callOperation(operationConvertJSValueToBoolean, regT1, regT0);
- emitJumpSlowToHot(branchTest32(NonZero, returnValueGPR), target);
+ addJump(branchTest32(NonZero, result), target);
}
void JIT::emit_op_jeq_null(Instruction* currentInstruction)
@@ -510,9 +492,8 @@ void JIT::emit_op_jeq_null(Instruction* currentInstruction)
Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
- // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- Jump isNotMasqueradesAsUndefined = branchTest8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ Jump isNotMasqueradesAsUndefined = branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ loadPtr(Address(regT0, JSCell::structureIDOffset()), regT2);
move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
addJump(branchPtr(Equal, Address(regT2, Structure::globalObjectOffset()), regT0), target);
Jump masqueradesGlobalObjectIsForeign = jump();
@@ -536,9 +517,8 @@ void JIT::emit_op_jneq_null(Instruction* currentInstruction)
Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
- // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- addJump(branchTest8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
+ addJump(branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
+ loadPtr(Address(regT0, JSCell::structureIDOffset()), regT2);
move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
addJump(branchPtr(NotEqual, Address(regT2, Structure::globalObjectOffset()), regT0), target);
Jump wasNotImmediate = jump();
@@ -560,8 +540,12 @@ void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
unsigned target = currentInstruction[3].u.operand;
emitLoad(src, regT1, regT0);
- addJump(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)), target);
- addJump(branchPtr(NotEqual, regT0, TrustedImmPtr(actualPointerFor(m_codeBlock, ptr))), target);
+ CCallHelpers::Jump notCell = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
+ CCallHelpers::Jump equal = branchPtr(Equal, regT0, TrustedImmPtr(actualPointerFor(m_codeBlock, ptr)));
+ notCell.link(this);
+ store32(TrustedImm32(1), &currentInstruction[4].u.operand);
+ addJump(jump(), target);
+ equal.link(this);
}
void JIT::emit_op_eq(Instruction* currentInstruction)
@@ -592,8 +576,8 @@ void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>:
genericCase.append(getSlowCase(iter)); // tags not equal
linkSlowCase(iter); // tags equal and JSCell
- genericCase.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
- genericCase.append(branchPtr(NotEqual, Address(regT2, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
+ genericCase.append(branchPtr(NotEqual, Address(regT0, JSCell::structureIDOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
+ genericCase.append(branchPtr(NotEqual, Address(regT2, JSCell::structureIDOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
// String case.
callOperation(operationCompareStringEq, regT0, regT2);
@@ -636,8 +620,8 @@ void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>
genericCase.append(getSlowCase(iter)); // tags not equal
linkSlowCase(iter); // tags equal and JSCell
- genericCase.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
- genericCase.append(branchPtr(NotEqual, Address(regT2, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
+ genericCase.append(branchPtr(NotEqual, Address(regT0, JSCell::structureIDOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
+ genericCase.append(branchPtr(NotEqual, Address(regT2, JSCell::structureIDOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
// String case.
callOperation(operationCompareStringEq, regT0, regT2);
@@ -665,12 +649,12 @@ void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqTy
addSlowCase(branch32(NotEqual, regT1, regT3));
addSlowCase(branch32(Below, regT1, TrustedImm32(JSValue::LowestTag)));
- // Jump to a slow case if both are strings.
+ // Jump to a slow case if both are strings or symbols (i.e. non-object cells).
Jump notCell = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
- Jump firstNotString = branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get()));
- addSlowCase(branchPtr(Equal, Address(regT2, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
+ Jump firstIsObject = emitJumpIfCellObject(regT0);
+ addSlowCase(emitJumpIfCellNotObject(regT2));
notCell.link(this);
- firstNotString.link(this);
+ firstIsObject.link(this);
// Simply compare the payloads.
if (type == OpStrictEq)
@@ -719,12 +703,12 @@ void JIT::emit_op_eq_null(Instruction* currentInstruction)
emitLoad(src, regT1, regT0);
Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
move(TrustedImm32(0), regT1);
Jump wasNotMasqueradesAsUndefined = jump();
isMasqueradesAsUndefined.link(this);
+ loadPtr(Address(regT0, JSCell::structureIDOffset()), regT2);
move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2);
compare32(Equal, regT0, regT2, regT1);
@@ -750,12 +734,12 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction)
emitLoad(src, regT1, regT0);
Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
move(TrustedImm32(1), regT1);
Jump wasNotMasqueradesAsUndefined = jump();
isMasqueradesAsUndefined.link(this);
+ loadPtr(Address(regT0, JSCell::structureIDOffset()), regT2);
move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2);
compare32(NotEqual, regT0, regT2, regT1);
@@ -776,121 +760,16 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction)
void JIT::emit_op_throw(Instruction* currentInstruction)
{
ASSERT(regT0 == returnValueGPR);
+ copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
emitLoad(currentInstruction[1].u.operand, regT1, regT0);
callOperationNoExceptionCheck(operationThrow, regT1, regT0);
jumpToExceptionHandler();
}
-void JIT::emit_op_get_pnames(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int i = currentInstruction[3].u.operand;
- int size = currentInstruction[4].u.operand;
- int breakTarget = currentInstruction[5].u.operand;
-
- JumpList isNotObject;
-
- emitLoad(base, regT1, regT0);
- if (!m_codeBlock->isKnownNotImmediate(base))
- isNotObject.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
- if (VirtualRegister(base) != m_codeBlock->thisRegister() || m_codeBlock->isStrictMode()) {
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- isNotObject.append(emitJumpIfNotObject(regT2));
- }
-
- // We could inline the case where you have a valid cache, but
- // this call doesn't seem to be hot.
- Label isObject(this);
- callOperation(operationGetPNames, regT0);
- emitStoreCell(dst, returnValueGPR);
- load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3);
- store32(TrustedImm32(Int32Tag), intTagFor(i));
- store32(TrustedImm32(0), intPayloadFor(i));
- store32(TrustedImm32(Int32Tag), intTagFor(size));
- store32(regT3, payloadFor(size));
- Jump end = jump();
-
- isNotObject.link(this);
- addJump(branch32(Equal, regT1, TrustedImm32(JSValue::NullTag)), breakTarget);
- addJump(branch32(Equal, regT1, TrustedImm32(JSValue::UndefinedTag)), breakTarget);
- callOperation(operationToObject, base, regT1, regT0);
- jump().linkTo(isObject, this);
-
- end.link(this);
-}
-
-void JIT::emit_op_next_pname(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int i = currentInstruction[3].u.operand;
- int size = currentInstruction[4].u.operand;
- int it = currentInstruction[5].u.operand;
- int target = currentInstruction[6].u.operand;
-
- JumpList callHasProperty;
-
- Label begin(this);
- load32(intPayloadFor(i), regT0);
- Jump end = branch32(Equal, regT0, intPayloadFor(size));
-
- // Grab key @ i
- loadPtr(payloadFor(it), regT1);
- loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2);
- load32(BaseIndex(regT2, regT0, TimesEight), regT2);
- store32(TrustedImm32(JSValue::CellTag), tagFor(dst));
- store32(regT2, payloadFor(dst));
-
- // Increment i
- add32(TrustedImm32(1), regT0);
- store32(regT0, intPayloadFor(i));
-
- // Verify that i is valid:
- loadPtr(payloadFor(base), regT0);
-
- // Test base's structure
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- callHasProperty.append(branchPtr(NotEqual, regT2, Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure)))));
-
- // Test base's prototype chain
- loadPtr(Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedPrototypeChain))), regT3);
- loadPtr(Address(regT3, OBJECT_OFFSETOF(StructureChain, m_vector)), regT3);
- addJump(branchTestPtr(Zero, Address(regT3)), target);
-
- Label checkPrototype(this);
- callHasProperty.append(branch32(Equal, Address(regT2, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::NullTag)));
- loadPtr(Address(regT2, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
- loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
- callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3)));
- addPtr(TrustedImm32(sizeof(Structure*)), regT3);
- branchTestPtr(NonZero, Address(regT3)).linkTo(checkPrototype, this);
-
- // Continue loop.
- addJump(jump(), target);
-
- // Slow case: Ask the object if i is valid.
- callHasProperty.link(this);
- loadPtr(addressFor(dst), regT1);
- callOperation(operationHasProperty, regT0, regT1);
-
- // Test for valid key.
- addJump(branchTest32(NonZero, regT0), target);
- jump().linkTo(begin, this);
-
- // End of loop.
- end.link(this);
-}
-
void JIT::emit_op_push_with_scope(Instruction* currentInstruction)
{
- emitLoad(currentInstruction[1].u.operand, regT1, regT0);
- callOperation(operationPushWithScope, regT1, regT0);
-}
-
-void JIT::emit_op_pop_scope(Instruction*)
-{
- callOperation(operationPopScope);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_push_with_scope);
+ slowPathCall.call();
}
void JIT::emit_op_to_number(Instruction* currentInstruction)
@@ -904,6 +783,7 @@ void JIT::emit_op_to_number(Instruction* currentInstruction)
addSlowCase(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::LowestTag)));
isInt32.link(this);
+ emitValueProfilingSite();
if (src != dst)
emitStore(dst, regT1, regT0);
}
@@ -916,25 +796,81 @@ void JIT::emitSlow_op_to_number(Instruction* currentInstruction, Vector<SlowCase
slowPathCall.call();
}
-void JIT::emit_op_push_name_scope(Instruction* currentInstruction)
+void JIT::emit_op_to_string(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
+ addSlowCase(branch8(NotEqual, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType)));
+
+ if (src != dst)
+ emitStore(dst, regT1, regT0);
+}
+
+void JIT::emitSlow_op_to_string(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- emitLoad(currentInstruction[2].u.operand, regT1, regT0);
- callOperation(operationPushNameScope, &m_codeBlock->identifier(currentInstruction[1].u.operand), regT1, regT0, currentInstruction[3].u.operand);
+ linkSlowCase(iter); // Not JSCell.
+ linkSlowCase(iter); // Not JSString.
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_string);
+ slowPathCall.call();
}
void JIT::emit_op_catch(Instruction* currentInstruction)
{
+ restoreCalleeSavesFromVMEntryFrameCalleeSavesBuffer();
+
move(TrustedImmPtr(m_vm), regT3);
// operationThrow returns the callFrame for the handler.
- load32(Address(regT3, VM::callFrameForThrowOffset()), callFrameRegister);
+ load32(Address(regT3, VM::callFrameForCatchOffset()), callFrameRegister);
+ storePtr(TrustedImmPtr(nullptr), Address(regT3, VM::callFrameForCatchOffset()));
+
+ addPtr(TrustedImm32(stackPointerOffsetFor(codeBlock()) * sizeof(Register)), callFrameRegister, stackPointerRegister);
+
+ callOperationNoExceptionCheck(operationCheckIfExceptionIsUncatchableAndNotifyProfiler);
+ Jump isCatchableException = branchTest32(Zero, returnValueGPR);
+ jumpToExceptionHandler();
+ isCatchableException.link(this);
+
+ move(TrustedImmPtr(m_vm), regT3);
+
// Now store the exception returned by operationThrow.
- load32(Address(regT3, VM::exceptionOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
- load32(Address(regT3, VM::exceptionOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
- store32(TrustedImm32(JSValue().payload()), Address(regT3, VM::exceptionOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
- store32(TrustedImm32(JSValue().tag()), Address(regT3, VM::exceptionOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+ load32(Address(regT3, VM::exceptionOffset()), regT2);
+ move(TrustedImm32(JSValue::CellTag), regT1);
+
+ store32(TrustedImm32(0), Address(regT3, VM::exceptionOffset()));
unsigned exception = currentInstruction[1].u.operand;
- emitStore(exception, regT1, regT0);
+ emitStore(exception, regT1, regT2);
+
+ load32(Address(regT2, Exception::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
+ load32(Address(regT2, Exception::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
+
+ unsigned thrownValue = currentInstruction[2].u.operand;
+ emitStore(thrownValue, regT1, regT0);
+}
+
+void JIT::emit_op_assert(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_assert);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_create_lexical_environment(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_lexical_environment);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_get_parent_scope(Instruction* currentInstruction)
+{
+ int currentScope = currentInstruction[2].u.operand;
+ emitLoadPayload(currentScope, regT0);
+ loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
+ emitStoreCell(currentInstruction[1].u.operand, regT0);
}
void JIT::emit_op_switch_imm(Instruction* currentInstruction)
@@ -946,7 +882,7 @@ void JIT::emit_op_switch_imm(Instruction* currentInstruction)
// create jump table for switch destinations, track this switch statement.
SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex);
m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Immediate));
- jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+ jumpTable->ensureCTITable();
emitLoad(scrutinee, regT1, regT0);
callOperation(operationSwitchImmWithUnknownKeyType, regT1, regT0, tableIndex);
@@ -962,7 +898,7 @@ void JIT::emit_op_switch_char(Instruction* currentInstruction)
// create jump table for switch destinations, track this switch statement.
SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex);
m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Character));
- jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
+ jumpTable->ensureCTITable();
emitLoad(scrutinee, regT1, regT0);
callOperation(operationSwitchCharWithUnknownKeyType, regT1, regT0, tableIndex);
@@ -984,12 +920,6 @@ void JIT::emit_op_switch_string(Instruction* currentInstruction)
jump(returnValueGPR);
}
-void JIT::emit_op_throw_static_error(Instruction* currentInstruction)
-{
- emitLoad(m_codeBlock->getConstant(currentInstruction[1].u.operand), regT1, regT0);
- callOperation(operationThrowStaticError, regT1, regT0, currentInstruction[2].u.operand);
-}
-
void JIT::emit_op_debug(Instruction* currentInstruction)
{
load32(codeBlock()->debuggerRequestsAddress(), regT0);
@@ -1013,77 +943,53 @@ void JIT::emit_op_enter(Instruction* currentInstruction)
slowPathCall.call();
}
-void JIT::emit_op_create_activation(Instruction* currentInstruction)
-{
- int activation = currentInstruction[1].u.operand;
-
- Jump activationCreated = branch32(NotEqual, tagFor(activation), TrustedImm32(JSValue::EmptyValueTag));
- callOperation(operationCreateActivation, 0);
- emitStoreCell(activation, returnValueGPR);
- activationCreated.link(this);
-}
-
-void JIT::emit_op_create_arguments(Instruction* currentInstruction)
+void JIT::emit_op_get_scope(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
-
- Jump argsCreated = branch32(NotEqual, tagFor(dst), TrustedImm32(JSValue::EmptyValueTag));
- callOperation(operationCreateArguments);
- emitStoreCell(dst, returnValueGPR);
- emitStoreCell(unmodifiedArgumentsRegister(VirtualRegister(dst)).offset(), returnValueGPR);
- argsCreated.link(this);
-}
-
-void JIT::emit_op_init_lazy_reg(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
-
- emitStore(dst, JSValue());
-}
-
-void JIT::emit_op_get_callee(Instruction* currentInstruction)
-{
- int result = currentInstruction[1].u.operand;
- WriteBarrierBase<JSCell>* cachedFunction = &currentInstruction[2].u.jsCell;
- emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT0);
-
- loadPtr(cachedFunction, regT2);
- addSlowCase(branchPtr(NotEqual, regT0, regT2));
-
- move(TrustedImm32(JSValue::CellTag), regT1);
- emitStore(result, regT1, regT0);
-}
-
-void JIT::emitSlow_op_get_callee(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
-
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_callee);
- slowPathCall.call();
+ emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, regT0);
+ loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT0);
+ emitStoreCell(dst, regT0);
}
void JIT::emit_op_create_this(Instruction* currentInstruction)
{
int callee = currentInstruction[2].u.operand;
+ WriteBarrierBase<JSCell>* cachedFunction = &currentInstruction[4].u.jsCell;
RegisterID calleeReg = regT0;
+ RegisterID rareDataReg = regT4;
RegisterID resultReg = regT0;
RegisterID allocatorReg = regT1;
RegisterID structureReg = regT2;
+ RegisterID cachedFunctionReg = regT4;
RegisterID scratchReg = regT3;
emitLoadPayload(callee, calleeReg);
- loadPtr(Address(calleeReg, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg);
- loadPtr(Address(calleeReg, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureReg);
+ addSlowCase(branch8(NotEqual, Address(calleeReg, JSCell::typeInfoTypeOffset()), TrustedImm32(JSFunctionType)));
+ loadPtr(Address(calleeReg, JSFunction::offsetOfRareData()), rareDataReg);
+ addSlowCase(branchTestPtr(Zero, rareDataReg));
+ loadPtr(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg);
+ loadPtr(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureReg);
addSlowCase(branchTestPtr(Zero, allocatorReg));
- emitAllocateJSObject(allocatorReg, structureReg, resultReg, scratchReg);
+ loadPtr(cachedFunction, cachedFunctionReg);
+ Jump hasSeenMultipleCallees = branchPtr(Equal, cachedFunctionReg, TrustedImmPtr(JSCell::seenMultipleCalleeObjects()));
+ addSlowCase(branchPtr(NotEqual, calleeReg, cachedFunctionReg));
+ hasSeenMultipleCallees.link(this);
+
+ JumpList slowCases;
+ emitAllocateJSObject(resultReg, nullptr, allocatorReg, structureReg, TrustedImmPtr(0), scratchReg, slowCases);
+ addSlowCase(slowCases);
emitStoreCell(currentInstruction[1].u.operand, resultReg);
}
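// The cachedFunction check above uses a sentinel pointer to mean "more than one callee has been
// seen": in that state the identity check is skipped, otherwise the callee must match the cached
// one or the slow path is taken. A small sketch of that caching pattern (the sentinel value and
// types here are illustrative, not the actual JSC ones):
#include <cstdint>

struct SketchCalleeCache {
    static void* seenMultipleCallees() { return reinterpret_cast<void*>(uintptr_t(1)); }
    void* cached = nullptr;

    // Returns true if the fast path may proceed for this callee.
    bool check(void* callee)
    {
        if (cached == seenMultipleCallees())
            return true;         // already polymorphic; skip the identity check
        return cached == callee; // monomorphic: must match, otherwise take the slow path
    }
};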
void JIT::emitSlow_op_create_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
+ linkSlowCase(iter); // Callee::m_type != JSFunctionType.
+ linkSlowCase(iter); // doesn't have rare data
linkSlowCase(iter); // doesn't have an allocation profile
- linkSlowCase(iter); // allocation failed
+ linkSlowCase(iter); // allocation failed (no allocator)
+ linkSlowCase(iter); // allocation failed (allocator empty)
+ linkSlowCase(iter); // cached function didn't match
JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_this);
slowPathCall.call();
@@ -1097,8 +1003,8 @@ void JIT::emit_op_to_this(Instruction* currentInstruction)
emitLoad(thisRegister, regT3, regT2);
addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::CellTag)));
- loadPtr(Address(regT2, JSCell::structureOffset()), regT0);
- addSlowCase(branch8(NotEqual, Address(regT0, Structure::typeInfoTypeOffset()), TrustedImm32(FinalObjectType)));
+ addSlowCase(branch8(NotEqual, Address(regT2, JSCell::typeInfoTypeOffset()), TrustedImm32(FinalObjectType)));
+ loadPtr(Address(regT2, JSCell::structureIDOffset()), regT0);
loadPtr(cachedStructure, regT2);
addSlowCase(branchPtr(NotEqual, regT0, regT2));
}
@@ -1112,81 +1018,323 @@ void JIT::emitSlow_op_to_this(Instruction* currentInstruction, Vector<SlowCaseEn
slowPathCall.call();
}
-void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
+void JIT::emit_op_check_tdz(Instruction* currentInstruction)
{
- load32(m_vm->enabledProfilerAddress(), regT0);
- Jump profilerDone = branchTestPtr(Zero, regT0);
- emitLoad(currentInstruction[1].u.operand, regT1, regT0);
- callOperation(operationProfileWillCall, regT1, regT0);
- profilerDone.link(this);
+ emitLoadTag(currentInstruction[1].u.operand, regT0);
+ addSlowCase(branch32(Equal, regT0, TrustedImm32(JSValue::EmptyValueTag)));
}
-void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
+void JIT::emitSlow_op_check_tdz(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- load32(m_vm->enabledProfilerAddress(), regT0);
- Jump profilerDone = branchTestPtr(Zero, regT0);
- emitLoad(currentInstruction[1].u.operand, regT1, regT0);
- callOperation(operationProfileDidCall, regT1, regT0);
- profilerDone.link(this);
+ linkSlowCase(iter);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_throw_tdz_error);
+ slowPathCall.call();
}
-void JIT::emit_op_get_arguments_length(Instruction* currentInstruction)
+void JIT::emit_op_has_structure_property(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
- int argumentsRegister = currentInstruction[2].u.operand;
- addSlowCase(branch32(NotEqual, tagFor(argumentsRegister), TrustedImm32(JSValue::EmptyValueTag)));
- load32(payloadFor(JSStack::ArgumentCount), regT0);
- sub32(TrustedImm32(1), regT0);
- emitStoreInt32(dst, regT0);
+ int base = currentInstruction[2].u.operand;
+ int enumerator = currentInstruction[4].u.operand;
+
+ emitLoadPayload(base, regT0);
+ emitJumpSlowCaseIfNotJSCell(base);
+
+ emitLoadPayload(enumerator, regT1);
+
+ load32(Address(regT0, JSCell::structureIDOffset()), regT0);
+ addSlowCase(branch32(NotEqual, regT0, Address(regT1, JSPropertyNameEnumerator::cachedStructureIDOffset())));
+
+ move(TrustedImm32(1), regT0);
+ emitStoreBool(dst, regT0);
}
-void JIT::emitSlow_op_get_arguments_length(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::privateCompileHasIndexedProperty(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
+{
+ Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;
+
+ PatchableJump badType;
+
+ // FIXME: Add support for other types like TypedArrays and Arguments.
+ // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034.
+ JumpList slowCases = emitLoadForArrayMode(currentInstruction, arrayMode, badType);
+ move(TrustedImm32(1), regT0);
+ Jump done = jump();
+
+ LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
+
+ patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
+ patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
+
+ patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
+
+ byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
+ m_codeBlock, patchBuffer,
+ ("Baseline has_indexed_property stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
+
+ MacroAssembler::repatchJump(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
+ MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(operationHasIndexedPropertyGeneric));
+}
+
+void JIT::emit_op_has_indexed_property(Instruction* currentInstruction)
{
- linkSlowCase(iter);
int dst = currentInstruction[1].u.operand;
int base = currentInstruction[2].u.operand;
- callOperation(operationGetArgumentsLength, dst, base);
+ int property = currentInstruction[3].u.operand;
+ ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+ ByValInfo* byValInfo = m_codeBlock->addByValInfo();
+
+ emitLoadPayload(base, regT0);
+ emitJumpSlowCaseIfNotJSCell(base);
+
+ emitLoadPayload(property, regT1);
+
+ // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
+ // We check the value as if it were a uint32 against m_vectorLength - which will always fail if the
+ // number was negative, since m_vectorLength is always less than INT32_MAX (the total allocation
+ // size is always less than 4GB). As such, zero-extending is correct (and extending the value to
+ // 64 bits is necessary since it's used in the address calculation). We zero-extend rather than
+ // sign-extend since it makes it easier to re-tag the value in the slow case.
+ zeroExtend32ToPtr(regT1, regT1);
+
+ emitArrayProfilingSiteWithCell(regT0, regT2, profile);
+ and32(TrustedImm32(IndexingShapeMask), regT2);
+
+ JITArrayMode mode = chooseArrayMode(profile);
+ PatchableJump badType;
+
+ // FIXME: Add support for other types like TypedArrays and Arguments.
+ // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034.
+ JumpList slowCases = emitLoadForArrayMode(currentInstruction, mode, badType);
+ move(TrustedImm32(1), regT0);
+
+ addSlowCase(badType);
+ addSlowCase(slowCases);
+
+ Label done = label();
+
+ emitStoreBool(dst, regT0);
+
+ Label nextHotPath = label();
+
+ m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, PatchableJump(), badType, mode, profile, done, nextHotPath));
}
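// The zero-extension trick above works because any negative int32, viewed as a uint32, becomes a
// huge value that can never be below m_vectorLength (which stays well under INT32_MAX). A tiny
// standalone sketch of that argument (vectorLength here is just an illustrative parameter):
#include <cstdint>

inline bool sketchIndexInBounds(int32_t index, uint32_t vectorLength)
{
    // vectorLength is assumed to be < INT32_MAX, as for JSC butterflies.
    uint32_t zeroExtended = static_cast<uint32_t>(index); // e.g. -1 becomes 0xffffffff
    return zeroExtended < vectorLength;                   // always false for negative indices
}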
-void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction)
+void JIT::emitSlow_op_has_indexed_property(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
int dst = currentInstruction[1].u.operand;
- int argumentsRegister = currentInstruction[2].u.operand;
+ int base = currentInstruction[2].u.operand;
int property = currentInstruction[3].u.operand;
- addSlowCase(branch32(NotEqual, tagFor(argumentsRegister), TrustedImm32(JSValue::EmptyValueTag)));
- emitLoad(property, regT1, regT2);
- addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
- add32(TrustedImm32(1), regT2);
- // regT2 now contains the integer index of the argument we want, including this
- load32(payloadFor(JSStack::ArgumentCount), regT3);
- addSlowCase(branch32(AboveOrEqual, regT2, regT3));
+ ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;
- loadPtr(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT0);
- loadPtr(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT1);
+ linkSlowCaseIfNotJSCell(iter, base); // base cell check
+ linkSlowCase(iter); // base array check
+ linkSlowCase(iter); // vector length check
+ linkSlowCase(iter); // empty value
+
+ Label slowPath = label();
+
+ emitLoad(base, regT1, regT0);
+ emitLoad(property, regT3, regT2);
+ Call call = callOperation(operationHasIndexedPropertyDefault, dst, regT1, regT0, regT3, regT2, byValInfo);
+
+ m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
+ m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
+ m_byValInstructionIndex++;
+}
+
+void JIT::emit_op_get_direct_pname(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int index = currentInstruction[4].u.operand;
+ int enumerator = currentInstruction[5].u.operand;
+
+ // Check that base is a cell
+ emitLoadPayload(base, regT0);
+ emitJumpSlowCaseIfNotJSCell(base);
+
+ // Check the structure
+ emitLoadPayload(enumerator, regT1);
+ load32(Address(regT0, JSCell::structureIDOffset()), regT2);
+ addSlowCase(branch32(NotEqual, regT2, Address(regT1, JSPropertyNameEnumerator::cachedStructureIDOffset())));
+
+ // Compute the offset
+ emitLoadPayload(index, regT2);
+ // If the index is less than the enumerator's cached inline capacity, this is an inline access
+ Jump outOfLineAccess = branch32(AboveOrEqual, regT2, Address(regT1, JSPropertyNameEnumerator::cachedInlineCapacityOffset()));
+ addPtr(TrustedImm32(JSObject::offsetOfInlineStorage()), regT0);
+ load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
+ load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
+
+ Jump done = jump();
+
+ // Otherwise it's out of line
+ outOfLineAccess.link(this);
+ loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
+ sub32(Address(regT1, JSPropertyNameEnumerator::cachedInlineCapacityOffset()), regT2);
+ neg32(regT2);
+ int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue);
+ load32(BaseIndex(regT0, regT2, TimesEight, offsetOfFirstProperty + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
+ load32(BaseIndex(regT0, regT2, TimesEight, offsetOfFirstProperty + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
+
+ done.link(this);
emitValueProfilingSite();
emitStore(dst, regT1, regT0);
}
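// get_direct_pname above distinguishes inline storage (slots inside the object cell) from
// out-of-line storage (slots in the butterfly, addressed backwards from its base), which is what
// the sub32/neg32/BaseIndex sequence computes. A simplified sketch of that addressing scheme
// (the struct and field names are stand-ins for illustration, not the real JSObject layout):
#include <cstdint>

struct SketchObject {
    uint64_t* inlineStorage;  // begins at offsetOfInlineStorage() in the real object
    uint64_t* butterfly;      // out-of-line properties sit at negative indices from here
    uint32_t inlineCapacity;
};

inline uint64_t sketchGetDirect(const SketchObject& o, uint32_t index)
{
    if (index < o.inlineCapacity)
        return o.inlineStorage[index]; // inline slot inside the cell
    // Out-of-line slot k lives at a negative butterfly index, here modeled as butterfly[-k - 1].
    int32_t k = static_cast<int32_t>(index - o.inlineCapacity);
    return o.butterfly[-k - 1];
}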
-void JIT::emitSlow_op_get_argument_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_get_direct_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int base = currentInstruction[2].u.operand;
+ linkSlowCaseIfNotJSCell(iter, base);
+ linkSlowCase(iter);
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_direct_pname);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_enumerator_structure_pname(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
- int arguments = currentInstruction[2].u.operand;
- int property = currentInstruction[3].u.operand;
+ int enumerator = currentInstruction[2].u.operand;
+ int index = currentInstruction[3].u.operand;
- linkSlowCase(iter);
- Jump skipArgumentsCreation = jump();
+ emitLoadPayload(index, regT0);
+ emitLoadPayload(enumerator, regT1);
+ Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endStructurePropertyIndexOffset()));
- linkSlowCase(iter);
- linkSlowCase(iter);
+ move(TrustedImm32(JSValue::NullTag), regT2);
+ move(TrustedImm32(0), regT0);
+
+ Jump done = jump();
+ inBounds.link(this);
+
+ loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1);
+ loadPtr(BaseIndex(regT1, regT0, timesPtr()), regT0);
+ move(TrustedImm32(JSValue::CellTag), regT2);
+
+ done.link(this);
+ emitStore(dst, regT2, regT0);
+}
+
+void JIT::emit_op_enumerator_generic_pname(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int enumerator = currentInstruction[2].u.operand;
+ int index = currentInstruction[3].u.operand;
+
+ emitLoadPayload(index, regT0);
+ emitLoadPayload(enumerator, regT1);
+ Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endGenericPropertyIndexOffset()));
+
+ move(TrustedImm32(JSValue::NullTag), regT2);
+ move(TrustedImm32(0), regT0);
+
+ Jump done = jump();
+ inBounds.link(this);
- callOperation(operationCreateArguments);
- emitStoreCell(arguments, returnValueGPR);
- emitStoreCell(unmodifiedArgumentsRegister(VirtualRegister(arguments)).offset(), returnValueGPR);
+ loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1);
+ loadPtr(BaseIndex(regT1, regT0, timesPtr()), regT0);
+ move(TrustedImm32(JSValue::CellTag), regT2);
- skipArgumentsCreation.link(this);
- emitLoad(arguments, regT1, regT0);
- emitLoad(property, regT3, regT2);
- callOperation(WithProfile, operationGetByValGeneric, dst, regT1, regT0, regT3, regT2);
+ done.link(this);
+ emitStore(dst, regT2, regT0);
+}
+
+void JIT::emit_op_profile_type(Instruction* currentInstruction)
+{
+ TypeLocation* cachedTypeLocation = currentInstruction[2].u.location;
+ int valueToProfile = currentInstruction[1].u.operand;
+
+ // Load payload in T0. Load tag in T3.
+ emitLoadPayload(valueToProfile, regT0);
+ emitLoadTag(valueToProfile, regT3);
+
+ JumpList jumpToEnd;
+
+ jumpToEnd.append(branch32(Equal, regT3, TrustedImm32(JSValue::EmptyValueTag)));
+
+ // Compile in a predictive type check, if possible, to see if we can skip writing to the log.
+ // These typechecks are inlined to match those of the 32-bit JSValue type checks.
+ if (cachedTypeLocation->m_lastSeenType == TypeUndefined)
+ jumpToEnd.append(branch32(Equal, regT3, TrustedImm32(JSValue::UndefinedTag)));
+ else if (cachedTypeLocation->m_lastSeenType == TypeNull)
+ jumpToEnd.append(branch32(Equal, regT3, TrustedImm32(JSValue::NullTag)));
+ else if (cachedTypeLocation->m_lastSeenType == TypeBoolean)
+ jumpToEnd.append(branch32(Equal, regT3, TrustedImm32(JSValue::BooleanTag)));
+ else if (cachedTypeLocation->m_lastSeenType == TypeAnyInt)
+ jumpToEnd.append(branch32(Equal, regT3, TrustedImm32(JSValue::Int32Tag)));
+ else if (cachedTypeLocation->m_lastSeenType == TypeNumber) {
+ jumpToEnd.append(branch32(Below, regT3, TrustedImm32(JSValue::LowestTag)));
+ jumpToEnd.append(branch32(Equal, regT3, TrustedImm32(JSValue::Int32Tag)));
+ } else if (cachedTypeLocation->m_lastSeenType == TypeString) {
+ Jump isNotCell = branch32(NotEqual, regT3, TrustedImm32(JSValue::CellTag));
+ jumpToEnd.append(branch8(Equal, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType)));
+ isNotCell.link(this);
+ }
+
+ // Load the type profiling log into T2.
+ TypeProfilerLog* cachedTypeProfilerLog = m_vm->typeProfilerLog();
+ move(TrustedImmPtr(cachedTypeProfilerLog), regT2);
+
+ // Load the next log entry into T1.
+ loadPtr(Address(regT2, TypeProfilerLog::currentLogEntryOffset()), regT1);
+
+ // Store the JSValue onto the log entry.
+ store32(regT0, Address(regT1, TypeProfilerLog::LogEntry::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+ store32(regT3, Address(regT1, TypeProfilerLog::LogEntry::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
+
+ // Store the structureID of the cell if argument is a cell, otherwise, store 0 on the log entry.
+ Jump notCell = branch32(NotEqual, regT3, TrustedImm32(JSValue::CellTag));
+ load32(Address(regT0, JSCell::structureIDOffset()), regT0);
+ store32(regT0, Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset()));
+ Jump skipNotCell = jump();
+ notCell.link(this);
+ store32(TrustedImm32(0), Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset()));
+ skipNotCell.link(this);
+
+ // Store the typeLocation on the log entry.
+ move(TrustedImmPtr(cachedTypeLocation), regT0);
+ store32(regT0, Address(regT1, TypeProfilerLog::LogEntry::locationOffset()));
+
+ // Increment the current log entry.
+ addPtr(TrustedImm32(sizeof(TypeProfilerLog::LogEntry)), regT1);
+ store32(regT1, Address(regT2, TypeProfilerLog::currentLogEntryOffset()));
+ jumpToEnd.append(branchPtr(NotEqual, regT1, TrustedImmPtr(cachedTypeProfilerLog->logEndPtr())));
+ // Clear the log if we're at the end of the log.
+ callOperation(operationProcessTypeProfilerLog);
+
+ jumpToEnd.link(this);
+}
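// The profile_type sequence above is a bump-pointer log append: write an entry at the current
// cursor, advance it, and call out to flush only when the cursor reaches the end of the buffer.
// A compact sketch of that pattern (the entry layout and names are illustrative, not the real
// TypeProfilerLog):
#include <cstdint>

struct SketchLogEntry { uint64_t value; uint32_t structureID; void* location; };

struct SketchTypeLog {
    SketchLogEntry* cursor;
    SketchLogEntry* end;

    template<typename FlushFn>
    void append(const SketchLogEntry& e, FlushFn flush)
    {
        *cursor++ = e;    // store value/structureID/location, then bump the cursor
        if (cursor == end)
            flush(*this); // mirrors the operationProcessTypeProfilerLog call above
    }
};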
+
+void JIT::emit_op_log_shadow_chicken_prologue(Instruction* currentInstruction)
+{
+ updateTopCallFrame();
+ static_assert(nonArgGPR0 != regT0 && nonArgGPR0 != regT2, "we will have problems if nonArgGPR0 aliases regT0 or regT2.");
+ GPRReg shadowPacketReg = regT0;
+ GPRReg scratch1Reg = nonArgGPR0; // This must be a non-argument register.
+ GPRReg scratch2Reg = regT2;
+ ensureShadowChickenPacket(shadowPacketReg, scratch1Reg, scratch2Reg);
+
+ scratch1Reg = regT4;
+ emitLoadPayload(currentInstruction[1].u.operand, regT3);
+ logShadowChickenProloguePacket(shadowPacketReg, scratch1Reg, regT3);
+}
+
+void JIT::emit_op_log_shadow_chicken_tail(Instruction* currentInstruction)
+{
+ updateTopCallFrame();
+ static_assert(nonArgGPR0 != regT0 && nonArgGPR0 != regT2, "we will have problems if nonArgGPR0 aliases regT0 or regT2.");
+ GPRReg shadowPacketReg = regT0;
+ GPRReg scratch1Reg = nonArgGPR0; // This must be a non-argument register.
+ GPRReg scratch2Reg = regT2;
+ ensureShadowChickenPacket(shadowPacketReg, scratch1Reg, scratch2Reg);
+
+ emitLoadPayload(currentInstruction[1].u.operand, regT2);
+ emitLoadTag(currentInstruction[1].u.operand, regT1);
+ JSValueRegs thisRegs(regT1, regT2);
+ emitLoadPayload(currentInstruction[2].u.operand, regT3);
+ logShadowChickenTailPacket(shadowPacketReg, thisRegs, regT3, m_codeBlock, CallSiteIndex(currentInstruction));
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/jit/JITOperationWrappers.h b/Source/JavaScriptCore/jit/JITOperationWrappers.h
deleted file mode 100644
index f9624fdbc..000000000
--- a/Source/JavaScriptCore/jit/JITOperationWrappers.h
+++ /dev/null
@@ -1,413 +0,0 @@
-/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef JITOperationWrappers_h
-#define JITOperationWrappers_h
-
-#include "JITOperations.h"
-#include <wtf/Compiler.h>
-#include <wtf/InlineASM.h>
-
-#if COMPILER(MSVC)
-#include <intrin.h>
-#endif
-
-namespace JSC {
-
-#if CPU(MIPS)
-#if WTF_MIPS_PIC
-#define LOAD_FUNCTION_TO_T9(function) \
- ".set noreorder" "\n" \
- ".cpload $25" "\n" \
- ".set reorder" "\n" \
- "la $t9, " LOCAL_REFERENCE(function) "\n"
-#else
-#define LOAD_FUNCTION_TO_T9(function) "" "\n"
-#endif
-#endif
-
-#if COMPILER(GCC) && CPU(X86_64)
-
-#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, register) \
- asm( \
- ".globl " SYMBOL_STRING(function) "\n" \
- HIDE_SYMBOL(function) "\n" \
- SYMBOL_STRING(function) ":" "\n" \
- "mov (%rsp), %" STRINGIZE(register) "\n" \
- "jmp " LOCAL_REFERENCE(function##WithReturnAddress) "\n" \
- );
-#define _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, rsi)
-#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, rsi)
-#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, rcx)
-#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, rcx)
-#define _V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJJI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, r8)
-
-#elif COMPILER(GCC) && CPU(X86)
-
-#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, offset) \
- asm( \
- ".text" "\n" \
- ".globl " SYMBOL_STRING(function) "\n" \
- HIDE_SYMBOL(function) "\n" \
- SYMBOL_STRING(function) ":" "\n" \
- "mov (%esp), %eax\n" \
- "mov %eax, " STRINGIZE(offset) "(%esp)\n" \
- "jmp " LOCAL_REFERENCE(function##WithReturnAddress) "\n" \
- );
-#define _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, 8)
-#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, 8)
-#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, 16)
-#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, 20)
-#define _V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJJI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, 28)
-
-#elif CPU(ARM64)
-
-#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, register) \
- asm ( \
- ".text" "\n" \
- ".align 2" "\n" \
- ".globl " SYMBOL_STRING(function) "\n" \
- HIDE_SYMBOL(function) "\n" \
- SYMBOL_STRING(function) ":" "\n" \
- "mov " STRINGIZE(register) ", lr" "\n" \
- "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \
- );
-
-#define _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, x1)
-#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, x1)
-#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, x3)
-#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, x3)
-#define _V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJCI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, x4)
-
-#elif COMPILER(GCC) && CPU(ARM_THUMB2)
-
-#define _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \
- asm ( \
- ".text" "\n" \
- ".align 2" "\n" \
- ".globl " SYMBOL_STRING(function) "\n" \
- HIDE_SYMBOL(function) "\n" \
- ".thumb" "\n" \
- ".thumb_func " THUMB_FUNC_PARAM(function) "\n" \
- SYMBOL_STRING(function) ":" "\n" \
- "mov a2, lr" "\n" \
- "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \
- );
-
-#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \
- _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function)
-
-#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) \
- asm ( \
- ".text" "\n" \
- ".align 2" "\n" \
- ".globl " SYMBOL_STRING(function) "\n" \
- HIDE_SYMBOL(function) "\n" \
- ".thumb" "\n" \
- ".thumb_func " THUMB_FUNC_PARAM(function) "\n" \
- SYMBOL_STRING(function) ":" "\n" \
- "mov a4, lr" "\n" \
- "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \
- );
-
-// EncodedJSValue in JSVALUE32_64 is a 64-bit integer. When being compiled in ARM EABI, it must be aligned even-numbered register (r0, r2 or [sp]).
-// As a result, return address will be at a 4-byte further location in the following cases.
-#if COMPILER_SUPPORTS(EABI) && CPU(ARM)
-#define INSTRUCTION_STORE_RETURN_ADDRESS_EJI "str lr, [sp, #4]"
-#define INSTRUCTION_STORE_RETURN_ADDRESS_EJJI "str lr, [sp, #12]"
-#else
-#define INSTRUCTION_STORE_RETURN_ADDRESS_EJI "str lr, [sp, #0]"
-#define INSTRUCTION_STORE_RETURN_ADDRESS_EJJI "str lr, [sp, #8]"
-#endif
-
-#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) \
- asm ( \
- ".text" "\n" \
- ".align 2" "\n" \
- ".globl " SYMBOL_STRING(function) "\n" \
- HIDE_SYMBOL(function) "\n" \
- ".thumb" "\n" \
- ".thumb_func " THUMB_FUNC_PARAM(function) "\n" \
- SYMBOL_STRING(function) ":" "\n" \
- INSTRUCTION_STORE_RETURN_ADDRESS_EJI "\n" \
- "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \
- );
-
-#define _V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJJI(function) \
- asm ( \
- ".text" "\n" \
- ".align 2" "\n" \
- ".globl " SYMBOL_STRING(function) "\n" \
- HIDE_SYMBOL(function) "\n" \
- ".thumb" "\n" \
- ".thumb_func " THUMB_FUNC_PARAM(function) "\n" \
- SYMBOL_STRING(function) ":" "\n" \
- INSTRUCTION_STORE_RETURN_ADDRESS_EJJI "\n" \
- "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \
- );
-
-#elif COMPILER(GCC) && CPU(ARM_TRADITIONAL)
-
-#define _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \
- asm ( \
- ".text" "\n" \
- ".globl " SYMBOL_STRING(function) "\n" \
- HIDE_SYMBOL(function) "\n" \
- INLINE_ARM_FUNCTION(function) \
- SYMBOL_STRING(function) ":" "\n" \
- "mov a2, lr" "\n" \
- "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \
- );
-
-#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \
- _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function)
-
-#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) \
- asm ( \
- ".text" "\n" \
- ".globl " SYMBOL_STRING(function) "\n" \
- HIDE_SYMBOL(function) "\n" \
- INLINE_ARM_FUNCTION(function) \
- SYMBOL_STRING(function) ":" "\n" \
- "mov a4, lr" "\n" \
- "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \
- );
-
-// EncodedJSValue in JSVALUE32_64 is a 64-bit integer. When being compiled in ARM EABI, it must be aligned even-numbered register (r0, r2 or [sp]).
-// As a result, return address will be at a 4-byte further location in the following cases.
-#if COMPILER_SUPPORTS(EABI) && CPU(ARM)
-#define INSTRUCTION_STORE_RETURN_ADDRESS_EJI "str lr, [sp, #4]"
-#define INSTRUCTION_STORE_RETURN_ADDRESS_EJJI "str lr, [sp, #12]"
-#else
-#define INSTRUCTION_STORE_RETURN_ADDRESS_EJI "str lr, [sp, #0]"
-#define INSTRUCTION_STORE_RETURN_ADDRESS_EJJI "str lr, [sp, #8]"
-#endif
-
-#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) \
- asm ( \
- ".text" "\n" \
- ".globl " SYMBOL_STRING(function) "\n" \
- HIDE_SYMBOL(function) "\n" \
- INLINE_ARM_FUNCTION(function) \
- SYMBOL_STRING(function) ":" "\n" \
- INSTRUCTION_STORE_RETURN_ADDRESS_EJI "\n" \
- "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \
- );
-
-#define _V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJJI(function) \
- asm ( \
- ".text" "\n" \
- ".globl " SYMBOL_STRING(function) "\n" \
- HIDE_SYMBOL(function) "\n" \
- INLINE_ARM_FUNCTION(function) \
- SYMBOL_STRING(function) ":" "\n" \
- INSTRUCTION_STORE_RETURN_ADDRESS_EJJI "\n" \
- "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \
- );
-
-#elif COMPILER(GCC) && CPU(MIPS)
-
-#define _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \
- asm( \
- ".text" "\n" \
- ".globl " SYMBOL_STRING(function) "\n" \
- HIDE_SYMBOL(function) "\n" \
- SYMBOL_STRING(function) ":" "\n" \
- LOAD_FUNCTION_TO_T9(function##WithReturnAddress) \
- "move $a1, $ra" "\n" \
- "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \
- );
-
-#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \
- _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function)
-
-#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) \
- asm( \
- ".text" "\n" \
- ".globl " SYMBOL_STRING(function) "\n" \
- HIDE_SYMBOL(function) "\n" \
- SYMBOL_STRING(function) ":" "\n" \
- LOAD_FUNCTION_TO_T9(function##WithReturnAddress) \
- "move $a3, $ra" "\n" \
- "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \
- );
-
-#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) \
- asm( \
- ".text" "\n" \
- ".globl " SYMBOL_STRING(function) "\n" \
- HIDE_SYMBOL(function) "\n" \
- SYMBOL_STRING(function) ":" "\n" \
- LOAD_FUNCTION_TO_T9(function##WithReturnAddress) \
- "sw $ra, 20($sp)" "\n" \
- "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \
- );
-
-#define _V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJJI(function) \
- asm( \
- ".text" "\n" \
- ".globl " SYMBOL_STRING(function) "\n" \
- HIDE_SYMBOL(function) "\n" \
- SYMBOL_STRING(function) ":" "\n" \
- LOAD_FUNCTION_TO_T9(function##WithReturnAddress) \
- "sw $ra, 28($sp)" "\n" \
- "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \
- );
-
-#elif COMPILER(GCC) && CPU(SH4)
-
-#define SH4_SCRATCH_REGISTER "r11"
-
-#define _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \
- asm( \
- ".text" "\n" \
- ".globl " SYMBOL_STRING(function) "\n" \
- HIDE_SYMBOL(function) "\n" \
- SYMBOL_STRING(function) ":" "\n" \
- "sts pr, r5" "\n" \
- "bra " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \
- "nop" "\n" \
- );
-
-#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \
- _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function)
-
-#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) \
- asm( \
- ".text" "\n" \
- ".globl " SYMBOL_STRING(function) "\n" \
- HIDE_SYMBOL(function) "\n" \
- SYMBOL_STRING(function) ":" "\n" \
- "sts pr, r7" "\n" \
- "mov.l 2f, " SH4_SCRATCH_REGISTER "\n" \
- "braf " SH4_SCRATCH_REGISTER "\n" \
- "nop" "\n" \
- "1: .balign 4" "\n" \
- "2: .long " LOCAL_REFERENCE(function) "WithReturnAddress-1b" "\n" \
- );
-
-#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, offset, scratch) \
- asm( \
- ".text" "\n" \
- ".globl " SYMBOL_STRING(function) "\n" \
- HIDE_SYMBOL(function) "\n" \
- SYMBOL_STRING(function) ":" "\n" \
- "sts pr, " scratch "\n" \
- "mov.l " scratch ", @(" STRINGIZE(offset) ", r15)" "\n" \
- "mov.l 2f, " scratch "\n" \
- "braf " scratch "\n" \
- "nop" "\n" \
- "1: .balign 4" "\n" \
- "2: .long " LOCAL_REFERENCE(function) "WithReturnAddress-1b" "\n" \
- );
-
-#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, 0, SH4_SCRATCH_REGISTER)
-#define _V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJJI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, 8, SH4_SCRATCH_REGISTER)
-
-#elif COMPILER(MSVC) && CPU(X86)
-
-#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) \
-__declspec(naked) EncodedJSValue JIT_OPERATION function(ExecState*, EncodedJSValue, StringImpl*) \
-{ \
- __asm { \
- __asm mov eax, [esp] \
- __asm mov [esp + 20], eax \
- __asm jmp function##WithReturnAddress \
- } \
-}
-
-#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) \
-__declspec(naked) EncodedJSValue JIT_OPERATION function(ExecState*, JSCell*, StringImpl*) \
-{ \
- __asm { \
- __asm mov eax, [esp] \
- __asm mov [esp + 16], eax \
- __asm jmp function##WithReturnAddress \
- } \
-}
-
-#define _V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJCI(function) \
-__declspec(naked) void JIT_OPERATION function(ExecState*, EncodedJSValue, JSCell*, StringImpl*) \
-{ \
- __asm { \
- __asm mov eax, [esp] \
- __asm mov [esp + 24], eax \
- __asm jmp function##WithReturnAddress \
- } \
-}
-
-#define _V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJJI(function) \
-__declspec(naked) void JIT_OPERATION function(ExecState*, EncodedJSValue, EncodedJSValue, StringImpl*) \
-{ \
- __asm { \
- __asm mov eax, [esp] \
- __asm mov [esp + 28], eax \
- __asm jmp function##WithReturnAddress \
- } \
-}
-
-#elif COMPILER(MSVC)
-
-#define _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \
- void* JIT_OPERATION function(ExecState* exec) { return function##WithReturnAddress(exec, ReturnAddressPtr(*(void**)_AddressOfReturnAddress())); }
-
-#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \
- EncodedJSValue JIT_OPERATION function(ExecState* exec) { return function##WithReturnAddress(exec, ReturnAddressPtr(*(void**)_AddressOfReturnAddress())); }
-
-#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) \
- EncodedJSValue JIT_OPERATION function(ExecState* exec, JSCell* cell, StringImpl* string) { return function##WithReturnAddress(exec, cell, string, ReturnAddressPtr(*(void**)_AddressOfReturnAddress())); }
-
-#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) \
- EncodedJSValue JIT_OPERATION function(ExecState* exec, EncodedJSValue value, StringImpl* string) { return function##WithReturnAddress(exec, value, string, ReturnAddressPtr(*(void**)_AddressOfReturnAddress())); }
-
-#define _V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJJI(function) \
- void JIT_OPERATION function(ExecState* exec, EncodedJSValue value, EncodedJSValue baseValue, StringImpl* string) { return function##WithReturnAddress(exec, value, baseValue, string, ReturnAddressPtr(*(void**)_AddressOfReturnAddress())); }
-
-#endif
-
-#define P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \
-void* JIT_OPERATION function##WithReturnAddress(ExecState*, ReturnAddressPtr) REFERENCED_FROM_ASM WTF_INTERNAL; \
-_P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function)
-
-#define J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \
-EncodedJSValue JIT_OPERATION function##WithReturnAddress(ExecState*, ReturnAddressPtr) REFERENCED_FROM_ASM WTF_INTERNAL; \
-_J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function)
-
-#define J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) \
-EncodedJSValue JIT_OPERATION function##WithReturnAddress(ExecState*, JSCell*, StringImpl*, ReturnAddressPtr) REFERENCED_FROM_ASM WTF_INTERNAL; \
-_J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function)
-
-#define J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) \
-EncodedJSValue JIT_OPERATION function##WithReturnAddress(ExecState*, EncodedJSValue, StringImpl*, ReturnAddressPtr) REFERENCED_FROM_ASM WTF_INTERNAL; \
-_J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function)
-
-#define V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJJI(function) \
-void JIT_OPERATION function##WithReturnAddress(ExecState*, EncodedJSValue, EncodedJSValue, StringImpl*, ReturnAddressPtr) REFERENCED_FROM_ASM WTF_INTERNAL; \
-_V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJJI(function)
-
-} // namespace JSC
-
-#endif // JITOperationWrappers_h
-
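The COMPILER(MSVC) fallback deleted above shows the wrapper pattern in its simplest form: a plain C++ shim reads its own return-address slot via the _AddressOfReturnAddress intrinsic and forwards it to the *WithReturnAddress variant as an extra argument. A minimal standalone sketch of that idea follows (MSVC-only; ReturnAddressPtr and the operationFoo names are illustrative stand-ins, not the real JSC declarations):

#include <intrin.h> // _AddressOfReturnAddress (MSVC intrinsic)
#include <cstdio>

// Illustrative stand-in for JSC's ReturnAddressPtr.
struct ReturnAddressPtr {
    void* value;
};

// The operation proper, which wants to know where its JIT caller will return to.
static int operationFooWithReturnAddress(int arg, ReturnAddressPtr ra)
{
    std::printf("arg=%d, caller returns to %p\n", arg, ra.value);
    return arg * 2;
}

// The thin wrapper the JIT actually calls. It must not be inlined, since it
// reads its own return-address slot, exactly as the deleted COMPILER(MSVC)
// wrappers do with *(void**)_AddressOfReturnAddress().
static __declspec(noinline) int operationFoo(int arg)
{
    return operationFooWithReturnAddress(
        arg, ReturnAddressPtr { *static_cast<void**>(_AddressOfReturnAddress()) });
}

int main()
{
    return operationFoo(21) == 42 ? 0 : 1;
}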
diff --git a/Source/JavaScriptCore/jit/JITOperations.cpp b/Source/JavaScriptCore/jit/JITOperations.cpp
index 578d15dac..a1d4e7351 100644
--- a/Source/JavaScriptCore/jit/JITOperations.cpp
+++ b/Source/JavaScriptCore/jit/JITOperations.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -24,32 +24,53 @@
*/
#include "config.h"
-#if ENABLE(JIT)
#include "JITOperations.h"
-#include "Arguments.h"
+#if ENABLE(JIT)
+
+#include "ArithProfile.h"
#include "ArrayConstructor.h"
-#include "CallFrameInlines.h"
#include "CommonSlowPaths.h"
#include "DFGCompilationMode.h"
#include "DFGDriver.h"
#include "DFGOSREntry.h"
+#include "DFGThunks.h"
#include "DFGWorklist.h"
+#include "Debugger.h"
+#include "DirectArguments.h"
#include "Error.h"
+#include "ErrorHandlingScope.h"
+#include "EvalCodeBlock.h"
+#include "ExceptionFuzz.h"
+#include "FunctionCodeBlock.h"
#include "GetterSetter.h"
#include "HostCallReturnValue.h"
+#include "ICStats.h"
+#include "Interpreter.h"
#include "JIT.h"
-#include "JITOperationWrappers.h"
+#include "JITExceptions.h"
#include "JITToDFGDeferredCompilationCallback.h"
+#include "JSAsyncFunction.h"
+#include "JSCInlines.h"
+#include "JSGeneratorFunction.h"
#include "JSGlobalObjectFunctions.h"
-#include "JSNameScope.h"
-#include "JSPropertyNameIterator.h"
-#include "JSStackInlines.h"
-#include "JSWithScope.h"
+#include "JSLexicalEnvironment.h"
+#include "JSPropertyNameEnumerator.h"
+#include "ModuleProgramCodeBlock.h"
#include "ObjectConstructor.h"
-#include "Operations.h"
+#include "PolymorphicAccess.h"
+#include "ProgramCodeBlock.h"
+#include "PropertyName.h"
+#include "RegExpObject.h"
#include "Repatch.h"
-#include "RepatchBuffer.h"
+#include "ScopedArguments.h"
+#include "ShadowChicken.h"
+#include "StructureStubInfo.h"
+#include "SuperSampler.h"
+#include "TestRunnerUtils.h"
+#include "TypeProfilerLog.h"
+#include "VMInlines.h"
+#include <wtf/InlineASM.h>
namespace JSC {
@@ -71,33 +92,63 @@ void * _ReturnAddress(void);
#endif
-void JIT_OPERATION operationStackCheck(ExecState* exec, CodeBlock* codeBlock)
+void JIT_OPERATION operationThrowStackOverflowError(ExecState* exec, CodeBlock* codeBlock)
{
// We pass in our own code block, because the callframe hasn't been populated.
VM* vm = codeBlock->vm();
- CallFrame* callerFrame = exec->callerFrameSkippingVMEntrySentinel();
- if (!callerFrame)
+ auto scope = DECLARE_THROW_SCOPE(*vm);
+
+ VMEntryFrame* vmEntryFrame = vm->topVMEntryFrame;
+ CallFrame* callerFrame = exec->callerFrame(vmEntryFrame);
+ if (!callerFrame) {
callerFrame = exec;
+ vmEntryFrame = vm->topVMEntryFrame;
+ }
+
+ NativeCallFrameTracerWithRestore tracer(vm, vmEntryFrame, callerFrame);
+ throwStackOverflowError(callerFrame, scope);
+}
- NativeCallFrameTracer tracer(vm, callerFrame);
+#if ENABLE(WEBASSEMBLY)
+void JIT_OPERATION operationThrowDivideError(ExecState* exec)
+{
+ VM* vm = &exec->vm();
+ auto scope = DECLARE_THROW_SCOPE(*vm);
- JSStack& stack = vm->interpreter->stack();
+ VMEntryFrame* vmEntryFrame = vm->topVMEntryFrame;
+ CallFrame* callerFrame = exec->callerFrame(vmEntryFrame);
- if (UNLIKELY(!stack.grow(&exec->registers()[virtualRegisterForLocal(codeBlock->frameRegisterCount()).offset()])))
- vm->throwException(callerFrame, createStackOverflowError(callerFrame));
+ NativeCallFrameTracerWithRestore tracer(vm, vmEntryFrame, callerFrame);
+ ErrorHandlingScope errorScope(*vm);
+ throwException(callerFrame, scope, createError(callerFrame, ASCIILiteral("Division by zero or division overflow.")));
}
-int32_t JIT_OPERATION operationCallArityCheck(ExecState* exec)
+void JIT_OPERATION operationThrowOutOfBoundsAccessError(ExecState* exec)
{
VM* vm = &exec->vm();
- CallFrame* callerFrame = exec->callerFrameSkippingVMEntrySentinel();
- NativeCallFrameTracer tracer(vm, callerFrame);
+ auto scope = DECLARE_THROW_SCOPE(*vm);
- JSStack& stack = vm->interpreter->stack();
+ VMEntryFrame* vmEntryFrame = vm->topVMEntryFrame;
+ CallFrame* callerFrame = exec->callerFrame(vmEntryFrame);
- int32_t missingArgCount = CommonSlowPaths::arityCheckFor(exec, &stack, CodeForCall);
- if (missingArgCount < 0)
- vm->throwException(callerFrame, createStackOverflowError(callerFrame));
+ NativeCallFrameTracerWithRestore tracer(vm, vmEntryFrame, callerFrame);
+ ErrorHandlingScope errorScope(*vm);
+ throwException(callerFrame, scope, createError(callerFrame, ASCIILiteral("Out-of-bounds access.")));
+}
+#endif
+
+int32_t JIT_OPERATION operationCallArityCheck(ExecState* exec)
+{
+ VM* vm = &exec->vm();
+ auto scope = DECLARE_THROW_SCOPE(*vm);
+
+ int32_t missingArgCount = CommonSlowPaths::arityCheckFor(exec, *vm, CodeForCall);
+ if (missingArgCount < 0) {
+ VMEntryFrame* vmEntryFrame = vm->topVMEntryFrame;
+ CallFrame* callerFrame = exec->callerFrame(vmEntryFrame);
+ NativeCallFrameTracerWithRestore tracer(vm, vmEntryFrame, callerFrame);
+ throwStackOverflowError(callerFrame, scope);
+ }
return missingArgCount;
}
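The hunk above replaces direct vm->throwException(...) calls with the throw-scope idiom (DECLARE_THROW_SCOPE, throwStackOverflowError, RETURN_IF_EXCEPTION) used throughout the rest of the file. Below is a simplified, self-contained analog of that idiom, shaped roughly like operationCallArityCheck; MiniVM and MiniThrowScope are stand-ins meant only to show the control flow, not the real JSC API.

#include <cstdio>
#include <optional>
#include <string>

// Illustrative stand-ins; the real types are JSC's VM and ThrowScope, reached
// through the DECLARE_THROW_SCOPE / RETURN_IF_EXCEPTION macros.
struct MiniVM {
    std::optional<std::string> pendingException;
};

class MiniThrowScope {
public:
    explicit MiniThrowScope(MiniVM& vm) : m_vm(vm) { }
    bool exception() const { return m_vm.pendingException.has_value(); }
    void throwException(std::string message) { m_vm.pendingException = std::move(message); }
private:
    MiniVM& m_vm;
};

#define MINI_RETURN_IF_EXCEPTION(scope, value) do { if ((scope).exception()) return (value); } while (false)

// Shaped like operationCallArityCheck: declare the scope on entry, throw
// through it, and bail out with a sentinel once an exception is pending.
static int arityCheck(MiniVM& vm, int argumentCount, int parameterCount)
{
    MiniThrowScope scope(vm);
    int missingArgCount = parameterCount - argumentCount;
    if (missingArgCount > 1024) // stand-in for "growing the stack for the missing arguments failed"
        scope.throwException("stack overflow");
    MINI_RETURN_IF_EXCEPTION(scope, -1);
    return missingArgCount;
}

int main()
{
    MiniVM vm;
    std::printf("missing arguments: %d\n", arityCheck(vm, 1, 3));
    return 0;
}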
@@ -105,489 +156,616 @@ int32_t JIT_OPERATION operationCallArityCheck(ExecState* exec)
int32_t JIT_OPERATION operationConstructArityCheck(ExecState* exec)
{
VM* vm = &exec->vm();
- CallFrame* callerFrame = exec->callerFrameSkippingVMEntrySentinel();
- NativeCallFrameTracer tracer(vm, callerFrame);
-
- JSStack& stack = vm->interpreter->stack();
-
- int32_t missingArgCount = CommonSlowPaths::arityCheckFor(exec, &stack, CodeForConstruct);
- if (missingArgCount < 0)
- vm->throwException(callerFrame, createStackOverflowError(callerFrame));
+ auto scope = DECLARE_THROW_SCOPE(*vm);
+
+ int32_t missingArgCount = CommonSlowPaths::arityCheckFor(exec, *vm, CodeForConstruct);
+ if (missingArgCount < 0) {
+ VMEntryFrame* vmEntryFrame = vm->topVMEntryFrame;
+ CallFrame* callerFrame = exec->callerFrame(vmEntryFrame);
+ NativeCallFrameTracerWithRestore tracer(vm, vmEntryFrame, callerFrame);
+ throwStackOverflowError(callerFrame, scope);
+ }
return missingArgCount;
}
-EncodedJSValue JIT_OPERATION operationGetById(ExecState* exec, StructureStubInfo*, EncodedJSValue base, StringImpl* uid)
+EncodedJSValue JIT_OPERATION operationTryGetById(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue base, UniquedStringImpl* uid)
{
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
-
+ Identifier ident = Identifier::fromUid(vm, uid);
+ stubInfo->tookSlowPath = true;
+
JSValue baseValue = JSValue::decode(base);
- PropertySlot slot(baseValue);
- Identifier ident(vm, uid);
- return JSValue::encode(baseValue.get(exec, ident, slot));
+ PropertySlot slot(baseValue, PropertySlot::InternalMethodType::VMInquiry);
+ baseValue.getPropertySlot(exec, ident, slot);
+
+ return JSValue::encode(slot.getPureResult());
}
-EncodedJSValue JIT_OPERATION operationGetByIdBuildList(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue base, StringImpl* uid)
+
+EncodedJSValue JIT_OPERATION operationTryGetByIdGeneric(ExecState* exec, EncodedJSValue base, UniquedStringImpl* uid)
{
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
+ Identifier ident = Identifier::fromUid(vm, uid);
- Identifier ident(vm, uid);
- AccessType accessType = static_cast<AccessType>(stubInfo->accessType);
+ JSValue baseValue = JSValue::decode(base);
+ PropertySlot slot(baseValue, PropertySlot::InternalMethodType::VMInquiry);
+ baseValue.getPropertySlot(exec, ident, slot);
+
+ return JSValue::encode(slot.getPureResult());
+}
+
+EncodedJSValue JIT_OPERATION operationTryGetByIdOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue base, UniquedStringImpl* uid)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+ auto scope = DECLARE_THROW_SCOPE(*vm);
+ Identifier ident = Identifier::fromUid(vm, uid);
JSValue baseValue = JSValue::decode(base);
- PropertySlot slot(baseValue);
- JSValue result = baseValue.get(exec, ident, slot);
+ PropertySlot slot(baseValue, PropertySlot::InternalMethodType::VMInquiry);
- if (accessType == static_cast<AccessType>(stubInfo->accessType))
- buildGetByIDList(exec, baseValue, ident, slot, *stubInfo);
+ baseValue.getPropertySlot(exec, ident, slot);
+ RETURN_IF_EXCEPTION(scope, encodedJSValue());
- return JSValue::encode(result);
+ if (stubInfo->considerCaching(exec->codeBlock(), baseValue.structureOrNull()) && !slot.isTaintedByOpaqueObject() && (slot.isCacheableValue() || slot.isCacheableGetter() || slot.isUnset()))
+ repatchGetByID(exec, baseValue, ident, slot, *stubInfo, GetByIDKind::Try);
+
+ return JSValue::encode(slot.getPureResult());
}
-EncodedJSValue JIT_OPERATION operationGetByIdOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue base, StringImpl* uid)
+EncodedJSValue JIT_OPERATION operationGetById(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue base, UniquedStringImpl* uid)
{
+ SuperSamplerScope superSamplerScope(false);
+
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
- Identifier ident = uid->isEmptyUnique() ? Identifier::from(PrivateName(uid)) : Identifier(vm, uid);
- AccessType accessType = static_cast<AccessType>(stubInfo->accessType);
-
+
+ stubInfo->tookSlowPath = true;
+
JSValue baseValue = JSValue::decode(base);
- PropertySlot slot(baseValue);
- JSValue result = baseValue.get(exec, ident, slot);
+ PropertySlot slot(baseValue, PropertySlot::InternalMethodType::Get);
+ Identifier ident = Identifier::fromUid(vm, uid);
- if (accessType == static_cast<AccessType>(stubInfo->accessType)) {
- if (stubInfo->seen)
- repatchGetByID(exec, baseValue, ident, slot, *stubInfo);
- else
- stubInfo->seen = true;
- }
+ LOG_IC((ICEvent::OperationGetById, baseValue.classInfoOrNull(*vm), ident));
+ return JSValue::encode(baseValue.get(exec, ident, slot));
+}
- return JSValue::encode(result);
+EncodedJSValue JIT_OPERATION operationGetByIdGeneric(ExecState* exec, EncodedJSValue base, UniquedStringImpl* uid)
+{
+ SuperSamplerScope superSamplerScope(false);
+
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ JSValue baseValue = JSValue::decode(base);
+ PropertySlot slot(baseValue, PropertySlot::InternalMethodType::Get);
+ Identifier ident = Identifier::fromUid(vm, uid);
+ LOG_IC((ICEvent::OperationGetByIdGeneric, baseValue.classInfoOrNull(*vm), ident));
+ return JSValue::encode(baseValue.get(exec, ident, slot));
}
-EncodedJSValue JIT_OPERATION operationInOptimize(ExecState* exec, StructureStubInfo* stubInfo, JSCell* base, StringImpl* key)
+EncodedJSValue JIT_OPERATION operationGetByIdOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue base, UniquedStringImpl* uid)
{
+ SuperSamplerScope superSamplerScope(false);
+
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
+ Identifier ident = Identifier::fromUid(vm, uid);
+
+ JSValue baseValue = JSValue::decode(base);
+ LOG_IC((ICEvent::OperationGetByIdOptimize, baseValue.classInfoOrNull(*vm), ident));
+
+ return JSValue::encode(baseValue.getPropertySlot(exec, ident, [&] (bool found, PropertySlot& slot) -> JSValue {
+ if (stubInfo->considerCaching(exec->codeBlock(), baseValue.structureOrNull()))
+ repatchGetByID(exec, baseValue, ident, slot, *stubInfo, GetByIDKind::Normal);
+ return found ? slot.getValue(exec, ident) : jsUndefined();
+ }));
+}
+
+EncodedJSValue JIT_OPERATION operationInOptimize(ExecState* exec, StructureStubInfo* stubInfo, JSCell* base, UniquedStringImpl* key)
+{
+ SuperSamplerScope superSamplerScope(false);
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+ auto scope = DECLARE_THROW_SCOPE(*vm);
+
if (!base->isObject()) {
- vm->throwException(exec, createInvalidParameterError(exec, "in", base));
+ throwException(exec, scope, createInvalidInParameterError(exec, base));
return JSValue::encode(jsUndefined());
}
AccessType accessType = static_cast<AccessType>(stubInfo->accessType);
- Identifier ident(vm, key);
- PropertySlot slot(base);
+ Identifier ident = Identifier::fromUid(vm, key);
+ LOG_IC((ICEvent::OperationInOptimize, base->classInfo(*vm), ident));
+ PropertySlot slot(base, PropertySlot::InternalMethodType::HasProperty);
bool result = asObject(base)->getPropertySlot(exec, ident, slot);
+ RETURN_IF_EXCEPTION(scope, encodedJSValue());
RELEASE_ASSERT(accessType == stubInfo->accessType);
- if (stubInfo->seen)
+ if (stubInfo->considerCaching(exec->codeBlock(), asObject(base)->structure()))
repatchIn(exec, base, ident, result, slot, *stubInfo);
- else
- stubInfo->seen = true;
return JSValue::encode(jsBoolean(result));
}
-EncodedJSValue JIT_OPERATION operationIn(ExecState* exec, StructureStubInfo*, JSCell* base, StringImpl* key)
+EncodedJSValue JIT_OPERATION operationIn(ExecState* exec, StructureStubInfo* stubInfo, JSCell* base, UniquedStringImpl* key)
{
+ SuperSamplerScope superSamplerScope(false);
+
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
+ auto scope = DECLARE_THROW_SCOPE(*vm);
+
+ stubInfo->tookSlowPath = true;
if (!base->isObject()) {
- vm->throwException(exec, createInvalidParameterError(exec, "in", base));
+ throwException(exec, scope, createInvalidInParameterError(exec, base));
return JSValue::encode(jsUndefined());
}
- Identifier ident(vm, key);
+ Identifier ident = Identifier::fromUid(vm, key);
+ LOG_IC((ICEvent::OperationIn, base->classInfo(*vm), ident));
return JSValue::encode(jsBoolean(asObject(base)->hasProperty(exec, ident)));
}
EncodedJSValue JIT_OPERATION operationGenericIn(ExecState* exec, JSCell* base, EncodedJSValue key)
{
+ SuperSamplerScope superSamplerScope(false);
+
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
- return JSValue::encode(jsBoolean(CommonSlowPaths::opIn(exec, JSValue::decode(key), base)));
+ return JSValue::encode(jsBoolean(CommonSlowPaths::opIn(exec, base, JSValue::decode(key))));
}
-EncodedJSValue JIT_OPERATION operationCallCustomGetter(ExecState* exec, JSCell* base, PropertySlot::GetValueFunc function, StringImpl* uid)
+void JIT_OPERATION operationPutByIdStrict(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid)
{
+ SuperSamplerScope superSamplerScope(false);
+
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
- Identifier ident(vm, uid);
+ stubInfo->tookSlowPath = true;
- return function(exec, JSValue::encode(base), JSValue::encode(base), ident);
-}
-
-EncodedJSValue JIT_OPERATION operationCallGetter(ExecState* exec, JSCell* base, JSCell* getterSetter)
-{
- VM* vm = &exec->vm();
- NativeCallFrameTracer tracer(vm, exec);
+ JSValue baseValue = JSValue::decode(encodedBase);
+ Identifier ident = Identifier::fromUid(vm, uid);
+ LOG_IC((ICEvent::OperationPutByIdStrict, baseValue.classInfoOrNull(*vm), ident));
- return JSValue::encode(callGetter(exec, base, getterSetter));
+ PutPropertySlot slot(baseValue, true, exec->codeBlock()->putByIdContext());
+ baseValue.putInline(exec, ident, JSValue::decode(encodedValue), slot);
}
-void JIT_OPERATION operationPutByIdStrict(ExecState* exec, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid)
+void JIT_OPERATION operationPutByIdNonStrict(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid)
{
- VM* vm = &exec->vm();
- NativeCallFrameTracer tracer(vm, exec);
+ SuperSamplerScope superSamplerScope(false);
- Identifier ident(vm, uid);
- PutPropertySlot slot(JSValue::decode(encodedBase), true, exec->codeBlock()->putByIdContext());
- JSValue::decode(encodedBase).put(exec, ident, JSValue::decode(encodedValue), slot);
-}
-
-void JIT_OPERATION operationPutByIdNonStrict(ExecState* exec, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid)
-{
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
- Identifier ident(vm, uid);
- PutPropertySlot slot(JSValue::decode(encodedBase), false, exec->codeBlock()->putByIdContext());
- JSValue::decode(encodedBase).put(exec, ident, JSValue::decode(encodedValue), slot);
-}
-
-void JIT_OPERATION operationPutByIdDirectStrict(ExecState* exec, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid)
-{
- VM* vm = &exec->vm();
- NativeCallFrameTracer tracer(vm, exec);
+ stubInfo->tookSlowPath = true;
- Identifier ident(vm, uid);
- PutPropertySlot slot(JSValue::decode(encodedBase), true, exec->codeBlock()->putByIdContext());
- asObject(JSValue::decode(encodedBase))->putDirect(exec->vm(), ident, JSValue::decode(encodedValue), slot);
+ JSValue baseValue = JSValue::decode(encodedBase);
+ Identifier ident = Identifier::fromUid(vm, uid);
+ LOG_IC((ICEvent::OperationPutByIdNonStrict, baseValue.classInfoOrNull(*vm), ident));
+ PutPropertySlot slot(baseValue, false, exec->codeBlock()->putByIdContext());
+ baseValue.putInline(exec, ident, JSValue::decode(encodedValue), slot);
}
-void JIT_OPERATION operationPutByIdDirectNonStrict(ExecState* exec, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid)
+void JIT_OPERATION operationPutByIdDirectStrict(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid)
{
- VM* vm = &exec->vm();
- NativeCallFrameTracer tracer(vm, exec);
+ SuperSamplerScope superSamplerScope(false);
- Identifier ident(vm, uid);
- PutPropertySlot slot(JSValue::decode(encodedBase), false, exec->codeBlock()->putByIdContext());
- asObject(JSValue::decode(encodedBase))->putDirect(exec->vm(), ident, JSValue::decode(encodedValue), slot);
-}
-
-void JIT_OPERATION operationPutByIdStrictOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid)
-{
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
- Identifier ident(vm, uid);
- AccessType accessType = static_cast<AccessType>(stubInfo->accessType);
-
- JSValue value = JSValue::decode(encodedValue);
+ stubInfo->tookSlowPath = true;
+
JSValue baseValue = JSValue::decode(encodedBase);
+ Identifier ident = Identifier::fromUid(vm, uid);
+ LOG_IC((ICEvent::OperationPutByIdDirectStrict, baseValue.classInfoOrNull(*vm), ident));
PutPropertySlot slot(baseValue, true, exec->codeBlock()->putByIdContext());
-
- baseValue.put(exec, ident, value, slot);
-
- if (accessType != static_cast<AccessType>(stubInfo->accessType))
- return;
-
- if (stubInfo->seen)
- repatchPutByID(exec, baseValue, ident, slot, *stubInfo, NotDirect);
- else
- stubInfo->seen = true;
+ asObject(baseValue)->putDirect(exec->vm(), ident, JSValue::decode(encodedValue), slot);
}
-void JIT_OPERATION operationPutByIdNonStrictOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid)
+void JIT_OPERATION operationPutByIdDirectNonStrict(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid)
{
+ SuperSamplerScope superSamplerScope(false);
+
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
- Identifier ident(vm, uid);
- AccessType accessType = static_cast<AccessType>(stubInfo->accessType);
-
- JSValue value = JSValue::decode(encodedValue);
+ stubInfo->tookSlowPath = true;
+
JSValue baseValue = JSValue::decode(encodedBase);
+ Identifier ident = Identifier::fromUid(vm, uid);
+ LOG_IC((ICEvent::OperationPutByIdDirectNonStrict, baseValue.classInfoOrNull(*vm), ident));
PutPropertySlot slot(baseValue, false, exec->codeBlock()->putByIdContext());
-
- baseValue.put(exec, ident, value, slot);
-
- if (accessType != static_cast<AccessType>(stubInfo->accessType))
- return;
-
- if (stubInfo->seen)
- repatchPutByID(exec, baseValue, ident, slot, *stubInfo, NotDirect);
- else
- stubInfo->seen = true;
+ asObject(baseValue)->putDirect(exec->vm(), ident, JSValue::decode(encodedValue), slot);
}
-void JIT_OPERATION operationPutByIdDirectStrictOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid)
+void JIT_OPERATION operationPutByIdStrictOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid)
{
+ SuperSamplerScope superSamplerScope(false);
+
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
-
- Identifier ident(vm, uid);
+ auto scope = DECLARE_THROW_SCOPE(*vm);
+
+ Identifier ident = Identifier::fromUid(vm, uid);
AccessType accessType = static_cast<AccessType>(stubInfo->accessType);
JSValue value = JSValue::decode(encodedValue);
- JSObject* baseObject = asObject(JSValue::decode(encodedBase));
- PutPropertySlot slot(baseObject, true, exec->codeBlock()->putByIdContext());
-
- baseObject->putDirect(exec->vm(), ident, value, slot);
-
- if (accessType != static_cast<AccessType>(stubInfo->accessType))
- return;
-
- if (stubInfo->seen)
- repatchPutByID(exec, baseObject, ident, slot, *stubInfo, Direct);
- else
- stubInfo->seen = true;
-}
+ JSValue baseValue = JSValue::decode(encodedBase);
+ LOG_IC((ICEvent::OperationPutByIdStrictOptimize, baseValue.classInfoOrNull(*vm), ident));
+ CodeBlock* codeBlock = exec->codeBlock();
+ PutPropertySlot slot(baseValue, true, codeBlock->putByIdContext());
-void JIT_OPERATION operationPutByIdDirectNonStrictOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid)
-{
- VM* vm = &exec->vm();
- NativeCallFrameTracer tracer(vm, exec);
-
- Identifier ident(vm, uid);
- AccessType accessType = static_cast<AccessType>(stubInfo->accessType);
+ Structure* structure = baseValue.isCell() ? baseValue.asCell()->structure(*vm) : nullptr;
+ baseValue.putInline(exec, ident, value, slot);
+ RETURN_IF_EXCEPTION(scope, void());
- JSValue value = JSValue::decode(encodedValue);
- JSObject* baseObject = asObject(JSValue::decode(encodedBase));
- PutPropertySlot slot(baseObject, false, exec->codeBlock()->putByIdContext());
-
- baseObject->putDirect(exec->vm(), ident, value, slot);
-
if (accessType != static_cast<AccessType>(stubInfo->accessType))
return;
- if (stubInfo->seen)
- repatchPutByID(exec, baseObject, ident, slot, *stubInfo, Direct);
- else
- stubInfo->seen = true;
+ if (stubInfo->considerCaching(codeBlock, structure))
+ repatchPutByID(exec, baseValue, structure, ident, slot, *stubInfo, NotDirect);
}
-void JIT_OPERATION operationPutByIdStrictBuildList(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid)
+void JIT_OPERATION operationPutByIdNonStrictOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid)
{
+ SuperSamplerScope superSamplerScope(false);
+
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
-
- Identifier ident(vm, uid);
+ auto scope = DECLARE_THROW_SCOPE(*vm);
+
+ Identifier ident = Identifier::fromUid(vm, uid);
AccessType accessType = static_cast<AccessType>(stubInfo->accessType);
JSValue value = JSValue::decode(encodedValue);
JSValue baseValue = JSValue::decode(encodedBase);
- PutPropertySlot slot(baseValue, true, exec->codeBlock()->putByIdContext());
-
- baseValue.put(exec, ident, value, slot);
-
- if (accessType != static_cast<AccessType>(stubInfo->accessType))
- return;
-
- buildPutByIdList(exec, baseValue, ident, slot, *stubInfo, NotDirect);
-}
+ LOG_IC((ICEvent::OperationPutByIdNonStrictOptimize, baseValue.classInfoOrNull(*vm), ident));
+ CodeBlock* codeBlock = exec->codeBlock();
+ PutPropertySlot slot(baseValue, false, codeBlock->putByIdContext());
-void JIT_OPERATION operationPutByIdNonStrictBuildList(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid)
-{
- VM* vm = &exec->vm();
- NativeCallFrameTracer tracer(vm, exec);
-
- Identifier ident(vm, uid);
- AccessType accessType = static_cast<AccessType>(stubInfo->accessType);
+ Structure* structure = baseValue.isCell() ? baseValue.asCell()->structure(*vm) : nullptr;
+ baseValue.putInline(exec, ident, value, slot);
+ RETURN_IF_EXCEPTION(scope, void());
- JSValue value = JSValue::decode(encodedValue);
- JSValue baseValue = JSValue::decode(encodedBase);
- PutPropertySlot slot(baseValue, false, exec->codeBlock()->putByIdContext());
-
- baseValue.put(exec, ident, value, slot);
-
if (accessType != static_cast<AccessType>(stubInfo->accessType))
return;
- buildPutByIdList(exec, baseValue, ident, slot, *stubInfo, NotDirect);
+ if (stubInfo->considerCaching(codeBlock, structure))
+ repatchPutByID(exec, baseValue, structure, ident, slot, *stubInfo, NotDirect);
}
-void JIT_OPERATION operationPutByIdDirectStrictBuildList(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid)
+void JIT_OPERATION operationPutByIdDirectStrictOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid)
{
+ SuperSamplerScope superSamplerScope(false);
+
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
- Identifier ident(vm, uid);
+ Identifier ident = Identifier::fromUid(vm, uid);
AccessType accessType = static_cast<AccessType>(stubInfo->accessType);
-
+
JSValue value = JSValue::decode(encodedValue);
JSObject* baseObject = asObject(JSValue::decode(encodedBase));
- PutPropertySlot slot(baseObject, true, exec->codeBlock()->putByIdContext());
+ LOG_IC((ICEvent::OperationPutByIdDirectStrictOptimize, baseObject->classInfo(*vm), ident));
+ CodeBlock* codeBlock = exec->codeBlock();
+ PutPropertySlot slot(baseObject, true, codeBlock->putByIdContext());
+ Structure* structure = baseObject->structure(*vm);
baseObject->putDirect(exec->vm(), ident, value, slot);
if (accessType != static_cast<AccessType>(stubInfo->accessType))
return;
- buildPutByIdList(exec, baseObject, ident, slot, *stubInfo, Direct);
+ if (stubInfo->considerCaching(codeBlock, structure))
+ repatchPutByID(exec, baseObject, structure, ident, slot, *stubInfo, Direct);
}
-void JIT_OPERATION operationPutByIdDirectNonStrictBuildList(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid)
+void JIT_OPERATION operationPutByIdDirectNonStrictOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid)
{
+ SuperSamplerScope superSamplerScope(false);
+
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
- Identifier ident(vm, uid);
+ Identifier ident = Identifier::fromUid(vm, uid);
AccessType accessType = static_cast<AccessType>(stubInfo->accessType);
JSValue value = JSValue::decode(encodedValue);
JSObject* baseObject = asObject(JSValue::decode(encodedBase));
- PutPropertySlot slot(baseObject, false, exec->codeBlock()->putByIdContext());
+ LOG_IC((ICEvent::OperationPutByIdDirectNonStrictOptimize, baseObject->classInfo(*vm), ident));
+ CodeBlock* codeBlock = exec->codeBlock();
+ PutPropertySlot slot(baseObject, false, codeBlock->putByIdContext());
- baseObject ->putDirect(exec->vm(), ident, value, slot);
+ Structure* structure = baseObject->structure(*vm);
+ baseObject->putDirect(exec->vm(), ident, value, slot);
if (accessType != static_cast<AccessType>(stubInfo->accessType))
return;
- buildPutByIdList(exec, baseObject, ident, slot, *stubInfo, Direct);
+ if (stubInfo->considerCaching(codeBlock, structure))
+ repatchPutByID(exec, baseObject, structure, ident, slot, *stubInfo, Direct);
}
-void JIT_OPERATION operationReallocateStorageAndFinishPut(ExecState* exec, JSObject* base, Structure* structure, PropertyOffset offset, EncodedJSValue value)
+ALWAYS_INLINE static bool isStringOrSymbol(JSValue value)
{
- VM& vm = exec->vm();
- NativeCallFrameTracer tracer(&vm, exec);
-
- ASSERT(structure->outOfLineCapacity() > base->structure()->outOfLineCapacity());
- ASSERT(!vm.heap.storageAllocator().fastPathShouldSucceed(structure->outOfLineCapacity() * sizeof(JSValue)));
- base->setStructureAndReallocateStorageIfNecessary(vm, structure);
- base->putDirect(vm, offset, JSValue::decode(value));
+ return value.isString() || value.isSymbol();
}
-static void putByVal(CallFrame* callFrame, JSValue baseValue, JSValue subscript, JSValue value)
+static void putByVal(CallFrame* callFrame, JSValue baseValue, JSValue subscript, JSValue value, ByValInfo* byValInfo)
{
+ VM& vm = callFrame->vm();
+ auto scope = DECLARE_THROW_SCOPE(vm);
if (LIKELY(subscript.isUInt32())) {
+ byValInfo->tookSlowPath = true;
uint32_t i = subscript.asUInt32();
if (baseValue.isObject()) {
JSObject* object = asObject(baseValue);
if (object->canSetIndexQuickly(i))
object->setIndexQuickly(callFrame->vm(), i, value);
- else
- object->methodTable()->putByIndex(object, callFrame, i, value, callFrame->codeBlock()->isStrictMode());
+ else {
+ // FIXME: This will make us think that in-bounds typed array accesses are actually
+ // out-of-bounds.
+ // https://bugs.webkit.org/show_bug.cgi?id=149886
+ byValInfo->arrayProfile->setOutOfBounds();
+ object->methodTable(vm)->putByIndex(object, callFrame, i, value, callFrame->codeBlock()->isStrictMode());
+ }
} else
baseValue.putByIndex(callFrame, i, value, callFrame->codeBlock()->isStrictMode());
- } else if (isName(subscript)) {
- PutPropertySlot slot(baseValue, callFrame->codeBlock()->isStrictMode());
- baseValue.put(callFrame, jsCast<NameInstance*>(subscript.asCell())->privateName(), value, slot);
- } else {
- Identifier property(callFrame, subscript.toString(callFrame)->value(callFrame));
- if (!callFrame->vm().exception()) { // Don't put to an object if toString threw an exception.
- PutPropertySlot slot(baseValue, callFrame->codeBlock()->isStrictMode());
- baseValue.put(callFrame, property, value, slot);
- }
+ return;
}
+
+ auto property = subscript.toPropertyKey(callFrame);
+ // Don't put to an object if toString threw an exception.
+ RETURN_IF_EXCEPTION(scope, void());
+
+ if (byValInfo->stubInfo && (!isStringOrSymbol(subscript) || byValInfo->cachedId != property))
+ byValInfo->tookSlowPath = true;
+
+ scope.release();
+ PutPropertySlot slot(baseValue, callFrame->codeBlock()->isStrictMode());
+ baseValue.putInline(callFrame, property, value, slot);
}
-static void directPutByVal(CallFrame* callFrame, JSObject* baseObject, JSValue subscript, JSValue value)
+static void directPutByVal(CallFrame* callFrame, JSObject* baseObject, JSValue subscript, JSValue value, ByValInfo* byValInfo)
{
+ VM& vm = callFrame->vm();
+ auto scope = DECLARE_THROW_SCOPE(vm);
+ bool isStrictMode = callFrame->codeBlock()->isStrictMode();
if (LIKELY(subscript.isUInt32())) {
- uint32_t i = subscript.asUInt32();
- baseObject->putDirectIndex(callFrame, i, value);
- } else if (isName(subscript)) {
- PutPropertySlot slot(baseObject, callFrame->codeBlock()->isStrictMode());
- baseObject->putDirect(callFrame->vm(), jsCast<NameInstance*>(subscript.asCell())->privateName(), value, slot);
- } else {
- Identifier property(callFrame, subscript.toString(callFrame)->value(callFrame));
- if (!callFrame->vm().exception()) { // Don't put to an object if toString threw an exception.
- PutPropertySlot slot(baseObject, callFrame->codeBlock()->isStrictMode());
- baseObject->putDirect(callFrame->vm(), property, value, slot);
+ // Despite its name, JSValue::isUInt32 will return true only for positive boxed int32_t; all those values are valid array indices.
+ byValInfo->tookSlowPath = true;
+ uint32_t index = subscript.asUInt32();
+ ASSERT(isIndex(index));
+ if (baseObject->canSetIndexQuicklyForPutDirect(index)) {
+ baseObject->setIndexQuickly(callFrame->vm(), index, value);
+ return;
}
+
+ // FIXME: This will make us think that in-bounds typed array accesses are actually
+ // out-of-bounds.
+ // https://bugs.webkit.org/show_bug.cgi?id=149886
+ byValInfo->arrayProfile->setOutOfBounds();
+ baseObject->putDirectIndex(callFrame, index, value, 0, isStrictMode ? PutDirectIndexShouldThrow : PutDirectIndexShouldNotThrow);
+ return;
+ }
+
+ if (subscript.isDouble()) {
+ double subscriptAsDouble = subscript.asDouble();
+ uint32_t subscriptAsUInt32 = static_cast<uint32_t>(subscriptAsDouble);
+ if (subscriptAsDouble == subscriptAsUInt32 && isIndex(subscriptAsUInt32)) {
+ byValInfo->tookSlowPath = true;
+ baseObject->putDirectIndex(callFrame, subscriptAsUInt32, value, 0, isStrictMode ? PutDirectIndexShouldThrow : PutDirectIndexShouldNotThrow);
+ return;
+ }
+ }
+
+ // Don't put to an object if toString threw an exception.
+ auto property = subscript.toPropertyKey(callFrame);
+ RETURN_IF_EXCEPTION(scope, void());
+
+ if (std::optional<uint32_t> index = parseIndex(property)) {
+ byValInfo->tookSlowPath = true;
+ baseObject->putDirectIndex(callFrame, index.value(), value, 0, isStrictMode ? PutDirectIndexShouldThrow : PutDirectIndexShouldNotThrow);
+ return;
}
+
+ if (byValInfo->stubInfo && (!isStringOrSymbol(subscript) || byValInfo->cachedId != property))
+ byValInfo->tookSlowPath = true;
+
+ PutPropertySlot slot(baseObject, isStrictMode);
+ baseObject->putDirect(callFrame->vm(), property, value, slot);
}
-void JIT_OPERATION operationPutByVal(ExecState* exec, EncodedJSValue encodedBaseValue, EncodedJSValue encodedSubscript, EncodedJSValue encodedValue)
+
+enum class OptimizationResult {
+ NotOptimized,
+ SeenOnce,
+ Optimized,
+ GiveUp,
+};
+
+static OptimizationResult tryPutByValOptimize(ExecState* exec, JSValue baseValue, JSValue subscript, ByValInfo* byValInfo, ReturnAddressPtr returnAddress)
{
- VM& vm = exec->vm();
- NativeCallFrameTracer tracer(&vm, exec);
+ // See if it's worth optimizing at all.
+ OptimizationResult optimizationResult = OptimizationResult::NotOptimized;
- JSValue baseValue = JSValue::decode(encodedBaseValue);
- JSValue subscript = JSValue::decode(encodedSubscript);
- JSValue value = JSValue::decode(encodedValue);
+ VM& vm = exec->vm();
if (baseValue.isObject() && subscript.isInt32()) {
- // See if it's worth optimizing at all.
JSObject* object = asObject(baseValue);
- bool didOptimize = false;
- unsigned bytecodeOffset = exec->locationAsBytecodeOffset();
- ASSERT(bytecodeOffset);
- ByValInfo& byValInfo = exec->codeBlock()->getByValInfo(bytecodeOffset - 1);
- ASSERT(!byValInfo.stubRoutine);
+ ASSERT(exec->bytecodeOffset());
+ ASSERT(!byValInfo->stubRoutine);
- if (hasOptimizableIndexing(object->structure())) {
+ Structure* structure = object->structure(vm);
+ if (hasOptimizableIndexing(structure)) {
// Attempt to optimize.
- JITArrayMode arrayMode = jitArrayModeForStructure(object->structure());
- if (arrayMode != byValInfo.arrayMode) {
- JIT::compilePutByVal(&vm, exec->codeBlock(), &byValInfo, ReturnAddressPtr(OUR_RETURN_ADDRESS), arrayMode);
- didOptimize = true;
+ JITArrayMode arrayMode = jitArrayModeForStructure(structure);
+ if (jitArrayModePermitsPut(arrayMode) && arrayMode != byValInfo->arrayMode) {
+ CodeBlock* codeBlock = exec->codeBlock();
+ ConcurrentJSLocker locker(codeBlock->m_lock);
+ byValInfo->arrayProfile->computeUpdatedPrediction(locker, codeBlock, structure);
+
+ JIT::compilePutByVal(&vm, codeBlock, byValInfo, returnAddress, arrayMode);
+ optimizationResult = OptimizationResult::Optimized;
}
}
- if (!didOptimize) {
- // If we take slow path more than 10 times without patching then make sure we
- // never make that mistake again. Or, if we failed to patch and we have some object
- // that intercepts indexed get, then don't even wait until 10 times. For cases
- // where we see non-index-intercepting objects, this gives 10 iterations worth of
- // opportunity for us to observe that the get_by_val may be polymorphic.
- if (++byValInfo.slowPathCount >= 10
- || object->structure()->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero()) {
- // Don't ever try to optimize.
- RepatchBuffer repatchBuffer(exec->codeBlock());
- repatchBuffer.relinkCallerToFunction(ReturnAddressPtr(OUR_RETURN_ADDRESS), FunctionPtr(operationPutByValGeneric));
+ // If we failed to patch and we have some object that intercepts indexed get, then don't even wait until 10 times.
+ if (optimizationResult != OptimizationResult::Optimized && object->structure(vm)->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero())
+ optimizationResult = OptimizationResult::GiveUp;
+ }
+
+ if (baseValue.isObject() && isStringOrSymbol(subscript)) {
+ const Identifier propertyName = subscript.toPropertyKey(exec);
+ if (subscript.isSymbol() || !parseIndex(propertyName)) {
+ ASSERT(exec->bytecodeOffset());
+ ASSERT(!byValInfo->stubRoutine);
+ if (byValInfo->seen) {
+ if (byValInfo->cachedId == propertyName) {
+ JIT::compilePutByValWithCachedId(&vm, exec->codeBlock(), byValInfo, returnAddress, NotDirect, propertyName);
+ optimizationResult = OptimizationResult::Optimized;
+ } else {
+                    // Seems like a generic property access site.
+ optimizationResult = OptimizationResult::GiveUp;
+ }
+ } else {
+ CodeBlock* codeBlock = exec->codeBlock();
+ ConcurrentJSLocker locker(codeBlock->m_lock);
+ byValInfo->seen = true;
+ byValInfo->cachedId = propertyName;
+ if (subscript.isSymbol())
+ byValInfo->cachedSymbol.set(vm, codeBlock, asSymbol(subscript));
+ optimizationResult = OptimizationResult::SeenOnce;
}
}
}
- putByVal(exec, baseValue, subscript, value);
+ if (optimizationResult != OptimizationResult::Optimized && optimizationResult != OptimizationResult::SeenOnce) {
+ // If we take slow path more than 10 times without patching then make sure we
+ // never make that mistake again. For cases where we see non-index-intercepting
+ // objects, this gives 10 iterations worth of opportunity for us to observe
+ // that the put_by_val may be polymorphic. We count up slowPathCount even if
+ // the result is GiveUp.
+ if (++byValInfo->slowPathCount >= 10)
+ optimizationResult = OptimizationResult::GiveUp;
+ }
+
+ return optimizationResult;
}
-void JIT_OPERATION operationDirectPutByVal(ExecState* callFrame, EncodedJSValue encodedBaseValue, EncodedJSValue encodedSubscript, EncodedJSValue encodedValue)
+void JIT_OPERATION operationPutByValOptimize(ExecState* exec, EncodedJSValue encodedBaseValue, EncodedJSValue encodedSubscript, EncodedJSValue encodedValue, ByValInfo* byValInfo)
{
- VM& vm = callFrame->vm();
- NativeCallFrameTracer tracer(&vm, callFrame);
-
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+
JSValue baseValue = JSValue::decode(encodedBaseValue);
JSValue subscript = JSValue::decode(encodedSubscript);
JSValue value = JSValue::decode(encodedValue);
- RELEASE_ASSERT(baseValue.isObject());
- JSObject* object = asObject(baseValue);
+ if (tryPutByValOptimize(exec, baseValue, subscript, byValInfo, ReturnAddressPtr(OUR_RETURN_ADDRESS)) == OptimizationResult::GiveUp) {
+ // Don't ever try to optimize.
+ byValInfo->tookSlowPath = true;
+ ctiPatchCallByReturnAddress(ReturnAddressPtr(OUR_RETURN_ADDRESS), FunctionPtr(operationPutByValGeneric));
+ }
+ putByVal(exec, baseValue, subscript, value, byValInfo);
+}
+
+static OptimizationResult tryDirectPutByValOptimize(ExecState* exec, JSObject* object, JSValue subscript, ByValInfo* byValInfo, ReturnAddressPtr returnAddress)
+{
+ // See if it's worth optimizing at all.
+ OptimizationResult optimizationResult = OptimizationResult::NotOptimized;
+
+ VM& vm = exec->vm();
+
if (subscript.isInt32()) {
- // See if it's worth optimizing at all.
- bool didOptimize = false;
-
- unsigned bytecodeOffset = callFrame->locationAsBytecodeOffset();
- ASSERT(bytecodeOffset);
- ByValInfo& byValInfo = callFrame->codeBlock()->getByValInfo(bytecodeOffset - 1);
- ASSERT(!byValInfo.stubRoutine);
-
- if (hasOptimizableIndexing(object->structure())) {
+ ASSERT(exec->bytecodeOffset());
+ ASSERT(!byValInfo->stubRoutine);
+
+ Structure* structure = object->structure(vm);
+ if (hasOptimizableIndexing(structure)) {
// Attempt to optimize.
- JITArrayMode arrayMode = jitArrayModeForStructure(object->structure());
- if (arrayMode != byValInfo.arrayMode) {
- JIT::compileDirectPutByVal(&vm, callFrame->codeBlock(), &byValInfo, ReturnAddressPtr(OUR_RETURN_ADDRESS), arrayMode);
- didOptimize = true;
+ JITArrayMode arrayMode = jitArrayModeForStructure(structure);
+ if (jitArrayModePermitsPut(arrayMode) && arrayMode != byValInfo->arrayMode) {
+ CodeBlock* codeBlock = exec->codeBlock();
+ ConcurrentJSLocker locker(codeBlock->m_lock);
+ byValInfo->arrayProfile->computeUpdatedPrediction(locker, codeBlock, structure);
+
+ JIT::compileDirectPutByVal(&vm, codeBlock, byValInfo, returnAddress, arrayMode);
+ optimizationResult = OptimizationResult::Optimized;
}
}
-
- if (!didOptimize) {
- // If we take slow path more than 10 times without patching then make sure we
- // never make that mistake again. Or, if we failed to patch and we have some object
- // that intercepts indexed get, then don't even wait until 10 times. For cases
- // where we see non-index-intercepting objects, this gives 10 iterations worth of
- // opportunity for us to observe that the get_by_val may be polymorphic.
- if (++byValInfo.slowPathCount >= 10
- || object->structure()->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero()) {
- // Don't ever try to optimize.
- RepatchBuffer repatchBuffer(callFrame->codeBlock());
- repatchBuffer.relinkCallerToFunction(ReturnAddressPtr(OUR_RETURN_ADDRESS), FunctionPtr(operationDirectPutByValGeneric));
+
+ // If we failed to patch and we have some object that intercepts indexed get, then don't even wait until 10 times.
+ if (optimizationResult != OptimizationResult::Optimized && object->structure(vm)->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero())
+ optimizationResult = OptimizationResult::GiveUp;
+ } else if (isStringOrSymbol(subscript)) {
+ const Identifier propertyName = subscript.toPropertyKey(exec);
+ if (subscript.isSymbol() || !parseIndex(propertyName)) {
+ ASSERT(exec->bytecodeOffset());
+ ASSERT(!byValInfo->stubRoutine);
+ if (byValInfo->seen) {
+ if (byValInfo->cachedId == propertyName) {
+ JIT::compilePutByValWithCachedId(&vm, exec->codeBlock(), byValInfo, returnAddress, Direct, propertyName);
+ optimizationResult = OptimizationResult::Optimized;
+ } else {
+                    // Seems like a generic property access site.
+ optimizationResult = OptimizationResult::GiveUp;
+ }
+ } else {
+ CodeBlock* codeBlock = exec->codeBlock();
+ ConcurrentJSLocker locker(codeBlock->m_lock);
+ byValInfo->seen = true;
+ byValInfo->cachedId = propertyName;
+ if (subscript.isSymbol())
+ byValInfo->cachedSymbol.set(vm, codeBlock, asSymbol(subscript));
+ optimizationResult = OptimizationResult::SeenOnce;
}
}
}
- directPutByVal(callFrame, object, subscript, value);
+
+ if (optimizationResult != OptimizationResult::Optimized && optimizationResult != OptimizationResult::SeenOnce) {
+ // If we take slow path more than 10 times without patching then make sure we
+ // never make that mistake again. For cases where we see non-index-intercepting
+ // objects, this gives 10 iterations worth of opportunity for us to observe
+ // that the get_by_val may be polymorphic. We count up slowPathCount even if
+ // the result is GiveUp.
+ if (++byValInfo->slowPathCount >= 10)
+ optimizationResult = OptimizationResult::GiveUp;
+ }
+
+ return optimizationResult;
}
-void JIT_OPERATION operationPutByValGeneric(ExecState* exec, EncodedJSValue encodedBaseValue, EncodedJSValue encodedSubscript, EncodedJSValue encodedValue)
+void JIT_OPERATION operationDirectPutByValOptimize(ExecState* exec, EncodedJSValue encodedBaseValue, EncodedJSValue encodedSubscript, EncodedJSValue encodedValue, ByValInfo* byValInfo)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ JSValue baseValue = JSValue::decode(encodedBaseValue);
+ JSValue subscript = JSValue::decode(encodedSubscript);
+ JSValue value = JSValue::decode(encodedValue);
+ RELEASE_ASSERT(baseValue.isObject());
+ JSObject* object = asObject(baseValue);
+ if (tryDirectPutByValOptimize(exec, object, subscript, byValInfo, ReturnAddressPtr(OUR_RETURN_ADDRESS)) == OptimizationResult::GiveUp) {
+ // Don't ever try to optimize.
+ byValInfo->tookSlowPath = true;
+ ctiPatchCallByReturnAddress(ReturnAddressPtr(OUR_RETURN_ADDRESS), FunctionPtr(operationDirectPutByValGeneric));
+ }
+
+ directPutByVal(exec, object, subscript, value, byValInfo);
+}
+
+void JIT_OPERATION operationPutByValGeneric(ExecState* exec, EncodedJSValue encodedBaseValue, EncodedJSValue encodedSubscript, EncodedJSValue encodedValue, ByValInfo* byValInfo)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
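The tryPutByValOptimize/tryDirectPutByValOptimize helpers introduced in the hunk above share a give-up heuristic: any visit that does not end in Optimized or SeenOnce bumps slowPathCount, and after ten such visits the call site is permanently repatched to the generic operation. The standalone sketch below reproduces just that counting logic; ByValSite is a hypothetical stand-in for JSC's ByValInfo.

#include <cstdint>
#include <cstdio>

enum class OptimizationResult { NotOptimized, SeenOnce, Optimized, GiveUp };

// Hypothetical stand-in for the per-call-site ByValInfo used above.
struct ByValSite {
    uint32_t slowPathCount { 0 };
    bool tookSlowPath { false };
};

static OptimizationResult applyGiveUpHeuristic(ByValSite& site, OptimizationResult result)
{
    if (result != OptimizationResult::Optimized && result != OptimizationResult::SeenOnce) {
        // Count every un-optimized visit; after ten of them, stop trying for good.
        if (++site.slowPathCount >= 10)
            result = OptimizationResult::GiveUp;
    }
    return result;
}

int main()
{
    ByValSite site;
    for (int i = 0; i < 12; ++i) {
        if (applyGiveUpHeuristic(site, OptimizationResult::NotOptimized) == OptimizationResult::GiveUp) {
            site.tookSlowPath = true; // mirrors byValInfo->tookSlowPath = true before repatching to the generic path
            std::printf("gave up after %u slow-path visits\n", static_cast<unsigned>(site.slowPathCount));
            break;
        }
    }
    return 0;
}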
@@ -596,11 +774,11 @@ void JIT_OPERATION operationPutByValGeneric(ExecState* exec, EncodedJSValue enco
JSValue subscript = JSValue::decode(encodedSubscript);
JSValue value = JSValue::decode(encodedValue);
- putByVal(exec, baseValue, subscript, value);
+ putByVal(exec, baseValue, subscript, value, byValInfo);
}
-void JIT_OPERATION operationDirectPutByValGeneric(ExecState* exec, EncodedJSValue encodedBaseValue, EncodedJSValue encodedSubscript, EncodedJSValue encodedValue)
+void JIT_OPERATION operationDirectPutByValGeneric(ExecState* exec, EncodedJSValue encodedBaseValue, EncodedJSValue encodedSubscript, EncodedJSValue encodedValue, ByValInfo* byValInfo)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
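The following hunk switches handleHostCall and operationLinkCall from returning a bare code pointer to returning an encodeResult(...) pair: the machine-code address to jump to plus a KeepTheFrame/ReuseTheFrame flag for tail calls. The sketch below models that two-word convention with plain structs; SlowPathReturnType and encodeResult here are simplified stand-ins, whereas the real pair is packed so the JIT call site can read both halves.

#include <cstdio>

enum FrameAction { KeepTheFrame, ReuseTheFrame };

// Simplified stand-in for JSC's SlowPathReturnType/encodeResult.
struct SlowPathReturnType {
    void* codeToJumpTo;
    void* frameAction; // carried as a pointer-sized payload, as in the diff
};

static SlowPathReturnType encodeResult(void* code, void* action)
{
    return { code, action };
}

static void dispatch(const SlowPathReturnType& result)
{
    bool reuse = result.frameAction == reinterpret_cast<void*>(ReuseTheFrame);
    std::printf("jump to %p, %s the frame\n", result.codeToJumpTo, reuse ? "reuse" : "keep");
}

int main()
{
    // A tail call can reuse the caller's frame; everything else must keep it.
    dispatch(encodeResult(nullptr, reinterpret_cast<void*>(ReuseTheFrame)));
    dispatch(encodeResult(nullptr, reinterpret_cast<void*>(KeepTheFrame)));
    return 0;
}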
@@ -609,218 +787,265 @@ void JIT_OPERATION operationDirectPutByValGeneric(ExecState* exec, EncodedJSValu
JSValue subscript = JSValue::decode(encodedSubscript);
JSValue value = JSValue::decode(encodedValue);
RELEASE_ASSERT(baseValue.isObject());
- directPutByVal(exec, asObject(baseValue), subscript, value);
+ directPutByVal(exec, asObject(baseValue), subscript, value, byValInfo);
}
-EncodedJSValue JIT_OPERATION operationCallEval(ExecState* execCallee)
+EncodedJSValue JIT_OPERATION operationCallEval(ExecState* exec, ExecState* execCallee)
{
- CallFrame* callerFrame = execCallee->callerFrame();
- ASSERT(execCallee->callerFrame()->codeBlock()->codeType() != FunctionCode
- || !execCallee->callerFrame()->codeBlock()->needsFullScopeChain()
- || execCallee->callerFrame()->uncheckedR(execCallee->callerFrame()->codeBlock()->activationRegister().offset()).jsValue());
+ VM* vm = &exec->vm();
+ auto scope = DECLARE_THROW_SCOPE(*vm);
- execCallee->setScope(callerFrame->scope());
- execCallee->setReturnPC(static_cast<Instruction*>(OUR_RETURN_ADDRESS));
execCallee->setCodeBlock(0);
-
+
if (!isHostFunction(execCallee->calleeAsValue(), globalFuncEval))
return JSValue::encode(JSValue());
- VM* vm = &execCallee->vm();
JSValue result = eval(execCallee);
- if (vm->exception())
- return EncodedJSValue();
+ RETURN_IF_EXCEPTION(scope, encodedJSValue());
return JSValue::encode(result);
}
-static void* handleHostCall(ExecState* execCallee, JSValue callee, CodeSpecializationKind kind)
+static SlowPathReturnType handleHostCall(ExecState* execCallee, JSValue callee, CallLinkInfo* callLinkInfo)
{
ExecState* exec = execCallee->callerFrame();
VM* vm = &exec->vm();
+ auto scope = DECLARE_THROW_SCOPE(*vm);
- execCallee->setScope(exec->scope());
execCallee->setCodeBlock(0);
- if (kind == CodeForCall) {
+ if (callLinkInfo->specializationKind() == CodeForCall) {
CallData callData;
CallType callType = getCallData(callee, callData);
- ASSERT(callType != CallTypeJS);
+ ASSERT(callType != CallType::JS);
- if (callType == CallTypeHost) {
+ if (callType == CallType::Host) {
NativeCallFrameTracer tracer(vm, execCallee);
execCallee->setCallee(asObject(callee));
vm->hostCallReturnValue = JSValue::decode(callData.native.function(execCallee));
- if (vm->exception())
- return vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress();
+ if (UNLIKELY(scope.exception())) {
+ return encodeResult(
+ vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress(),
+ reinterpret_cast<void*>(KeepTheFrame));
+ }
- return reinterpret_cast<void*>(getHostCallReturnValue);
+ return encodeResult(
+ bitwise_cast<void*>(getHostCallReturnValue),
+ reinterpret_cast<void*>(callLinkInfo->callMode() == CallMode::Tail ? ReuseTheFrame : KeepTheFrame));
}
- ASSERT(callType == CallTypeNone);
- exec->vm().throwException(exec, createNotAFunctionError(exec, callee));
- return vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress();
+ ASSERT(callType == CallType::None);
+ throwException(exec, scope, createNotAFunctionError(exec, callee));
+ return encodeResult(
+ vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress(),
+ reinterpret_cast<void*>(KeepTheFrame));
}
- ASSERT(kind == CodeForConstruct);
+ ASSERT(callLinkInfo->specializationKind() == CodeForConstruct);
ConstructData constructData;
ConstructType constructType = getConstructData(callee, constructData);
- ASSERT(constructType != ConstructTypeJS);
+ ASSERT(constructType != ConstructType::JS);
- if (constructType == ConstructTypeHost) {
+ if (constructType == ConstructType::Host) {
NativeCallFrameTracer tracer(vm, execCallee);
execCallee->setCallee(asObject(callee));
vm->hostCallReturnValue = JSValue::decode(constructData.native.function(execCallee));
- if (vm->exception())
- return vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress();
+ if (UNLIKELY(scope.exception())) {
+ return encodeResult(
+ vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress(),
+ reinterpret_cast<void*>(KeepTheFrame));
+ }
- return reinterpret_cast<void*>(getHostCallReturnValue);
+ return encodeResult(bitwise_cast<void*>(getHostCallReturnValue), reinterpret_cast<void*>(KeepTheFrame));
}
- ASSERT(constructType == ConstructTypeNone);
- exec->vm().throwException(exec, createNotAConstructorError(exec, callee));
- return vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress();
+ ASSERT(constructType == ConstructType::None);
+ throwException(exec, scope, createNotAConstructorError(exec, callee));
+ return encodeResult(
+ vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress(),
+ reinterpret_cast<void*>(KeepTheFrame));
}
-inline char* linkFor(ExecState* execCallee, CodeSpecializationKind kind)
+SlowPathReturnType JIT_OPERATION operationLinkCall(ExecState* execCallee, CallLinkInfo* callLinkInfo)
{
ExecState* exec = execCallee->callerFrame();
VM* vm = &exec->vm();
+ auto throwScope = DECLARE_THROW_SCOPE(*vm);
+
+ CodeSpecializationKind kind = callLinkInfo->specializationKind();
NativeCallFrameTracer tracer(vm, exec);
+ RELEASE_ASSERT(!callLinkInfo->isDirect());
+
JSValue calleeAsValue = execCallee->calleeAsValue();
JSCell* calleeAsFunctionCell = getJSFunction(calleeAsValue);
- if (!calleeAsFunctionCell)
- return reinterpret_cast<char*>(handleHostCall(execCallee, calleeAsValue, kind));
+ if (!calleeAsFunctionCell) {
+ // FIXME: We should cache these kinds of calls. They can be common and currently they are
+ // expensive.
+ // https://bugs.webkit.org/show_bug.cgi?id=144458
+ throwScope.release();
+ return handleHostCall(execCallee, calleeAsValue, callLinkInfo);
+ }
JSFunction* callee = jsCast<JSFunction*>(calleeAsFunctionCell);
- execCallee->setScope(callee->scopeUnchecked());
+ JSScope* scope = callee->scopeUnchecked();
ExecutableBase* executable = callee->executable();
MacroAssemblerCodePtr codePtr;
CodeBlock* codeBlock = 0;
- CallLinkInfo& callLinkInfo = exec->codeBlock()->getCallLinkInfo(execCallee->returnPC());
- if (executable->isHostFunction())
- codePtr = executable->generatedJITCodeFor(kind)->addressForCall();
- else {
+ if (executable->isHostFunction()) {
+ codePtr = executable->entrypointFor(kind, MustCheckArity);
+ } else {
FunctionExecutable* functionExecutable = static_cast<FunctionExecutable*>(executable);
- JSObject* error = functionExecutable->prepareForExecution(execCallee, callee->scope(), kind);
+
+ if (!isCall(kind) && functionExecutable->constructAbility() == ConstructAbility::CannotConstruct) {
+ throwException(exec, throwScope, createNotAConstructorError(exec, callee));
+ return encodeResult(
+ vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress(),
+ reinterpret_cast<void*>(KeepTheFrame));
+ }
+
+ CodeBlock** codeBlockSlot = execCallee->addressOfCodeBlock();
+ JSObject* error = functionExecutable->prepareForExecution<FunctionExecutable>(*vm, callee, scope, kind, *codeBlockSlot);
+ ASSERT(throwScope.exception() == reinterpret_cast<Exception*>(error));
if (error) {
- vm->throwException(exec, createStackOverflowError(exec));
- return reinterpret_cast<char*>(vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress());
+ throwException(exec, throwScope, error);
+ return encodeResult(
+ vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress(),
+ reinterpret_cast<void*>(KeepTheFrame));
}
- codeBlock = functionExecutable->codeBlockFor(kind);
- if (execCallee->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters()) || callLinkInfo.callType == CallLinkInfo::CallVarargs)
- codePtr = functionExecutable->generatedJITCodeWithArityCheckFor(kind);
+ codeBlock = *codeBlockSlot;
+ ArityCheckMode arity;
+ if (execCallee->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters()) || callLinkInfo->isVarargs())
+ arity = MustCheckArity;
else
- codePtr = functionExecutable->generatedJITCodeFor(kind)->addressForCall();
+ arity = ArityCheckNotRequired;
+ codePtr = functionExecutable->entrypointFor(kind, arity);
}
- if (!callLinkInfo.seenOnce())
- callLinkInfo.setSeen();
+ if (!callLinkInfo->seenOnce())
+ callLinkInfo->setSeen();
else
- linkFor(execCallee, callLinkInfo, codeBlock, callee, codePtr, kind);
- return reinterpret_cast<char*>(codePtr.executableAddress());
+ linkFor(execCallee, *callLinkInfo, codeBlock, callee, codePtr);
+
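+ // Descriptive note: encodeResult packs two values — the entry point to jump to and whether the
+ // caller's frame can be reused (tail calls reuse it, all other call modes keep it).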
+ return encodeResult(codePtr.executableAddress(), reinterpret_cast<void*>(callLinkInfo->callMode() == CallMode::Tail ? ReuseTheFrame : KeepTheFrame));
}
-char* JIT_OPERATION operationLinkCall(ExecState* execCallee)
+void JIT_OPERATION operationLinkDirectCall(ExecState* exec, CallLinkInfo* callLinkInfo, JSFunction* callee)
{
- return linkFor(execCallee, CodeForCall);
-}
+ VM* vm = &exec->vm();
+ auto throwScope = DECLARE_THROW_SCOPE(*vm);
-char* JIT_OPERATION operationLinkConstruct(ExecState* execCallee)
-{
- return linkFor(execCallee, CodeForConstruct);
+ CodeSpecializationKind kind = callLinkInfo->specializationKind();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ RELEASE_ASSERT(callLinkInfo->isDirect());
+
+ // This would happen if the executable died during GC but the CodeBlock did not die. That should
+ // not happen because the CodeBlock should have a weak reference to any executable it uses for
+ // this purpose.
+ RELEASE_ASSERT(callLinkInfo->executable());
+
+ // Having a CodeBlock indicates that this is linked. We shouldn't be taking this path if it's
+ // linked.
+ RELEASE_ASSERT(!callLinkInfo->codeBlock());
+
+ // We just don't support this yet.
+ RELEASE_ASSERT(!callLinkInfo->isVarargs());
+
+ ExecutableBase* executable = callLinkInfo->executable();
+ RELEASE_ASSERT(callee->executable() == callLinkInfo->executable());
+
+ JSScope* scope = callee->scopeUnchecked();
+
+ MacroAssemblerCodePtr codePtr;
+ CodeBlock* codeBlock = nullptr;
+ if (executable->isHostFunction())
+ codePtr = executable->entrypointFor(kind, MustCheckArity);
+ else {
+ FunctionExecutable* functionExecutable = static_cast<FunctionExecutable*>(executable);
+
+ RELEASE_ASSERT(isCall(kind) || functionExecutable->constructAbility() != ConstructAbility::CannotConstruct);
+
+ JSObject* error = functionExecutable->prepareForExecution<FunctionExecutable>(*vm, callee, scope, kind, codeBlock);
+ ASSERT(throwScope.exception() == reinterpret_cast<Exception*>(error));
+ if (error) {
+ throwException(exec, throwScope, error);
+ return;
+ }
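+ // Descriptive note: if the recorded maximum argument count for this call site is below the
+ // callee's parameter count, use the arity-check entry point; otherwise the check can be skipped.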
+ ArityCheckMode arity;
+ unsigned argumentStackSlots = callLinkInfo->maxNumArguments();
+ if (argumentStackSlots < static_cast<size_t>(codeBlock->numParameters()))
+ arity = MustCheckArity;
+ else
+ arity = ArityCheckNotRequired;
+ codePtr = functionExecutable->entrypointFor(kind, arity);
+ }
+
+ linkDirectFor(exec, *callLinkInfo, codeBlock, codePtr);
}
-inline char* virtualForWithFunction(ExecState* execCallee, CodeSpecializationKind kind, JSCell*& calleeAsFunctionCell)
+inline SlowPathReturnType virtualForWithFunction(
+ ExecState* execCallee, CallLinkInfo* callLinkInfo, JSCell*& calleeAsFunctionCell)
{
ExecState* exec = execCallee->callerFrame();
VM* vm = &exec->vm();
+ auto throwScope = DECLARE_THROW_SCOPE(*vm);
+
+ CodeSpecializationKind kind = callLinkInfo->specializationKind();
NativeCallFrameTracer tracer(vm, exec);
JSValue calleeAsValue = execCallee->calleeAsValue();
calleeAsFunctionCell = getJSFunction(calleeAsValue);
if (UNLIKELY(!calleeAsFunctionCell))
- return reinterpret_cast<char*>(handleHostCall(execCallee, calleeAsValue, kind));
+ return handleHostCall(execCallee, calleeAsValue, callLinkInfo);
JSFunction* function = jsCast<JSFunction*>(calleeAsFunctionCell);
- execCallee->setScope(function->scopeUnchecked());
+ JSScope* scope = function->scopeUnchecked();
ExecutableBase* executable = function->executable();
if (UNLIKELY(!executable->hasJITCodeFor(kind))) {
FunctionExecutable* functionExecutable = static_cast<FunctionExecutable*>(executable);
- JSObject* error = functionExecutable->prepareForExecution(execCallee, function->scope(), kind);
- if (error) {
- exec->vm().throwException(execCallee, error);
- return reinterpret_cast<char*>(vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress());
- }
- }
- return reinterpret_cast<char*>(executable->generatedJITCodeWithArityCheckFor(kind).executableAddress());
-}
-inline char* virtualFor(ExecState* execCallee, CodeSpecializationKind kind)
-{
- JSCell* calleeAsFunctionCellIgnored;
- return virtualForWithFunction(execCallee, kind, calleeAsFunctionCellIgnored);
-}
+ if (!isCall(kind) && functionExecutable->constructAbility() == ConstructAbility::CannotConstruct) {
+ throwException(exec, throwScope, createNotAConstructorError(exec, function));
+ return encodeResult(
+ vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress(),
+ reinterpret_cast<void*>(KeepTheFrame));
+ }
-static bool attemptToOptimizeClosureCall(ExecState* execCallee, JSCell* calleeAsFunctionCell, CallLinkInfo& callLinkInfo)
-{
- if (!calleeAsFunctionCell)
- return false;
-
- JSFunction* callee = jsCast<JSFunction*>(calleeAsFunctionCell);
- JSFunction* oldCallee = callLinkInfo.callee.get();
-
- if (!oldCallee
- || oldCallee->structure() != callee->structure()
- || oldCallee->executable() != callee->executable())
- return false;
-
- ASSERT(callee->executable()->hasJITCodeForCall());
- MacroAssemblerCodePtr codePtr = callee->executable()->generatedJITCodeForCall()->addressForCall();
-
- CodeBlock* codeBlock;
- if (callee->executable()->isHostFunction())
- codeBlock = 0;
- else {
- codeBlock = jsCast<FunctionExecutable*>(callee->executable())->codeBlockForCall();
- if (execCallee->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters()))
- return false;
+ CodeBlock** codeBlockSlot = execCallee->addressOfCodeBlock();
+ JSObject* error = functionExecutable->prepareForExecution<FunctionExecutable>(*vm, function, scope, kind, *codeBlockSlot);
+ if (error) {
+ throwException(exec, throwScope, error);
+ return encodeResult(
+ vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress(),
+ reinterpret_cast<void*>(KeepTheFrame));
+ }
}
-
- linkClosureCall(
- execCallee, callLinkInfo, codeBlock,
- callee->structure(), callee->executable(), codePtr);
-
- return true;
+ return encodeResult(executable->entrypointFor(
+ kind, MustCheckArity).executableAddress(),
+ reinterpret_cast<void*>(callLinkInfo->callMode() == CallMode::Tail ? ReuseTheFrame : KeepTheFrame));
}
-char* JIT_OPERATION operationLinkClosureCall(ExecState* execCallee)
+SlowPathReturnType JIT_OPERATION operationLinkPolymorphicCall(ExecState* execCallee, CallLinkInfo* callLinkInfo)
{
+ ASSERT(callLinkInfo->specializationKind() == CodeForCall);
JSCell* calleeAsFunctionCell;
- char* result = virtualForWithFunction(execCallee, CodeForCall, calleeAsFunctionCell);
- CallLinkInfo& callLinkInfo = execCallee->callerFrame()->codeBlock()->getCallLinkInfo(execCallee->returnPC());
+ SlowPathReturnType result = virtualForWithFunction(execCallee, callLinkInfo, calleeAsFunctionCell);
- if (!attemptToOptimizeClosureCall(execCallee, calleeAsFunctionCell, callLinkInfo))
- linkSlowFor(execCallee, callLinkInfo, CodeForCall);
+ linkPolymorphicCall(execCallee, *callLinkInfo, CallVariant(calleeAsFunctionCell));
return result;
}
-char* JIT_OPERATION operationVirtualCall(ExecState* execCallee)
-{
- return virtualFor(execCallee, CodeForCall);
-}
-
-char* JIT_OPERATION operationVirtualConstruct(ExecState* execCallee)
+SlowPathReturnType JIT_OPERATION operationVirtualCall(ExecState* execCallee, CallLinkInfo* callLinkInfo)
{
- return virtualFor(execCallee, CodeForConstruct);
+ JSCell* calleeAsFunctionCellIgnored;
+ return virtualForWithFunction(execCallee, callLinkInfo, calleeAsFunctionCellIgnored);
}
-
size_t JIT_OPERATION operationCompareLess(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2)
{
VM* vm = &exec->vm();
@@ -853,14 +1078,6 @@ size_t JIT_OPERATION operationCompareGreaterEq(ExecState* exec, EncodedJSValue e
return jsLessEq<false>(exec, JSValue::decode(encodedOp2), JSValue::decode(encodedOp1));
}
-size_t JIT_OPERATION operationConvertJSValueToBoolean(ExecState* exec, EncodedJSValue encodedOp)
-{
- VM* vm = &exec->vm();
- NativeCallFrameTracer tracer(vm, exec);
-
- return JSValue::decode(encodedOp).toBoolean(exec);
-}
-
size_t JIT_OPERATION operationCompareEq(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2)
{
VM* vm = &exec->vm();
@@ -878,7 +1095,7 @@ size_t JIT_OPERATION operationCompareStringEq(ExecState* exec, JSCell* left, JSC
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
- bool result = asString(left)->value(exec) == asString(right)->value(exec);
+ bool result = asString(left)->equal(exec, asString(right));
#if USE(JSVALUE64)
return JSValue::encode(jsBoolean(result));
#else
@@ -886,13 +1103,6 @@ size_t JIT_OPERATION operationCompareStringEq(ExecState* exec, JSCell* left, JSC
#endif
}
-size_t JIT_OPERATION operationHasProperty(ExecState* exec, JSObject* base, JSString* property)
-{
- int result = base->hasProperty(exec, Identifier(exec, property->value(exec)));
- return result;
-}
-
-
EncodedJSValue JIT_OPERATION operationNewArrayWithProfile(ExecState* exec, ArrayAllocationProfile* profile, const JSValue* values, int size)
{
VM* vm = &exec->vm();
@@ -915,66 +1125,117 @@ EncodedJSValue JIT_OPERATION operationNewArrayWithSizeAndProfile(ExecState* exec
return JSValue::encode(constructArrayWithSizeQuirk(exec, profile, exec->lexicalGlobalObject(), sizeValue));
}
-EncodedJSValue JIT_OPERATION operationNewFunction(ExecState* exec, JSCell* functionExecutable)
+}
+
+template<typename FunctionType>
+static EncodedJSValue operationNewFunctionCommon(ExecState* exec, JSScope* scope, JSCell* functionExecutable, bool isInvalidated)
{
- ASSERT(functionExecutable->inherits(FunctionExecutable::info()));
VM& vm = exec->vm();
+ ASSERT(functionExecutable->inherits(vm, FunctionExecutable::info()));
NativeCallFrameTracer tracer(&vm, exec);
- return JSValue::encode(JSFunction::create(vm, static_cast<FunctionExecutable*>(functionExecutable), exec->scope()));
+ if (isInvalidated)
+ return JSValue::encode(FunctionType::createWithInvalidatedReallocationWatchpoint(vm, static_cast<FunctionExecutable*>(functionExecutable), scope));
+ return JSValue::encode(FunctionType::create(vm, static_cast<FunctionExecutable*>(functionExecutable), scope));
+}
+
+extern "C" {
+
+EncodedJSValue JIT_OPERATION operationNewFunction(ExecState* exec, JSScope* scope, JSCell* functionExecutable)
+{
+ return operationNewFunctionCommon<JSFunction>(exec, scope, functionExecutable, false);
+}
+
+EncodedJSValue JIT_OPERATION operationNewFunctionWithInvalidatedReallocationWatchpoint(ExecState* exec, JSScope* scope, JSCell* functionExecutable)
+{
+ return operationNewFunctionCommon<JSFunction>(exec, scope, functionExecutable, true);
+}
+
+EncodedJSValue JIT_OPERATION operationNewGeneratorFunction(ExecState* exec, JSScope* scope, JSCell* functionExecutable)
+{
+ return operationNewFunctionCommon<JSGeneratorFunction>(exec, scope, functionExecutable, false);
+}
+
+EncodedJSValue JIT_OPERATION operationNewGeneratorFunctionWithInvalidatedReallocationWatchpoint(ExecState* exec, JSScope* scope, JSCell* functionExecutable)
+{
+ return operationNewFunctionCommon<JSGeneratorFunction>(exec, scope, functionExecutable, true);
+}
+
+EncodedJSValue JIT_OPERATION operationNewAsyncFunction(ExecState* exec, JSScope* scope, JSCell* functionExecutable)
+{
+ return operationNewFunctionCommon<JSAsyncFunction>(exec, scope, functionExecutable, false);
+}
+
+EncodedJSValue JIT_OPERATION operationNewAsyncFunctionWithInvalidatedReallocationWatchpoint(ExecState* exec, JSScope* scope, JSCell* functionExecutable)
+{
+ return operationNewFunctionCommon<JSAsyncFunction>(exec, scope, functionExecutable, true);
+}
+
+void JIT_OPERATION operationSetFunctionName(ExecState* exec, JSCell* funcCell, EncodedJSValue encodedName)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ JSFunction* func = jsCast<JSFunction*>(funcCell);
+ JSValue name = JSValue::decode(encodedName);
+ func->setFunctionName(exec, name);
}
JSCell* JIT_OPERATION operationNewObject(ExecState* exec, Structure* structure)
{
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
-
+
return constructEmptyObject(exec, structure);
}
EncodedJSValue JIT_OPERATION operationNewRegexp(ExecState* exec, void* regexpPtr)
{
+ SuperSamplerScope superSamplerScope(false);
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
+ auto scope = DECLARE_THROW_SCOPE(vm);
+
RegExp* regexp = static_cast<RegExp*>(regexpPtr);
if (!regexp->isValid()) {
- vm.throwException(exec, createSyntaxError(exec, "Invalid flags supplied to RegExp constructor."));
+ throwException(exec, scope, createSyntaxError(exec, regexp->errorMessage()));
return JSValue::encode(jsUndefined());
}
return JSValue::encode(RegExpObject::create(vm, exec->lexicalGlobalObject()->regExpStructure(), regexp));
}
-void JIT_OPERATION operationHandleWatchdogTimer(ExecState* exec)
+// The only reason for returning an UnusedPtr (instead of void) is so that we can reuse the
+// existing DFG slow path generator machinery when creating the slow path for CheckWatchdogTimer
+// in the DFG. If a DFG slow path generator that supports a void return type is added in the
+// future, we can switch to using that then.
+UnusedPtr JIT_OPERATION operationHandleWatchdogTimer(ExecState* exec)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
+ auto scope = DECLARE_THROW_SCOPE(vm);
+
+ if (UNLIKELY(vm.shouldTriggerTermination(exec)))
+ throwException(exec, scope, createTerminatedExecutionException(&vm));
- if (UNLIKELY(vm.watchdog.didFire(exec)))
- vm.throwException(exec, createTerminatedExecutionException(&vm));
+ return nullptr;
}
-void JIT_OPERATION operationThrowStaticError(ExecState* exec, EncodedJSValue encodedValue, int32_t referenceErrorFlag)
+void JIT_OPERATION operationDebug(ExecState* exec, int32_t debugHookType)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
- String message = errorDescriptionForValue(exec, JSValue::decode(encodedValue))->value(exec);
- if (referenceErrorFlag)
- vm.throwException(exec, createReferenceError(exec, message));
- else
- vm.throwException(exec, createTypeError(exec, message));
+ vm.interpreter->debug(exec, static_cast<DebugHookType>(debugHookType));
}
-void JIT_OPERATION operationDebug(ExecState* exec, int32_t debugHookID)
+#if ENABLE(DFG_JIT)
+static void updateAllPredictionsAndOptimizeAfterWarmUp(CodeBlock* codeBlock)
{
- VM& vm = exec->vm();
- NativeCallFrameTracer tracer(&vm, exec);
-
- vm.interpreter->debug(exec, static_cast<DebugHookID>(debugHookID));
+ codeBlock->updateAllPredictions();
+ codeBlock->optimizeAfterWarmUp();
}
-#if ENABLE(DFG_JIT)
-char* JIT_OPERATION operationOptimize(ExecState* exec, int32_t bytecodeIndex)
+SlowPathReturnType JIT_OPERATION operationOptimize(ExecState* exec, int32_t bytecodeIndex)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
@@ -996,7 +1257,11 @@ char* JIT_OPERATION operationOptimize(ExecState* exec, int32_t bytecodeIndex)
DeferGCForAWhile deferGC(vm.heap);
CodeBlock* codeBlock = exec->codeBlock();
-
+ if (codeBlock->jitType() != JITCode::BaselineJIT) {
+ dataLog("Unexpected code block in Baseline->DFG tier-up: ", *codeBlock, "\n");
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+
if (bytecodeIndex) {
// If we're attempting to OSR from a loop, assume that this should be
// separately optimized.
@@ -1017,29 +1282,38 @@ char* JIT_OPERATION operationOptimize(ExecState* exec, int32_t bytecodeIndex)
}
if (!codeBlock->checkIfOptimizationThresholdReached()) {
+ CODEBLOCK_LOG_EVENT(codeBlock, "delayOptimizeToDFG", ("counter = ", codeBlock->jitExecuteCounter()));
codeBlock->updateAllPredictions();
if (Options::verboseOSR())
dataLog("Choosing not to optimize ", *codeBlock, " yet, because the threshold hasn't been reached.\n");
- return 0;
+ return encodeResult(0, 0);
}
+ Debugger* debugger = codeBlock->globalObject()->debugger();
+ if (debugger && (debugger->isStepping() || codeBlock->baselineAlternative()->hasDebuggerRequests())) {
+ CODEBLOCK_LOG_EVENT(codeBlock, "delayOptimizeToDFG", ("debugger is stepping or has requests"));
+ updateAllPredictionsAndOptimizeAfterWarmUp(codeBlock);
+ return encodeResult(0, 0);
+ }
+
if (codeBlock->m_shouldAlwaysBeInlined) {
- codeBlock->updateAllPredictions();
- codeBlock->optimizeAfterWarmUp();
+ CODEBLOCK_LOG_EVENT(codeBlock, "delayOptimizeToDFG", ("should always be inlined"));
+ updateAllPredictionsAndOptimizeAfterWarmUp(codeBlock);
if (Options::verboseOSR())
dataLog("Choosing not to optimize ", *codeBlock, " yet, because m_shouldAlwaysBeInlined == true.\n");
- return 0;
+ return encodeResult(0, 0);
}
// We cannot be in the process of asynchronous compilation and also have an optimized
// replacement.
+ DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull();
ASSERT(
- !vm.worklist
- || !(vm.worklist->compilationState(DFG::CompilationKey(codeBlock, DFG::DFGMode)) != DFG::Worklist::NotKnown
+ !worklist
+ || !(worklist->compilationState(DFG::CompilationKey(codeBlock, DFG::DFGMode)) != DFG::Worklist::NotKnown
&& codeBlock->hasOptimizedReplacement()));
DFG::Worklist::State worklistState;
- if (vm.worklist) {
+ if (worklist) {
// The call to DFG::Worklist::completeAllReadyPlansForVM() will complete all ready
// (i.e. compiled) code blocks. But if it completes ours, we also need to know
// what the result was so that we don't plow ahead and attempt OSR or immediate
@@ -1058,17 +1332,18 @@ char* JIT_OPERATION operationOptimize(ExecState* exec, int32_t bytecodeIndex)
// probably a waste of memory. Our goal here is to complete code blocks as soon as
// possible in order to minimize the chances of us executing baseline code after
// optimized code is already available.
- worklistState = vm.worklist->completeAllReadyPlansForVM(
+ worklistState = worklist->completeAllReadyPlansForVM(
vm, DFG::CompilationKey(codeBlock, DFG::DFGMode));
} else
worklistState = DFG::Worklist::NotKnown;
if (worklistState == DFG::Worklist::Compiling) {
+ CODEBLOCK_LOG_EVENT(codeBlock, "delayOptimizeToDFG", ("compiling"));
// We cannot be in the process of asynchronous compilation and also have an optimized
// replacement.
RELEASE_ASSERT(!codeBlock->hasOptimizedReplacement());
codeBlock->setOptimizationThresholdBasedOnCompilationResult(CompilationDeferred);
- return 0;
+ return encodeResult(0, 0);
}
if (worklistState == DFG::Worklist::Compiled) {
@@ -1078,10 +1353,11 @@ char* JIT_OPERATION operationOptimize(ExecState* exec, int32_t bytecodeIndex)
// CodeBlock::setOptimizationThresholdBasedOnCompilationResult() and we have
// nothing left to do.
if (!codeBlock->hasOptimizedReplacement()) {
+ CODEBLOCK_LOG_EVENT(codeBlock, "delayOptimizeToDFG", ("compiled and failed"));
codeBlock->updateAllPredictions();
if (Options::verboseOSR())
dataLog("Code block ", *codeBlock, " was compiled but it doesn't have an optimized replacement.\n");
- return 0;
+ return encodeResult(0, 0);
}
} else if (codeBlock->hasOptimizedReplacement()) {
if (Options::verboseOSR())
@@ -1100,22 +1376,24 @@ char* JIT_OPERATION operationOptimize(ExecState* exec, int32_t bytecodeIndex)
// shouldReoptimizeFromLoopNow() to always return true. But we make it do some
// additional checking anyway, to reduce the amount of recompilation thrashing.
if (codeBlock->replacement()->shouldReoptimizeFromLoopNow()) {
+ CODEBLOCK_LOG_EVENT(codeBlock, "delayOptimizeToDFG", ("should reoptimize from loop now"));
if (Options::verboseOSR()) {
dataLog(
"Triggering reoptimization of ", *codeBlock,
"(", *codeBlock->replacement(), ") (in loop).\n");
}
- codeBlock->replacement()->jettison(CountReoptimization);
- return 0;
+ codeBlock->replacement()->jettison(Profiler::JettisonDueToBaselineLoopReoptimizationTrigger, CountReoptimization);
+ return encodeResult(0, 0);
}
} else {
if (!codeBlock->shouldOptimizeNow()) {
+ CODEBLOCK_LOG_EVENT(codeBlock, "delayOptimizeToDFG", ("insufficient profiling"));
if (Options::verboseOSR()) {
dataLog(
"Delaying optimization for ", *codeBlock,
" because of insufficient profiling.\n");
}
- return 0;
+ return encodeResult(0, 0);
}
if (Options::verboseOSR())
@@ -1123,46 +1401,42 @@ char* JIT_OPERATION operationOptimize(ExecState* exec, int32_t bytecodeIndex)
unsigned numVarsWithValues;
if (bytecodeIndex)
- numVarsWithValues = codeBlock->m_numVars;
+ numVarsWithValues = codeBlock->m_numCalleeLocals;
else
numVarsWithValues = 0;
Operands<JSValue> mustHandleValues(codeBlock->numParameters(), numVarsWithValues);
+ int localsUsedForCalleeSaves = static_cast<int>(CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters());
for (size_t i = 0; i < mustHandleValues.size(); ++i) {
int operand = mustHandleValues.operandForIndex(i);
- if (operandIsArgument(operand)
- && !VirtualRegister(operand).toArgument()
- && codeBlock->codeType() == FunctionCode
- && codeBlock->specializationKind() == CodeForConstruct) {
- // Ugh. If we're in a constructor, the 'this' argument may hold garbage. It will
- // also never be used. It doesn't matter what we put into the value for this,
- // but it has to be an actual value that can be grokked by subsequent DFG passes,
- // so we sanitize it here by turning it into Undefined.
- mustHandleValues[i] = jsUndefined();
- } else
- mustHandleValues[i] = exec->uncheckedR(operand).jsValue();
+ if (operandIsLocal(operand) && VirtualRegister(operand).toLocal() < localsUsedForCalleeSaves)
+ continue;
+ mustHandleValues[i] = exec->uncheckedR(operand).jsValue();
}
+ CodeBlock* replacementCodeBlock = codeBlock->newReplacement();
CompilationResult result = DFG::compile(
- vm, codeBlock->newReplacement().get(), DFG::DFGMode, bytecodeIndex,
- mustHandleValues, JITToDFGDeferredCompilationCallback::create(),
- vm.ensureWorklist());
+ vm, replacementCodeBlock, nullptr, DFG::DFGMode, bytecodeIndex,
+ mustHandleValues, JITToDFGDeferredCompilationCallback::create());
- if (result != CompilationSuccessful)
- return 0;
+ if (result != CompilationSuccessful) {
+ CODEBLOCK_LOG_EVENT(codeBlock, "delayOptimizeToDFG", ("compilation failed"));
+ return encodeResult(0, 0);
+ }
}
CodeBlock* optimizedCodeBlock = codeBlock->replacement();
ASSERT(JITCode::isOptimizingJIT(optimizedCodeBlock->jitType()));
- if (void* address = DFG::prepareOSREntry(exec, optimizedCodeBlock, bytecodeIndex)) {
+ if (void* dataBuffer = DFG::prepareOSREntry(exec, optimizedCodeBlock, bytecodeIndex)) {
+ CODEBLOCK_LOG_EVENT(optimizedCodeBlock, "osrEntry", ("at bc#", bytecodeIndex));
if (Options::verboseOSR()) {
dataLog(
- "Performing OSR ", *codeBlock, " -> ", *optimizedCodeBlock, ", address ",
- RawPointer(OUR_RETURN_ADDRESS), " -> ", RawPointer(address), ".\n");
+ "Performing OSR ", *codeBlock, " -> ", *optimizedCodeBlock, ".\n");
}
codeBlock->optimizeSoon();
- return static_cast<char*>(address);
+ codeBlock->unlinkedCodeBlock()->setDidOptimize(TrueTriState);
+ return encodeResult(vm.getCTIStub(DFG::osrEntryThunkGenerator).code().executableAddress(), dataBuffer);
}
if (Options::verboseOSR()) {
@@ -1185,20 +1459,22 @@ char* JIT_OPERATION operationOptimize(ExecState* exec, int32_t bytecodeIndex)
// right now. So, we only trigger reoptimization only upon the more conservative (non-loop)
// reoptimization trigger.
if (optimizedCodeBlock->shouldReoptimizeNow()) {
+ CODEBLOCK_LOG_EVENT(codeBlock, "delayOptimizeToDFG", ("should reoptimize now"));
if (Options::verboseOSR()) {
dataLog(
"Triggering reoptimization of ", *codeBlock, " -> ",
*codeBlock->replacement(), " (after OSR fail).\n");
}
- optimizedCodeBlock->jettison(CountReoptimization);
- return 0;
+ optimizedCodeBlock->jettison(Profiler::JettisonDueToBaselineLoopReoptimizationTriggerOnOSREntryFail, CountReoptimization);
+ return encodeResult(0, 0);
}
// OSR failed this time, but it might succeed next time! Let the code run a bit
// longer and then try again.
codeBlock->optimizeAfterWarmUp();
- return 0;
+ CODEBLOCK_LOG_EVENT(codeBlock, "delayOptimizeToDFG", ("OSR failed"));
+ return encodeResult(0, 0);
}
#endif
@@ -1212,31 +1488,25 @@ void JIT_OPERATION operationPutByIndex(ExecState* exec, EncodedJSValue encodedAr
asArray(arrayValue)->putDirectIndex(exec, index, JSValue::decode(encodedValue));
}
-#if USE(JSVALUE64)
-void JIT_OPERATION operationPutGetterSetter(ExecState* exec, EncodedJSValue encodedObjectValue, Identifier* identifier, EncodedJSValue encodedGetterValue, EncodedJSValue encodedSetterValue)
+enum class AccessorType {
+ Getter,
+ Setter
+};
+
+static void putAccessorByVal(ExecState* exec, JSObject* base, JSValue subscript, int32_t attribute, JSObject* accessor, AccessorType accessorType)
{
VM& vm = exec->vm();
- NativeCallFrameTracer tracer(&vm, exec);
-
- ASSERT(JSValue::decode(encodedObjectValue).isObject());
- JSObject* baseObj = asObject(JSValue::decode(encodedObjectValue));
-
- GetterSetter* accessor = GetterSetter::create(vm);
-
- JSValue getter = JSValue::decode(encodedGetterValue);
- JSValue setter = JSValue::decode(encodedSetterValue);
- ASSERT(getter.isObject() || getter.isUndefined());
- ASSERT(setter.isObject() || setter.isUndefined());
- ASSERT(getter.isObject() || setter.isObject());
+ auto scope = DECLARE_THROW_SCOPE(vm);
+ auto propertyKey = subscript.toPropertyKey(exec);
+ RETURN_IF_EXCEPTION(scope, void());
- if (!getter.isUndefined())
- accessor->setGetter(vm, asObject(getter));
- if (!setter.isUndefined())
- accessor->setSetter(vm, asObject(setter));
- baseObj->putDirectAccessor(exec, *identifier, accessor, Accessor);
+ if (accessorType == AccessorType::Getter)
+ base->putGetter(exec, propertyKey, accessor, attribute);
+ else
+ base->putSetter(exec, propertyKey, accessor, attribute);
}
-#else
-void JIT_OPERATION operationPutGetterSetter(ExecState* exec, JSCell* object, Identifier* identifier, JSCell* getter, JSCell* setter)
+
+void JIT_OPERATION operationPutGetterById(ExecState* exec, JSCell* object, UniquedStringImpl* uid, int32_t options, JSCell* getter)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
@@ -1244,208 +1514,372 @@ void JIT_OPERATION operationPutGetterSetter(ExecState* exec, JSCell* object, Ide
ASSERT(object && object->isObject());
JSObject* baseObj = object->getObject();
- GetterSetter* accessor = GetterSetter::create(vm);
-
- ASSERT(!getter || getter->isObject());
- ASSERT(!setter || setter->isObject());
- ASSERT(getter || setter);
-
- if (getter)
- accessor->setGetter(vm, getter->getObject());
- if (setter)
- accessor->setSetter(vm, setter->getObject());
- baseObj->putDirectAccessor(exec, *identifier, accessor, Accessor);
+ ASSERT(getter->isObject());
+ baseObj->putGetter(exec, uid, getter, options);
}
-#endif
-void JIT_OPERATION operationPushNameScope(ExecState* exec, Identifier* identifier, EncodedJSValue encodedValue, int32_t attibutes)
+void JIT_OPERATION operationPutSetterById(ExecState* exec, JSCell* object, UniquedStringImpl* uid, int32_t options, JSCell* setter)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
- JSNameScope* scope = JSNameScope::create(exec, *identifier, JSValue::decode(encodedValue), attibutes);
+ ASSERT(object && object->isObject());
+ JSObject* baseObj = object->getObject();
- exec->setScope(scope);
+ ASSERT(setter->isObject());
+ baseObj->putSetter(exec, uid, setter, options);
}
-void JIT_OPERATION operationPushWithScope(ExecState* exec, EncodedJSValue encodedValue)
+void JIT_OPERATION operationPutGetterByVal(ExecState* exec, JSCell* base, EncodedJSValue encodedSubscript, int32_t attribute, JSCell* getter)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
- JSObject* o = JSValue::decode(encodedValue).toObject(exec);
- if (vm.exception())
- return;
-
- exec->setScope(JSWithScope::create(exec, o));
+ putAccessorByVal(exec, asObject(base), JSValue::decode(encodedSubscript), attribute, asObject(getter), AccessorType::Getter);
}
-void JIT_OPERATION operationPopScope(ExecState* exec)
+void JIT_OPERATION operationPutSetterByVal(ExecState* exec, JSCell* base, EncodedJSValue encodedSubscript, int32_t attribute, JSCell* setter)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
- exec->setScope(exec->scope()->next());
+ putAccessorByVal(exec, asObject(base), JSValue::decode(encodedSubscript), attribute, asObject(setter), AccessorType::Setter);
}
-void JIT_OPERATION operationProfileDidCall(ExecState* exec, EncodedJSValue encodedValue)
+#if USE(JSVALUE64)
+void JIT_OPERATION operationPutGetterSetter(ExecState* exec, JSCell* object, UniquedStringImpl* uid, int32_t attribute, EncodedJSValue encodedGetterValue, EncodedJSValue encodedSetterValue)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
- if (LegacyProfiler* profiler = vm.enabledProfiler())
- profiler->didExecute(exec, JSValue::decode(encodedValue));
+ ASSERT(object && object->isObject());
+ JSObject* baseObj = asObject(object);
+
+ GetterSetter* accessor = GetterSetter::create(vm, exec->lexicalGlobalObject());
+
+ JSValue getter = JSValue::decode(encodedGetterValue);
+ JSValue setter = JSValue::decode(encodedSetterValue);
+ ASSERT(getter.isObject() || getter.isUndefined());
+ ASSERT(setter.isObject() || setter.isUndefined());
+ ASSERT(getter.isObject() || setter.isObject());
+
+ if (!getter.isUndefined())
+ accessor->setGetter(vm, exec->lexicalGlobalObject(), asObject(getter));
+ if (!setter.isUndefined())
+ accessor->setSetter(vm, exec->lexicalGlobalObject(), asObject(setter));
+ baseObj->putDirectAccessor(exec, uid, accessor, attribute);
}
-void JIT_OPERATION operationProfileWillCall(ExecState* exec, EncodedJSValue encodedValue)
+#else
+void JIT_OPERATION operationPutGetterSetter(ExecState* exec, JSCell* object, UniquedStringImpl* uid, int32_t attribute, JSCell* getter, JSCell* setter)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
- if (LegacyProfiler* profiler = vm.enabledProfiler())
- profiler->willExecute(exec, JSValue::decode(encodedValue));
-}
+ ASSERT(object && object->isObject());
+ JSObject* baseObj = asObject(object);
-EncodedJSValue JIT_OPERATION operationCheckHasInstance(ExecState* exec, EncodedJSValue encodedValue, EncodedJSValue encodedBaseVal)
-{
- VM* vm = &exec->vm();
- NativeCallFrameTracer tracer(vm, exec);
+ GetterSetter* accessor = GetterSetter::create(vm, exec->lexicalGlobalObject());
- JSValue value = JSValue::decode(encodedValue);
- JSValue baseVal = JSValue::decode(encodedBaseVal);
-
- if (baseVal.isObject()) {
- JSObject* baseObject = asObject(baseVal);
- ASSERT(!baseObject->structure()->typeInfo().implementsDefaultHasInstance());
- if (baseObject->structure()->typeInfo().implementsHasInstance()) {
- bool result = baseObject->methodTable()->customHasInstance(baseObject, exec, value);
- return JSValue::encode(jsBoolean(result));
- }
- }
+ ASSERT(!getter || getter->isObject());
+ ASSERT(!setter || setter->isObject());
+ ASSERT(getter || setter);
- vm->throwException(exec, createInvalidParameterError(exec, "instanceof", baseVal));
- return JSValue::encode(JSValue());
+ if (getter)
+ accessor->setGetter(vm, exec->lexicalGlobalObject(), getter->getObject());
+ if (setter)
+ accessor->setSetter(vm, exec->lexicalGlobalObject(), setter->getObject());
+ baseObj->putDirectAccessor(exec, uid, accessor, attribute);
}
+#endif
-JSCell* JIT_OPERATION operationCreateActivation(ExecState* exec, int32_t offset)
+void JIT_OPERATION operationPopScope(ExecState* exec, int32_t scopeReg)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
- JSActivation* activation = JSActivation::create(vm, exec, exec->registers() + offset, exec->codeBlock());
- exec->setScope(activation);
- return activation;
+
+ JSScope* scope = exec->uncheckedR(scopeReg).Register::scope();
+ exec->uncheckedR(scopeReg) = scope->next();
}
-JSCell* JIT_OPERATION operationCreateArguments(ExecState* exec)
+int32_t JIT_OPERATION operationInstanceOfCustom(ExecState* exec, EncodedJSValue encodedValue, JSObject* constructor, EncodedJSValue encodedHasInstance)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
- // NB: This needs to be exceedingly careful with top call frame tracking, since it
- // may be called from OSR exit, while the state of the call stack is bizarre.
- Arguments* result = Arguments::create(vm, exec);
- ASSERT(!vm.exception());
- return result;
+
+ JSValue value = JSValue::decode(encodedValue);
+ JSValue hasInstanceValue = JSValue::decode(encodedHasInstance);
+
+ ASSERT(hasInstanceValue != exec->lexicalGlobalObject()->functionProtoHasInstanceSymbolFunction() || !constructor->structure()->typeInfo().implementsDefaultHasInstance());
+
+ if (constructor->hasInstance(exec, value, hasInstanceValue))
+ return 1;
+ return 0;
}
-EncodedJSValue JIT_OPERATION operationGetArgumentsLength(ExecState* exec, int32_t argumentsRegister)
-{
- VM& vm = exec->vm();
- NativeCallFrameTracer tracer(&vm, exec);
- // Here we can assume that the argumernts were created. Because otherwise the JIT code would
- // have not made this call.
- Identifier ident(&vm, "length");
- JSValue baseValue = exec->uncheckedR(argumentsRegister).jsValue();
- PropertySlot slot(baseValue);
- return JSValue::encode(baseValue.get(exec, ident, slot));
}
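+// Descriptive note: helper for the by-val operations below — true when |object| is a DirectArguments
+// or ScopedArguments whose entry at |index| is still mapped, so the fast indexed read is valid.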
+static bool canAccessArgumentIndexQuickly(JSObject& object, uint32_t index)
+{
+ switch (object.structure()->typeInfo().type()) {
+ case DirectArgumentsType: {
+ DirectArguments* directArguments = jsCast<DirectArguments*>(&object);
+ if (directArguments->isMappedArgumentInDFG(index))
+ return true;
+ break;
+ }
+ case ScopedArgumentsType: {
+ ScopedArguments* scopedArguments = jsCast<ScopedArguments*>(&object);
+ if (scopedArguments->isMappedArgumentInDFG(index))
+ return true;
+ break;
+ }
+ default:
+ break;
+ }
+ return false;
}
-static JSValue getByVal(ExecState* exec, JSValue baseValue, JSValue subscript, ReturnAddressPtr returnAddress)
+static JSValue getByVal(ExecState* exec, JSValue baseValue, JSValue subscript, ByValInfo* byValInfo, ReturnAddressPtr returnAddress)
{
+ VM& vm = exec->vm();
+ auto scope = DECLARE_THROW_SCOPE(vm);
+
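+ // Descriptive note: fast path — a cell base with a string subscript can use fastGetOwnProperty
+ // when the structure allows it and the subscript is already an atomic string.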
if (LIKELY(baseValue.isCell() && subscript.isString())) {
- if (JSValue result = baseValue.asCell()->fastGetOwnProperty(exec, asString(subscript)->value(exec)))
- return result;
+ Structure& structure = *baseValue.asCell()->structure(vm);
+ if (JSCell::canUseFastGetOwnProperty(structure)) {
+ if (RefPtr<AtomicStringImpl> existingAtomicString = asString(subscript)->toExistingAtomicString(exec)) {
+ if (JSValue result = baseValue.asCell()->fastGetOwnProperty(vm, structure, existingAtomicString.get())) {
+ ASSERT(exec->bytecodeOffset());
+ if (byValInfo->stubInfo && byValInfo->cachedId.impl() != existingAtomicString)
+ byValInfo->tookSlowPath = true;
+ return result;
+ }
+ }
+ }
}
if (subscript.isUInt32()) {
+ ASSERT(exec->bytecodeOffset());
+ byValInfo->tookSlowPath = true;
+
uint32_t i = subscript.asUInt32();
- if (isJSString(baseValue) && asString(baseValue)->canGetIndex(i)) {
- ctiPatchCallByReturnAddress(exec->codeBlock(), returnAddress, FunctionPtr(operationGetByValString));
- return asString(baseValue)->getIndex(exec, i);
+ if (isJSString(baseValue)) {
+ if (asString(baseValue)->canGetIndex(i)) {
+ ctiPatchCallByReturnAddress(returnAddress, FunctionPtr(operationGetByValString));
+ return asString(baseValue)->getIndex(exec, i);
+ }
+ byValInfo->arrayProfile->setOutOfBounds();
+ } else if (baseValue.isObject()) {
+ JSObject* object = asObject(baseValue);
+ if (object->canGetIndexQuickly(i))
+ return object->getIndexQuickly(i);
+
+ if (!canAccessArgumentIndexQuickly(*object, i)) {
+ // FIXME: This will make us think that in-bounds typed array accesses are actually
+ // out-of-bounds.
+ // https://bugs.webkit.org/show_bug.cgi?id=149886
+ byValInfo->arrayProfile->setOutOfBounds();
+ }
}
+
return baseValue.get(exec, i);
}
- if (isName(subscript))
- return baseValue.get(exec, jsCast<NameInstance*>(subscript.asCell())->privateName());
+ baseValue.requireObjectCoercible(exec);
+ RETURN_IF_EXCEPTION(scope, JSValue());
+ auto property = subscript.toPropertyKey(exec);
+ RETURN_IF_EXCEPTION(scope, JSValue());
+
+ ASSERT(exec->bytecodeOffset());
+ if (byValInfo->stubInfo && (!isStringOrSymbol(subscript) || byValInfo->cachedId != property))
+ byValInfo->tookSlowPath = true;
- Identifier property(exec, subscript.toString(exec)->value(exec));
return baseValue.get(exec, property);
}
+static OptimizationResult tryGetByValOptimize(ExecState* exec, JSValue baseValue, JSValue subscript, ByValInfo* byValInfo, ReturnAddressPtr returnAddress)
+{
+ // See if it's worth optimizing this at all.
+ OptimizationResult optimizationResult = OptimizationResult::NotOptimized;
+
+ VM& vm = exec->vm();
+
+ if (baseValue.isObject() && subscript.isInt32()) {
+ JSObject* object = asObject(baseValue);
+
+ ASSERT(exec->bytecodeOffset());
+ ASSERT(!byValInfo->stubRoutine);
+
+ if (hasOptimizableIndexing(object->structure(vm))) {
+ // Attempt to optimize.
+ Structure* structure = object->structure(vm);
+ JITArrayMode arrayMode = jitArrayModeForStructure(structure);
+ if (arrayMode != byValInfo->arrayMode) {
+ // If we reached this case, we got an interesting array mode we did not expect when we compiled.
+ // Let's update the profile to do better next time.
+ CodeBlock* codeBlock = exec->codeBlock();
+ ConcurrentJSLocker locker(codeBlock->m_lock);
+ byValInfo->arrayProfile->computeUpdatedPrediction(locker, codeBlock, structure);
+
+ JIT::compileGetByVal(&vm, exec->codeBlock(), byValInfo, returnAddress, arrayMode);
+ optimizationResult = OptimizationResult::Optimized;
+ }
+ }
+
+ // If we failed to patch and we have some object that intercepts indexed get, then don't even wait until 10 times.
+ if (optimizationResult != OptimizationResult::Optimized && object->structure(vm)->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero())
+ optimizationResult = OptimizationResult::GiveUp;
+ }
+
+ if (baseValue.isObject() && isStringOrSymbol(subscript)) {
+ const Identifier propertyName = subscript.toPropertyKey(exec);
+ if (subscript.isSymbol() || !parseIndex(propertyName)) {
+ ASSERT(exec->bytecodeOffset());
+ ASSERT(!byValInfo->stubRoutine);
+ if (byValInfo->seen) {
+ if (byValInfo->cachedId == propertyName) {
+ JIT::compileGetByValWithCachedId(&vm, exec->codeBlock(), byValInfo, returnAddress, propertyName);
+ optimizationResult = OptimizationResult::Optimized;
+ } else {
+ // Seems like a generic property access site.
+ optimizationResult = OptimizationResult::GiveUp;
+ }
+ } else {
+ CodeBlock* codeBlock = exec->codeBlock();
+ ConcurrentJSLocker locker(codeBlock->m_lock);
+ byValInfo->seen = true;
+ byValInfo->cachedId = propertyName;
+ if (subscript.isSymbol())
+ byValInfo->cachedSymbol.set(vm, codeBlock, asSymbol(subscript));
+ optimizationResult = OptimizationResult::SeenOnce;
+ }
+ }
+ }
+
+ if (optimizationResult != OptimizationResult::Optimized && optimizationResult != OptimizationResult::SeenOnce) {
+ // If we take slow path more than 10 times without patching then make sure we
+ // never make that mistake again. For cases where we see non-index-intercepting
+ // objects, this gives 10 iterations worth of opportunity for us to observe
+ // that the get_by_val may be polymorphic. We count up slowPathCount even if
+ // the result is GiveUp.
+ if (++byValInfo->slowPathCount >= 10)
+ optimizationResult = OptimizationResult::GiveUp;
+ }
+
+ return optimizationResult;
+}
+
extern "C" {
-
-EncodedJSValue JIT_OPERATION operationGetByValGeneric(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript)
+
+EncodedJSValue JIT_OPERATION operationGetByValGeneric(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo* byValInfo)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
JSValue baseValue = JSValue::decode(encodedBase);
JSValue subscript = JSValue::decode(encodedSubscript);
- JSValue result = getByVal(exec, baseValue, subscript, ReturnAddressPtr(OUR_RETURN_ADDRESS));
+ JSValue result = getByVal(exec, baseValue, subscript, byValInfo, ReturnAddressPtr(OUR_RETURN_ADDRESS));
return JSValue::encode(result);
}
-EncodedJSValue JIT_OPERATION operationGetByValDefault(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript)
+EncodedJSValue JIT_OPERATION operationGetByValOptimize(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo* byValInfo)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ JSValue baseValue = JSValue::decode(encodedBase);
+ JSValue subscript = JSValue::decode(encodedSubscript);
+ ReturnAddressPtr returnAddress = ReturnAddressPtr(OUR_RETURN_ADDRESS);
+ if (tryGetByValOptimize(exec, baseValue, subscript, byValInfo, returnAddress) == OptimizationResult::GiveUp) {
+ // Don't ever try to optimize.
+ byValInfo->tookSlowPath = true;
+ ctiPatchCallByReturnAddress(returnAddress, FunctionPtr(operationGetByValGeneric));
+ }
+
+ return JSValue::encode(getByVal(exec, baseValue, subscript, byValInfo, returnAddress));
+}
+
+EncodedJSValue JIT_OPERATION operationHasIndexedPropertyDefault(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo* byValInfo)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
JSValue baseValue = JSValue::decode(encodedBase);
JSValue subscript = JSValue::decode(encodedSubscript);
- if (baseValue.isObject() && subscript.isInt32()) {
- // See if it's worth optimizing this at all.
- JSObject* object = asObject(baseValue);
- bool didOptimize = false;
+ ASSERT(baseValue.isObject());
+ ASSERT(subscript.isUInt32());
- unsigned bytecodeOffset = exec->locationAsBytecodeOffset();
- ASSERT(bytecodeOffset);
- ByValInfo& byValInfo = exec->codeBlock()->getByValInfo(bytecodeOffset - 1);
- ASSERT(!byValInfo.stubRoutine);
-
- if (hasOptimizableIndexing(object->structure())) {
- // Attempt to optimize.
- JITArrayMode arrayMode = jitArrayModeForStructure(object->structure());
- if (arrayMode != byValInfo.arrayMode) {
- JIT::compileGetByVal(&vm, exec->codeBlock(), &byValInfo, ReturnAddressPtr(OUR_RETURN_ADDRESS), arrayMode);
- didOptimize = true;
- }
+ JSObject* object = asObject(baseValue);
+ bool didOptimize = false;
+
+ ASSERT(exec->bytecodeOffset());
+ ASSERT(!byValInfo->stubRoutine);
+
+ if (hasOptimizableIndexing(object->structure(vm))) {
+ // Attempt to optimize.
+ JITArrayMode arrayMode = jitArrayModeForStructure(object->structure(vm));
+ if (arrayMode != byValInfo->arrayMode) {
+ JIT::compileHasIndexedProperty(&vm, exec->codeBlock(), byValInfo, ReturnAddressPtr(OUR_RETURN_ADDRESS), arrayMode);
+ didOptimize = true;
}
-
- if (!didOptimize) {
- // If we take slow path more than 10 times without patching then make sure we
- // never make that mistake again. Or, if we failed to patch and we have some object
- // that intercepts indexed get, then don't even wait until 10 times. For cases
- // where we see non-index-intercepting objects, this gives 10 iterations worth of
- // opportunity for us to observe that the get_by_val may be polymorphic.
- if (++byValInfo.slowPathCount >= 10
- || object->structure()->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero()) {
- // Don't ever try to optimize.
- RepatchBuffer repatchBuffer(exec->codeBlock());
- repatchBuffer.relinkCallerToFunction(ReturnAddressPtr(OUR_RETURN_ADDRESS), FunctionPtr(operationGetByValGeneric));
- }
+ }
+
+ if (!didOptimize) {
+ // If we take slow path more than 10 times without patching then make sure we
+ // never make that mistake again. Or, if we failed to patch and we have some object
+ // that intercepts indexed get, then don't even wait until 10 times. For cases
+ // where we see non-index-intercepting objects, this gives 10 iterations worth of
+ // opportunity for us to observe that the get_by_val may be polymorphic.
+ if (++byValInfo->slowPathCount >= 10
+ || object->structure(vm)->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero()) {
+ // Don't ever try to optimize.
+ ctiPatchCallByReturnAddress(ReturnAddressPtr(OUR_RETURN_ADDRESS), FunctionPtr(operationHasIndexedPropertyGeneric));
}
}
+
+ uint32_t index = subscript.asUInt32();
+ if (object->canGetIndexQuickly(index))
+ return JSValue::encode(JSValue(JSValue::JSTrue));
+
+ if (!canAccessArgumentIndexQuickly(*object, index)) {
+ // FIXME: This will make us think that in-bounds typed array accesses are actually
+ // out-of-bounds.
+ // https://bugs.webkit.org/show_bug.cgi?id=149886
+ byValInfo->arrayProfile->setOutOfBounds();
+ }
+ return JSValue::encode(jsBoolean(object->hasPropertyGeneric(exec, index, PropertySlot::InternalMethodType::GetOwnProperty)));
+}
- JSValue result = getByVal(exec, baseValue, subscript, ReturnAddressPtr(OUR_RETURN_ADDRESS));
- return JSValue::encode(result);
+EncodedJSValue JIT_OPERATION operationHasIndexedPropertyGeneric(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo* byValInfo)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+ JSValue baseValue = JSValue::decode(encodedBase);
+ JSValue subscript = JSValue::decode(encodedSubscript);
+
+ ASSERT(baseValue.isObject());
+ ASSERT(subscript.isUInt32());
+
+ JSObject* object = asObject(baseValue);
+ uint32_t index = subscript.asUInt32();
+ if (object->canGetIndexQuickly(index))
+ return JSValue::encode(JSValue(JSValue::JSTrue));
+
+ if (!canAccessArgumentIndexQuickly(*object, index)) {
+ // FIXME: This will make us think that in-bounds typed array accesses are actually
+ // out-of-bounds.
+ // https://bugs.webkit.org/show_bug.cgi?id=149886
+ byValInfo->arrayProfile->setOutOfBounds();
+ }
+ return JSValue::encode(jsBoolean(object->hasPropertyGeneric(exec, subscript.asUInt32(), PropertySlot::InternalMethodType::GetOwnProperty)));
}
-EncodedJSValue JIT_OPERATION operationGetByValString(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript)
+EncodedJSValue JIT_OPERATION operationGetByValString(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo* byValInfo)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
+ auto scope = DECLARE_THROW_SCOPE(vm);
JSValue baseValue = JSValue::decode(encodedBase);
JSValue subscript = JSValue::decode(encodedSubscript);
@@ -1456,61 +1890,71 @@ EncodedJSValue JIT_OPERATION operationGetByValString(ExecState* exec, EncodedJSV
result = asString(baseValue)->getIndex(exec, i);
else {
result = baseValue.get(exec, i);
- if (!isJSString(baseValue))
- ctiPatchCallByReturnAddress(exec->codeBlock(), ReturnAddressPtr(OUR_RETURN_ADDRESS), FunctionPtr(operationGetByValDefault));
+ if (!isJSString(baseValue)) {
+ ASSERT(exec->bytecodeOffset());
+ ctiPatchCallByReturnAddress(ReturnAddressPtr(OUR_RETURN_ADDRESS), FunctionPtr(byValInfo->stubRoutine ? operationGetByValGeneric : operationGetByValOptimize));
+ }
}
- } else if (isName(subscript))
- result = baseValue.get(exec, jsCast<NameInstance*>(subscript.asCell())->privateName());
- else {
- Identifier property(exec, subscript.toString(exec)->value(exec));
+ } else {
+ baseValue.requireObjectCoercible(exec);
+ RETURN_IF_EXCEPTION(scope, encodedJSValue());
+ auto property = subscript.toPropertyKey(exec);
+ RETURN_IF_EXCEPTION(scope, encodedJSValue());
result = baseValue.get(exec, property);
}
return JSValue::encode(result);
}
-
-void JIT_OPERATION operationTearOffActivation(ExecState* exec, JSCell* activationCell)
-{
- VM& vm = exec->vm();
- NativeCallFrameTracer tracer(&vm, exec);
-
- ASSERT(exec->codeBlock()->needsFullScopeChain());
- jsCast<JSActivation*>(activationCell)->tearOff(vm);
-}
-void JIT_OPERATION operationTearOffArguments(ExecState* exec, JSCell* argumentsCell, JSCell* activationCell)
+EncodedJSValue JIT_OPERATION operationDeleteByIdJSResult(ExecState* exec, EncodedJSValue base, UniquedStringImpl* uid)
{
- ASSERT(exec->codeBlock()->usesArguments());
- if (activationCell) {
- jsCast<Arguments*>(argumentsCell)->didTearOffActivation(exec, jsCast<JSActivation*>(activationCell));
- return;
- }
- jsCast<Arguments*>(argumentsCell)->tearOff(exec);
+ return JSValue::encode(jsBoolean(operationDeleteById(exec, base, uid)));
}
-EncodedJSValue JIT_OPERATION operationDeleteById(ExecState* exec, EncodedJSValue encodedBase, const Identifier* identifier)
+size_t JIT_OPERATION operationDeleteById(ExecState* exec, EncodedJSValue encodedBase, UniquedStringImpl* uid)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
+ auto scope = DECLARE_THROW_SCOPE(vm);
JSObject* baseObj = JSValue::decode(encodedBase).toObject(exec);
- bool couldDelete = baseObj->methodTable()->deleteProperty(baseObj, exec, *identifier);
- JSValue result = jsBoolean(couldDelete);
+ if (!baseObj)
+ return false;
+ bool couldDelete = baseObj->methodTable(vm)->deleteProperty(baseObj, exec, Identifier::fromUid(&vm, uid));
if (!couldDelete && exec->codeBlock()->isStrictMode())
- vm.throwException(exec, createTypeError(exec, "Unable to delete property."));
- return JSValue::encode(result);
+ throwTypeError(exec, scope, ASCIILiteral(UnableToDeletePropertyError));
+ return couldDelete;
+}
+
+EncodedJSValue JIT_OPERATION operationDeleteByValJSResult(ExecState* exec, EncodedJSValue base, EncodedJSValue key)
+{
+ return JSValue::encode(jsBoolean(operationDeleteByVal(exec, base, key)));
}
-JSCell* JIT_OPERATION operationGetPNames(ExecState* exec, JSObject* obj)
+size_t JIT_OPERATION operationDeleteByVal(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedKey)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
+ auto scope = DECLARE_THROW_SCOPE(vm);
- Structure* structure = obj->structure();
- JSPropertyNameIterator* jsPropertyNameIterator = structure->enumerationCache();
- if (!jsPropertyNameIterator || jsPropertyNameIterator->cachedPrototypeChain() != structure->prototypeChain(exec))
- jsPropertyNameIterator = JSPropertyNameIterator::create(exec, obj);
- return jsPropertyNameIterator;
+ JSObject* baseObj = JSValue::decode(encodedBase).toObject(exec);
+ JSValue key = JSValue::decode(encodedKey);
+ if (!baseObj)
+ return false;
+
+ bool couldDelete;
+ uint32_t index;
+ if (key.getUInt32(index))
+ couldDelete = baseObj->methodTable(vm)->deletePropertyByIndex(baseObj, exec, index);
+ else {
+ RETURN_IF_EXCEPTION(scope, false);
+ Identifier property = key.toPropertyKey(exec);
+ RETURN_IF_EXCEPTION(scope, false);
+ couldDelete = baseObj->methodTable(vm)->deleteProperty(baseObj, exec, property);
+ }
+ if (!couldDelete && exec->codeBlock()->isStrictMode())
+ throwTypeError(exec, scope, ASCIILiteral(UnableToDeletePropertyError));
+ return couldDelete;
}
EncodedJSValue JIT_OPERATION operationInstanceOf(ExecState* exec, EncodedJSValue encodedValue, EncodedJSValue encodedProto)
@@ -1520,29 +1964,39 @@ EncodedJSValue JIT_OPERATION operationInstanceOf(ExecState* exec, EncodedJSValue
JSValue value = JSValue::decode(encodedValue);
JSValue proto = JSValue::decode(encodedProto);
- ASSERT(!value.isObject() || !proto.isObject());
-
bool result = JSObject::defaultHasInstance(exec, value, proto);
return JSValue::encode(jsBoolean(result));
}
-CallFrame* JIT_OPERATION operationSizeAndAllocFrameForVarargs(ExecState* exec, EncodedJSValue encodedArguments, int32_t firstFreeRegister)
+int32_t JIT_OPERATION operationSizeFrameForForwardArguments(ExecState* exec, EncodedJSValue, int32_t numUsedStackSlots, int32_t)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+ return sizeFrameForForwardArguments(exec, vm, numUsedStackSlots);
+}
+
+int32_t JIT_OPERATION operationSizeFrameForVarargs(ExecState* exec, EncodedJSValue encodedArguments, int32_t numUsedStackSlots, int32_t firstVarArgOffset)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
- JSStack* stack = &exec->interpreter()->stack();
JSValue arguments = JSValue::decode(encodedArguments);
- CallFrame* newCallFrame = sizeAndAllocFrameForVarargs(exec, stack, arguments, firstFreeRegister);
+ return sizeFrameForVarargs(exec, vm, arguments, numUsedStackSlots, firstVarArgOffset);
+}
+
+CallFrame* JIT_OPERATION operationSetupForwardArgumentsFrame(ExecState* exec, CallFrame* newCallFrame, EncodedJSValue, int32_t, int32_t length)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+ setupForwardArgumentsFrame(exec, newCallFrame, length);
return newCallFrame;
}
-CallFrame* JIT_OPERATION operationLoadVarargs(ExecState* exec, CallFrame* newCallFrame, EncodedJSValue encodedThis, EncodedJSValue encodedArguments)
+CallFrame* JIT_OPERATION operationSetupVarargsFrame(ExecState* exec, CallFrame* newCallFrame, EncodedJSValue encodedArguments, int32_t firstVarArgOffset, int32_t length)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
- JSValue thisValue = JSValue::decode(encodedThis);
JSValue arguments = JSValue::decode(encodedArguments);
- loadVarargs(exec, newCallFrame, thisValue, arguments);
+ setupVarargsFrame(exec, newCallFrame, arguments, firstVarArgOffset, length);
return newCallFrame;
}
@@ -1550,7 +2004,10 @@ EncodedJSValue JIT_OPERATION operationToObject(ExecState* exec, EncodedJSValue v
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
- return JSValue::encode(JSValue::decode(value).toObject(exec));
+ JSObject* obj = JSValue::decode(value).toObject(exec);
+ if (!obj)
+ return JSValue::encode(JSValue());
+ return JSValue::encode(obj);
}
char* JIT_OPERATION operationSwitchCharWithUnknownKeyType(ExecState* exec, EncodedJSValue encodedKey, size_t tableIndex)
@@ -1609,141 +2066,652 @@ char* JIT_OPERATION operationSwitchStringWithUnknownKeyType(ExecState* exec, Enc
return reinterpret_cast<char*>(result);
}
-EncodedJSValue JIT_OPERATION operationResolveScope(ExecState* exec, int32_t identifierIndex)
-{
- VM& vm = exec->vm();
- NativeCallFrameTracer tracer(&vm, exec);
- const Identifier& ident = exec->codeBlock()->identifier(identifierIndex);
- return JSValue::encode(JSScope::resolve(exec, exec->scope(), ident));
-}
-
EncodedJSValue JIT_OPERATION operationGetFromScope(ExecState* exec, Instruction* bytecodePC)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
+ auto throwScope = DECLARE_THROW_SCOPE(vm);
+
CodeBlock* codeBlock = exec->codeBlock();
Instruction* pc = bytecodePC;
const Identifier& ident = codeBlock->identifier(pc[3].u.operand);
JSObject* scope = jsCast<JSObject*>(exec->uncheckedR(pc[2].u.operand).jsValue());
- ResolveModeAndType modeAndType(pc[4].u.operand);
+ GetPutInfo getPutInfo(pc[4].u.operand);
- PropertySlot slot(scope);
- if (!scope->getPropertySlot(exec, ident, slot)) {
- if (modeAndType.mode() == ThrowIfNotFound)
- vm.throwException(exec, createUndefinedVariableError(exec, ident));
- return JSValue::encode(jsUndefined());
- }
+ // ModuleVar is always converted to ClosureVar for get_from_scope.
+ ASSERT(getPutInfo.resolveType() != ModuleVar);
- // Covers implicit globals. Since they don't exist until they first execute, we didn't know how to cache them at compile time.
- if (slot.isCacheableValue() && slot.slotBase() == scope && scope->structure()->propertyAccessesAreCacheable()) {
- if (modeAndType.type() == GlobalProperty || modeAndType.type() == GlobalPropertyWithVarInjectionChecks) {
- ConcurrentJITLocker locker(codeBlock->m_lock);
- pc[5].u.structure.set(exec->vm(), codeBlock->ownerExecutable(), scope->structure());
- pc[6].u.operand = slot.cachedOffset();
+ return JSValue::encode(scope->getPropertySlot(exec, ident, [&] (bool found, PropertySlot& slot) -> JSValue {
+ if (!found) {
+ if (getPutInfo.resolveMode() == ThrowIfNotFound)
+ throwException(exec, throwScope, createUndefinedVariableError(exec, ident));
+ return jsUndefined();
}
- }
- return JSValue::encode(slot.getValue(exec, ident));
+ JSValue result = JSValue();
+ if (scope->isGlobalLexicalEnvironment()) {
+ // When we can't statically prove we need a TDZ check, we must perform the check on the slow path.
+ result = slot.getValue(exec, ident);
+ if (result == jsTDZValue()) {
+ throwException(exec, throwScope, createTDZError(exec));
+ return jsUndefined();
+ }
+ }
+
+ CommonSlowPaths::tryCacheGetFromScopeGlobal(exec, vm, pc, scope, slot, ident);
+
+ if (!result)
+ return slot.getValue(exec, ident);
+ return result;
+ }));
}
void JIT_OPERATION operationPutToScope(ExecState* exec, Instruction* bytecodePC)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
+ auto throwScope = DECLARE_THROW_SCOPE(vm);
+
Instruction* pc = bytecodePC;
CodeBlock* codeBlock = exec->codeBlock();
const Identifier& ident = codeBlock->identifier(pc[2].u.operand);
JSObject* scope = jsCast<JSObject*>(exec->uncheckedR(pc[1].u.operand).jsValue());
JSValue value = exec->r(pc[3].u.operand).jsValue();
- ResolveModeAndType modeAndType = ResolveModeAndType(pc[4].u.operand);
+ GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand);
- if (modeAndType.mode() == ThrowIfNotFound && !scope->hasProperty(exec, ident)) {
- exec->vm().throwException(exec, createUndefinedVariableError(exec, ident));
+ // ModuleVar does not keep the scope register value alive in DFG.
+ ASSERT(getPutInfo.resolveType() != ModuleVar);
+
+ if (getPutInfo.resolveType() == LocalClosureVar) {
+ JSLexicalEnvironment* environment = jsCast<JSLexicalEnvironment*>(scope);
+ environment->variableAt(ScopeOffset(pc[6].u.operand)).set(vm, environment, value);
+ if (WatchpointSet* set = pc[5].u.watchpointSet)
+ set->touch(vm, "Executed op_put_scope<LocalClosureVar>");
return;
}
- PutPropertySlot slot(scope, codeBlock->isStrictMode());
+ bool hasProperty = scope->hasProperty(exec, ident);
+ if (hasProperty
+ && scope->isGlobalLexicalEnvironment()
+ && !isInitialization(getPutInfo.initializationMode())) {
+ // When we can't statically prove we need a TDZ check, we must perform the check on the slow path.
+ PropertySlot slot(scope, PropertySlot::InternalMethodType::Get);
+ JSGlobalLexicalEnvironment::getOwnPropertySlot(scope, exec, ident, slot);
+ if (slot.getValue(exec, ident) == jsTDZValue()) {
+ throwException(exec, throwScope, createTDZError(exec));
+ return;
+ }
+ }
+
+ if (getPutInfo.resolveMode() == ThrowIfNotFound && !hasProperty) {
+ throwException(exec, throwScope, createUndefinedVariableError(exec, ident));
+ return;
+ }
+
+ PutPropertySlot slot(scope, codeBlock->isStrictMode(), PutPropertySlot::UnknownContext, isInitialization(getPutInfo.initializationMode()));
scope->methodTable()->put(scope, exec, ident, value, slot);
- if (exec->vm().exception())
- return;
+ RETURN_IF_EXCEPTION(throwScope, void());
- // Covers implicit globals. Since they don't exist until they first execute, we didn't know how to cache them at compile time.
- if (modeAndType.type() == GlobalProperty || modeAndType.type() == GlobalPropertyWithVarInjectionChecks) {
- if (slot.isCacheable() && slot.base() == scope && scope->structure()->propertyAccessesAreCacheable()) {
- ConcurrentJITLocker locker(codeBlock->m_lock);
- pc[5].u.structure.set(exec->vm(), codeBlock->ownerExecutable(), scope->structure());
- pc[6].u.operand = slot.cachedOffset();
- }
- }
+ CommonSlowPaths::tryCachePutToScopeGlobal(exec, codeBlock, pc, scope, getPutInfo, slot, ident);
}
void JIT_OPERATION operationThrow(ExecState* exec, EncodedJSValue encodedExceptionValue)
{
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
+ auto scope = DECLARE_THROW_SCOPE(*vm);
JSValue exceptionValue = JSValue::decode(encodedExceptionValue);
- vm->throwException(exec, exceptionValue);
+ throwException(exec, scope, exceptionValue);
+
+ // Results stored out-of-band in vm.targetMachinePCForThrow & vm.callFrameForCatch
+ genericUnwind(vm, exec);
+}
+
+char* JIT_OPERATION operationReallocateButterflyToHavePropertyStorageWithInitialCapacity(ExecState* exec, JSObject* object)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ ASSERT(!object->structure()->outOfLineCapacity());
+ Butterfly* result = object->allocateMoreOutOfLineStorage(vm, 0, initialOutOfLineCapacity);
+ object->nukeStructureAndSetButterfly(vm, object->structureID(), result);
+ return reinterpret_cast<char*>(result);
+}
- // Results stored out-of-band in vm.targetMachinePCForThrow & vm.callFrameForThrow
- genericUnwind(vm, exec, exceptionValue);
+char* JIT_OPERATION operationReallocateButterflyToGrowPropertyStorage(ExecState* exec, JSObject* object, size_t newSize)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ Butterfly* result = object->allocateMoreOutOfLineStorage(vm, object->structure()->outOfLineCapacity(), newSize);
+ object->nukeStructureAndSetButterfly(vm, object->structureID(), result);
+ return reinterpret_cast<char*>(result);
}
-void JIT_OPERATION operationFlushWriteBarrierBuffer(ExecState* exec, JSCell* cell)
+void JIT_OPERATION operationOSRWriteBarrier(ExecState* exec, JSCell* cell)
{
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
- vm->heap.flushWriteBarrierBuffer(cell);
+ vm->heap.writeBarrier(cell);
}
-void JIT_OPERATION operationOSRWriteBarrier(ExecState* exec, JSCell* cell)
+void JIT_OPERATION operationWriteBarrierSlowPath(ExecState* exec, JSCell* cell)
{
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
- exec->heap()->writeBarrier(cell);
+ vm->heap.writeBarrierSlowPath(cell);
}
-// NB: We don't include the value as part of the barrier because the write barrier elision
-// phase in the DFG only tracks whether the object being stored to has been barriered. It
-// would be much more complicated to try to model the value being stored as well.
-void JIT_OPERATION operationUnconditionalWriteBarrier(ExecState* exec, JSCell* cell)
+void JIT_OPERATION lookupExceptionHandler(VM* vm, ExecState* exec)
+{
+ NativeCallFrameTracer tracer(vm, exec);
+ genericUnwind(vm, exec);
+ ASSERT(vm->targetMachinePCForThrow);
+}
+
+void JIT_OPERATION lookupExceptionHandlerFromCallerFrame(VM* vm, ExecState* exec)
+{
+ vm->topCallFrame = exec->callerFrame();
+ genericUnwind(vm, exec, UnwindFromCallerFrame);
+ ASSERT(vm->targetMachinePCForThrow);
+}
+
+void JIT_OPERATION operationVMHandleException(ExecState* exec)
{
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
- Heap::writeBarrier(cell);
+ genericUnwind(vm, exec);
}
-void JIT_OPERATION operationInitGlobalConst(ExecState* exec, Instruction* pc)
+// This function "should" just take the ExecState*, but doing so would make it more difficult
+// to call from exception check sites. So, unlike all of our other functions, we allow
+// ourselves to play some gnarly ABI tricks just to simplify the calling convention. This is
+// particularly safe here since this is never called on the critical path - it's only for
+// testing.
+void JIT_OPERATION operationExceptionFuzz(ExecState* exec)
{
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
+ auto scope = DECLARE_THROW_SCOPE(*vm);
+ UNUSED_PARAM(scope);
+#if COMPILER(GCC_OR_CLANG)
+ void* returnPC = __builtin_return_address(0);
+ doExceptionFuzzing(exec, scope, "JITOperations", returnPC);
+#endif // COMPILER(GCC_OR_CLANG)
+}
+
+EncodedJSValue JIT_OPERATION operationHasGenericProperty(ExecState* exec, EncodedJSValue encodedBaseValue, JSCell* propertyName)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+ JSValue baseValue = JSValue::decode(encodedBaseValue);
+ if (baseValue.isUndefinedOrNull())
+ return JSValue::encode(jsBoolean(false));
+
+ JSObject* base = baseValue.toObject(exec);
+ if (!base)
+ return JSValue::encode(JSValue());
+ return JSValue::encode(jsBoolean(base->hasPropertyGeneric(exec, asString(propertyName)->toIdentifier(exec), PropertySlot::InternalMethodType::GetOwnProperty)));
+}
+
+EncodedJSValue JIT_OPERATION operationHasIndexedProperty(ExecState* exec, JSCell* baseCell, int32_t subscript, int32_t internalMethodType)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+ JSObject* object = baseCell->toObject(exec, exec->lexicalGlobalObject());
+ return JSValue::encode(jsBoolean(object->hasPropertyGeneric(exec, subscript, static_cast<PropertySlot::InternalMethodType>(internalMethodType))));
+}
+
+JSCell* JIT_OPERATION operationGetPropertyEnumerator(ExecState* exec, JSCell* cell)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ JSObject* base = cell->toObject(exec, exec->lexicalGlobalObject());
+
+ return propertyNameEnumerator(exec, base);
+}
+
+EncodedJSValue JIT_OPERATION operationNextEnumeratorPname(ExecState* exec, JSCell* enumeratorCell, int32_t index)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+ JSPropertyNameEnumerator* enumerator = jsCast<JSPropertyNameEnumerator*>(enumeratorCell);
+ JSString* propertyName = enumerator->propertyNameAtIndex(index);
+ return JSValue::encode(propertyName ? propertyName : jsNull());
+}
- JSValue value = exec->r(pc[2].u.operand).jsValue();
- pc[1].u.registerPointer->set(*vm, exec->codeBlock()->globalObject(), value);
+JSCell* JIT_OPERATION operationToIndexString(ExecState* exec, int32_t index)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+ return jsString(exec, Identifier::from(exec, index).string());
}
-void JIT_OPERATION lookupExceptionHandler(ExecState* exec)
+ALWAYS_INLINE static EncodedJSValue unprofiledAdd(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2)
{
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
+
+ JSValue op1 = JSValue::decode(encodedOp1);
+ JSValue op2 = JSValue::decode(encodedOp2);
+
+ return JSValue::encode(jsAdd(exec, op1, op2));
+}
- JSValue exceptionValue = exec->exception();
- ASSERT(exceptionValue);
+ALWAYS_INLINE static EncodedJSValue profiledAdd(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, ArithProfile& arithProfile)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
- genericUnwind(vm, exec, exceptionValue);
- ASSERT(vm->targetMachinePCForThrow);
+ JSValue op1 = JSValue::decode(encodedOp1);
+ JSValue op2 = JSValue::decode(encodedOp2);
+
+ arithProfile.observeLHSAndRHS(op1, op2);
+ JSValue result = jsAdd(exec, op1, op2);
+ arithProfile.observeResult(result);
+
+ return JSValue::encode(result);
}
-void JIT_OPERATION operationVMHandleException(ExecState* exec)
+EncodedJSValue JIT_OPERATION operationValueAdd(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2)
+{
+ return unprofiledAdd(exec, encodedOp1, encodedOp2);
+}
+
+EncodedJSValue JIT_OPERATION operationValueAddProfiled(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, ArithProfile* arithProfile)
+{
+ ASSERT(arithProfile);
+ return profiledAdd(exec, encodedOp1, encodedOp2, *arithProfile);
+}
+
+EncodedJSValue JIT_OPERATION operationValueAddProfiledOptimize(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, JITAddIC* addIC)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ JSValue op1 = JSValue::decode(encodedOp1);
+ JSValue op2 = JSValue::decode(encodedOp2);
+
+ ArithProfile* arithProfile = addIC->arithProfile();
+ ASSERT(arithProfile);
+ arithProfile->observeLHSAndRHS(op1, op2);
+ auto nonOptimizeVariant = operationValueAddProfiledNoOptimize;
+ addIC->generateOutOfLine(*vm, exec->codeBlock(), nonOptimizeVariant);
+
+#if ENABLE(MATH_IC_STATS)
+ exec->codeBlock()->dumpMathICStats();
+#endif
+
+ JSValue result = jsAdd(exec, op1, op2);
+ arithProfile->observeResult(result);
+
+ return JSValue::encode(result);
+}
+
+EncodedJSValue JIT_OPERATION operationValueAddProfiledNoOptimize(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, JITAddIC* addIC)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ ArithProfile* arithProfile = addIC->arithProfile();
+ ASSERT(arithProfile);
+ return profiledAdd(exec, encodedOp1, encodedOp2, *arithProfile);
+}
+
+EncodedJSValue JIT_OPERATION operationValueAddOptimize(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, JITAddIC* addIC)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ JSValue op1 = JSValue::decode(encodedOp1);
+ JSValue op2 = JSValue::decode(encodedOp2);
+
+ auto nonOptimizeVariant = operationValueAddNoOptimize;
+ if (ArithProfile* arithProfile = addIC->arithProfile())
+ arithProfile->observeLHSAndRHS(op1, op2);
+ addIC->generateOutOfLine(*vm, exec->codeBlock(), nonOptimizeVariant);
+
+#if ENABLE(MATH_IC_STATS)
+ exec->codeBlock()->dumpMathICStats();
+#endif
+
+ return JSValue::encode(jsAdd(exec, op1, op2));
+}
+
+EncodedJSValue JIT_OPERATION operationValueAddNoOptimize(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, JITAddIC*)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ JSValue op1 = JSValue::decode(encodedOp1);
+ JSValue op2 = JSValue::decode(encodedOp2);
+
+ JSValue result = jsAdd(exec, op1, op2);
+
+ return JSValue::encode(result);
+}
+
+ALWAYS_INLINE static EncodedJSValue unprofiledMul(VM& vm, ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2)
+{
+ auto scope = DECLARE_THROW_SCOPE(vm);
+ JSValue op1 = JSValue::decode(encodedOp1);
+ JSValue op2 = JSValue::decode(encodedOp2);
+
+ double a = op1.toNumber(exec);
+ RETURN_IF_EXCEPTION(scope, encodedJSValue());
+ double b = op2.toNumber(exec);
+ return JSValue::encode(jsNumber(a * b));
+}
+
+ALWAYS_INLINE static EncodedJSValue profiledMul(VM& vm, ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, ArithProfile& arithProfile, bool shouldObserveLHSAndRHSTypes = true)
+{
+ auto scope = DECLARE_THROW_SCOPE(vm);
+ JSValue op1 = JSValue::decode(encodedOp1);
+ JSValue op2 = JSValue::decode(encodedOp2);
+
+ if (shouldObserveLHSAndRHSTypes)
+ arithProfile.observeLHSAndRHS(op1, op2);
+
+ double a = op1.toNumber(exec);
+ RETURN_IF_EXCEPTION(scope, encodedJSValue());
+ double b = op2.toNumber(exec);
+ RETURN_IF_EXCEPTION(scope, encodedJSValue());
+
+ JSValue result = jsNumber(a * b);
+ arithProfile.observeResult(result);
+ return JSValue::encode(result);
+}
+
+EncodedJSValue JIT_OPERATION operationValueMul(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ return unprofiledMul(*vm, exec, encodedOp1, encodedOp2);
+}
+
+EncodedJSValue JIT_OPERATION operationValueMulNoOptimize(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, JITMulIC*)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ return unprofiledMul(*vm, exec, encodedOp1, encodedOp2);
+}
+
+EncodedJSValue JIT_OPERATION operationValueMulOptimize(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, JITMulIC* mulIC)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ auto nonOptimizeVariant = operationValueMulNoOptimize;
+ if (ArithProfile* arithProfile = mulIC->arithProfile())
+ arithProfile->observeLHSAndRHS(JSValue::decode(encodedOp1), JSValue::decode(encodedOp2));
+ mulIC->generateOutOfLine(*vm, exec->codeBlock(), nonOptimizeVariant);
+
+#if ENABLE(MATH_IC_STATS)
+ exec->codeBlock()->dumpMathICStats();
+#endif
+
+ return unprofiledMul(*vm, exec, encodedOp1, encodedOp2);
+}
+
+EncodedJSValue JIT_OPERATION operationValueMulProfiled(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, ArithProfile* arithProfile)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ ASSERT(arithProfile);
+ return profiledMul(*vm, exec, encodedOp1, encodedOp2, *arithProfile);
+}
+
+EncodedJSValue JIT_OPERATION operationValueMulProfiledOptimize(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, JITMulIC* mulIC)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ ArithProfile* arithProfile = mulIC->arithProfile();
+ ASSERT(arithProfile);
+ arithProfile->observeLHSAndRHS(JSValue::decode(encodedOp1), JSValue::decode(encodedOp2));
+ auto nonOptimizeVariant = operationValueMulProfiledNoOptimize;
+ mulIC->generateOutOfLine(*vm, exec->codeBlock(), nonOptimizeVariant);
+
+#if ENABLE(MATH_IC_STATS)
+ exec->codeBlock()->dumpMathICStats();
+#endif
+
+ return profiledMul(*vm, exec, encodedOp1, encodedOp2, *arithProfile, false);
+}
+
+EncodedJSValue JIT_OPERATION operationValueMulProfiledNoOptimize(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, JITMulIC* mulIC)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ ArithProfile* arithProfile = mulIC->arithProfile();
+ ASSERT(arithProfile);
+ return profiledMul(*vm, exec, encodedOp1, encodedOp2, *arithProfile);
+}
+
+ALWAYS_INLINE static EncodedJSValue unprofiledNegate(ExecState* exec, EncodedJSValue encodedOperand)
+{
+ VM& vm = exec->vm();
+ auto scope = DECLARE_THROW_SCOPE(vm);
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ JSValue operand = JSValue::decode(encodedOperand);
+ double number = operand.toNumber(exec);
+ if (UNLIKELY(scope.exception()))
+ return JSValue::encode(JSValue());
+ return JSValue::encode(jsNumber(-number));
+}
+
+ALWAYS_INLINE static EncodedJSValue profiledNegate(ExecState* exec, EncodedJSValue encodedOperand, ArithProfile& arithProfile)
+{
+ VM& vm = exec->vm();
+ auto scope = DECLARE_THROW_SCOPE(vm);
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ JSValue operand = JSValue::decode(encodedOperand);
+ arithProfile.observeLHS(operand);
+ double number = operand.toNumber(exec);
+ if (UNLIKELY(scope.exception()))
+ return JSValue::encode(JSValue());
+
+ JSValue result = jsNumber(-number);
+ arithProfile.observeResult(result);
+ return JSValue::encode(result);
+}
+
+EncodedJSValue JIT_OPERATION operationArithNegate(ExecState* exec, EncodedJSValue operand)
+{
+ return unprofiledNegate(exec, operand);
+}
+
+EncodedJSValue JIT_OPERATION operationArithNegateProfiled(ExecState* exec, EncodedJSValue operand, ArithProfile* arithProfile)
{
+ ASSERT(arithProfile);
+ return profiledNegate(exec, operand, *arithProfile);
+}
+
+EncodedJSValue JIT_OPERATION operationArithNegateProfiledOptimize(ExecState* exec, EncodedJSValue encodedOperand, JITNegIC* negIC)
+{
+ VM& vm = exec->vm();
+ auto scope = DECLARE_THROW_SCOPE(vm);
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ JSValue operand = JSValue::decode(encodedOperand);
+
+ ArithProfile* arithProfile = negIC->arithProfile();
+ ASSERT(arithProfile);
+ arithProfile->observeLHS(operand);
+ negIC->generateOutOfLine(vm, exec->codeBlock(), operationArithNegateProfiled);
+
+#if ENABLE(MATH_IC_STATS)
+ exec->codeBlock()->dumpMathICStats();
+#endif
+
+ double number = operand.toNumber(exec);
+ if (UNLIKELY(scope.exception()))
+ return JSValue::encode(JSValue());
+ JSValue result = jsNumber(-number);
+ arithProfile->observeResult(result);
+ return JSValue::encode(result);
+}
+
+EncodedJSValue JIT_OPERATION operationArithNegateOptimize(ExecState* exec, EncodedJSValue encodedOperand, JITNegIC* negIC)
+{
+ VM& vm = exec->vm();
+ auto scope = DECLARE_THROW_SCOPE(vm);
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ JSValue operand = JSValue::decode(encodedOperand);
+
+ if (ArithProfile* arithProfile = negIC->arithProfile())
+ arithProfile->observeLHS(operand);
+ negIC->generateOutOfLine(vm, exec->codeBlock(), operationArithNegate);
+
+#if ENABLE(MATH_IC_STATS)
+ exec->codeBlock()->dumpMathICStats();
+#endif
+
+ double number = operand.toNumber(exec);
+ if (UNLIKELY(scope.exception()))
+ return JSValue::encode(JSValue());
+ return JSValue::encode(jsNumber(-number));
+}
+
+ALWAYS_INLINE static EncodedJSValue unprofiledSub(VM& vm, ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2)
+{
+ auto scope = DECLARE_THROW_SCOPE(vm);
+ JSValue op1 = JSValue::decode(encodedOp1);
+ JSValue op2 = JSValue::decode(encodedOp2);
+
+ double a = op1.toNumber(exec);
+ RETURN_IF_EXCEPTION(scope, encodedJSValue());
+ double b = op2.toNumber(exec);
+ return JSValue::encode(jsNumber(a - b));
+}
+
+ALWAYS_INLINE static EncodedJSValue profiledSub(VM& vm, ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, ArithProfile& arithProfile, bool shouldObserveLHSAndRHSTypes = true)
+{
+ auto scope = DECLARE_THROW_SCOPE(vm);
+ JSValue op1 = JSValue::decode(encodedOp1);
+ JSValue op2 = JSValue::decode(encodedOp2);
+
+ if (shouldObserveLHSAndRHSTypes)
+ arithProfile.observeLHSAndRHS(op1, op2);
+
+ double a = op1.toNumber(exec);
+ RETURN_IF_EXCEPTION(scope, encodedJSValue());
+ double b = op2.toNumber(exec);
+ RETURN_IF_EXCEPTION(scope, encodedJSValue());
+
+ JSValue result = jsNumber(a - b);
+ arithProfile.observeResult(result);
+ return JSValue::encode(result);
+}
+
+EncodedJSValue JIT_OPERATION operationValueSub(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+ return unprofiledSub(*vm, exec, encodedOp1, encodedOp2);
+}
+
+EncodedJSValue JIT_OPERATION operationValueSubProfiled(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, ArithProfile* arithProfile)
+{
+ ASSERT(arithProfile);
+
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
- ASSERT(!exec->isVMEntrySentinel());
- genericUnwind(vm, exec, vm->exception());
+ return profiledSub(*vm, exec, encodedOp1, encodedOp2, *arithProfile);
+}
+
+EncodedJSValue JIT_OPERATION operationValueSubOptimize(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, JITSubIC* subIC)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ auto nonOptimizeVariant = operationValueSubNoOptimize;
+ if (ArithProfile* arithProfile = subIC->arithProfile())
+ arithProfile->observeLHSAndRHS(JSValue::decode(encodedOp1), JSValue::decode(encodedOp2));
+ subIC->generateOutOfLine(*vm, exec->codeBlock(), nonOptimizeVariant);
+
+#if ENABLE(MATH_IC_STATS)
+ exec->codeBlock()->dumpMathICStats();
+#endif
+
+ return unprofiledSub(*vm, exec, encodedOp1, encodedOp2);
+}
+
+EncodedJSValue JIT_OPERATION operationValueSubNoOptimize(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, JITSubIC*)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ return unprofiledSub(*vm, exec, encodedOp1, encodedOp2);
+}
+
+EncodedJSValue JIT_OPERATION operationValueSubProfiledOptimize(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, JITSubIC* subIC)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ ArithProfile* arithProfile = subIC->arithProfile();
+ ASSERT(arithProfile);
+ arithProfile->observeLHSAndRHS(JSValue::decode(encodedOp1), JSValue::decode(encodedOp2));
+ auto nonOptimizeVariant = operationValueSubProfiledNoOptimize;
+ subIC->generateOutOfLine(*vm, exec->codeBlock(), nonOptimizeVariant);
+
+#if ENABLE(MATH_IC_STATS)
+ exec->codeBlock()->dumpMathICStats();
+#endif
+
+ return profiledSub(*vm, exec, encodedOp1, encodedOp2, *arithProfile, false);
+}
+
+EncodedJSValue JIT_OPERATION operationValueSubProfiledNoOptimize(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, JITSubIC* subIC)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ ArithProfile* arithProfile = subIC->arithProfile();
+ ASSERT(arithProfile);
+ return profiledSub(*vm, exec, encodedOp1, encodedOp2, *arithProfile);
+}
+
+void JIT_OPERATION operationProcessTypeProfilerLog(ExecState* exec)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+ vm.typeProfilerLog()->processLogEntries(ASCIILiteral("Log Full, called from inside baseline JIT"));
+}
+
+void JIT_OPERATION operationProcessShadowChickenLog(ExecState* exec)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+ vm.shadowChicken().update(vm, exec);
+}
+
+int32_t JIT_OPERATION operationCheckIfExceptionIsUncatchableAndNotifyProfiler(ExecState* exec)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+ auto scope = DECLARE_THROW_SCOPE(vm);
+ RELEASE_ASSERT(!!scope.exception());
+
+ if (isTerminatedExecutionException(vm, scope.exception())) {
+ genericUnwind(&vm, exec);
+ return 1;
+ }
+ return 0;
}
} // extern "C"
@@ -1758,28 +2726,32 @@ extern "C" EncodedJSValue HOST_CALL_RETURN_VALUE_OPTION getHostCallReturnValueWi
return JSValue::encode(exec->vm().hostCallReturnValue);
}
-#if COMPILER(GCC) && CPU(X86_64)
+#if COMPILER(GCC_OR_CLANG) && CPU(X86_64)
asm (
".globl " SYMBOL_STRING(getHostCallReturnValue) "\n"
HIDE_SYMBOL(getHostCallReturnValue) "\n"
SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
- "mov 0(%rbp), %rbp\n" // CallerFrameAndPC::callerFrame
- "mov %rbp, %rdi\n"
+ "lea -8(%rsp), %rdi\n"
"jmp " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n"
);
-#elif COMPILER(GCC) && CPU(X86)
+#elif COMPILER(GCC_OR_CLANG) && CPU(X86)
asm (
".text" "\n" \
".globl " SYMBOL_STRING(getHostCallReturnValue) "\n"
HIDE_SYMBOL(getHostCallReturnValue) "\n"
SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
- "mov 0(%ebp), %ebp\n" // CallerFrameAndPC::callerFrame
- "mov %ebp, 4(%esp)\n"
- "jmp " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n"
+ "push %ebp\n"
+ "mov %esp, %eax\n"
+ "leal -4(%esp), %esp\n"
+ "push %eax\n"
+ "call " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n"
+ "leal 8(%esp), %esp\n"
+ "pop %ebp\n"
+ "ret\n"
);
-#elif COMPILER(GCC) && CPU(ARM_THUMB2)
+#elif COMPILER(GCC_OR_CLANG) && CPU(ARM_THUMB2)
asm (
".text" "\n"
".align 2" "\n"
@@ -1788,20 +2760,18 @@ HIDE_SYMBOL(getHostCallReturnValue) "\n"
".thumb" "\n"
".thumb_func " THUMB_FUNC_PARAM(getHostCallReturnValue) "\n"
SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
- "ldr r7, [r7, #0]" "\n" // CallerFrameAndPC::callerFrame
- "mov r0, r7" "\n"
+ "sub r0, sp, #8" "\n"
"b " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n"
);
-#elif COMPILER(GCC) && CPU(ARM_TRADITIONAL)
+#elif COMPILER(GCC_OR_CLANG) && CPU(ARM_TRADITIONAL)
asm (
".text" "\n"
".globl " SYMBOL_STRING(getHostCallReturnValue) "\n"
HIDE_SYMBOL(getHostCallReturnValue) "\n"
INLINE_ARM_FUNCTION(getHostCallReturnValue)
SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
- "ldr r11, [r11, #0]" "\n" // CallerFrameAndPC::callerFrame
- "mov r0, r11" "\n"
+ "sub r0, sp, #8" "\n"
"b " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n"
);
@@ -1812,44 +2782,38 @@ asm (
".globl " SYMBOL_STRING(getHostCallReturnValue) "\n"
HIDE_SYMBOL(getHostCallReturnValue) "\n"
SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
- "ldur x29, [x29, #0]" "\n"
- "mov x0, x29" "\n"
+ "sub x0, sp, #16" "\n"
"b " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n"
);
-#elif COMPILER(GCC) && CPU(MIPS)
+#elif COMPILER(GCC_OR_CLANG) && CPU(MIPS)
+
+#if WTF_MIPS_PIC
+#define LOAD_FUNCTION_TO_T9(function) \
+ ".set noreorder" "\n" \
+ ".cpload $25" "\n" \
+ ".set reorder" "\n" \
+ "la $t9, " LOCAL_REFERENCE(function) "\n"
+#else
+#define LOAD_FUNCTION_TO_T9(function) "" "\n"
+#endif
+
asm (
".text" "\n"
".globl " SYMBOL_STRING(getHostCallReturnValue) "\n"
HIDE_SYMBOL(getHostCallReturnValue) "\n"
SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
LOAD_FUNCTION_TO_T9(getHostCallReturnValueWithExecState)
- "lw $fp, 0($fp)" "\n" // CallerFrameAndPC::callerFrame
- "move $a0, $fp" "\n"
+ "addi $a0, $sp, -8" "\n"
"b " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n"
);
-#elif COMPILER(GCC) && CPU(SH4)
-asm (
-".text" "\n"
-".globl " SYMBOL_STRING(getHostCallReturnValue) "\n"
-HIDE_SYMBOL(getHostCallReturnValue) "\n"
-SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
- "mov.l @r14, r14" "\n" // CallerFrameAndPC::callerFrame
- "mov r14, r4" "\n"
- "mov.l 2f, " SH4_SCRATCH_REGISTER "\n"
- "braf " SH4_SCRATCH_REGISTER "\n"
- "nop" "\n"
- "1: .balign 4" "\n"
- "2: .long " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "-1b\n"
-);
-
#elif COMPILER(MSVC) && CPU(X86)
extern "C" {
__declspec(naked) EncodedJSValue HOST_CALL_RETURN_VALUE_OPTION getHostCallReturnValue()
{
- __asm mov ebp, [ebp + 0]; // CallerFrameAndPC::callerFrame
- __asm mov [esp + 4], ebp;
+ __asm lea eax, [esp - 4]
+ __asm mov [esp + 4], eax;
__asm jmp getHostCallReturnValueWithExecState
}
}
diff --git a/Source/JavaScriptCore/jit/JITOperations.h b/Source/JavaScriptCore/jit/JITOperations.h
index 43ca6177b..2883324e1 100644
--- a/Source/JavaScriptCore/jit/JITOperations.h
+++ b/Source/JavaScriptCore/jit/JITOperations.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2017 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,273 +23,455 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef JITOperations_h
-#define JITOperations_h
+#pragma once
#if ENABLE(JIT)
-#include "CallFrame.h"
-#include "JITExceptions.h"
-#include "JSArray.h"
-#include "JSCJSValue.h"
-#include "MacroAssembler.h"
-#include "PutKind.h"
-#include "StructureStubInfo.h"
-#include "VariableWatchpointSet.h"
+#include "JITMathICForwards.h"
+#include "MacroAssemblerCodeRef.h"
+#include "PropertyOffset.h"
+#include "SlowPathReturnType.h"
+#include "TypedArrayType.h"
+#include <wtf/Platform.h>
+#include <wtf/text/UniquedStringImpl.h>
namespace JSC {
+typedef int64_t EncodedJSValue;
+
class ArrayAllocationProfile;
+class ArrayProfile;
+class Butterfly;
+class CallLinkInfo;
+class CodeBlock;
+class ExecState;
+class JSArray;
+class JSCell;
+class JSFunction;
+class JSGlobalObject;
+class JSLexicalEnvironment;
+class JSObject;
+class JSScope;
+class JSString;
+class JSValue;
+class RegExpObject;
+class Register;
+class Structure;
+class StructureStubInfo;
+class Symbol;
+class SymbolTable;
+class WatchpointSet;
-#if CALLING_CONVENTION_IS_STDCALL
-#define JIT_OPERATION CDECL
-#else
-#define JIT_OPERATION
-#endif
+struct ByValInfo;
+struct InlineCallFrame;
+struct Instruction;
+struct ArithProfile;
+
+typedef ExecState CallFrame;
extern "C" {
+typedef char* UnusedPtr;
+
// These typedefs provide typechecking when generating calls out to helper routines;
// this helps prevent calling a helper routine with the wrong arguments!
/*
Key:
A: JSArray*
Aap: ArrayAllocationProfile*
+ Ap: ArrayProfile*
+ Arp: ArithProfile*
+ B: Butterfly*
+ By: ByValInfo*
C: JSCell*
Cb: CodeBlock*
+ Cli: CallLinkInfo*
D: double
E: ExecState*
F: CallFrame*
- I: StringImpl*
- Icf: InlineCalLFrame*
+ G: JSGlobalObject*
+ I: UniquedStringImpl*
+ Icf: InlineCallFrame*
Idc: const Identifier*
J: EncodedJSValue
+ Mic: JITMathIC* (can be JITAddIC*, JITMulIC*, etc.).
Jcp: const JSValue*
- Jsa: JSActivation*
+ Jsc: JSScope*
+ Jsf: JSFunction*
Jss: JSString*
+ L: JSLexicalEnvironment*
O: JSObject*
P: pointer (char*)
Pc: Instruction* i.e. bytecode PC
+ Q: int64_t
R: Register
+ Reo: RegExpObject*
S: size_t
+ Sprt: SlowPathReturnType
Ssi: StructureStubInfo*
St: Structure*
+ Symtab: SymbolTable*
+ Sym: Symbol*
+ T: StringImpl*
V: void
Vm: VM*
- Vws: VariableWatchpointSet*
+ Ws: WatchpointSet*
Z: int32_t
+ Ui: uint32_t
*/
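+// For illustration, reading the key: "J_JITOperation_ECI" names an operation returning an
+// EncodedJSValue (J) whose arguments are an ExecState* (E), a JSCell* (C), and a
+// UniquedStringImpl* (I), i.e.
+//
+//     typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_ECI)(ExecState*, JSCell*, UniquedStringImpl*);
+//
+// and "V_JITOperation_ECZ" decodes to void (ExecState*, JSCell*, int32_t).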
-typedef CallFrame* JIT_OPERATION (*F_JITOperation_EFJJ)(ExecState*, CallFrame*, EncodedJSValue, EncodedJSValue);
-typedef CallFrame* JIT_OPERATION (*F_JITOperation_EJZ)(ExecState*, EncodedJSValue, int32_t);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_E)(ExecState*);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EA)(ExecState*, JSArray*);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EAZ)(ExecState*, JSArray*, int32_t);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EAapJ)(ExecState*, ArrayAllocationProfile*, EncodedJSValue);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EAapJcpZ)(ExecState*, ArrayAllocationProfile*, const JSValue*, int32_t);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EC)(ExecState*, JSCell*);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_ECC)(ExecState*, JSCell*, JSCell*);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_ECI)(ExecState*, JSCell*, StringImpl*);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_ECJ)(ExecState*, JSCell*, EncodedJSValue);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EDA)(ExecState*, double, JSArray*);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EI)(ExecState*, StringImpl*);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJ)(ExecState*, EncodedJSValue);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJA)(ExecState*, EncodedJSValue, JSArray*);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJIdc)(ExecState*, EncodedJSValue, const Identifier*);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJJ)(ExecState*, EncodedJSValue, EncodedJSValue);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJssZ)(ExecState*, JSString*, int32_t);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJP)(ExecState*, EncodedJSValue, void*);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EP)(ExecState*, void*);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EPP)(ExecState*, void*, void*);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EPS)(ExecState*, void*, size_t);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EPc)(ExecState*, Instruction*);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_ESS)(ExecState*, size_t, size_t);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_ESsiCI)(ExecState*, StructureStubInfo*, JSCell*, StringImpl*);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_ESsiJI)(ExecState*, StructureStubInfo*, EncodedJSValue, StringImpl*);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EZ)(ExecState*, int32_t);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EZIcfZ)(ExecState*, int32_t, InlineCallFrame*, int32_t);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EZZ)(ExecState*, int32_t, int32_t);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_E)(ExecState*);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EZ)(ExecState*, int32_t);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EC)(ExecState*, JSCell*);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_ECC)(ExecState*, JSCell*, JSCell*);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EIcf)(ExecState*, InlineCallFrame*);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EJ)(ExecState*, EncodedJSValue);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EJssSt)(ExecState*, JSString*, Structure*);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EJssJss)(ExecState*, JSString*, JSString*);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EJssJssJss)(ExecState*, JSString*, JSString*, JSString*);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EO)(ExecState*, JSObject*);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EOZ)(ExecState*, JSObject*, int32_t);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_ESt)(ExecState*, Structure*);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EZ)(ExecState*, int32_t);
-typedef double JIT_OPERATION (*D_JITOperation_D)(double);
-typedef double JIT_OPERATION (*D_JITOperation_DD)(double, double);
-typedef double JIT_OPERATION (*D_JITOperation_ZZ)(int32_t, int32_t);
-typedef double JIT_OPERATION (*D_JITOperation_EJ)(ExecState*, EncodedJSValue);
-typedef int32_t JIT_OPERATION (*Z_JITOperation_D)(double);
-typedef int32_t JIT_OPERATION (*Z_JITOperation_E)(ExecState*);
-typedef size_t JIT_OPERATION (*S_JITOperation_ECC)(ExecState*, JSCell*, JSCell*);
-typedef size_t JIT_OPERATION (*S_JITOperation_EJ)(ExecState*, EncodedJSValue);
-typedef size_t JIT_OPERATION (*S_JITOperation_EJJ)(ExecState*, EncodedJSValue, EncodedJSValue);
-typedef size_t JIT_OPERATION (*S_JITOperation_EOJss)(ExecState*, JSObject*, JSString*);
-typedef size_t JIT_OPERATION (*S_JITOperation_J)(EncodedJSValue);
-typedef void JIT_OPERATION (*V_JITOperation_E)(ExecState*);
-typedef void JIT_OPERATION (*V_JITOperation_EC)(ExecState*, JSCell*);
-typedef void JIT_OPERATION (*V_JITOperation_ECb)(ExecState*, CodeBlock*);
-typedef void JIT_OPERATION (*V_JITOperation_ECC)(ExecState*, JSCell*, JSCell*);
-typedef void JIT_OPERATION (*V_JITOperation_ECIcf)(ExecState*, JSCell*, InlineCallFrame*);
-typedef void JIT_OPERATION (*V_JITOperation_ECICC)(ExecState*, JSCell*, Identifier*, JSCell*, JSCell*);
-typedef void JIT_OPERATION (*V_JITOperation_ECCIcf)(ExecState*, JSCell*, JSCell*, InlineCallFrame*);
-typedef void JIT_OPERATION (*V_JITOperation_ECJJ)(ExecState*, JSCell*, EncodedJSValue, EncodedJSValue);
-typedef void JIT_OPERATION (*V_JITOperation_ECPSPS)(ExecState*, JSCell*, void*, size_t, void*, size_t);
-typedef void JIT_OPERATION (*V_JITOperation_ECZ)(ExecState*, JSCell*, int32_t);
-typedef void JIT_OPERATION (*V_JITOperation_ECC)(ExecState*, JSCell*, JSCell*);
-typedef void JIT_OPERATION (*V_JITOperation_EIdJZ)(ExecState*, Identifier*, EncodedJSValue, int32_t);
-typedef void JIT_OPERATION (*V_JITOperation_EJ)(ExecState*, EncodedJSValue);
-typedef void JIT_OPERATION (*V_JITOperation_EJCI)(ExecState*, EncodedJSValue, JSCell*, StringImpl*);
-typedef void JIT_OPERATION (*V_JITOperation_EJIdJJ)(ExecState*, EncodedJSValue, Identifier*, EncodedJSValue, EncodedJSValue);
-typedef void JIT_OPERATION (*V_JITOperation_EJJJ)(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue);
-typedef void JIT_OPERATION (*V_JITOperation_EJPP)(ExecState*, EncodedJSValue, void*, void*);
-typedef void JIT_OPERATION (*V_JITOperation_EJZJ)(ExecState*, EncodedJSValue, int32_t, EncodedJSValue);
-typedef void JIT_OPERATION (*V_JITOperation_EJZ)(ExecState*, EncodedJSValue, int32_t);
-typedef void JIT_OPERATION (*V_JITOperation_EOZD)(ExecState*, JSObject*, int32_t, double);
-typedef void JIT_OPERATION (*V_JITOperation_EOZJ)(ExecState*, JSObject*, int32_t, EncodedJSValue);
-typedef void JIT_OPERATION (*V_JITOperation_EPc)(ExecState*, Instruction*);
-typedef void JIT_OPERATION (*V_JITOperation_EPZJ)(ExecState*, void*, int32_t, EncodedJSValue);
-typedef void JIT_OPERATION (*V_JITOperation_ESsiJJI)(ExecState*, StructureStubInfo*, EncodedJSValue, EncodedJSValue, StringImpl*);
-typedef void JIT_OPERATION (*V_JITOperation_EVws)(ExecState*, VariableWatchpointSet*);
-typedef void JIT_OPERATION (*V_JITOperation_EZ)(ExecState*, int32_t);
-typedef void JIT_OPERATION (*V_JITOperation_EVm)(ExecState*, VM*);
-typedef char* JIT_OPERATION (*P_JITOperation_E)(ExecState*);
-typedef char* JIT_OPERATION (*P_JITOperation_EC)(ExecState*, JSCell*);
-typedef char* JIT_OPERATION (*P_JITOperation_EJS)(ExecState*, EncodedJSValue, size_t);
-typedef char* JIT_OPERATION (*P_JITOperation_EO)(ExecState*, JSObject*);
-typedef char* JIT_OPERATION (*P_JITOperation_EOS)(ExecState*, JSObject*, size_t);
-typedef char* JIT_OPERATION (*P_JITOperation_EOZ)(ExecState*, JSObject*, int32_t);
-typedef char* JIT_OPERATION (*P_JITOperation_EPS)(ExecState*, void*, size_t);
-typedef char* JIT_OPERATION (*P_JITOperation_ES)(ExecState*, size_t);
-typedef char* JIT_OPERATION (*P_JITOperation_ESJss)(ExecState*, size_t, JSString*);
-typedef char* JIT_OPERATION (*P_JITOperation_ESt)(ExecState*, Structure*);
-typedef char* JIT_OPERATION (*P_JITOperation_EStJ)(ExecState*, Structure*, EncodedJSValue);
-typedef char* JIT_OPERATION (*P_JITOperation_EStPS)(ExecState*, Structure*, void*, size_t);
-typedef char* JIT_OPERATION (*P_JITOperation_EStSS)(ExecState*, Structure*, size_t, size_t);
-typedef char* JIT_OPERATION (*P_JITOperation_EStZ)(ExecState*, Structure*, int32_t);
-typedef char* JIT_OPERATION (*P_JITOperation_EZ)(ExecState*, int32_t);
-typedef char* JIT_OPERATION (*P_JITOperation_EZZ)(ExecState*, int32_t, int32_t);
-typedef StringImpl* JIT_OPERATION (*I_JITOperation_EJss)(ExecState*, JSString*);
-typedef JSString* JIT_OPERATION (*Jss_JITOperation_EZ)(ExecState*, int32_t);
+typedef CallFrame* (JIT_OPERATION *F_JITOperation_EFJZZ)(ExecState*, CallFrame*, EncodedJSValue, int32_t, int32_t);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_E)(ExecState*);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EA)(ExecState*, JSArray*);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EAZ)(ExecState*, JSArray*, int32_t);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EAapJ)(ExecState*, ArrayAllocationProfile*, EncodedJSValue);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EAapJcpZ)(ExecState*, ArrayAllocationProfile*, const JSValue*, int32_t);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EC)(ExecState*, JSCell*);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_ECC)(ExecState*, JSCell*, JSCell*);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_ECI)(ExecState*, JSCell*, UniquedStringImpl*);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_ECJ)(ExecState*, JSCell*, EncodedJSValue);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_ECZ)(ExecState*, JSCell*, int32_t);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_ECZZ)(ExecState*, JSCell*, int32_t, int32_t);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EDA)(ExecState*, double, JSArray*);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EE)(ExecState*, ExecState*);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EGReoJ)(ExecState*, JSGlobalObject*, RegExpObject*, EncodedJSValue);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EGReoJss)(ExecState*, JSGlobalObject*, RegExpObject*, JSString*);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EGJJ)(ExecState*, JSGlobalObject*, EncodedJSValue, EncodedJSValue);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EI)(ExecState*, UniquedStringImpl*);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EJ)(ExecState*, EncodedJSValue);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EJZ)(ExecState*, EncodedJSValue, int32_t);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EJC)(ExecState*, EncodedJSValue, JSCell*);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EJA)(ExecState*, EncodedJSValue, JSArray*);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EJArp)(ExecState*, EncodedJSValue, ArithProfile*);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EJI)(ExecState*, EncodedJSValue, UniquedStringImpl*);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EJJ)(ExecState*, EncodedJSValue, EncodedJSValue);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EJJJ)(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EJJAp)(ExecState*, EncodedJSValue, EncodedJSValue, ArrayProfile*);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EJJBy)(ExecState*, EncodedJSValue, EncodedJSValue, ByValInfo*);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EJJJ)(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EJJArp)(ExecState*, EncodedJSValue, EncodedJSValue, ArithProfile*);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EJJMic)(ExecState*, EncodedJSValue, EncodedJSValue, void*);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EJMic)(ExecState*, EncodedJSValue, void*);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EJssZ)(ExecState*, JSString*, int32_t);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EJss)(ExecState*, JSString*);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EJssReo)(ExecState*, JSString*, RegExpObject*);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EJssReoJss)(ExecState*, JSString*, RegExpObject*, JSString*);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EJP)(ExecState*, EncodedJSValue, void*);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EGP)(ExecState*, JSGlobalObject*, void*);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EP)(ExecState*, void*);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EPP)(ExecState*, void*, void*);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EPPP)(ExecState*, void*, void*, void*);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EPS)(ExecState*, void*, size_t);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EPc)(ExecState*, Instruction*);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EJscC)(ExecState*, JSScope*, JSCell*);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EJscCJ)(ExecState*, JSScope*, JSCell*, EncodedJSValue);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EReoJ)(ExecState*, RegExpObject*, EncodedJSValue);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EReoJss)(ExecState*, RegExpObject*, JSString*);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_ESS)(ExecState*, size_t, size_t);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_ESsiCI)(ExecState*, StructureStubInfo*, JSCell*, UniquedStringImpl*);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_ESsiJI)(ExecState*, StructureStubInfo*, EncodedJSValue, UniquedStringImpl*);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EZ)(ExecState*, int32_t);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EZIcfZ)(ExecState*, int32_t, InlineCallFrame*, int32_t);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EZZ)(ExecState*, int32_t, int32_t);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EZSymtabJ)(ExecState*, int32_t, SymbolTable*, EncodedJSValue);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EOIUi)(ExecState*, JSObject*, UniquedStringImpl*, uint32_t);
+typedef EncodedJSValue (JIT_OPERATION *J_JITOperation_EJJI)(ExecState*, EncodedJSValue, EncodedJSValue, UniquedStringImpl*);
+typedef JSCell* (JIT_OPERATION *C_JITOperation_EPUi)(ExecState*, void*, uint32_t);
+typedef JSCell* (JIT_OPERATION *C_JITOperation_E)(ExecState*);
+typedef JSCell* (JIT_OPERATION *C_JITOperation_EZ)(ExecState*, int32_t);
+typedef JSCell* (JIT_OPERATION *C_JITOperation_EC)(ExecState*, JSCell*);
+typedef JSCell* (JIT_OPERATION *C_JITOperation_ECZ)(ExecState*, JSCell*, int32_t);
+typedef JSCell* (JIT_OPERATION *C_JITOperation_ECZC)(ExecState*, JSCell*, int32_t, JSCell*);
+typedef JSCell* (JIT_OPERATION *C_JITOperation_ECC)(ExecState*, JSCell*, JSCell*);
+typedef JSCell* (JIT_OPERATION *C_JITOperation_EGC)(ExecState*, JSGlobalObject*, JSCell*);
+typedef JSCell* (JIT_OPERATION *C_JITOperation_EGJ)(ExecState*, JSGlobalObject*, EncodedJSValue);
+typedef JSCell* (JIT_OPERATION *C_JITOperation_EIcf)(ExecState*, InlineCallFrame*);
+typedef JSCell* (JIT_OPERATION *C_JITOperation_EJ)(ExecState*, EncodedJSValue);
+typedef JSCell* (JIT_OPERATION *C_JITOperation_EJsc)(ExecState*, JSScope*);
+typedef JSCell* (JIT_OPERATION *C_JITOperation_EJscC)(ExecState*, JSScope*, JSCell*);
+typedef JSCell* (JIT_OPERATION *C_JITOperation_EJZ)(ExecState*, EncodedJSValue, int32_t);
+typedef JSCell* (JIT_OPERATION *C_JITOperation_EJZC)(ExecState*, EncodedJSValue, int32_t, JSCell*);
+typedef JSCell* (JIT_OPERATION *C_JITOperation_EJJ)(ExecState*, EncodedJSValue, EncodedJSValue);
+typedef JSCell* (JIT_OPERATION *C_JITOperation_EJJC)(ExecState*, EncodedJSValue, EncodedJSValue, JSCell*);
+typedef JSCell* (JIT_OPERATION *C_JITOperation_EJJJ)(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue);
+typedef JSCell* (JIT_OPERATION *C_JITOperation_EJscZ)(ExecState*, JSScope*, int32_t);
+typedef JSCell* (JIT_OPERATION *C_JITOperation_EJssSt)(ExecState*, JSString*, Structure*);
+typedef JSCell* (JIT_OPERATION *C_JITOperation_EJssJss)(ExecState*, JSString*, JSString*);
+typedef uintptr_t (JIT_OPERATION *C_JITOperation_B_EJssJss)(ExecState*, JSString*, JSString*);
+typedef uintptr_t (JIT_OPERATION *C_JITOperation_TT)(StringImpl*, StringImpl*);
+typedef JSCell* (JIT_OPERATION *C_JITOperation_EJssJssJss)(ExecState*, JSString*, JSString*, JSString*);
+typedef JSCell* (JIT_OPERATION *C_JITOperation_EL)(ExecState*, JSLexicalEnvironment*);
+typedef JSCell* (JIT_OPERATION *C_JITOperation_EO)(ExecState*, JSObject*);
+typedef JSCell* (JIT_OPERATION *C_JITOperation_EOZ)(ExecState*, JSObject*, int32_t);
+typedef JSCell* (JIT_OPERATION *C_JITOperation_ESt)(ExecState*, Structure*);
+typedef JSCell* (JIT_OPERATION *C_JITOperation_EStJscSymtabJ)(ExecState*, Structure*, JSScope*, SymbolTable*, EncodedJSValue);
+typedef JSCell* (JIT_OPERATION *C_JITOperation_EStRZJsfL)(ExecState*, Structure*, Register*, int32_t, JSFunction*, JSLexicalEnvironment*);
+typedef JSCell* (JIT_OPERATION *C_JITOperation_EStRZJsf)(ExecState*, Structure*, Register*, int32_t, JSFunction*);
+typedef JSCell* (JIT_OPERATION *C_JITOperation_EStZ)(ExecState*, Structure*, int32_t);
+typedef JSCell* (JIT_OPERATION *C_JITOperation_EStZZ)(ExecState*, Structure*, int32_t, int32_t);
+typedef JSCell* (JIT_OPERATION *C_JITOperation_EZ)(ExecState*, int32_t);
+typedef JSCell* (JIT_OPERATION *C_JITOperation_EJscI)(ExecState*, JSScope*, UniquedStringImpl*);
+typedef JSCell* (JIT_OPERATION *C_JITOperation_ECJZ)(ExecState*, JSCell*, EncodedJSValue, int32_t);
+typedef double (JIT_OPERATION *D_JITOperation_D)(double);
+typedef double (JIT_OPERATION *D_JITOperation_G)(JSGlobalObject*);
+typedef double (JIT_OPERATION *D_JITOperation_DD)(double, double);
+typedef double (JIT_OPERATION *D_JITOperation_ZZ)(int32_t, int32_t);
+typedef double (JIT_OPERATION *D_JITOperation_EJ)(ExecState*, EncodedJSValue);
+typedef int64_t (JIT_OPERATION *Q_JITOperation_J)(EncodedJSValue);
+typedef int64_t (JIT_OPERATION *Q_JITOperation_D)(double);
+typedef int32_t (JIT_OPERATION *Z_JITOperation_D)(double);
+typedef int32_t (JIT_OPERATION *Z_JITOperation_E)(ExecState*);
+typedef int32_t (JIT_OPERATION *Z_JITOperation_EC)(ExecState*, JSCell*);
+typedef int32_t (JIT_OPERATION *Z_JITOperation_EGC)(ExecState*, JSGlobalObject*, JSCell*);
+typedef int32_t (JIT_OPERATION *Z_JITOperation_ESJss)(ExecState*, size_t, JSString*);
+typedef int32_t (JIT_OPERATION *Z_JITOperation_EJ)(ExecState*, EncodedJSValue);
+typedef int32_t (JIT_OPERATION *Z_JITOperation_EJOJ)(ExecState*, EncodedJSValue, JSObject*, EncodedJSValue);
+typedef int32_t (JIT_OPERATION *Z_JITOperation_EJZ)(ExecState*, EncodedJSValue, int32_t);
+typedef int32_t (JIT_OPERATION *Z_JITOperation_EJZZ)(ExecState*, EncodedJSValue, int32_t, int32_t);
+typedef int32_t (JIT_OPERATION *Z_JITOperation_EOI)(ExecState*, JSObject*, UniquedStringImpl*);
+typedef int32_t (JIT_OPERATION *Z_JITOperation_EOJ)(ExecState*, JSObject*, EncodedJSValue);
+typedef size_t (JIT_OPERATION *S_JITOperation_ECC)(ExecState*, JSCell*, JSCell*);
+typedef size_t (JIT_OPERATION *S_JITOperation_EGC)(ExecState*, JSGlobalObject*, JSCell*);
+typedef size_t (JIT_OPERATION *S_JITOperation_EGJJ)(ExecState*, JSGlobalObject*, EncodedJSValue, EncodedJSValue);
+typedef size_t (JIT_OPERATION *S_JITOperation_EGReoJ)(ExecState*, JSGlobalObject*, RegExpObject*, EncodedJSValue);
+typedef size_t (JIT_OPERATION *S_JITOperation_EGReoJss)(ExecState*, JSGlobalObject*, RegExpObject*, JSString*);
+typedef size_t (JIT_OPERATION *S_JITOperation_EJ)(ExecState*, EncodedJSValue);
+typedef size_t (JIT_OPERATION *S_JITOperation_EJI)(ExecState*, EncodedJSValue, UniquedStringImpl*);
+typedef size_t (JIT_OPERATION *S_JITOperation_EJJ)(ExecState*, EncodedJSValue, EncodedJSValue);
+typedef size_t (JIT_OPERATION *S_JITOperation_EOJss)(ExecState*, JSObject*, JSString*);
+typedef size_t (JIT_OPERATION *S_JITOperation_EReoJ)(ExecState*, RegExpObject*, EncodedJSValue);
+typedef size_t (JIT_OPERATION *S_JITOperation_EReoJss)(ExecState*, RegExpObject*, JSString*);
+typedef size_t (JIT_OPERATION *S_JITOperation_J)(EncodedJSValue);
+typedef SlowPathReturnType (JIT_OPERATION *Sprt_JITOperation_EZ)(ExecState*, int32_t);
+typedef void (JIT_OPERATION *V_JITOperation)();
+typedef void (JIT_OPERATION *V_JITOperation_E)(ExecState*);
+typedef void (JIT_OPERATION *V_JITOperation_EC)(ExecState*, JSCell*);
+typedef void (JIT_OPERATION *V_JITOperation_ECb)(ExecState*, CodeBlock*);
+typedef void (JIT_OPERATION *V_JITOperation_ECC)(ExecState*, JSCell*, JSCell*);
+typedef void (JIT_OPERATION *V_JITOperation_ECIcf)(ExecState*, JSCell*, InlineCallFrame*);
+typedef void (JIT_OPERATION *V_JITOperation_ECIZC)(ExecState*, JSCell*, UniquedStringImpl*, int32_t, JSCell*);
+typedef void (JIT_OPERATION *V_JITOperation_ECIZCC)(ExecState*, JSCell*, UniquedStringImpl*, int32_t, JSCell*, JSCell*);
+typedef void (JIT_OPERATION *V_JITOperation_ECIZJJ)(ExecState*, JSCell*, UniquedStringImpl*, int32_t, EncodedJSValue, EncodedJSValue);
+typedef void (JIT_OPERATION *V_JITOperation_ECJZC)(ExecState*, JSCell*, EncodedJSValue, int32_t, JSCell*);
+typedef void (JIT_OPERATION *V_JITOperation_ECCIcf)(ExecState*, JSCell*, JSCell*, InlineCallFrame*);
+typedef void (JIT_OPERATION *V_JITOperation_ECJ)(ExecState*, JSCell*, EncodedJSValue);
+typedef void (JIT_OPERATION *V_JITOperation_ECJJ)(ExecState*, JSCell*, EncodedJSValue, EncodedJSValue);
+typedef void (JIT_OPERATION *V_JITOperation_ECPSPS)(ExecState*, JSCell*, void*, size_t, void*, size_t);
+typedef void (JIT_OPERATION *V_JITOperation_ECZ)(ExecState*, JSCell*, int32_t);
+typedef void (JIT_OPERATION *V_JITOperation_ECC)(ExecState*, JSCell*, JSCell*);
+typedef void (JIT_OPERATION *V_JITOperation_ECliJsf)(ExecState*, CallLinkInfo*, JSFunction*);
+typedef void (JIT_OPERATION *V_JITOperation_EZSymtabJ)(ExecState*, int32_t, SymbolTable*, EncodedJSValue);
+typedef void (JIT_OPERATION *V_JITOperation_EJ)(ExecState*, EncodedJSValue);
+typedef void (JIT_OPERATION *V_JITOperation_EJCI)(ExecState*, EncodedJSValue, JSCell*, UniquedStringImpl*);
+typedef void (JIT_OPERATION *V_JITOperation_EJJJ)(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue);
+typedef void (JIT_OPERATION *V_JITOperation_EJJJAp)(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue, ArrayProfile*);
+typedef void (JIT_OPERATION *V_JITOperation_EJJJBy)(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue, ByValInfo*);
+typedef void (JIT_OPERATION *V_JITOperation_EJPP)(ExecState*, EncodedJSValue, void*, void*);
+typedef void (JIT_OPERATION *V_JITOperation_EJZJ)(ExecState*, EncodedJSValue, int32_t, EncodedJSValue);
+typedef void (JIT_OPERATION *V_JITOperation_EJZ)(ExecState*, EncodedJSValue, int32_t);
+typedef void (JIT_OPERATION *V_JITOperation_EOZD)(ExecState*, JSObject*, int32_t, double);
+typedef void (JIT_OPERATION *V_JITOperation_EOZJ)(ExecState*, JSObject*, int32_t, EncodedJSValue);
+typedef void (JIT_OPERATION *V_JITOperation_EPc)(ExecState*, Instruction*);
+typedef void (JIT_OPERATION *V_JITOperation_EPZJ)(ExecState*, void*, int32_t, EncodedJSValue);
+typedef void (JIT_OPERATION *V_JITOperation_ESsiJJI)(ExecState*, StructureStubInfo*, EncodedJSValue, EncodedJSValue, UniquedStringImpl*);
+typedef void (JIT_OPERATION *V_JITOperation_EJJJI)(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue, UniquedStringImpl*);
+typedef void (JIT_OPERATION *V_JITOperation_EJJJJ)(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue, EncodedJSValue);
+typedef void (JIT_OPERATION *V_JITOperation_EOJJZ)(ExecState*, JSObject*, EncodedJSValue, EncodedJSValue, int32_t);
+typedef void (JIT_OPERATION *V_JITOperation_EOJssJZ)(ExecState*, JSObject*, JSString*, EncodedJSValue, int32_t);
+typedef void (JIT_OPERATION *V_JITOperation_EOIJZ)(ExecState*, JSObject*, UniquedStringImpl*, EncodedJSValue, int32_t);
+typedef void (JIT_OPERATION *V_JITOperation_EOSymJZ)(ExecState*, JSObject*, Symbol*, EncodedJSValue, int32_t);
+typedef void (JIT_OPERATION *V_JITOperation_EOJOOZ)(ExecState*, JSObject*, EncodedJSValue, JSObject*, JSObject*, int32_t);
+typedef void (JIT_OPERATION *V_JITOperation_EOJssOOZ)(ExecState*, JSObject*, JSString*, JSObject*, JSObject*, int32_t);
+typedef void (JIT_OPERATION *V_JITOperation_EOIOOZ)(ExecState*, JSObject*, UniquedStringImpl*, JSObject*, JSObject*, int32_t);
+typedef void (JIT_OPERATION *V_JITOperation_EOSymOOZ)(ExecState*, JSObject*, Symbol*, JSObject*, JSObject*, int32_t);
+typedef void (JIT_OPERATION *V_JITOperation_EWs)(ExecState*, WatchpointSet*);
+typedef void (JIT_OPERATION *V_JITOperation_EZ)(ExecState*, int32_t);
+typedef void (JIT_OPERATION *V_JITOperation_EZJ)(ExecState*, int32_t, EncodedJSValue);
+typedef void (JIT_OPERATION *V_JITOperation_EZJZZZ)(ExecState*, int32_t, EncodedJSValue, int32_t, int32_t, int32_t);
+typedef void (JIT_OPERATION *V_JITOperation_EVm)(ExecState*, VM*);
+typedef void (JIT_OPERATION *V_JITOperation_J)(EncodedJSValue);
+typedef void (JIT_OPERATION *V_JITOperation_Z)(int32_t);
+typedef JSCell* (JIT_OPERATION *C_JITOperation_ERUiUi)(ExecState*, Register*, uint32_t, uint32_t);
+typedef void (JIT_OPERATION *V_JITOperation_EOJIUi)(ExecState*, JSObject*, EncodedJSValue, UniquedStringImpl*, uint32_t);
+typedef char* (JIT_OPERATION *P_JITOperation_E)(ExecState*);
+typedef char* (JIT_OPERATION *P_JITOperation_EC)(ExecState*, JSCell*);
+typedef char* (JIT_OPERATION *P_JITOperation_ECli)(ExecState*, CallLinkInfo*);
+typedef char* (JIT_OPERATION *P_JITOperation_EJS)(ExecState*, EncodedJSValue, size_t);
+typedef char* (JIT_OPERATION *P_JITOperation_EO)(ExecState*, JSObject*);
+typedef char* (JIT_OPERATION *P_JITOperation_EOS)(ExecState*, JSObject*, size_t);
+typedef char* (JIT_OPERATION *P_JITOperation_EOZ)(ExecState*, JSObject*, int32_t);
+typedef char* (JIT_OPERATION *P_JITOperation_EPS)(ExecState*, void*, size_t);
+typedef char* (JIT_OPERATION *P_JITOperation_ES)(ExecState*, size_t);
+typedef char* (JIT_OPERATION *P_JITOperation_ESJss)(ExecState*, size_t, JSString*);
+typedef char* (JIT_OPERATION *P_JITOperation_ESt)(ExecState*, Structure*);
+typedef char* (JIT_OPERATION *P_JITOperation_EStJ)(ExecState*, Structure*, EncodedJSValue);
+typedef char* (JIT_OPERATION *P_JITOperation_EStPS)(ExecState*, Structure*, void*, size_t);
+typedef char* (JIT_OPERATION *P_JITOperation_EStSS)(ExecState*, Structure*, size_t, size_t);
+typedef char* (JIT_OPERATION *P_JITOperation_EStZ)(ExecState*, Structure*, int32_t);
+typedef char* (JIT_OPERATION *P_JITOperation_EStZB)(ExecState*, Structure*, int32_t, Butterfly*);
+typedef char* (JIT_OPERATION *P_JITOperation_EStZP)(ExecState*, Structure*, int32_t, char*);
+typedef char* (JIT_OPERATION *P_JITOperation_EZZ)(ExecState*, int32_t, int32_t);
+typedef char* (JIT_OPERATION *P_JITOperation_EQZ)(ExecState*, int64_t, int32_t);
+typedef char* (JIT_OPERATION *P_JITOperation_EDZ)(ExecState*, double, int32_t);
+typedef SlowPathReturnType (JIT_OPERATION *Sprt_JITOperation_ECli)(ExecState*, CallLinkInfo*);
+typedef StringImpl* (JIT_OPERATION *T_JITOperation_EJss)(ExecState*, JSString*);
+typedef JSString* (JIT_OPERATION *Jss_JITOperation_EZ)(ExecState*, int32_t);
+typedef JSString* (JIT_OPERATION *Jss_JITOperation_EJJJ)(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue);
+typedef JSString* (JIT_OPERATION *Jss_JITOperation_EJssUi)(ExecState*, JSString*, uint32_t);
// This method is used to look up an exception handler, keyed by faultLocation, which is
// the return location from one of the calls out to one of the helper operations above.
-
-void JIT_OPERATION lookupExceptionHandler(ExecState*) WTF_INTERNAL;
+
+void JIT_OPERATION lookupExceptionHandler(VM*, ExecState*) WTF_INTERNAL;
+void JIT_OPERATION lookupExceptionHandlerFromCallerFrame(VM*, ExecState*) WTF_INTERNAL;
void JIT_OPERATION operationVMHandleException(ExecState*) WTF_INTERNAL;
-void JIT_OPERATION operationStackCheck(ExecState*, CodeBlock*) WTF_INTERNAL;
+void JIT_OPERATION operationThrowStackOverflowError(ExecState*, CodeBlock*) WTF_INTERNAL;
+#if ENABLE(WEBASSEMBLY)
+void JIT_OPERATION operationThrowDivideError(ExecState*) WTF_INTERNAL;
+void JIT_OPERATION operationThrowOutOfBoundsAccessError(ExecState*) WTF_INTERNAL;
+#endif
int32_t JIT_OPERATION operationCallArityCheck(ExecState*) WTF_INTERNAL;
int32_t JIT_OPERATION operationConstructArityCheck(ExecState*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationGetById(ExecState*, StructureStubInfo*, EncodedJSValue, StringImpl*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationGetByIdBuildList(ExecState*, StructureStubInfo*, EncodedJSValue, StringImpl*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationGetByIdOptimize(ExecState*, StructureStubInfo*, EncodedJSValue, StringImpl*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationInOptimize(ExecState*, StructureStubInfo*, JSCell*, StringImpl*);
-EncodedJSValue JIT_OPERATION operationIn(ExecState*, StructureStubInfo*, JSCell*, StringImpl*);
-EncodedJSValue JIT_OPERATION operationGenericIn(ExecState*, JSCell*, EncodedJSValue);
-EncodedJSValue JIT_OPERATION operationCallCustomGetter(ExecState*, JSCell*, PropertySlot::GetValueFunc, StringImpl*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationCallGetter(ExecState*, JSCell*, JSCell*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdStrict(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdNonStrict(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdDirectStrict(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdDirectNonStrict(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdStrictOptimize(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdNonStrictOptimize(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdDirectStrictOptimize(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdDirectNonStrictOptimize(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdStrictBuildList(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdNonStrictBuildList(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdDirectStrictBuildList(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdDirectNonStrictBuildList(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
-void JIT_OPERATION operationReallocateStorageAndFinishPut(ExecState*, JSObject*, Structure*, PropertyOffset, EncodedJSValue) WTF_INTERNAL;
-void JIT_OPERATION operationPutByVal(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
-void JIT_OPERATION operationDirectPutByVal(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
-void JIT_OPERATION operationPutByValGeneric(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
-void JIT_OPERATION operationDirectPutByValGeneric(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationCallEval(ExecState*) WTF_INTERNAL;
-char* JIT_OPERATION operationVirtualCall(ExecState*) WTF_INTERNAL;
-char* JIT_OPERATION operationLinkCall(ExecState*) WTF_INTERNAL;
-char* JIT_OPERATION operationLinkClosureCall(ExecState*) WTF_INTERNAL;
-char* JIT_OPERATION operationVirtualConstruct(ExecState*) WTF_INTERNAL;
-char* JIT_OPERATION operationLinkConstruct(ExecState*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationTryGetById(ExecState*, StructureStubInfo*, EncodedJSValue, UniquedStringImpl*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationTryGetByIdGeneric(ExecState*, EncodedJSValue, UniquedStringImpl*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationTryGetByIdOptimize(ExecState*, StructureStubInfo*, EncodedJSValue, UniquedStringImpl*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationGetById(ExecState*, StructureStubInfo*, EncodedJSValue, UniquedStringImpl*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationGetByIdGeneric(ExecState*, EncodedJSValue, UniquedStringImpl*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationGetByIdOptimize(ExecState*, StructureStubInfo*, EncodedJSValue, UniquedStringImpl*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationInOptimize(ExecState*, StructureStubInfo*, JSCell*, UniquedStringImpl*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationIn(ExecState*, StructureStubInfo*, JSCell*, UniquedStringImpl*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationGenericIn(ExecState*, JSCell*, EncodedJSValue) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdStrict(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdNonStrict(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdDirectStrict(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdDirectNonStrict(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdStrictOptimize(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdNonStrictOptimize(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdDirectStrictOptimize(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdDirectNonStrictOptimize(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdStrictBuildList(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdNonStrictBuildList(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdDirectStrictBuildList(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdDirectNonStrictBuildList(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByValOptimize(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue, ByValInfo*) WTF_INTERNAL;
+void JIT_OPERATION operationDirectPutByValOptimize(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue, ByValInfo*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByValGeneric(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue, ByValInfo*) WTF_INTERNAL;
+void JIT_OPERATION operationDirectPutByValGeneric(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue, ByValInfo*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationCallEval(ExecState*, ExecState*) WTF_INTERNAL;
+SlowPathReturnType JIT_OPERATION operationLinkCall(ExecState*, CallLinkInfo*) WTF_INTERNAL;
+void JIT_OPERATION operationLinkDirectCall(ExecState*, CallLinkInfo*, JSFunction*) WTF_INTERNAL;
+SlowPathReturnType JIT_OPERATION operationLinkPolymorphicCall(ExecState*, CallLinkInfo*) WTF_INTERNAL;
+SlowPathReturnType JIT_OPERATION operationVirtualCall(ExecState*, CallLinkInfo*) WTF_INTERNAL;
+
size_t JIT_OPERATION operationCompareLess(ExecState*, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
size_t JIT_OPERATION operationCompareLessEq(ExecState*, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
size_t JIT_OPERATION operationCompareGreater(ExecState*, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
size_t JIT_OPERATION operationCompareGreaterEq(ExecState*, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
-size_t JIT_OPERATION operationConvertJSValueToBoolean(ExecState*, EncodedJSValue) WTF_INTERNAL;
size_t JIT_OPERATION operationCompareEq(ExecState*, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
#if USE(JSVALUE64)
EncodedJSValue JIT_OPERATION operationCompareStringEq(ExecState*, JSCell* left, JSCell* right) WTF_INTERNAL;
#else
size_t JIT_OPERATION operationCompareStringEq(ExecState*, JSCell* left, JSCell* right) WTF_INTERNAL;
#endif
-size_t JIT_OPERATION operationHasProperty(ExecState*, JSObject*, JSString*) WTF_INTERNAL;
EncodedJSValue JIT_OPERATION operationNewArrayWithProfile(ExecState*, ArrayAllocationProfile*, const JSValue* values, int32_t size) WTF_INTERNAL;
EncodedJSValue JIT_OPERATION operationNewArrayBufferWithProfile(ExecState*, ArrayAllocationProfile*, const JSValue* values, int32_t size) WTF_INTERNAL;
EncodedJSValue JIT_OPERATION operationNewArrayWithSizeAndProfile(ExecState*, ArrayAllocationProfile*, EncodedJSValue size) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationNewFunction(ExecState*, JSCell*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationNewFunction(ExecState*, JSScope*, JSCell*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationNewFunctionWithInvalidatedReallocationWatchpoint(ExecState*, JSScope*, JSCell*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationNewGeneratorFunction(ExecState*, JSScope*, JSCell*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationNewGeneratorFunctionWithInvalidatedReallocationWatchpoint(ExecState*, JSScope*, JSCell*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationNewAsyncFunction(ExecState*, JSScope*, JSCell*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationNewAsyncFunctionWithInvalidatedReallocationWatchpoint(ExecState*, JSScope*, JSCell*) WTF_INTERNAL;
+void JIT_OPERATION operationSetFunctionName(ExecState*, JSCell*, EncodedJSValue) WTF_INTERNAL;
JSCell* JIT_OPERATION operationNewObject(ExecState*, Structure*) WTF_INTERNAL;
EncodedJSValue JIT_OPERATION operationNewRegexp(ExecState*, void*) WTF_INTERNAL;
-void JIT_OPERATION operationHandleWatchdogTimer(ExecState*) WTF_INTERNAL;
-void JIT_OPERATION operationThrowStaticError(ExecState*, EncodedJSValue, int32_t) WTF_INTERNAL;
+UnusedPtr JIT_OPERATION operationHandleWatchdogTimer(ExecState*) WTF_INTERNAL;
void JIT_OPERATION operationThrow(ExecState*, EncodedJSValue) WTF_INTERNAL;
void JIT_OPERATION operationDebug(ExecState*, int32_t) WTF_INTERNAL;
#if ENABLE(DFG_JIT)
-char* JIT_OPERATION operationOptimize(ExecState*, int32_t) WTF_INTERNAL;
+SlowPathReturnType JIT_OPERATION operationOptimize(ExecState*, int32_t) WTF_INTERNAL;
#endif
void JIT_OPERATION operationPutByIndex(ExecState*, EncodedJSValue, int32_t, EncodedJSValue);
+void JIT_OPERATION operationPutGetterById(ExecState*, JSCell*, UniquedStringImpl*, int32_t options, JSCell*) WTF_INTERNAL;
+void JIT_OPERATION operationPutSetterById(ExecState*, JSCell*, UniquedStringImpl*, int32_t options, JSCell*) WTF_INTERNAL;
+void JIT_OPERATION operationPutGetterByVal(ExecState*, JSCell*, EncodedJSValue, int32_t attribute, JSCell*) WTF_INTERNAL;
+void JIT_OPERATION operationPutSetterByVal(ExecState*, JSCell*, EncodedJSValue, int32_t attribute, JSCell*) WTF_INTERNAL;
#if USE(JSVALUE64)
-void JIT_OPERATION operationPutGetterSetter(ExecState*, EncodedJSValue, Identifier*, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
+void JIT_OPERATION operationPutGetterSetter(ExecState*, JSCell*, UniquedStringImpl*, int32_t attribute, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
#else
-void JIT_OPERATION operationPutGetterSetter(ExecState*, JSCell*, Identifier*, JSCell*, JSCell*) WTF_INTERNAL;
+void JIT_OPERATION operationPutGetterSetter(ExecState*, JSCell*, UniquedStringImpl*, int32_t attribute, JSCell*, JSCell*) WTF_INTERNAL;
#endif
-void JIT_OPERATION operationPushNameScope(ExecState*, Identifier*, EncodedJSValue, int32_t) WTF_INTERNAL;
-void JIT_OPERATION operationPushWithScope(ExecState*, EncodedJSValue) WTF_INTERNAL;
-void JIT_OPERATION operationPopScope(ExecState*) WTF_INTERNAL;
-void JIT_OPERATION operationProfileDidCall(ExecState*, EncodedJSValue) WTF_INTERNAL;
-void JIT_OPERATION operationProfileWillCall(ExecState*, EncodedJSValue) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationCheckHasInstance(ExecState*, EncodedJSValue, EncodedJSValue baseVal) WTF_INTERNAL;
-JSCell* JIT_OPERATION operationCreateActivation(ExecState*, int32_t offset) WTF_INTERNAL;
-JSCell* JIT_OPERATION operationCreateArguments(ExecState*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationGetArgumentsLength(ExecState*, int32_t) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationGetByValDefault(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationGetByValGeneric(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationGetByValString(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript) WTF_INTERNAL;
-void JIT_OPERATION operationTearOffActivation(ExecState*, JSCell*) WTF_INTERNAL;
-void JIT_OPERATION operationTearOffArguments(ExecState*, JSCell*, JSCell*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationDeleteById(ExecState*, EncodedJSValue base, const Identifier*) WTF_INTERNAL;
+void JIT_OPERATION operationPushFunctionNameScope(ExecState*, int32_t, SymbolTable*, EncodedJSValue) WTF_INTERNAL;
+void JIT_OPERATION operationPopScope(ExecState*, int32_t) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationGetByValOptimize(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationGetByValGeneric(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationGetByValString(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationHasIndexedPropertyDefault(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationHasIndexedPropertyGeneric(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationDeleteByIdJSResult(ExecState*, EncodedJSValue base, UniquedStringImpl*) WTF_INTERNAL;
+size_t JIT_OPERATION operationDeleteById(ExecState*, EncodedJSValue base, UniquedStringImpl*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationDeleteByValJSResult(ExecState*, EncodedJSValue base, EncodedJSValue target) WTF_INTERNAL;
+size_t JIT_OPERATION operationDeleteByVal(ExecState*, EncodedJSValue base, EncodedJSValue target) WTF_INTERNAL;
JSCell* JIT_OPERATION operationGetPNames(ExecState*, JSObject*) WTF_INTERNAL;
EncodedJSValue JIT_OPERATION operationInstanceOf(ExecState*, EncodedJSValue, EncodedJSValue proto) WTF_INTERNAL;
-CallFrame* JIT_OPERATION operationSizeAndAllocFrameForVarargs(ExecState*, EncodedJSValue arguments, int32_t firstFreeRegister) WTF_INTERNAL;
-CallFrame* JIT_OPERATION operationLoadVarargs(ExecState*, CallFrame*, EncodedJSValue thisValue, EncodedJSValue arguments) WTF_INTERNAL;
+int32_t JIT_OPERATION operationSizeFrameForForwardArguments(ExecState*, EncodedJSValue arguments, int32_t numUsedStackSlots, int32_t firstVarArgOffset) WTF_INTERNAL;
+int32_t JIT_OPERATION operationSizeFrameForVarargs(ExecState*, EncodedJSValue arguments, int32_t numUsedStackSlots, int32_t firstVarArgOffset) WTF_INTERNAL;
+CallFrame* JIT_OPERATION operationSetupForwardArgumentsFrame(ExecState*, CallFrame*, EncodedJSValue, int32_t, int32_t length) WTF_INTERNAL;
+CallFrame* JIT_OPERATION operationSetupVarargsFrame(ExecState*, CallFrame*, EncodedJSValue arguments, int32_t firstVarArgOffset, int32_t length) WTF_INTERNAL;
EncodedJSValue JIT_OPERATION operationToObject(ExecState*, EncodedJSValue) WTF_INTERNAL;
char* JIT_OPERATION operationSwitchCharWithUnknownKeyType(ExecState*, EncodedJSValue key, size_t tableIndex) WTF_INTERNAL;
char* JIT_OPERATION operationSwitchImmWithUnknownKeyType(ExecState*, EncodedJSValue key, size_t tableIndex) WTF_INTERNAL;
char* JIT_OPERATION operationSwitchStringWithUnknownKeyType(ExecState*, EncodedJSValue key, size_t tableIndex) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationResolveScope(ExecState*, int32_t identifierIndex) WTF_INTERNAL;
EncodedJSValue JIT_OPERATION operationGetFromScope(ExecState*, Instruction* bytecodePC) WTF_INTERNAL;
void JIT_OPERATION operationPutToScope(ExecState*, Instruction* bytecodePC) WTF_INTERNAL;
-void JIT_OPERATION operationFlushWriteBarrierBuffer(ExecState*, JSCell*);
-void JIT_OPERATION operationWriteBarrier(ExecState*, JSCell*, JSCell*);
-void JIT_OPERATION operationUnconditionalWriteBarrier(ExecState*, JSCell*);
+char* JIT_OPERATION operationReallocateButterflyToHavePropertyStorageWithInitialCapacity(ExecState*, JSObject*) WTF_INTERNAL;
+char* JIT_OPERATION operationReallocateButterflyToGrowPropertyStorage(ExecState*, JSObject*, size_t newSize) WTF_INTERNAL;
+
+void JIT_OPERATION operationWriteBarrierSlowPath(ExecState*, JSCell*);
void JIT_OPERATION operationOSRWriteBarrier(ExecState*, JSCell*);
-void JIT_OPERATION operationInitGlobalConst(ExecState*, Instruction*);
+void JIT_OPERATION operationExceptionFuzz(ExecState*);
+
+int32_t JIT_OPERATION operationCheckIfExceptionIsUncatchableAndNotifyProfiler(ExecState*);
+int32_t JIT_OPERATION operationInstanceOfCustom(ExecState*, EncodedJSValue encodedValue, JSObject* constructor, EncodedJSValue encodedHasInstance) WTF_INTERNAL;
+
+EncodedJSValue JIT_OPERATION operationHasGenericProperty(ExecState*, EncodedJSValue, JSCell*);
+EncodedJSValue JIT_OPERATION operationHasIndexedProperty(ExecState*, JSCell*, int32_t, int32_t);
+JSCell* JIT_OPERATION operationGetPropertyEnumerator(ExecState*, JSCell*);
+EncodedJSValue JIT_OPERATION operationNextEnumeratorPname(ExecState*, JSCell*, int32_t);
+JSCell* JIT_OPERATION operationToIndexString(ExecState*, int32_t);
+
+EncodedJSValue JIT_OPERATION operationValueAdd(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationValueAddProfiled(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, ArithProfile*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationValueAddProfiledOptimize(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, JITAddIC*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationValueAddProfiledNoOptimize(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, JITAddIC*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationValueAddOptimize(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, JITAddIC*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationValueAddNoOptimize(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, JITAddIC*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationValueMul(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationValueMulOptimize(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, JITMulIC*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationValueMulNoOptimize(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, JITMulIC*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationValueMulProfiledOptimize(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, JITMulIC*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationValueMulProfiledNoOptimize(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, JITMulIC*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationValueMulProfiled(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, ArithProfile*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationArithNegate(ExecState*, EncodedJSValue operand);
+EncodedJSValue JIT_OPERATION operationArithNegateProfiled(ExecState*, EncodedJSValue operand, ArithProfile*);
+EncodedJSValue JIT_OPERATION operationArithNegateProfiledOptimize(ExecState*, EncodedJSValue encodedOperand, JITNegIC*);
+EncodedJSValue JIT_OPERATION operationArithNegateOptimize(ExecState*, EncodedJSValue encodedOperand, JITNegIC*);
+EncodedJSValue JIT_OPERATION operationValueSub(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationValueSubProfiled(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, ArithProfile*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationValueSubOptimize(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, JITSubIC*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationValueSubNoOptimize(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, JITSubIC*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationValueSubProfiledOptimize(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, JITSubIC*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationValueSubProfiledNoOptimize(ExecState*, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2, JITSubIC*) WTF_INTERNAL;
+
+void JIT_OPERATION operationProcessTypeProfilerLog(ExecState*) WTF_INTERNAL;
+void JIT_OPERATION operationProcessShadowChickenLog(ExecState*) WTF_INTERNAL;
} // extern "C"
} // namespace JSC
#endif // ENABLE(JIT)
-
-#endif // JITOperations_h
-
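
As an aside for readers of this patch: the typedef block above encodes each helper's signature in its name, and the JIT calls the helper through a pointer of the matching type after marshalling the arguments. Below is a minimal, self-contained sketch of that signature-matching idea; it uses stand-in types (ExecStateStub, EncodedValue) and an invented operation rather than the real JSC headers.

// Illustration only: stand-in types, not JavaScriptCore's.
#include <cstdint>
#include <cstdio>

struct ExecStateStub {};          // stands in for ExecState*
using EncodedValue = int64_t;     // stands in for EncodedJSValue

// Mirrors the naming scheme: one letter per type -- return EncodedValue (J),
// parameters ExecStateStub* (E), EncodedValue (J), int32_t (Z).
typedef EncodedValue (*J_Operation_EJZ)(ExecStateStub*, EncodedValue, int32_t);

// A hypothetical slow-path helper with a matching signature.
static EncodedValue operationAddInt32(ExecStateStub*, EncodedValue lhs, int32_t rhs)
{
    return lhs + rhs;
}

int main()
{
    ExecStateStub exec;
    J_Operation_EJZ op = operationAddInt32; // the JIT emits a call through such a pointer
    std::printf("%lld\n", static_cast<long long>(op(&exec, 40, 2)));
    return 0;
}

In the real code the assembler materializes the helper's address and performs the call under the JIT's calling convention; the sketch only shows how the typedef names pin down the helper's signature.
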
diff --git a/Source/JavaScriptCore/jit/JITOperationsMSVC64.cpp b/Source/JavaScriptCore/jit/JITOperationsMSVC64.cpp
new file mode 100644
index 000000000..544bca394
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITOperationsMSVC64.cpp
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if !ENABLE(JIT) && COMPILER(MSVC) && CPU(X86_64)
+
+#include "CallFrame.h"
+#include "JSCJSValue.h"
+#include "JSCInlines.h"
+
+namespace JSC {
+
+// FIXME: The following is a workaround that is only needed because JITStubsMSVC64.asm
+// is built unconditionally even when the JIT is disabled, and it references this function.
+// We only need to provide a stub to satisfy the linkage. It will never be called.
+extern "C" EncodedJSValue getHostCallReturnValueWithExecState(ExecState*)
+{
+ return JSValue::encode(JSValue());
+}
+
+} // namespace JSC
+
+#endif // !ENABLE(JIT) && COMPILER(MSVC) && CPU(X86_64)
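
The new JITOperationsMSVC64.cpp above exists purely so the unconditionally-assembled MSVC x64 stub has a symbol to link against; its body is never executed. The same "define a trivial symbol under a narrow guard just to satisfy the linker" pattern, reduced to a generic sketch with invented names (NEEDS_LINKAGE_STUB, hostCallReturnValueStub), looks like this:

// Illustration only; the macro and symbol names here are invented.
#include <cstdint>

#if defined(NEEDS_LINKAGE_STUB)
// Referenced from an assembly file that is always built in this hypothetical
// project; an empty definition keeps the link step happy even though the code
// path that would call it is compiled out.
extern "C" int64_t hostCallReturnValueStub(void*)
{
    return 0;
}
#endif
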
diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
index 4241baf32..bff53608d 100644
--- a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
+++ b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009, 2014, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,18 +29,20 @@
#include "JIT.h"
#include "CodeBlock.h"
+#include "DirectArguments.h"
#include "GCAwareJITStubRoutine.h"
#include "GetterSetter.h"
#include "Interpreter.h"
#include "JITInlines.h"
#include "JSArray.h"
+#include "JSEnvironmentRecord.h"
#include "JSFunction.h"
-#include "JSPropertyNameIterator.h"
-#include "JSVariableObject.h"
#include "LinkBuffer.h"
-#include "RepatchBuffer.h"
#include "ResultType.h"
-#include "SamplingTool.h"
+#include "ScopedArguments.h"
+#include "ScopedArgumentsTable.h"
+#include "SlowPathCall.h"
+#include "StructureStubInfo.h"
#include <wtf/StringPrintStream.h>
@@ -51,7 +53,10 @@ JIT::CodeRef JIT::stringGetByValStubGenerator(VM* vm)
{
JSInterfaceJIT jit(vm);
JumpList failures;
- failures.append(jit.branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(vm->stringStructure.get())));
+ failures.append(jit.branchStructure(
+ NotEqual,
+ Address(regT0, JSCell::structureIDOffset()),
+ vm->stringStructure.get()));
// Load string length to regT2, and start the process of loading the data pointer into regT0
jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT2);
@@ -83,7 +88,7 @@ JIT::CodeRef JIT::stringGetByValStubGenerator(VM* vm)
jit.move(TrustedImm32(0), regT0);
jit.ret();
- LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
return FINALIZE_CODE(patchBuffer, ("String get_by_val stub"));
}
@@ -93,21 +98,32 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
int base = currentInstruction[2].u.operand;
int property = currentInstruction[3].u.operand;
ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
-
- emitGetVirtualRegisters(base, regT0, property, regT1);
- emitJumpSlowCaseIfNotImmediateInteger(regT1);
+ ByValInfo* byValInfo = m_codeBlock->addByValInfo();
- // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
- // We check the value as if it was a uint32 against the m_vectorLength - which will always fail if
- // number was signed since m_vectorLength is always less than intmax (since the total allocation
- // size is always less than 4Gb). As such zero extending wil have been correct (and extending the value
- // to 64-bits is necessary since it's used in the address calculation. We zero extend rather than sign
- // extending since it makes it easier to re-tag the value in the slow case.
- zeroExtend32ToPtr(regT1, regT1);
+ emitGetVirtualRegister(base, regT0);
+ bool propertyNameIsIntegerConstant = isOperandConstantInt(property);
+ if (propertyNameIsIntegerConstant)
+ move(Imm32(getOperandConstantInt(property)), regT1);
+ else
+ emitGetVirtualRegister(property, regT1);
emitJumpSlowCaseIfNotJSCell(regT0, base);
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- emitArrayProfilingSite(regT2, regT3, profile);
+
+ PatchableJump notIndex;
+ if (!propertyNameIsIntegerConstant) {
+ notIndex = emitPatchableJumpIfNotInt(regT1);
+ addSlowCase(notIndex);
+
+ // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
+ // We check the value as if it was a uint32 against the m_vectorLength - which will always fail if
+ // number was signed since m_vectorLength is always less than intmax (since the total allocation
+ // size is always less than 4Gb). As such zero extending will have been correct (and extending the value
+ // to 64-bits is necessary since it's used in the address calculation). We zero extend rather than sign
+ // extending since it makes it easier to re-tag the value in the slow case.
+ zeroExtend32ToPtr(regT1, regT1);
+ }
+
+ emitArrayProfilingSiteWithCell(regT0, regT2, profile);
and32(TrustedImm32(IndexingShapeMask), regT2);
PatchableJump badType;
@@ -137,19 +153,21 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
Label done = label();
-#if !ASSERT_DISABLED
- Jump resultOK = branchTest64(NonZero, regT0);
- breakpoint();
- resultOK.link(this);
-#endif
+ if (!ASSERT_DISABLED) {
+ Jump resultOK = branchTest64(NonZero, regT0);
+ abortWithReason(JITGetByValResultIsNotEmpty);
+ resultOK.link(this);
+ }
emitValueProfilingSite();
emitPutVirtualRegister(dst);
-
- m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
+
+ Label nextHotPath = label();
+
+ m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, notIndex, badType, mode, profile, done, nextHotPath));
}
-JIT::JumpList JIT::emitDoubleGetByVal(Instruction*, PatchableJump& badType)
+JIT::JumpList JIT::emitDoubleLoad(Instruction*, PatchableJump& badType)
{
JumpList slowCases;
@@ -158,13 +176,11 @@ JIT::JumpList JIT::emitDoubleGetByVal(Instruction*, PatchableJump& badType)
slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength())));
loadDouble(BaseIndex(regT2, regT1, TimesEight), fpRegT0);
slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0));
- moveDoubleTo64(fpRegT0, regT0);
- sub64(tagTypeNumberRegister, regT0);
return slowCases;
}
-JIT::JumpList JIT::emitContiguousGetByVal(Instruction*, PatchableJump& badType, IndexingType expectedShape)
+JIT::JumpList JIT::emitContiguousLoad(Instruction*, PatchableJump& badType, IndexingType expectedShape)
{
JumpList slowCases;
@@ -177,7 +193,7 @@ JIT::JumpList JIT::emitContiguousGetByVal(Instruction*, PatchableJump& badType,
return slowCases;
}
-JIT::JumpList JIT::emitArrayStorageGetByVal(Instruction*, PatchableJump& badType)
+JIT::JumpList JIT::emitArrayStorageLoad(Instruction*, PatchableJump& badType)
{
JumpList slowCases;
@@ -193,18 +209,50 @@ JIT::JumpList JIT::emitArrayStorageGetByVal(Instruction*, PatchableJump& badType
return slowCases;
}
+JITGetByIdGenerator JIT::emitGetByValWithCachedId(ByValInfo* byValInfo, Instruction* currentInstruction, const Identifier& propertyName, Jump& fastDoneCase, Jump& slowDoneCase, JumpList& slowCases)
+{
+ // base: regT0
+ // property: regT1
+ // scratch: regT3
+
+ int dst = currentInstruction[1].u.operand;
+
+ slowCases.append(emitJumpIfNotJSCell(regT1));
+ emitByValIdentifierCheck(byValInfo, regT1, regT3, propertyName, slowCases);
+
+ JITGetByIdGenerator gen(
+ m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(),
+ propertyName.impl(), JSValueRegs(regT0), JSValueRegs(regT0), AccessType::Get);
+ gen.generateFastPath(*this);
+
+ fastDoneCase = jump();
+
+ Label coldPathBegin = label();
+ gen.slowPathJump().link(this);
+
+ Call call = callOperation(WithProfile, operationGetByIdOptimize, dst, gen.stubInfo(), regT0, propertyName.impl());
+ gen.reportSlowPathCall(coldPathBegin, call);
+ slowDoneCase = jump();
+
+ return gen;
+}
+
void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
int dst = currentInstruction[1].u.operand;
int base = currentInstruction[2].u.operand;
int property = currentInstruction[3].u.operand;
- ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+ ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;
- linkSlowCase(iter); // property int32 check
linkSlowCaseIfNotJSCell(iter, base); // base cell check
+
+ if (!isOperandConstantInt(property))
+ linkSlowCase(iter); // property int32 check
Jump nonCell = jump();
linkSlowCase(iter); // base array check
- Jump notString = branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get()));
+ Jump notString = branchStructure(NotEqual,
+ Address(regT0, JSCell::structureIDOffset()),
+ m_vm->stringStructure.get());
emitNakedCall(CodeLocationLabel(m_vm->getCTIStub(stringGetByValStubGenerator).code()));
Jump failed = branchTest64(Zero, regT0);
emitPutVirtualRegister(dst, regT0);
@@ -213,20 +261,14 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas
notString.link(this);
nonCell.link(this);
- Jump skipProfiling = jump();
-
linkSlowCase(iter); // vector length check
linkSlowCase(iter); // empty value
- emitArrayProfileOutOfBoundsSpecialCase(profile);
-
- skipProfiling.link(this);
-
Label slowPath = label();
emitGetVirtualRegister(base, regT0);
emitGetVirtualRegister(property, regT1);
- Call call = callOperation(operationGetByValDefault, dst, regT0, regT1);
+ Call call = callOperation(operationGetByValOptimize, dst, regT0, regT1, byValInfo);
m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
@@ -235,89 +277,29 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas
emitValueProfilingSite();
}
-void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch, FinalObjectMode finalObjectMode)
-{
- ASSERT(sizeof(JSValue) == 8);
-
- if (finalObjectMode == MayBeFinal) {
- Jump isInline = branch32(LessThan, offset, TrustedImm32(firstOutOfLineOffset));
- loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
- neg32(offset);
- Jump done = jump();
- isInline.link(this);
- addPtr(TrustedImm32(JSObject::offsetOfInlineStorage() - (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), base, scratch);
- done.link(this);
- } else {
-#if !ASSERT_DISABLED
- Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(firstOutOfLineOffset));
- breakpoint();
- isOutOfLine.link(this);
-#endif
- loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
- neg32(offset);
- }
- signExtend32ToPtr(offset, offset);
- load64(BaseIndex(scratch, offset, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), result);
-}
-
-void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int property = currentInstruction[3].u.operand;
- unsigned expected = currentInstruction[4].u.operand;
- int iter = currentInstruction[5].u.operand;
- int i = currentInstruction[6].u.operand;
-
- emitGetVirtualRegister(property, regT0);
- addSlowCase(branch64(NotEqual, regT0, addressFor(expected)));
- emitGetVirtualRegisters(base, regT0, iter, regT1);
- emitJumpSlowCaseIfNotJSCell(regT0, base);
-
- // Test base's structure
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- addSlowCase(branchPtr(NotEqual, regT2, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
- load32(addressFor(i), regT3);
- sub32(TrustedImm32(1), regT3);
- addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
- Jump inlineProperty = branch32(Below, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity)));
- add32(TrustedImm32(firstOutOfLineOffset), regT3);
- sub32(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity)), regT3);
- inlineProperty.link(this);
- compileGetDirectOffset(regT0, regT0, regT3, regT1);
-
- emitPutVirtualRegister(dst, regT0);
-}
-
-void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int property = currentInstruction[3].u.operand;
-
- linkSlowCase(iter);
- linkSlowCaseIfNotJSCell(iter, base);
- linkSlowCase(iter);
- linkSlowCase(iter);
-
- emitGetVirtualRegister(base, regT0);
- emitGetVirtualRegister(property, regT1);
- callOperation(operationGetByValGeneric, dst, regT0, regT1);
-}
-
void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
int base = currentInstruction[1].u.operand;
int property = currentInstruction[2].u.operand;
ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+ ByValInfo* byValInfo = m_codeBlock->addByValInfo();
+
+ emitGetVirtualRegister(base, regT0);
+ bool propertyNameIsIntegerConstant = isOperandConstantInt(property);
+ if (propertyNameIsIntegerConstant)
+ move(Imm32(getOperandConstantInt(property)), regT1);
+ else
+ emitGetVirtualRegister(property, regT1);
- emitGetVirtualRegisters(base, regT0, property, regT1);
- emitJumpSlowCaseIfNotImmediateInteger(regT1);
- // See comment in op_get_by_val.
- zeroExtend32ToPtr(regT1, regT1);
emitJumpSlowCaseIfNotJSCell(regT0, base);
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- emitArrayProfilingSite(regT2, regT3, profile);
+ PatchableJump notIndex;
+ if (!propertyNameIsIntegerConstant) {
+ notIndex = emitPatchableJumpIfNotInt(regT1);
+ addSlowCase(notIndex);
+ // See comment in op_get_by_val.
+ zeroExtend32ToPtr(regT1, regT1);
+ }
+ emitArrayProfilingSiteWithCell(regT0, regT2, profile);
and32(TrustedImm32(IndexingShapeMask), regT2);
PatchableJump badType;
@@ -347,8 +329,13 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
Label done = label();
- m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
+ m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, notIndex, badType, mode, profile, done, done));
+}
+void JIT::emit_op_put_by_val_with_this(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_put_by_val_with_this);
+ slowPathCall.call();
}
JIT::JumpList JIT::emitGenericContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType, IndexingType indexingShape)
@@ -367,11 +354,11 @@ JIT::JumpList JIT::emitGenericContiguousPutByVal(Instruction* currentInstruction
emitGetVirtualRegister(value, regT3);
switch (indexingShape) {
case Int32Shape:
- slowCases.append(emitJumpIfNotImmediateInteger(regT3));
+ slowCases.append(emitJumpIfNotInt(regT3));
store64(regT3, BaseIndex(regT2, regT1, TimesEight));
break;
case DoubleShape: {
- Jump notInt = emitJumpIfNotImmediateInteger(regT3);
+ Jump notInt = emitJumpIfNotInt(regT3);
convertInt32ToDouble(regT3, fpRegT0);
Jump ready = jump();
notInt.link(this);
@@ -441,18 +428,54 @@ JIT::JumpList JIT::emitArrayStoragePutByVal(Instruction* currentInstruction, Pat
return slowCases;
}
+JITPutByIdGenerator JIT::emitPutByValWithCachedId(ByValInfo* byValInfo, Instruction* currentInstruction, PutKind putKind, const Identifier& propertyName, JumpList& doneCases, JumpList& slowCases)
+{
+ // base: regT0
+ // property: regT1
+ // scratch: regT2
+
+ int base = currentInstruction[1].u.operand;
+ int value = currentInstruction[3].u.operand;
+
+ slowCases.append(emitJumpIfNotJSCell(regT1));
+ emitByValIdentifierCheck(byValInfo, regT1, regT1, propertyName, slowCases);
+
+ // Write barrier breaks the registers. So after issuing the write barrier,
+ // reload the registers.
+ emitGetVirtualRegisters(base, regT0, value, regT1);
+
+ JITPutByIdGenerator gen(
+ m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(),
+ JSValueRegs(regT0), JSValueRegs(regT1), regT2, m_codeBlock->ecmaMode(), putKind);
+ gen.generateFastPath(*this);
+ emitWriteBarrier(base, value, ShouldFilterBase);
+ doneCases.append(jump());
+
+ Label coldPathBegin = label();
+ gen.slowPathJump().link(this);
+
+ Call call = callOperation(gen.slowPathFunction(), gen.stubInfo(), regT1, regT0, propertyName.impl());
+ gen.reportSlowPathCall(coldPathBegin, call);
+ doneCases.append(jump());
+
+ return gen;
+}
+
void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
int base = currentInstruction[1].u.operand;
int property = currentInstruction[2].u.operand;
int value = currentInstruction[3].u.operand;
- ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+ JITArrayMode mode = m_byValCompilationInfo[m_byValInstructionIndex].arrayMode;
+ ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;
- linkSlowCase(iter); // property int32 check
linkSlowCaseIfNotJSCell(iter, base); // base cell check
+ if (!isOperandConstantInt(property))
+ linkSlowCase(iter); // property int32 check
linkSlowCase(iter); // base not array check
- JITArrayMode mode = chooseArrayMode(profile);
+ linkSlowCase(iter); // out of bounds
+
switch (mode) {
case JITInt32:
case JITDouble:
@@ -462,17 +485,13 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas
break;
}
- Jump skipProfiling = jump();
- linkSlowCase(iter); // out of bounds
- emitArrayProfileOutOfBoundsSpecialCase(profile);
- skipProfiling.link(this);
-
Label slowPath = label();
+ emitGetVirtualRegister(base, regT0);
emitGetVirtualRegister(property, regT1);
emitGetVirtualRegister(value, regT2);
bool isDirect = m_interpreter->getOpcodeID(currentInstruction->u.opcode) == op_put_by_val_direct;
- Call call = callOperation(isDirect ? operationDirectPutByVal : operationPutByVal, regT0, regT1, regT2);
+ Call call = callOperation(isDirect ? operationDirectPutByValOptimize : operationPutByValOptimize, regT0, regT1, regT2, byValInfo);
m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
@@ -486,12 +505,47 @@ void JIT::emit_op_put_by_index(Instruction* currentInstruction)
callOperation(operationPutByIndex, regT0, currentInstruction[2].u.operand, regT1);
}
-void JIT::emit_op_put_getter_setter(Instruction* currentInstruction)
+void JIT::emit_op_put_getter_by_id(Instruction* currentInstruction)
{
emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
- emitGetVirtualRegister(currentInstruction[3].u.operand, regT1);
+ int32_t options = currentInstruction[3].u.operand;
+ emitGetVirtualRegister(currentInstruction[4].u.operand, regT1);
+ callOperation(operationPutGetterById, regT0, m_codeBlock->identifier(currentInstruction[2].u.operand).impl(), options, regT1);
+}
+
+void JIT::emit_op_put_setter_by_id(Instruction* currentInstruction)
+{
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+ int32_t options = currentInstruction[3].u.operand;
+ emitGetVirtualRegister(currentInstruction[4].u.operand, regT1);
+ callOperation(operationPutSetterById, regT0, m_codeBlock->identifier(currentInstruction[2].u.operand).impl(), options, regT1);
+}
+
+void JIT::emit_op_put_getter_setter_by_id(Instruction* currentInstruction)
+{
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+ int32_t attribute = currentInstruction[3].u.operand;
+ emitGetVirtualRegister(currentInstruction[4].u.operand, regT1);
+ emitGetVirtualRegister(currentInstruction[5].u.operand, regT2);
+ callOperation(operationPutGetterSetter, regT0, m_codeBlock->identifier(currentInstruction[2].u.operand).impl(), attribute, regT1, regT2);
+}
+
+void JIT::emit_op_put_getter_by_val(Instruction* currentInstruction)
+{
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT1);
+ int32_t attributes = currentInstruction[3].u.operand;
+ emitGetVirtualRegister(currentInstruction[4].u.operand, regT2);
+ callOperation(operationPutGetterByVal, regT0, regT1, attributes, regT2);
+}
+
+void JIT::emit_op_put_setter_by_val(Instruction* currentInstruction)
+{
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT1);
+ int32_t attributes = currentInstruction[3].u.operand;
emitGetVirtualRegister(currentInstruction[4].u.operand, regT2);
- callOperation(operationPutGetterSetter, regT0, &m_codeBlock->identifier(currentInstruction[2].u.operand), regT1, regT2);
+ callOperation(operationPutSetterByVal, regT0, regT1, attributes, regT2);
}
void JIT::emit_op_del_by_id(Instruction* currentInstruction)
@@ -500,7 +554,56 @@ void JIT::emit_op_del_by_id(Instruction* currentInstruction)
int base = currentInstruction[2].u.operand;
int property = currentInstruction[3].u.operand;
emitGetVirtualRegister(base, regT0);
- callOperation(operationDeleteById, dst, regT0, &m_codeBlock->identifier(property));
+ callOperation(operationDeleteByIdJSResult, dst, regT0, m_codeBlock->identifier(property).impl());
+}
+
+void JIT::emit_op_del_by_val(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int property = currentInstruction[3].u.operand;
+ emitGetVirtualRegister(base, regT0);
+ emitGetVirtualRegister(property, regT1);
+ callOperation(operationDeleteByValJSResult, dst, regT0, regT1);
+}
+
+void JIT::emit_op_try_get_by_id(Instruction* currentInstruction)
+{
+ int resultVReg = currentInstruction[1].u.operand;
+ int baseVReg = currentInstruction[2].u.operand;
+ const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+
+ emitGetVirtualRegister(baseVReg, regT0);
+
+ emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
+
+ JITGetByIdGenerator gen(
+ m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(),
+ ident->impl(), JSValueRegs(regT0), JSValueRegs(regT0), AccessType::TryGet);
+ gen.generateFastPath(*this);
+ addSlowCase(gen.slowPathJump());
+ m_getByIds.append(gen);
+
+ emitValueProfilingSite();
+ emitPutVirtualRegister(resultVReg);
+}
+
+void JIT::emitSlow_op_try_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int resultVReg = currentInstruction[1].u.operand;
+ int baseVReg = currentInstruction[2].u.operand;
+ const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+
+ linkSlowCaseIfNotJSCell(iter, baseVReg);
+ linkSlowCase(iter);
+
+ JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++];
+
+ Label coldPathBegin = label();
+
+ Call call = callOperation(operationTryGetByIdOptimize, resultVReg, gen.stubInfo(), regT0, ident->impl());
+
+ gen.reportSlowPathCall(coldPathBegin, call);
}
void JIT::emit_op_get_by_id(Instruction* currentInstruction)
@@ -513,14 +616,12 @@ void JIT::emit_op_get_by_id(Instruction* currentInstruction)
emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
- if (*ident == m_vm->propertyNames->length && shouldEmitProfiling()) {
- loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
- emitArrayProfilingSiteForBytecodeIndex(regT1, regT2, m_bytecodeOffset);
- }
+ if (*ident == m_vm->propertyNames->length && shouldEmitProfiling())
+ emitArrayProfilingSiteForBytecodeIndexWithCell(regT0, regT1, m_bytecodeOffset);
JITGetByIdGenerator gen(
- m_codeBlock, CodeOrigin(m_bytecodeOffset), RegisterSet::specialRegisters(),
- callFrameRegister, JSValueRegs(regT0), JSValueRegs(regT0), true);
+ m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(),
+ ident->impl(), JSValueRegs(regT0), JSValueRegs(regT0), AccessType::Get);
gen.generateFastPath(*this);
addSlowCase(gen.slowPathJump());
m_getByIds.append(gen);
@@ -529,6 +630,18 @@ void JIT::emit_op_get_by_id(Instruction* currentInstruction)
emitPutVirtualRegister(resultVReg);
}
+void JIT::emit_op_get_by_id_with_this(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_by_id_with_this);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_get_by_val_with_this(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_by_val_with_this);
+ slowPathCall.call();
+}
+
void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
int resultVReg = currentInstruction[1].u.operand;
@@ -551,9 +664,7 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
int baseVReg = currentInstruction[1].u.operand;
int valueVReg = currentInstruction[3].u.operand;
- unsigned direct = currentInstruction[8].u.operand;
-
- emitWriteBarrier(baseVReg, valueVReg, ShouldFilterBaseAndValue);
+ unsigned direct = currentInstruction[8].u.putByIdFlags & PutByIdIsDirect;
// In order to be able to patch both the Structure, and the object offset, we store one pointer,
// to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
@@ -561,20 +672,27 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);
- // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
JITPutByIdGenerator gen(
- m_codeBlock, CodeOrigin(m_bytecodeOffset), RegisterSet::specialRegisters(),
- callFrameRegister, JSValueRegs(regT0), JSValueRegs(regT1), regT2, true,
- m_codeBlock->ecmaMode(), direct ? Direct : NotDirect);
+ m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(),
+ JSValueRegs(regT0), JSValueRegs(regT1), regT2, m_codeBlock->ecmaMode(),
+ direct ? Direct : NotDirect);
gen.generateFastPath(*this);
addSlowCase(gen.slowPathJump());
+ emitWriteBarrier(baseVReg, valueVReg, ShouldFilterBase);
+
m_putByIds.append(gen);
}
+void JIT::emit_op_put_by_id_with_this(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_put_by_id_with_this);
+ slowPathCall.call();
+}
+
void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
int baseVReg = currentInstruction[1].u.operand;
@@ -593,42 +711,6 @@ void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCase
gen.reportSlowPathCall(coldPathBegin, call);
}
-// Compile a store into an object's property storage. May overwrite the
-// value in objectReg.
-void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, PropertyOffset cachedOffset)
-{
- if (isInlineOffset(cachedOffset)) {
- store64(value, Address(base, JSObject::offsetOfInlineStorage() + sizeof(JSValue) * offsetInInlineStorage(cachedOffset)));
- return;
- }
-
- loadPtr(Address(base, JSObject::butterflyOffset()), base);
- store64(value, Address(base, sizeof(JSValue) * offsetInButterfly(cachedOffset)));
-}
-
-// Compile a load from an object's property storage. May overwrite base.
-void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, PropertyOffset cachedOffset)
-{
- if (isInlineOffset(cachedOffset)) {
- load64(Address(base, JSObject::offsetOfInlineStorage() + sizeof(JSValue) * offsetInInlineStorage(cachedOffset)), result);
- return;
- }
-
- loadPtr(Address(base, JSObject::butterflyOffset()), result);
- load64(Address(result, sizeof(JSValue) * offsetInButterfly(cachedOffset)), result);
-}
-
-void JIT::compileGetDirectOffset(JSObject* base, RegisterID result, PropertyOffset cachedOffset)
-{
- if (isInlineOffset(cachedOffset)) {
- load64(base->locationForOffset(cachedOffset), result);
- return;
- }
-
- loadPtr(base->butterflyAddress(), result);
- load64(Address(result, offsetInButterfly(cachedOffset) * sizeof(WriteBarrier<Unknown>)), result);
-}
-
void JIT::emitVarInjectionCheck(bool needsVarInjectionChecks)
{
if (!needsVarInjectionChecks)
@@ -636,16 +718,10 @@ void JIT::emitVarInjectionCheck(bool needsVarInjectionChecks)
addSlowCase(branch8(Equal, AbsoluteAddress(m_codeBlock->globalObject()->varInjectionWatchpoint()->addressOfState()), TrustedImm32(IsInvalidated)));
}
-void JIT::emitResolveClosure(int dst, bool needsVarInjectionChecks, unsigned depth)
+void JIT::emitResolveClosure(int dst, int scope, bool needsVarInjectionChecks, unsigned depth)
{
emitVarInjectionCheck(needsVarInjectionChecks);
- emitGetVirtualRegister(JSStack::ScopeChain, regT0);
- if (m_codeBlock->needsActivation()) {
- emitGetVirtualRegister(m_codeBlock->activationRegister(), regT1);
- Jump noActivation = branchTestPtr(Zero, regT1);
- loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
- noActivation.link(this);
- }
+ emitGetVirtualRegister(scope, regT0);
for (unsigned i = 0; i < depth; ++i)
loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
emitPutVirtualRegister(dst);
@@ -654,92 +730,212 @@ void JIT::emitResolveClosure(int dst, bool needsVarInjectionChecks, unsigned dep
void JIT::emit_op_resolve_scope(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
- ResolveType resolveType = static_cast<ResolveType>(currentInstruction[3].u.operand);
- unsigned depth = currentInstruction[4].u.operand;
+ int scope = currentInstruction[2].u.operand;
+ ResolveType resolveType = static_cast<ResolveType>(copiedInstruction(currentInstruction)[4].u.operand);
+ unsigned depth = currentInstruction[5].u.operand;
+
+ auto emitCode = [&] (ResolveType resolveType) {
+ switch (resolveType) {
+ case GlobalProperty:
+ case GlobalVar:
+ case GlobalPropertyWithVarInjectionChecks:
+ case GlobalVarWithVarInjectionChecks:
+ case GlobalLexicalVar:
+ case GlobalLexicalVarWithVarInjectionChecks: {
+ JSScope* constantScope = JSScope::constantScopeForCodeBlock(resolveType, m_codeBlock);
+ RELEASE_ASSERT(constantScope);
+ emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
+ move(TrustedImmPtr(constantScope), regT0);
+ emitPutVirtualRegister(dst);
+ break;
+ }
+ case ClosureVar:
+ case ClosureVarWithVarInjectionChecks:
+ emitResolveClosure(dst, scope, needsVarInjectionChecks(resolveType), depth);
+ break;
+ case ModuleVar:
+ move(TrustedImmPtr(currentInstruction[6].u.jsCell.get()), regT0);
+ emitPutVirtualRegister(dst);
+ break;
+ case Dynamic:
+ addSlowCase(jump());
+ break;
+ case LocalClosureVar:
+ case UnresolvedProperty:
+ case UnresolvedPropertyWithVarInjectionChecks:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ };
switch (resolveType) {
- case GlobalProperty:
- case GlobalVar:
- case GlobalPropertyWithVarInjectionChecks:
- case GlobalVarWithVarInjectionChecks:
- emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
- move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
- emitPutVirtualRegister(dst);
- break;
- case ClosureVar:
- case ClosureVarWithVarInjectionChecks:
- emitResolveClosure(dst, needsVarInjectionChecks(resolveType), depth);
- break;
- case Dynamic:
+ case UnresolvedProperty:
+ case UnresolvedPropertyWithVarInjectionChecks: {
+ JumpList skipToEnd;
+ load32(&currentInstruction[4], regT0);
+
+ Jump notGlobalProperty = branch32(NotEqual, regT0, TrustedImm32(GlobalProperty));
+ emitCode(GlobalProperty);
+ skipToEnd.append(jump());
+ notGlobalProperty.link(this);
+
+ Jump notGlobalPropertyWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalPropertyWithVarInjectionChecks));
+ emitCode(GlobalPropertyWithVarInjectionChecks);
+ skipToEnd.append(jump());
+ notGlobalPropertyWithVarInjections.link(this);
+
+ Jump notGlobalLexicalVar = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVar));
+ emitCode(GlobalLexicalVar);
+ skipToEnd.append(jump());
+ notGlobalLexicalVar.link(this);
+
+ Jump notGlobalLexicalVarWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVarWithVarInjectionChecks));
+ emitCode(GlobalLexicalVarWithVarInjectionChecks);
+ skipToEnd.append(jump());
+ notGlobalLexicalVarWithVarInjections.link(this);
+
addSlowCase(jump());
+ skipToEnd.link(this);
+ break;
+ }
+
+ default:
+ emitCode(resolveType);
break;
}
}
void JIT::emitSlow_op_resolve_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- int dst = currentInstruction[1].u.operand;
- ResolveType resolveType = static_cast<ResolveType>(currentInstruction[3].u.operand);
-
- if (resolveType == GlobalProperty || resolveType == GlobalVar || resolveType == ClosureVar)
+ ResolveType resolveType = static_cast<ResolveType>(copiedInstruction(currentInstruction)[4].u.operand);
+ if (resolveType == GlobalProperty || resolveType == GlobalVar || resolveType == ClosureVar || resolveType == GlobalLexicalVar || resolveType == ModuleVar)
return;
+ if (resolveType == UnresolvedProperty || resolveType == UnresolvedPropertyWithVarInjectionChecks) {
+ linkSlowCase(iter); // var injections check for GlobalPropertyWithVarInjectionChecks.
+ linkSlowCase(iter); // var injections check for GlobalLexicalVarWithVarInjectionChecks.
+ }
+
linkSlowCase(iter);
- int32_t indentifierIndex = currentInstruction[2].u.operand;
- callOperation(operationResolveScope, dst, indentifierIndex);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_resolve_scope);
+ slowPathCall.call();
}
void JIT::emitLoadWithStructureCheck(int scope, Structure** structureSlot)
{
- emitGetVirtualRegister(scope, regT0);
loadPtr(structureSlot, regT1);
- addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), regT1));
+ emitGetVirtualRegister(scope, regT0);
+ addSlowCase(branchTestPtr(Zero, regT1));
+ load32(Address(regT1, Structure::structureIDOffset()), regT1);
+ addSlowCase(branch32(NotEqual, Address(regT0, JSCell::structureIDOffset()), regT1));
}
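
The reworked guard first bails out if no Structure has been cached yet, then compares 32-bit structure IDs instead of structure pointers. A small standalone sketch of that check; the types below are stand-ins, not the real Structure and JSCell classes:

#include <cassert>
#include <cstdint>

struct StructureModel { uint32_t id; };
struct JSCellModel { uint32_t structureID; };

bool structureCheckPasses(const JSCellModel& scope, const StructureModel* cached)
{
    if (!cached)                              // addSlowCase(branchTestPtr(Zero, regT1))
        return false;
    return scope.structureID == cached->id;   // mismatch takes the second slow case
}

int main()
{
    StructureModel s { 42 };
    JSCellModel scope { 42 };
    assert(structureCheckPasses(scope, &s));
    assert(!structureCheckPasses(scope, nullptr));
}
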
-void JIT::emitGetGlobalProperty(uintptr_t* operandSlot)
+void JIT::emitGetVarFromPointer(JSValue* operand, GPRReg reg)
{
- load32(operandSlot, regT1);
- compileGetDirectOffset(regT0, regT0, regT1, regT2, KnownNotFinal);
+ loadPtr(operand, reg);
}
-void JIT::emitGetGlobalVar(uintptr_t operand)
+void JIT::emitGetVarFromIndirectPointer(JSValue** operand, GPRReg reg)
{
- loadPtr(reinterpret_cast<void*>(operand), regT0);
+ loadPtr(operand, reg);
+ loadPtr(reg, reg);
}
void JIT::emitGetClosureVar(int scope, uintptr_t operand)
{
emitGetVirtualRegister(scope, regT0);
- loadPtr(Address(regT0, JSVariableObject::offsetOfRegisters()), regT0);
- loadPtr(Address(regT0, operand * sizeof(Register)), regT0);
+ loadPtr(Address(regT0, JSEnvironmentRecord::offsetOfVariables() + operand * sizeof(Register)), regT0);
}
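
Closure variables are now read with a single load at a fixed offset inside the environment record rather than through a separate registers pointer. An illustrative address computation, with made-up constants standing in for JSEnvironmentRecord::offsetOfVariables() and sizeof(Register):

#include <cassert>
#include <cstdint>

constexpr uintptr_t kOffsetOfVariables = 16; // stand-in for JSEnvironmentRecord::offsetOfVariables()
constexpr uintptr_t kSizeOfRegister = 8;     // sizeof(Register) on a 64-bit build

uintptr_t closureVarAddress(uintptr_t environmentRecord, uintptr_t operand)
{
    // One load at a fixed offset replaces the old indirection through a registers buffer.
    return environmentRecord + kOffsetOfVariables + operand * kSizeOfRegister;
}

int main()
{
    assert(closureVarAddress(0x1000, 3) == 0x1000 + 16 + 24);
}
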
void JIT::emit_op_get_from_scope(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
int scope = currentInstruction[2].u.operand;
- ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
+ ResolveType resolveType = GetPutInfo(copiedInstruction(currentInstruction)[4].u.operand).resolveType();
Structure** structureSlot = currentInstruction[5].u.structure.slot();
uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&currentInstruction[6].u.pointer);
+ auto emitCode = [&] (ResolveType resolveType, bool indirectLoadForOperand) {
+ switch (resolveType) {
+ case GlobalProperty:
+ case GlobalPropertyWithVarInjectionChecks: {
+ emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection.
+ GPRReg base = regT0;
+ GPRReg result = regT0;
+ GPRReg offset = regT1;
+ GPRReg scratch = regT2;
+
+ load32(operandSlot, offset);
+ if (!ASSERT_DISABLED) {
+ Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(firstOutOfLineOffset));
+ abortWithReason(JITOffsetIsNotOutOfLine);
+ isOutOfLine.link(this);
+ }
+ loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
+ neg32(offset);
+ signExtend32ToPtr(offset, offset);
+ load64(BaseIndex(scratch, offset, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), result);
+ break;
+ }
+ case GlobalVar:
+ case GlobalVarWithVarInjectionChecks:
+ case GlobalLexicalVar:
+ case GlobalLexicalVarWithVarInjectionChecks:
+ emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
+ if (indirectLoadForOperand)
+ emitGetVarFromIndirectPointer(bitwise_cast<JSValue**>(operandSlot), regT0);
+ else
+ emitGetVarFromPointer(bitwise_cast<JSValue*>(*operandSlot), regT0);
+ if (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks) // TDZ check.
+ addSlowCase(branchTest64(Zero, regT0));
+ break;
+ case ClosureVar:
+ case ClosureVarWithVarInjectionChecks:
+ emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
+ emitGetClosureVar(scope, *operandSlot);
+ break;
+ case Dynamic:
+ addSlowCase(jump());
+ break;
+ case LocalClosureVar:
+ case ModuleVar:
+ case UnresolvedProperty:
+ case UnresolvedPropertyWithVarInjectionChecks:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ };
+
switch (resolveType) {
- case GlobalProperty:
- case GlobalPropertyWithVarInjectionChecks:
- emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection.
- emitGetGlobalProperty(operandSlot);
- break;
- case GlobalVar:
- case GlobalVarWithVarInjectionChecks:
- emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
- emitGetGlobalVar(*operandSlot);
- break;
- case ClosureVar:
- case ClosureVarWithVarInjectionChecks:
- emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
- emitGetClosureVar(scope, *operandSlot);
- break;
- case Dynamic:
+ case UnresolvedProperty:
+ case UnresolvedPropertyWithVarInjectionChecks: {
+ JumpList skipToEnd;
+ load32(&currentInstruction[4], regT0);
+ and32(TrustedImm32(GetPutInfo::typeBits), regT0); // Load ResolveType into T0
+
+ Jump isGlobalProperty = branch32(Equal, regT0, TrustedImm32(GlobalProperty));
+ Jump notGlobalPropertyWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalPropertyWithVarInjectionChecks));
+ isGlobalProperty.link(this);
+ emitCode(GlobalProperty, false);
+ skipToEnd.append(jump());
+ notGlobalPropertyWithVarInjections.link(this);
+
+ Jump notGlobalLexicalVar = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVar));
+ emitCode(GlobalLexicalVar, true);
+ skipToEnd.append(jump());
+ notGlobalLexicalVar.link(this);
+
+ Jump notGlobalLexicalVarWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVarWithVarInjectionChecks));
+ emitCode(GlobalLexicalVarWithVarInjectionChecks, true);
+ skipToEnd.append(jump());
+ notGlobalLexicalVarWithVarInjections.link(this);
+
addSlowCase(jump());
+
+ skipToEnd.link(this);
+ break;
+ }
+
+ default:
+ emitCode(resolveType, false);
break;
}
emitPutVirtualRegister(dst);
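
The GlobalProperty fast path above asserts the cached offset is out of line and then loads from the butterfly at base plus (-offset)*8 plus (firstOutOfLineOffset - 2)*8, so out-of-line slot n sits at butterfly[-2 - n]. A standalone model of that address arithmetic; the value used for firstOutOfLineOffset is a stand-in:

#include <cassert>
#include <cstddef>
#include <cstdint>

using EncodedJSValue = uint64_t;
constexpr int32_t firstOutOfLineOffset = 100; // stand-in for the real JSC constant

EncodedJSValue loadOutOfLine(const EncodedJSValue* butterfly, int32_t offset)
{
    assert(offset >= firstOutOfLineOffset); // mirrors the JITOffsetIsNotOutOfLine assertion
    // base + (-offset)*8 + (firstOutOfLineOffset - 2)*8  ==  butterfly[firstOutOfLineOffset - 2 - offset]
    return butterfly[static_cast<ptrdiff_t>(firstOutOfLineOffset) - 2 - offset];
}

int main()
{
    EncodedJSValue storage[8] = {};
    EncodedJSValue* butterfly = storage + 8; // out-of-line slots grow downwards from the butterfly pointer
    storage[6] = 0x1234;                     // butterfly[-2]: the first out-of-line slot
    assert(loadOutOfLine(butterfly, firstOutOfLineOffset) == 0x1234);
    storage[5] = 0x5678;                     // butterfly[-3]: the second out-of-line slot
    assert(loadOutOfLine(butterfly, firstOutOfLineOffset + 1) == 0x5678);
}
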
@@ -749,294 +945,321 @@ void JIT::emit_op_get_from_scope(Instruction* currentInstruction)
void JIT::emitSlow_op_get_from_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
int dst = currentInstruction[1].u.operand;
- ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
+ ResolveType resolveType = GetPutInfo(copiedInstruction(currentInstruction)[4].u.operand).resolveType();
if (resolveType == GlobalVar || resolveType == ClosureVar)
return;
- linkSlowCase(iter);
- callOperation(WithProfile, operationGetFromScope, dst, currentInstruction);
-}
+ if (resolveType == GlobalProperty || resolveType == GlobalPropertyWithVarInjectionChecks)
+ linkSlowCase(iter); // bad structure
-void JIT::emitPutGlobalProperty(uintptr_t* operandSlot, int value)
-{
- emitGetVirtualRegister(value, regT2);
+ if (resolveType == GlobalLexicalVarWithVarInjectionChecks) // Var injections check.
+ linkSlowCase(iter);
+
+ if (resolveType == UnresolvedProperty || resolveType == UnresolvedPropertyWithVarInjectionChecks) {
+ // GlobalProperty/GlobalPropertyWithVarInjectionChecks
+ linkSlowCase(iter); // emitLoadWithStructureCheck
+ linkSlowCase(iter); // emitLoadWithStructureCheck
+ // GlobalLexicalVar
+ linkSlowCase(iter); // TDZ check.
+ // GlobalLexicalVarWithVarInjectionChecks.
+ linkSlowCase(iter); // var injection check.
+ linkSlowCase(iter); // TDZ check.
+ }
- loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
- loadPtr(operandSlot, regT1);
- negPtr(regT1);
- storePtr(regT2, BaseIndex(regT0, regT1, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)));
+ linkSlowCase(iter);
+
+ callOperation(WithProfile, operationGetFromScope, dst, currentInstruction);
}
-void JIT::emitNotifyWrite(RegisterID value, RegisterID scratch, VariableWatchpointSet* set)
+void JIT::emitPutGlobalVariable(JSValue* operand, int value, WatchpointSet* set)
{
- if (!set || set->state() == IsInvalidated)
- return;
-
- load8(set->addressOfState(), scratch);
-
- JumpList ready;
-
- ready.append(branch32(Equal, scratch, TrustedImm32(IsInvalidated)));
-
- if (set->state() == ClearWatchpoint) {
- Jump isWatched = branch32(NotEqual, scratch, TrustedImm32(ClearWatchpoint));
-
- store64(value, set->addressOfInferredValue());
- store8(TrustedImm32(IsWatched), set->addressOfState());
- ready.append(jump());
-
- isWatched.link(this);
- }
-
- ready.append(branch64(Equal, AbsoluteAddress(set->addressOfInferredValue()), value));
- addSlowCase(branchTest8(NonZero, AbsoluteAddress(set->addressOfSetIsNotEmpty())));
- store8(TrustedImm32(IsInvalidated), set->addressOfState());
- move(TrustedImm64(JSValue::encode(JSValue())), scratch);
- store64(scratch, set->addressOfInferredValue());
-
- ready.link(this);
+ emitGetVirtualRegister(value, regT0);
+ emitNotifyWrite(set);
+ storePtr(regT0, operand);
}
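
The inline watchpoint state machine that used to live here (ClearWatchpoint, IsWatched, IsInvalidated, plus a stored inferred value) is now hidden behind emitNotifyWrite(set). A simplified behavioural model of that protocol; the real code only takes the slow path when the set actually has watchpoints registered, which this sketch ignores:

#include <cassert>
#include <cstdint>
#include <optional>

enum class WatchpointState { ClearWatchpoint, IsWatched, IsInvalidated };

struct WatchpointSetModel {
    WatchpointState state = WatchpointState::ClearWatchpoint;
    std::optional<uint64_t> inferredValue;

    // Returns true if dependent compiled code must be thrown away (slow path).
    bool notifyWrite(uint64_t value)
    {
        switch (state) {
        case WatchpointState::ClearWatchpoint:
            inferredValue = value;                 // first write: start watching this value
            state = WatchpointState::IsWatched;
            return false;
        case WatchpointState::IsWatched:
            if (inferredValue == value)
                return false;                      // same value: nothing to invalidate
            state = WatchpointState::IsInvalidated;
            inferredValue.reset();
            return true;                           // conflicting write: fire the watchpoints
        case WatchpointState::IsInvalidated:
            return false;                          // already invalidated: writes are free
        }
        return false;
    }
};

int main()
{
    WatchpointSetModel set;
    assert(!set.notifyWrite(1));
    assert(!set.notifyWrite(1));
    assert(set.notifyWrite(2));
    assert(!set.notifyWrite(3));
}
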
-
-void JIT::emitPutGlobalVar(uintptr_t operand, int value, VariableWatchpointSet* set)
+void JIT::emitPutGlobalVariableIndirect(JSValue** addressOfOperand, int value, WatchpointSet** indirectWatchpointSet)
{
emitGetVirtualRegister(value, regT0);
- emitNotifyWrite(regT0, regT1, set);
- storePtr(regT0, reinterpret_cast<void*>(operand));
+ loadPtr(indirectWatchpointSet, regT1);
+ emitNotifyWrite(regT1);
+ loadPtr(addressOfOperand, regT1);
+ storePtr(regT0, regT1);
}
-void JIT::emitPutClosureVar(int scope, uintptr_t operand, int value)
+void JIT::emitPutClosureVar(int scope, uintptr_t operand, int value, WatchpointSet* set)
{
emitGetVirtualRegister(value, regT1);
emitGetVirtualRegister(scope, regT0);
- loadPtr(Address(regT0, JSVariableObject::offsetOfRegisters()), regT0);
- storePtr(regT1, Address(regT0, operand * sizeof(Register)));
+ emitNotifyWrite(set);
+ storePtr(regT1, Address(regT0, JSEnvironmentRecord::offsetOfVariables() + operand * sizeof(Register)));
}
void JIT::emit_op_put_to_scope(Instruction* currentInstruction)
{
int scope = currentInstruction[1].u.operand;
int value = currentInstruction[3].u.operand;
- ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
+ GetPutInfo getPutInfo = GetPutInfo(copiedInstruction(currentInstruction)[4].u.operand);
+ ResolveType resolveType = getPutInfo.resolveType();
Structure** structureSlot = currentInstruction[5].u.structure.slot();
uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&currentInstruction[6].u.pointer);
+ auto emitCode = [&] (ResolveType resolveType, bool indirectLoadForOperand) {
+ switch (resolveType) {
+ case GlobalProperty:
+ case GlobalPropertyWithVarInjectionChecks: {
+ emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection.
+ emitGetVirtualRegister(value, regT2);
+
+ loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
+ loadPtr(operandSlot, regT1);
+ negPtr(regT1);
+ storePtr(regT2, BaseIndex(regT0, regT1, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)));
+ emitWriteBarrier(m_codeBlock->globalObject(), value, ShouldFilterValue);
+ break;
+ }
+ case GlobalVar:
+ case GlobalVarWithVarInjectionChecks:
+ case GlobalLexicalVar:
+ case GlobalLexicalVarWithVarInjectionChecks: {
+ JSScope* constantScope = JSScope::constantScopeForCodeBlock(resolveType, m_codeBlock);
+ RELEASE_ASSERT(constantScope);
+ emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
+ if (!isInitialization(getPutInfo.initializationMode()) && (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)) {
+ // We need to do a TDZ check here because we can't always prove we need to emit TDZ checks statically.
+ if (indirectLoadForOperand)
+ emitGetVarFromIndirectPointer(bitwise_cast<JSValue**>(operandSlot), regT0);
+ else
+ emitGetVarFromPointer(bitwise_cast<JSValue*>(*operandSlot), regT0);
+ addSlowCase(branchTest64(Zero, regT0));
+ }
+ if (indirectLoadForOperand)
+ emitPutGlobalVariableIndirect(bitwise_cast<JSValue**>(operandSlot), value, bitwise_cast<WatchpointSet**>(&currentInstruction[5]));
+ else
+ emitPutGlobalVariable(bitwise_cast<JSValue*>(*operandSlot), value, currentInstruction[5].u.watchpointSet);
+ emitWriteBarrier(constantScope, value, ShouldFilterValue);
+ break;
+ }
+ case LocalClosureVar:
+ case ClosureVar:
+ case ClosureVarWithVarInjectionChecks:
+ emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
+ emitPutClosureVar(scope, *operandSlot, value, currentInstruction[5].u.watchpointSet);
+ emitWriteBarrier(scope, value, ShouldFilterValue);
+ break;
+ case ModuleVar:
+ case Dynamic:
+ addSlowCase(jump());
+ break;
+ case UnresolvedProperty:
+ case UnresolvedPropertyWithVarInjectionChecks:
+ RELEASE_ASSERT_NOT_REACHED();
+ break;
+ }
+ };
+
switch (resolveType) {
- case GlobalProperty:
- case GlobalPropertyWithVarInjectionChecks:
- emitWriteBarrier(m_codeBlock->globalObject(), value, ShouldFilterValue);
- emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection.
- emitPutGlobalProperty(operandSlot, value);
- break;
- case GlobalVar:
- case GlobalVarWithVarInjectionChecks:
- emitWriteBarrier(m_codeBlock->globalObject(), value, ShouldFilterValue);
- emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
- emitPutGlobalVar(*operandSlot, value, currentInstruction[5].u.watchpointSet);
- break;
- case ClosureVar:
- case ClosureVarWithVarInjectionChecks:
- emitWriteBarrier(scope, value, ShouldFilterValue);
- emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
- emitPutClosureVar(scope, *operandSlot, value);
- break;
- case Dynamic:
+ case UnresolvedProperty:
+ case UnresolvedPropertyWithVarInjectionChecks: {
+ JumpList skipToEnd;
+ load32(&currentInstruction[4], regT0);
+ and32(TrustedImm32(GetPutInfo::typeBits), regT0); // Load ResolveType into T0
+
+ Jump isGlobalProperty = branch32(Equal, regT0, TrustedImm32(GlobalProperty));
+ Jump notGlobalPropertyWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalPropertyWithVarInjectionChecks));
+ isGlobalProperty.link(this);
+ emitCode(GlobalProperty, false);
+ skipToEnd.append(jump());
+ notGlobalPropertyWithVarInjections.link(this);
+
+ Jump notGlobalLexicalVar = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVar));
+ emitCode(GlobalLexicalVar, true);
+ skipToEnd.append(jump());
+ notGlobalLexicalVar.link(this);
+
+ Jump notGlobalLexicalVarWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVarWithVarInjectionChecks));
+ emitCode(GlobalLexicalVarWithVarInjectionChecks, true);
+ skipToEnd.append(jump());
+ notGlobalLexicalVarWithVarInjections.link(this);
+
addSlowCase(jump());
+
+ skipToEnd.link(this);
+ break;
+ }
+
+ default:
+ emitCode(resolveType, false);
break;
}
}
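
The TDZ guard emitted for non-initializing stores to a GlobalLexicalVar relies on the empty JSValue marking an uninitialized binding encoding as zero on 64-bit builds, so branchTest64(Zero, regT0) is exactly the dead-zone test. A tiny model of that decision:

#include <cassert>
#include <cstdint>

using EncodedJSValue = uint64_t;
constexpr EncodedJSValue encodedEmpty = 0; // JSValue() on a 64-bit build

bool needsTDZThrow(EncodedJSValue currentValue, bool isInitialization)
{
    if (isInitialization)                    // initializations may write into the dead zone
        return false;
    return currentValue == encodedEmpty;     // empty slot: slow path throws a ReferenceError
}

int main()
{
    assert(needsTDZThrow(encodedEmpty, /*isInitialization*/ false));
    assert(!needsTDZThrow(encodedEmpty, /*isInitialization*/ true));
    assert(!needsTDZThrow(0x7, false));
}
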
void JIT::emitSlow_op_put_to_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
+ GetPutInfo getPutInfo = GetPutInfo(copiedInstruction(currentInstruction)[4].u.operand);
+ ResolveType resolveType = getPutInfo.resolveType();
unsigned linkCount = 0;
- if (resolveType != GlobalVar && resolveType != ClosureVar)
+ if (resolveType != GlobalVar && resolveType != ClosureVar && resolveType != LocalClosureVar && resolveType != GlobalLexicalVar)
linkCount++;
- if ((resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
- && currentInstruction[5].u.watchpointSet->state() != IsInvalidated)
+ if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks
+ || resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks
+ || resolveType == ClosureVar || resolveType == ClosureVarWithVarInjectionChecks
+ || resolveType == LocalClosureVar)
linkCount++;
+ if (resolveType == GlobalProperty || resolveType == GlobalPropertyWithVarInjectionChecks)
+ linkCount++; // bad structure
+ if (!isInitialization(getPutInfo.initializationMode()) && (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)) // TDZ check.
+ linkCount++;
+ if (resolveType == UnresolvedProperty || resolveType == UnresolvedPropertyWithVarInjectionChecks) {
+ // GlobalProperty/GlobalPropertyWithVarInjectionsCheck
+ linkCount++; // emitLoadWithStructureCheck
+ linkCount++; // emitLoadWithStructureCheck
+
+ // GlobalLexicalVar
+ bool needsTDZCheck = !isInitialization(getPutInfo.initializationMode());
+ if (needsTDZCheck)
+ linkCount++;
+ linkCount++; // Notify write check.
+
+ // GlobalLexicalVarWithVarInjectionsCheck
+ linkCount++; // var injection check.
+ if (needsTDZCheck)
+ linkCount++;
+ linkCount++; // Notify write check.
+ }
if (!linkCount)
return;
while (linkCount--)
linkSlowCase(iter);
- callOperation(operationPutToScope, currentInstruction);
-}
-void JIT::emit_op_init_global_const(Instruction* currentInstruction)
-{
- JSGlobalObject* globalObject = m_codeBlock->globalObject();
- emitWriteBarrier(globalObject, currentInstruction[2].u.operand, ShouldFilterValue);
- emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
- store64(regT0, currentInstruction[1].u.registerPointer);
+ if (resolveType == ModuleVar) {
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_throw_strict_mode_readonly_property_write_error);
+ slowPathCall.call();
+ } else
+ callOperation(operationPutToScope, currentInstruction);
}
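
The linkCount arithmetic above must match, one for one and in emission order, every addSlowCase() the fast path produced, otherwise the slow path would link the wrong jumps. A toy model of that FIFO discipline, assuming nothing about the real SlowCaseEntry type:

#include <cassert>
#include <deque>
#include <string>

struct SlowCases {
    std::deque<std::string> pending;               // filled by the fast path (addSlowCase)
    void add(std::string label) { pending.push_back(std::move(label)); }
    std::string link()                             // consumed by the slow path (linkSlowCase)
    {
        assert(!pending.empty() && "linkSlowCase without a matching addSlowCase");
        std::string label = pending.front();
        pending.pop_front();
        return label;
    }
};

int main()
{
    SlowCases cases;
    cases.add("var injection check");
    cases.add("TDZ check");
    cases.add("notify write");
    unsigned linkCount = 3;                        // must equal the number added above
    while (linkCount--)
        cases.link();
    assert(cases.pending.empty());                 // a mismatch would bind jumps to the wrong handler
}
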
-#endif // USE(JSVALUE64)
-
-JIT::Jump JIT::checkMarkWord(RegisterID owner, RegisterID scratch1, RegisterID scratch2)
+void JIT::emit_op_get_from_arguments(Instruction* currentInstruction)
{
- move(owner, scratch1);
- move(owner, scratch2);
-
- andPtr(TrustedImmPtr(MarkedBlock::blockMask), scratch1);
- andPtr(TrustedImmPtr(~MarkedBlock::blockMask), scratch2);
-
- rshift32(TrustedImm32(3 + 4), scratch2);
-
- return branchTest8(Zero, BaseIndex(scratch1, scratch2, TimesOne, MarkedBlock::offsetOfMarks()));
+ int dst = currentInstruction[1].u.operand;
+ int arguments = currentInstruction[2].u.operand;
+ int index = currentInstruction[3].u.operand;
+
+ emitGetVirtualRegister(arguments, regT0);
+ load64(Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>)), regT0);
+ emitValueProfilingSite();
+ emitPutVirtualRegister(dst);
}
-JIT::Jump JIT::checkMarkWord(JSCell* owner)
+void JIT::emit_op_put_to_arguments(Instruction* currentInstruction)
{
- MarkedBlock* block = MarkedBlock::blockFor(owner);
- size_t index = (reinterpret_cast<size_t>(owner) & ~MarkedBlock::blockMask) >> (3 + 4);
- void* address = (reinterpret_cast<char*>(block) + MarkedBlock::offsetOfMarks()) + index;
+ int arguments = currentInstruction[1].u.operand;
+ int index = currentInstruction[2].u.operand;
+ int value = currentInstruction[3].u.operand;
+
+ emitGetVirtualRegister(arguments, regT0);
+ emitGetVirtualRegister(value, regT1);
+ store64(regT1, Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>)));
- return branchTest8(Zero, AbsoluteAddress(address));
+ emitWriteBarrier(arguments, value, ShouldFilterValue);
}
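
Both arguments opcodes index straight into the DirectArguments object at storageOffset() + index * sizeof(WriteBarrier<Unknown>). A rough layout model; the header fields and values below are invented for illustration:

#include <cassert>
#include <cstdint>

struct DirectArgumentsModel {
    void* callee = nullptr;                    // stands in for the real JSCell/DirectArguments header
    uint32_t length = 4;
    uint64_t storage[4] = { 10, 11, 12, 13 };  // WriteBarrier<Unknown> slots in the real object
};

uint64_t getFromArguments(const DirectArgumentsModel& args, uint32_t index)
{
    assert(index < args.length);
    return args.storage[index];                // load64(Address(base, storageOffset() + index * 8))
}

int main()
{
    DirectArgumentsModel args;
    assert(getFromArguments(args, 2) == 12);
}
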
+#endif // USE(JSVALUE64)
+
#if USE(JSVALUE64)
void JIT::emitWriteBarrier(unsigned owner, unsigned value, WriteBarrierMode mode)
{
-#if ENABLE(GGC)
- emitGetVirtualRegister(value, regT0);
Jump valueNotCell;
- if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue)
+ if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue) {
+ emitGetVirtualRegister(value, regT0);
valueNotCell = branchTest64(NonZero, regT0, tagMaskRegister);
+ }
emitGetVirtualRegister(owner, regT0);
Jump ownerNotCell;
- if (mode == ShouldFilterBaseAndValue)
+ if (mode == ShouldFilterBaseAndValue || mode == ShouldFilterBase)
ownerNotCell = branchTest64(NonZero, regT0, tagMaskRegister);
- Jump ownerNotMarked = checkMarkWord(regT0, regT1, regT2);
- callOperation(operationUnconditionalWriteBarrier, regT0);
- ownerNotMarked.link(this);
+ Jump ownerIsRememberedOrInEden = barrierBranch(regT0, regT1);
+ callOperation(operationWriteBarrierSlowPath, regT0);
+ ownerIsRememberedOrInEden.link(this);
- if (mode == ShouldFilterBaseAndValue)
+ if (mode == ShouldFilterBaseAndValue || mode == ShouldFilterBase)
ownerNotCell.link(this);
if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue)
valueNotCell.link(this);
-#else
- UNUSED_PARAM(owner);
- UNUSED_PARAM(value);
- UNUSED_PARAM(mode);
-#endif
}
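
The barrier only calls operationWriteBarrierSlowPath when the value is a cell (if value filtering is requested), the owner is a cell (if base filtering is requested), and the owner is not already remembered or sitting in Eden. A behavioural sketch of that filtering, with the cell-state test reduced to a single flag:

#include <cassert>

enum class Mode { Unconditional, ShouldFilterBase, ShouldFilterValue, ShouldFilterBaseAndValue };

struct CellModel { bool rememberedOrInEden; };

bool writeBarrierNeedsSlowPath(Mode mode, bool valueIsCell, bool ownerIsCell, const CellModel& owner)
{
    if ((mode == Mode::ShouldFilterValue || mode == Mode::ShouldFilterBaseAndValue) && !valueIsCell)
        return false;                      // valueNotCell jump
    if ((mode == Mode::ShouldFilterBase || mode == Mode::ShouldFilterBaseAndValue) && !ownerIsCell)
        return false;                      // ownerNotCell jump
    return !owner.rememberedOrInEden;      // barrierBranch: only old, unremembered owners need the call
}

int main()
{
    CellModel oldOwner { false }, youngOwner { true };
    assert(writeBarrierNeedsSlowPath(Mode::ShouldFilterValue, true, true, oldOwner));
    assert(!writeBarrierNeedsSlowPath(Mode::ShouldFilterValue, false, true, oldOwner));
    assert(!writeBarrierNeedsSlowPath(Mode::ShouldFilterBaseAndValue, true, true, youngOwner));
}
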
void JIT::emitWriteBarrier(JSCell* owner, unsigned value, WriteBarrierMode mode)
{
-#if ENABLE(GGC)
emitGetVirtualRegister(value, regT0);
Jump valueNotCell;
if (mode == ShouldFilterValue)
valueNotCell = branchTest64(NonZero, regT0, tagMaskRegister);
- if (!MarkedBlock::blockFor(owner)->isMarked(owner)) {
- Jump ownerNotMarked = checkMarkWord(regT0, regT1, regT2);
- callOperation(operationUnconditionalWriteBarrier, owner);
- ownerNotMarked.link(this);
- } else
- callOperation(operationUnconditionalWriteBarrier, owner);
+ emitWriteBarrier(owner);
if (mode == ShouldFilterValue)
valueNotCell.link(this);
-#else
- UNUSED_PARAM(owner);
- UNUSED_PARAM(value);
- UNUSED_PARAM(mode);
-#endif
}
#else // USE(JSVALUE64)
void JIT::emitWriteBarrier(unsigned owner, unsigned value, WriteBarrierMode mode)
{
-#if ENABLE(GGC)
- emitLoadTag(value, regT0);
Jump valueNotCell;
- if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue)
+ if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue) {
+ emitLoadTag(value, regT0);
valueNotCell = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
+ }
emitLoad(owner, regT0, regT1);
Jump ownerNotCell;
- if (mode == ShouldFilterBaseAndValue)
+ if (mode == ShouldFilterBase || mode == ShouldFilterBaseAndValue)
ownerNotCell = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
- Jump ownerNotMarked = checkMarkWord(regT1, regT0, regT2);
- callOperation(operationUnconditionalWriteBarrier, regT1);
- ownerNotMarked.link(this);
+ Jump ownerIsRememberedOrInEden = barrierBranch(regT1, regT2);
+ callOperation(operationWriteBarrierSlowPath, regT1);
+ ownerIsRememberedOrInEden.link(this);
- if (mode == ShouldFilterBaseAndValue)
+ if (mode == ShouldFilterBase || mode == ShouldFilterBaseAndValue)
ownerNotCell.link(this);
if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue)
valueNotCell.link(this);
-#else
- UNUSED_PARAM(owner);
- UNUSED_PARAM(value);
- UNUSED_PARAM(mode);
-#endif
}
void JIT::emitWriteBarrier(JSCell* owner, unsigned value, WriteBarrierMode mode)
{
-#if ENABLE(GGC)
- emitLoadTag(value, regT0);
Jump valueNotCell;
- if (mode == ShouldFilterValue)
+ if (mode == ShouldFilterValue) {
+ emitLoadTag(value, regT0);
valueNotCell = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
+ }
- if (!MarkedBlock::blockFor(owner)->isMarked(owner)) {
- Jump ownerNotMarked = checkMarkWord(regT0, regT1, regT2);
- callOperation(operationUnconditionalWriteBarrier, owner);
- ownerNotMarked.link(this);
- } else
- callOperation(operationUnconditionalWriteBarrier, owner);
+ emitWriteBarrier(owner);
if (mode == ShouldFilterValue)
valueNotCell.link(this);
-#else
- UNUSED_PARAM(owner);
- UNUSED_PARAM(value);
- UNUSED_PARAM(mode);
-#endif
}
#endif // USE(JSVALUE64)
-JIT::Jump JIT::addStructureTransitionCheck(JSCell* object, Structure* structure, StructureStubInfo* stubInfo, RegisterID scratch)
+void JIT::emitWriteBarrier(JSCell* owner)
{
- if (object->structure() == structure && structure->transitionWatchpointSetIsStillValid()) {
- structure->addTransitionWatchpoint(stubInfo->addWatchpoint(m_codeBlock));
-#if !ASSERT_DISABLED
- move(TrustedImmPtr(object), scratch);
- Jump ok = branchPtr(Equal, Address(scratch, JSCell::structureOffset()), TrustedImmPtr(structure));
- breakpoint();
- ok.link(this);
-#endif
- Jump result; // Returning an unset jump this way because otherwise VC++ would complain.
- return result;
- }
-
- move(TrustedImmPtr(object), scratch);
- return branchPtr(NotEqual, Address(scratch, JSCell::structureOffset()), TrustedImmPtr(structure));
+ Jump ownerIsRememberedOrInEden = barrierBranch(owner, regT0);
+ callOperation(operationWriteBarrierSlowPath, owner);
+ ownerIsRememberedOrInEden.link(this);
}
-void JIT::addStructureTransitionCheck(JSCell* object, Structure* structure, StructureStubInfo* stubInfo, JumpList& failureCases, RegisterID scratch)
+void JIT::emitByValIdentifierCheck(ByValInfo* byValInfo, RegisterID cell, RegisterID scratch, const Identifier& propertyName, JumpList& slowCases)
{
- Jump failureCase = addStructureTransitionCheck(object, structure, stubInfo, scratch);
- if (!failureCase.isSet())
- return;
-
- failureCases.append(failureCase);
-}
-
-void JIT::testPrototype(JSValue prototype, JumpList& failureCases, StructureStubInfo* stubInfo)
-{
- if (prototype.isNull())
- return;
-
- ASSERT(prototype.isCell());
- addStructureTransitionCheck(prototype.asCell(), prototype.asCell()->structure(), stubInfo, failureCases, regT3);
+ if (propertyName.isSymbol())
+ slowCases.append(branchPtr(NotEqual, cell, TrustedImmPtr(byValInfo->cachedSymbol.get())));
+ else {
+ slowCases.append(branchStructure(NotEqual, Address(cell, JSCell::structureIDOffset()), m_vm->stringStructure.get()));
+ loadPtr(Address(cell, JSString::offsetOfValue()), scratch);
+ slowCases.append(branchPtr(NotEqual, scratch, TrustedImmPtr(propertyName.impl())));
+ }
}
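
The cached-identifier guard accepts a Symbol key only by pointer identity with the cached cell, and a string key only if it is a JSString whose underlying impl pointer equals the cached identifier's. A standalone sketch with stand-in types:

#include <cassert>
#include <string>

struct StringImpl { std::string chars; };
struct PropertyCell {
    bool isSymbol;
    const void* symbolIdentity;   // identity of a Symbol cell
    const StringImpl* stringImpl; // what JSString::offsetOfValue() would load
};

bool identifierMatches(const PropertyCell& cell, bool cachedIsSymbol,
                       const void* cachedSymbol, const StringImpl* cachedImpl)
{
    if (cachedIsSymbol)
        return cell.isSymbol && cell.symbolIdentity == cachedSymbol; // pointer identity
    return !cell.isSymbol && cell.stringImpl == cachedImpl;          // same atomized impl
}

int main()
{
    StringImpl name { "length" };
    PropertyCell stringKey { false, nullptr, &name };
    assert(identifierMatches(stringKey, false, nullptr, &name));
    StringImpl other { "length" };
    assert(!identifierMatches(stringKey, false, nullptr, &other));   // different impl: slow path
}
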
void JIT::privateCompileGetByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
@@ -1059,6 +1282,12 @@ void JIT::privateCompileGetByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAd
case JITArrayStorage:
slowCases = emitArrayStorageGetByVal(currentInstruction, badType);
break;
+ case JITDirectArguments:
+ slowCases = emitDirectArgumentsGetByVal(currentInstruction, badType);
+ break;
+ case JITScopedArguments:
+ slowCases = emitScopedArgumentsGetByVal(currentInstruction, badType);
+ break;
default:
TypedArrayType type = typedArrayTypeForJITArrayMode(arrayMode);
if (isInt(type))
@@ -1070,7 +1299,7 @@ void JIT::privateCompileGetByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAd
Jump done = jump();
- LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
+ LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
@@ -1078,12 +1307,44 @@ void JIT::privateCompileGetByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAd
patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
- patchBuffer,
+ m_codeBlock, patchBuffer,
("Baseline get_by_val stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(operationGetByValGeneric));
+ MacroAssembler::repatchJump(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
+ MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(operationGetByValGeneric));
+}
+
+void JIT::privateCompileGetByValWithCachedId(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, const Identifier& propertyName)
+{
+ Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;
+
+ Jump fastDoneCase;
+ Jump slowDoneCase;
+ JumpList slowCases;
+
+ JITGetByIdGenerator gen = emitGetByValWithCachedId(byValInfo, currentInstruction, propertyName, fastDoneCase, slowDoneCase, slowCases);
+
+ ConcurrentJSLocker locker(m_codeBlock->m_lock);
+ LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
+ patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
+ patchBuffer.link(fastDoneCase, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
+ patchBuffer.link(slowDoneCase, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToNextHotPath));
+ if (!m_exceptionChecks.empty())
+ patchBuffer.link(m_exceptionChecks, byValInfo->exceptionHandler);
+
+ for (const auto& callSite : m_calls) {
+ if (callSite.to)
+ patchBuffer.link(callSite.from, FunctionPtr(callSite.to));
+ }
+ gen.finalize(patchBuffer);
+
+ byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
+ m_codeBlock, patchBuffer,
+ ("Baseline get_by_val with cached property name '%s' stub for %s, return point %p", propertyName.impl()->utf8().data(), toCString(*m_codeBlock).data(), returnAddress.value()));
+ byValInfo->stubInfo = gen.stubInfo();
+
+ MacroAssembler::repatchJump(byValInfo->notIndexJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
+ MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(operationGetByValGeneric));
}
void JIT::privateCompilePutByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
@@ -1093,9 +1354,7 @@ void JIT::privateCompilePutByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAd
PatchableJump badType;
JumpList slowCases;
-#if ENABLE(GGC)
bool needsLinkForWriteBarrier = false;
-#endif
switch (arrayMode) {
case JITInt32:
@@ -1106,15 +1365,11 @@ void JIT::privateCompilePutByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAd
break;
case JITContiguous:
slowCases = emitContiguousPutByVal(currentInstruction, badType);
-#if ENABLE(GGC)
needsLinkForWriteBarrier = true;
-#endif
break;
case JITArrayStorage:
slowCases = emitArrayStoragePutByVal(currentInstruction, badType);
-#if ENABLE(GGC)
needsLinkForWriteBarrier = true;
-#endif
break;
default:
TypedArrayType type = typedArrayTypeForJITArrayMode(arrayMode);
@@ -1127,31 +1382,129 @@ void JIT::privateCompilePutByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAd
Jump done = jump();
- LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
+ LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
-#if ENABLE(GGC)
if (needsLinkForWriteBarrier) {
- ASSERT(m_calls.last().to == operationUnconditionalWriteBarrier);
- patchBuffer.link(m_calls.last().from, operationUnconditionalWriteBarrier);
+ ASSERT(m_calls.last().to == operationWriteBarrierSlowPath);
+ patchBuffer.link(m_calls.last().from, operationWriteBarrierSlowPath);
}
-#endif
bool isDirect = m_interpreter->getOpcodeID(currentInstruction->u.opcode) == op_put_by_val_direct;
if (!isDirect) {
byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
- patchBuffer,
+ m_codeBlock, patchBuffer,
("Baseline put_by_val stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
} else {
byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
- patchBuffer,
+ m_codeBlock, patchBuffer,
("Baseline put_by_val_direct stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
}
- RepatchBuffer repatchBuffer(m_codeBlock);
- repatchBuffer.relink(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
- repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(isDirect ? operationDirectPutByValGeneric : operationPutByValGeneric));
+ MacroAssembler::repatchJump(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
+ MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(isDirect ? operationDirectPutByValGeneric : operationPutByValGeneric));
+}
+
+void JIT::privateCompilePutByValWithCachedId(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, PutKind putKind, const Identifier& propertyName)
+{
+ Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;
+
+ JumpList doneCases;
+ JumpList slowCases;
+
+ JITPutByIdGenerator gen = emitPutByValWithCachedId(byValInfo, currentInstruction, putKind, propertyName, doneCases, slowCases);
+
+ ConcurrentJSLocker locker(m_codeBlock->m_lock);
+ LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
+ patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
+ patchBuffer.link(doneCases, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
+ if (!m_exceptionChecks.empty())
+ patchBuffer.link(m_exceptionChecks, byValInfo->exceptionHandler);
+
+ for (const auto& callSite : m_calls) {
+ if (callSite.to)
+ patchBuffer.link(callSite.from, FunctionPtr(callSite.to));
+ }
+ gen.finalize(patchBuffer);
+
+ byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
+ m_codeBlock, patchBuffer,
+ ("Baseline put_by_val%s with cached property name '%s' stub for %s, return point %p", (putKind == Direct) ? "_direct" : "", propertyName.impl()->utf8().data(), toCString(*m_codeBlock).data(), returnAddress.value()));
+ byValInfo->stubInfo = gen.stubInfo();
+
+ MacroAssembler::repatchJump(byValInfo->notIndexJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
+ MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(putKind == Direct ? operationDirectPutByValGeneric : operationPutByValGeneric));
+}
+
+
+JIT::JumpList JIT::emitDirectArgumentsGetByVal(Instruction*, PatchableJump& badType)
+{
+ JumpList slowCases;
+
+#if USE(JSVALUE64)
+ RegisterID base = regT0;
+ RegisterID property = regT1;
+ JSValueRegs result = JSValueRegs(regT0);
+ RegisterID scratch = regT3;
+#else
+ RegisterID base = regT0;
+ RegisterID property = regT2;
+ JSValueRegs result = JSValueRegs(regT1, regT0);
+ RegisterID scratch = regT3;
+#endif
+
+ load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
+ badType = patchableBranch32(NotEqual, scratch, TrustedImm32(DirectArgumentsType));
+
+ slowCases.append(branch32(AboveOrEqual, property, Address(base, DirectArguments::offsetOfLength())));
+ slowCases.append(branchTestPtr(NonZero, Address(base, DirectArguments::offsetOfMappedArguments())));
+
+ zeroExtend32ToPtr(property, scratch);
+ loadValue(BaseIndex(base, scratch, TimesEight, DirectArguments::storageOffset()), result);
+
+ return slowCases;
+}
+
+JIT::JumpList JIT::emitScopedArgumentsGetByVal(Instruction*, PatchableJump& badType)
+{
+ JumpList slowCases;
+
+#if USE(JSVALUE64)
+ RegisterID base = regT0;
+ RegisterID property = regT1;
+ JSValueRegs result = JSValueRegs(regT0);
+ RegisterID scratch = regT3;
+ RegisterID scratch2 = regT4;
+#else
+ RegisterID base = regT0;
+ RegisterID property = regT2;
+ JSValueRegs result = JSValueRegs(regT1, regT0);
+ RegisterID scratch = regT3;
+ RegisterID scratch2 = regT4;
+#endif
+
+ load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
+ badType = patchableBranch32(NotEqual, scratch, TrustedImm32(ScopedArgumentsType));
+ slowCases.append(branch32(AboveOrEqual, property, Address(base, ScopedArguments::offsetOfTotalLength())));
+
+ loadPtr(Address(base, ScopedArguments::offsetOfTable()), scratch);
+ load32(Address(scratch, ScopedArgumentsTable::offsetOfLength()), scratch2);
+ Jump overflowCase = branch32(AboveOrEqual, property, scratch2);
+ loadPtr(Address(base, ScopedArguments::offsetOfScope()), scratch2);
+ loadPtr(Address(scratch, ScopedArgumentsTable::offsetOfArguments()), scratch);
+ load32(BaseIndex(scratch, property, TimesFour), scratch);
+ slowCases.append(branch32(Equal, scratch, TrustedImm32(ScopeOffset::invalidOffset)));
+ loadValue(BaseIndex(scratch2, scratch, TimesEight, JSEnvironmentRecord::offsetOfVariables()), result);
+ Jump done = jump();
+ overflowCase.link(this);
+ sub32(property, scratch2);
+ neg32(scratch2);
+ loadValue(BaseIndex(base, scratch2, TimesEight, ScopedArguments::overflowStorageOffset()), result);
+ slowCases.append(branchIfEmpty(result));
+ done.link(this);
+
+ return slowCases;
}
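
The scoped-arguments fast path is a two-level lookup: indices below the table length map through the ScopedArgumentsTable to a ScopeOffset and are read out of the enclosing scope's variables (an invalid offset means the slot was reified and must go slow), while larger indices come from the overflow storage on the arguments object itself. A behavioural model; the empty-value check on the overflow path is omitted:

#include <cassert>
#include <cstdint>
#include <optional>
#include <vector>

constexpr uint32_t invalidScopeOffset = UINT32_MAX; // stand-in for ScopeOffset::invalidOffset

struct ScopedArgumentsModel {
    uint32_t totalLength;
    std::vector<uint32_t> tableArguments;   // ScopedArgumentsTable: index -> ScopeOffset
    std::vector<uint64_t> scopeVariables;   // the enclosing JSEnvironmentRecord's variables
    std::vector<uint64_t> overflowStorage;  // arguments beyond the named ones
};

std::optional<uint64_t> scopedArgumentsGet(const ScopedArgumentsModel& args, uint32_t index)
{
    if (index >= args.totalLength)
        return std::nullopt;                                   // bounds check: slow path
    if (index < args.tableArguments.size()) {
        uint32_t offset = args.tableArguments[index];
        if (offset == invalidScopeOffset)
            return std::nullopt;                               // reified/overridden slot: slow path
        return args.scopeVariables[offset];
    }
    return args.overflowStorage[index - args.tableArguments.size()];
}

int main()
{
    ScopedArgumentsModel args { 3, { 1, invalidScopeOffset }, { 100, 200 }, { 300 } };
    assert(scopedArgumentsGet(args, 0) == 200);   // table maps index 0 to scope slot 1
    assert(!scopedArgumentsGet(args, 1));         // invalid offset: slow path
    assert(scopedArgumentsGet(args, 2) == 300);   // overflow storage
    assert(!scopedArgumentsGet(args, 3));         // out of bounds
}
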
JIT::JumpList JIT::emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badType, TypedArrayType type)
@@ -1176,26 +1529,26 @@ JIT::JumpList JIT::emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badTyp
JumpList slowCases;
- loadPtr(Address(base, JSCell::structureOffset()), scratch);
- badType = patchableBranchPtr(NotEqual, Address(scratch, Structure::classInfoOffset()), TrustedImmPtr(classInfoForType(type)));
+ load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
+ badType = patchableBranch32(NotEqual, scratch, TrustedImm32(typeForTypedArrayType(type)));
slowCases.append(branch32(AboveOrEqual, property, Address(base, JSArrayBufferView::offsetOfLength())));
- loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), base);
+ loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), scratch);
switch (elementSize(type)) {
case 1:
- if (isSigned(type))
- load8Signed(BaseIndex(base, property, TimesOne), resultPayload);
+ if (JSC::isSigned(type))
+ load8SignedExtendTo32(BaseIndex(scratch, property, TimesOne), resultPayload);
else
- load8(BaseIndex(base, property, TimesOne), resultPayload);
+ load8(BaseIndex(scratch, property, TimesOne), resultPayload);
break;
case 2:
- if (isSigned(type))
- load16Signed(BaseIndex(base, property, TimesTwo), resultPayload);
+ if (JSC::isSigned(type))
+ load16SignedExtendTo32(BaseIndex(scratch, property, TimesTwo), resultPayload);
else
- load16(BaseIndex(base, property, TimesTwo), resultPayload);
+ load16(BaseIndex(scratch, property, TimesTwo), resultPayload);
break;
case 4:
- load32(BaseIndex(base, property, TimesFour), resultPayload);
+ load32(BaseIndex(scratch, property, TimesFour), resultPayload);
break;
default:
CRASH();
@@ -1246,19 +1599,19 @@ JIT::JumpList JIT::emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badT
#endif
JumpList slowCases;
-
- loadPtr(Address(base, JSCell::structureOffset()), scratch);
- badType = patchableBranchPtr(NotEqual, Address(scratch, Structure::classInfoOffset()), TrustedImmPtr(classInfoForType(type)));
+
+ load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
+ badType = patchableBranch32(NotEqual, scratch, TrustedImm32(typeForTypedArrayType(type)));
slowCases.append(branch32(AboveOrEqual, property, Address(base, JSArrayBufferView::offsetOfLength())));
- loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), base);
+ loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), scratch);
switch (elementSize(type)) {
case 4:
- loadFloat(BaseIndex(base, property, TimesFour), fpRegT0);
+ loadFloat(BaseIndex(scratch, property, TimesFour), fpRegT0);
convertFloatToDouble(fpRegT0, fpRegT0);
break;
case 8: {
- loadDouble(BaseIndex(base, property, TimesEight), fpRegT0);
+ loadDouble(BaseIndex(scratch, property, TimesEight), fpRegT0);
break;
}
default:
@@ -1266,8 +1619,8 @@ JIT::JumpList JIT::emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badT
}
Jump notNaN = branchDouble(DoubleEqual, fpRegT0, fpRegT0);
- static const double NaN = QNaN;
- loadDouble(&NaN, fpRegT0);
+ static const double NaN = PNaN;
+ loadDouble(TrustedImmPtr(&NaN), fpRegT0);
notNaN.link(this);
#if USE(JSVALUE64)
@@ -1281,6 +1634,7 @@ JIT::JumpList JIT::emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badT
JIT::JumpList JIT::emitIntTypedArrayPutByVal(Instruction* currentInstruction, PatchableJump& badType, TypedArrayType type)
{
+ ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
ASSERT(isInt(type));
int value = currentInstruction[3].u.operand;
@@ -1299,13 +1653,16 @@ JIT::JumpList JIT::emitIntTypedArrayPutByVal(Instruction* currentInstruction, Pa
JumpList slowCases;
- loadPtr(Address(base, JSCell::structureOffset()), earlyScratch);
- badType = patchableBranchPtr(NotEqual, Address(earlyScratch, Structure::classInfoOffset()), TrustedImmPtr(classInfoForType(type)));
- slowCases.append(branch32(AboveOrEqual, property, Address(base, JSArrayBufferView::offsetOfLength())));
+ load8(Address(base, JSCell::typeInfoTypeOffset()), earlyScratch);
+ badType = patchableBranch32(NotEqual, earlyScratch, TrustedImm32(typeForTypedArrayType(type)));
+ Jump inBounds = branch32(Below, property, Address(base, JSArrayBufferView::offsetOfLength()));
+ emitArrayProfileOutOfBoundsSpecialCase(profile);
+ slowCases.append(jump());
+ inBounds.link(this);
#if USE(JSVALUE64)
emitGetVirtualRegister(value, earlyScratch);
- slowCases.append(emitJumpIfNotImmediateInteger(earlyScratch));
+ slowCases.append(emitJumpIfNotInt(earlyScratch));
#else
emitLoad(value, lateScratch, earlyScratch);
slowCases.append(branch32(NotEqual, lateScratch, TrustedImm32(JSValue::Int32Tag)));
@@ -1317,7 +1674,7 @@ JIT::JumpList JIT::emitIntTypedArrayPutByVal(Instruction* currentInstruction, Pa
if (isClamped(type)) {
ASSERT(elementSize(type) == 1);
- ASSERT(!isSigned(type));
+ ASSERT(!JSC::isSigned(type));
Jump inBounds = branch32(BelowOrEqual, earlyScratch, TrustedImm32(0xff));
Jump tooBig = branch32(GreaterThan, earlyScratch, TrustedImm32(0xff));
xor32(earlyScratch, earlyScratch);
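
The clamped store being emitted here implements Uint8ClampedArray saturation: values above 255 store 255, negative values store 0 (that is what the xor32 produces), and anything else stores unchanged. The same logic as plain C++:

#include <cassert>
#include <cstdint>

uint8_t clampToUint8(int32_t value)
{
    if (value > 0xff)   // the "tooBig" branch
        return 0xff;
    if (value < 0)      // the xor32 zeroes the register on this path
        return 0;
    return static_cast<uint8_t>(value);
}

int main()
{
    assert(clampToUint8(300) == 255);
    assert(clampToUint8(-5) == 0);
    assert(clampToUint8(42) == 42);
}
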
@@ -1347,6 +1704,7 @@ JIT::JumpList JIT::emitIntTypedArrayPutByVal(Instruction* currentInstruction, Pa
JIT::JumpList JIT::emitFloatTypedArrayPutByVal(Instruction* currentInstruction, PatchableJump& badType, TypedArrayType type)
{
+ ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
ASSERT(isFloat(type));
int value = currentInstruction[3].u.operand;
@@ -1365,17 +1723,20 @@ JIT::JumpList JIT::emitFloatTypedArrayPutByVal(Instruction* currentInstruction,
JumpList slowCases;
- loadPtr(Address(base, JSCell::structureOffset()), earlyScratch);
- badType = patchableBranchPtr(NotEqual, Address(earlyScratch, Structure::classInfoOffset()), TrustedImmPtr(classInfoForType(type)));
- slowCases.append(branch32(AboveOrEqual, property, Address(base, JSArrayBufferView::offsetOfLength())));
+ load8(Address(base, JSCell::typeInfoTypeOffset()), earlyScratch);
+ badType = patchableBranch32(NotEqual, earlyScratch, TrustedImm32(typeForTypedArrayType(type)));
+ Jump inBounds = branch32(Below, property, Address(base, JSArrayBufferView::offsetOfLength()));
+ emitArrayProfileOutOfBoundsSpecialCase(profile);
+ slowCases.append(jump());
+ inBounds.link(this);
#if USE(JSVALUE64)
emitGetVirtualRegister(value, earlyScratch);
- Jump doubleCase = emitJumpIfNotImmediateInteger(earlyScratch);
+ Jump doubleCase = emitJumpIfNotInt(earlyScratch);
convertInt32ToDouble(earlyScratch, fpRegT0);
Jump ready = jump();
doubleCase.link(this);
- slowCases.append(emitJumpIfNotImmediateNumber(earlyScratch));
+ slowCases.append(emitJumpIfNotNumber(earlyScratch));
add64(tagTypeNumberRegister, earlyScratch);
move64ToDouble(earlyScratch, fpRegT0);
ready.link(this);
@@ -1409,6 +1770,18 @@ JIT::JumpList JIT::emitFloatTypedArrayPutByVal(Instruction* currentInstruction,
return slowCases;
}
+void JIT::emit_op_define_data_property(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_define_data_property);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_define_accessor_property(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_define_accessor_property);
+ slowPathCall.call();
+}
+
} // namespace JSC
#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
index 5bc8d1abb..bc34b3000 100644
--- a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009, 2014, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,17 +30,17 @@
#include "JIT.h"
#include "CodeBlock.h"
+#include "DirectArguments.h"
#include "GCAwareJITStubRoutine.h"
#include "Interpreter.h"
#include "JITInlines.h"
#include "JSArray.h"
+#include "JSEnvironmentRecord.h"
#include "JSFunction.h"
-#include "JSPropertyNameIterator.h"
-#include "JSVariableObject.h"
#include "LinkBuffer.h"
-#include "RepatchBuffer.h"
#include "ResultType.h"
-#include "SamplingTool.h"
+#include "SlowPathCall.h"
+#include "StructureStubInfo.h"
#include <wtf/StringPrintStream.h>
@@ -57,17 +57,68 @@ void JIT::emit_op_put_by_index(Instruction* currentInstruction)
callOperation(operationPutByIndex, regT1, regT0, property, regT3, regT2);
}
-void JIT::emit_op_put_getter_setter(Instruction* currentInstruction)
+void JIT::emit_op_put_getter_by_id(Instruction* currentInstruction)
{
int base = currentInstruction[1].u.operand;
int property = currentInstruction[2].u.operand;
- int getter = currentInstruction[3].u.operand;
+ int options = currentInstruction[3].u.operand;
+ int getter = currentInstruction[4].u.operand;
+
+ emitLoadPayload(base, regT1);
+ emitLoadPayload(getter, regT3);
+ callOperation(operationPutGetterById, regT1, m_codeBlock->identifier(property).impl(), options, regT3);
+}
+
+void JIT::emit_op_put_setter_by_id(Instruction* currentInstruction)
+{
+ int base = currentInstruction[1].u.operand;
+ int property = currentInstruction[2].u.operand;
+ unsigned options = currentInstruction[3].u.operand;
int setter = currentInstruction[4].u.operand;
emitLoadPayload(base, regT1);
+ emitLoadPayload(setter, regT3);
+ callOperation(operationPutSetterById, regT1, m_codeBlock->identifier(property).impl(), options, regT3);
+}
+
+void JIT::emit_op_put_getter_setter_by_id(Instruction* currentInstruction)
+{
+ int base = currentInstruction[1].u.operand;
+ int property = currentInstruction[2].u.operand;
+ unsigned attribute = currentInstruction[3].u.operand;
+ int getter = currentInstruction[4].u.operand;
+ int setter = currentInstruction[5].u.operand;
+
+ emitLoadPayload(base, regT1);
emitLoadPayload(getter, regT3);
emitLoadPayload(setter, regT4);
- callOperation(operationPutGetterSetter, regT1, &m_codeBlock->identifier(property), regT3, regT4);
+ callOperation(operationPutGetterSetter, regT1, m_codeBlock->identifier(property).impl(), attribute, regT3, regT4);
+}
+
+void JIT::emit_op_put_getter_by_val(Instruction* currentInstruction)
+{
+ int base = currentInstruction[1].u.operand;
+ int property = currentInstruction[2].u.operand;
+ int32_t attributes = currentInstruction[3].u.operand;
+ int getter = currentInstruction[4].u.operand;
+
+ emitLoadPayload(base, regT2);
+ emitLoad(property, regT1, regT0);
+ emitLoadPayload(getter, regT3);
+ callOperation(operationPutGetterByVal, regT2, regT1, regT0, attributes, regT3);
+}
+
+void JIT::emit_op_put_setter_by_val(Instruction* currentInstruction)
+{
+ int base = currentInstruction[1].u.operand;
+ int property = currentInstruction[2].u.operand;
+ int32_t attributes = currentInstruction[3].u.operand;
+ int getter = currentInstruction[4].u.operand;
+
+ emitLoadPayload(base, regT2);
+ emitLoad(property, regT1, regT0);
+ emitLoadPayload(getter, regT3);
+ callOperation(operationPutSetterByVal, regT2, regT1, regT0, attributes, regT3);
}
void JIT::emit_op_del_by_id(Instruction* currentInstruction)
@@ -76,14 +127,23 @@ void JIT::emit_op_del_by_id(Instruction* currentInstruction)
int base = currentInstruction[2].u.operand;
int property = currentInstruction[3].u.operand;
emitLoad(base, regT1, regT0);
- callOperation(operationDeleteById, dst, regT1, regT0, &m_codeBlock->identifier(property));
+ callOperation(operationDeleteByIdJSResult, dst, regT1, regT0, m_codeBlock->identifier(property).impl());
+}
+
+void JIT::emit_op_del_by_val(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int property = currentInstruction[3].u.operand;
+ emitLoad2(base, regT1, regT0, property, regT3, regT2);
+ callOperation(operationDeleteByValJSResult, dst, regT1, regT0, regT3, regT2);
}
JIT::CodeRef JIT::stringGetByValStubGenerator(VM* vm)
{
JSInterfaceJIT jit(vm);
JumpList failures;
- failures.append(jit.branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(vm->stringStructure.get())));
+ failures.append(jit.branchStructure(NotEqual, Address(regT0, JSCell::structureIDOffset()), vm->stringStructure.get()));
// Load string length to regT1, and start the process of loading the data pointer into regT0
jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT1);
@@ -117,7 +177,7 @@ JIT::CodeRef JIT::stringGetByValStubGenerator(VM* vm)
jit.move(TrustedImm32(0), regT0);
jit.ret();
- LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
return FINALIZE_CODE(patchBuffer, ("String get_by_val stub"));
}
@@ -127,13 +187,14 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
int base = currentInstruction[2].u.operand;
int property = currentInstruction[3].u.operand;
ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+ ByValInfo* byValInfo = m_codeBlock->addByValInfo();
emitLoad2(base, regT1, regT0, property, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
emitJumpSlowCaseIfNotJSCell(base, regT1);
- loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
- emitArrayProfilingSite(regT1, regT3, profile);
+ PatchableJump notIndex = patchableBranch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag));
+ addSlowCase(notIndex);
+ emitArrayProfilingSiteWithCell(regT0, regT1, profile);
and32(TrustedImm32(IndexingShapeMask), regT1);
PatchableJump badType;
@@ -162,27 +223,27 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
Label done = label();
-#if !ASSERT_DISABLED
- Jump resultOK = branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag));
- breakpoint();
- resultOK.link(this);
-#endif
+ if (!ASSERT_DISABLED) {
+ Jump resultOK = branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag));
+ abortWithReason(JITGetByValResultIsNotEmpty);
+ resultOK.link(this);
+ }
emitValueProfilingSite();
emitStore(dst, regT1, regT0);
+
+ Label nextHotPath = label();
- m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
+ m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, notIndex, badType, mode, profile, done, nextHotPath));
}
-JIT::JumpList JIT::emitContiguousGetByVal(Instruction*, PatchableJump& badType, IndexingType expectedShape)
+JIT::JumpList JIT::emitContiguousLoad(Instruction*, PatchableJump& badType, IndexingType expectedShape)
{
JumpList slowCases;
badType = patchableBranch32(NotEqual, regT1, TrustedImm32(expectedShape));
-
loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, Butterfly::offsetOfPublicLength())));
-
load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
slowCases.append(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));
@@ -190,52 +251,75 @@ JIT::JumpList JIT::emitContiguousGetByVal(Instruction*, PatchableJump& badType,
return slowCases;
}
-JIT::JumpList JIT::emitDoubleGetByVal(Instruction*, PatchableJump& badType)
+JIT::JumpList JIT::emitDoubleLoad(Instruction*, PatchableJump& badType)
{
JumpList slowCases;
badType = patchableBranch32(NotEqual, regT1, TrustedImm32(DoubleShape));
-
loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, Butterfly::offsetOfPublicLength())));
-
loadDouble(BaseIndex(regT3, regT2, TimesEight), fpRegT0);
slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0));
- moveDoubleToInts(fpRegT0, regT0, regT1);
return slowCases;
}
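
With moveDoubleToInts dropped from the load helper, callers that need a boxed JSValue presumably do the conversion themselves. On JSVALUE32_64 that boxing is just splitting the double's bit pattern into a low payload word and a high tag word (non-double tags live in the NaN space, which is why NaNs are purified to PNaN before storage elsewhere in this patch). A minimal sketch of the split and rejoin:

#include <cassert>
#include <cstdint>
#include <cstring>

void moveDoubleToInts(double d, uint32_t& payload, uint32_t& tag)
{
    uint64_t bits;
    std::memcpy(&bits, &d, sizeof(bits));
    payload = static_cast<uint32_t>(bits);        // low 32 bits
    tag = static_cast<uint32_t>(bits >> 32);      // high 32 bits
}

double moveIntsToDouble(uint32_t payload, uint32_t tag)
{
    uint64_t bits = (static_cast<uint64_t>(tag) << 32) | payload;
    double d;
    std::memcpy(&d, &bits, sizeof(d));
    return d;
}

int main()
{
    uint32_t payload = 0, tag = 0;
    moveDoubleToInts(3.25, payload, tag);
    assert(moveIntsToDouble(payload, tag) == 3.25);
}
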
-JIT::JumpList JIT::emitArrayStorageGetByVal(Instruction*, PatchableJump& badType)
+JIT::JumpList JIT::emitArrayStorageLoad(Instruction*, PatchableJump& badType)
{
JumpList slowCases;
add32(TrustedImm32(-ArrayStorageShape), regT1, regT3);
badType = patchableBranch32(Above, regT3, TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape));
-
loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, ArrayStorage::vectorLengthOffset())));
-
load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
slowCases.append(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));
return slowCases;
}
-
+
+JITGetByIdGenerator JIT::emitGetByValWithCachedId(ByValInfo* byValInfo, Instruction* currentInstruction, const Identifier& propertyName, Jump& fastDoneCase, Jump& slowDoneCase, JumpList& slowCases)
+{
+ int dst = currentInstruction[1].u.operand;
+
+ // base: tag(regT1), payload(regT0)
+ // property: tag(regT3), payload(regT2)
+ // scratch: regT4
+
+ slowCases.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::CellTag)));
+ emitByValIdentifierCheck(byValInfo, regT2, regT4, propertyName, slowCases);
+
+ JITGetByIdGenerator gen(
+ m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(),
+ propertyName.impl(), JSValueRegs::payloadOnly(regT0), JSValueRegs(regT1, regT0), AccessType::Get);
+ gen.generateFastPath(*this);
+
+ fastDoneCase = jump();
+
+ Label coldPathBegin = label();
+ gen.slowPathJump().link(this);
+
+ Call call = callOperation(WithProfile, operationGetByIdOptimize, dst, gen.stubInfo(), regT1, regT0, propertyName.impl());
+ gen.reportSlowPathCall(coldPathBegin, call);
+ slowDoneCase = jump();
+
+ return gen;
+}
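
Conceptually, the cached-id path turns a get_by_val that keeps seeing the same identifier-like key into a get_by_id IC sitting behind the identity guard above. The following is only a high-level model of that strategy, not the real ByValInfo/StructureStubInfo machinery:

#include <cassert>
#include <optional>
#include <string>
#include <unordered_map>

struct ByValCachedIdModel {
    std::optional<std::string> cachedId;                   // analogue of the cached property name

    int get(const std::unordered_map<std::string, int>& object, const std::string& key,
            bool& tookFastPath)
    {
        if (cachedId && key == *cachedId) {                // emitByValIdentifierCheck
            tookFastPath = true;                           // JITGetByIdGenerator fast path
            return object.at(key);
        }
        tookFastPath = false;                              // generic slow path; (re)seed the cache
        cachedId = key;
        return object.at(key);
    }
};

int main()
{
    ByValCachedIdModel cache;
    std::unordered_map<std::string, int> o { { "x", 1 }, { "y", 2 } };
    bool fast = false;
    cache.get(o, "x", fast);                               // first access seeds the cache
    assert(!fast);
    cache.get(o, "x", fast);                               // repeated identifier hits the by-id path
    assert(fast);
    cache.get(o, "y", fast);                               // different key: back to the generic path
    assert(!fast);
}
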
+
void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
int dst = currentInstruction[1].u.operand;
int base = currentInstruction[2].u.operand;
int property = currentInstruction[3].u.operand;
- ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
-
- linkSlowCase(iter); // property int32 check
+ ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;
+
linkSlowCaseIfNotJSCell(iter, base); // base cell check
+ linkSlowCase(iter); // property int32 check
Jump nonCell = jump();
linkSlowCase(iter); // base array check
- Jump notString = branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get()));
+ Jump notString = branchStructure(NotEqual, Address(regT0, JSCell::structureIDOffset()), m_vm->stringStructure.get());
emitNakedCall(m_vm->getCTIStub(stringGetByValStubGenerator).code());
Jump failed = branchTestPtr(Zero, regT0);
emitStore(dst, regT1, regT0);
@@ -243,21 +327,15 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas
failed.link(this);
notString.link(this);
nonCell.link(this);
-
- Jump skipProfiling = jump();
linkSlowCase(iter); // vector length check
linkSlowCase(iter); // empty value
- emitArrayProfileOutOfBoundsSpecialCase(profile);
-
- skipProfiling.link(this);
-
Label slowPath = label();
emitLoad(base, regT1, regT0);
emitLoad(property, regT3, regT2);
- Call call = callOperation(operationGetByValDefault, dst, regT1, regT0, regT3, regT2);
+ Call call = callOperation(operationGetByValOptimize, dst, regT1, regT0, regT3, regT2, byValInfo);
m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
@@ -271,13 +349,14 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
int base = currentInstruction[1].u.operand;
int property = currentInstruction[2].u.operand;
ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+ ByValInfo* byValInfo = m_codeBlock->addByValInfo();
emitLoad2(base, regT1, regT0, property, regT3, regT2);
- addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
emitJumpSlowCaseIfNotJSCell(base, regT1);
- loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
- emitArrayProfilingSite(regT1, regT3, profile);
+ PatchableJump notIndex = patchableBranch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag));
+ addSlowCase(notIndex);
+ emitArrayProfilingSiteWithCell(regT0, regT1, profile);
and32(TrustedImm32(IndexingShapeMask), regT1);
PatchableJump badType;
@@ -307,7 +386,7 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
Label done = label();
- m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
+ m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, notIndex, badType, mode, profile, done, done));
}
JIT::JumpList JIT::emitGenericContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType, IndexingType indexingShape)
@@ -406,15 +485,52 @@ JIT::JumpList JIT::emitArrayStoragePutByVal(Instruction* currentInstruction, Pat
return slowCases;
}
+JITPutByIdGenerator JIT::emitPutByValWithCachedId(ByValInfo* byValInfo, Instruction* currentInstruction, PutKind putKind, const Identifier& propertyName, JumpList& doneCases, JumpList& slowCases)
+{
+ // base: tag(regT1), payload(regT0)
+ // property: tag(regT3), payload(regT2)
+
+ int base = currentInstruction[1].u.operand;
+ int value = currentInstruction[3].u.operand;
+
+ slowCases.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::CellTag)));
+ emitByValIdentifierCheck(byValInfo, regT2, regT2, propertyName, slowCases);
+
+ // The write barrier clobbers the registers, so they have to be reloaded
+ // after issuing it.
+ emitWriteBarrier(base, value, ShouldFilterBase);
+ emitLoadPayload(base, regT0);
+ emitLoad(value, regT3, regT2);
+
+ JITPutByIdGenerator gen(
+ m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(),
+ JSValueRegs::payloadOnly(regT0), JSValueRegs(regT3, regT2), regT1, m_codeBlock->ecmaMode(), putKind);
+ gen.generateFastPath(*this);
+ doneCases.append(jump());
+
+ Label coldPathBegin = label();
+ gen.slowPathJump().link(this);
+
+ // JITPutByIdGenerator only preserves the value and the base's payload, so we have to reload the tag.
+ emitLoadTag(base, regT1);
+
+ Call call = callOperation(gen.slowPathFunction(), gen.stubInfo(), regT3, regT2, regT1, regT0, propertyName.impl());
+ gen.reportSlowPathCall(coldPathBegin, call);
+ doneCases.append(jump());
+
+ return gen;
+}
+
void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
int base = currentInstruction[1].u.operand;
int property = currentInstruction[2].u.operand;
int value = currentInstruction[3].u.operand;
ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+ ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;
- linkSlowCase(iter); // property int32 check
linkSlowCaseIfNotJSCell(iter, base); // base cell check
+ linkSlowCase(iter); // property int32 check
linkSlowCase(iter); // base not array check
JITArrayMode mode = chooseArrayMode(profile);
@@ -451,14 +567,15 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas
emitLoad(value, regT0, regT1);
addCallArgument(regT1);
addCallArgument(regT0);
- Call call = appendCallWithExceptionCheck(isDirect ? operationDirectPutByVal : operationPutByVal);
+ addCallArgument(TrustedImmPtr(byValInfo));
+ Call call = appendCallWithExceptionCheck(isDirect ? operationDirectPutByValOptimize : operationPutByValOptimize);
#else
// The register selection below is chosen to reduce register swapping on ARM.
// Swapping shouldn't happen on other platforms.
emitLoad(base, regT2, regT1);
emitLoad(property, regT3, regT0);
emitLoad(value, regT5, regT4);
- Call call = callOperation(isDirect ? operationDirectPutByVal : operationPutByVal, regT2, regT1, regT3, regT0, regT5, regT4);
+ Call call = callOperation(isDirect ? operationDirectPutByValOptimize : operationPutByValOptimize, regT2, regT1, regT3, regT0, regT5, regT4, byValInfo);
#endif
m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
@@ -466,6 +583,45 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas
m_byValInstructionIndex++;
}
+void JIT::emit_op_try_get_by_id(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+
+ emitLoad(base, regT1, regT0);
+ emitJumpSlowCaseIfNotJSCell(base, regT1);
+
+ JITGetByIdGenerator gen(
+ m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(),
+ ident->impl(), JSValueRegs::payloadOnly(regT0), JSValueRegs(regT1, regT0), AccessType::TryGet);
+ gen.generateFastPath(*this);
+ addSlowCase(gen.slowPathJump());
+ m_getByIds.append(gen);
+
+ emitValueProfilingSite();
+ emitStore(dst, regT1, regT0);
+}
+
+void JIT::emitSlow_op_try_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int resultVReg = currentInstruction[1].u.operand;
+ int baseVReg = currentInstruction[2].u.operand;
+ const Identifier* ident = &(m_codeBlock->identifier(currentInstruction[3].u.operand));
+
+ linkSlowCaseIfNotJSCell(iter, baseVReg);
+ linkSlowCase(iter);
+
+ JITGetByIdGenerator& gen = m_getByIds[m_getByIdIndex++];
+
+ Label coldPathBegin = label();
+
+ Call call = callOperation(operationTryGetByIdOptimize, resultVReg, gen.stubInfo(), regT1, regT0, ident->impl());
+
+ gen.reportSlowPathCall(coldPathBegin, call);
+}
+
void JIT::emit_op_get_by_id(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
@@ -475,14 +631,12 @@ void JIT::emit_op_get_by_id(Instruction* currentInstruction)
emitLoad(base, regT1, regT0);
emitJumpSlowCaseIfNotJSCell(base, regT1);
- if (*ident == m_vm->propertyNames->length && shouldEmitProfiling()) {
- loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
- emitArrayProfilingSiteForBytecodeIndex(regT2, regT3, m_bytecodeOffset);
- }
+ if (*ident == m_vm->propertyNames->length && shouldEmitProfiling())
+ emitArrayProfilingSiteForBytecodeIndexWithCell(regT0, regT2, m_bytecodeOffset);
JITGetByIdGenerator gen(
- m_codeBlock, CodeOrigin(m_bytecodeOffset), RegisterSet::specialRegisters(),
- callFrameRegister, JSValueRegs::payloadOnly(regT0), JSValueRegs(regT1, regT0), true);
+ m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(),
+ ident->impl(), JSValueRegs::payloadOnly(regT0), JSValueRegs(regT1, regT0), AccessType::Get);
gen.generateFastPath(*this);
addSlowCase(gen.slowPathJump());
m_getByIds.append(gen);
@@ -517,25 +671,22 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
int base = currentInstruction[1].u.operand;
int value = currentInstruction[3].u.operand;
- int direct = currentInstruction[8].u.operand;
+ int direct = currentInstruction[8].u.putByIdFlags & PutByIdIsDirect;
- emitWriteBarrier(base, value, ShouldFilterBaseAndValue);
-
emitLoad2(base, regT1, regT0, value, regT3, regT2);
emitJumpSlowCaseIfNotJSCell(base, regT1);
-
- emitLoad(base, regT1, regT0);
- emitLoad(value, regT3, regT2);
JITPutByIdGenerator gen(
- m_codeBlock, CodeOrigin(m_bytecodeOffset), RegisterSet::specialRegisters(),
- callFrameRegister, JSValueRegs::payloadOnly(regT0), JSValueRegs(regT3, regT2),
- regT1, true, m_codeBlock->ecmaMode(), direct ? Direct : NotDirect);
+ m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(),
+ JSValueRegs::payloadOnly(regT0), JSValueRegs(regT3, regT2),
+ regT1, m_codeBlock->ecmaMode(), direct ? Direct : NotDirect);
gen.generateFastPath(*this);
addSlowCase(gen.slowPathJump());
+ emitWriteBarrier(base, value, ShouldFilterBase);
+
m_putByIds.append(gen);
}
@@ -548,7 +699,10 @@ void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCase
linkSlowCase(iter);
Label coldPathBegin(this);
-
+
+ // JITPutByIdGenerator only preserves the value and the base's payload, so we have to reload the tag.
+ emitLoadTag(base, regT1);
+
JITPutByIdGenerator& gen = m_putByIds[m_putByIdIndex++];
Call call = callOperation(
@@ -557,114 +711,6 @@ void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCase
gen.reportSlowPathCall(coldPathBegin, call);
}
-// Compile a store into an object's property storage. May overwrite base.
-void JIT::compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, PropertyOffset cachedOffset)
-{
- if (isOutOfLineOffset(cachedOffset))
- loadPtr(Address(base, JSObject::butterflyOffset()), base);
- emitStore(indexRelativeToBase(cachedOffset), valueTag, valuePayload, base);
-}
-
-// Compile a load from an object's property storage. May overwrite base.
-void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, PropertyOffset cachedOffset)
-{
- if (isInlineOffset(cachedOffset)) {
- emitLoad(indexRelativeToBase(cachedOffset), resultTag, resultPayload, base);
- return;
- }
-
- RegisterID temp = resultPayload;
- loadPtr(Address(base, JSObject::butterflyOffset()), temp);
- emitLoad(indexRelativeToBase(cachedOffset), resultTag, resultPayload, temp);
-}
-
-void JIT::compileGetDirectOffset(JSObject* base, RegisterID resultTag, RegisterID resultPayload, PropertyOffset cachedOffset)
-{
- if (isInlineOffset(cachedOffset)) {
- move(TrustedImmPtr(base->locationForOffset(cachedOffset)), resultTag);
- load32(Address(resultTag, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
- load32(Address(resultTag, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
- return;
- }
-
- loadPtr(base->butterflyAddress(), resultTag);
- load32(Address(resultTag, offsetInButterfly(cachedOffset) * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
- load32(Address(resultTag, offsetInButterfly(cachedOffset) * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
-}
-
-void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID offset, FinalObjectMode finalObjectMode)
-{
- ASSERT(sizeof(JSValue) == 8);
-
- if (finalObjectMode == MayBeFinal) {
- Jump isInline = branch32(LessThan, offset, TrustedImm32(firstOutOfLineOffset));
- loadPtr(Address(base, JSObject::butterflyOffset()), base);
- neg32(offset);
- Jump done = jump();
- isInline.link(this);
- addPtr(TrustedImmPtr(JSObject::offsetOfInlineStorage() - (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), base);
- done.link(this);
- } else {
-#if !ASSERT_DISABLED
- Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(firstOutOfLineOffset));
- breakpoint();
- isOutOfLine.link(this);
-#endif
- loadPtr(Address(base, JSObject::butterflyOffset()), base);
- neg32(offset);
- }
- load32(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), resultPayload);
- load32(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), resultTag);
-}
-
-void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int property = currentInstruction[3].u.operand;
- unsigned expected = currentInstruction[4].u.operand;
- int iter = currentInstruction[5].u.operand;
- int i = currentInstruction[6].u.operand;
-
- emitLoad2(property, regT1, regT0, base, regT3, regT2);
- emitJumpSlowCaseIfNotJSCell(property, regT1);
- addSlowCase(branchPtr(NotEqual, regT0, payloadFor(expected)));
- // Property registers are now available as the property is known
- emitJumpSlowCaseIfNotJSCell(base, regT3);
- emitLoadPayload(iter, regT1);
-
- // Test base's structure
- loadPtr(Address(regT2, JSCell::structureOffset()), regT0);
- addSlowCase(branchPtr(NotEqual, regT0, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
- load32(addressFor(i), regT3);
- sub32(TrustedImm32(1), regT3);
- addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
- Jump inlineProperty = branch32(Below, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity)));
- add32(TrustedImm32(firstOutOfLineOffset), regT3);
- sub32(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity)), regT3);
- inlineProperty.link(this);
- compileGetDirectOffset(regT2, regT1, regT0, regT3);
-
- emitStore(dst, regT1, regT0);
-}
-
-void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int property = currentInstruction[3].u.operand;
-
- linkSlowCaseIfNotJSCell(iter, property);
- linkSlowCase(iter);
- linkSlowCaseIfNotJSCell(iter, base);
- linkSlowCase(iter);
- linkSlowCase(iter);
-
- emitLoad(base, regT1, regT0);
- emitLoad(property, regT3, regT2);
- callOperation(operationGetByValGeneric, dst, regT1, regT0, regT3, regT2);
-}
-
void JIT::emitVarInjectionCheck(bool needsVarInjectionChecks)
{
if (!needsVarInjectionChecks)
@@ -672,17 +718,11 @@ void JIT::emitVarInjectionCheck(bool needsVarInjectionChecks)
addSlowCase(branch8(Equal, AbsoluteAddress(m_codeBlock->globalObject()->varInjectionWatchpoint()->addressOfState()), TrustedImm32(IsInvalidated)));
}
-void JIT::emitResolveClosure(int dst, bool needsVarInjectionChecks, unsigned depth)
+void JIT::emitResolveClosure(int dst, int scope, bool needsVarInjectionChecks, unsigned depth)
{
emitVarInjectionCheck(needsVarInjectionChecks);
move(TrustedImm32(JSValue::CellTag), regT1);
- emitLoadPayload(JSStack::ScopeChain, regT0);
- if (m_codeBlock->needsActivation()) {
- emitLoadPayload(m_codeBlock->activationRegister().offset(), regT2);
- Jump noActivation = branchTestPtr(Zero, regT2);
- loadPtr(Address(regT2, JSScope::offsetOfNext()), regT0);
- noActivation.link(this);
- }
+ emitLoadPayload(scope, regT0);
for (unsigned i = 0; i < depth; ++i)
loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
emitStore(dst, regT1, regT0);
@@ -691,96 +731,214 @@ void JIT::emitResolveClosure(int dst, bool needsVarInjectionChecks, unsigned dep
void JIT::emit_op_resolve_scope(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
- ResolveType resolveType = static_cast<ResolveType>(currentInstruction[3].u.operand);
- unsigned depth = currentInstruction[4].u.operand;
-
+ int scope = currentInstruction[2].u.operand;
+ ResolveType resolveType = static_cast<ResolveType>(currentInstruction[4].u.operand);
+ unsigned depth = currentInstruction[5].u.operand;
+ auto emitCode = [&] (ResolveType resolveType) {
+ switch (resolveType) {
+ case GlobalProperty:
+ case GlobalVar:
+ case GlobalLexicalVar:
+ case GlobalPropertyWithVarInjectionChecks:
+ case GlobalVarWithVarInjectionChecks:
+ case GlobalLexicalVarWithVarInjectionChecks: {
+ JSScope* constantScope = JSScope::constantScopeForCodeBlock(resolveType, m_codeBlock);
+ RELEASE_ASSERT(constantScope);
+ emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
+ move(TrustedImm32(JSValue::CellTag), regT1);
+ move(TrustedImmPtr(constantScope), regT0);
+ emitStore(dst, regT1, regT0);
+ break;
+ }
+ case ClosureVar:
+ case ClosureVarWithVarInjectionChecks:
+ emitResolveClosure(dst, scope, needsVarInjectionChecks(resolveType), depth);
+ break;
+ case ModuleVar:
+ move(TrustedImm32(JSValue::CellTag), regT1);
+ move(TrustedImmPtr(currentInstruction[6].u.jsCell.get()), regT0);
+ emitStore(dst, regT1, regT0);
+ break;
+ case Dynamic:
+ addSlowCase(jump());
+ break;
+ case LocalClosureVar:
+ case UnresolvedProperty:
+ case UnresolvedPropertyWithVarInjectionChecks:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ };
switch (resolveType) {
- case GlobalProperty:
- case GlobalVar:
- case GlobalPropertyWithVarInjectionChecks:
- case GlobalVarWithVarInjectionChecks:
- emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
- move(TrustedImm32(JSValue::CellTag), regT1);
- move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
- emitStore(dst, regT1, regT0);
- break;
- case ClosureVar:
- case ClosureVarWithVarInjectionChecks:
- emitResolveClosure(dst, needsVarInjectionChecks(resolveType), depth);
- break;
- case Dynamic:
+ case UnresolvedProperty:
+ case UnresolvedPropertyWithVarInjectionChecks: {
+ JumpList skipToEnd;
+ load32(&currentInstruction[4], regT0);
+
+ Jump notGlobalProperty = branch32(NotEqual, regT0, TrustedImm32(GlobalProperty));
+ emitCode(GlobalProperty);
+ skipToEnd.append(jump());
+ notGlobalProperty.link(this);
+
+ Jump notGlobalPropertyWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalPropertyWithVarInjectionChecks));
+ emitCode(GlobalPropertyWithVarInjectionChecks);
+ skipToEnd.append(jump());
+ notGlobalPropertyWithVarInjections.link(this);
+
+ Jump notGlobalLexicalVar = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVar));
+ emitCode(GlobalLexicalVar);
+ skipToEnd.append(jump());
+ notGlobalLexicalVar.link(this);
+
+ Jump notGlobalLexicalVarWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVarWithVarInjectionChecks));
+ emitCode(GlobalLexicalVarWithVarInjectionChecks);
+ skipToEnd.append(jump());
+ notGlobalLexicalVarWithVarInjections.link(this);
+
addSlowCase(jump());
+ skipToEnd.link(this);
+ break;
+ }
+
+ default:
+ emitCode(resolveType);
break;
}
}
void JIT::emitSlow_op_resolve_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- int dst = currentInstruction[1].u.operand;
- ResolveType resolveType = static_cast<ResolveType>(currentInstruction[3].u.operand);
+ ResolveType resolveType = static_cast<ResolveType>(currentInstruction[4].u.operand);
- if (resolveType == GlobalProperty || resolveType == GlobalVar || resolveType == ClosureVar)
+ if (resolveType == GlobalProperty || resolveType == GlobalVar || resolveType == ClosureVar || resolveType == GlobalLexicalVar || resolveType == ModuleVar)
return;
+ if (resolveType == UnresolvedProperty || resolveType == UnresolvedPropertyWithVarInjectionChecks) {
+ linkSlowCase(iter); // Var injections check for GlobalPropertyWithVarInjectionChecks.
+ linkSlowCase(iter); // Var injections check for GlobalLexicalVarWithVarInjectionChecks.
+ }
linkSlowCase(iter);
- int32_t indentifierIndex = currentInstruction[2].u.operand;
- callOperation(operationResolveScope, dst, indentifierIndex);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_resolve_scope);
+ slowPathCall.call();
}
void JIT::emitLoadWithStructureCheck(int scope, Structure** structureSlot)
{
emitLoad(scope, regT1, regT0);
loadPtr(structureSlot, regT2);
- addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), regT2));
+ addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::structureIDOffset()), regT2));
}
-void JIT::emitGetGlobalProperty(uintptr_t* operandSlot)
+void JIT::emitGetVarFromPointer(JSValue* operand, GPRReg tag, GPRReg payload)
{
- move(regT0, regT2);
- load32(operandSlot, regT3);
- compileGetDirectOffset(regT2, regT1, regT0, regT3, KnownNotFinal);
+ uintptr_t rawAddress = bitwise_cast<uintptr_t>(operand);
+ load32(bitwise_cast<void*>(rawAddress + TagOffset), tag);
+ load32(bitwise_cast<void*>(rawAddress + PayloadOffset), payload);
}
-
-void JIT::emitGetGlobalVar(uintptr_t operand)
+void JIT::emitGetVarFromIndirectPointer(JSValue** operand, GPRReg tag, GPRReg payload)
{
- load32(reinterpret_cast<char*>(operand) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag), regT1);
- load32(reinterpret_cast<char*>(operand) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), regT0);
+ loadPtr(operand, payload);
+ load32(Address(payload, TagOffset), tag);
+ load32(Address(payload, PayloadOffset), payload);
}
void JIT::emitGetClosureVar(int scope, uintptr_t operand)
{
emitLoad(scope, regT1, regT0);
- loadPtr(Address(regT0, JSVariableObject::offsetOfRegisters()), regT0);
- load32(Address(regT0, operand * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), regT1);
- load32(Address(regT0, operand * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), regT0);
+ load32(Address(regT0, JSEnvironmentRecord::offsetOfVariables() + operand * sizeof(Register) + TagOffset), regT1);
+ load32(Address(regT0, JSEnvironmentRecord::offsetOfVariables() + operand * sizeof(Register) + PayloadOffset), regT0);
}
void JIT::emit_op_get_from_scope(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
int scope = currentInstruction[2].u.operand;
- ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
+ ResolveType resolveType = GetPutInfo(currentInstruction[4].u.operand).resolveType();
Structure** structureSlot = currentInstruction[5].u.structure.slot();
uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&currentInstruction[6].u.pointer);
+
+ auto emitCode = [&] (ResolveType resolveType, bool indirectLoadForOperand) {
+ switch (resolveType) {
+ case GlobalProperty:
+ case GlobalPropertyWithVarInjectionChecks: {
+ emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection.
+ GPRReg base = regT2;
+ GPRReg resultTag = regT1;
+ GPRReg resultPayload = regT0;
+ GPRReg offset = regT3;
+
+ move(regT0, base);
+ load32(operandSlot, offset);
+ if (!ASSERT_DISABLED) {
+ Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(firstOutOfLineOffset));
+ abortWithReason(JITOffsetIsNotOutOfLine);
+ isOutOfLine.link(this);
+ }
+ loadPtr(Address(base, JSObject::butterflyOffset()), base);
+ neg32(offset);
+ load32(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), resultPayload);
+ load32(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), resultTag);
+ break;
+ }
+ case GlobalVar:
+ case GlobalVarWithVarInjectionChecks:
+ case GlobalLexicalVar:
+ case GlobalLexicalVarWithVarInjectionChecks:
+ emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
+ if (indirectLoadForOperand)
+ emitGetVarFromIndirectPointer(bitwise_cast<JSValue**>(operandSlot), regT1, regT0);
+ else
+ emitGetVarFromPointer(bitwise_cast<JSValue*>(*operandSlot), regT1, regT0);
+ if (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks) // TDZ check.
+ addSlowCase(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));
+ break;
+ case ClosureVar:
+ case ClosureVarWithVarInjectionChecks:
+ emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
+ emitGetClosureVar(scope, *operandSlot);
+ break;
+ case Dynamic:
+ addSlowCase(jump());
+ break;
+ case ModuleVar:
+ case LocalClosureVar:
+ case UnresolvedProperty:
+ case UnresolvedPropertyWithVarInjectionChecks:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ };
switch (resolveType) {
- case GlobalProperty:
- case GlobalPropertyWithVarInjectionChecks:
- emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection.
- emitGetGlobalProperty(operandSlot);
- break;
- case GlobalVar:
- case GlobalVarWithVarInjectionChecks:
- emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
- emitGetGlobalVar(*operandSlot);
- break;
- case ClosureVar:
- case ClosureVarWithVarInjectionChecks:
- emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
- emitGetClosureVar(scope, *operandSlot);
- break;
- case Dynamic:
+ case UnresolvedProperty:
+ case UnresolvedPropertyWithVarInjectionChecks: {
+ JumpList skipToEnd;
+ load32(&currentInstruction[4], regT0);
+ and32(TrustedImm32(GetPutInfo::typeBits), regT0); // Load ResolveType into T0
+
+ Jump isGlobalProperty = branch32(Equal, regT0, TrustedImm32(GlobalProperty));
+ Jump notGlobalPropertyWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalPropertyWithVarInjectionChecks));
+ isGlobalProperty.link(this);
+ emitCode(GlobalProperty, false);
+ skipToEnd.append(jump());
+ notGlobalPropertyWithVarInjections.link(this);
+
+ Jump notGlobalLexicalVar = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVar));
+ emitCode(GlobalLexicalVar, true);
+ skipToEnd.append(jump());
+ notGlobalLexicalVar.link(this);
+
+ Jump notGlobalLexicalVarWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVarWithVarInjectionChecks));
+ emitCode(GlobalLexicalVarWithVarInjectionChecks, true);
+ skipToEnd.append(jump());
+ notGlobalLexicalVarWithVarInjections.link(this);
+
addSlowCase(jump());
+
+ skipToEnd.link(this);
+ break;
+ }
+
+ default:
+ emitCode(resolveType, false);
break;
}
emitValueProfilingSite();
@@ -790,141 +948,246 @@ void JIT::emit_op_get_from_scope(Instruction* currentInstruction)
void JIT::emitSlow_op_get_from_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
int dst = currentInstruction[1].u.operand;
- ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
+ ResolveType resolveType = GetPutInfo(currentInstruction[4].u.operand).resolveType();
if (resolveType == GlobalVar || resolveType == ClosureVar)
return;
+ if (resolveType == GlobalLexicalVarWithVarInjectionChecks) // Var Injections check.
+ linkSlowCase(iter);
+
+ if (resolveType == UnresolvedProperty || resolveType == UnresolvedPropertyWithVarInjectionChecks) {
+ // GlobalProperty/GlobalPropertyWithVarInjectionChecks
+ linkSlowCase(iter); // emitLoadWithStructureCheck
+ // GlobalLexicalVar
+ linkSlowCase(iter); // TDZ check.
+ // GlobalLexicalVarWithVarInjectionChecks.
+ linkSlowCase(iter); // var injection check.
+ linkSlowCase(iter); // TDZ check.
+ }
+
linkSlowCase(iter);
callOperation(WithProfile, operationGetFromScope, dst, currentInstruction);
}
-void JIT::emitPutGlobalProperty(uintptr_t* operandSlot, int value)
-{
- emitLoad(value, regT3, regT2);
-
- loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
- loadPtr(operandSlot, regT1);
- negPtr(regT1);
- store32(regT3, BaseIndex(regT0, regT1, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
- store32(regT2, BaseIndex(regT0, regT1, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
-}
-
-void JIT::emitNotifyWrite(RegisterID tag, RegisterID payload, RegisterID scratch, VariableWatchpointSet* set)
+void JIT::emitPutGlobalVariable(JSValue* operand, int value, WatchpointSet* set)
{
- if (!set || set->state() == IsInvalidated)
- return;
-
- load8(set->addressOfState(), scratch);
-
- JumpList ready;
-
- ready.append(branch32(Equal, scratch, TrustedImm32(IsInvalidated)));
-
- if (set->state() == ClearWatchpoint) {
- Jump isWatched = branch32(NotEqual, scratch, TrustedImm32(ClearWatchpoint));
-
- store32(tag, &set->addressOfInferredValue()->u.asBits.tag);
- store32(payload, &set->addressOfInferredValue()->u.asBits.payload);
- store8(TrustedImm32(IsWatched), set->addressOfState());
- ready.append(jump());
-
- isWatched.link(this);
- }
-
- Jump definitelyNotEqual = branch32(
- NotEqual, AbsoluteAddress(&set->addressOfInferredValue()->u.asBits.payload), payload);
- ready.append(branch32(
- Equal, AbsoluteAddress(&set->addressOfInferredValue()->u.asBits.tag), tag));
- definitelyNotEqual.link(this);
- addSlowCase(branchTest8(NonZero, AbsoluteAddress(set->addressOfSetIsNotEmpty())));
- store8(TrustedImm32(IsInvalidated), set->addressOfState());
- store32(
- TrustedImm32(JSValue::EmptyValueTag), &set->addressOfInferredValue()->u.asBits.tag);
- store32(TrustedImm32(0), &set->addressOfInferredValue()->u.asBits.payload);
-
- ready.link(this);
+ emitLoad(value, regT1, regT0);
+ emitNotifyWrite(set);
+ uintptr_t rawAddress = bitwise_cast<uintptr_t>(operand);
+ store32(regT1, bitwise_cast<void*>(rawAddress + TagOffset));
+ store32(regT0, bitwise_cast<void*>(rawAddress + PayloadOffset));
}
-void JIT::emitPutGlobalVar(uintptr_t operand, int value, VariableWatchpointSet* set)
+void JIT::emitPutGlobalVariableIndirect(JSValue** addressOfOperand, int value, WatchpointSet** indirectWatchpointSet)
{
emitLoad(value, regT1, regT0);
- emitNotifyWrite(regT1, regT0, regT2, set);
- store32(regT1, reinterpret_cast<char*>(operand) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
- store32(regT0, reinterpret_cast<char*>(operand) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ loadPtr(indirectWatchpointSet, regT2);
+ emitNotifyWrite(regT2);
+ loadPtr(addressOfOperand, regT2);
+ store32(regT1, Address(regT2, TagOffset));
+ store32(regT0, Address(regT2, PayloadOffset));
}
-void JIT::emitPutClosureVar(int scope, uintptr_t operand, int value)
+void JIT::emitPutClosureVar(int scope, uintptr_t operand, int value, WatchpointSet* set)
{
emitLoad(value, regT3, regT2);
emitLoad(scope, regT1, regT0);
- loadPtr(Address(regT0, JSVariableObject::offsetOfRegisters()), regT0);
- store32(regT3, Address(regT0, operand * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
- store32(regT2, Address(regT0, operand * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ emitNotifyWrite(set);
+ store32(regT3, Address(regT0, JSEnvironmentRecord::offsetOfVariables() + operand * sizeof(Register) + TagOffset));
+ store32(regT2, Address(regT0, JSEnvironmentRecord::offsetOfVariables() + operand * sizeof(Register) + PayloadOffset));
}
void JIT::emit_op_put_to_scope(Instruction* currentInstruction)
{
int scope = currentInstruction[1].u.operand;
int value = currentInstruction[3].u.operand;
- ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
+ GetPutInfo getPutInfo = GetPutInfo(currentInstruction[4].u.operand);
+ ResolveType resolveType = getPutInfo.resolveType();
Structure** structureSlot = currentInstruction[5].u.structure.slot();
uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&currentInstruction[6].u.pointer);
+
+ auto emitCode = [&] (ResolveType resolveType, bool indirectLoadForOperand) {
+ switch (resolveType) {
+ case GlobalProperty:
+ case GlobalPropertyWithVarInjectionChecks: {
+ emitWriteBarrier(m_codeBlock->globalObject(), value, ShouldFilterValue);
+ emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection.
+ emitLoad(value, regT3, regT2);
+
+ loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
+ loadPtr(operandSlot, regT1);
+ negPtr(regT1);
+ store32(regT3, BaseIndex(regT0, regT1, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ store32(regT2, BaseIndex(regT0, regT1, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ break;
+ }
+ case GlobalVar:
+ case GlobalVarWithVarInjectionChecks:
+ case GlobalLexicalVar:
+ case GlobalLexicalVarWithVarInjectionChecks: {
+ JSScope* constantScope = JSScope::constantScopeForCodeBlock(resolveType, m_codeBlock);
+ RELEASE_ASSERT(constantScope);
+ emitWriteBarrier(constantScope, value, ShouldFilterValue);
+ emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
+ if (!isInitialization(getPutInfo.initializationMode()) && (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)) {
+ // We need to do a TDZ check here because we can't always prove we need to emit TDZ checks statically.
+ if (indirectLoadForOperand)
+ emitGetVarFromIndirectPointer(bitwise_cast<JSValue**>(operandSlot), regT1, regT0);
+ else
+ emitGetVarFromPointer(bitwise_cast<JSValue*>(*operandSlot), regT1, regT0);
+ addSlowCase(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));
+ }
+ if (indirectLoadForOperand)
+ emitPutGlobalVariableIndirect(bitwise_cast<JSValue**>(operandSlot), value, bitwise_cast<WatchpointSet**>(&currentInstruction[5]));
+ else
+ emitPutGlobalVariable(bitwise_cast<JSValue*>(*operandSlot), value, currentInstruction[5].u.watchpointSet);
+ break;
+ }
+ case LocalClosureVar:
+ case ClosureVar:
+ case ClosureVarWithVarInjectionChecks:
+ emitWriteBarrier(scope, value, ShouldFilterValue);
+ emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
+ emitPutClosureVar(scope, *operandSlot, value, currentInstruction[5].u.watchpointSet);
+ break;
+ case ModuleVar:
+ case Dynamic:
+ addSlowCase(jump());
+ break;
+ case UnresolvedProperty:
+ case UnresolvedPropertyWithVarInjectionChecks:
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ };
switch (resolveType) {
- case GlobalProperty:
- case GlobalPropertyWithVarInjectionChecks:
- emitWriteBarrier(m_codeBlock->globalObject(), value, ShouldFilterValue);
- emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection.
- emitPutGlobalProperty(operandSlot, value);
- break;
- case GlobalVar:
- case GlobalVarWithVarInjectionChecks:
- emitWriteBarrier(m_codeBlock->globalObject(), value, ShouldFilterValue);
- emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
- emitPutGlobalVar(*operandSlot, value, currentInstruction[5].u.watchpointSet);
- break;
- case ClosureVar:
- case ClosureVarWithVarInjectionChecks:
- emitWriteBarrier(scope, value, ShouldFilterValue);
- emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
- emitPutClosureVar(scope, *operandSlot, value);
- break;
- case Dynamic:
+ case UnresolvedProperty:
+ case UnresolvedPropertyWithVarInjectionChecks: {
+ JumpList skipToEnd;
+ load32(&currentInstruction[4], regT0);
+ and32(TrustedImm32(GetPutInfo::typeBits), regT0); // Load ResolveType into T0
+
+ Jump isGlobalProperty = branch32(Equal, regT0, TrustedImm32(GlobalProperty));
+ Jump notGlobalPropertyWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalPropertyWithVarInjectionChecks));
+ isGlobalProperty.link(this);
+ emitCode(GlobalProperty, false);
+ skipToEnd.append(jump());
+ notGlobalPropertyWithVarInjections.link(this);
+
+ Jump notGlobalLexicalVar = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVar));
+ emitCode(GlobalLexicalVar, true);
+ skipToEnd.append(jump());
+ notGlobalLexicalVar.link(this);
+
+ Jump notGlobalLexicalVarWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVarWithVarInjectionChecks));
+ emitCode(GlobalLexicalVarWithVarInjectionChecks, true);
+ skipToEnd.append(jump());
+ notGlobalLexicalVarWithVarInjections.link(this);
+
addSlowCase(jump());
+
+ skipToEnd.link(this);
+ break;
+ }
+
+ default:
+ emitCode(resolveType, false);
break;
}
}
void JIT::emitSlow_op_put_to_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
+ GetPutInfo getPutInfo = GetPutInfo(currentInstruction[4].u.operand);
+ ResolveType resolveType = getPutInfo.resolveType();
unsigned linkCount = 0;
- if (resolveType != GlobalVar && resolveType != ClosureVar)
+ if (resolveType != GlobalVar && resolveType != ClosureVar && resolveType != LocalClosureVar && resolveType != GlobalLexicalVar)
linkCount++;
- if ((resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
- && currentInstruction[5].u.watchpointSet->state() != IsInvalidated)
+ if (resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks
+ || resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks
+ || resolveType == ClosureVar || resolveType == ClosureVarWithVarInjectionChecks
+ || resolveType == LocalClosureVar)
linkCount++;
+ if (!isInitialization(getPutInfo.initializationMode()) && (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)) // TDZ check.
+ linkCount++;
+ if (resolveType == UnresolvedProperty || resolveType == UnresolvedPropertyWithVarInjectionChecks) {
+ // GlobalProperty/GlobalPropertyWithVarInjectionChecks
+ linkCount++; // emitLoadWithStructureCheck
+
+ // GlobalLexicalVar
+ bool needsTDZCheck = !isInitialization(getPutInfo.initializationMode());
+ if (needsTDZCheck)
+ linkCount++;
+ linkCount++; // Notify write check.
+
+ // GlobalLexicalVarWithVarInjectionChecks
+ linkCount++; // var injection check.
+ if (needsTDZCheck)
+ linkCount++;
+ linkCount++; // Notify write check.
+ }
if (!linkCount)
return;
while (linkCount--)
linkSlowCase(iter);
- callOperation(operationPutToScope, currentInstruction);
+
+ if (resolveType == ModuleVar) {
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_throw_strict_mode_readonly_property_write_error);
+ slowPathCall.call();
+ } else
+ callOperation(operationPutToScope, currentInstruction);
+}
+
+void JIT::emit_op_get_from_arguments(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int arguments = currentInstruction[2].u.operand;
+ int index = currentInstruction[3].u.operand;
+
+ emitLoadPayload(arguments, regT0);
+ load32(Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>) + TagOffset), regT1);
+ load32(Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>) + PayloadOffset), regT0);
+ emitValueProfilingSite();
+ emitStore(dst, regT1, regT0);
+}
+
+void JIT::emit_op_put_to_arguments(Instruction* currentInstruction)
+{
+ int arguments = currentInstruction[1].u.operand;
+ int index = currentInstruction[2].u.operand;
+ int value = currentInstruction[3].u.operand;
+
+ emitWriteBarrier(arguments, value, ShouldFilterValue);
+
+ emitLoadPayload(arguments, regT0);
+ emitLoad(value, regT1, regT2);
+ store32(regT1, Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>) + TagOffset));
+ store32(regT2, Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>) + PayloadOffset));
}
-void JIT::emit_op_init_global_const(Instruction* currentInstruction)
+void JIT::emit_op_get_by_id_with_this(Instruction* currentInstruction)
{
- WriteBarrier<Unknown>* registerPointer = currentInstruction[1].u.registerPointer;
- int value = currentInstruction[2].u.operand;
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_by_id_with_this);
+ slowPathCall.call();
+}
- JSGlobalObject* globalObject = m_codeBlock->globalObject();
+void JIT::emit_op_get_by_val_with_this(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_by_val_with_this);
+ slowPathCall.call();
+}
- emitWriteBarrier(globalObject, value, ShouldFilterValue);
+void JIT::emit_op_put_by_id_with_this(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_put_by_id_with_this);
+ slowPathCall.call();
+}
- emitLoad(value, regT1, regT0);
-
- store32(regT1, registerPointer->tagPointer());
- store32(regT0, registerPointer->payloadPointer());
+void JIT::emit_op_put_by_val_with_this(Instruction* currentInstruction)
+{
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_put_by_val_with_this);
+ slowPathCall.call();
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/jit/JITRightShiftGenerator.cpp b/Source/JavaScriptCore/jit/JITRightShiftGenerator.cpp
new file mode 100644
index 000000000..4e75fafc0
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITRightShiftGenerator.cpp
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JITRightShiftGenerator.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+void JITRightShiftGenerator::generateFastPath(CCallHelpers& jit)
+{
+ ASSERT(m_scratchGPR != InvalidGPRReg);
+ ASSERT(m_scratchGPR != m_left.payloadGPR());
+ ASSERT(m_scratchGPR != m_right.payloadGPR());
+#if USE(JSVALUE32_64)
+ ASSERT(m_scratchGPR != m_left.tagGPR());
+ ASSERT(m_scratchGPR != m_right.tagGPR());
+ ASSERT(m_scratchFPR != InvalidFPRReg);
+#endif
+
+ ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());
+
+ m_didEmitFastPath = true;
+
+ if (m_rightOperand.isConstInt32()) {
+ // Try to do (intVar >> intConstant).
+ CCallHelpers::Jump notInt = jit.branchIfNotInt32(m_left);
+
+ jit.moveValueRegs(m_left, m_result);
+ int32_t shiftAmount = m_rightOperand.asConstInt32() & 0x1f;
+ if (shiftAmount) {
+ if (m_shiftType == SignedShift)
+ jit.rshift32(CCallHelpers::Imm32(shiftAmount), m_result.payloadGPR());
+ else
+ jit.urshift32(CCallHelpers::Imm32(shiftAmount), m_result.payloadGPR());
+#if USE(JSVALUE64)
+ jit.or64(GPRInfo::tagTypeNumberRegister, m_result.payloadGPR());
+#endif
+ }
+
+ if (jit.supportsFloatingPointTruncate()) {
+ m_endJumpList.append(jit.jump()); // Terminate the above case before emitting more code.
+
+ // Try to do (doubleVar >> intConstant).
+ notInt.link(&jit);
+
+ m_slowPathJumpList.append(jit.branchIfNotNumber(m_left, m_scratchGPR));
+
+ jit.unboxDoubleNonDestructive(m_left, m_leftFPR, m_scratchGPR, m_scratchFPR);
+ m_slowPathJumpList.append(jit.branchTruncateDoubleToInt32(m_leftFPR, m_scratchGPR));
+
+ if (shiftAmount) {
+ if (m_shiftType == SignedShift)
+ jit.rshift32(CCallHelpers::Imm32(shiftAmount), m_scratchGPR);
+ else
+ jit.urshift32(CCallHelpers::Imm32(shiftAmount), m_scratchGPR);
+ }
+ jit.boxInt32(m_scratchGPR, m_result);
+
+ } else
+ m_slowPathJumpList.append(notInt);
+
+ } else {
+ // Try to do (intConstant >> intVar) or (intVar >> intVar).
+ m_slowPathJumpList.append(jit.branchIfNotInt32(m_right));
+
+ GPRReg rightOperandGPR = m_right.payloadGPR();
+ if (rightOperandGPR == m_result.payloadGPR())
+ rightOperandGPR = m_scratchGPR;
+
+ CCallHelpers::Jump leftNotInt;
+ if (m_leftOperand.isConstInt32()) {
+ jit.move(m_right.payloadGPR(), rightOperandGPR);
+#if USE(JSVALUE32_64)
+ jit.move(m_right.tagGPR(), m_result.tagGPR());
+#endif
+ jit.move(CCallHelpers::Imm32(m_leftOperand.asConstInt32()), m_result.payloadGPR());
+ } else {
+ leftNotInt = jit.branchIfNotInt32(m_left);
+ jit.move(m_right.payloadGPR(), rightOperandGPR);
+ jit.moveValueRegs(m_left, m_result);
+ }
+
+ if (m_shiftType == SignedShift)
+ jit.rshift32(rightOperandGPR, m_result.payloadGPR());
+ else
+ jit.urshift32(rightOperandGPR, m_result.payloadGPR());
+#if USE(JSVALUE64)
+ jit.or64(GPRInfo::tagTypeNumberRegister, m_result.payloadGPR());
+#endif
+ if (m_leftOperand.isConstInt32())
+ return;
+
+ if (jit.supportsFloatingPointTruncate()) {
+ m_endJumpList.append(jit.jump()); // Terminate the above case before emitting more code.
+
+ // Try to do (doubleVar >> intVar).
+ leftNotInt.link(&jit);
+
+ m_slowPathJumpList.append(jit.branchIfNotNumber(m_left, m_scratchGPR));
+ jit.unboxDoubleNonDestructive(m_left, m_leftFPR, m_scratchGPR, m_scratchFPR);
+ m_slowPathJumpList.append(jit.branchTruncateDoubleToInt32(m_leftFPR, m_scratchGPR));
+
+ if (m_shiftType == SignedShift)
+ jit.rshift32(m_right.payloadGPR(), m_scratchGPR);
+ else
+ jit.urshift32(m_right.payloadGPR(), m_scratchGPR);
+ jit.boxInt32(m_scratchGPR, m_result);
+
+ } else
+ m_slowPathJumpList.append(leftNotInt);
+ }
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITRightShiftGenerator.h b/Source/JavaScriptCore/jit/JITRightShiftGenerator.h
new file mode 100644
index 000000000..fa8c3e30c
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITRightShiftGenerator.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "JITBitBinaryOpGenerator.h"
+
+namespace JSC {
+
+class JITRightShiftGenerator : public JITBitBinaryOpGenerator {
+public:
+ enum ShiftType {
+ SignedShift,
+ UnsignedShift
+ };
+
+ JITRightShiftGenerator(const SnippetOperand& leftOperand, const SnippetOperand& rightOperand,
+ JSValueRegs result, JSValueRegs left, JSValueRegs right,
+ FPRReg leftFPR, GPRReg scratchGPR, FPRReg scratchFPR, ShiftType type = SignedShift)
+ : JITBitBinaryOpGenerator(leftOperand, rightOperand, result, left, right, scratchGPR)
+ , m_shiftType(type)
+ , m_leftFPR(leftFPR)
+ , m_scratchFPR(scratchFPR)
+ { }
+
+ void generateFastPath(CCallHelpers&);
+
+private:
+ ShiftType m_shiftType;
+ FPRReg m_leftFPR;
+ FPRReg m_scratchFPR;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
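A rough usage sketch for the generator declared above. The call-site names (jit, leftOperand, rightOperand, the JSValueRegs and scratch registers, slowCases) are hypothetical, and didEmitFastPath()/endJumpList()/slowPathJumpList() are assumed to be inherited from JITBitBinaryOpGenerator, which is not part of this diff:

JITRightShiftGenerator gen(
    leftOperand, rightOperand,                // SnippetOperands describing the two inputs
    resultRegs, leftRegs, rightRegs,          // result and input JSValueRegs
    leftFPR, scratchGPR, scratchFPR,          // FPR for the unboxed left value, plus scratches
    JITRightShiftGenerator::SignedShift);     // UnsignedShift would correspond to ">>>"
gen.generateFastPath(jit);                    // jit is the CCallHelpers& used for code generation
if (gen.didEmitFastPath()) {
    gen.endJumpList().link(&jit);             // fast path lands here with a boxed result in resultRegs
    slowCases.append(gen.slowPathJumpList()); // non-int32 / non-number inputs fall back to the slow path
}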
diff --git a/Source/JavaScriptCore/jit/JITStubRoutine.cpp b/Source/JavaScriptCore/jit/JITStubRoutine.cpp
index 28543a8b8..74e537747 100644
--- a/Source/JavaScriptCore/jit/JITStubRoutine.cpp
+++ b/Source/JavaScriptCore/jit/JITStubRoutine.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,13 +29,18 @@
#if ENABLE(JIT)
#include "JSObject.h"
-
+#include "JSCInlines.h"
#include "SlotVisitor.h"
namespace JSC {
JITStubRoutine::~JITStubRoutine() { }
+bool JITStubRoutine::visitWeak(VM&)
+{
+ return true;
+}
+
void JITStubRoutine::observeZeroRefCount()
{
RELEASE_ASSERT(!m_refCount);
diff --git a/Source/JavaScriptCore/jit/JITStubRoutine.h b/Source/JavaScriptCore/jit/JITStubRoutine.h
index 020ef6907..2bf37fab4 100644
--- a/Source/JavaScriptCore/jit/JITStubRoutine.h
+++ b/Source/JavaScriptCore/jit/JITStubRoutine.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,17 +23,12 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef JITStubRoutine_h
-#define JITStubRoutine_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(JIT)
#include "ExecutableAllocator.h"
#include "MacroAssemblerCodeRef.h"
-#include <wtf/RefCounted.h>
-#include <wtf/Vector.h>
namespace JSC {
@@ -61,24 +56,22 @@ public:
// Use this if you want to pass a CodePtr to someone who insists on taking
// a RefPtr<JITStubRoutine>.
- static PassRefPtr<JITStubRoutine> createSelfManagedRoutine(
+ static Ref<JITStubRoutine> createSelfManagedRoutine(
MacroAssemblerCodePtr rawCodePointer)
{
- return adoptRef(new JITStubRoutine(MacroAssemblerCodeRef::createSelfManagedCodeRef(rawCodePointer)));
+ return adoptRef(*new JITStubRoutine(MacroAssemblerCodeRef::createSelfManagedCodeRef(rawCodePointer)));
}
virtual ~JITStubRoutine();
+ virtual void aboutToDie() { }
// MacroAssemblerCodeRef is copyable, but at the cost of reference
// counting churn. Returning a reference is a good way of reducing
// the churn.
const MacroAssemblerCodeRef& code() const { return m_code; }
- static MacroAssemblerCodePtr asCodePtr(PassRefPtr<JITStubRoutine> stubRoutine)
+ static MacroAssemblerCodePtr asCodePtr(Ref<JITStubRoutine>&& stubRoutine)
{
- if (!stubRoutine)
- return MacroAssemblerCodePtr();
-
MacroAssemblerCodePtr result = stubRoutine->code().code();
ASSERT(!!result);
return result;
@@ -104,29 +97,15 @@ public:
static bool canPerformRangeFilter()
{
-#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED)
return true;
-#else
- return false;
-#endif
}
static uintptr_t filteringStartAddress()
{
-#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED)
return startOfFixedExecutableMemoryPool;
-#else
- UNREACHABLE_FOR_PLATFORM();
- return 0;
-#endif
}
static size_t filteringExtentSize()
{
-#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED)
return fixedExecutableMemoryPoolSize;
-#else
- UNREACHABLE_FOR_PLATFORM();
- return 0;
-#endif
}
static bool passesFilter(uintptr_t address)
{
@@ -141,6 +120,11 @@ public:
return true;
}
+
+ // Return true if this routine is still valid afterwards; return false if it is now invalid. If you
+ // return false, you usually do not need to do any clearing, because the expectation is that the
+ // routine will simply be destroyed.
+ virtual bool visitWeak(VM&);
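A minimal sketch of a subclass honoring this contract; the ExampleAccessStubRoutine class, its m_structure member, and the Heap::isMarked check are illustrative assumptions, not part of this patch:

// Hypothetical stub routine that was compiled against a particular Structure.
class ExampleAccessStubRoutine : public JITStubRoutine {
public:
    // (constructor forwarding a MacroAssemblerCodeRef to JITStubRoutine omitted)
    bool visitWeak(VM&) override
    {
        // Valid only while the structure we depend on is still marked; returning
        // false tells the owner to drop this routine so it can be destroyed.
        return Heap::isMarked(m_structure.get());
    }
private:
    WriteBarrier<Structure> m_structure;
};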
protected:
virtual void observeZeroRefCount();
@@ -150,15 +134,9 @@ protected:
};
// Helper for the creation of simple stub routines that need no help from the GC.
-#define FINALIZE_CODE_FOR_STUB(patchBuffer, dataLogFArguments) \
- (adoptRef(new JITStubRoutine(FINALIZE_CODE((patchBuffer), dataLogFArguments))))
-
-#define FINALIZE_CODE_FOR_DFG_STUB(patchBuffer, dataLogFArguments) \
- (adoptRef(new JITStubRoutine(FINALIZE_DFG_CODE((patchBuffer), dataLogFArguments))))
+#define FINALIZE_CODE_FOR_STUB(codeBlock, patchBuffer, dataLogFArguments) \
+ (adoptRef(new JITStubRoutine(FINALIZE_CODE_FOR((codeBlock), (patchBuffer), dataLogFArguments))))
} // namespace JSC
#endif // ENABLE(JIT)
-
-#endif // JITStubRoutine_h
-
diff --git a/Source/JavaScriptCore/jit/JITStubs.cpp b/Source/JavaScriptCore/jit/JITStubs.cpp
deleted file mode 100644
index 47c509e3d..000000000
--- a/Source/JavaScriptCore/jit/JITStubs.cpp
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (C) 2008, 2009, 2013 Apple Inc. All rights reserved.
- * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
- * Copyright (C) Research In Motion Limited 2010, 2011. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
- * its contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-
-#if ENABLE(JIT)
-#include "JITStubs.h"
-
-#if CPU(ARM_TRADITIONAL)
-#include "JITStubsARM.h"
-#elif CPU(ARM_THUMB2)
-#include "JITStubsARMv7.h"
-#elif CPU(X86)
-#include "JITStubsX86.h"
-#elif CPU(X86_64)
-#include "JITStubsX86_64.h"
-#elif CPU(ARM64)
-// There isn't an ARM64 specific .h file
-#elif CPU(MIPS)
-// There isn't a MIPS specific .h file
-#elif CPU(SH4)
-// There isn't an SH4 specific .h file
-#else
-#error "JIT not supported on this platform."
-#endif
-
-#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITStubs.h b/Source/JavaScriptCore/jit/JITStubs.h
deleted file mode 100644
index 24d95dfd4..000000000
--- a/Source/JavaScriptCore/jit/JITStubs.h
+++ /dev/null
@@ -1,59 +0,0 @@
-/*
- * Copyright (C) 2008, 2013 Apple Inc. All rights reserved.
- * Copyright (C) Research In Motion Limited 2010. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
- * its contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef JITStubs_h
-#define JITStubs_h
-
-#include "JSCJSValue.h"
-
-namespace JSC {
-
-#if ENABLE(JIT)
-
-#if OS(WINDOWS)
-class ExecState;
-class Register;
-struct ProtoCallFrame;
-
-extern "C" {
- EncodedJSValue callToJavaScript(void*, ExecState**, ProtoCallFrame*, Register*);
- void returnFromJavaScript();
- EncodedJSValue callToNativeFunction(void*, ExecState**, ProtoCallFrame*, Register*);
-}
-#endif
-
-#if USE(MASM_PROBE)
-extern "C" void ctiMasmProbeTrampoline();
-#endif
-
-#endif // ENABLE(JIT)
-
-} // namespace JSC
-
-#endif // JITStubs_h
diff --git a/Source/JavaScriptCore/jit/JITStubsARM.h b/Source/JavaScriptCore/jit/JITStubsARM.h
deleted file mode 100644
index fd59188f4..000000000
--- a/Source/JavaScriptCore/jit/JITStubsARM.h
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * Copyright (C) 2008, 2009, 2013 Apple Inc. All rights reserved.
- * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
- * Copyright (C) Research In Motion Limited 2010, 2011. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
- * its contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef JITStubsARM_h
-#define JITStubsARM_h
-
-#if !CPU(ARM_TRADITIONAL)
-#error "JITStubsARM.h should only be #included if CPU(ARM_TRADITIONAL)"
-#endif
-
-#if !USE(JSVALUE32_64)
-#error "JITStubsARM.h only implements USE(JSVALUE32_64)"
-#endif
-
-namespace JSC {
-
-#if COMPILER(GCC)
-
-#if USE(MASM_PROBE)
-// The following are offsets for MacroAssembler::ProbeContext fields accessed
-// by the ctiMasmProbeTrampoline stub.
-
-#define PTR_SIZE 4
-#define PROBE_PROBE_FUNCTION_OFFSET (0 * PTR_SIZE)
-#define PROBE_ARG1_OFFSET (1 * PTR_SIZE)
-#define PROBE_ARG2_OFFSET (2 * PTR_SIZE)
-
-#define PROBE_FIRST_GPREG_OFFSET (4 * PTR_SIZE)
-
-#define GPREG_SIZE 4
-#define PROBE_CPU_R0_OFFSET (PROBE_FIRST_GPREG_OFFSET + (0 * GPREG_SIZE))
-#define PROBE_CPU_R1_OFFSET (PROBE_FIRST_GPREG_OFFSET + (1 * GPREG_SIZE))
-#define PROBE_CPU_R2_OFFSET (PROBE_FIRST_GPREG_OFFSET + (2 * GPREG_SIZE))
-#define PROBE_CPU_R3_OFFSET (PROBE_FIRST_GPREG_OFFSET + (3 * GPREG_SIZE))
-#define PROBE_CPU_R4_OFFSET (PROBE_FIRST_GPREG_OFFSET + (4 * GPREG_SIZE))
-#define PROBE_CPU_R5_OFFSET (PROBE_FIRST_GPREG_OFFSET + (5 * GPREG_SIZE))
-#define PROBE_CPU_R6_OFFSET (PROBE_FIRST_GPREG_OFFSET + (6 * GPREG_SIZE))
-#define PROBE_CPU_R7_OFFSET (PROBE_FIRST_GPREG_OFFSET + (7 * GPREG_SIZE))
-#define PROBE_CPU_R8_OFFSET (PROBE_FIRST_GPREG_OFFSET + (8 * GPREG_SIZE))
-#define PROBE_CPU_R9_OFFSET (PROBE_FIRST_GPREG_OFFSET + (9 * GPREG_SIZE))
-#define PROBE_CPU_R10_OFFSET (PROBE_FIRST_GPREG_OFFSET + (10 * GPREG_SIZE))
-#define PROBE_CPU_R11_OFFSET (PROBE_FIRST_GPREG_OFFSET + (11 * GPREG_SIZE))
-#define PROBE_CPU_IP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (12 * GPREG_SIZE))
-#define PROBE_CPU_SP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (13 * GPREG_SIZE))
-#define PROBE_CPU_LR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (14 * GPREG_SIZE))
-#define PROBE_CPU_PC_OFFSET (PROBE_FIRST_GPREG_OFFSET + (15 * GPREG_SIZE))
-
-#define PROBE_CPU_APSR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (16 * GPREG_SIZE))
-#define PROBE_CPU_FPSCR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (17 * GPREG_SIZE))
-
-#define PROBE_FIRST_FPREG_OFFSET (PROBE_FIRST_GPREG_OFFSET + (18 * GPREG_SIZE))
-
-#define FPREG_SIZE 8
-#define PROBE_CPU_D0_OFFSET (PROBE_FIRST_FPREG_OFFSET + (0 * FPREG_SIZE))
-#define PROBE_CPU_D1_OFFSET (PROBE_FIRST_FPREG_OFFSET + (1 * FPREG_SIZE))
-#define PROBE_CPU_D2_OFFSET (PROBE_FIRST_FPREG_OFFSET + (2 * FPREG_SIZE))
-#define PROBE_CPU_D3_OFFSET (PROBE_FIRST_FPREG_OFFSET + (3 * FPREG_SIZE))
-#define PROBE_CPU_D4_OFFSET (PROBE_FIRST_FPREG_OFFSET + (4 * FPREG_SIZE))
-#define PROBE_CPU_D5_OFFSET (PROBE_FIRST_FPREG_OFFSET + (5 * FPREG_SIZE))
-#define PROBE_CPU_D6_OFFSET (PROBE_FIRST_FPREG_OFFSET + (6 * FPREG_SIZE))
-#define PROBE_CPU_D7_OFFSET (PROBE_FIRST_FPREG_OFFSET + (7 * FPREG_SIZE))
-#define PROBE_CPU_D8_OFFSET (PROBE_FIRST_FPREG_OFFSET + (8 * FPREG_SIZE))
-#define PROBE_CPU_D9_OFFSET (PROBE_FIRST_FPREG_OFFSET + (9 * FPREG_SIZE))
-#define PROBE_CPU_D10_OFFSET (PROBE_FIRST_FPREG_OFFSET + (10 * FPREG_SIZE))
-#define PROBE_CPU_D11_OFFSET (PROBE_FIRST_FPREG_OFFSET + (11 * FPREG_SIZE))
-#define PROBE_CPU_D12_OFFSET (PROBE_FIRST_FPREG_OFFSET + (12 * FPREG_SIZE))
-#define PROBE_CPU_D13_OFFSET (PROBE_FIRST_FPREG_OFFSET + (13 * FPREG_SIZE))
-#define PROBE_CPU_D14_OFFSET (PROBE_FIRST_FPREG_OFFSET + (14 * FPREG_SIZE))
-#define PROBE_CPU_D15_OFFSET (PROBE_FIRST_FPREG_OFFSET + (15 * FPREG_SIZE))
-
-#define PROBE_SIZE (PROBE_FIRST_FPREG_OFFSET + (16 * FPREG_SIZE))
-
-// These ASSERTs remind you that if you change the layout of ProbeContext,
-// you need to change ctiMasmProbeTrampoline offsets above to match.
-#define PROBE_OFFSETOF(x) offsetof(struct MacroAssembler::ProbeContext, x)
-COMPILE_ASSERT(PROBE_OFFSETOF(probeFunction) == PROBE_PROBE_FUNCTION_OFFSET, ProbeContext_probeFunction_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(arg1) == PROBE_ARG1_OFFSET, ProbeContext_arg1_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(arg2) == PROBE_ARG2_OFFSET, ProbeContext_arg2_offset_matches_ctiMasmProbeTrampoline);
-
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r0) == PROBE_CPU_R0_OFFSET, ProbeContext_cpu_r0_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r1) == PROBE_CPU_R1_OFFSET, ProbeContext_cpu_r1_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r2) == PROBE_CPU_R2_OFFSET, ProbeContext_cpu_r2_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r3) == PROBE_CPU_R3_OFFSET, ProbeContext_cpu_r3_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r4) == PROBE_CPU_R4_OFFSET, ProbeContext_cpu_r4_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r5) == PROBE_CPU_R5_OFFSET, ProbeContext_cpu_r5_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r6) == PROBE_CPU_R6_OFFSET, ProbeContext_cpu_r6_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r7) == PROBE_CPU_R7_OFFSET, ProbeContext_cpu_r7_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r8) == PROBE_CPU_R8_OFFSET, ProbeContext_cpu_r8_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r9) == PROBE_CPU_R9_OFFSET, ProbeContext_cpu_r9_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r10) == PROBE_CPU_R10_OFFSET, ProbeContext_cpu_r10_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r11) == PROBE_CPU_R11_OFFSET, ProbeContext_cpu_r11_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.ip) == PROBE_CPU_IP_OFFSET, ProbeContext_cpu_ip_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.sp) == PROBE_CPU_SP_OFFSET, ProbeContext_cpu_sp_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.lr) == PROBE_CPU_LR_OFFSET, ProbeContext_cpu_lr_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.pc) == PROBE_CPU_PC_OFFSET, ProbeContext_cpu_pc_offset_matches_ctiMasmProbeTrampoline);
-
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.apsr) == PROBE_CPU_APSR_OFFSET, ProbeContext_cpu_apsr_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.fpscr) == PROBE_CPU_FPSCR_OFFSET, ProbeContext_cpu_fpscr_offset_matches_ctiMasmProbeTrampoline);
-
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d0) == PROBE_CPU_D0_OFFSET, ProbeContext_cpu_d0_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d1) == PROBE_CPU_D1_OFFSET, ProbeContext_cpu_d1_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d2) == PROBE_CPU_D2_OFFSET, ProbeContext_cpu_d2_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d3) == PROBE_CPU_D3_OFFSET, ProbeContext_cpu_d3_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d4) == PROBE_CPU_D4_OFFSET, ProbeContext_cpu_d4_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d5) == PROBE_CPU_D5_OFFSET, ProbeContext_cpu_d5_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d6) == PROBE_CPU_D6_OFFSET, ProbeContext_cpu_d6_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d7) == PROBE_CPU_D7_OFFSET, ProbeContext_cpu_d7_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d8) == PROBE_CPU_D8_OFFSET, ProbeContext_cpu_d8_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d9) == PROBE_CPU_D9_OFFSET, ProbeContext_cpu_d9_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d10) == PROBE_CPU_D10_OFFSET, ProbeContext_cpu_d10_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d11) == PROBE_CPU_D11_OFFSET, ProbeContext_cpu_d11_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d12) == PROBE_CPU_D12_OFFSET, ProbeContext_cpu_d12_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d13) == PROBE_CPU_D13_OFFSET, ProbeContext_cpu_d13_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d14) == PROBE_CPU_D14_OFFSET, ProbeContext_cpu_d14_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d15) == PROBE_CPU_D15_OFFSET, ProbeContext_cpu_d15_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(sizeof(MacroAssembler::ProbeContext) == PROBE_SIZE, ProbeContext_size_matches_ctiMasmProbeTrampoline);
-#undef PROBE_OFFSETOF
-
-#endif // USE(MASM_PROBE)
-
-
-#if USE(MASM_PROBE)
-asm (
-".text" "\n"
-".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n"
-HIDE_SYMBOL(ctiMasmProbeTrampoline) "\n"
-INLINE_ARM_FUNCTION(ctiMasmProbeTrampoline) "\n"
-SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n"
-
- // MacroAssembler::probe() has already generated code to store some values.
- // The top of stack now looks like this:
- // sp[0 * ptrSize]: probeFunction
- // sp[1 * ptrSize]: arg1
- // sp[2 * ptrSize]: arg2
- // sp[3 * ptrSize]: saved r3 / S0
- // sp[4 * ptrSize]: saved ip
- // sp[5 * ptrSize]: saved lr
- // sp[6 * ptrSize]: saved sp
-
- "mov ip, sp" "\n"
- "mov r3, sp" "\n"
- "sub r3, r3, #" STRINGIZE_VALUE_OF(PROBE_SIZE) "\n"
-
- // The ARM EABI specifies that the stack needs to be 16 byte aligned.
- "bic r3, r3, #0xf" "\n"
- "mov sp, r3" "\n"
-
- "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
- "add lr, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R0_OFFSET) "\n"
- "stmia lr, { r0-r11 }" "\n"
- "mrs lr, APSR" "\n"
- "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n"
- "vmrs lr, FPSCR" "\n"
- "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSCR_OFFSET) "]" "\n"
-
- "ldr lr, [ip, #0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
- "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "]" "\n"
- "ldr lr, [ip, #1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
- "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG1_OFFSET) "]" "\n"
- "ldr lr, [ip, #2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
- "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG2_OFFSET) "]" "\n"
- "ldr lr, [ip, #3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
- "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R3_OFFSET) "]" "\n"
- "ldr lr, [ip, #4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
- "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n"
- "ldr lr, [ip, #5 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
- "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
- "ldr lr, [ip, #6 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
- "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
-
- "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
-
- "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_D0_OFFSET) "\n"
- "vstmia.64 ip, { d0-d15 }" "\n"
-
- "mov fp, sp" "\n" // Save the ProbeContext*.
-
- "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "]" "\n"
- "mov r0, sp" "\n" // the ProbeContext* arg.
- "blx ip" "\n"
-
- "mov sp, fp" "\n"
-
- // To enable probes to modify register state, we copy all registers
- // out of the ProbeContext before returning.
-
- "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_D15_OFFSET + FPREG_SIZE) "\n"
- "vldmdb.64 ip!, { d0-d15 }" "\n"
- "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R11_OFFSET + GPREG_SIZE) "\n"
- "ldmdb ip, { r0-r11 }" "\n"
- "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSCR_OFFSET) "]" "\n"
- "vmsr FPSCR, ip" "\n"
-
- // There are 5 more registers left to restore: ip, sp, lr, pc, and apsr.
- // There are 2 issues that complicate the restoration of these last few
- // registers:
- //
- // 1. Normal ARM calling convention relies on moving lr to pc to return to
- // the caller. In our case, the address to return to is specified by
- // ProbeContext.cpu.pc. And at that moment, we won't have any available
- // scratch registers to hold the return address (lr needs to hold
- // ProbeContext.cpu.lr, not the return address).
- //
- // The solution is to store the return address on the stack and load the
- // pc from there.
- //
- // 2. Issue 1 means we will need to write to the stack location at
- // ProbeContext.cpu.sp - 4. But if the user probe function had modified
- // the value of ProbeContext.cpu.sp to point into the range from
- // &ProbeContext.cpu.ip through &ProbeContext.cpu.apsr, then the action for
- // Issue 1 may trash the values to be restored before we can restore
- // them.
- //
- // The solution is to check if ProbeContext.cpu.sp contains a value in
- // the undesirable range. If so, we copy the remaining ProbeContext
- // register data to a safe range (at memory lower than where
- // ProbeContext.cpu.sp points) first, and restore the remaining registers
- // from this new range.
-
- "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "\n"
- "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
- "cmp lr, ip" "\n"
- "bgt " SYMBOL_STRING(ctiMasmProbeTrampolineEnd) "\n"
-
- // We get here because the new expected stack pointer location is lower
- // than where it's supposed to be. This means the safe range of stack
- // memory where we'll be copying the remaining register restore values to
- // might be in a region of memory below the sp i.e. unallocated stack
- // memory. This in turn makes it vulnerable to interrupts potentially
- // trashing the copied values. To prevent that, we must first allocate the
- // needed stack memory by adjusting the sp before the copying.
-
- "sub lr, lr, #(6 * " STRINGIZE_VALUE_OF(PTR_SIZE)
- " + " STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) ")" "\n"
-
- "mov ip, sp" "\n"
- "mov sp, lr" "\n"
- "mov lr, ip" "\n"
-
- "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n"
- "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n"
- "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
- "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
- "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
- "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
- "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
- "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
- "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n"
- "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n"
-
-SYMBOL_STRING(ctiMasmProbeTrampolineEnd) ":" "\n"
- "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
- "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
- "sub lr, lr, #" STRINGIZE_VALUE_OF(PTR_SIZE) "\n"
- "str ip, [lr]" "\n"
- "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
-
- "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n"
- "msr APSR, ip" "\n"
- "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
- "mov lr, ip" "\n"
- "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n"
- "ldr sp, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
-
- "pop { pc }" "\n"
-);
-#endif // USE(MASM_PROBE)
-
-
-
-#endif // COMPILER(GCC)
-
-} // namespace JSC
-
-#endif // JITStubsARM_h
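
The PROBE_*_OFFSET macros and COMPILE_ASSERTs above pin the hand-written
ctiMasmProbeTrampoline offsets to the real MacroAssembler::ProbeContext layout.
A minimal stand-alone sketch of that pattern (hypothetical struct and names,
not JSC code; assumes a 32-bit ARM-style layout) looks like this:

    #include <cstddef>
    #include <cstdint>

    // Hypothetical stand-in for MacroAssembler::ProbeContext on a 32-bit ARM target.
    struct ExampleProbeContext {
        uint32_t probeFunction; // held as a raw 32-bit slot purely for layout purposes
        uint32_t arg1;
        uint32_t arg2;
        uint32_t padding;       // accounts for PROBE_FIRST_GPREG_OFFSET being 4 * PTR_SIZE
        uint32_t gprs[16];      // r0-r11, ip, sp, lr, pc
        uint32_t apsr;
        uint32_t fpscr;
        double d[16];           // d0-d15
    };

    // The same idea as the COMPILE_ASSERTs: any drift between the struct layout and
    // the assembly offsets fails the build instead of silently corrupting the probe.
    static_assert(offsetof(ExampleProbeContext, gprs) == 4 * 4, "first GPR at 4 * PTR_SIZE");
    static_assert(offsetof(ExampleProbeContext, apsr) == 4 * 4 + 16 * 4, "APSR after 16 GPRs");
    static_assert(offsetof(ExampleProbeContext, d) == 4 * 4 + 18 * 4, "FPRs after APSR and FPSCR");
    static_assert(sizeof(ExampleProbeContext) == 4 * 4 + 18 * 4 + 16 * 8, "matches PROBE_SIZE");
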
diff --git a/Source/JavaScriptCore/jit/JITStubsARMv7.h b/Source/JavaScriptCore/jit/JITStubsARMv7.h
deleted file mode 100644
index 28bbf8a92..000000000
--- a/Source/JavaScriptCore/jit/JITStubsARMv7.h
+++ /dev/null
@@ -1,351 +0,0 @@
-/*
- * Copyright (C) 2008, 2009, 2013 Apple Inc. All rights reserved.
- * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
- * Copyright (C) Research In Motion Limited 2010, 2011. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
- * its contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef JITStubsARMv7_h
-#define JITStubsARMv7_h
-
-#if !CPU(ARM_THUMB2)
-#error "JITStubsARMv7.h should only be #included if CPU(ARM_THUMB2)"
-#endif
-
-#if !USE(JSVALUE32_64)
-#error "JITStubsARMv7.h only implements USE(JSVALUE32_64)"
-#endif
-
-namespace JSC {
-
-#if COMPILER(GCC)
-
-#if USE(MASM_PROBE)
-// The following are offsets for MacroAssembler::ProbeContext fields accessed
-// by the ctiMasmProbeTrampoline stub.
-
-#define PTR_SIZE 4
-#define PROBE_PROBE_FUNCTION_OFFSET (0 * PTR_SIZE)
-#define PROBE_ARG1_OFFSET (1 * PTR_SIZE)
-#define PROBE_ARG2_OFFSET (2 * PTR_SIZE)
-
-#define PROBE_FIRST_GPREG_OFFSET (4 * PTR_SIZE)
-
-#define GPREG_SIZE 4
-#define PROBE_CPU_R0_OFFSET (PROBE_FIRST_GPREG_OFFSET + (0 * GPREG_SIZE))
-#define PROBE_CPU_R1_OFFSET (PROBE_FIRST_GPREG_OFFSET + (1 * GPREG_SIZE))
-#define PROBE_CPU_R2_OFFSET (PROBE_FIRST_GPREG_OFFSET + (2 * GPREG_SIZE))
-#define PROBE_CPU_R3_OFFSET (PROBE_FIRST_GPREG_OFFSET + (3 * GPREG_SIZE))
-#define PROBE_CPU_R4_OFFSET (PROBE_FIRST_GPREG_OFFSET + (4 * GPREG_SIZE))
-#define PROBE_CPU_R5_OFFSET (PROBE_FIRST_GPREG_OFFSET + (5 * GPREG_SIZE))
-#define PROBE_CPU_R6_OFFSET (PROBE_FIRST_GPREG_OFFSET + (6 * GPREG_SIZE))
-#define PROBE_CPU_R7_OFFSET (PROBE_FIRST_GPREG_OFFSET + (7 * GPREG_SIZE))
-#define PROBE_CPU_R8_OFFSET (PROBE_FIRST_GPREG_OFFSET + (8 * GPREG_SIZE))
-#define PROBE_CPU_R9_OFFSET (PROBE_FIRST_GPREG_OFFSET + (9 * GPREG_SIZE))
-#define PROBE_CPU_R10_OFFSET (PROBE_FIRST_GPREG_OFFSET + (10 * GPREG_SIZE))
-#define PROBE_CPU_R11_OFFSET (PROBE_FIRST_GPREG_OFFSET + (11 * GPREG_SIZE))
-#define PROBE_CPU_IP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (12 * GPREG_SIZE))
-#define PROBE_CPU_SP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (13 * GPREG_SIZE))
-#define PROBE_CPU_LR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (14 * GPREG_SIZE))
-#define PROBE_CPU_PC_OFFSET (PROBE_FIRST_GPREG_OFFSET + (15 * GPREG_SIZE))
-
-#define PROBE_CPU_APSR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (16 * GPREG_SIZE))
-#define PROBE_CPU_FPSCR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (17 * GPREG_SIZE))
-
-#define PROBE_FIRST_FPREG_OFFSET (PROBE_FIRST_GPREG_OFFSET + (18 * GPREG_SIZE))
-
-#define FPREG_SIZE 8
-#define PROBE_CPU_D0_OFFSET (PROBE_FIRST_FPREG_OFFSET + (0 * FPREG_SIZE))
-#define PROBE_CPU_D1_OFFSET (PROBE_FIRST_FPREG_OFFSET + (1 * FPREG_SIZE))
-#define PROBE_CPU_D2_OFFSET (PROBE_FIRST_FPREG_OFFSET + (2 * FPREG_SIZE))
-#define PROBE_CPU_D3_OFFSET (PROBE_FIRST_FPREG_OFFSET + (3 * FPREG_SIZE))
-#define PROBE_CPU_D4_OFFSET (PROBE_FIRST_FPREG_OFFSET + (4 * FPREG_SIZE))
-#define PROBE_CPU_D5_OFFSET (PROBE_FIRST_FPREG_OFFSET + (5 * FPREG_SIZE))
-#define PROBE_CPU_D6_OFFSET (PROBE_FIRST_FPREG_OFFSET + (6 * FPREG_SIZE))
-#define PROBE_CPU_D7_OFFSET (PROBE_FIRST_FPREG_OFFSET + (7 * FPREG_SIZE))
-#define PROBE_CPU_D8_OFFSET (PROBE_FIRST_FPREG_OFFSET + (8 * FPREG_SIZE))
-#define PROBE_CPU_D9_OFFSET (PROBE_FIRST_FPREG_OFFSET + (9 * FPREG_SIZE))
-#define PROBE_CPU_D10_OFFSET (PROBE_FIRST_FPREG_OFFSET + (10 * FPREG_SIZE))
-#define PROBE_CPU_D11_OFFSET (PROBE_FIRST_FPREG_OFFSET + (11 * FPREG_SIZE))
-#define PROBE_CPU_D12_OFFSET (PROBE_FIRST_FPREG_OFFSET + (12 * FPREG_SIZE))
-#define PROBE_CPU_D13_OFFSET (PROBE_FIRST_FPREG_OFFSET + (13 * FPREG_SIZE))
-#define PROBE_CPU_D14_OFFSET (PROBE_FIRST_FPREG_OFFSET + (14 * FPREG_SIZE))
-#define PROBE_CPU_D15_OFFSET (PROBE_FIRST_FPREG_OFFSET + (15 * FPREG_SIZE))
-
-#if CPU(APPLE_ARMV7S)
-#define PROBE_CPU_D16_OFFSET (PROBE_FIRST_FPREG_OFFSET + (16 * FPREG_SIZE))
-#define PROBE_CPU_D17_OFFSET (PROBE_FIRST_FPREG_OFFSET + (17 * FPREG_SIZE))
-#define PROBE_CPU_D18_OFFSET (PROBE_FIRST_FPREG_OFFSET + (18 * FPREG_SIZE))
-#define PROBE_CPU_D19_OFFSET (PROBE_FIRST_FPREG_OFFSET + (19 * FPREG_SIZE))
-#define PROBE_CPU_D20_OFFSET (PROBE_FIRST_FPREG_OFFSET + (20 * FPREG_SIZE))
-#define PROBE_CPU_D21_OFFSET (PROBE_FIRST_FPREG_OFFSET + (21 * FPREG_SIZE))
-#define PROBE_CPU_D22_OFFSET (PROBE_FIRST_FPREG_OFFSET + (22 * FPREG_SIZE))
-#define PROBE_CPU_D23_OFFSET (PROBE_FIRST_FPREG_OFFSET + (23 * FPREG_SIZE))
-#define PROBE_CPU_D24_OFFSET (PROBE_FIRST_FPREG_OFFSET + (24 * FPREG_SIZE))
-#define PROBE_CPU_D25_OFFSET (PROBE_FIRST_FPREG_OFFSET + (25 * FPREG_SIZE))
-#define PROBE_CPU_D26_OFFSET (PROBE_FIRST_FPREG_OFFSET + (26 * FPREG_SIZE))
-#define PROBE_CPU_D27_OFFSET (PROBE_FIRST_FPREG_OFFSET + (27 * FPREG_SIZE))
-#define PROBE_CPU_D28_OFFSET (PROBE_FIRST_FPREG_OFFSET + (28 * FPREG_SIZE))
-#define PROBE_CPU_D29_OFFSET (PROBE_FIRST_FPREG_OFFSET + (29 * FPREG_SIZE))
-#define PROBE_CPU_D30_OFFSET (PROBE_FIRST_FPREG_OFFSET + (30 * FPREG_SIZE))
-#define PROBE_CPU_D31_OFFSET (PROBE_FIRST_FPREG_OFFSET + (31 * FPREG_SIZE))
-#define PROBE_SIZE (PROBE_FIRST_FPREG_OFFSET + (32 * FPREG_SIZE))
-#else
-#define PROBE_SIZE (PROBE_FIRST_FPREG_OFFSET + (16 * FPREG_SIZE))
-#endif // CPU(APPLE_ARMV7S)
-
-
-// These ASSERTs remind you that if you change the layout of ProbeContext,
-// you need to change ctiMasmProbeTrampoline offsets above to match.
-#define PROBE_OFFSETOF(x) offsetof(struct MacroAssembler::ProbeContext, x)
-COMPILE_ASSERT(PROBE_OFFSETOF(probeFunction) == PROBE_PROBE_FUNCTION_OFFSET, ProbeContext_probeFunction_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(arg1) == PROBE_ARG1_OFFSET, ProbeContext_arg1_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(arg2) == PROBE_ARG2_OFFSET, ProbeContext_arg2_offset_matches_ctiMasmProbeTrampoline);
-
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r0) == PROBE_CPU_R0_OFFSET, ProbeContext_cpu_r0_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r1) == PROBE_CPU_R1_OFFSET, ProbeContext_cpu_r1_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r2) == PROBE_CPU_R2_OFFSET, ProbeContext_cpu_r2_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r3) == PROBE_CPU_R3_OFFSET, ProbeContext_cpu_r3_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r4) == PROBE_CPU_R4_OFFSET, ProbeContext_cpu_r4_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r5) == PROBE_CPU_R5_OFFSET, ProbeContext_cpu_r5_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r6) == PROBE_CPU_R6_OFFSET, ProbeContext_cpu_r6_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r7) == PROBE_CPU_R7_OFFSET, ProbeContext_cpu_r7_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r8) == PROBE_CPU_R8_OFFSET, ProbeContext_cpu_r8_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r9) == PROBE_CPU_R9_OFFSET, ProbeContext_cpu_r9_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r10) == PROBE_CPU_R10_OFFSET, ProbeContext_cpu_r10_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r11) == PROBE_CPU_R11_OFFSET, ProbeContext_cpu_r11_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.ip) == PROBE_CPU_IP_OFFSET, ProbeContext_cpu_ip_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.sp) == PROBE_CPU_SP_OFFSET, ProbeContext_cpu_sp_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.lr) == PROBE_CPU_LR_OFFSET, ProbeContext_cpu_lr_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.pc) == PROBE_CPU_PC_OFFSET, ProbeContext_cpu_pc_offset_matches_ctiMasmProbeTrampoline);
-
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.apsr) == PROBE_CPU_APSR_OFFSET, ProbeContext_cpu_apsr_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.fpscr) == PROBE_CPU_FPSCR_OFFSET, ProbeContext_cpu_fpscr_offset_matches_ctiMasmProbeTrampoline);
-
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d0) == PROBE_CPU_D0_OFFSET, ProbeContext_cpu_d0_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d1) == PROBE_CPU_D1_OFFSET, ProbeContext_cpu_d1_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d2) == PROBE_CPU_D2_OFFSET, ProbeContext_cpu_d2_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d3) == PROBE_CPU_D3_OFFSET, ProbeContext_cpu_d3_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d4) == PROBE_CPU_D4_OFFSET, ProbeContext_cpu_d4_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d5) == PROBE_CPU_D5_OFFSET, ProbeContext_cpu_d5_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d6) == PROBE_CPU_D6_OFFSET, ProbeContext_cpu_d6_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d7) == PROBE_CPU_D7_OFFSET, ProbeContext_cpu_d7_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d8) == PROBE_CPU_D8_OFFSET, ProbeContext_cpu_d8_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d9) == PROBE_CPU_D9_OFFSET, ProbeContext_cpu_d9_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d10) == PROBE_CPU_D10_OFFSET, ProbeContext_cpu_d10_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d11) == PROBE_CPU_D11_OFFSET, ProbeContext_cpu_d11_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d12) == PROBE_CPU_D12_OFFSET, ProbeContext_cpu_d12_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d13) == PROBE_CPU_D13_OFFSET, ProbeContext_cpu_d13_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d14) == PROBE_CPU_D14_OFFSET, ProbeContext_cpu_d14_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d15) == PROBE_CPU_D15_OFFSET, ProbeContext_cpu_d15_offset_matches_ctiMasmProbeTrampoline);
-
-#if CPU(APPLE_ARMV7S)
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d16) == PROBE_CPU_D16_OFFSET, ProbeContext_cpu_d16_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d17) == PROBE_CPU_D17_OFFSET, ProbeContext_cpu_d17_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d18) == PROBE_CPU_D18_OFFSET, ProbeContext_cpu_d18_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d19) == PROBE_CPU_D19_OFFSET, ProbeContext_cpu_d19_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d20) == PROBE_CPU_D20_OFFSET, ProbeContext_cpu_d20_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d21) == PROBE_CPU_D21_OFFSET, ProbeContext_cpu_d21_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d22) == PROBE_CPU_D22_OFFSET, ProbeContext_cpu_d22_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d23) == PROBE_CPU_D23_OFFSET, ProbeContext_cpu_d23_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d24) == PROBE_CPU_D24_OFFSET, ProbeContext_cpu_d24_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d25) == PROBE_CPU_D25_OFFSET, ProbeContext_cpu_d25_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d26) == PROBE_CPU_D26_OFFSET, ProbeContext_cpu_d26_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d27) == PROBE_CPU_D27_OFFSET, ProbeContext_cpu_d27_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d28) == PROBE_CPU_D28_OFFSET, ProbeContext_cpu_d28_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d29) == PROBE_CPU_D29_OFFSET, ProbeContext_cpu_d29_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d30) == PROBE_CPU_D30_OFFSET, ProbeContext_cpu_d30_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d31) == PROBE_CPU_D31_OFFSET, ProbeContext_cpu_d31_offset_matches_ctiMasmProbeTrampoline);
-#endif // CPU(APPLE_ARMV7S)
-
-COMPILE_ASSERT(sizeof(MacroAssembler::ProbeContext) == PROBE_SIZE, ProbeContext_size_matches_ctiMasmProbeTrampoline);
-
-#undef PROBE_OFFSETOF
-
-asm (
-".text" "\n"
-".align 2" "\n"
-".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n"
-HIDE_SYMBOL(ctiMasmProbeTrampoline) "\n"
-".thumb" "\n"
-".thumb_func " THUMB_FUNC_PARAM(ctiMasmProbeTrampoline) "\n"
-SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n"
-
- // MacroAssembler::probe() has already generated code to store some values.
- // The top of stack now looks like this:
- // sp[0 * ptrSize]: probeFunction
- // sp[1 * ptrSize]: arg1
- // sp[2 * ptrSize]: arg2
- // sp[3 * ptrSize]: saved r0
- // sp[4 * ptrSize]: saved ip
- // sp[5 * ptrSize]: saved lr
- // sp[6 * ptrSize]: saved sp
-
- "mov ip, sp" "\n"
- "mov r0, sp" "\n"
- "sub r0, r0, #" STRINGIZE_VALUE_OF(PROBE_SIZE) "\n"
-
- // The ARM EABI specifies that the stack needs to be 16 byte aligned.
- "bic r0, r0, #0xf" "\n"
- "mov sp, r0" "\n"
-
- "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
- "add lr, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R1_OFFSET) "\n"
- "stmia lr, { r1-r11 }" "\n"
- "mrs lr, APSR" "\n"
- "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n"
- "vmrs lr, FPSCR" "\n"
- "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSCR_OFFSET) "]" "\n"
-
- "ldr lr, [ip, #0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
- "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "]" "\n"
- "ldr lr, [ip, #1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
- "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG1_OFFSET) "]" "\n"
- "ldr lr, [ip, #2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
- "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG2_OFFSET) "]" "\n"
- "ldr lr, [ip, #3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
- "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R0_OFFSET) "]" "\n"
- "ldr lr, [ip, #4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
- "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n"
- "ldr lr, [ip, #5 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
- "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
- "ldr lr, [ip, #6 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
- "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
-
- "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
-
- "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_D0_OFFSET) "\n"
-#if CPU(APPLE_ARMV7S)
- "vstmia.64 ip, { d0-d31 }" "\n"
-#else
- "vstmia.64 ip, { d0-d15 }" "\n"
-#endif
-
- "mov fp, sp" "\n" // Save the ProbeContext*.
-
- "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "]" "\n"
- "mov r0, sp" "\n" // the ProbeContext* arg.
- "blx ip" "\n"
-
- "mov sp, fp" "\n"
-
- // To enable probes to modify register state, we copy all registers
- // out of the ProbeContext before returning.
-
-#if CPU(APPLE_ARMV7S)
- "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_D31_OFFSET + FPREG_SIZE) "\n"
- "vldmdb.64 ip!, { d0-d31 }" "\n"
-#else
- "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_D15_OFFSET + FPREG_SIZE) "\n"
- "vldmdb.64 ip!, { d0-d15 }" "\n"
-#endif
- "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R11_OFFSET + GPREG_SIZE) "\n"
- "ldmdb ip, { r0-r11 }" "\n"
- "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSCR_OFFSET) "]" "\n"
- "vmsr FPSCR, ip" "\n"
-
- // There are 5 more registers left to restore: ip, sp, lr, pc, and apsr.
- // There are 2 issues that complicate the restoration of these last few
- // registers:
- //
- // 1. Normal ARM calling convention relies on moving lr to pc to return to
- // the caller. In our case, the address to return to is specified by
- // ProbeContext.cpu.pc. And at that moment, we won't have any available
- // scratch registers to hold the return address (lr needs to hold
- // ProbeContext.cpu.lr, not the return address).
- //
- // The solution is to store the return address on the stack and load the
- // pc from there.
- //
- // 2. Issue 1 means we will need to write to the stack location at
- // ProbeContext.cpu.sp - 4. But if the user probe function had modified
- // the value of ProbeContext.cpu.sp to point into the range from
- // &ProbeContext.cpu.ip through &ProbeContext.cpu.apsr, then the action for
- // Issue 1 may trash the values to be restored before we can restore
- // them.
- //
- // The solution is to check if ProbeContext.cpu.sp contains a value in
- // the undesirable range. If so, we copy the remaining ProbeContext
- // register data to a safe range (at memory lower than where
- // ProbeContext.cpu.sp points) first, and restore the remaining registers
- // from this new range.
-
- "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "\n"
- "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
- "cmp lr, ip" "\n"
- "it gt" "\n"
- "bgt " SYMBOL_STRING(ctiMasmProbeTrampolineEnd) "\n"
-
- // We get here because the new expected stack pointer location is lower
- // than where it's supposed to be. This means the safe range of stack
- // memory where we'll be copying the remaining register restore values to
- // might be in a region of memory below the sp i.e. unallocated stack
- // memory. This, in turn, makes it vulnerable to interrupts potentially
- // trashing the copied values. To prevent that, we must first allocate the
- // needed stack memory by adjusting the sp before the copying.
-
- "sub lr, lr, #(6 * " STRINGIZE_VALUE_OF(PTR_SIZE)
- " + " STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) ")" "\n"
-
- "mov ip, sp" "\n"
- "mov sp, lr" "\n"
- "mov lr, ip" "\n"
-
- "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n"
- "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n"
- "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
- "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
- "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
- "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
- "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
- "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
- "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n"
- "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n"
-
-".thumb_func " THUMB_FUNC_PARAM(ctiMasmProbeTrampolineEnd) "\n"
-SYMBOL_STRING(ctiMasmProbeTrampolineEnd) ":" "\n"
- "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
- "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
- "sub lr, lr, #" STRINGIZE_VALUE_OF(PTR_SIZE) "\n"
- "str ip, [lr]" "\n"
- "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
-
- "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n"
- "msr APSR, ip" "\n"
- "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
- "mov lr, ip" "\n"
- "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n"
- "ldr sp, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
-
- "pop { pc }" "\n"
-);
-#endif // USE(MASM_PROBE)
-
-#endif // COMPILER(GCC)
-
-} // namespace JSC
-
-#endif // JITStubsARMv7_h
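
Two details that both ARM trampolines above rely on are easy to miss in the
assembly: the stack pointer is rounded down to a 16-byte boundary before the
ProbeContext is carved out ("bic r0, r0, #0xf" here, r3 in the ARM-traditional
version), and the restore path compares the possibly probe-modified saved sp
against the address of the apsr slot, the last field still to be restored. A
rough C++ model of those two checks (assumed names, not part of the tree):

    #include <cstdint>

    // Mirrors "bic rX, rX, #0xf": round down so the ProbeContext is 16-byte aligned.
    inline uintptr_t alignDownTo16Bytes(uintptr_t sp)
    {
        return sp & ~uintptr_t(0xf);
    }

    // Mirrors the "cmp lr, ip / bgt ctiMasmProbeTrampolineEnd" check: if the sp the
    // probe asked for is at or below the apsr slot, writing the return address there
    // could trash values not yet restored, so the trampoline first copies the
    // remaining fields to a staging area below the requested sp.
    inline bool mustCopyToStagingArea(uintptr_t requestedSP, uintptr_t apsrSlotAddress)
    {
        return requestedSP <= apsrSlotAddress;
    }
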
diff --git a/Source/JavaScriptCore/jit/JITStubsMSVC64.asm b/Source/JavaScriptCore/jit/JITStubsMSVC64.asm
new file mode 100644
index 000000000..ef9cd4e0e
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITStubsMSVC64.asm
@@ -0,0 +1,44 @@
+;/*
+; Copyright (C) 2014 Apple Inc. All rights reserved.
+;
+; Redistribution and use in source and binary forms, with or without
+; modification, are permitted provided that the following conditions
+; are met:
+; 1. Redistributions of source code must retain the above copyright
+; notice, this list of conditions and the following disclaimer.
+; 2. Redistributions in binary form must reproduce the above copyright
+; notice, this list of conditions and the following disclaimer in the
+; documentation and/or other materials provided with the distribution.
+;
+; THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+; PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+; OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+;*/
+
+EXTERN getHostCallReturnValueWithExecState : near
+
+PUBLIC getHostCallReturnValue
+
+_TEXT SEGMENT
+
+getHostCallReturnValue PROC
+ lea rcx, [rsp - 8]
+ ; Allocate space for all 4 parameter registers, and align the stack pointer to a 16-byte boundary by allocating another 8 bytes.
+ ; The stack alignment is needed to fix a crash in the CRT library on a floating point instruction.
+ sub rsp, 40
+ call getHostCallReturnValueWithExecState
+ add rsp, 40
+ ret
+getHostCallReturnValue ENDP
+
+_TEXT ENDS
+
+END
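
For context on the "sub rsp, 40" above: the Windows x64 calling convention
reserves 32 bytes of home (shadow) space for the four register parameters, and
rsp must be 16-byte aligned at the call, but it is off by 8 on entry because of
the pushed return address. A small illustrative compile-time check (not part of
the patch):

    // Windows x64: rcx, rdx, r8 and r9 each get an 8-byte home slot on the stack.
    constexpr int homeSpaceBytes = 4 * 8;
    // The caller's call pushed an 8-byte return address, so rsp is 8 mod 16 on entry;
    // one extra 8-byte slot restores 16-byte alignment for the outgoing call.
    constexpr int alignmentPadding = 8;
    static_assert(homeSpaceBytes + alignmentPadding == 40, "matches the sub rsp, 40 above");
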
diff --git a/Source/JavaScriptCore/jit/JITStubsX86.h b/Source/JavaScriptCore/jit/JITStubsX86.h
deleted file mode 100644
index 7a26a5afa..000000000
--- a/Source/JavaScriptCore/jit/JITStubsX86.h
+++ /dev/null
@@ -1,649 +0,0 @@
-/*
- * Copyright (C) 2008, 2009, 2013 Apple Inc. All rights reserved.
- * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
- * Copyright (C) Research In Motion Limited 2010, 2011. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
- * its contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef JITStubsX86_h
-#define JITStubsX86_h
-
-#include "JITStubsX86Common.h"
-#include <wtf/InlineASM.h>
-
-#if !CPU(X86)
-#error "JITStubsX86.h should only be #included if CPU(X86)"
-#endif
-
-#if !USE(JSVALUE32_64)
-#error "JITStubsX86.h only implements USE(JSVALUE32_64)"
-#endif
-
-namespace JSC {
-
-#if COMPILER(GCC)
-
-#if USE(MASM_PROBE)
-asm (
-".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n"
-HIDE_SYMBOL(ctiMasmProbeTrampoline) "\n"
-SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n"
-
- "pushfd" "\n"
-
- // MacroAssembler::probe() has already generated code to store some values.
- // Together with the eflags pushed above, the top of stack now looks like
- // this:
- // esp[0 * ptrSize]: eflags
- // esp[1 * ptrSize]: return address / saved eip
- // esp[2 * ptrSize]: probeFunction
- // esp[3 * ptrSize]: arg1
- // esp[4 * ptrSize]: arg2
- // esp[5 * ptrSize]: saved eax
- // esp[6 * ptrSize]: saved esp
-
- "movl %esp, %eax" "\n"
- "subl $" STRINGIZE_VALUE_OF(PROBE_SIZE) ", %esp" "\n"
-
- // The X86_64 ABI specifies that the worst-case stack alignment requirement
- // is 32 bytes.
- "andl $~0x1f, %esp" "\n"
-
- "movl %ebp, " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%esp)" "\n"
- "movl %esp, %ebp" "\n" // Save the ProbeContext*.
-
- "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%ebp)" "\n"
- "movl %edx, " STRINGIZE_VALUE_OF(PROBE_CPU_EDX_OFFSET) "(%ebp)" "\n"
- "movl %ebx, " STRINGIZE_VALUE_OF(PROBE_CPU_EBX_OFFSET) "(%ebp)" "\n"
- "movl %esi, " STRINGIZE_VALUE_OF(PROBE_CPU_ESI_OFFSET) "(%ebp)" "\n"
- "movl %edi, " STRINGIZE_VALUE_OF(PROBE_CPU_EDI_OFFSET) "(%ebp)" "\n"
-
- "movl 0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n"
- "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%ebp)" "\n"
- "movl 1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n"
- "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%ebp)" "\n"
- "movl 2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n"
- "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "(%ebp)" "\n"
- "movl 3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n"
- "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_ARG1_OFFSET) "(%ebp)" "\n"
- "movl 4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n"
- "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_ARG2_OFFSET) "(%ebp)" "\n"
- "movl 5 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n"
- "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%ebp)" "\n"
- "movl 6 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n"
- "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%ebp)" "\n"
-
- "movdqa %xmm0, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM0_OFFSET) "(%ebp)" "\n"
- "movdqa %xmm1, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM1_OFFSET) "(%ebp)" "\n"
- "movdqa %xmm2, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM2_OFFSET) "(%ebp)" "\n"
- "movdqa %xmm3, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM3_OFFSET) "(%ebp)" "\n"
- "movdqa %xmm4, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM4_OFFSET) "(%ebp)" "\n"
- "movdqa %xmm5, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM5_OFFSET) "(%ebp)" "\n"
- "movdqa %xmm6, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM6_OFFSET) "(%ebp)" "\n"
- "movdqa %xmm7, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM7_OFFSET) "(%ebp)" "\n"
-
- // Reserve stack space for the arg while maintaining the required stack
- // pointer 32 byte alignment:
- "subl $0x20, %esp" "\n"
- "movl %ebp, 0(%esp)" "\n" // the ProbeContext* arg.
-
- "call *" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "(%ebp)" "\n"
-
- // To enable probes to modify register state, we copy all registers
- // out of the ProbeContext before returning.
-
- "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EDX_OFFSET) "(%ebp), %edx" "\n"
- "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EBX_OFFSET) "(%ebp), %ebx" "\n"
- "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ESI_OFFSET) "(%ebp), %esi" "\n"
- "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EDI_OFFSET) "(%ebp), %edi" "\n"
-
- "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM0_OFFSET) "(%ebp), %xmm0" "\n"
- "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM1_OFFSET) "(%ebp), %xmm1" "\n"
- "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM2_OFFSET) "(%ebp), %xmm2" "\n"
- "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM3_OFFSET) "(%ebp), %xmm3" "\n"
- "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM4_OFFSET) "(%ebp), %xmm4" "\n"
- "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM5_OFFSET) "(%ebp), %xmm5" "\n"
- "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM6_OFFSET) "(%ebp), %xmm6" "\n"
- "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM7_OFFSET) "(%ebp), %xmm7" "\n"
-
- // There are 6 more registers left to restore:
- // eax, ecx, ebp, esp, eip, and eflags.
- // We need to handle these last few restores carefully because:
- //
- // 1. We need to push the return address on the stack for ret to use.
- // That means we need to write to the stack.
- // 2. The user probe function may have altered the restore value of esp to
- // point to the vicinity of one of the restore values for the remaining
- // registers left to be restored.
- // That means, for requirement 1, we may end up writing over some of the
- // restore values. We can check for this, and first copy the restore
- // values to a "safe area" on the stack before commencing with the action
- // for requirement 1.
- // 3. For requirement 2, we need to ensure that the "safe area" is
- // protected from interrupt handlers overwriting it. Hence, the esp needs
- // to be adjusted to include the "safe area" before we start copying the
- // restore values.
-
- "movl %ebp, %eax" "\n"
- "addl $" STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) ", %eax" "\n"
- "cmpl %eax, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%ebp)" "\n"
- "jg " SYMBOL_STRING(ctiMasmProbeTrampolineEnd) "\n"
-
- // Locate the "safe area" at 2x sizeof(ProbeContext) below where the new
- // esp will be. This time we don't have to 32-byte align it because we're
- // not using it to store any xmm regs.
- "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%ebp), %eax" "\n"
- "subl $2 * " STRINGIZE_VALUE_OF(PROBE_SIZE) ", %eax" "\n"
- "movl %eax, %esp" "\n"
-
- "subl $" STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) ", %eax" "\n"
- "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%ebp), %ecx" "\n"
- "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%eax)" "\n"
- "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%ebp), %ecx" "\n"
- "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%eax)" "\n"
- "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%ebp), %ecx" "\n"
- "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%eax)" "\n"
- "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%ebp), %ecx" "\n"
- "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%eax)" "\n"
- "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%ebp), %ecx" "\n"
- "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%eax)" "\n"
- "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%ebp), %ecx" "\n"
- "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%eax)" "\n"
- "movl %eax, %ebp" "\n"
-
-SYMBOL_STRING(ctiMasmProbeTrampolineEnd) ":" "\n"
- "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%ebp), %eax" "\n"
- "subl $5 * " STRINGIZE_VALUE_OF(PTR_SIZE) ", %eax" "\n"
- // At this point, %esp should be < %eax.
-
- "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%ebp), %ecx" "\n"
- "movl %ecx, 0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax)" "\n"
- "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%ebp), %ecx" "\n"
- "movl %ecx, 1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax)" "\n"
- "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%ebp), %ecx" "\n"
- "movl %ecx, 2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax)" "\n"
- "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%ebp), %ecx" "\n"
- "movl %ecx, 3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax)" "\n"
- "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%ebp), %ecx" "\n"
- "movl %ecx, 4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax)" "\n"
- "movl %eax, %esp" "\n"
-
- "popfd" "\n"
- "popl %eax" "\n"
- "popl %ecx" "\n"
- "popl %ebp" "\n"
- "ret" "\n"
-);
-#endif // USE(MASM_PROBE)
-
-#if OS(WINDOWS)
-extern "C" {
-
- // FIXME: Since Windows doesn't use the LLInt, we have inline stubs here.
- // Until the LLInt is changed to support Windows, these stubs need to be updated.
- asm (
- ".globl " SYMBOL_STRING(callToJavaScript) "\n"
- HIDE_SYMBOL(callToJavaScript) "\n"
- SYMBOL_STRING(callToJavaScript) ":" "\n"
- "mov (%esp),%edx" "\n"
- "push %ebp" "\n"
- "mov %ebp,%eax" "\n"
- "mov %esp,%ebp" "\n"
- "push %esi" "\n"
- "push %edi" "\n"
- "push %ebx" "\n"
- "sub $0x1c,%esp" "\n"
- "mov 0x34(%esp),%ecx" "\n"
- "mov 0x38(%esp),%esi" "\n"
- "mov 0x3c(%esp),%ebp" "\n"
- "sub $0x20,%ebp" "\n"
- "movl $0x0,0x24(%ebp)" "\n"
- "movl $0x0,0x20(%ebp)" "\n"
- "movl $0x0,0x1c(%ebp)" "\n"
- "mov %ecx,0x18(%ebp)" "\n"
- "mov (%ecx),%ebx" "\n"
- "movl $0x0,0x14(%ebp)" "\n"
- "mov %ebx,0x10(%ebp)" "\n"
- "movl $0x0,0xc(%ebp)" "\n"
- "movl $0x1,0x8(%ebp)" "\n"
- "mov %edx,0x4(%ebp)" "\n"
- "mov %eax,0x0(%ebp)" "\n"
- "mov %ebp,%eax" "\n"
-
- "mov 0x28(%esi),%edx" "\n"
- "add $0x5,%edx" "\n"
- "shl $0x3,%edx" "\n"
- "sub %edx,%ebp" "\n"
- "mov %eax,0x0(%ebp)" "\n"
-
- "mov $0x5,%eax" "\n"
-
- ".copyHeaderLoop:" "\n"
- "sub $0x1,%eax" "\n"
- "mov (%esi,%eax,8),%ecx" "\n"
- "mov %ecx,0x8(%ebp,%eax,8)" "\n"
- "mov 0x4(%esi,%eax,8),%ecx" "\n"
- "mov %ecx,0xc(%ebp,%eax,8)" "\n"
- "test %eax,%eax" "\n"
- "jne .copyHeaderLoop" "\n"
-
- "mov 0x18(%esi),%edx" "\n"
- "sub $0x1,%edx" "\n"
- "mov 0x28(%esi),%ecx" "\n"
- "sub $0x1,%ecx" "\n"
-
- "cmp %ecx,%edx" "\n"
- "je .copyArgs" "\n"
-
- "xor %eax,%eax" "\n"
- "mov $0xfffffffc,%ebx" "\n"
-
- ".fillExtraArgsLoop:" "\n"
- "sub $0x1,%ecx" "\n"
- "mov %eax,0x30(%ebp,%ecx,8)" "\n"
- "mov %ebx,0x34(%ebp,%ecx,8)" "\n"
- "cmp %ecx,%edx" "\n"
- "jne .fillExtraArgsLoop" "\n"
-
- ".copyArgs:" "\n"
- "mov 0x2c(%esi),%eax" "\n"
-
- ".copyArgsLoop:" "\n"
- "test %edx,%edx" "\n"
- "je .copyArgsDone" "\n"
- "sub $0x1,%edx" "\n"
- "mov (%eax,%edx,8),%ecx" "\n"
- "mov 0x4(%eax,%edx,8),%ebx" "\n"
- "mov %ecx,0x30(%ebp,%edx,8)" "\n"
- "mov %ebx,0x34(%ebp,%edx,8)" "\n"
- "jmp .copyArgsLoop" "\n"
-
- ".copyArgsDone:" "\n"
- "mov 0x34(%esp),%ecx" "\n"
- "mov %ebp,(%ecx)" "\n"
-
- "call *0x30(%esp)" "\n"
-
- "cmpl $0x1,0x8(%ebp)" "\n"
- "je .calleeFramePopped" "\n"
- "mov 0x0(%ebp),%ebp" "\n"
-
- ".calleeFramePopped:" "\n"
- "mov 0x18(%ebp),%ecx" "\n"
- "mov 0x10(%ebp),%ebx" "\n"
- "mov %ebx,(%ecx)" "\n"
-
- "add $0x1c,%esp" "\n"
- "pop %ebx" "\n"
- "pop %edi" "\n"
- "pop %esi" "\n"
- "pop %ebp" "\n"
- "ret" "\n"
-
- ".globl " SYMBOL_STRING(returnFromJavaScript) "\n"
- HIDE_SYMBOL(returnFromJavaScript) "\n"
- SYMBOL_STRING(returnFromJavaScript) ":" "\n"
- "add $0x1c,%esp" "\n"
- "pop %ebx" "\n"
- "pop %edi" "\n"
- "pop %esi" "\n"
- "pop %ebp" "\n"
- "ret" "\n"
-
- ".globl " SYMBOL_STRING(callToNativeFunction) "\n"
- HIDE_SYMBOL(callToNativeFunction) "\n"
- SYMBOL_STRING(callToNativeFunction) ":" "\n"
- "mov (%esp),%edx" "\n"
- "push %ebp" "\n"
- "mov %ebp,%eax" "\n"
- "mov %esp,%ebp" "\n"
- "push %esi" "\n"
- "push %edi" "\n"
- "push %ebx" "\n"
- "sub $0x1c,%esp" "\n"
- "mov 0x34(%esp),%ecx" "\n"
- "mov 0x38(%esp),%esi" "\n"
- "mov 0x3c(%esp),%ebp" "\n"
- "sub $0x20,%ebp" "\n"
- "movl $0x0,0x24(%ebp)" "\n"
- "movl $0x0,0x20(%ebp)" "\n"
- "movl $0x0,0x1c(%ebp)" "\n"
- "mov %ecx,0x18(%ebp)" "\n"
- "mov (%ecx),%ebx" "\n"
- "movl $0x0,0x14(%ebp)" "\n"
- "mov %ebx,0x10(%ebp)" "\n"
- "movl $0x0,0xc(%ebp)" "\n"
- "movl $0x1,0x8(%ebp)" "\n"
- "mov %edx,0x4(%ebp)" "\n"
- "mov %eax,0x0(%ebp)" "\n"
- "mov %ebp,%eax" "\n"
-
- "mov 0x28(%esi),%edx" "\n"
- "add $0x5,%edx" "\n"
- "shl $0x3,%edx" "\n"
- "sub %edx,%ebp" "\n"
- "mov %eax,0x0(%ebp)" "\n"
-
- "mov $0x5,%eax" "\n"
-
- "copyHeaderLoop:" "\n"
- "sub $0x1,%eax" "\n"
- "mov (%esi,%eax,8),%ecx" "\n"
- "mov %ecx,0x8(%ebp,%eax,8)" "\n"
- "mov 0x4(%esi,%eax,8),%ecx" "\n"
- "mov %ecx,0xc(%ebp,%eax,8)" "\n"
- "test %eax,%eax" "\n"
- "jne copyHeaderLoop" "\n"
-
- "mov 0x18(%esi),%edx" "\n"
- "sub $0x1,%edx" "\n"
- "mov 0x28(%esi),%ecx" "\n"
- "sub $0x1,%ecx" "\n"
-
- "cmp %ecx,%edx" "\n"
- "je copyArgs" "\n"
-
- "xor %eax,%eax" "\n"
- "mov $0xfffffffc,%ebx" "\n"
-
- "fillExtraArgsLoop:" "\n"
- "sub $0x1,%ecx" "\n"
- "mov %eax,0x30(%ebp,%ecx,8)" "\n"
- "mov %ebx,0x34(%ebp,%ecx,8)" "\n"
- "cmp %ecx,%edx" "\n"
- "jne fillExtraArgsLoop" "\n"
-
- "copyArgs:" "\n"
- "mov 0x2c(%esi),%eax" "\n"
-
- "copyArgsLoop:" "\n"
- "test %edx,%edx" "\n"
- "je copyArgsDone" "\n"
- "sub $0x1,%edx" "\n"
- "mov (%eax,%edx,8),%ecx" "\n"
- "mov 0x4(%eax,%edx,8),%ebx" "\n"
- "mov %ecx,0x30(%ebp,%edx,8)" "\n"
- "mov %ebx,0x34(%ebp,%edx,8)" "\n"
- "jmp copyArgsLoop" "\n"
-
- "copyArgsDone:" "\n"
- "mov 0x34(%esp),%ecx" "\n"
- "mov %ebp,(%ecx)" "\n"
-
- "mov 0x30(%esp),%edi" "\n"
- "mov %ebp,0x30(%esp)" "\n"
- "mov %ebp,%ecx" "\n"
- "call *%edi" "\n"
-
- "cmpl $0x1,0x8(%ebp)" "\n"
- "je calleeFramePopped" "\n"
- "mov 0x0(%ebp),%ebp" "\n"
-
- "calleeFramePopped:" "\n"
- "mov 0x18(%ebp),%ecx" "\n"
- "mov 0x10(%ebp),%ebx" "\n"
- "mov %ebx,(%ecx)" "\n"
-
- "add $0x1c,%esp" "\n"
- "pop %ebx" "\n"
- "pop %edi" "\n"
- "pop %esi" "\n"
- "pop %ebp" "\n"
- "ret" "\n"
- );
-}
-
-#endif // OS(WINDOWS)
-
-#endif // COMPILER(GCC)
-
-#if COMPILER(MSVC)
-
-extern "C" {
-
- // FIXME: Since Windows doesn't use the LLInt, we have inline stubs here.
- // Until the LLInt is changed to support Windows, these stubs need to be updated.
- __declspec(naked) EncodedJSValue callToJavaScript(void* code, ExecState**, ProtoCallFrame*, Register*)
- {
- __asm {
- mov edx, [esp]
- push ebp;
- mov eax, ebp;
- mov ebp, esp;
- push esi;
- push edi;
- push ebx;
- sub esp, 0x1c;
- mov ecx, dword ptr[esp + 0x34];
- mov esi, dword ptr[esp + 0x38];
- mov ebp, dword ptr[esp + 0x3c];
- sub ebp, 0x20;
- mov dword ptr[ebp + 0x24], 0;
- mov dword ptr[ebp + 0x20], 0;
- mov dword ptr[ebp + 0x1c], 0;
- mov dword ptr[ebp + 0x18], ecx;
- mov ebx, [ecx];
- mov dword ptr[ebp + 0x14], 0;
- mov dword ptr[ebp + 0x10], ebx;
- mov dword ptr[ebp + 0xc], 0;
- mov dword ptr[ebp + 0x8], 1;
- mov dword ptr[ebp + 0x4], edx;
- mov dword ptr[ebp], eax;
- mov eax, ebp;
-
- mov edx, dword ptr[esi + 0x28];
- add edx, 5;
- sal edx, 3;
- sub ebp, edx;
- mov dword ptr[ebp], eax;
-
- mov eax, 5;
-
- copyHeaderLoop:
- sub eax, 1;
- mov ecx, dword ptr[esi + eax * 8];
- mov dword ptr 8[ebp + eax * 8], ecx;
- mov ecx, dword ptr 4[esi + eax * 8];
- mov dword ptr 12[ebp + eax * 8], ecx;
- test eax, eax;
- jnz copyHeaderLoop;
-
- mov edx, dword ptr[esi + 0x18];
- sub edx, 1;
- mov ecx, dword ptr[esi + 0x28];
- sub ecx, 1;
-
- cmp edx, ecx;
- je copyArgs;
-
- xor eax, eax;
- mov ebx, -4;
-
- fillExtraArgsLoop:
- sub ecx, 1;
- mov dword ptr 0x30[ebp + ecx * 8], eax;
- mov dword ptr 0x34[ebp + ecx * 8], ebx;
- cmp edx, ecx;
- jne fillExtraArgsLoop;
-
- copyArgs:
- mov eax, dword ptr[esi + 0x2c];
-
- copyArgsLoop:
- test edx, edx;
- jz copyArgsDone;
- sub edx, 1;
- mov ecx, dword ptr 0[eax + edx * 8];
- mov ebx, dword ptr 4[eax + edx * 8];
- mov dword ptr 0x30[ebp + edx * 8], ecx;
- mov dword ptr 0x34[ebp + edx * 8], ebx;
- jmp copyArgsLoop;
-
- copyArgsDone:
- mov ecx, dword ptr[esp + 0x34];
- mov dword ptr[ecx], ebp;
-
- call dword ptr[esp + 0x30];
-
- cmp dword ptr[ebp + 8], 1;
- je calleeFramePopped;
- mov ebp, dword ptr[ebp];
-
- calleeFramePopped:
- mov ecx, dword ptr[ebp + 0x18];
- mov ebx, dword ptr[ebp + 0x10];
- mov dword ptr[ecx], ebx;
-
- add esp, 0x1c;
- pop ebx;
- pop edi;
- pop esi;
- pop ebp;
- ret;
- }
- }
-
- __declspec(naked) void returnFromJavaScript()
- {
- __asm {
- add esp, 0x1c;
- pop ebx;
- pop edi;
- pop esi;
- pop ebp;
- ret;
- }
- }
-
- __declspec(naked) EncodedJSValue callToNativeFunction(void* code, ExecState**, ProtoCallFrame*, Register*)
- {
- __asm {
- mov edx, [esp]
- push ebp;
- mov eax, ebp;
- mov ebp, esp;
- push esi;
- push edi;
- push ebx;
- sub esp, 0x1c;
- mov ecx, [esp + 0x34];
- mov esi, [esp + 0x38];
- mov ebp, [esp + 0x3c];
- sub ebp, 0x20;
- mov dword ptr[ebp + 0x24], 0;
- mov dword ptr[ebp + 0x20], 0;
- mov dword ptr[ebp + 0x1c], 0;
- mov dword ptr[ebp + 0x18], ecx;
- mov ebx, [ecx];
- mov dword ptr[ebp + 0x14], 0;
- mov dword ptr[ebp + 0x10], ebx;
- mov dword ptr[ebp + 0xc], 0;
- mov dword ptr[ebp + 0x8], 1;
- mov dword ptr[ebp + 0x4], edx;
- mov dword ptr[ebp], eax;
- mov eax, ebp;
-
- mov edx, dword ptr[esi + 0x28];
- add edx, 5;
- sal edx, 3;
- sub ebp, edx;
- mov dword ptr[ebp], eax;
-
- mov eax, 5;
-
- copyHeaderLoop:
- sub eax, 1;
- mov ecx, dword ptr[esi + eax * 8];
- mov dword ptr 8[ebp + eax * 8], ecx;
- mov ecx, dword ptr 4[esi + eax * 8];
- mov dword ptr 12[ebp + eax * 8], ecx;
- test eax, eax;
- jnz copyHeaderLoop;
-
- mov edx, dword ptr[esi + 0x18];
- sub edx, 1;
- mov ecx, dword ptr[esi + 0x28];
- sub ecx, 1;
-
- cmp edx, ecx;
- je copyArgs;
-
- xor eax, eax;
- mov ebx, -4;
-
- fillExtraArgsLoop:
- sub ecx, 1;
- mov dword ptr 0x30[ebp + ecx * 8], eax;
- mov dword ptr 0x34[ebp + ecx * 8], ebx;
- cmp edx, ecx;
- jne fillExtraArgsLoop;
-
- copyArgs:
- mov eax, dword ptr[esi + 0x2c];
-
- copyArgsLoop:
- test edx, edx;
- jz copyArgsDone;
- sub edx, 1;
- mov ecx, dword ptr 0[eax + edx * 8];
- mov ebx, dword ptr 4[eax + edx * 8];
- mov dword ptr 0x30[ebp + edx * 8], ecx;
- mov dword ptr 0x34[ebp + edx * 8], ebx;
- jmp copyArgsLoop;
-
- copyArgsDone:
- mov ecx, dword ptr[esp + 0x34];
- mov dword ptr[ecx], ebp;
-
- mov edi, dword ptr[esp + 0x30];
- mov dword ptr[esp + 0x30], ebp;
- mov ecx, ebp;
- call edi;
-
- cmp dword ptr[ebp + 8], 1;
- je calleeFramePopped;
- mov ebp, dword ptr[ebp];
-
- calleeFramePopped:
- mov ecx, dword ptr[ebp + 0x18];
- mov ebx, dword ptr[ebp + 0x10];
- mov dword ptr[ecx], ebx;
-
- add esp, 0x1c;
- pop ebx;
- pop edi;
- pop esi;
- pop ebp;
- ret;
- }
- }
-}
-
-#endif // COMPILER(MSVC)
-
-} // namespace JSC
-
-#endif // JITStubsX86_h
diff --git a/Source/JavaScriptCore/jit/JITStubsX86Common.h b/Source/JavaScriptCore/jit/JITStubsX86Common.h
deleted file mode 100644
index f102f3b25..000000000
--- a/Source/JavaScriptCore/jit/JITStubsX86Common.h
+++ /dev/null
@@ -1,148 +0,0 @@
-/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef JITStubsX86Common_h
-#define JITStubsX86Common_h
-
-#include "MacroAssembler.h"
-
-#if !CPU(X86) && !CPU(X86_64)
-#error "JITStubsX86Common.h should only be #included if CPU(X86) || CPU(X86_64)"
-#endif
-
-namespace JSC {
-
-#if COMPILER(GCC)
-
-#if USE(MASM_PROBE)
-// The following are offsets for MacroAssembler::ProbeContext fields accessed
-// by the ctiMasmProbeTrampoline stub.
-
-#if CPU(X86)
-#define PTR_SIZE 4
-#else // CPU(X86_64)
-#define PTR_SIZE 8
-#endif
-
-#define PROBE_PROBE_FUNCTION_OFFSET (0 * PTR_SIZE)
-#define PROBE_ARG1_OFFSET (1 * PTR_SIZE)
-#define PROBE_ARG2_OFFSET (2 * PTR_SIZE)
-
-#define PROBE_CPU_EAX_OFFSET (4 * PTR_SIZE)
-#define PROBE_CPU_EBX_OFFSET (5 * PTR_SIZE)
-#define PROBE_CPU_ECX_OFFSET (6 * PTR_SIZE)
-#define PROBE_CPU_EDX_OFFSET (7 * PTR_SIZE)
-#define PROBE_CPU_ESI_OFFSET (8 * PTR_SIZE)
-#define PROBE_CPU_EDI_OFFSET (9 * PTR_SIZE)
-#define PROBE_CPU_EBP_OFFSET (10 * PTR_SIZE)
-#define PROBE_CPU_ESP_OFFSET (11 * PTR_SIZE)
-
-#if CPU(X86)
-#define PROBE_FIRST_SPECIAL_OFFSET (12 * PTR_SIZE)
-#else // CPU(X86_64)
-#define PROBE_CPU_R8_OFFSET (12 * PTR_SIZE)
-#define PROBE_CPU_R9_OFFSET (13 * PTR_SIZE)
-#define PROBE_CPU_R10_OFFSET (14 * PTR_SIZE)
-#define PROBE_CPU_R11_OFFSET (15 * PTR_SIZE)
-#define PROBE_CPU_R12_OFFSET (16 * PTR_SIZE)
-#define PROBE_CPU_R13_OFFSET (17 * PTR_SIZE)
-#define PROBE_CPU_R14_OFFSET (18 * PTR_SIZE)
-#define PROBE_CPU_R15_OFFSET (19 * PTR_SIZE)
-#define PROBE_FIRST_SPECIAL_OFFSET (20 * PTR_SIZE)
-#endif // CPU(X86_64)
-
-#define PROBE_CPU_EIP_OFFSET (PROBE_FIRST_SPECIAL_OFFSET + (0 * PTR_SIZE))
-#define PROBE_CPU_EFLAGS_OFFSET (PROBE_FIRST_SPECIAL_OFFSET + (1 * PTR_SIZE))
-
-#if CPU(X86)
-#define PROBE_FIRST_XMM_OFFSET (PROBE_FIRST_SPECIAL_OFFSET + (4 * PTR_SIZE)) // After padding.
-#else // CPU(X86_64)
-#define PROBE_FIRST_XMM_OFFSET (PROBE_FIRST_SPECIAL_OFFSET + (2 * PTR_SIZE)) // After padding.
-#endif // CPU(X86_64)
-
-#define XMM_SIZE 16
-#define PROBE_CPU_XMM0_OFFSET (PROBE_FIRST_XMM_OFFSET + (0 * XMM_SIZE))
-#define PROBE_CPU_XMM1_OFFSET (PROBE_FIRST_XMM_OFFSET + (1 * XMM_SIZE))
-#define PROBE_CPU_XMM2_OFFSET (PROBE_FIRST_XMM_OFFSET + (2 * XMM_SIZE))
-#define PROBE_CPU_XMM3_OFFSET (PROBE_FIRST_XMM_OFFSET + (3 * XMM_SIZE))
-#define PROBE_CPU_XMM4_OFFSET (PROBE_FIRST_XMM_OFFSET + (4 * XMM_SIZE))
-#define PROBE_CPU_XMM5_OFFSET (PROBE_FIRST_XMM_OFFSET + (5 * XMM_SIZE))
-#define PROBE_CPU_XMM6_OFFSET (PROBE_FIRST_XMM_OFFSET + (6 * XMM_SIZE))
-#define PROBE_CPU_XMM7_OFFSET (PROBE_FIRST_XMM_OFFSET + (7 * XMM_SIZE))
-
-#define PROBE_SIZE (PROBE_CPU_XMM7_OFFSET + XMM_SIZE)
-
-// These ASSERTs remind you that if you change the layout of ProbeContext,
-// you need to change ctiMasmProbeTrampoline offsets above to match.
-#define PROBE_OFFSETOF(x) offsetof(struct MacroAssembler::ProbeContext, x)
-COMPILE_ASSERT(PROBE_OFFSETOF(probeFunction) == PROBE_PROBE_FUNCTION_OFFSET, ProbeContext_probeFunction_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(arg1) == PROBE_ARG1_OFFSET, ProbeContext_arg1_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(arg2) == PROBE_ARG2_OFFSET, ProbeContext_arg2_offset_matches_ctiMasmProbeTrampoline);
-
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.eax) == PROBE_CPU_EAX_OFFSET, ProbeContext_cpu_eax_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.ecx) == PROBE_CPU_ECX_OFFSET, ProbeContext_cpu_ecx_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.edx) == PROBE_CPU_EDX_OFFSET, ProbeContext_cpu_edx_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.ebx) == PROBE_CPU_EBX_OFFSET, ProbeContext_cpu_ebx_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.esp) == PROBE_CPU_ESP_OFFSET, ProbeContext_cpu_esp_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.ebp) == PROBE_CPU_EBP_OFFSET, ProbeContext_cpu_ebp_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.esi) == PROBE_CPU_ESI_OFFSET, ProbeContext_cpu_esi_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.edi) == PROBE_CPU_EDI_OFFSET, ProbeContext_cpu_edi_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.eip) == PROBE_CPU_EIP_OFFSET, ProbeContext_cpu_eip_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.eflags) == PROBE_CPU_EFLAGS_OFFSET, ProbeContext_cpu_eflags_offset_matches_ctiMasmProbeTrampoline);
-
-#if CPU(X86_64)
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r8) == PROBE_CPU_R8_OFFSET, ProbeContext_cpu_r8_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r9) == PROBE_CPU_R9_OFFSET, ProbeContext_cpu_r9_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r10) == PROBE_CPU_R10_OFFSET, ProbeContext_cpu_r10_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r11) == PROBE_CPU_R11_OFFSET, ProbeContext_cpu_r11_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r12) == PROBE_CPU_R12_OFFSET, ProbeContext_cpu_r12_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r13) == PROBE_CPU_R13_OFFSET, ProbeContext_cpu_r13_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r14) == PROBE_CPU_R14_OFFSET, ProbeContext_cpu_r14_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r15) == PROBE_CPU_R15_OFFSET, ProbeContext_cpu_r15_offset_matches_ctiMasmProbeTrampoline);
-#endif // CPU(X86_64)
-
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm0) == PROBE_CPU_XMM0_OFFSET, ProbeContext_cpu_xmm0_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm1) == PROBE_CPU_XMM1_OFFSET, ProbeContext_cpu_xmm1_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm2) == PROBE_CPU_XMM2_OFFSET, ProbeContext_cpu_xmm2_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm3) == PROBE_CPU_XMM3_OFFSET, ProbeContext_cpu_xmm3_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm4) == PROBE_CPU_XMM4_OFFSET, ProbeContext_cpu_xmm4_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm5) == PROBE_CPU_XMM5_OFFSET, ProbeContext_cpu_xmm5_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm6) == PROBE_CPU_XMM6_OFFSET, ProbeContext_cpu_xmm6_offset_matches_ctiMasmProbeTrampoline);
-COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm7) == PROBE_CPU_XMM7_OFFSET, ProbeContext_cpu_xmm7_offset_matches_ctiMasmProbeTrampoline);
-
-COMPILE_ASSERT(sizeof(MacroAssembler::ProbeContext) == PROBE_SIZE, ProbeContext_size_matches_ctiMasmProbeTrampoline);
-
-// Also double check that the xmm registers are 16 byte (128-bit) aligned as
-// required by the movdqa instruction used in the trampoline.
-COMPILE_ASSERT(!(PROBE_OFFSETOF(cpu.xmm0) % 16), ProbeContext_xmm0_offset_not_aligned_properly);
-#undef PROBE_OFFSETOF
-
-#endif // USE(MASM_PROBE)
-
-#endif // COMPILER(GCC)
-
-} // namespace JSC
-
-#endif // JITStubsX86Common_h
diff --git a/Source/JavaScriptCore/jit/JITStubsX86_64.h b/Source/JavaScriptCore/jit/JITStubsX86_64.h
deleted file mode 100644
index f2ed206ab..000000000
--- a/Source/JavaScriptCore/jit/JITStubsX86_64.h
+++ /dev/null
@@ -1,218 +0,0 @@
-/*
- * Copyright (C) 2008, 2009, 2013 Apple Inc. All rights reserved.
- * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
- * Copyright (C) Research In Motion Limited 2010, 2011. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
- * its contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef JITStubsX86_64_h
-#define JITStubsX86_64_h
-
-#include "JITStubsX86Common.h"
-
-#if !CPU(X86_64)
-#error "JITStubsX86_64.h should only be #included if CPU(X86_64)"
-#endif
-
-#if !USE(JSVALUE64)
-#error "JITStubsX86_64.h only implements USE(JSVALUE64)"
-#endif
-
-namespace JSC {
-
-#if COMPILER(GCC)
-
-#if USE(MASM_PROBE)
-asm (
-".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n"
-HIDE_SYMBOL(ctiMasmProbeTrampoline) "\n"
-SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n"
-
- "pushfq" "\n"
-
- // MacroAssembler::probe() has already generated code to store some values.
- // Together with the rflags pushed above, the top of stack now looks like
- // this:
- // esp[0 * ptrSize]: rflags
- // esp[1 * ptrSize]: return address / saved rip
- // esp[2 * ptrSize]: probeFunction
- // esp[3 * ptrSize]: arg1
- // esp[4 * ptrSize]: arg2
- // esp[5 * ptrSize]: saved rax
- // esp[6 * ptrSize]: saved rsp
-
- "movq %rsp, %rax" "\n"
- "subq $" STRINGIZE_VALUE_OF(PROBE_SIZE) ", %rsp" "\n"
-
- // The X86_64 ABI specifies that the worst-case stack alignment requirement
- // is 32 bytes.
- "andq $~0x1f, %rsp" "\n"
-
- "movq %rbp, " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%rsp)" "\n"
- "movq %rsp, %rbp" "\n" // Save the ProbeContext*.
-
- "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%rbp)" "\n"
- "movq %rdx, " STRINGIZE_VALUE_OF(PROBE_CPU_EDX_OFFSET) "(%rbp)" "\n"
- "movq %rbx, " STRINGIZE_VALUE_OF(PROBE_CPU_EBX_OFFSET) "(%rbp)" "\n"
- "movq %rsi, " STRINGIZE_VALUE_OF(PROBE_CPU_ESI_OFFSET) "(%rbp)" "\n"
- "movq %rdi, " STRINGIZE_VALUE_OF(PROBE_CPU_EDI_OFFSET) "(%rbp)" "\n"
-
- "movq 0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n"
- "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%rbp)" "\n"
- "movq 1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n"
- "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%rbp)" "\n"
- "movq 2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n"
- "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "(%rbp)" "\n"
- "movq 3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n"
- "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_ARG1_OFFSET) "(%rbp)" "\n"
- "movq 4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n"
- "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_ARG2_OFFSET) "(%rbp)" "\n"
- "movq 5 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n"
- "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%rbp)" "\n"
- "movq 6 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n"
- "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rbp)" "\n"
-
- "movq %r8, " STRINGIZE_VALUE_OF(PROBE_CPU_R8_OFFSET) "(%rbp)" "\n"
- "movq %r9, " STRINGIZE_VALUE_OF(PROBE_CPU_R9_OFFSET) "(%rbp)" "\n"
- "movq %r10, " STRINGIZE_VALUE_OF(PROBE_CPU_R10_OFFSET) "(%rbp)" "\n"
- "movq %r11, " STRINGIZE_VALUE_OF(PROBE_CPU_R11_OFFSET) "(%rbp)" "\n"
- "movq %r12, " STRINGIZE_VALUE_OF(PROBE_CPU_R12_OFFSET) "(%rbp)" "\n"
- "movq %r13, " STRINGIZE_VALUE_OF(PROBE_CPU_R13_OFFSET) "(%rbp)" "\n"
- "movq %r14, " STRINGIZE_VALUE_OF(PROBE_CPU_R14_OFFSET) "(%rbp)" "\n"
- "movq %r15, " STRINGIZE_VALUE_OF(PROBE_CPU_R15_OFFSET) "(%rbp)" "\n"
-
- "movdqa %xmm0, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM0_OFFSET) "(%rbp)" "\n"
- "movdqa %xmm1, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM1_OFFSET) "(%rbp)" "\n"
- "movdqa %xmm2, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM2_OFFSET) "(%rbp)" "\n"
- "movdqa %xmm3, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM3_OFFSET) "(%rbp)" "\n"
- "movdqa %xmm4, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM4_OFFSET) "(%rbp)" "\n"
- "movdqa %xmm5, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM5_OFFSET) "(%rbp)" "\n"
- "movdqa %xmm6, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM6_OFFSET) "(%rbp)" "\n"
- "movdqa %xmm7, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM7_OFFSET) "(%rbp)" "\n"
-
- "movq %rbp, %rdi" "\n" // the ProbeContext* arg.
- "call *" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "(%rbp)" "\n"
-
- // To enable probes to modify register state, we copy all registers
- // out of the ProbeContext before returning.
-
- "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EDX_OFFSET) "(%rbp), %rdx" "\n"
- "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EBX_OFFSET) "(%rbp), %rbx" "\n"
- "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ESI_OFFSET) "(%rbp), %rsi" "\n"
- "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EDI_OFFSET) "(%rbp), %rdi" "\n"
-
- "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R8_OFFSET) "(%rbp), %r8" "\n"
- "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R9_OFFSET) "(%rbp), %r9" "\n"
- "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R10_OFFSET) "(%rbp), %r10" "\n"
- "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R11_OFFSET) "(%rbp), %r11" "\n"
- "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R12_OFFSET) "(%rbp), %r12" "\n"
- "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R13_OFFSET) "(%rbp), %r13" "\n"
- "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R14_OFFSET) "(%rbp), %r14" "\n"
- "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R15_OFFSET) "(%rbp), %r15" "\n"
-
- "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM0_OFFSET) "(%rbp), %xmm0" "\n"
- "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM1_OFFSET) "(%rbp), %xmm1" "\n"
- "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM2_OFFSET) "(%rbp), %xmm2" "\n"
- "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM3_OFFSET) "(%rbp), %xmm3" "\n"
- "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM4_OFFSET) "(%rbp), %xmm4" "\n"
- "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM5_OFFSET) "(%rbp), %xmm5" "\n"
- "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM6_OFFSET) "(%rbp), %xmm6" "\n"
- "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM7_OFFSET) "(%rbp), %xmm7" "\n"
-
- // There are 6 more registers left to restore:
- // rax, rcx, rbp, rsp, rip, and rflags.
- // We need to handle these last few restores carefully because:
- //
- // 1. We need to push the return address on the stack for ret to use.
- // That means we need to write to the stack.
- // 2. The user probe function may have altered the restore value of esp to
- // point to the vicinity of one of the restore values for the remaining
- // registers left to be restored.
- // That means, for requirement 1, we may end up writing over some of the
- // restore values. We can check for this, and first copy the restore
- // values to a "safe area" on the stack before commencing with the action
- // for requirement 1.
- // 3. For requirement 2, we need to ensure that the "safe area" is
- //    protected from interrupt handlers overwriting it. Hence, the esp needs
- //    to be adjusted to include the "safe area" before we start copying
- //    the restore values.
-
- "movq %rbp, %rax" "\n"
- "addq $" STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) ", %rax" "\n"
- "cmpq %rax, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rbp)" "\n"
- "jg " SYMBOL_STRING(ctiMasmProbeTrampolineEnd) "\n"
-
- // Locate the "safe area" at 2x sizeof(ProbeContext) below where the new
- // rsp will be. This time we don't have to 32-byte align it because we're
- // not using it to store any xmm regs.
- "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rbp), %rax" "\n"
- "subq $2 * " STRINGIZE_VALUE_OF(PROBE_SIZE) ", %rax" "\n"
- "movq %rax, %rsp" "\n"
-
- "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%rbp), %rcx" "\n"
- "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%rax)" "\n"
- "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%rbp), %rcx" "\n"
- "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%rax)" "\n"
- "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%rbp), %rcx" "\n"
- "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%rax)" "\n"
- "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rbp), %rcx" "\n"
- "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rax)" "\n"
- "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%rbp), %rcx" "\n"
- "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%rax)" "\n"
- "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%rbp), %rcx" "\n"
- "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%rax)" "\n"
- "movq %rax, %rbp" "\n"
-
-SYMBOL_STRING(ctiMasmProbeTrampolineEnd) ":" "\n"
- "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rbp), %rax" "\n"
- "subq $5 * " STRINGIZE_VALUE_OF(PTR_SIZE) ", %rax" "\n"
- // At this point, %rsp should be < %rax.
-
- "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%rbp), %rcx" "\n"
- "movq %rcx, 0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax)" "\n"
- "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%rbp), %rcx" "\n"
- "movq %rcx, 1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax)" "\n"
- "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%rbp), %rcx" "\n"
- "movq %rcx, 2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax)" "\n"
- "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%rbp), %rcx" "\n"
- "movq %rcx, 3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax)" "\n"
- "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%rbp), %rcx" "\n"
- "movq %rcx, 4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax)" "\n"
- "movq %rax, %rsp" "\n"
-
- "popfq" "\n"
- "popq %rax" "\n"
- "popq %rcx" "\n"
- "popq %rbp" "\n"
- "ret" "\n"
-);
-#endif // USE(MASM_PROBE)
-
-#endif // COMPILER(GCC)
-
-} // namespace JSC
-
-#endif // JITStubsX86_64_h
diff --git a/Source/JavaScriptCore/jit/JITSubGenerator.cpp b/Source/JavaScriptCore/jit/JITSubGenerator.cpp
new file mode 100644
index 000000000..795c27584
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITSubGenerator.cpp
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JITSubGenerator.h"
+
+#include "ArithProfile.h"
+#include "JITMathIC.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+JITMathICInlineResult JITSubGenerator::generateInline(CCallHelpers& jit, MathICGenerationState& state, const ArithProfile* arithProfile)
+{
+ // We default to speculating int32.
+ ObservedType lhs = ObservedType().withInt32();
+ ObservedType rhs = ObservedType().withInt32();
+ if (arithProfile) {
+ lhs = arithProfile->lhsObservedType();
+ rhs = arithProfile->rhsObservedType();
+ }
+
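+ // Dispatch on the profiled operand types: if both operands have only ever been
+ // non-numbers, an inline IC is pointless; if both have only been numbers, emit a
+ // double-only fast path; if both have only been int32, emit an int32 subtract
+ // with an overflow check; any other mix falls back to the full snippet.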
+ if (lhs.isOnlyNonNumber() && rhs.isOnlyNonNumber())
+ return JITMathICInlineResult::DontGenerate;
+
+ if (lhs.isOnlyNumber() && rhs.isOnlyNumber()) {
+ if (!jit.supportsFloatingPoint())
+ return JITMathICInlineResult::DontGenerate;
+
+ if (!m_leftOperand.definitelyIsNumber())
+ state.slowPathJumps.append(jit.branchIfNotNumber(m_left, m_scratchGPR));
+ if (!m_rightOperand.definitelyIsNumber())
+ state.slowPathJumps.append(jit.branchIfNotNumber(m_right, m_scratchGPR));
+ state.slowPathJumps.append(jit.branchIfInt32(m_left));
+ state.slowPathJumps.append(jit.branchIfInt32(m_right));
+ jit.unboxDoubleNonDestructive(m_left, m_leftFPR, m_scratchGPR, m_scratchFPR);
+ jit.unboxDoubleNonDestructive(m_right, m_rightFPR, m_scratchGPR, m_scratchFPR);
+ jit.subDouble(m_rightFPR, m_leftFPR);
+ jit.boxDouble(m_leftFPR, m_result);
+
+ return JITMathICInlineResult::GeneratedFastPath;
+ }
+ if (lhs.isOnlyInt32() && rhs.isOnlyInt32()) {
+ ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());
+ state.slowPathJumps.append(jit.branchIfNotInt32(m_left));
+ state.slowPathJumps.append(jit.branchIfNotInt32(m_right));
+
+ jit.move(m_left.payloadGPR(), m_scratchGPR);
+ state.slowPathJumps.append(jit.branchSub32(CCallHelpers::Overflow, m_right.payloadGPR(), m_scratchGPR));
+
+ jit.boxInt32(m_scratchGPR, m_result);
+ return JITMathICInlineResult::GeneratedFastPath;
+ }
+
+ return JITMathICInlineResult::GenerateFullSnippet;
+}
+
+bool JITSubGenerator::generateFastPath(CCallHelpers& jit, CCallHelpers::JumpList& endJumpList, CCallHelpers::JumpList& slowPathJumpList, const ArithProfile* arithProfile, bool shouldEmitProfiling)
+{
+ ASSERT(m_scratchGPR != InvalidGPRReg);
+ ASSERT(m_scratchGPR != m_left.payloadGPR());
+ ASSERT(m_scratchGPR != m_right.payloadGPR());
+#if USE(JSVALUE32_64)
+ ASSERT(m_scratchGPR != m_left.tagGPR());
+ ASSERT(m_scratchGPR != m_right.tagGPR());
+ ASSERT(m_scratchFPR != InvalidFPRReg);
+#endif
+
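+ // Fast-path layout: the int32 case comes first (a subtract with an overflow
+ // check), then control falls through to the double case. Whichever operand is
+ // still an int32 gets converted to double, both sides are unboxed, the difference
+ // is computed in m_leftFPR, and the result is boxed into m_result. Operands that
+ // turn out not to be numbers bail out via slowPathJumpList.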
+ CCallHelpers::Jump leftNotInt = jit.branchIfNotInt32(m_left);
+ CCallHelpers::Jump rightNotInt = jit.branchIfNotInt32(m_right);
+
+ jit.move(m_left.payloadGPR(), m_scratchGPR);
+ slowPathJumpList.append(jit.branchSub32(CCallHelpers::Overflow, m_right.payloadGPR(), m_scratchGPR));
+
+ jit.boxInt32(m_scratchGPR, m_result);
+
+ endJumpList.append(jit.jump());
+
+ if (!jit.supportsFloatingPoint()) {
+ slowPathJumpList.append(leftNotInt);
+ slowPathJumpList.append(rightNotInt);
+ return true;
+ }
+
+ leftNotInt.link(&jit);
+ if (!m_leftOperand.definitelyIsNumber())
+ slowPathJumpList.append(jit.branchIfNotNumber(m_left, m_scratchGPR));
+ if (!m_rightOperand.definitelyIsNumber())
+ slowPathJumpList.append(jit.branchIfNotNumber(m_right, m_scratchGPR));
+
+ jit.unboxDoubleNonDestructive(m_left, m_leftFPR, m_scratchGPR, m_scratchFPR);
+ CCallHelpers::Jump rightIsDouble = jit.branchIfNotInt32(m_right);
+
+ jit.convertInt32ToDouble(m_right.payloadGPR(), m_rightFPR);
+ CCallHelpers::Jump rightWasInteger = jit.jump();
+
+ rightNotInt.link(&jit);
+ if (!m_rightOperand.definitelyIsNumber())
+ slowPathJumpList.append(jit.branchIfNotNumber(m_right, m_scratchGPR));
+
+ jit.convertInt32ToDouble(m_left.payloadGPR(), m_leftFPR);
+
+ rightIsDouble.link(&jit);
+ jit.unboxDoubleNonDestructive(m_right, m_rightFPR, m_scratchGPR, m_scratchFPR);
+
+ rightWasInteger.link(&jit);
+
+ jit.subDouble(m_rightFPR, m_leftFPR);
+ if (arithProfile && shouldEmitProfiling)
+ arithProfile->emitSetDouble(jit);
+
+ jit.boxDouble(m_leftFPR, m_result);
+
+ return true;
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITSubGenerator.h b/Source/JavaScriptCore/jit/JITSubGenerator.h
new file mode 100644
index 000000000..561f6334a
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITSubGenerator.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "CCallHelpers.h"
+#include "JITMathICInlineResult.h"
+#include "SnippetOperand.h"
+
+namespace JSC {
+
+struct MathICGenerationState;
+
+class JITSubGenerator {
+public:
+ JITSubGenerator() { }
+
+ JITSubGenerator(SnippetOperand leftOperand, SnippetOperand rightOperand,
+ JSValueRegs result, JSValueRegs left, JSValueRegs right,
+ FPRReg leftFPR, FPRReg rightFPR, GPRReg scratchGPR, FPRReg scratchFPR)
+ : m_leftOperand(leftOperand)
+ , m_rightOperand(rightOperand)
+ , m_result(result)
+ , m_left(left)
+ , m_right(right)
+ , m_leftFPR(leftFPR)
+ , m_rightFPR(rightFPR)
+ , m_scratchGPR(scratchGPR)
+ , m_scratchFPR(scratchFPR)
+ { }
+
+ JITMathICInlineResult generateInline(CCallHelpers&, MathICGenerationState&, const ArithProfile*);
+ bool generateFastPath(CCallHelpers&, CCallHelpers::JumpList& endJumpList, CCallHelpers::JumpList& slowPathJumpList, const ArithProfile*, bool shouldEmitProfiling);
+
+ static bool isLeftOperandValidConstant(SnippetOperand) { return false; }
+ static bool isRightOperandValidConstant(SnippetOperand) { return false; }
+
+private:
+ SnippetOperand m_leftOperand;
+ SnippetOperand m_rightOperand;
+ JSValueRegs m_result;
+ JSValueRegs m_left;
+ JSValueRegs m_right;
+ FPRReg m_leftFPR;
+ FPRReg m_rightFPR;
+ GPRReg m_scratchGPR;
+ FPRReg m_scratchFPR;
+};
+
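+// A rough usage sketch; the register and operand names below are placeholders, and
+// the real callers (the baseline JIT arithmetic snippets and the math ICs) pick
+// registers from their own allocation and route the slow path themselves:
+//
+//     JITSubGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
+//         leftFPR, rightFPR, scratchGPR, scratchFPR);
+//     CCallHelpers::JumpList endJumpList, slowPathJumpList;
+//     if (gen.generateFastPath(jit, endJumpList, slowPathJumpList, arithProfile, shouldEmitProfiling)) {
+//         endJumpList.link(&jit);
+//         // ... link slowPathJumpList to an out-of-line call into the slow-path operation.
+//     }
+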
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITThunks.cpp b/Source/JavaScriptCore/jit/JITThunks.cpp
index 4c48163e9..de48e62dd 100644
--- a/Source/JavaScriptCore/jit/JITThunks.cpp
+++ b/Source/JavaScriptCore/jit/JITThunks.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013, 2015-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,15 +28,15 @@
#if ENABLE(JIT)
-#include "Executable.h"
#include "JIT.h"
+#include "JSCInlines.h"
+#include "LLIntData.h"
#include "VM.h"
-#include "Operations.h"
namespace JSC {
JITThunks::JITThunks()
- : m_hostFunctionStubMap(adoptPtr(new HostFunctionStubMap))
+ : m_hostFunctionStubMap(std::make_unique<HostFunctionStubMap>())
{
}
@@ -46,24 +46,33 @@ JITThunks::~JITThunks()
MacroAssemblerCodePtr JITThunks::ctiNativeCall(VM* vm)
{
-#if ENABLE(LLINT)
if (!vm->canUseJIT())
return MacroAssemblerCodePtr::createLLIntCodePtr(llint_native_call_trampoline);
-#endif
return ctiStub(vm, nativeCallGenerator).code();
}
+
MacroAssemblerCodePtr JITThunks::ctiNativeConstruct(VM* vm)
{
-#if ENABLE(LLINT)
if (!vm->canUseJIT())
return MacroAssemblerCodePtr::createLLIntCodePtr(llint_native_construct_trampoline);
-#endif
return ctiStub(vm, nativeConstructGenerator).code();
}
+MacroAssemblerCodePtr JITThunks::ctiNativeTailCall(VM* vm)
+{
+ ASSERT(vm->canUseJIT());
+ return ctiStub(vm, nativeTailCallGenerator).code();
+}
+
+MacroAssemblerCodePtr JITThunks::ctiNativeTailCallWithoutSavedTags(VM* vm)
+{
+ ASSERT(vm->canUseJIT());
+ return ctiStub(vm, nativeTailCallWithoutSavedTagsGenerator).code();
+}
+
MacroAssemblerCodeRef JITThunks::ctiStub(VM* vm, ThunkGenerator generator)
{
- Locker locker(m_lock);
+ LockHolder locker(m_lock);
CTIStubMap::AddResult entry = m_ctiStubMap.add(generator, MacroAssemblerCodeRef());
if (entry.isNewEntry) {
// Compilation thread can only retrieve existing entries.
@@ -73,42 +82,47 @@ MacroAssemblerCodeRef JITThunks::ctiStub(VM* vm, ThunkGenerator generator)
return entry.iterator->value;
}
-NativeExecutable* JITThunks::hostFunctionStub(VM* vm, NativeFunction function, NativeFunction constructor)
+void JITThunks::finalize(Handle<Unknown> handle, void*)
{
- ASSERT(!isCompilationThread());
-
- if (NativeExecutable* nativeExecutable = m_hostFunctionStubMap->get(std::make_pair(function, constructor)))
- return nativeExecutable;
+ auto* nativeExecutable = static_cast<NativeExecutable*>(handle.get().asCell());
+ weakRemove(*m_hostFunctionStubMap, std::make_tuple(nativeExecutable->function(), nativeExecutable->constructor(), nativeExecutable->name()), nativeExecutable);
+}
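+
+// finalize() is the WeakHandleOwner callback for the Weak<NativeExecutable>
+// entries registered by hostFunctionStub() below (note the `this` passed as the
+// weak handle owner there): when a NativeExecutable is garbage collected, its
+// (function, constructor, name) key is pruned from m_hostFunctionStubMap.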
- NativeExecutable* nativeExecutable = NativeExecutable::create(*vm, JIT::compileCTINativeCall(vm, function), function, MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct(vm)), constructor, NoIntrinsic);
- weakAdd(*m_hostFunctionStubMap, std::make_pair(function, constructor), Weak<NativeExecutable>(nativeExecutable));
- return nativeExecutable;
+NativeExecutable* JITThunks::hostFunctionStub(VM* vm, NativeFunction function, NativeFunction constructor, const String& name)
+{
+ return hostFunctionStub(vm, function, constructor, nullptr, NoIntrinsic, nullptr, name);
}
-NativeExecutable* JITThunks::hostFunctionStub(VM* vm, NativeFunction function, ThunkGenerator generator, Intrinsic intrinsic)
+NativeExecutable* JITThunks::hostFunctionStub(VM* vm, NativeFunction function, NativeFunction constructor, ThunkGenerator generator, Intrinsic intrinsic, const DOMJIT::Signature* signature, const String& name)
{
ASSERT(!isCompilationThread());
+ ASSERT(vm->canUseJIT());
- if (NativeExecutable* nativeExecutable = m_hostFunctionStubMap->get(std::make_pair(function, &callHostFunctionAsConstructor)))
+ if (NativeExecutable* nativeExecutable = m_hostFunctionStubMap->get(std::make_tuple(function, constructor, name)))
return nativeExecutable;
- MacroAssemblerCodeRef code;
+ RefPtr<JITCode> forCall;
if (generator) {
- if (vm->canUseJIT())
- code = generator(vm);
- else
- code = MacroAssemblerCodeRef();
+ MacroAssemblerCodeRef entry = generator(vm);
+ forCall = adoptRef(new DirectJITCode(entry, entry.code(), JITCode::HostCallThunk));
} else
- code = JIT::compileCTINativeCall(vm, function);
-
- NativeExecutable* nativeExecutable = NativeExecutable::create(*vm, code, function, MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct(vm)), callHostFunctionAsConstructor, intrinsic);
- weakAdd(*m_hostFunctionStubMap, std::make_pair(function, &callHostFunctionAsConstructor), Weak<NativeExecutable>(nativeExecutable));
+ forCall = adoptRef(new NativeJITCode(JIT::compileCTINativeCall(vm, function), JITCode::HostCallThunk));
+
+ Ref<JITCode> forConstruct = adoptRef(*new NativeJITCode(MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct(vm)), JITCode::HostCallThunk));
+
+ NativeExecutable* nativeExecutable = NativeExecutable::create(*vm, forCall.releaseNonNull(), function, WTFMove(forConstruct), constructor, intrinsic, signature, name);
+ weakAdd(*m_hostFunctionStubMap, std::make_tuple(function, constructor, name), Weak<NativeExecutable>(nativeExecutable, this));
return nativeExecutable;
}
+NativeExecutable* JITThunks::hostFunctionStub(VM* vm, NativeFunction function, ThunkGenerator generator, Intrinsic intrinsic, const String& name)
+{
+ return hostFunctionStub(vm, function, callHostFunctionAsConstructor, generator, intrinsic, nullptr, name);
+}
+
void JITThunks::clearHostFunctionStubs()
{
- m_hostFunctionStubMap.clear();
+ m_hostFunctionStubMap = nullptr;
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/jit/JITThunks.h b/Source/JavaScriptCore/jit/JITThunks.h
index 97e7ecd6b..addcf230c 100644
--- a/Source/JavaScriptCore/jit/JITThunks.h
+++ b/Source/JavaScriptCore/jit/JITThunks.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,60 +23,90 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef JITThunks_h
-#define JITThunks_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(JIT)
#include "CallData.h"
#include "Intrinsic.h"
-#include "LowLevelInterpreter.h"
#include "MacroAssemblerCodeRef.h"
#include "ThunkGenerator.h"
#include "Weak.h"
-#include "WeakInlines.h"
+#include "WeakHandleOwner.h"
+#include <tuple>
#include <wtf/HashMap.h>
-#include <wtf/OwnPtr.h>
-#include <wtf/RefPtr.h>
#include <wtf/ThreadingPrimitives.h>
+#include <wtf/text/StringHash.h>
namespace JSC {
+namespace DOMJIT {
+class Signature;
+}
class VM;
class NativeExecutable;
-class JITThunks {
+class JITThunks final : private WeakHandleOwner {
+ WTF_MAKE_FAST_ALLOCATED;
public:
JITThunks();
- ~JITThunks();
+ virtual ~JITThunks();
MacroAssemblerCodePtr ctiNativeCall(VM*);
MacroAssemblerCodePtr ctiNativeConstruct(VM*);
+ MacroAssemblerCodePtr ctiNativeTailCall(VM*);
+ MacroAssemblerCodePtr ctiNativeTailCallWithoutSavedTags(VM*);
MacroAssemblerCodeRef ctiStub(VM*, ThunkGenerator);
- NativeExecutable* hostFunctionStub(VM*, NativeFunction, NativeFunction constructor);
- NativeExecutable* hostFunctionStub(VM*, NativeFunction, ThunkGenerator, Intrinsic);
+ NativeExecutable* hostFunctionStub(VM*, NativeFunction, NativeFunction constructor, const String& name);
+ NativeExecutable* hostFunctionStub(VM*, NativeFunction, NativeFunction constructor, ThunkGenerator, Intrinsic, const DOMJIT::Signature*, const String& name);
+ NativeExecutable* hostFunctionStub(VM*, NativeFunction, ThunkGenerator, Intrinsic, const String& name);
void clearHostFunctionStubs();
private:
- // Main thread can hold this lock for a while, so use an adaptive mutex.
- typedef Mutex Lock;
- typedef MutexLocker Locker;
+ void finalize(Handle<Unknown>, void* context) override;
typedef HashMap<ThunkGenerator, MacroAssemblerCodeRef> CTIStubMap;
CTIStubMap m_ctiStubMap;
- typedef HashMap<std::pair<NativeFunction, NativeFunction>, Weak<NativeExecutable>> HostFunctionStubMap;
- OwnPtr<HostFunctionStubMap> m_hostFunctionStubMap;
+
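+ // The map key is the (call function, construct function, display name) triple:
+ // host functions that share the same C function pointers but are exposed under
+ // different names get distinct NativeExecutables (see hostFunctionStub()).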
+ typedef std::tuple<NativeFunction, NativeFunction, String> HostFunctionKey;
+
+ struct HostFunctionHash {
+ static unsigned hash(const HostFunctionKey& key)
+ {
+ unsigned hash = WTF::pairIntHash(hashPointer(std::get<0>(key)), hashPointer(std::get<1>(key)));
+ if (!std::get<2>(key).isNull())
+ hash = WTF::pairIntHash(hash, DefaultHash<String>::Hash::hash(std::get<2>(key)));
+ return hash;
+ }
+ static bool equal(const HostFunctionKey& a, const HostFunctionKey& b)
+ {
+ return (std::get<0>(a) == std::get<0>(b)) && (std::get<1>(a) == std::get<1>(b)) && (std::get<2>(a) == std::get<2>(b));
+ }
+ static const bool safeToCompareToEmptyOrDeleted = true;
+
+ private:
+ static inline unsigned hashPointer(NativeFunction p)
+ {
+ return DefaultHash<NativeFunction>::Hash::hash(p);
+ }
+ };
+
+ struct HostFunctionHashTrait : WTF::GenericHashTraits<HostFunctionKey> {
+ static const bool emptyValueIsZero = true;
+ static EmptyValueType emptyValue() { return std::make_tuple(nullptr, nullptr, String()); }
+
+ static void constructDeletedValue(HostFunctionKey& slot) { std::get<0>(slot) = reinterpret_cast<NativeFunction>(-1); }
+ static bool isDeletedValue(const HostFunctionKey& value) { return std::get<0>(value) == reinterpret_cast<NativeFunction>(-1); }
+ };
+
+ typedef HashMap<HostFunctionKey, Weak<NativeExecutable>, HostFunctionHash, HostFunctionHashTrait> HostFunctionStubMap;
+ std::unique_ptr<HostFunctionStubMap> m_hostFunctionStubMap;
Lock m_lock;
};
} // namespace JSC
#endif // ENABLE(JIT)
-
-#endif // JITThunks_h
-
diff --git a/Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.cpp b/Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.cpp
index c83125da4..471af9b28 100644
--- a/Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.cpp
+++ b/Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.cpp
@@ -29,21 +29,22 @@
#if ENABLE(DFG_JIT)
#include "CodeBlock.h"
-#include "Executable.h"
+#include "JSCInlines.h"
namespace JSC {
JITToDFGDeferredCompilationCallback::JITToDFGDeferredCompilationCallback() { }
JITToDFGDeferredCompilationCallback::~JITToDFGDeferredCompilationCallback() { }
-PassRefPtr<JITToDFGDeferredCompilationCallback> JITToDFGDeferredCompilationCallback::create()
+Ref<JITToDFGDeferredCompilationCallback> JITToDFGDeferredCompilationCallback::create()
{
- return adoptRef(new JITToDFGDeferredCompilationCallback());
+ return adoptRef(*new JITToDFGDeferredCompilationCallback());
}
void JITToDFGDeferredCompilationCallback::compilationDidBecomeReadyAsynchronously(
- CodeBlock* codeBlock)
+ CodeBlock* codeBlock, CodeBlock* profiledDFGCodeBlock)
{
+ ASSERT_UNUSED(profiledDFGCodeBlock, !profiledDFGCodeBlock);
ASSERT(codeBlock->alternative()->jitType() == JITCode::BaselineJIT);
if (Options::verboseOSR())
@@ -53,17 +54,20 @@ void JITToDFGDeferredCompilationCallback::compilationDidBecomeReadyAsynchronousl
}
void JITToDFGDeferredCompilationCallback::compilationDidComplete(
- CodeBlock* codeBlock, CompilationResult result)
+ CodeBlock* codeBlock, CodeBlock* profiledDFGCodeBlock, CompilationResult result)
{
+ ASSERT(!profiledDFGCodeBlock);
ASSERT(codeBlock->alternative()->jitType() == JITCode::BaselineJIT);
if (Options::verboseOSR())
dataLog("Optimizing compilation of ", *codeBlock, " result: ", result, "\n");
if (result == CompilationSuccessful)
- codeBlock->install();
+ codeBlock->ownerScriptExecutable()->installCode(codeBlock);
codeBlock->alternative()->setOptimizationThresholdBasedOnCompilationResult(result);
+
+ DeferredCompilationCallback::compilationDidComplete(codeBlock, profiledDFGCodeBlock, result);
}
} // JSC
diff --git a/Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.h b/Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.h
index cf1c0770c..ec53d7597 100644
--- a/Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.h
+++ b/Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.h
@@ -23,15 +23,11 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef JITToDFGDeferredCompilationCallback_h
-#define JITToDFGDeferredCompilationCallback_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(DFG_JIT)
#include "DeferredCompilationCallback.h"
-#include <wtf/PassRefPtr.h>
namespace JSC {
@@ -44,15 +40,12 @@ protected:
public:
virtual ~JITToDFGDeferredCompilationCallback();
- static PassRefPtr<JITToDFGDeferredCompilationCallback> create();
+ static Ref<JITToDFGDeferredCompilationCallback> create();
- virtual void compilationDidBecomeReadyAsynchronously(CodeBlock*) override;
- virtual void compilationDidComplete(CodeBlock*, CompilationResult) override;
+ void compilationDidBecomeReadyAsynchronously(CodeBlock*, CodeBlock* profiledDFGCodeBlock) override;
+ void compilationDidComplete(CodeBlock*, CodeBlock* profiledDFGCodeBlock, CompilationResult) override;
};
} // namespace JSC
#endif // ENABLE(DFG_JIT)
-
-#endif // JITToDFGDeferredCompilationCallback_h
-
diff --git a/Source/JavaScriptCore/jit/JITWorklist.cpp b/Source/JavaScriptCore/jit/JITWorklist.cpp
new file mode 100644
index 000000000..f64596560
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITWorklist.cpp
@@ -0,0 +1,330 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "JITWorklist.h"
+
+#if ENABLE(JIT)
+
+#include "JIT.h"
+#include "JSCInlines.h"
+#include "VMInlines.h"
+
+namespace JSC {
+
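+// A Plan splits a baseline compile in two: compileInThread() runs the thread-safe
+// compileWithoutLinking() step (on the worklist thread, or on the main thread via
+// compileNow()), while finalize() links the result and installs the code and is
+// meant to run on the main thread (see finalizePlans()).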
+class JITWorklist::Plan : public ThreadSafeRefCounted<JITWorklist::Plan> {
+public:
+ Plan(CodeBlock* codeBlock, unsigned loopOSREntryBytecodeOffset)
+ : m_codeBlock(codeBlock)
+ , m_jit(codeBlock->vm(), codeBlock, loopOSREntryBytecodeOffset)
+ {
+ m_jit.doMainThreadPreparationBeforeCompile();
+ }
+
+ void compileInThread()
+ {
+ m_jit.compileWithoutLinking(JITCompilationCanFail);
+
+ LockHolder locker(m_lock);
+ m_isFinishedCompiling = true;
+ }
+
+ void finalize()
+ {
+ CompilationResult result = m_jit.link();
+ switch (result) {
+ case CompilationFailed:
+ CODEBLOCK_LOG_EVENT(m_codeBlock, "delayJITCompile", ("compilation failed"));
+ if (Options::verboseOSR())
+ dataLogF(" JIT compilation failed.\n");
+ m_codeBlock->dontJITAnytimeSoon();
+ m_codeBlock->m_didFailJITCompilation = true;
+ return;
+ case CompilationSuccessful:
+ if (Options::verboseOSR())
+ dataLogF(" JIT compilation successful.\n");
+ m_codeBlock->ownerScriptExecutable()->installCode(m_codeBlock);
+ m_codeBlock->jitSoon();
+ return;
+ default:
+ RELEASE_ASSERT_NOT_REACHED();
+ return;
+ }
+ }
+
+ CodeBlock* codeBlock() { return m_codeBlock; }
+ VM* vm() { return m_codeBlock->vm(); }
+
+ bool isFinishedCompiling()
+ {
+ LockHolder locker(m_lock);
+ return m_isFinishedCompiling;
+ }
+
+ static void compileNow(CodeBlock* codeBlock, unsigned loopOSREntryBytecodeOffset)
+ {
+ Plan plan(codeBlock, loopOSREntryBytecodeOffset);
+ plan.compileInThread();
+ plan.finalize();
+ }
+
+private:
+ CodeBlock* m_codeBlock;
+ JIT m_jit;
+ Lock m_lock;
+ bool m_isFinishedCompiling { false };
+};
+
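+// The worker below follows the WTF AutomaticThread protocol: poll() is called with
+// the worklist lock held and either waits (empty queue) or claims the entire queue,
+// and work() then compiles each claimed plan without holding the lock, notifying
+// m_condition after every plan so that completeAllForVM() can wake up and finalize it.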
+class JITWorklist::Thread : public AutomaticThread {
+public:
+ Thread(const AbstractLocker& locker, JITWorklist& worklist)
+ : AutomaticThread(locker, worklist.m_lock, worklist.m_condition)
+ , m_worklist(worklist)
+ {
+ m_worklist.m_numAvailableThreads++;
+ }
+
+protected:
+ PollResult poll(const AbstractLocker&) override
+ {
+ RELEASE_ASSERT(m_worklist.m_numAvailableThreads);
+
+ if (m_worklist.m_queue.isEmpty())
+ return PollResult::Wait;
+
+ m_myPlans = WTFMove(m_worklist.m_queue);
+ m_worklist.m_numAvailableThreads--;
+ return PollResult::Work;
+ }
+
+ WorkResult work() override
+ {
+ RELEASE_ASSERT(!m_myPlans.isEmpty());
+
+ for (RefPtr<Plan>& plan : m_myPlans) {
+ plan->compileInThread();
+ plan = nullptr;
+
+ // Make sure that the main thread realizes that we just compiled something. Notifying
+ // a condition is basically free if nobody is waiting.
+ LockHolder locker(*m_worklist.m_lock);
+ m_worklist.m_condition->notifyAll(locker);
+ }
+
+ m_myPlans.clear();
+
+ LockHolder locker(*m_worklist.m_lock);
+ m_worklist.m_numAvailableThreads++;
+ return WorkResult::Continue;
+ }
+
+private:
+ JITWorklist& m_worklist;
+ Plans m_myPlans;
+};
+
+JITWorklist::JITWorklist()
+ : m_lock(Box<Lock>::create())
+ , m_condition(AutomaticThreadCondition::create())
+{
+ LockHolder locker(*m_lock);
+ m_thread = new Thread(locker, *this);
+}
+
+JITWorklist::~JITWorklist()
+{
+ UNREACHABLE_FOR_PLATFORM();
+}
+
+bool JITWorklist::completeAllForVM(VM& vm)
+{
+ bool result = false;
+ DeferGC deferGC(vm.heap);
+ for (;;) {
+ Vector<RefPtr<Plan>, 32> myPlans;
+ {
+ LockHolder locker(*m_lock);
+ for (;;) {
+ bool didFindUnfinishedPlan = false;
+ m_plans.removeAllMatching(
+ [&] (RefPtr<Plan>& plan) {
+ if (plan->vm() != &vm)
+ return false;
+ if (!plan->isFinishedCompiling()) {
+ didFindUnfinishedPlan = true;
+ return false;
+ }
+ myPlans.append(WTFMove(plan));
+ return true;
+ });
+
+ // If we found plans then we should finalize them now.
+ if (!myPlans.isEmpty())
+ break;
+
+ // If we don't find plans, then we're either done or we need to wait, depending on
+ // whether we found some unfinished plans.
+ if (!didFindUnfinishedPlan)
+ return result;
+
+ m_condition->wait(*m_lock);
+ }
+ }
+
+ RELEASE_ASSERT(!myPlans.isEmpty());
+ result = true;
+ finalizePlans(myPlans);
+ }
+}
+
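+// The non-blocking counterpart of completeAllForVM(): finalize whatever plans for
+// this VM have already finished compiling, but never wait for the worklist thread.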
+void JITWorklist::poll(VM& vm)
+{
+ DeferGC deferGC(vm.heap);
+ Plans myPlans;
+ {
+ LockHolder locker(*m_lock);
+ m_plans.removeAllMatching(
+ [&] (RefPtr<Plan>& plan) {
+ if (plan->vm() != &vm)
+ return false;
+ if (!plan->isFinishedCompiling())
+ return false;
+ myPlans.append(WTFMove(plan));
+ return true;
+ });
+ }
+
+ finalizePlans(myPlans);
+}
+
+void JITWorklist::compileLater(CodeBlock* codeBlock, unsigned loopOSREntryBytecodeOffset)
+{
+ DeferGC deferGC(codeBlock->vm()->heap);
+ RELEASE_ASSERT(codeBlock->jitType() == JITCode::InterpreterThunk);
+
+ if (codeBlock->m_didFailJITCompilation) {
+ codeBlock->dontJITAnytimeSoon();
+ return;
+ }
+
+ if (!Options::useConcurrentJIT()) {
+ Plan::compileNow(codeBlock, loopOSREntryBytecodeOffset);
+ return;
+ }
+
+ codeBlock->jitSoon();
+
+ {
+ LockHolder locker(*m_lock);
+
+ if (m_planned.contains(codeBlock))
+ return;
+
+ if (m_numAvailableThreads) {
+ m_planned.add(codeBlock);
+ RefPtr<Plan> plan = adoptRef(new Plan(codeBlock, loopOSREntryBytecodeOffset));
+ m_plans.append(plan);
+ m_queue.append(plan);
+ m_condition->notifyAll(locker);
+ return;
+ }
+ }
+
+ // Compiling on the main thread if the helper thread isn't available is a defense against this
+ // pathology:
+ //
+ // 1) Do something that is allowed to take a while, like load a giant piece of initialization
+ // code. This plans the compile of the init code, but doesn't finish it. It will take a
+ // while.
+ //
+ // 2) Do something that is supposed to be quick. Now all baseline compiles, and so all DFG and
+ //    FTL compiles, of everything are blocked on the long-running baseline compile of that
+ //    initialization code.
+ //
+ // The single-threaded concurrent JIT tends to convoy everything while at the same time
+ // postponing when the convoy happens, which makes the convoy delays less predictable. This
+ // works around the issue: if the concurrent JIT thread is convoyed, we revert to main thread
+ // compiles. This is probably not as good as if we had multiple JIT threads. Maybe we can do
+ // that someday.
+ Plan::compileNow(codeBlock, loopOSREntryBytecodeOffset);
+}
+
+void JITWorklist::compileNow(CodeBlock* codeBlock, unsigned loopOSREntryBytecodeOffset)
+{
+ DeferGC deferGC(codeBlock->vm()->heap);
+ if (codeBlock->jitType() != JITCode::InterpreterThunk)
+ return;
+
+ bool isPlanned;
+ {
+ LockHolder locker(*m_lock);
+ isPlanned = m_planned.contains(codeBlock);
+ }
+
+ if (isPlanned) {
+ RELEASE_ASSERT(Options::useConcurrentJIT());
+ // This is expensive, but probably good enough.
+ completeAllForVM(*codeBlock->vm());
+ }
+
+ // Now it might be compiled!
+ if (codeBlock->jitType() != JITCode::InterpreterThunk)
+ return;
+
+ // We do this in case we had previously attempted, and then failed, to compile with the
+ // baseline JIT.
+ codeBlock->resetJITData();
+
+ // OK, just compile it.
+ JIT::compile(codeBlock->vm(), codeBlock, JITCompilationMustSucceed, loopOSREntryBytecodeOffset);
+ codeBlock->ownerScriptExecutable()->installCode(codeBlock);
+}
+
+void JITWorklist::finalizePlans(Plans& myPlans)
+{
+ for (RefPtr<Plan>& plan : myPlans) {
+ plan->finalize();
+
+ LockHolder locker(*m_lock);
+ m_planned.remove(plan->codeBlock());
+ }
+}
+
+JITWorklist* JITWorklist::instance()
+{
+ static JITWorklist* worklist;
+ static std::once_flag once;
+ std::call_once(
+ once,
+ [] {
+ worklist = new JITWorklist();
+ });
+ return worklist;
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
diff --git a/Source/JavaScriptCore/jit/JITWorklist.h b/Source/JavaScriptCore/jit/JITWorklist.h
new file mode 100644
index 000000000..7ea1c1e91
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITWorklist.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include <wtf/AutomaticThread.h>
+#include <wtf/FastMalloc.h>
+#include <wtf/HashSet.h>
+#include <wtf/Lock.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/RefPtr.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+class CodeBlock;
+class VM;
+
+class JITWorklist {
+ WTF_MAKE_NONCOPYABLE(JITWorklist);
+ WTF_MAKE_FAST_ALLOCATED;
+
+ class Plan;
+ typedef Vector<RefPtr<Plan>, 32> Plans;
+
+public:
+ ~JITWorklist();
+
+ bool completeAllForVM(VM&); // Return true if any JIT work happened.
+ void poll(VM&);
+
+ void compileLater(CodeBlock*, unsigned loopOSREntryBytecodeOffset = 0);
+
+ void compileNow(CodeBlock*, unsigned loopOSREntryBytecodeOffset = 0);
+
+ static JITWorklist* instance();
+
+private:
+ JITWorklist();
+
+ class Thread;
+ friend class Thread;
+
+ void finalizePlans(Plans&);
+
+ Plans m_queue;
+ Plans m_plans;
+ HashSet<CodeBlock*> m_planned;
+
+ Box<Lock> m_lock;
+ RefPtr<AutomaticThreadCondition> m_condition; // We use One True Condition for everything because that's easier.
+ RefPtr<AutomaticThread> m_thread;
+
+ unsigned m_numAvailableThreads { 0 };
+};
+
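+// A small usage sketch; the call sites are illustrative, the real ones live in the
+// tier-up machinery that moves a CodeBlock from the LLInt to the baseline JIT:
+//
+//     JITWorklist::instance()->compileLater(codeBlock); // queue a baseline compile
+//     ...
+//     JITWorklist::instance()->poll(vm);                // finalize any finished plans
+//     JITWorklist::instance()->compileNow(codeBlock);   // or make sure it is compiled now
+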
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITWriteBarrier.h b/Source/JavaScriptCore/jit/JITWriteBarrier.h
deleted file mode 100644
index ca2ca6eb2..000000000
--- a/Source/JavaScriptCore/jit/JITWriteBarrier.h
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
- * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
- * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
- * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
- * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
- * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
- * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
- * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
- * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
- * THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef JITWriteBarrier_h
-#define JITWriteBarrier_h
-
-#if ENABLE(JIT)
-
-#include "MacroAssembler.h"
-#include "SlotVisitor.h"
-#include "UnusedPointer.h"
-#include "WriteBarrier.h"
-
-namespace JSC {
-
-class JSCell;
-class VM;
-
-// Needs to be even to appease some of the backends.
-#define JITWriteBarrierFlag ((void*)2)
-class JITWriteBarrierBase {
-public:
- typedef void* (JITWriteBarrierBase::*UnspecifiedBoolType);
- operator UnspecifiedBoolType*() const { return get() ? reinterpret_cast<UnspecifiedBoolType*>(1) : 0; }
- bool operator!() const { return !get(); }
-
- void setFlagOnBarrier()
- {
- ASSERT(!m_location);
- m_location = CodeLocationDataLabelPtr(JITWriteBarrierFlag);
- }
-
- bool isFlagged() const
- {
- return !!m_location;
- }
-
- void setLocation(CodeLocationDataLabelPtr location)
- {
- ASSERT(!m_location);
- m_location = location;
- }
-
- CodeLocationDataLabelPtr location() const
- {
- ASSERT((!!m_location) && m_location.executableAddress() != JITWriteBarrierFlag);
- return m_location;
- }
-
- void clear() { clear(0); }
- void clearToUnusedPointer() { clear(reinterpret_cast<void*>(unusedPointer)); }
-
-protected:
- JITWriteBarrierBase()
- {
- }
-
- void set(VM&, CodeLocationDataLabelPtr location, JSCell* owner, JSCell* value)
- {
- Heap::writeBarrier(owner, value);
- m_location = location;
- ASSERT(((!!m_location) && m_location.executableAddress() != JITWriteBarrierFlag) || (location.executableAddress() == m_location.executableAddress()));
- MacroAssembler::repatchPointer(m_location, value);
- ASSERT(get() == value);
- }
-
- JSCell* get() const
- {
- if (!m_location || m_location.executableAddress() == JITWriteBarrierFlag)
- return 0;
- void* result = static_cast<JSCell*>(MacroAssembler::readPointer(m_location));
- if (result == reinterpret_cast<void*>(unusedPointer))
- return 0;
- return static_cast<JSCell*>(result);
- }
-
-private:
- void clear(void* clearedValue)
- {
- if (!m_location)
- return;
- if (m_location.executableAddress() != JITWriteBarrierFlag)
- MacroAssembler::repatchPointer(m_location, clearedValue);
- }
-
- CodeLocationDataLabelPtr m_location;
-};
-
-#undef JITWriteBarrierFlag
-
-template <typename T> class JITWriteBarrier : public JITWriteBarrierBase {
-public:
- JITWriteBarrier()
- {
- }
-
- void set(VM& vm, CodeLocationDataLabelPtr location, JSCell* owner, T* value)
- {
- validateCell(owner);
- validateCell(value);
- JITWriteBarrierBase::set(vm, location, owner, value);
- }
- void set(VM& vm, JSCell* owner, T* value)
- {
- set(vm, location(), owner, value);
- }
- T* get() const
- {
- T* result = static_cast<T*>(JITWriteBarrierBase::get());
- if (result)
- validateCell(result);
- return result;
- }
-};
-
-template<typename T> inline void SlotVisitor::append(JITWriteBarrier<T>* slot)
-{
- internalAppend(0, slot->get());
-}
-
-}
-
-#endif // ENABLE(JIT)
-
-#endif
diff --git a/Source/JavaScriptCore/jit/JSInterfaceJIT.h b/Source/JavaScriptCore/jit/JSInterfaceJIT.h
index ac1ab7965..dc0cc1a4f 100644
--- a/Source/JavaScriptCore/jit/JSInterfaceJIT.h
+++ b/Source/JavaScriptCore/jit/JSInterfaceJIT.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 Apple Inc. All rights reserved.
+ * Copyright (C) 2010, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,8 +23,7 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef JSInterfaceJIT_h
-#define JSInterfaceJIT_h
+#pragma once
#include "BytecodeConventions.h"
#include "CCallHelpers.h"
@@ -32,12 +31,9 @@
#include "GPRInfo.h"
#include "JITCode.h"
#include "JITOperations.h"
-#include "JITStubs.h"
#include "JSCJSValue.h"
-#include "JSStack.h"
#include "JSString.h"
#include "MacroAssembler.h"
-#include <wtf/Vector.h>
#if ENABLE(JIT)
@@ -50,9 +46,7 @@ namespace JSC {
}
#if USE(JSVALUE32_64)
- // Can't just propogate JSValue::Int32Tag as visual studio doesn't like it
- static const unsigned Int32Tag = 0xffffffff;
- COMPILE_ASSERT(Int32Tag == JSValue::Int32Tag, Int32Tag_out_of_sync);
+ static const unsigned Int32Tag = static_cast<unsigned>(JSValue::Int32Tag);
#else
static const unsigned Int32Tag = static_cast<unsigned>(TagTypeNumber >> 32);
#endif
@@ -67,18 +61,17 @@ namespace JSC {
#if USE(JSVALUE64)
Jump emitJumpIfNotJSCell(RegisterID);
- Jump emitJumpIfImmediateNumber(RegisterID reg);
- Jump emitJumpIfNotImmediateNumber(RegisterID reg);
- void emitFastArithImmToInt(RegisterID reg);
- void emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest);
+ Jump emitJumpIfNumber(RegisterID);
+ Jump emitJumpIfNotNumber(RegisterID);
+ void emitTagInt(RegisterID src, RegisterID dest);
#endif
- Jump emitJumpIfNotType(RegisterID baseReg, RegisterID scratchReg, JSType);
+ Jump emitJumpIfNotType(RegisterID baseReg, JSType);
- void emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister);
- void emitPutToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry);
- void emitPutImmediateToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry);
- void emitPutCellToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry);
+ void emitGetFromCallFrameHeaderPtr(int entry, RegisterID to, RegisterID from = callFrameRegister);
+ void emitPutToCallFrameHeader(RegisterID from, int entry);
+ void emitPutToCallFrameHeader(void* value, int entry);
+ void emitPutCellToCallFrameHeader(RegisterID from, int entry);
inline Address payloadFor(int index, RegisterID base = callFrameRegister);
inline Address intPayloadFor(int index, RegisterID base = callFrameRegister);
@@ -156,11 +149,11 @@ namespace JSC {
return branchTest64(NonZero, reg, tagMaskRegister);
}
- ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfImmediateNumber(RegisterID reg)
+ ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfNumber(RegisterID reg)
{
return branchTest64(NonZero, reg, tagTypeNumberRegister);
}
- ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfNotImmediateNumber(RegisterID reg)
+ ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfNotNumber(RegisterID reg)
{
return branchTest64(Zero, reg, tagTypeNumberRegister);
}
@@ -181,7 +174,7 @@ namespace JSC {
inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadDouble(unsigned virtualRegisterIndex, FPRegisterID dst, RegisterID scratch)
{
load64(addressFor(virtualRegisterIndex), scratch);
- Jump notNumber = emitJumpIfNotImmediateNumber(scratch);
+ Jump notNumber = emitJumpIfNotNumber(scratch);
Jump notInt = branch64(Below, scratch, tagTypeNumberRegister);
convertInt32ToDouble(scratch, dst);
Jump done = jump();
@@ -192,12 +185,8 @@ namespace JSC {
return notNumber;
}
- ALWAYS_INLINE void JSInterfaceJIT::emitFastArithImmToInt(RegisterID)
- {
- }
-
// operand is int32_t, must have been zero-extended if register is 64-bit.
- ALWAYS_INLINE void JSInterfaceJIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest)
+ ALWAYS_INLINE void JSInterfaceJIT::emitTagInt(RegisterID src, RegisterID dest)
{
if (src != dest)
move(src, dest);
@@ -224,18 +213,17 @@ namespace JSC {
}
#endif
- ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfNotType(RegisterID baseReg, RegisterID scratchReg, JSType type)
+ ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfNotType(RegisterID baseReg, JSType type)
{
- loadPtr(Address(baseReg, JSCell::structureOffset()), scratchReg);
- return branch8(NotEqual, Address(scratchReg, Structure::typeInfoTypeOffset()), TrustedImm32(type));
+ return branch8(NotEqual, Address(baseReg, JSCell::typeInfoTypeOffset()), TrustedImm32(type));
}
- ALWAYS_INLINE void JSInterfaceJIT::emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
+ ALWAYS_INLINE void JSInterfaceJIT::emitGetFromCallFrameHeaderPtr(int entry, RegisterID to, RegisterID from)
{
loadPtr(Address(from, entry * sizeof(Register)), to);
}
- ALWAYS_INLINE void JSInterfaceJIT::emitPutToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry entry)
+ ALWAYS_INLINE void JSInterfaceJIT::emitPutToCallFrameHeader(RegisterID from, int entry)
{
#if USE(JSVALUE32_64)
storePtr(from, payloadFor(entry, callFrameRegister));
@@ -244,12 +232,12 @@ namespace JSC {
#endif
}
- ALWAYS_INLINE void JSInterfaceJIT::emitPutImmediateToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry entry)
+ ALWAYS_INLINE void JSInterfaceJIT::emitPutToCallFrameHeader(void* value, int entry)
{
storePtr(TrustedImmPtr(value), Address(callFrameRegister, entry * sizeof(Register)));
}
- ALWAYS_INLINE void JSInterfaceJIT::emitPutCellToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry entry)
+ ALWAYS_INLINE void JSInterfaceJIT::emitPutCellToCallFrameHeader(RegisterID from, int entry)
{
#if USE(JSVALUE32_64)
store32(TrustedImm32(JSValue::CellTag), tagFor(entry, callFrameRegister));
@@ -268,5 +256,3 @@ namespace JSC {
} // namespace JSC
#endif // ENABLE(JIT)
-
-#endif // JSInterfaceJIT_h
diff --git a/Source/JavaScriptCore/jit/PCToCodeOriginMap.cpp b/Source/JavaScriptCore/jit/PCToCodeOriginMap.cpp
new file mode 100644
index 000000000..410009ab0
--- /dev/null
+++ b/Source/JavaScriptCore/jit/PCToCodeOriginMap.cpp
@@ -0,0 +1,306 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "PCToCodeOriginMap.h"
+
+#if ENABLE(JIT)
+
+#include "B3PCToOriginMap.h"
+#include "DFGNode.h"
+#include "LinkBuffer.h"
+
+#if COMPILER(MSVC)
+// See https://msdn.microsoft.com/en-us/library/4wz07268.aspx
+#pragma warning(disable: 4333)
+#endif
+
+namespace JSC {
+
+namespace {
+
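+// DeltaCompressionBuilder and DeltaCompressionReader agree on a deliberately simple format:
+// write<T>() emits the value's sizeof(T) bytes least-significant-byte first, and read<T>()
+// reassembles them by shifting each byte back into place. For example, write<uint16_t>(0x0102)
+// emits the bytes 0x02, 0x01. The compression comes from the callers below, which mostly write
+// one-byte deltas and only spill to a full-width value after a sentinel byte.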
+class DeltaCompressionBuilder {
+public:
+ DeltaCompressionBuilder(size_t maxSize)
+ : m_offset(0)
+ , m_maxSize(maxSize)
+ {
+ m_buffer = static_cast<uint8_t*>(fastMalloc(m_maxSize));
+ }
+
+ template <typename T>
+ void write(T item)
+ {
+ RELEASE_ASSERT(m_offset + sizeof(T) <= m_maxSize);
+ static const uint8_t mask = std::numeric_limits<uint8_t>::max();
+ for (unsigned i = 0; i < sizeof(T); i++) {
+ *(m_buffer + m_offset) = static_cast<uint8_t>(item & mask);
+ item = item >> (sizeof(uint8_t) * 8);
+ m_offset += 1;
+ }
+ }
+
+ uint8_t* m_buffer;
+ size_t m_offset;
+ size_t m_maxSize;
+};
+
+class DeltaCompressionReader {
+public:
+ DeltaCompressionReader(uint8_t* buffer, size_t size)
+ : m_buffer(buffer)
+ , m_size(size)
+ , m_offset(0)
+ { }
+
+ template <typename T>
+ T read()
+ {
+ RELEASE_ASSERT(m_offset + sizeof(T) <= m_size);
+ T result = 0;
+ for (unsigned i = 0; i < sizeof(T); i++) {
+ uint8_t bitsAsInt8 = *(m_buffer + m_offset);
+ T bits = static_cast<T>(bitsAsInt8);
+ bits = bits << (sizeof(uint8_t) * 8 * i);
+ result |= bits;
+ m_offset += 1;
+ }
+ return result;
+ }
+
+private:
+ uint8_t* m_buffer;
+ size_t m_size;
+ size_t m_offset;
+};
+
+} // anonymous namespace
+
+PCToCodeOriginMapBuilder::PCToCodeOriginMapBuilder(VM& vm)
+ : m_vm(vm)
+ , m_shouldBuildMapping(vm.shouldBuilderPCToCodeOriginMapping())
+{ }
+
+PCToCodeOriginMapBuilder::PCToCodeOriginMapBuilder(PCToCodeOriginMapBuilder&& other)
+ : m_vm(other.m_vm)
+ , m_codeRanges(WTFMove(other.m_codeRanges))
+ , m_shouldBuildMapping(other.m_shouldBuildMapping)
+{ }
+
+#if ENABLE(FTL_JIT)
+PCToCodeOriginMapBuilder::PCToCodeOriginMapBuilder(VM& vm, B3::PCToOriginMap&& b3PCToOriginMap)
+ : m_vm(vm)
+ , m_shouldBuildMapping(vm.shouldBuilderPCToCodeOriginMapping())
+{
+ if (!m_shouldBuildMapping)
+ return;
+
+ for (const B3::PCToOriginMap::OriginRange& originRange : b3PCToOriginMap.ranges()) {
+ DFG::Node* node = bitwise_cast<DFG::Node*>(originRange.origin.data());
+ if (node)
+ appendItem(originRange.label, node->origin.semantic);
+ else
+ appendItem(originRange.label, PCToCodeOriginMapBuilder::defaultCodeOrigin());
+ }
+}
+#endif
+
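+// appendItem() extends the previous range so that it ends at `label` and, unless the new code
+// origin matches the previous range's origin (or is invalid), starts a new range there. Repeated
+// calls at the same label, or with the same origin, therefore collapse into a single entry.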
+void PCToCodeOriginMapBuilder::appendItem(MacroAssembler::Label label, const CodeOrigin& codeOrigin)
+{
+ if (!m_shouldBuildMapping)
+ return;
+
+ if (m_codeRanges.size()) {
+ if (m_codeRanges.last().end == label)
+ return;
+ m_codeRanges.last().end = label;
+ if (m_codeRanges.last().codeOrigin == codeOrigin || !codeOrigin)
+ return;
+ }
+
+ m_codeRanges.append(CodeRange{label, label, codeOrigin});
+}
+
+
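+// Both tables use a one-byte delta as the common case and escape to a full-width delta when
+// needed. A zero byte serves as the escape marker: PC deltas are strictly positive (asserted
+// below), and a bytecode delta of exactly zero is itself routed through the escape path, so
+// the sentinel can never be confused with a real one-byte delta.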
+static const uint8_t sentinelPCDelta = 0;
+static const int8_t sentinelBytecodeDelta = 0;
+
+PCToCodeOriginMap::PCToCodeOriginMap(PCToCodeOriginMapBuilder&& builder, LinkBuffer& linkBuffer)
+{
+ RELEASE_ASSERT(builder.didBuildMapping());
+
+ if (!builder.m_codeRanges.size()) {
+ m_pcRangeStart = std::numeric_limits<uintptr_t>::max();
+ m_pcRangeEnd = std::numeric_limits<uintptr_t>::max();
+
+ m_compressedPCBufferSize = 0;
+ m_compressedPCs = nullptr;
+
+ m_compressedCodeOriginsSize = 0;
+ m_compressedCodeOrigins = nullptr;
+
+ return;
+ }
+
+ // We do a final touch-up on the last range here because of how we generate the table.
+ // The final range (if non-empty) would be ignored if we didn't append an (arbitrary)
+ // range as the last item of the vector.
+ PCToCodeOriginMapBuilder::CodeRange& last = builder.m_codeRanges.last();
+ if (!(last.start == last.end))
+ builder.m_codeRanges.append(PCToCodeOriginMapBuilder::CodeRange{ last.end, last.end, last.codeOrigin }); // This range will never actually be found, but it ensures the real last range is found.
+
+ DeltaCompressionBuilder pcCompressor((sizeof(uintptr_t) + sizeof(uint8_t)) * builder.m_codeRanges.size());
+ void* lastPCValue = nullptr;
+ auto buildPCTable = [&] (void* pcValue) {
+ RELEASE_ASSERT(pcValue > lastPCValue);
+ uintptr_t delta = bitwise_cast<uintptr_t>(pcValue) - bitwise_cast<uintptr_t>(lastPCValue);
+ RELEASE_ASSERT(delta != sentinelPCDelta);
+ lastPCValue = pcValue;
+ if (delta > std::numeric_limits<uint8_t>::max()) {
+ pcCompressor.write<uint8_t>(sentinelPCDelta);
+ pcCompressor.write<uintptr_t>(delta);
+ return;
+ }
+
+ pcCompressor.write<uint8_t>(static_cast<uint8_t>(delta));
+ };
+
+ DeltaCompressionBuilder codeOriginCompressor((sizeof(intptr_t) + sizeof(int8_t) + sizeof(int8_t) + sizeof(InlineCallFrame*)) * builder.m_codeRanges.size());
+ CodeOrigin lastCodeOrigin(0, nullptr);
+ auto buildCodeOriginTable = [&] (const CodeOrigin& codeOrigin) {
+ intptr_t delta = static_cast<intptr_t>(codeOrigin.bytecodeIndex) - static_cast<intptr_t>(lastCodeOrigin.bytecodeIndex);
+ lastCodeOrigin = codeOrigin;
+ if (delta > std::numeric_limits<int8_t>::max() || delta < std::numeric_limits<int8_t>::min() || delta == sentinelBytecodeDelta) {
+ codeOriginCompressor.write<int8_t>(sentinelBytecodeDelta);
+ codeOriginCompressor.write<intptr_t>(delta);
+ } else
+ codeOriginCompressor.write<int8_t>(static_cast<int8_t>(delta));
+
+ int8_t hasInlineCallFrameByte = codeOrigin.inlineCallFrame ? 1 : 0;
+ codeOriginCompressor.write<int8_t>(hasInlineCallFrameByte);
+ if (hasInlineCallFrameByte)
+ codeOriginCompressor.write<uintptr_t>(bitwise_cast<uintptr_t>(codeOrigin.inlineCallFrame));
+ };
+
+ m_pcRangeStart = bitwise_cast<uintptr_t>(linkBuffer.locationOf(builder.m_codeRanges.first().start).dataLocation());
+ m_pcRangeEnd = bitwise_cast<uintptr_t>(linkBuffer.locationOf(builder.m_codeRanges.last().end).dataLocation());
+ m_pcRangeEnd -= 1;
+
+ for (unsigned i = 0; i < builder.m_codeRanges.size(); i++) {
+ PCToCodeOriginMapBuilder::CodeRange& codeRange = builder.m_codeRanges[i];
+ void* start = linkBuffer.locationOf(codeRange.start).dataLocation();
+ void* end = linkBuffer.locationOf(codeRange.end).dataLocation();
+ ASSERT(m_pcRangeStart <= bitwise_cast<uintptr_t>(start));
+ ASSERT(m_pcRangeEnd >= bitwise_cast<uintptr_t>(end) - 1);
+ if (start == end)
+ ASSERT(i == builder.m_codeRanges.size() - 1);
+ if (i > 0)
+ ASSERT(linkBuffer.locationOf(builder.m_codeRanges[i - 1].end).dataLocation() == start);
+
+ buildPCTable(start);
+ buildCodeOriginTable(codeRange.codeOrigin);
+ }
+
+ m_compressedPCBufferSize = pcCompressor.m_offset;
+ m_compressedPCs = static_cast<uint8_t*>(fastRealloc(pcCompressor.m_buffer, m_compressedPCBufferSize));
+
+ m_compressedCodeOriginsSize = codeOriginCompressor.m_offset;
+ m_compressedCodeOrigins = static_cast<uint8_t*>(fastRealloc(codeOriginCompressor.m_buffer, m_compressedCodeOriginsSize));
+}
+
+PCToCodeOriginMap::~PCToCodeOriginMap()
+{
+ if (m_compressedPCs)
+ fastFree(m_compressedPCs);
+ if (m_compressedCodeOrigins)
+ fastFree(m_compressedCodeOrigins);
+}
+
+double PCToCodeOriginMap::memorySize()
+{
+ double size = 0;
+ size += m_compressedPCBufferSize;
+ size += m_compressedCodeOriginsSize;
+ return size;
+}
+
+std::optional<CodeOrigin> PCToCodeOriginMap::findPC(void* pc) const
+{
+ uintptr_t pcAsInt = bitwise_cast<uintptr_t>(pc);
+ if (!(m_pcRangeStart <= pcAsInt && pcAsInt <= m_pcRangeEnd))
+ return std::nullopt;
+
+ uintptr_t currentPC = 0;
+ CodeOrigin currentCodeOrigin(0, nullptr);
+
+ DeltaCompressionReader pcReader(m_compressedPCs, m_compressedPCBufferSize);
+ DeltaCompressionReader codeOriginReader(m_compressedCodeOrigins, m_compressedCodeOriginsSize);
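+ // Walk both compressed streams in lockstep, reconstructing the absolute PC and the
+ // CodeOrigin by accumulating deltas, until we find the range containing `pc`.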
+ while (true) {
+ uintptr_t previousPC = currentPC;
+ {
+ uint8_t value = pcReader.read<uint8_t>();
+ uintptr_t delta;
+ if (value == sentinelPCDelta)
+ delta = pcReader.read<uintptr_t>();
+ else
+ delta = value;
+ currentPC += delta;
+ }
+
+ CodeOrigin previousOrigin = currentCodeOrigin;
+ {
+ int8_t value = codeOriginReader.read<int8_t>();
+ intptr_t delta;
+ if (value == sentinelBytecodeDelta)
+ delta = codeOriginReader.read<intptr_t>();
+ else
+ delta = static_cast<intptr_t>(value);
+
+ currentCodeOrigin.bytecodeIndex = static_cast<unsigned>(static_cast<intptr_t>(currentCodeOrigin.bytecodeIndex) + delta);
+
+ int8_t hasInlineFrame = codeOriginReader.read<int8_t>();
+ ASSERT(hasInlineFrame == 0 || hasInlineFrame == 1);
+ if (hasInlineFrame)
+ currentCodeOrigin.inlineCallFrame = bitwise_cast<InlineCallFrame*>(codeOriginReader.read<uintptr_t>());
+ else
+ currentCodeOrigin.inlineCallFrame = nullptr;
+ }
+
+ if (previousPC) {
+ uintptr_t startOfRange = previousPC;
+ // We subtract 1 because we generate end points inclusively in this table, even though we are interested in ranges of the form: [previousPC, currentPC)
+ uintptr_t endOfRange = currentPC - 1;
+ if (startOfRange <= pcAsInt && pcAsInt <= endOfRange)
+ return std::optional<CodeOrigin>(previousOrigin); // We return previousOrigin here because CodeOrigins are mapped to the start of the range.
+ }
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+ return std::nullopt;
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/PCToCodeOriginMap.h b/Source/JavaScriptCore/jit/PCToCodeOriginMap.h
new file mode 100644
index 000000000..c01c441a5
--- /dev/null
+++ b/Source/JavaScriptCore/jit/PCToCodeOriginMap.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "CodeOrigin.h"
+#include "DFGCommon.h"
+#include "MacroAssembler.h"
+#include "VM.h"
+#include <wtf/Bag.h>
+#include <wtf/Optional.h>
+#include <wtf/RedBlackTree.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+#if ENABLE(FTL_JIT)
+namespace B3 {
+class PCToOriginMap;
+}
+#endif
+
+class LinkBuffer;
+class PCToCodeOriginMapBuilder;
+
+class PCToCodeOriginMapBuilder {
+ WTF_MAKE_NONCOPYABLE(PCToCodeOriginMapBuilder);
+ friend class PCToCodeOriginMap;
+
+public:
+ PCToCodeOriginMapBuilder(VM&);
+ PCToCodeOriginMapBuilder(PCToCodeOriginMapBuilder&& other);
+
+#if ENABLE(FTL_JIT)
+ PCToCodeOriginMapBuilder(VM&, B3::PCToOriginMap&&);
+#endif
+
+ void appendItem(MacroAssembler::Label, const CodeOrigin&);
+ static CodeOrigin defaultCodeOrigin() { return CodeOrigin(0, nullptr); }
+
+ bool didBuildMapping() const { return m_shouldBuildMapping; }
+
+private:
+
+ struct CodeRange {
+ MacroAssembler::Label start;
+ MacroAssembler::Label end;
+ CodeOrigin codeOrigin;
+ };
+
+ VM& m_vm;
+ Vector<CodeRange> m_codeRanges;
+ bool m_shouldBuildMapping;
+};
+
+class PCToCodeOriginMap {
+ WTF_MAKE_NONCOPYABLE(PCToCodeOriginMap);
+public:
+ PCToCodeOriginMap(PCToCodeOriginMapBuilder&&, LinkBuffer&);
+ ~PCToCodeOriginMap();
+
+ std::optional<CodeOrigin> findPC(void* pc) const;
+
+ double memorySize();
+
+private:
+ size_t m_compressedPCBufferSize;
+ size_t m_compressedCodeOriginsSize;
+ uint8_t* m_compressedPCs;
+ uint8_t* m_compressedCodeOrigins;
+ uintptr_t m_pcRangeStart;
+ uintptr_t m_pcRangeEnd;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.cpp b/Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.cpp
new file mode 100644
index 000000000..0b24b02e4
--- /dev/null
+++ b/Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.cpp
@@ -0,0 +1,137 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "PolymorphicCallStubRoutine.h"
+
+#if ENABLE(JIT)
+
+#include "CallLinkInfo.h"
+#include "CodeBlock.h"
+#include "JSCInlines.h"
+#include "LinkBuffer.h"
+
+namespace JSC {
+
+PolymorphicCallNode::~PolymorphicCallNode()
+{
+ if (isOnList())
+ remove();
+}
+
+void PolymorphicCallNode::unlink(VM& vm)
+{
+ if (m_callLinkInfo) {
+ if (Options::dumpDisassembly())
+ dataLog("Unlinking polymorphic call at ", m_callLinkInfo->callReturnLocation(), ", ", m_callLinkInfo->codeOrigin(), "\n");
+
+ m_callLinkInfo->unlink(vm);
+ }
+
+ if (isOnList())
+ remove();
+}
+
+void PolymorphicCallNode::clearCallLinkInfo()
+{
+ if (Options::dumpDisassembly())
+ dataLog("Clearing call link info for polymorphic call at ", m_callLinkInfo->callReturnLocation(), ", ", m_callLinkInfo->codeOrigin(), "\n");
+
+ m_callLinkInfo = nullptr;
+}
+
+void PolymorphicCallCase::dump(PrintStream& out) const
+{
+ out.print("<variant = ", m_variant, ", codeBlock = ", pointerDump(m_codeBlock), ">");
+}
+
+PolymorphicCallStubRoutine::PolymorphicCallStubRoutine(
+ const MacroAssemblerCodeRef& codeRef, VM& vm, const JSCell* owner, ExecState* callerFrame,
+ CallLinkInfo& info, const Vector<PolymorphicCallCase>& cases,
+ std::unique_ptr<uint32_t[]> fastCounts)
+ : GCAwareJITStubRoutine(codeRef, vm)
+ , m_fastCounts(WTFMove(fastCounts))
+{
+ for (PolymorphicCallCase callCase : cases) {
+ m_variants.append(WriteBarrier<JSCell>(vm, owner, callCase.variant().rawCalleeCell()));
+ if (shouldDumpDisassemblyFor(callerFrame->codeBlock()))
+ dataLog("Linking polymorphic call in ", *callerFrame->codeBlock(), " at ", callerFrame->codeOrigin(), " to ", callCase.variant(), ", codeBlock = ", pointerDump(callCase.codeBlock()), "\n");
+ if (CodeBlock* codeBlock = callCase.codeBlock())
+ codeBlock->linkIncomingPolymorphicCall(callerFrame, m_callNodes.add(&info));
+ }
+ m_variants.shrinkToFit();
+ WTF::storeStoreFence();
+}
+
+PolymorphicCallStubRoutine::~PolymorphicCallStubRoutine() { }
+
+CallVariantList PolymorphicCallStubRoutine::variants() const
+{
+ CallVariantList result;
+ for (size_t i = 0; i < m_variants.size(); ++i)
+ result.append(CallVariant(m_variants[i].get()));
+ return result;
+}
+
+CallEdgeList PolymorphicCallStubRoutine::edges() const
+{
+ // We wouldn't have these if this was an FTL stub routine. We shouldn't be asking for profiling
+ // from the FTL.
+ RELEASE_ASSERT(m_fastCounts);
+
+ CallEdgeList result;
+ for (size_t i = 0; i < m_variants.size(); ++i)
+ result.append(CallEdge(CallVariant(m_variants[i].get()), m_fastCounts[i]));
+ return result;
+}
+
+void PolymorphicCallStubRoutine::clearCallNodesFor(CallLinkInfo* info)
+{
+ for (Bag<PolymorphicCallNode>::iterator iter = m_callNodes.begin(); !!iter; ++iter) {
+ PolymorphicCallNode& node = **iter;
+ // All nodes should point to info, but it's okay to be a little paranoid.
+ if (node.hasCallLinkInfo(info))
+ node.clearCallLinkInfo();
+ }
+}
+
+bool PolymorphicCallStubRoutine::visitWeak(VM&)
+{
+ for (auto& variant : m_variants) {
+ if (!Heap::isMarked(variant.get()))
+ return false;
+ }
+ return true;
+}
+
+void PolymorphicCallStubRoutine::markRequiredObjectsInternal(SlotVisitor& visitor)
+{
+ for (auto& variant : m_variants)
+ visitor.append(variant);
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.h b/Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.h
new file mode 100644
index 000000000..3bfd2d6c3
--- /dev/null
+++ b/Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "CallEdge.h"
+#include "CallVariant.h"
+#include "CodeOrigin.h"
+#include "GCAwareJITStubRoutine.h"
+#include <wtf/FastMalloc.h>
+#include <wtf/Noncopyable.h>
+#include <wtf/Vector.h>
+
+namespace JSC {
+
+class CallLinkInfo;
+
+class PolymorphicCallNode : public BasicRawSentinelNode<PolymorphicCallNode> {
+ WTF_MAKE_NONCOPYABLE(PolymorphicCallNode);
+public:
+ PolymorphicCallNode(CallLinkInfo* info)
+ : m_callLinkInfo(info)
+ {
+ }
+
+ ~PolymorphicCallNode();
+
+ void unlink(VM&);
+
+ bool hasCallLinkInfo(CallLinkInfo* info) { return m_callLinkInfo == info; }
+ void clearCallLinkInfo();
+
+private:
+ CallLinkInfo* m_callLinkInfo;
+};
+
+class PolymorphicCallCase {
+public:
+ PolymorphicCallCase()
+ : m_codeBlock(nullptr)
+ {
+ }
+
+ PolymorphicCallCase(CallVariant variant, CodeBlock* codeBlock)
+ : m_variant(variant)
+ , m_codeBlock(codeBlock)
+ {
+ }
+
+ CallVariant variant() const { return m_variant; }
+ CodeBlock* codeBlock() const { return m_codeBlock; }
+
+ void dump(PrintStream&) const;
+
+private:
+ CallVariant m_variant;
+ CodeBlock* m_codeBlock;
+};
+
+class PolymorphicCallStubRoutine : public GCAwareJITStubRoutine {
+public:
+ PolymorphicCallStubRoutine(
+ const MacroAssemblerCodeRef&, VM&, const JSCell* owner,
+ ExecState* callerFrame, CallLinkInfo&, const Vector<PolymorphicCallCase>&,
+ std::unique_ptr<uint32_t[]> fastCounts);
+
+ virtual ~PolymorphicCallStubRoutine();
+
+ CallVariantList variants() const;
+ CallEdgeList edges() const;
+
+ void clearCallNodesFor(CallLinkInfo*);
+
+ bool visitWeak(VM&) override;
+
+protected:
+ void markRequiredObjectsInternal(SlotVisitor&) override;
+
+private:
+ Vector<WriteBarrier<JSCell>, 2> m_variants;
+ std::unique_ptr<uint32_t[]> m_fastCounts;
+ Bag<PolymorphicCallNode> m_callNodes;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/ClosureCallStubRoutine.cpp b/Source/JavaScriptCore/jit/Reg.cpp
index 1588f7fea..4aa965374 100644
--- a/Source/JavaScriptCore/jit/ClosureCallStubRoutine.cpp
+++ b/Source/JavaScriptCore/jit/Reg.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2014, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -24,37 +24,32 @@
*/
#include "config.h"
-#include "ClosureCallStubRoutine.h"
+#include "Reg.h"
#if ENABLE(JIT)
-#include "Executable.h"
-#include "Heap.h"
-#include "VM.h"
-#include "Operations.h"
-#include "SlotVisitor.h"
-#include "Structure.h"
+#include "FPRInfo.h"
+#include "GPRInfo.h"
namespace JSC {
-ClosureCallStubRoutine::ClosureCallStubRoutine(
- const MacroAssemblerCodeRef& code, VM& vm, const JSCell* owner,
- Structure* structure, ExecutableBase* executable, const CodeOrigin& codeOrigin)
- : GCAwareJITStubRoutine(code, vm, true)
- , m_structure(vm, owner, structure)
- , m_executable(vm, owner, executable)
- , m_codeOrigin(codeOrigin)
+const char* Reg::debugName() const
{
+ if (!*this)
+ return nullptr;
+ if (isGPR())
+ return GPRInfo::debugName(gpr());
+ return FPRInfo::debugName(fpr());
}
-ClosureCallStubRoutine::~ClosureCallStubRoutine()
+void Reg::dump(PrintStream& out) const
{
-}
-
-void ClosureCallStubRoutine::markRequiredObjectsInternal(SlotVisitor& visitor)
-{
- visitor.append(&m_structure);
- visitor.append(&m_executable);
+ if (!*this)
+ out.print("<none>");
+ else if (isGPR())
+ out.print(gpr());
+ else
+ out.print(fpr());
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/jit/Reg.h b/Source/JavaScriptCore/jit/Reg.h
new file mode 100644
index 000000000..84ae35904
--- /dev/null
+++ b/Source/JavaScriptCore/jit/Reg.h
@@ -0,0 +1,248 @@
+/*
+ * Copyright (C) 2014, 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "MacroAssembler.h"
+
+namespace JSC {
+
+// Reg is a polymorphic register class. It can refer to either integer or float registers.
+// Here are some use cases:
+//
+// GPRReg gpr;
+// Reg reg = gpr;
+// reg.isSet() == true
+// reg.isGPR() == true
+// reg.isFPR() == false
+//
+// for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
+// if (reg.isGPR()) {
+// } else /* reg.isFPR() */ {
+// }
+// }
+//
+// The above loop could have also used !!reg or reg.isSet() as a condition.
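+//
+// Reg::all() (defined below) provides the same traversal as a range-based for loop:
+//
+// for (Reg reg : Reg::all()) { ... }
+//
+// Internally a Reg is just a byte-sized index: GPRs occupy [0, numberOfRegisters()) and
+// FPRs follow immediately after, so isGPR() and isFPR() reduce to simple range checks.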
+
+class Reg {
+public:
+ Reg()
+ : m_index(invalid())
+ {
+ }
+
+ Reg(WTF::HashTableDeletedValueType)
+ : m_index(deleted())
+ {
+ }
+
+ Reg(MacroAssembler::RegisterID reg)
+ : m_index(MacroAssembler::registerIndex(reg))
+ {
+ }
+
+ Reg(MacroAssembler::FPRegisterID reg)
+ : m_index(MacroAssembler::registerIndex(reg))
+ {
+ }
+
+ static Reg fromIndex(unsigned index)
+ {
+ Reg result;
+ result.m_index = index;
+ return result;
+ }
+
+ static Reg first()
+ {
+ Reg result;
+ result.m_index = 0;
+ return result;
+ }
+
+ static Reg last()
+ {
+ Reg result;
+ result.m_index = MacroAssembler::numberOfRegisters() + MacroAssembler::numberOfFPRegisters() - 1;
+ return result;
+ }
+
+ Reg next() const
+ {
+ ASSERT(!!*this);
+ if (*this == last())
+ return Reg();
+ Reg result;
+ result.m_index = m_index + 1;
+ return result;
+ }
+
+ unsigned index() const { return m_index; }
+
+ static unsigned maxIndex()
+ {
+ return last().index();
+ }
+
+ bool isSet() const { return m_index != invalid(); }
+ explicit operator bool() const { return isSet(); }
+
+ bool isHashTableDeletedValue() const { return m_index == deleted(); }
+
+ bool isGPR() const
+ {
+ return m_index < MacroAssembler::numberOfRegisters();
+ }
+
+ bool isFPR() const
+ {
+ return (m_index - MacroAssembler::numberOfRegisters()) < MacroAssembler::numberOfFPRegisters();
+ }
+
+ MacroAssembler::RegisterID gpr() const
+ {
+ ASSERT(isGPR());
+ return static_cast<MacroAssembler::RegisterID>(MacroAssembler::firstRegister() + m_index);
+ }
+
+ MacroAssembler::FPRegisterID fpr() const
+ {
+ ASSERT(isFPR());
+ return static_cast<MacroAssembler::FPRegisterID>(
+ MacroAssembler::firstFPRegister() + (m_index - MacroAssembler::numberOfRegisters()));
+ }
+
+ bool operator==(const Reg& other) const
+ {
+ return m_index == other.m_index;
+ }
+
+ bool operator!=(const Reg& other) const
+ {
+ return m_index != other.m_index;
+ }
+
+ bool operator<(const Reg& other) const
+ {
+ return m_index < other.m_index;
+ }
+
+ bool operator>(const Reg& other) const
+ {
+ return m_index > other.m_index;
+ }
+
+ bool operator<=(const Reg& other) const
+ {
+ return m_index <= other.m_index;
+ }
+
+ bool operator>=(const Reg& other) const
+ {
+ return m_index >= other.m_index;
+ }
+
+ unsigned hash() const
+ {
+ return m_index;
+ }
+
+ const char* debugName() const;
+
+ void dump(PrintStream&) const;
+
+ class AllRegsIterable {
+ public:
+
+ class iterator {
+ public:
+ iterator() { }
+
+ explicit iterator(Reg reg)
+ : m_regIndex(reg.index())
+ {
+ }
+
+ Reg operator*() const { return Reg::fromIndex(m_regIndex); }
+
+ iterator& operator++()
+ {
+ m_regIndex = Reg::fromIndex(m_regIndex).next().index();
+ return *this;
+ }
+
+ bool operator==(const iterator& other) const
+ {
+ return m_regIndex == other.m_regIndex;
+ }
+
+ bool operator!=(const iterator& other) const
+ {
+ return !(*this == other);
+ }
+
+ private:
+ unsigned m_regIndex;
+ };
+
+ iterator begin() const { return iterator(Reg::first()); }
+ iterator end() const { return iterator(Reg()); }
+ };
+
+ static AllRegsIterable all() { return AllRegsIterable(); }
+
+private:
+ static uint8_t invalid() { return 0xff; }
+
+ static uint8_t deleted() { return 0xfe; }
+
+ uint8_t m_index;
+};
+
+struct RegHash {
+ static unsigned hash(const Reg& key) { return key.hash(); }
+ static bool equal(const Reg& a, const Reg& b) { return a == b; }
+ static const bool safeToCompareToEmptyOrDeleted = true;
+};
+
+} // namespace JSC
+
+namespace WTF {
+
+template<typename T> struct DefaultHash;
+template<> struct DefaultHash<JSC::Reg> {
+ typedef JSC::RegHash Hash;
+};
+
+template<typename T> struct HashTraits;
+template<> struct HashTraits<JSC::Reg> : SimpleClassHashTraits<JSC::Reg> {
+ static const bool emptyValueIsZero = false;
+ };
+
+} // namespace WTF
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/RegisterAtOffset.cpp b/Source/JavaScriptCore/jit/RegisterAtOffset.cpp
new file mode 100644
index 000000000..16a639ca8
--- /dev/null
+++ b/Source/JavaScriptCore/jit/RegisterAtOffset.cpp
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "RegisterAtOffset.h"
+
+#if ENABLE(JIT)
+
+namespace JSC {
+
+#if !COMPILER(MSVC)
+static_assert(sizeof(RegisterAtOffset) == sizeof(ptrdiff_t), "RegisterAtOffset should be small.");
+#endif
+
+void RegisterAtOffset::dump(PrintStream& out) const
+{
+ out.print(reg(), " at ", offset());
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
diff --git a/Source/JavaScriptCore/jit/RegisterAtOffset.h b/Source/JavaScriptCore/jit/RegisterAtOffset.h
new file mode 100644
index 000000000..0db8da4a7
--- /dev/null
+++ b/Source/JavaScriptCore/jit/RegisterAtOffset.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "Reg.h"
+#include <wtf/PrintStream.h>
+
+namespace JSC {
+
+class RegisterAtOffset {
+public:
+ RegisterAtOffset()
+ : m_offset(0)
+ {
+ }
+
+ RegisterAtOffset(Reg reg, ptrdiff_t offset)
+ : m_reg(reg)
+ , m_offset(offset)
+ {
+ }
+
+ bool operator!() const { return !m_reg; }
+
+ Reg reg() const { return m_reg; }
+ ptrdiff_t offset() const { return m_offset; }
+ int offsetAsIndex() const { return offset() / sizeof(void*); }
+
+ bool operator==(const RegisterAtOffset& other) const
+ {
+ return reg() == other.reg() && offset() == other.offset();
+ }
+
+ bool operator<(const RegisterAtOffset& other) const
+ {
+ if (reg() != other.reg())
+ return reg() < other.reg();
+ return offset() < other.offset();
+ }
+
+ static Reg getReg(RegisterAtOffset* value) { return value->reg(); }
+
+ void dump(PrintStream& out) const;
+
+private:
+ Reg m_reg;
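+ // The offset is stored in the bits left over after m_reg so that the whole object packs
+ // into a single ptrdiff_t-sized word (see the static_assert in RegisterAtOffset.cpp).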
+ ptrdiff_t m_offset : sizeof(ptrdiff_t) * 8 - sizeof(Reg) * 8;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/RegisterAtOffsetList.cpp b/Source/JavaScriptCore/jit/RegisterAtOffsetList.cpp
new file mode 100644
index 000000000..dd5b5b32f
--- /dev/null
+++ b/Source/JavaScriptCore/jit/RegisterAtOffsetList.cpp
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "RegisterAtOffsetList.h"
+
+#if ENABLE(JIT)
+
+#include <wtf/ListDump.h>
+
+namespace JSC {
+
+RegisterAtOffsetList::RegisterAtOffsetList() { }
+
+RegisterAtOffsetList::RegisterAtOffsetList(RegisterSet registerSet, OffsetBaseType offsetBaseType)
+{
+ size_t numberOfRegisters = registerSet.numberOfSetRegisters();
+ ptrdiff_t offset = 0;
+
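+ // FramePointerBased offsets grow up toward the frame pointer: with N saved registers the
+ // first one lands at -N * sizeof(void*) and the last one ends up immediately below the
+ // frame pointer. For example, three registers on a 64-bit target get offsets -24, -16, -8.
+ // ZeroBased offsets simply count up from 0.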
+ if (offsetBaseType == FramePointerBased)
+ offset = -(static_cast<ptrdiff_t>(numberOfRegisters) * sizeof(void*));
+
+ m_registers.reserveInitialCapacity(numberOfRegisters);
+ registerSet.forEach([&] (Reg reg) {
+ m_registers.append(RegisterAtOffset(reg, offset));
+ offset += sizeof(void*);
+ });
+}
+
+void RegisterAtOffsetList::dump(PrintStream& out) const
+{
+ out.print(listDump(m_registers));
+}
+
+RegisterAtOffset* RegisterAtOffsetList::find(Reg reg) const
+{
+ return tryBinarySearch<RegisterAtOffset, Reg>(m_registers, m_registers.size(), reg, RegisterAtOffset::getReg);
+}
+
+unsigned RegisterAtOffsetList::indexOf(Reg reg) const
+{
+ if (RegisterAtOffset* pointer = find(reg))
+ return pointer - m_registers.begin();
+ return UINT_MAX;
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
diff --git a/Source/JavaScriptCore/jit/ClosureCallStubRoutine.h b/Source/JavaScriptCore/jit/RegisterAtOffsetList.h
index ad61ed514..875a591c8 100644
--- a/Source/JavaScriptCore/jit/ClosureCallStubRoutine.h
+++ b/Source/JavaScriptCore/jit/RegisterAtOffsetList.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,44 +23,50 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef ClosureCallStubRoutine_h
-#define ClosureCallStubRoutine_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(JIT)
-#include "CodeOrigin.h"
-#include "GCAwareJITStubRoutine.h"
+#include "RegisterAtOffset.h"
+#include "RegisterSet.h"
namespace JSC {
-class ClosureCallStubRoutine : public GCAwareJITStubRoutine {
+class RegisterAtOffsetList {
+ WTF_MAKE_FAST_ALLOCATED;
public:
- ClosureCallStubRoutine(
- const MacroAssemblerCodeRef&, VM&, const JSCell* owner,
- Structure*, ExecutableBase*, const CodeOrigin&);
-
- virtual ~ClosureCallStubRoutine();
+ enum OffsetBaseType { FramePointerBased, ZeroBased };
+
+ RegisterAtOffsetList();
+ RegisterAtOffsetList(RegisterSet, OffsetBaseType = FramePointerBased);
+
+ void dump(PrintStream&) const;
+
+ void clear()
+ {
+ m_registers.clear();
+ }
+
+ size_t size() const
+ {
+ return m_registers.size();
+ }
+
+ RegisterAtOffset& at(size_t index)
+ {
+ return m_registers.at(index);
+ }
- Structure* structure() const { return m_structure.get(); }
- ExecutableBase* executable() const { return m_executable.get(); }
- const CodeOrigin& codeOrigin() const { return m_codeOrigin; }
+ RegisterAtOffset* find(Reg) const;
+ unsigned indexOf(Reg) const; // Returns UINT_MAX if not found.
-protected:
- virtual void markRequiredObjectsInternal(SlotVisitor&) override;
+ Vector<RegisterAtOffset>::const_iterator begin() const { return m_registers.begin(); }
+ Vector<RegisterAtOffset>::const_iterator end() const { return m_registers.end(); }
private:
- WriteBarrier<Structure> m_structure;
- WriteBarrier<ExecutableBase> m_executable;
- // This allows us to figure out who a call is linked to by searching through
- // stub routines.
- CodeOrigin m_codeOrigin;
+ Vector<RegisterAtOffset> m_registers;
};
} // namespace JSC
#endif // ENABLE(JIT)
-
-#endif // ClosureCallStubRoutine_h
-
diff --git a/Source/JavaScriptCore/jit/RegisterMap.h b/Source/JavaScriptCore/jit/RegisterMap.h
new file mode 100644
index 000000000..0f3f9574f
--- /dev/null
+++ b/Source/JavaScriptCore/jit/RegisterMap.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "FPRInfo.h"
+#include "GPRInfo.h"
+#include "MacroAssembler.h"
+#include "Reg.h"
+
+namespace JSC {
+
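+// RegisterMap is indexed by the unified Reg index, so a single map covers both GPRs and FPRs.
+// GPRMap and FPRMap below are compact per-bank variants indexed by registerIndex() and
+// fpRegisterIndex() respectively.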
+template<typename T>
+class RegisterMap {
+public:
+ T& operator[](Reg reg)
+ {
+ return m_map[reg.index()];
+ }
+
+ T& operator[](GPRReg gpr)
+ {
+ return m_map[MacroAssembler::registerIndex(gpr)];
+ }
+
+ T& operator[](FPRReg fpr)
+ {
+ return m_map[MacroAssembler::registerIndex(fpr)];
+ }
+
+ const T& operator[](Reg reg) const
+ {
+ return m_map[reg.index()];
+ }
+
+ const T& operator[](GPRReg gpr) const
+ {
+ return m_map[MacroAssembler::registerIndex(gpr)];
+ }
+
+ const T& operator[](FPRReg fpr) const
+ {
+ return m_map[MacroAssembler::registerIndex(fpr)];
+ }
+
+private:
+ std::array<T, MacroAssembler::totalNumberOfRegisters()> m_map { { } };
+};
+
+template<typename T>
+class GPRMap {
+public:
+ T& operator[](GPRReg gpr)
+ {
+ return m_map[MacroAssembler::registerIndex(gpr)];
+ }
+
+ const T& operator[](GPRReg gpr) const
+ {
+ return m_map[MacroAssembler::registerIndex(gpr)];
+ }
+
+private:
+ std::array<T, MacroAssembler::numberOfRegisters()> m_map { { } };
+};
+
+template<typename T>
+class FPRMap {
+public:
+ T& operator[](FPRReg fpr)
+ {
+ return m_map[MacroAssembler::fpRegisterIndex(fpr)];
+ }
+
+ const T& operator[](FPRReg fpr) const
+ {
+ return m_map[MacroAssembler::fpRegisterIndex(fpr)];
+ }
+
+private:
+ std::array<T, MacroAssembler::numberOfFPRegisters()> m_map { { } };
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/RegisterSet.cpp b/Source/JavaScriptCore/jit/RegisterSet.cpp
index 362ada0de..e6e7741cf 100644
--- a/Source/JavaScriptCore/jit/RegisterSet.cpp
+++ b/Source/JavaScriptCore/jit/RegisterSet.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,45 +30,300 @@
#include "GPRInfo.h"
#include "MacroAssembler.h"
+#include "JSCInlines.h"
+#include <wtf/CommaPrinter.h>
namespace JSC {
RegisterSet RegisterSet::stackRegisters()
{
- RegisterSet result;
- result.set(MacroAssembler::stackPointerRegister);
- result.set(MacroAssembler::framePointerRegister);
- return result;
+ return RegisterSet(
+ MacroAssembler::stackPointerRegister,
+ MacroAssembler::framePointerRegister);
}
-RegisterSet RegisterSet::specialRegisters()
+RegisterSet RegisterSet::reservedHardwareRegisters()
+{
+#if CPU(ARM64)
+#if PLATFORM(IOS)
+ return RegisterSet(ARM64Registers::x18, ARM64Registers::lr);
+#else
+ return RegisterSet(ARM64Registers::lr);
+#endif // PLATFORM(IOS)
+#else
+ return RegisterSet();
+#endif
+}
+
+RegisterSet RegisterSet::runtimeRegisters()
{
- RegisterSet result;
- result.merge(stackRegisters());
- result.set(GPRInfo::callFrameRegister);
#if USE(JSVALUE64)
- result.set(GPRInfo::tagTypeNumberRegister);
- result.set(GPRInfo::tagMaskRegister);
+ return RegisterSet(GPRInfo::tagTypeNumberRegister, GPRInfo::tagMaskRegister);
+#else
+ return RegisterSet();
#endif
+}
+
+RegisterSet RegisterSet::specialRegisters()
+{
+ return RegisterSet(
+ stackRegisters(), reservedHardwareRegisters(), runtimeRegisters());
+}
+
+RegisterSet RegisterSet::volatileRegistersForJSCall()
+{
+ RegisterSet volatileRegisters = allRegisters();
+ volatileRegisters.exclude(RegisterSet::stackRegisters());
+ volatileRegisters.exclude(RegisterSet::reservedHardwareRegisters());
+ volatileRegisters.exclude(RegisterSet::vmCalleeSaveRegisters());
+ return volatileRegisters;
+}
+
+RegisterSet RegisterSet::stubUnavailableRegisters()
+{
+ return RegisterSet(specialRegisters(), vmCalleeSaveRegisters());
+}
+
+RegisterSet RegisterSet::macroScratchRegisters()
+{
+#if CPU(X86_64)
+ return RegisterSet(MacroAssembler::s_scratchRegister);
+#elif CPU(ARM64)
+ return RegisterSet(MacroAssembler::dataTempRegister, MacroAssembler::memoryTempRegister);
+#elif CPU(MIPS)
+ RegisterSet result;
+ result.set(MacroAssembler::immTempRegister);
+ result.set(MacroAssembler::dataTempRegister);
+ result.set(MacroAssembler::addrTempRegister);
+ result.set(MacroAssembler::cmpTempRegister);
return result;
+#else
+ return RegisterSet();
+#endif
}
RegisterSet RegisterSet::calleeSaveRegisters()
{
RegisterSet result;
-#if CPU(X86_64)
+#if CPU(X86)
+ result.set(X86Registers::ebx);
+ result.set(X86Registers::ebp);
+ result.set(X86Registers::edi);
+ result.set(X86Registers::esi);
+#elif CPU(X86_64)
result.set(X86Registers::ebx);
result.set(X86Registers::ebp);
result.set(X86Registers::r12);
result.set(X86Registers::r13);
result.set(X86Registers::r14);
result.set(X86Registers::r15);
+#elif CPU(ARM_THUMB2)
+ result.set(ARMRegisters::r4);
+ result.set(ARMRegisters::r5);
+ result.set(ARMRegisters::r6);
+ result.set(ARMRegisters::r8);
+#if !PLATFORM(IOS)
+ result.set(ARMRegisters::r9);
+#endif
+ result.set(ARMRegisters::r10);
+ result.set(ARMRegisters::r11);
+#elif CPU(ARM_TRADITIONAL)
+ result.set(ARMRegisters::r4);
+ result.set(ARMRegisters::r5);
+ result.set(ARMRegisters::r6);
+ result.set(ARMRegisters::r7);
+ result.set(ARMRegisters::r8);
+ result.set(ARMRegisters::r9);
+ result.set(ARMRegisters::r10);
+ result.set(ARMRegisters::r11);
+#elif CPU(ARM64)
+ // We don't include LR in the set of callee-save registers even though it technically belongs
+ // there. This is because we use this set to describe the set of registers that need to be saved
+ // beyond what you would save by the platform-agnostic "preserve return address" and "restore
+ // return address" operations in CCallHelpers.
+ for (
+ ARM64Registers::RegisterID reg = ARM64Registers::x19;
+ reg <= ARM64Registers::x28;
+ reg = static_cast<ARM64Registers::RegisterID>(reg + 1))
+ result.set(reg);
+ result.set(ARM64Registers::fp);
+ for (
+ ARM64Registers::FPRegisterID reg = ARM64Registers::q8;
+ reg <= ARM64Registers::q15;
+ reg = static_cast<ARM64Registers::FPRegisterID>(reg + 1))
+ result.set(reg);
+#elif CPU(MIPS)
#else
UNREACHABLE_FOR_PLATFORM();
#endif
return result;
}
+RegisterSet RegisterSet::vmCalleeSaveRegisters()
+{
+ RegisterSet result;
+#if CPU(X86_64)
+ result.set(GPRInfo::regCS0);
+ result.set(GPRInfo::regCS1);
+ result.set(GPRInfo::regCS2);
+ result.set(GPRInfo::regCS3);
+ result.set(GPRInfo::regCS4);
+#if OS(WINDOWS)
+ result.set(GPRInfo::regCS5);
+ result.set(GPRInfo::regCS6);
+#endif
+#elif CPU(ARM64)
+ result.set(GPRInfo::regCS0);
+ result.set(GPRInfo::regCS1);
+ result.set(GPRInfo::regCS2);
+ result.set(GPRInfo::regCS3);
+ result.set(GPRInfo::regCS4);
+ result.set(GPRInfo::regCS5);
+ result.set(GPRInfo::regCS6);
+ result.set(GPRInfo::regCS7);
+ result.set(GPRInfo::regCS8);
+ result.set(GPRInfo::regCS9);
+ result.set(FPRInfo::fpRegCS0);
+ result.set(FPRInfo::fpRegCS1);
+ result.set(FPRInfo::fpRegCS2);
+ result.set(FPRInfo::fpRegCS3);
+ result.set(FPRInfo::fpRegCS4);
+ result.set(FPRInfo::fpRegCS5);
+ result.set(FPRInfo::fpRegCS6);
+ result.set(FPRInfo::fpRegCS7);
+#endif
+ return result;
+}
+
+RegisterSet RegisterSet::llintBaselineCalleeSaveRegisters()
+{
+ RegisterSet result;
+#if CPU(X86)
+#elif CPU(X86_64)
+#if !OS(WINDOWS)
+ result.set(GPRInfo::regCS2);
+ ASSERT(GPRInfo::regCS3 == GPRInfo::tagTypeNumberRegister);
+ ASSERT(GPRInfo::regCS4 == GPRInfo::tagMaskRegister);
+ result.set(GPRInfo::regCS3);
+ result.set(GPRInfo::regCS4);
+#else
+ result.set(GPRInfo::regCS4);
+ ASSERT(GPRInfo::regCS5 == GPRInfo::tagTypeNumberRegister);
+ ASSERT(GPRInfo::regCS6 == GPRInfo::tagMaskRegister);
+ result.set(GPRInfo::regCS5);
+ result.set(GPRInfo::regCS6);
+#endif
+#elif CPU(ARM_THUMB2)
+#elif CPU(ARM_TRADITIONAL)
+#elif CPU(ARM64)
+ result.set(GPRInfo::regCS7);
+ ASSERT(GPRInfo::regCS8 == GPRInfo::tagTypeNumberRegister);
+ ASSERT(GPRInfo::regCS9 == GPRInfo::tagMaskRegister);
+ result.set(GPRInfo::regCS8);
+ result.set(GPRInfo::regCS9);
+#elif CPU(MIPS)
+#else
+ UNREACHABLE_FOR_PLATFORM();
+#endif
+ return result;
+}
+
+RegisterSet RegisterSet::dfgCalleeSaveRegisters()
+{
+ RegisterSet result;
+#if CPU(X86)
+#elif CPU(X86_64)
+ result.set(GPRInfo::regCS0);
+ result.set(GPRInfo::regCS1);
+ result.set(GPRInfo::regCS2);
+#if !OS(WINDOWS)
+ ASSERT(GPRInfo::regCS3 == GPRInfo::tagTypeNumberRegister);
+ ASSERT(GPRInfo::regCS4 == GPRInfo::tagMaskRegister);
+ result.set(GPRInfo::regCS3);
+ result.set(GPRInfo::regCS4);
+#else
+ result.set(GPRInfo::regCS3);
+ result.set(GPRInfo::regCS4);
+ ASSERT(GPRInfo::regCS5 == GPRInfo::tagTypeNumberRegister);
+ ASSERT(GPRInfo::regCS6 == GPRInfo::tagMaskRegister);
+ result.set(GPRInfo::regCS5);
+ result.set(GPRInfo::regCS6);
+#endif
+#elif CPU(ARM_THUMB2)
+#elif CPU(ARM_TRADITIONAL)
+#elif CPU(ARM64)
+ ASSERT(GPRInfo::regCS8 == GPRInfo::tagTypeNumberRegister);
+ ASSERT(GPRInfo::regCS9 == GPRInfo::tagMaskRegister);
+ result.set(GPRInfo::regCS8);
+ result.set(GPRInfo::regCS9);
+#elif CPU(MIPS)
+#else
+ UNREACHABLE_FOR_PLATFORM();
+#endif
+ return result;
+}
+
+RegisterSet RegisterSet::ftlCalleeSaveRegisters()
+{
+ RegisterSet result;
+#if ENABLE(FTL_JIT)
+#if CPU(X86_64) && !OS(WINDOWS)
+ result.set(GPRInfo::regCS0);
+ result.set(GPRInfo::regCS1);
+ result.set(GPRInfo::regCS2);
+ ASSERT(GPRInfo::regCS3 == GPRInfo::tagTypeNumberRegister);
+ ASSERT(GPRInfo::regCS4 == GPRInfo::tagMaskRegister);
+ result.set(GPRInfo::regCS3);
+ result.set(GPRInfo::regCS4);
+#elif CPU(ARM64)
+ // B3 might save and use all ARM64 callee saves specified in the ABI.
+ result.set(GPRInfo::regCS0);
+ result.set(GPRInfo::regCS1);
+ result.set(GPRInfo::regCS2);
+ result.set(GPRInfo::regCS3);
+ result.set(GPRInfo::regCS4);
+ result.set(GPRInfo::regCS5);
+ result.set(GPRInfo::regCS6);
+ result.set(GPRInfo::regCS7);
+ ASSERT(GPRInfo::regCS8 == GPRInfo::tagTypeNumberRegister);
+ ASSERT(GPRInfo::regCS9 == GPRInfo::tagMaskRegister);
+ result.set(GPRInfo::regCS8);
+ result.set(GPRInfo::regCS9);
+ result.set(FPRInfo::fpRegCS0);
+ result.set(FPRInfo::fpRegCS1);
+ result.set(FPRInfo::fpRegCS2);
+ result.set(FPRInfo::fpRegCS3);
+ result.set(FPRInfo::fpRegCS4);
+ result.set(FPRInfo::fpRegCS5);
+ result.set(FPRInfo::fpRegCS6);
+ result.set(FPRInfo::fpRegCS7);
+#else
+ UNREACHABLE_FOR_PLATFORM();
+#endif
+#endif
+ return result;
+}
+
+RegisterSet RegisterSet::argumentGPRS()
+{
+ RegisterSet result;
+#if NUMBER_OF_ARGUMENT_REGISTERS
+ for (unsigned i = 0; i < GPRInfo::numberOfArgumentRegisters; i++)
+ result.set(GPRInfo::toArgumentRegister(i));
+#endif
+ return result;
+}
+
+RegisterSet RegisterSet::registersToNotSaveForJSCall()
+{
+ return RegisterSet(RegisterSet::vmCalleeSaveRegisters(), RegisterSet::stackRegisters(), RegisterSet::reservedHardwareRegisters());
+}
+
+RegisterSet RegisterSet::registersToNotSaveForCCall()
+{
+ return RegisterSet(RegisterSet::calleeSaveRegisters(), RegisterSet::stackRegisters(), RegisterSet::reservedHardwareRegisters());
+}
+
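Aside (illustration, not part of this patch): both helpers above rely on the variadic RegisterSet constructor introduced in RegisterSet.h further down in this diff, which accepts any mix of single registers and whole sets and merges them. A hedged sketch of the intended caller-side pattern:

    // Everything minus what need not be saved is what a JS call site must preserve.
    RegisterSet toSave = RegisterSet::allRegisters();
    toSave.exclude(RegisterSet::registersToNotSaveForJSCall());
    // toSave now omits the VM callee saves, stack registers, and reserved hardware registers.
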
RegisterSet RegisterSet::allGPRs()
{
RegisterSet result;
@@ -93,9 +348,29 @@ RegisterSet RegisterSet::allRegisters()
return result;
}
+size_t RegisterSet::numberOfSetGPRs() const
+{
+ RegisterSet temp = *this;
+ temp.filter(allGPRs());
+ return temp.numberOfSetRegisters();
+}
+
+size_t RegisterSet::numberOfSetFPRs() const
+{
+ RegisterSet temp = *this;
+ temp.filter(allFPRs());
+ return temp.numberOfSetRegisters();
+}
+
void RegisterSet::dump(PrintStream& out) const
{
- m_vector.dump(out);
+ CommaPrinter comma;
+ out.print("[");
+ for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
+ if (get(reg))
+ out.print(comma, reg);
+ }
+ out.print("]");
}
} // namespace JSC
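
Aside (illustration, not part of this patch): the reworked dump() above prints each set register between brackets with a CommaPrinter, so a logged set reads as a register list rather than a raw bit vector. A hypothetical snippet, assuming the usual JSC headers and register names from GPRInfo/FPRInfo:

    RegisterSet clobbered(GPRInfo::regT0, GPRInfo::regT1, FPRInfo::fpRegT0);
    dataLog("clobbered: ", clobbered, "\n"); // prints a bracketed, comma-separated register list
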
diff --git a/Source/JavaScriptCore/jit/RegisterSet.h b/Source/JavaScriptCore/jit/RegisterSet.h
index 84ad226ad..0359066e3 100644
--- a/Source/JavaScriptCore/jit/RegisterSet.h
+++ b/Source/JavaScriptCore/jit/RegisterSet.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2013-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,92 +23,150 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef RegisterSet_h
-#define RegisterSet_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(JIT)
#include "FPRInfo.h"
#include "GPRInfo.h"
#include "MacroAssembler.h"
+#include "Reg.h"
#include "TempRegisterSet.h"
-#include <wtf/BitVector.h>
+#include <wtf/Bitmap.h>
namespace JSC {
class RegisterSet {
public:
- RegisterSet() { }
+ template<typename... Regs>
+ explicit RegisterSet(Regs... regs)
+ {
+ setMany(regs...);
+ }
- static RegisterSet stackRegisters();
- static RegisterSet specialRegisters();
- static RegisterSet calleeSaveRegisters();
- static RegisterSet allGPRs();
- static RegisterSet allFPRs();
+ JS_EXPORT_PRIVATE static RegisterSet stackRegisters();
+ JS_EXPORT_PRIVATE static RegisterSet reservedHardwareRegisters();
+ static RegisterSet runtimeRegisters();
+ static RegisterSet specialRegisters(); // The union of stack, reserved hardware, and runtime registers.
+ JS_EXPORT_PRIVATE static RegisterSet calleeSaveRegisters();
+ static RegisterSet vmCalleeSaveRegisters(); // Callee save registers that might be saved and used by any tier.
+ static RegisterSet llintBaselineCalleeSaveRegisters(); // Registers saved and used by the LLInt.
+ static RegisterSet dfgCalleeSaveRegisters(); // Registers saved and used by the DFG JIT.
+ static RegisterSet ftlCalleeSaveRegisters(); // Registers that might be saved and used by the FTL JIT.
+#if ENABLE(WEBASSEMBLY)
+ static RegisterSet webAssemblyCalleeSaveRegisters(); // Registers saved and used by the WebAssembly JIT.
+#endif
+ static RegisterSet volatileRegistersForJSCall();
+ static RegisterSet stubUnavailableRegisters(); // The union of callee saves and special registers.
+ JS_EXPORT_PRIVATE static RegisterSet macroScratchRegisters();
+ JS_EXPORT_PRIVATE static RegisterSet allGPRs();
+ JS_EXPORT_PRIVATE static RegisterSet allFPRs();
static RegisterSet allRegisters();
+ static RegisterSet argumentGPRS();
- void set(GPRReg reg, bool value = true)
+ static RegisterSet registersToNotSaveForJSCall();
+ static RegisterSet registersToNotSaveForCCall();
+
+ void set(Reg reg, bool value = true)
{
- m_vector.set(MacroAssembler::registerIndex(reg), value);
+ ASSERT(!!reg);
+ m_bits.set(reg.index(), value);
}
- void set(JSValueRegs regs)
+ void set(JSValueRegs regs, bool value = true)
{
if (regs.tagGPR() != InvalidGPRReg)
- set(regs.tagGPR());
- set(regs.payloadGPR());
+ set(regs.tagGPR(), value);
+ set(regs.payloadGPR(), value);
}
- void clear(GPRReg reg)
+ void clear(Reg reg)
{
+ ASSERT(!!reg);
set(reg, false);
}
- bool get(GPRReg reg) const { return m_vector.get(MacroAssembler::registerIndex(reg)); }
-
- void set(FPRReg reg, bool value = true)
+ bool get(Reg reg) const
{
- m_vector.set(MacroAssembler::registerIndex(reg), value);
+ ASSERT(!!reg);
+ return m_bits.get(reg.index());
}
- void clear(FPRReg reg)
+ template<typename Iterable>
+ void setAll(const Iterable& iterable)
{
- set(reg, false);
+ for (Reg reg : iterable)
+ set(reg);
}
- bool get(FPRReg reg) const { return m_vector.get(MacroAssembler::registerIndex(reg)); }
+ void merge(const RegisterSet& other) { m_bits.merge(other.m_bits); }
+ void filter(const RegisterSet& other) { m_bits.filter(other.m_bits); }
+ void exclude(const RegisterSet& other) { m_bits.exclude(other.m_bits); }
- void merge(const RegisterSet& other) { m_vector.merge(other.m_vector); }
- void filter(const RegisterSet& other) { m_vector.filter(other.m_vector); }
- void exclude(const RegisterSet& other) { m_vector.exclude(other.m_vector); }
+ size_t numberOfSetGPRs() const;
+ size_t numberOfSetFPRs() const;
+ size_t numberOfSetRegisters() const { return m_bits.count(); }
- size_t numberOfSetRegisters() const { return m_vector.bitCount(); }
+ bool isEmpty() const { return m_bits.isEmpty(); }
- void dump(PrintStream&) const;
+ JS_EXPORT_PRIVATE void dump(PrintStream&) const;
enum EmptyValueTag { EmptyValue };
enum DeletedValueTag { DeletedValue };
RegisterSet(EmptyValueTag)
- : m_vector(BitVector::EmptyValue)
{
+ m_bits.set(hashSpecialBitIndex);
}
RegisterSet(DeletedValueTag)
- : m_vector(BitVector::DeletedValue)
{
+ m_bits.set(hashSpecialBitIndex);
+ m_bits.set(deletedBitIndex);
}
- bool isEmptyValue() const { return m_vector.isEmptyValue(); }
- bool isDeletedValue() const { return m_vector.isDeletedValue(); }
+ bool isEmptyValue() const
+ {
+ return m_bits.get(hashSpecialBitIndex) && !m_bits.get(deletedBitIndex);
+ }
+
+ bool isDeletedValue() const
+ {
+ return m_bits.get(hashSpecialBitIndex) && m_bits.get(deletedBitIndex);
+ }
- bool operator==(const RegisterSet& other) const { return m_vector == other.m_vector; }
- unsigned hash() const { return m_vector.hash(); }
+ bool operator==(const RegisterSet& other) const { return m_bits == other.m_bits; }
+ bool operator!=(const RegisterSet& other) const { return m_bits != other.m_bits; }
+
+ unsigned hash() const { return m_bits.hash(); }
+
+ template<typename Func>
+ void forEach(const Func& func) const
+ {
+ m_bits.forEachSetBit(
+ [&] (size_t index) {
+ func(Reg::fromIndex(index));
+ });
+ }
private:
- BitVector m_vector;
+ void setAny(Reg reg) { set(reg); }
+ void setAny(const RegisterSet& set) { merge(set); }
+ void setMany() { }
+ template<typename RegType, typename... Regs>
+ void setMany(RegType reg, Regs... regs)
+ {
+ setAny(reg);
+ setMany(regs...);
+ }
+
+ // These offsets mirror the logic in Reg.h.
+ static const unsigned gprOffset = 0;
+ static const unsigned fprOffset = gprOffset + MacroAssembler::numGPRs;
+ static const unsigned hashSpecialBitIndex = fprOffset + MacroAssembler::numFPRs;
+ static const unsigned deletedBitIndex = 0;
+
+ Bitmap<MacroAssembler::numGPRs + MacroAssembler::numFPRs + 1> m_bits;
};
struct RegisterSetHash {
@@ -132,6 +190,3 @@ template<> struct HashTraits<JSC::RegisterSet> : public CustomHashTraits<JSC::Re
} // namespace WTF
#endif // ENABLE(JIT)
-
-#endif // RegisterSet_h
-
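Aside (illustration, not part of this patch): the Bitmap above carries one bit more than there are registers. That top bit (hashSpecialBitIndex) marks a hash-table sentinel, and the deleted flag reuses bit 0, which is unambiguous because no real register set ever sets the sentinel bit. A minimal sketch of the resulting behavior:

    RegisterSet empty(RegisterSet::EmptyValue);     // sentinel bit only
    RegisterSet deleted(RegisterSet::DeletedValue); // sentinel bit plus bit 0
    ASSERT(empty.isEmptyValue() && !empty.isDeletedValue());
    ASSERT(deleted.isDeletedValue() && !deleted.isEmptyValue());
    RegisterSet ordinary(GPRInfo::regT0);           // a real set never sets the sentinel bit
    ASSERT(!ordinary.isEmptyValue() && !ordinary.isDeletedValue());
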
diff --git a/Source/JavaScriptCore/jit/Repatch.cpp b/Source/JavaScriptCore/jit/Repatch.cpp
index 9c31722e8..bab11b696 100644
--- a/Source/JavaScriptCore/jit/Repatch.cpp
+++ b/Source/JavaScriptCore/jit/Repatch.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,55 +28,60 @@
#if ENABLE(JIT)
+#include "BinarySwitch.h"
#include "CCallHelpers.h"
-#include "CallFrameInlines.h"
+#include "CallFrameShuffler.h"
#include "DFGOperations.h"
#include "DFGSpeculativeJIT.h"
+#include "DOMJITGetterSetter.h"
+#include "DirectArguments.h"
#include "FTLThunks.h"
+#include "FunctionCodeBlock.h"
#include "GCAwareJITStubRoutine.h"
+#include "GetterSetter.h"
+#include "GetterSetterAccessCase.h"
+#include "ICStats.h"
+#include "InlineAccess.h"
+#include "IntrinsicGetterAccessCase.h"
+#include "JIT.h"
+#include "JITInlines.h"
+#include "JSCInlines.h"
+#include "JSModuleNamespaceObject.h"
+#include "JSWebAssembly.h"
#include "LinkBuffer.h"
-#include "Operations.h"
-#include "PolymorphicPutByIdList.h"
-#include "RepatchBuffer.h"
+#include "ModuleNamespaceAccessCase.h"
+#include "PolymorphicAccess.h"
+#include "ScopedArguments.h"
#include "ScratchRegisterAllocator.h"
#include "StackAlignment.h"
#include "StructureRareDataInlines.h"
#include "StructureStubClearingWatchpoint.h"
+#include "StructureStubInfo.h"
#include "ThunkGenerators.h"
+#include <wtf/CommaPrinter.h>
+#include <wtf/ListDump.h>
#include <wtf/StringPrintStream.h>
namespace JSC {
-// Beware: in this code, it is not safe to assume anything about the following registers
-// that would ordinarily have well-known values:
-// - tagTypeNumberRegister
-// - tagMaskRegister
-// - callFrameRegister **
-//
-// We currently only use the callFrameRegister for closure call patching, and we're not going to
-// give the FTL closure call patching support until we switch to the C stack - but when we do that,
-// callFrameRegister will disappear.
-
-static FunctionPtr readCallTarget(RepatchBuffer& repatchBuffer, CodeLocationCall call)
+static FunctionPtr readCallTarget(CodeBlock* codeBlock, CodeLocationCall call)
{
FunctionPtr result = MacroAssembler::readCallTarget(call);
#if ENABLE(FTL_JIT)
- CodeBlock* codeBlock = repatchBuffer.codeBlock();
if (codeBlock->jitType() == JITCode::FTLJIT) {
return FunctionPtr(codeBlock->vm()->ftlThunks->keyForSlowPathCallThunk(
MacroAssemblerCodePtr::createFromExecutableAddress(
result.executableAddress())).callTarget());
}
#else
- UNUSED_PARAM(repatchBuffer);
+ UNUSED_PARAM(codeBlock);
#endif // ENABLE(FTL_JIT)
return result;
}
-static void repatchCall(RepatchBuffer& repatchBuffer, CodeLocationCall call, FunctionPtr newCalleeFunction)
+void ftlThunkAwareRepatchCall(CodeBlock* codeBlock, CodeLocationCall call, FunctionPtr newCalleeFunction)
{
#if ENABLE(FTL_JIT)
- CodeBlock* codeBlock = repatchBuffer.codeBlock();
if (codeBlock->jitType() == JITCode::FTLJIT) {
VM& vm = *codeBlock->vm();
FTL::Thunks& thunks = *vm.ftlThunks;
@@ -87,655 +92,247 @@ static void repatchCall(RepatchBuffer& repatchBuffer, CodeLocationCall call, Fun
newCalleeFunction = FunctionPtr(
thunks.getSlowPathCallThunk(vm, key).code().executableAddress());
}
+#else // ENABLE(FTL_JIT)
+ UNUSED_PARAM(codeBlock);
#endif // ENABLE(FTL_JIT)
- repatchBuffer.relink(call, newCalleeFunction);
+ MacroAssembler::repatchCall(call, newCalleeFunction);
}
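
Aside (illustration, not part of this patch): for FTL code a patchable call does not point at the operation directly but at a slow-path call thunk, so the function above resolves the thunk for the new callee before patching. In rough picture form:

    // patchable call --> slow-path call thunk (FTL only) --> actual operation
    // ftlThunkAwareRepatchCall looks up or creates the thunk for the new callee when
    // codeBlock->jitType() == JITCode::FTLJIT, and otherwise patches the call site
    // directly via MacroAssembler::repatchCall.
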
-static void repatchCall(CodeBlock* codeblock, CodeLocationCall call, FunctionPtr newCalleeFunction)
-{
- RepatchBuffer repatchBuffer(codeblock);
- repatchCall(repatchBuffer, call, newCalleeFunction);
-}
+enum InlineCacheAction {
+ GiveUpOnCache,
+ RetryCacheLater,
+ AttemptToCache
+};
-static void repatchByIdSelfAccess(VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, Structure* structure, const Identifier& propertyName, PropertyOffset offset,
- const FunctionPtr &slowPathFunction, bool compact)
+static InlineCacheAction actionForCell(VM& vm, JSCell* cell)
{
- if (structure->typeInfo().newImpurePropertyFiresWatchpoints())
- vm.registerWatchpointForImpureProperty(propertyName, stubInfo.addWatchpoint(codeBlock));
-
- RepatchBuffer repatchBuffer(codeBlock);
+ Structure* structure = cell->structure(vm);
- // Only optimize once!
- repatchCall(repatchBuffer, stubInfo.callReturnLocation, slowPathFunction);
+ TypeInfo typeInfo = structure->typeInfo();
+ if (typeInfo.prohibitsPropertyCaching())
+ return GiveUpOnCache;
- // Patch the structure check & the offset of the load.
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelPtrAtOffset(-(intptr_t)stubInfo.patch.deltaCheckImmToCall), structure);
- repatchBuffer.setLoadInstructionIsActive(stubInfo.callReturnLocation.convertibleLoadAtOffset(stubInfo.patch.deltaCallToStorageLoad), isOutOfLineOffset(offset));
-#if USE(JSVALUE64)
- if (compact)
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToLoadOrStore), offsetRelativeToPatchedStorage(offset));
- else
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToLoadOrStore), offsetRelativeToPatchedStorage(offset));
-#elif USE(JSVALUE32_64)
- if (compact) {
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToTagLoadOrStore), offsetRelativeToPatchedStorage(offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToPayloadLoadOrStore), offsetRelativeToPatchedStorage(offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
- } else {
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToTagLoadOrStore), offsetRelativeToPatchedStorage(offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToPayloadLoadOrStore), offsetRelativeToPatchedStorage(offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
- }
-#endif
-}
-
-static void addStructureTransitionCheck(
- JSCell* object, Structure* structure, CodeBlock* codeBlock, StructureStubInfo& stubInfo,
- MacroAssembler& jit, MacroAssembler::JumpList& failureCases, GPRReg scratchGPR)
-{
- if (object->structure() == structure && structure->transitionWatchpointSetIsStillValid()) {
- structure->addTransitionWatchpoint(stubInfo.addWatchpoint(codeBlock));
-#if !ASSERT_DISABLED
- // If we execute this code, the object must have the structure we expect. Assert
- // this in debug modes.
- jit.move(MacroAssembler::TrustedImmPtr(object), scratchGPR);
- MacroAssembler::Jump ok = jit.branchPtr(
- MacroAssembler::Equal,
- MacroAssembler::Address(scratchGPR, JSCell::structureOffset()),
- MacroAssembler::TrustedImmPtr(structure));
- jit.breakpoint();
- ok.link(&jit);
-#endif
- return;
+ if (structure->isUncacheableDictionary()) {
+ if (structure->hasBeenFlattenedBefore())
+ return GiveUpOnCache;
+ // Flattening could have changed the offset, so return early for another try.
+ asObject(cell)->flattenDictionaryObject(vm);
+ return RetryCacheLater;
}
- jit.move(MacroAssembler::TrustedImmPtr(object), scratchGPR);
- failureCases.append(
- jit.branchPtr(
- MacroAssembler::NotEqual,
- MacroAssembler::Address(scratchGPR, JSCell::structureOffset()),
- MacroAssembler::TrustedImmPtr(structure)));
-}
-
-static void addStructureTransitionCheck(
- JSValue prototype, CodeBlock* codeBlock, StructureStubInfo& stubInfo,
- MacroAssembler& jit, MacroAssembler::JumpList& failureCases, GPRReg scratchGPR)
-{
- if (prototype.isNull())
- return;
-
- ASSERT(prototype.isCell());
-
- addStructureTransitionCheck(
- prototype.asCell(), prototype.asCell()->structure(), codeBlock, stubInfo, jit,
- failureCases, scratchGPR);
-}
+ if (!structure->propertyAccessesAreCacheable())
+ return GiveUpOnCache;
-static void replaceWithJump(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo, const MacroAssemblerCodePtr target)
-{
- if (MacroAssembler::canJumpReplacePatchableBranchPtrWithPatch()) {
- repatchBuffer.replaceWithJump(
- RepatchBuffer::startOfPatchableBranchPtrWithPatchOnAddress(
- stubInfo.callReturnLocation.dataLabelPtrAtOffset(
- -(intptr_t)stubInfo.patch.deltaCheckImmToCall)),
- CodeLocationLabel(target));
- return;
- }
-
- repatchBuffer.relink(
- stubInfo.callReturnLocation.jumpAtOffset(
- stubInfo.patch.deltaCallToJump),
- CodeLocationLabel(target));
+ return AttemptToCache;
}
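
Aside (illustration, not part of this patch): actionForCell folds the uncacheable-dictionary case into a three-valued answer, so a caller can tell "stop trying" apart from "the structure just changed by flattening, try again on the next slow-path hit". The caller-side pattern, mirroring the real call site later in tryCacheGetByID:

    InlineCacheAction action = actionForCell(vm, baseCell);
    if (action != AttemptToCache)
        return action; // GiveUpOnCache or RetryCacheLater propagates up to repatchGetByID.
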
-static void emitRestoreScratch(MacroAssembler& stubJit, bool needToRestoreScratch, GPRReg scratchGPR, MacroAssembler::Jump& success, MacroAssembler::Jump& fail, MacroAssembler::JumpList failureCases)
+static bool forceICFailure(ExecState*)
{
- if (needToRestoreScratch) {
- stubJit.popToRestore(scratchGPR);
-
- success = stubJit.jump();
-
- // link failure cases here, so we can pop scratchGPR, and then jump back.
- failureCases.link(&stubJit);
-
- stubJit.popToRestore(scratchGPR);
-
- fail = stubJit.jump();
- return;
- }
-
- success = stubJit.jump();
+#if CPU(ARM_TRADITIONAL)
+ // FIXME: Remove this workaround once the proper fixes are landed.
+ // [ARM] Disable Inline Caching on ARMv7 traditional until proper fix
+ // https://bugs.webkit.org/show_bug.cgi?id=159759
+ return true;
+#else
+ return Options::forceICFailure();
+#endif
}
-static void linkRestoreScratch(LinkBuffer& patchBuffer, bool needToRestoreScratch, MacroAssembler::Jump success, MacroAssembler::Jump fail, MacroAssembler::JumpList failureCases, CodeLocationLabel successLabel, CodeLocationLabel slowCaseBegin)
+inline J_JITOperation_ESsiJI appropriateOptimizingGetByIdFunction(GetByIDKind kind)
{
- patchBuffer.link(success, successLabel);
-
- if (needToRestoreScratch) {
- patchBuffer.link(fail, slowCaseBegin);
- return;
- }
-
- // link failure cases directly back to normal path
- patchBuffer.link(failureCases, slowCaseBegin);
+ if (kind == GetByIDKind::Normal)
+ return operationGetByIdOptimize;
+ return operationTryGetByIdOptimize;
}
-static void linkRestoreScratch(LinkBuffer& patchBuffer, bool needToRestoreScratch, StructureStubInfo& stubInfo, MacroAssembler::Jump success, MacroAssembler::Jump fail, MacroAssembler::JumpList failureCases)
+inline J_JITOperation_ESsiJI appropriateGenericGetByIdFunction(GetByIDKind kind)
{
- linkRestoreScratch(patchBuffer, needToRestoreScratch, success, fail, failureCases, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone), stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
+ if (kind == GetByIDKind::Normal)
+ return operationGetById;
+ return operationTryGetById;
}
-enum ProtoChainGenerationResult {
- ProtoChainGenerationFailed,
- ProtoChainGenerationSucceeded
-};
-
-static ProtoChainGenerationResult generateProtoChainAccessStub(ExecState*, const PropertySlot&, const Identifier&, StructureStubInfo&, StructureChain*, size_t, PropertyOffset, Structure*, CodeLocationLabel, CodeLocationLabel, RefPtr<JITStubRoutine>&) WARN_UNUSED_RETURN;
-static ProtoChainGenerationResult generateProtoChainAccessStub(ExecState* exec, const PropertySlot& slot, const Identifier& propertyName, StructureStubInfo& stubInfo, StructureChain* chain, size_t count, PropertyOffset offset, Structure* structure, CodeLocationLabel successLabel, CodeLocationLabel slowCaseLabel, RefPtr<JITStubRoutine>& stubRoutine)
+static InlineCacheAction tryCacheGetByID(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo, GetByIDKind kind)
{
- VM* vm = &exec->vm();
- GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
-#if USE(JSVALUE32_64)
- GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.patch.valueTagGPR);
-#endif
- GPRReg resultGPR = static_cast<GPRReg>(stubInfo.patch.valueGPR);
- GPRReg scratchGPR = TempRegisterSet(stubInfo.patch.usedRegisters).getFreeGPR();
- bool needToRestoreScratch = scratchGPR == InvalidGPRReg;
- if (needToRestoreScratch && !slot.isCacheableValue())
- return ProtoChainGenerationFailed;
+ if (forceICFailure(exec))
+ return GiveUpOnCache;
- CCallHelpers stubJit(&exec->vm(), exec->codeBlock());
- if (needToRestoreScratch) {
-#if USE(JSVALUE64)
- scratchGPR = AssemblyHelpers::selectScratchGPR(baseGPR, resultGPR);
-#else
- scratchGPR = AssemblyHelpers::selectScratchGPR(baseGPR, resultGPR, resultTagGPR);
-#endif
- stubJit.pushToSave(scratchGPR);
- needToRestoreScratch = true;
- }
-
- MacroAssembler::JumpList failureCases;
-
- failureCases.append(stubJit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(structure)));
+ // FIXME: Cache property access for immediates.
+ if (!baseValue.isCell())
+ return GiveUpOnCache;
CodeBlock* codeBlock = exec->codeBlock();
- if (structure->typeInfo().newImpurePropertyFiresWatchpoints())
- vm->registerWatchpointForImpureProperty(propertyName, stubInfo.addWatchpoint(codeBlock));
-
- Structure* currStructure = structure;
- WriteBarrier<Structure>* it = chain->head();
- JSObject* protoObject = 0;
- for (unsigned i = 0; i < count; ++i, ++it) {
- protoObject = asObject(currStructure->prototypeForLookup(exec));
- Structure* protoStructure = protoObject->structure();
- if (protoStructure->typeInfo().newImpurePropertyFiresWatchpoints())
- vm->registerWatchpointForImpureProperty(propertyName, stubInfo.addWatchpoint(codeBlock));
- addStructureTransitionCheck(
- protoObject, protoStructure, codeBlock, stubInfo, stubJit,
- failureCases, scratchGPR);
- currStructure = it->get();
- }
-
- bool isAccessor = slot.isCacheableGetter() || slot.isCacheableCustom();
- if (isAccessor)
- stubJit.move(baseGPR, scratchGPR);
+ VM& vm = exec->vm();
- if (!slot.isCacheableCustom()) {
- if (isInlineOffset(offset)) {
-#if USE(JSVALUE64)
- stubJit.load64(protoObject->locationForOffset(offset), resultGPR);
-#elif USE(JSVALUE32_64)
- stubJit.move(MacroAssembler::TrustedImmPtr(protoObject->locationForOffset(offset)), resultGPR);
- stubJit.load32(MacroAssembler::Address(resultGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
- stubJit.load32(MacroAssembler::Address(resultGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
-#endif
- } else {
- stubJit.loadPtr(protoObject->butterflyAddress(), resultGPR);
-#if USE(JSVALUE64)
- stubJit.load64(MacroAssembler::Address(resultGPR, offsetInButterfly(offset) * sizeof(WriteBarrier<Unknown>)), resultGPR);
-#elif USE(JSVALUE32_64)
- stubJit.load32(MacroAssembler::Address(resultGPR, offsetInButterfly(offset) * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
- stubJit.load32(MacroAssembler::Address(resultGPR, offsetInButterfly(offset) * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
-#endif
- }
- }
- MacroAssembler::Call operationCall;
- MacroAssembler::Call handlerCall;
- FunctionPtr operationFunction;
- MacroAssembler::Jump success, fail;
- if (isAccessor) {
- GPRReg callFrameRegister = static_cast<GPRReg>(stubInfo.patch.callFrameRegister);
- if (slot.isCacheableGetter()) {
- stubJit.setupArguments(callFrameRegister, scratchGPR, resultGPR);
- operationFunction = operationCallGetter;
- } else {
- stubJit.move(MacroAssembler::TrustedImmPtr(protoObject), scratchGPR);
- stubJit.setupArguments(callFrameRegister, scratchGPR,
- MacroAssembler::TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()),
- MacroAssembler::TrustedImmPtr(propertyName.impl()));
- operationFunction = operationCallCustomGetter;
- }
+ std::unique_ptr<AccessCase> newCase;
- // Need to make sure that whenever this call is made in the future, we remember the
- // place that we made it from. It just so happens to be the place that we are at
- // right now!
- stubJit.store32(MacroAssembler::TrustedImm32(exec->locationAsRawBits()),
- CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));
+ if (propertyName == vm.propertyNames->length) {
+ if (isJSArray(baseValue)) {
+ if (stubInfo.cacheType == CacheType::Unset
+ && slot.slotBase() == baseValue
+ && InlineAccess::isCacheableArrayLength(stubInfo, jsCast<JSArray*>(baseValue))) {
- operationCall = stubJit.call();
-#if USE(JSVALUE64)
- stubJit.move(GPRInfo::returnValueGPR, resultGPR);
-#else
- stubJit.setupResults(resultGPR, resultTagGPR);
-#endif
- MacroAssembler::Jump noException = stubJit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
-
- stubJit.setupArgumentsExecState();
- handlerCall = stubJit.call();
- stubJit.jumpToExceptionHandler();
-
- noException.link(&stubJit);
- }
- emitRestoreScratch(stubJit, needToRestoreScratch, scratchGPR, success, fail, failureCases);
-
- LinkBuffer patchBuffer(*vm, &stubJit, exec->codeBlock());
-
- linkRestoreScratch(patchBuffer, needToRestoreScratch, success, fail, failureCases, successLabel, slowCaseLabel);
- if (isAccessor) {
- patchBuffer.link(operationCall, operationFunction);
- patchBuffer.link(handlerCall, lookupExceptionHandler);
- }
- stubRoutine = FINALIZE_CODE_FOR_DFG_STUB(
- patchBuffer,
- ("DFG prototype chain access stub for %s, return point %p",
- toCString(*exec->codeBlock()).data(), successLabel.executableAddress()));
- return ProtoChainGenerationSucceeded;
-}
-
-static bool tryCacheGetByID(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo)
-{
- // FIXME: Write a test that proves we need to check for recursion here just
- // like the interpreter does, then add a check for recursion.
+ bool generatedCodeInline = InlineAccess::generateArrayLength(*codeBlock->vm(), stubInfo, jsCast<JSArray*>(baseValue));
+ if (generatedCodeInline) {
+ ftlThunkAwareRepatchCall(codeBlock, stubInfo.slowPathCallLocation(), appropriateOptimizingGetByIdFunction(kind));
+ stubInfo.initArrayLength();
+ return RetryCacheLater;
+ }
+ }
- CodeBlock* codeBlock = exec->codeBlock();
- VM* vm = &exec->vm();
-
- if (isJSArray(baseValue) && propertyName == exec->propertyNames().length) {
- GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
-#if USE(JSVALUE32_64)
- GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.patch.valueTagGPR);
-#endif
- GPRReg resultGPR = static_cast<GPRReg>(stubInfo.patch.valueGPR);
- GPRReg scratchGPR = TempRegisterSet(stubInfo.patch.usedRegisters).getFreeGPR();
- bool needToRestoreScratch = false;
-
- MacroAssembler stubJit;
-
- if (scratchGPR == InvalidGPRReg) {
-#if USE(JSVALUE64)
- scratchGPR = AssemblyHelpers::selectScratchGPR(baseGPR, resultGPR);
-#else
- scratchGPR = AssemblyHelpers::selectScratchGPR(baseGPR, resultGPR, resultTagGPR);
-#endif
- stubJit.pushToSave(scratchGPR);
- needToRestoreScratch = true;
+ newCase = AccessCase::create(vm, codeBlock, AccessCase::ArrayLength);
+ } else if (isJSString(baseValue))
+ newCase = AccessCase::create(vm, codeBlock, AccessCase::StringLength);
+ else if (DirectArguments* arguments = jsDynamicCast<DirectArguments*>(vm, baseValue)) {
+ // If there were overrides, then we can handle this as a normal property load! Guarding
+ // this with such a check enables us to add an IC case for that load if needed.
+ if (!arguments->overrodeThings())
+ newCase = AccessCase::create(vm, codeBlock, AccessCase::DirectArgumentsLength);
+ } else if (ScopedArguments* arguments = jsDynamicCast<ScopedArguments*>(vm, baseValue)) {
+ // Ditto.
+ if (!arguments->overrodeThings())
+ newCase = AccessCase::create(vm, codeBlock, AccessCase::ScopedArgumentsLength);
}
-
- MacroAssembler::JumpList failureCases;
-
- stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSCell::structureOffset()), scratchGPR);
- stubJit.load8(MacroAssembler::Address(scratchGPR, Structure::indexingTypeOffset()), scratchGPR);
- failureCases.append(stubJit.branchTest32(MacroAssembler::Zero, scratchGPR, MacroAssembler::TrustedImm32(IsArray)));
- failureCases.append(stubJit.branchTest32(MacroAssembler::Zero, scratchGPR, MacroAssembler::TrustedImm32(IndexingShapeMask)));
-
- stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
- stubJit.load32(MacroAssembler::Address(scratchGPR, ArrayStorage::lengthOffset()), scratchGPR);
- failureCases.append(stubJit.branch32(MacroAssembler::LessThan, scratchGPR, MacroAssembler::TrustedImm32(0)));
-
- stubJit.move(scratchGPR, resultGPR);
-#if USE(JSVALUE64)
- stubJit.or64(AssemblyHelpers::TrustedImm64(TagTypeNumber), resultGPR);
-#elif USE(JSVALUE32_64)
- stubJit.move(AssemblyHelpers::TrustedImm32(0xffffffff), resultTagGPR); // JSValue::Int32Tag
-#endif
-
- MacroAssembler::Jump success, fail;
-
- emitRestoreScratch(stubJit, needToRestoreScratch, scratchGPR, success, fail, failureCases);
-
- LinkBuffer patchBuffer(*vm, &stubJit, codeBlock);
-
- linkRestoreScratch(patchBuffer, needToRestoreScratch, stubInfo, success, fail, failureCases);
-
- stubInfo.stubRoutine = FINALIZE_CODE_FOR_DFG_STUB(
- patchBuffer,
- ("DFG GetById array length stub for %s, return point %p",
- toCString(*exec->codeBlock()).data(), stubInfo.callReturnLocation.labelAtOffset(
- stubInfo.patch.deltaCallToDone).executableAddress()));
-
- RepatchBuffer repatchBuffer(codeBlock);
- replaceWithJump(repatchBuffer, stubInfo, stubInfo.stubRoutine->code().code());
- repatchCall(repatchBuffer, stubInfo.callReturnLocation, operationGetById);
-
- return true;
}
-
- // FIXME: should support length access for String.
-
- // FIXME: Cache property access for immediates.
- if (!baseValue.isCell())
- return false;
- JSCell* baseCell = baseValue.asCell();
- Structure* structure = baseCell->structure();
- if (!slot.isCacheable())
- return false;
- if (!structure->propertyAccessesAreCacheable())
- return false;
-
- // Optimize self access.
- if (slot.slotBase() == baseValue) {
- if (!slot.isCacheableValue()
- || !MacroAssembler::isCompactPtrAlignedAddressOffset(maxOffsetRelativeToPatchedStorage(slot.cachedOffset()))) {
- repatchCall(codeBlock, stubInfo.callReturnLocation, operationGetByIdBuildList);
- return true;
- }
- repatchByIdSelfAccess(*vm, codeBlock, stubInfo, structure, propertyName, slot.cachedOffset(), operationGetByIdBuildList, true);
- stubInfo.initGetByIdSelf(*vm, codeBlock->ownerExecutable(), structure);
- return true;
+ if (!propertyName.isSymbol() && isJSModuleNamespaceObject(baseValue) && !slot.isUnset()) {
+ if (auto moduleNamespaceSlot = slot.moduleNamespaceSlot())
+ newCase = ModuleNamespaceAccessCase::create(vm, codeBlock, jsCast<JSModuleNamespaceObject*>(baseValue), moduleNamespaceSlot->environment, ScopeOffset(moduleNamespaceSlot->scopeOffset));
}
- if (structure->isDictionary())
- return false;
-
- if (!stubInfo.patch.registersFlushed) {
- // We cannot do as much inline caching if the registers were not flushed prior to this GetById. In particular,
- // non-Value cached properties require planting calls, which requires registers to have been flushed. Thus,
- // if registers were not flushed, don't do non-Value caching.
- if (!slot.isCacheableValue())
- return false;
- }
-
- PropertyOffset offset = slot.cachedOffset();
- size_t count = normalizePrototypeChainForChainAccess(exec, baseValue, slot.slotBase(), propertyName, offset);
- if (count == InvalidPrototypeChain)
- return false;
-
- StructureChain* prototypeChain = structure->prototypeChain(exec);
- if (generateProtoChainAccessStub(exec, slot, propertyName, stubInfo, prototypeChain, count, offset,
- structure, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone),
- stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase), stubInfo.stubRoutine) == ProtoChainGenerationFailed)
- return false;
-
- RepatchBuffer repatchBuffer(codeBlock);
- replaceWithJump(repatchBuffer, stubInfo, stubInfo.stubRoutine->code().code());
- repatchCall(repatchBuffer, stubInfo.callReturnLocation, operationGetByIdBuildList);
-
- stubInfo.initGetByIdChain(*vm, codeBlock->ownerExecutable(), structure, prototypeChain, count, slot.isCacheableValue());
- return true;
-}
+ if (!newCase) {
+ if (!slot.isCacheable() && !slot.isUnset())
+ return GiveUpOnCache;
-void repatchGetByID(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo)
-{
- GCSafeConcurrentJITLocker locker(exec->codeBlock()->m_lock, exec->vm().heap);
-
- bool cached = tryCacheGetByID(exec, baseValue, propertyName, slot, stubInfo);
- if (!cached)
- repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, operationGetById);
-}
+ ObjectPropertyConditionSet conditionSet;
+ JSCell* baseCell = baseValue.asCell();
+ Structure* structure = baseCell->structure(vm);
-static bool getPolymorphicStructureList(
- VM* vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo,
- PolymorphicAccessStructureList*& polymorphicStructureList, int& listIndex,
- CodeLocationLabel& slowCase)
-{
- slowCase = stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase);
-
- if (stubInfo.accessType == access_unset) {
- RELEASE_ASSERT(!stubInfo.stubRoutine);
- polymorphicStructureList = new PolymorphicAccessStructureList();
- stubInfo.initGetByIdSelfList(polymorphicStructureList, 0, false);
- listIndex = 0;
- } else if (stubInfo.accessType == access_get_by_id_self) {
- RELEASE_ASSERT(!stubInfo.stubRoutine);
- polymorphicStructureList = new PolymorphicAccessStructureList(*vm, codeBlock->ownerExecutable(), JITStubRoutine::createSelfManagedRoutine(slowCase), stubInfo.u.getByIdSelf.baseObjectStructure.get(), true);
- stubInfo.initGetByIdSelfList(polymorphicStructureList, 1, true);
- listIndex = 1;
- } else if (stubInfo.accessType == access_get_by_id_chain) {
- RELEASE_ASSERT(!!stubInfo.stubRoutine);
- slowCase = CodeLocationLabel(stubInfo.stubRoutine->code().code());
- polymorphicStructureList = new PolymorphicAccessStructureList(*vm, codeBlock->ownerExecutable(), stubInfo.stubRoutine, stubInfo.u.getByIdChain.baseObjectStructure.get(), stubInfo.u.getByIdChain.chain.get(), true);
- stubInfo.stubRoutine.clear();
- stubInfo.initGetByIdSelfList(polymorphicStructureList, 1, false);
- listIndex = 1;
- } else {
- RELEASE_ASSERT(stubInfo.accessType == access_get_by_id_self_list);
- polymorphicStructureList = stubInfo.u.getByIdSelfList.structureList;
- listIndex = stubInfo.u.getByIdSelfList.listSize;
- slowCase = CodeLocationLabel(polymorphicStructureList->list[listIndex - 1].stubRoutine->code().code());
- }
-
- if (listIndex == POLYMORPHIC_LIST_CACHE_SIZE)
- return false;
-
- RELEASE_ASSERT(listIndex < POLYMORPHIC_LIST_CACHE_SIZE);
- return true;
-}
+ bool loadTargetFromProxy = false;
+ if (baseCell->type() == PureForwardingProxyType) {
+ baseValue = jsCast<JSProxy*>(baseCell)->target();
+ baseCell = baseValue.asCell();
+ structure = baseCell->structure(vm);
+ loadTargetFromProxy = true;
+ }
-static void patchJumpToGetByIdStub(CodeBlock* codeBlock, StructureStubInfo& stubInfo, JITStubRoutine* stubRoutine)
-{
- RELEASE_ASSERT(stubInfo.accessType == access_get_by_id_self_list);
- RepatchBuffer repatchBuffer(codeBlock);
- if (stubInfo.u.getByIdSelfList.didSelfPatching) {
- repatchBuffer.relink(
- stubInfo.callReturnLocation.jumpAtOffset(
- stubInfo.patch.deltaCallToJump),
- CodeLocationLabel(stubRoutine->code().code()));
- return;
- }
-
- replaceWithJump(repatchBuffer, stubInfo, stubRoutine->code().code());
-}
+ InlineCacheAction action = actionForCell(vm, baseCell);
+ if (action != AttemptToCache)
+ return action;
+
+ // Optimize self access.
+ if (stubInfo.cacheType == CacheType::Unset
+ && slot.isCacheableValue()
+ && slot.slotBase() == baseValue
+ && !slot.watchpointSet()
+ && !structure->needImpurePropertyWatchpoint()
+ && !loadTargetFromProxy) {
+
+ bool generatedCodeInline = InlineAccess::generateSelfPropertyAccess(*codeBlock->vm(), stubInfo, structure, slot.cachedOffset());
+ if (generatedCodeInline) {
+ LOG_IC((ICEvent::GetByIdSelfPatch, structure->classInfo(), propertyName));
+ structure->startWatchingPropertyForReplacements(vm, slot.cachedOffset());
+ ftlThunkAwareRepatchCall(codeBlock, stubInfo.slowPathCallLocation(), appropriateOptimizingGetByIdFunction(kind));
+ stubInfo.initGetByIdSelf(codeBlock, structure, slot.cachedOffset());
+ return RetryCacheLater;
+ }
+ }
-static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identifier& ident, const PropertySlot& slot, StructureStubInfo& stubInfo)
-{
- if (!baseValue.isCell()
- || !slot.isCacheable()
- || !baseValue.asCell()->structure()->propertyAccessesAreCacheable())
- return false;
+ PropertyOffset offset = slot.isUnset() ? invalidOffset : slot.cachedOffset();
- CodeBlock* codeBlock = exec->codeBlock();
- VM* vm = &exec->vm();
- JSCell* baseCell = baseValue.asCell();
- Structure* structure = baseCell->structure();
-
- if (slot.slotBase() == baseValue) {
- if (!stubInfo.patch.registersFlushed) {
- // We cannot do as much inline caching if the registers were not flushed prior to this GetById. In particular,
- // non-Value cached properties require planting calls, which requires registers to have been flushed. Thus,
- // if registers were not flushed, don't do non-Value caching.
- if (!slot.isCacheableValue())
- return false;
- }
-
- PolymorphicAccessStructureList* polymorphicStructureList;
- int listIndex;
- CodeLocationLabel slowCase;
+ if (slot.isUnset() || slot.slotBase() != baseValue) {
+ if (structure->typeInfo().prohibitsPropertyCaching())
+ return GiveUpOnCache;
- if (!getPolymorphicStructureList(vm, codeBlock, stubInfo, polymorphicStructureList, listIndex, slowCase))
- return false;
-
- stubInfo.u.getByIdSelfList.listSize++;
-
- GPRReg callFrameRegister = static_cast<GPRReg>(stubInfo.patch.callFrameRegister);
- GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
-#if USE(JSVALUE32_64)
- GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.patch.valueTagGPR);
-#endif
- GPRReg resultGPR = static_cast<GPRReg>(stubInfo.patch.valueGPR);
- GPRReg scratchGPR = TempRegisterSet(stubInfo.patch.usedRegisters).getFreeGPR();
-
- CCallHelpers stubJit(vm, codeBlock);
-
- MacroAssembler::Jump wrongStruct = stubJit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(structure));
-
- // The strategy we use for stubs is as follows:
- // 1) Call DFG helper that calls the getter.
- // 2) Check if there was an exception, and if there was, call yet another
- // helper.
-
- bool isDirect = false;
- MacroAssembler::Call operationCall;
- MacroAssembler::Call handlerCall;
- FunctionPtr operationFunction;
- MacroAssembler::Jump success;
-
- if (slot.isCacheableGetter() || slot.isCacheableCustom()) {
- if (slot.isCacheableGetter()) {
- ASSERT(scratchGPR != InvalidGPRReg);
- ASSERT(baseGPR != scratchGPR);
- if (isInlineOffset(slot.cachedOffset())) {
-#if USE(JSVALUE64)
- stubJit.load64(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR);
-#else
- stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR);
-#endif
- } else {
- stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
-#if USE(JSVALUE64)
- stubJit.load64(MacroAssembler::Address(scratchGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR);
-#else
- stubJit.load32(MacroAssembler::Address(scratchGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR);
-#endif
- }
- stubJit.setupArguments(callFrameRegister, baseGPR, scratchGPR);
- operationFunction = operationCallGetter;
- } else {
- stubJit.setupArguments(
- callFrameRegister, baseGPR,
- MacroAssembler::TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()),
- MacroAssembler::TrustedImmPtr(ident.impl()));
- operationFunction = operationCallCustomGetter;
+ if (structure->isDictionary()) {
+ if (structure->hasBeenFlattenedBefore())
+ return GiveUpOnCache;
+ structure->flattenDictionaryStructure(vm, jsCast<JSObject*>(baseCell));
}
- // Need to make sure that whenever this call is made in the future, we remember the
- // place that we made it from. It just so happens to be the place that we are at
- // right now!
- stubJit.store32(
- MacroAssembler::TrustedImm32(exec->locationAsRawBits()),
- CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));
-
- operationCall = stubJit.call();
-#if USE(JSVALUE64)
- stubJit.move(GPRInfo::returnValueGPR, resultGPR);
-#else
- stubJit.setupResults(resultGPR, resultTagGPR);
-#endif
- success = stubJit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
-
- stubJit.setupArgumentsExecState();
- handlerCall = stubJit.call();
- stubJit.jumpToExceptionHandler();
- } else {
- if (isInlineOffset(slot.cachedOffset())) {
-#if USE(JSVALUE64)
- stubJit.load64(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset())), resultGPR);
-#else
- if (baseGPR == resultTagGPR) {
- stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
- stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
- } else {
- stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
- stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
- }
-#endif
+ if (slot.isUnset() && structure->typeInfo().getOwnPropertySlotIsImpureForPropertyAbsence())
+ return GiveUpOnCache;
+
+ if (slot.isUnset()) {
+ conditionSet = generateConditionsForPropertyMiss(
+ vm, codeBlock, exec, structure, propertyName.impl());
} else {
- stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), resultGPR);
-#if USE(JSVALUE64)
- stubJit.load64(MacroAssembler::Address(resultGPR, offsetRelativeToBase(slot.cachedOffset())), resultGPR);
-#else
- stubJit.load32(MacroAssembler::Address(resultGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
- stubJit.load32(MacroAssembler::Address(resultGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
-#endif
+ conditionSet = generateConditionsForPrototypePropertyHit(
+ vm, codeBlock, exec, structure, slot.slotBase(),
+ propertyName.impl());
}
- success = stubJit.jump();
- isDirect = true;
+
+ if (!conditionSet.isValid())
+ return GiveUpOnCache;
+
+ offset = slot.isUnset() ? invalidOffset : conditionSet.slotBaseCondition().offset();
}
- LinkBuffer patchBuffer(*vm, &stubJit, codeBlock);
-
- patchBuffer.link(wrongStruct, slowCase);
- patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone));
- if (!isDirect) {
- patchBuffer.link(operationCall, operationFunction);
- patchBuffer.link(handlerCall, lookupExceptionHandler);
+ JSFunction* getter = nullptr;
+ if (slot.isCacheableGetter())
+ getter = jsDynamicCast<JSFunction*>(vm, slot.getterSetter()->getter());
+
+ DOMJIT::GetterSetter* domJIT = nullptr;
+ if (slot.isCacheableCustom() && slot.domJIT())
+ domJIT = slot.domJIT();
+
+ if (kind == GetByIDKind::Try) {
+ AccessCase::AccessType type;
+ if (slot.isCacheableValue())
+ type = AccessCase::Load;
+ else if (slot.isUnset())
+ type = AccessCase::Miss;
+ else if (slot.isCacheableGetter())
+ type = AccessCase::GetGetter;
+ else
+ RELEASE_ASSERT_NOT_REACHED();
+
+ newCase = ProxyableAccessCase::create(vm, codeBlock, type, offset, structure, conditionSet, loadTargetFromProxy, slot.watchpointSet());
+ } else if (!loadTargetFromProxy && getter && IntrinsicGetterAccessCase::canEmitIntrinsicGetter(getter, structure))
+ newCase = IntrinsicGetterAccessCase::create(vm, codeBlock, slot.cachedOffset(), structure, conditionSet, getter);
+ else {
+ if (slot.isCacheableValue() || slot.isUnset()) {
+ newCase = ProxyableAccessCase::create(vm, codeBlock, slot.isUnset() ? AccessCase::Miss : AccessCase::Load,
+ offset, structure, conditionSet, loadTargetFromProxy, slot.watchpointSet());
+ } else {
+ AccessCase::AccessType type;
+ if (slot.isCacheableGetter())
+ type = AccessCase::Getter;
+ else if (slot.attributes() & CustomAccessor)
+ type = AccessCase::CustomAccessorGetter;
+ else
+ type = AccessCase::CustomValueGetter;
+
+ newCase = GetterSetterAccessCase::create(
+ vm, codeBlock, type, offset, structure, conditionSet, loadTargetFromProxy,
+ slot.watchpointSet(), slot.isCacheableCustom() ? slot.customGetter() : nullptr,
+ slot.isCacheableCustom() ? slot.slotBase() : nullptr,
+ domJIT);
+ }
}
-
- RefPtr<JITStubRoutine> stubRoutine =
- createJITStubRoutine(
- FINALIZE_DFG_CODE(
- patchBuffer,
- ("DFG GetById polymorphic list access for %s, return point %p",
- toCString(*exec->codeBlock()).data(), stubInfo.callReturnLocation.labelAtOffset(
- stubInfo.patch.deltaCallToDone).executableAddress())),
- *vm,
- codeBlock->ownerExecutable(),
- slot.isCacheableGetter() || slot.isCacheableCustom());
-
- polymorphicStructureList->list[listIndex].set(*vm, codeBlock->ownerExecutable(), stubRoutine, structure, isDirect);
-
- patchJumpToGetByIdStub(codeBlock, stubInfo, stubRoutine.get());
- return listIndex < (POLYMORPHIC_LIST_CACHE_SIZE - 1);
}
-
- if (baseValue.asCell()->structure()->typeInfo().prohibitsPropertyCaching()
- || baseValue.asCell()->structure()->isDictionary())
- return false;
-
- if (!stubInfo.patch.registersFlushed) {
- // We cannot do as much inline caching if the registers were not flushed prior to this GetById. In particular,
- // non-Value cached properties require planting calls, which requires registers to have been flushed. Thus,
- // if registers were not flushed, don't do non-Value caching.
- if (!slot.isCacheableValue())
- return false;
- }
-
- PropertyOffset offset = slot.cachedOffset();
- size_t count = normalizePrototypeChainForChainAccess(exec, baseValue, slot.slotBase(), ident, offset);
- if (count == InvalidPrototypeChain)
- return false;
+ LOG_IC((ICEvent::GetByIdAddAccessCase, baseValue.classInfoOrNull(vm), propertyName));
- StructureChain* prototypeChain = structure->prototypeChain(exec);
-
- PolymorphicAccessStructureList* polymorphicStructureList;
- int listIndex;
- CodeLocationLabel slowCase;
- if (!getPolymorphicStructureList(vm, codeBlock, stubInfo, polymorphicStructureList, listIndex, slowCase))
- return false;
-
- stubInfo.u.getByIdProtoList.listSize++;
-
- RefPtr<JITStubRoutine> stubRoutine;
-
- if (generateProtoChainAccessStub(exec, slot, ident, stubInfo, prototypeChain, count, offset, structure,
- stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone),
- slowCase, stubRoutine) == ProtoChainGenerationFailed)
- return false;
-
- polymorphicStructureList->list[listIndex].set(*vm, codeBlock->ownerExecutable(), stubRoutine, structure, slot.isCacheableValue());
-
- patchJumpToGetByIdStub(codeBlock, stubInfo, stubRoutine.get());
+ AccessGenerationResult result = stubInfo.addAccessCase(codeBlock, propertyName, WTFMove(newCase));
+
+ if (result.generatedSomeCode()) {
+ LOG_IC((ICEvent::GetByIdReplaceWithJump, baseValue.classInfoOrNull(vm), propertyName));
+
+ RELEASE_ASSERT(result.code());
+ InlineAccess::rewireStubAsJump(exec->vm(), stubInfo, CodeLocationLabel(result.code()));
+ }
- return listIndex < (POLYMORPHIC_LIST_CACHE_SIZE - 1);
+ return result.shouldGiveUpNow() ? GiveUpOnCache : RetryCacheLater;
}
-void buildGetByIDList(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo)
+void repatchGetByID(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo, GetByIDKind kind)
{
- GCSafeConcurrentJITLocker locker(exec->codeBlock()->m_lock, exec->vm().heap);
+ SuperSamplerScope superSamplerScope(false);
+ GCSafeConcurrentJSLocker locker(exec->codeBlock()->m_lock, exec->vm().heap);
- bool dontChangeCall = tryBuildGetByIDList(exec, baseValue, propertyName, slot, stubInfo);
- if (!dontChangeCall)
- repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, operationGetById);
+ if (tryCacheGetByID(exec, baseValue, propertyName, slot, stubInfo, kind) == GiveUpOnCache)
+ ftlThunkAwareRepatchCall(exec->codeBlock(), stubInfo.slowPathCallLocation(), appropriateGenericGetByIdFunction(kind));
}
static V_JITOperation_ESsiJJI appropriateGenericPutByIdFunction(const PutPropertySlot &slot, PutKind putKind)
@@ -750,862 +347,666 @@ static V_JITOperation_ESsiJJI appropriateGenericPutByIdFunction(const PutPropert
return operationPutByIdNonStrict;
}
-static V_JITOperation_ESsiJJI appropriateListBuildingPutByIdFunction(const PutPropertySlot &slot, PutKind putKind)
+static V_JITOperation_ESsiJJI appropriateOptimizingPutByIdFunction(const PutPropertySlot &slot, PutKind putKind)
{
if (slot.isStrictMode()) {
if (putKind == Direct)
- return operationPutByIdDirectStrictBuildList;
- return operationPutByIdStrictBuildList;
+ return operationPutByIdDirectStrictOptimize;
+ return operationPutByIdStrictOptimize;
}
if (putKind == Direct)
- return operationPutByIdDirectNonStrictBuildList;
- return operationPutByIdNonStrictBuildList;
+ return operationPutByIdDirectNonStrictOptimize;
+ return operationPutByIdNonStrictOptimize;
}
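
Aside (for quick reference, not part of this patch): the selection above, laid out as a table of the existing operations it returns:

    //   strict mode   putKind     optimizing operation
    //   yes           Direct      operationPutByIdDirectStrictOptimize
    //   yes           NotDirect   operationPutByIdStrictOptimize
    //   no            Direct      operationPutByIdDirectNonStrictOptimize
    //   no            NotDirect   operationPutByIdNonStrictOptimize
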
-#if ENABLE(GGC)
-static MacroAssembler::Call storeToWriteBarrierBuffer(CCallHelpers& jit, GPRReg cell, GPRReg scratch1, GPRReg scratch2, GPRReg callFrameRegister, ScratchRegisterAllocator& allocator)
+static InlineCacheAction tryCachePutByID(ExecState* exec, JSValue baseValue, Structure* structure, const Identifier& ident, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
{
- ASSERT(scratch1 != scratch2);
- WriteBarrierBuffer* writeBarrierBuffer = &jit.vm()->heap.writeBarrierBuffer();
- jit.move(MacroAssembler::TrustedImmPtr(writeBarrierBuffer), scratch1);
- jit.load32(MacroAssembler::Address(scratch1, WriteBarrierBuffer::currentIndexOffset()), scratch2);
- MacroAssembler::Jump needToFlush = jit.branch32(MacroAssembler::AboveOrEqual, scratch2, MacroAssembler::Address(scratch1, WriteBarrierBuffer::capacityOffset()));
-
- jit.add32(MacroAssembler::TrustedImm32(1), scratch2);
- jit.store32(scratch2, MacroAssembler::Address(scratch1, WriteBarrierBuffer::currentIndexOffset()));
-
- jit.loadPtr(MacroAssembler::Address(scratch1, WriteBarrierBuffer::bufferOffset()), scratch1);
- // We use an offset of -sizeof(void*) because we already added 1 to scratch2.
- jit.storePtr(cell, MacroAssembler::BaseIndex(scratch1, scratch2, MacroAssembler::ScalePtr, static_cast<int32_t>(-sizeof(void*))));
-
- MacroAssembler::Jump done = jit.jump();
- needToFlush.link(&jit);
-
- ScratchBuffer* scratchBuffer = jit.vm()->scratchBufferForSize(allocator.desiredScratchBufferSize());
- allocator.preserveUsedRegistersToScratchBuffer(jit, scratchBuffer, scratch1);
-
- unsigned bytesFromBase = allocator.numberOfReusedRegisters() * sizeof(void*);
- unsigned bytesToSubtract = 0;
-#if CPU(X86)
- bytesToSubtract += 2 * sizeof(void*);
- bytesFromBase += bytesToSubtract;
-#endif
- unsigned currentAlignment = bytesFromBase % stackAlignmentBytes();
- bytesToSubtract += currentAlignment;
-
- if (bytesToSubtract)
- jit.subPtr(MacroAssembler::TrustedImm32(bytesToSubtract), MacroAssembler::stackPointerRegister);
-
- jit.setupArguments(callFrameRegister, cell);
- MacroAssembler::Call call = jit.call();
+ if (forceICFailure(exec))
+ return GiveUpOnCache;
+
+ CodeBlock* codeBlock = exec->codeBlock();
+ VM& vm = exec->vm();
- if (bytesToSubtract)
- jit.addPtr(MacroAssembler::TrustedImm32(bytesToSubtract), MacroAssembler::stackPointerRegister);
- allocator.restoreUsedRegistersFromScratchBuffer(jit, scratchBuffer, scratch1);
+ if (!baseValue.isCell())
+ return GiveUpOnCache;
+
+ if (!slot.isCacheablePut() && !slot.isCacheableCustom() && !slot.isCacheableSetter())
+ return GiveUpOnCache;
- done.link(&jit);
+ if (!structure->propertyAccessesAreCacheable())
+ return GiveUpOnCache;
+
+ std::unique_ptr<AccessCase> newCase;
+
+ if (slot.base() == baseValue && slot.isCacheablePut()) {
+ if (slot.type() == PutPropertySlot::ExistingProperty) {
+ structure->didCachePropertyReplacement(vm, slot.cachedOffset());
+
+ if (stubInfo.cacheType == CacheType::Unset
+ && InlineAccess::canGenerateSelfPropertyReplace(stubInfo, slot.cachedOffset())
+ && !structure->needImpurePropertyWatchpoint()
+ && !structure->inferredTypeFor(ident.impl())) {
+
+ bool generatedCodeInline = InlineAccess::generateSelfPropertyReplace(vm, stubInfo, structure, slot.cachedOffset());
+ if (generatedCodeInline) {
+ LOG_IC((ICEvent::PutByIdSelfPatch, structure->classInfo(), ident));
+ ftlThunkAwareRepatchCall(codeBlock, stubInfo.slowPathCallLocation(), appropriateOptimizingPutByIdFunction(slot, putKind));
+ stubInfo.initPutByIdReplace(codeBlock, structure, slot.cachedOffset());
+ return RetryCacheLater;
+ }
+ }
- return call;
-}
+ newCase = AccessCase::create(vm, codeBlock, AccessCase::Replace, slot.cachedOffset(), structure);
+ } else {
+ ASSERT(slot.type() == PutPropertySlot::NewProperty);
-static MacroAssembler::Call writeBarrier(CCallHelpers& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2, GPRReg callFrameRegister, ScratchRegisterAllocator& allocator)
-{
- ASSERT(owner != scratch1);
- ASSERT(owner != scratch2);
+ if (!structure->isObject())
+ return GiveUpOnCache;
- MacroAssembler::Jump definitelyNotMarked = DFG::SpeculativeJIT::genericWriteBarrier(jit, owner, scratch1, scratch2);
- MacroAssembler::Call call = storeToWriteBarrierBuffer(jit, owner, scratch1, scratch2, callFrameRegister, allocator);
- definitelyNotMarked.link(&jit);
- return call;
-}
-#endif // ENABLE(GGC)
-
-static void emitPutReplaceStub(
- ExecState* exec,
- JSValue,
- const Identifier&,
- const PutPropertySlot& slot,
- StructureStubInfo& stubInfo,
- PutKind,
- Structure* structure,
- CodeLocationLabel failureLabel,
- RefPtr<JITStubRoutine>& stubRoutine)
-{
- VM* vm = &exec->vm();
-#if ENABLE(GGC)
- GPRReg callFrameRegister = static_cast<GPRReg>(stubInfo.patch.callFrameRegister);
-#endif
- GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
-#if USE(JSVALUE32_64)
- GPRReg valueTagGPR = static_cast<GPRReg>(stubInfo.patch.valueTagGPR);
-#endif
- GPRReg valueGPR = static_cast<GPRReg>(stubInfo.patch.valueGPR);
-
- ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
- allocator.lock(baseGPR);
-#if USE(JSVALUE32_64)
- allocator.lock(valueTagGPR);
-#endif
- allocator.lock(valueGPR);
-
- GPRReg scratchGPR1 = allocator.allocateScratchGPR();
-#if ENABLE(GGC)
- GPRReg scratchGPR2 = allocator.allocateScratchGPR();
-#endif
+ if (structure->isDictionary()) {
+ if (structure->hasBeenFlattenedBefore())
+ return GiveUpOnCache;
+ structure->flattenDictionaryStructure(vm, jsCast<JSObject*>(baseValue));
+ }
- CCallHelpers stubJit(vm, exec->codeBlock());
+ PropertyOffset offset;
+ Structure* newStructure =
+ Structure::addPropertyTransitionToExistingStructureConcurrently(
+ structure, ident.impl(), 0, offset);
+ if (!newStructure || !newStructure->propertyAccessesAreCacheable())
+ return GiveUpOnCache;
- allocator.preserveReusedRegistersByPushing(stubJit);
+ ASSERT(newStructure->previousID() == structure);
+ ASSERT(!newStructure->isDictionary());
+ ASSERT(newStructure->isObject());
+
+ ObjectPropertyConditionSet conditionSet;
+ if (putKind == NotDirect) {
+ conditionSet =
+ generateConditionsForPropertySetterMiss(
+ vm, codeBlock, exec, newStructure, ident.impl());
+ if (!conditionSet.isValid())
+ return GiveUpOnCache;
+ }
- MacroAssembler::Jump badStructure = stubJit.branchPtr(
- MacroAssembler::NotEqual,
- MacroAssembler::Address(baseGPR, JSCell::structureOffset()),
- MacroAssembler::TrustedImmPtr(structure));
+ newCase = AccessCase::create(vm, codeBlock, offset, structure, newStructure, conditionSet);
+ }
+ } else if (slot.isCacheableCustom() || slot.isCacheableSetter()) {
+ if (slot.isCacheableCustom()) {
+ ObjectPropertyConditionSet conditionSet;
+
+ if (slot.base() != baseValue) {
+ conditionSet =
+ generateConditionsForPrototypePropertyHitCustom(
+ vm, codeBlock, exec, structure, slot.base(), ident.impl());
+ if (!conditionSet.isValid())
+ return GiveUpOnCache;
+ }
-#if USE(JSVALUE64)
- if (isInlineOffset(slot.cachedOffset()))
- stubJit.store64(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue)));
- else {
- stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR1);
- stubJit.store64(valueGPR, MacroAssembler::Address(scratchGPR1, offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue)));
- }
-#elif USE(JSVALUE32_64)
- if (isInlineOffset(slot.cachedOffset())) {
- stubJit.store32(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
- stubJit.store32(valueTagGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
- } else {
- stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR1);
- stubJit.store32(valueGPR, MacroAssembler::Address(scratchGPR1, offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
- stubJit.store32(valueTagGPR, MacroAssembler::Address(scratchGPR1, offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ newCase = GetterSetterAccessCase::create(
+ vm, codeBlock, slot.isCustomAccessor() ? AccessCase::CustomAccessorSetter : AccessCase::CustomValueSetter, structure, invalidOffset, conditionSet,
+ slot.customSetter(), slot.base());
+ } else {
+ ObjectPropertyConditionSet conditionSet;
+ PropertyOffset offset;
+
+ if (slot.base() != baseValue) {
+ conditionSet =
+ generateConditionsForPrototypePropertyHit(
+ vm, codeBlock, exec, structure, slot.base(), ident.impl());
+ if (!conditionSet.isValid())
+ return GiveUpOnCache;
+ offset = conditionSet.slotBaseCondition().offset();
+ } else
+ offset = slot.cachedOffset();
+
+ newCase = GetterSetterAccessCase::create(
+ vm, codeBlock, AccessCase::Setter, structure, offset, conditionSet);
+ }
}
-#endif
-
-#if ENABLE(GGC)
- MacroAssembler::Call writeBarrierOperation = writeBarrier(stubJit, baseGPR, scratchGPR1, scratchGPR2, callFrameRegister, allocator);
-#endif
+
+ LOG_IC((ICEvent::PutByIdAddAccessCase, structure->classInfo(), ident));
- MacroAssembler::Jump success;
- MacroAssembler::Jump failure;
+ AccessGenerationResult result = stubInfo.addAccessCase(codeBlock, ident, WTFMove(newCase));
- if (allocator.didReuseRegisters()) {
- allocator.restoreReusedRegistersByPopping(stubJit);
- success = stubJit.jump();
+ if (result.generatedSomeCode()) {
+ LOG_IC((ICEvent::PutByIdReplaceWithJump, structure->classInfo(), ident));
- badStructure.link(&stubJit);
- allocator.restoreReusedRegistersByPopping(stubJit);
- failure = stubJit.jump();
- } else {
- success = stubJit.jump();
- failure = badStructure;
+ RELEASE_ASSERT(result.code());
+
+ InlineAccess::rewireStubAsJump(vm, stubInfo, CodeLocationLabel(result.code()));
}
- LinkBuffer patchBuffer(*vm, &stubJit, exec->codeBlock());
-#if ENABLE(GGC)
- patchBuffer.link(writeBarrierOperation, operationFlushWriteBarrierBuffer);
-#endif
- patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone));
- patchBuffer.link(failure, failureLabel);
-
- stubRoutine = FINALIZE_CODE_FOR_DFG_STUB(
- patchBuffer,
- ("DFG PutById replace stub for %s, return point %p",
- toCString(*exec->codeBlock()).data(), stubInfo.callReturnLocation.labelAtOffset(
- stubInfo.patch.deltaCallToDone).executableAddress()));
+ return result.shouldGiveUpNow() ? GiveUpOnCache : RetryCacheLater;
}
-static void emitPutTransitionStub(
- ExecState* exec,
- JSValue,
- const Identifier&,
- const PutPropertySlot& slot,
- StructureStubInfo& stubInfo,
- PutKind putKind,
- Structure* structure,
- Structure* oldStructure,
- StructureChain* prototypeChain,
- CodeLocationLabel failureLabel,
- RefPtr<JITStubRoutine>& stubRoutine)
+void repatchPutByID(ExecState* exec, JSValue baseValue, Structure* structure, const Identifier& propertyName, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
{
- VM* vm = &exec->vm();
-
- GPRReg callFrameRegister = static_cast<GPRReg>(stubInfo.patch.callFrameRegister);
- GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
-#if USE(JSVALUE32_64)
- GPRReg valueTagGPR = static_cast<GPRReg>(stubInfo.patch.valueTagGPR);
-#endif
- GPRReg valueGPR = static_cast<GPRReg>(stubInfo.patch.valueGPR);
-
- ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
- allocator.lock(baseGPR);
-#if USE(JSVALUE32_64)
- allocator.lock(valueTagGPR);
-#endif
- allocator.lock(valueGPR);
-
- CCallHelpers stubJit(vm);
-
- bool needThirdScratch = false;
- if (structure->outOfLineCapacity() != oldStructure->outOfLineCapacity()
- && oldStructure->outOfLineCapacity()) {
- needThirdScratch = true;
- }
-
- GPRReg scratchGPR1 = allocator.allocateScratchGPR();
- ASSERT(scratchGPR1 != baseGPR);
- ASSERT(scratchGPR1 != valueGPR);
+ SuperSamplerScope superSamplerScope(false);
+ GCSafeConcurrentJSLocker locker(exec->codeBlock()->m_lock, exec->vm().heap);
- GPRReg scratchGPR2 = allocator.allocateScratchGPR();
- ASSERT(scratchGPR2 != baseGPR);
- ASSERT(scratchGPR2 != valueGPR);
- ASSERT(scratchGPR2 != scratchGPR1);
-
- GPRReg scratchGPR3;
- if (needThirdScratch) {
- scratchGPR3 = allocator.allocateScratchGPR();
- ASSERT(scratchGPR3 != baseGPR);
- ASSERT(scratchGPR3 != valueGPR);
- ASSERT(scratchGPR3 != scratchGPR1);
- ASSERT(scratchGPR3 != scratchGPR2);
- } else
- scratchGPR3 = InvalidGPRReg;
-
- allocator.preserveReusedRegistersByPushing(stubJit);
+ if (tryCachePutByID(exec, baseValue, structure, propertyName, slot, stubInfo, putKind) == GiveUpOnCache)
+ ftlThunkAwareRepatchCall(exec->codeBlock(), stubInfo.slowPathCallLocation(), appropriateGenericPutByIdFunction(slot, putKind));
+}
- MacroAssembler::JumpList failureCases;
-
- ASSERT(oldStructure->transitionWatchpointSetHasBeenInvalidated());
+static InlineCacheAction tryRepatchIn(
+ ExecState* exec, JSCell* base, const Identifier& ident, bool wasFound,
+ const PropertySlot& slot, StructureStubInfo& stubInfo)
+{
+ if (forceICFailure(exec))
+ return GiveUpOnCache;
- failureCases.append(stubJit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(oldStructure)));
+ if (!base->structure()->propertyAccessesAreCacheable() || (!wasFound && !base->structure()->propertyAccessesAreCacheableForAbsence()))
+ return GiveUpOnCache;
- addStructureTransitionCheck(
- oldStructure->storedPrototype(), exec->codeBlock(), stubInfo, stubJit, failureCases,
- scratchGPR1);
-
- if (putKind == NotDirect) {
- for (WriteBarrier<Structure>* it = prototypeChain->head(); *it; ++it) {
- addStructureTransitionCheck(
- (*it)->storedPrototype(), exec->codeBlock(), stubInfo, stubJit, failureCases,
- scratchGPR1);
- }
+ if (wasFound) {
+ if (!slot.isCacheable())
+ return GiveUpOnCache;
}
-
- MacroAssembler::JumpList slowPath;
- bool scratchGPR1HasStorage = false;
+ CodeBlock* codeBlock = exec->codeBlock();
+ VM& vm = exec->vm();
+ Structure* structure = base->structure(vm);
- if (structure->outOfLineCapacity() != oldStructure->outOfLineCapacity()) {
- size_t newSize = structure->outOfLineCapacity() * sizeof(JSValue);
- CopiedAllocator* copiedAllocator = &vm->heap.storageAllocator();
-
- if (!oldStructure->outOfLineCapacity()) {
- stubJit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR1);
- slowPath.append(stubJit.branchSubPtr(MacroAssembler::Signed, MacroAssembler::TrustedImm32(newSize), scratchGPR1));
- stubJit.storePtr(scratchGPR1, &copiedAllocator->m_currentRemaining);
- stubJit.negPtr(scratchGPR1);
- stubJit.addPtr(MacroAssembler::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR1);
- stubJit.addPtr(MacroAssembler::TrustedImm32(sizeof(JSValue)), scratchGPR1);
- } else {
- size_t oldSize = oldStructure->outOfLineCapacity() * sizeof(JSValue);
- ASSERT(newSize > oldSize);
-
- stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR3);
- stubJit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR1);
- slowPath.append(stubJit.branchSubPtr(MacroAssembler::Signed, MacroAssembler::TrustedImm32(newSize), scratchGPR1));
- stubJit.storePtr(scratchGPR1, &copiedAllocator->m_currentRemaining);
- stubJit.negPtr(scratchGPR1);
- stubJit.addPtr(MacroAssembler::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR1);
- stubJit.addPtr(MacroAssembler::TrustedImm32(sizeof(JSValue)), scratchGPR1);
- // We have scratchGPR1 = new storage, scratchGPR3 = old storage, scratchGPR2 = available
- for (size_t offset = 0; offset < oldSize; offset += sizeof(void*)) {
- stubJit.loadPtr(MacroAssembler::Address(scratchGPR3, -static_cast<ptrdiff_t>(offset + sizeof(JSValue) + sizeof(void*))), scratchGPR2);
- stubJit.storePtr(scratchGPR2, MacroAssembler::Address(scratchGPR1, -static_cast<ptrdiff_t>(offset + sizeof(JSValue) + sizeof(void*))));
- }
+ ObjectPropertyConditionSet conditionSet;
+ if (wasFound) {
+ if (slot.slotBase() != base) {
+ conditionSet = generateConditionsForPrototypePropertyHit(
+ vm, codeBlock, exec, structure, slot.slotBase(), ident.impl());
}
-
- stubJit.storePtr(scratchGPR1, MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()));
- scratchGPR1HasStorage = true;
- }
-
- stubJit.storePtr(MacroAssembler::TrustedImmPtr(structure), MacroAssembler::Address(baseGPR, JSCell::structureOffset()));
-#if USE(JSVALUE64)
- if (isInlineOffset(slot.cachedOffset()))
- stubJit.store64(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue)));
- else {
- if (!scratchGPR1HasStorage)
- stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR1);
- stubJit.store64(valueGPR, MacroAssembler::Address(scratchGPR1, offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue)));
- }
-#elif USE(JSVALUE32_64)
- if (isInlineOffset(slot.cachedOffset())) {
- stubJit.store32(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
- stubJit.store32(valueTagGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
} else {
- if (!scratchGPR1HasStorage)
- stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR1);
- stubJit.store32(valueGPR, MacroAssembler::Address(scratchGPR1, offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
- stubJit.store32(valueTagGPR, MacroAssembler::Address(scratchGPR1, offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ conditionSet = generateConditionsForPropertyMiss(
+ vm, codeBlock, exec, structure, ident.impl());
}
-#endif
-
-#if ENABLE(GGC)
- MacroAssembler::Call writeBarrierOperation = writeBarrier(stubJit, baseGPR, scratchGPR1, scratchGPR2, callFrameRegister, allocator);
-#endif
-
- MacroAssembler::Jump success;
- MacroAssembler::Jump failure;
-
- if (allocator.didReuseRegisters()) {
- allocator.restoreReusedRegistersByPopping(stubJit);
- success = stubJit.jump();
+ if (!conditionSet.isValid())
+ return GiveUpOnCache;
- failureCases.link(&stubJit);
- allocator.restoreReusedRegistersByPopping(stubJit);
- failure = stubJit.jump();
- } else
- success = stubJit.jump();
-
- MacroAssembler::Call operationCall;
- MacroAssembler::Jump successInSlowPath;
+ LOG_IC((ICEvent::InAddAccessCase, structure->classInfo(), ident));
+
+ std::unique_ptr<AccessCase> newCase = AccessCase::create(
+ vm, codeBlock, wasFound ? AccessCase::InHit : AccessCase::InMiss, invalidOffset, structure, conditionSet);
+
+ AccessGenerationResult result = stubInfo.addAccessCase(codeBlock, ident, WTFMove(newCase));
- if (structure->outOfLineCapacity() != oldStructure->outOfLineCapacity()) {
- slowPath.link(&stubJit);
+ if (result.generatedSomeCode()) {
+ LOG_IC((ICEvent::InReplaceWithJump, structure->classInfo(), ident));
- allocator.restoreReusedRegistersByPopping(stubJit);
- ScratchBuffer* scratchBuffer = vm->scratchBufferForSize(allocator.desiredScratchBufferSize());
- allocator.preserveUsedRegistersToScratchBuffer(stubJit, scratchBuffer, scratchGPR1);
-#if USE(JSVALUE64)
- stubJit.setupArguments(callFrameRegister, baseGPR, MacroAssembler::TrustedImmPtr(structure), MacroAssembler::TrustedImm32(slot.cachedOffset()), valueGPR);
-#else
- stubJit.setupArguments(callFrameRegister, baseGPR, MacroAssembler::TrustedImmPtr(structure), MacroAssembler::TrustedImm32(slot.cachedOffset()), valueGPR, valueTagGPR);
-#endif
- operationCall = stubJit.call();
- allocator.restoreUsedRegistersFromScratchBuffer(stubJit, scratchBuffer, scratchGPR1);
- successInSlowPath = stubJit.jump();
- }
-
- LinkBuffer patchBuffer(*vm, &stubJit, exec->codeBlock());
-#if ENABLE(GGC)
- patchBuffer.link(writeBarrierOperation, operationFlushWriteBarrierBuffer);
-#endif
- patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone));
- if (allocator.didReuseRegisters())
- patchBuffer.link(failure, failureLabel);
- else
- patchBuffer.link(failureCases, failureLabel);
- if (structure->outOfLineCapacity() != oldStructure->outOfLineCapacity()) {
- patchBuffer.link(operationCall, operationReallocateStorageAndFinishPut);
- patchBuffer.link(successInSlowPath, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone));
+ RELEASE_ASSERT(result.code());
+
+ MacroAssembler::repatchJump(
+ stubInfo.patchableJumpForIn(),
+ CodeLocationLabel(result.code()));
}
- stubRoutine =
- createJITStubRoutine(
- FINALIZE_DFG_CODE(
- patchBuffer,
- ("DFG PutById %stransition stub (%p -> %p) for %s, return point %p",
- structure->outOfLineCapacity() != oldStructure->outOfLineCapacity() ? "reallocating " : "",
- oldStructure, structure,
- toCString(*exec->codeBlock()).data(), stubInfo.callReturnLocation.labelAtOffset(
- stubInfo.patch.deltaCallToDone).executableAddress())),
- *vm,
- exec->codeBlock()->ownerExecutable(),
- structure->outOfLineCapacity() != oldStructure->outOfLineCapacity(),
- structure);
+ return result.shouldGiveUpNow() ? GiveUpOnCache : RetryCacheLater;
}
-static bool tryCachePutByID(ExecState* exec, JSValue baseValue, const Identifier& ident, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
+void repatchIn(
+ ExecState* exec, JSCell* base, const Identifier& ident, bool wasFound,
+ const PropertySlot& slot, StructureStubInfo& stubInfo)
{
- CodeBlock* codeBlock = exec->codeBlock();
- VM* vm = &exec->vm();
-
- if (!baseValue.isCell())
- return false;
- JSCell* baseCell = baseValue.asCell();
- Structure* structure = baseCell->structure();
- Structure* oldStructure = structure->previousID();
-
- if (!slot.isCacheable())
- return false;
- if (!structure->propertyAccessesAreCacheable())
- return false;
-
- // Optimize self access.
- if (slot.base() == baseValue) {
- if (slot.type() == PutPropertySlot::NewProperty) {
- if (structure->isDictionary())
- return false;
-
- // Skip optimizing the case where we need a realloc, if we don't have
- // enough registers to make it happen.
- if (GPRInfo::numberOfRegisters < 6
- && oldStructure->outOfLineCapacity() != structure->outOfLineCapacity()
- && oldStructure->outOfLineCapacity())
- return false;
-
- // Skip optimizing the case where we need realloc, and the structure has
- // indexing storage.
- if (oldStructure->couldHaveIndexingHeader())
- return false;
-
- if (normalizePrototypeChain(exec, baseCell) == InvalidPrototypeChain)
- return false;
-
- StructureChain* prototypeChain = structure->prototypeChain(exec);
-
- emitPutTransitionStub(
- exec, baseValue, ident, slot, stubInfo, putKind,
- structure, oldStructure, prototypeChain,
- stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase),
- stubInfo.stubRoutine);
-
- RepatchBuffer repatchBuffer(codeBlock);
- repatchBuffer.relink(
- stubInfo.callReturnLocation.jumpAtOffset(
- stubInfo.patch.deltaCallToJump),
- CodeLocationLabel(stubInfo.stubRoutine->code().code()));
- repatchCall(repatchBuffer, stubInfo.callReturnLocation, appropriateListBuildingPutByIdFunction(slot, putKind));
-
- stubInfo.initPutByIdTransition(*vm, codeBlock->ownerExecutable(), oldStructure, structure, prototypeChain, putKind == Direct);
-
- return true;
- }
-
- if (!MacroAssembler::isPtrAlignedAddressOffset(offsetRelativeToPatchedStorage(slot.cachedOffset())))
- return false;
-
- repatchByIdSelfAccess(*vm, codeBlock, stubInfo, structure, ident, slot.cachedOffset(), appropriateListBuildingPutByIdFunction(slot, putKind), false);
- stubInfo.initPutByIdReplace(*vm, codeBlock->ownerExecutable(), structure);
- return true;
- }
-
- return false;
+ SuperSamplerScope superSamplerScope(false);
+ if (tryRepatchIn(exec, base, ident, wasFound, slot, stubInfo) == GiveUpOnCache)
+ ftlThunkAwareRepatchCall(exec->codeBlock(), stubInfo.slowPathCallLocation(), operationIn);
}
-void repatchPutByID(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
+static void linkSlowFor(VM*, CallLinkInfo& callLinkInfo, MacroAssemblerCodeRef codeRef)
{
- GCSafeConcurrentJITLocker locker(exec->codeBlock()->m_lock, exec->vm().heap);
-
- bool cached = tryCachePutByID(exec, baseValue, propertyName, slot, stubInfo, putKind);
- if (!cached)
- repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, appropriateGenericPutByIdFunction(slot, putKind));
+ MacroAssembler::repatchNearCall(callLinkInfo.callReturnLocation(), CodeLocationLabel(codeRef.code()));
}
-static bool tryBuildPutByIdList(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
+static void linkSlowFor(VM* vm, CallLinkInfo& callLinkInfo, ThunkGenerator generator)
{
- CodeBlock* codeBlock = exec->codeBlock();
- VM* vm = &exec->vm();
-
- if (!baseValue.isCell())
- return false;
- JSCell* baseCell = baseValue.asCell();
- Structure* structure = baseCell->structure();
- Structure* oldStructure = structure->previousID();
-
- if (!slot.isCacheable())
- return false;
- if (!structure->propertyAccessesAreCacheable())
- return false;
-
- // Optimize self access.
- if (slot.base() == baseValue) {
- PolymorphicPutByIdList* list;
- RefPtr<JITStubRoutine> stubRoutine;
-
- if (slot.type() == PutPropertySlot::NewProperty) {
- if (structure->isDictionary())
- return false;
-
- // Skip optimizing the case where we need a realloc, if we don't have
- // enough registers to make it happen.
- if (GPRInfo::numberOfRegisters < 6
- && oldStructure->outOfLineCapacity() != structure->outOfLineCapacity()
- && oldStructure->outOfLineCapacity())
- return false;
-
- // Skip optimizing the case where we need realloc, and the structure has
- // indexing storage.
- if (oldStructure->couldHaveIndexingHeader())
- return false;
-
- if (normalizePrototypeChain(exec, baseCell) == InvalidPrototypeChain)
- return false;
-
- StructureChain* prototypeChain = structure->prototypeChain(exec);
-
- // We're now committed to creating the stub. Mogrify the meta-data accordingly.
- list = PolymorphicPutByIdList::from(
- putKind, stubInfo,
- stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
-
- emitPutTransitionStub(
- exec, baseValue, propertyName, slot, stubInfo, putKind,
- structure, oldStructure, prototypeChain,
- CodeLocationLabel(list->currentSlowPathTarget()),
- stubRoutine);
-
- list->addAccess(
- PutByIdAccess::transition(
- *vm, codeBlock->ownerExecutable(),
- oldStructure, structure, prototypeChain,
- stubRoutine));
- } else {
- // We're now committed to creating the stub. Mogrify the meta-data accordingly.
- list = PolymorphicPutByIdList::from(
- putKind, stubInfo,
- stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
-
- emitPutReplaceStub(
- exec, baseValue, propertyName, slot, stubInfo, putKind,
- structure, CodeLocationLabel(list->currentSlowPathTarget()), stubRoutine);
-
- list->addAccess(
- PutByIdAccess::replace(
- *vm, codeBlock->ownerExecutable(),
- structure, stubRoutine));
- }
-
- RepatchBuffer repatchBuffer(codeBlock);
- repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), CodeLocationLabel(stubRoutine->code().code()));
-
- if (list->isFull())
- repatchCall(repatchBuffer, stubInfo.callReturnLocation, appropriateGenericPutByIdFunction(slot, putKind));
-
- return true;
- }
-
- return false;
+ linkSlowFor(vm, callLinkInfo, vm->getCTIStub(generator));
}
-void buildPutByIdList(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
+static void linkSlowFor(VM* vm, CallLinkInfo& callLinkInfo)
{
- GCSafeConcurrentJITLocker locker(exec->codeBlock()->m_lock, exec->vm().heap);
-
- bool cached = tryBuildPutByIdList(exec, baseValue, propertyName, slot, stubInfo, putKind);
- if (!cached)
- repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, appropriateGenericPutByIdFunction(slot, putKind));
+ MacroAssemblerCodeRef virtualThunk = virtualThunkFor(vm, callLinkInfo);
+ linkSlowFor(vm, callLinkInfo, virtualThunk);
+ callLinkInfo.setSlowStub(createJITStubRoutine(virtualThunk, *vm, nullptr, true));
}
-static bool tryRepatchIn(
- ExecState* exec, JSCell* base, const Identifier& ident, bool wasFound,
- const PropertySlot& slot, StructureStubInfo& stubInfo)
+static bool isWebAssemblyToJSCallee(VM& vm, JSCell* callee)
{
- if (!base->structure()->propertyAccessesAreCacheable())
- return false;
-
- if (wasFound) {
- if (!slot.isCacheable())
- return false;
- }
-
- CodeBlock* codeBlock = exec->codeBlock();
- VM* vm = &exec->vm();
- Structure* structure = base->structure();
-
- PropertyOffset offsetIgnored;
- size_t count = normalizePrototypeChainForChainAccess(exec, base, wasFound ? slot.slotBase() : JSValue(), ident, offsetIgnored);
- if (count == InvalidPrototypeChain)
- return false;
-
- PolymorphicAccessStructureList* polymorphicStructureList;
- int listIndex;
-
- CodeLocationLabel successLabel = stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone);
- CodeLocationLabel slowCaseLabel;
-
- if (stubInfo.accessType == access_unset) {
- polymorphicStructureList = new PolymorphicAccessStructureList();
- stubInfo.initInList(polymorphicStructureList, 0);
- slowCaseLabel = stubInfo.callReturnLocation.labelAtOffset(
- stubInfo.patch.deltaCallToSlowCase);
- listIndex = 0;
- } else {
- RELEASE_ASSERT(stubInfo.accessType == access_in_list);
- polymorphicStructureList = stubInfo.u.inList.structureList;
- listIndex = stubInfo.u.inList.listSize;
- slowCaseLabel = CodeLocationLabel(polymorphicStructureList->list[listIndex - 1].stubRoutine->code().code());
-
- if (listIndex == POLYMORPHIC_LIST_CACHE_SIZE)
- return false;
- }
-
- StructureChain* chain = structure->prototypeChain(exec);
- RefPtr<JITStubRoutine> stubRoutine;
-
- {
- GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
- GPRReg resultGPR = static_cast<GPRReg>(stubInfo.patch.valueGPR);
- GPRReg scratchGPR = TempRegisterSet(stubInfo.patch.usedRegisters).getFreeGPR();
-
- CCallHelpers stubJit(vm);
-
- bool needToRestoreScratch;
- if (scratchGPR == InvalidGPRReg) {
- scratchGPR = AssemblyHelpers::selectScratchGPR(baseGPR, resultGPR);
- stubJit.pushToSave(scratchGPR);
- needToRestoreScratch = true;
- } else
- needToRestoreScratch = false;
-
- MacroAssembler::JumpList failureCases;
- failureCases.append(stubJit.branchPtr(
- MacroAssembler::NotEqual,
- MacroAssembler::Address(baseGPR, JSCell::structureOffset()),
- MacroAssembler::TrustedImmPtr(structure)));
-
- CodeBlock* codeBlock = exec->codeBlock();
- if (structure->typeInfo().newImpurePropertyFiresWatchpoints())
- vm->registerWatchpointForImpureProperty(ident, stubInfo.addWatchpoint(codeBlock));
-
- Structure* currStructure = structure;
- WriteBarrier<Structure>* it = chain->head();
- for (unsigned i = 0; i < count; ++i, ++it) {
- JSObject* prototype = asObject(currStructure->prototypeForLookup(exec));
- Structure* protoStructure = prototype->structure();
- addStructureTransitionCheck(
- prototype, protoStructure, exec->codeBlock(), stubInfo, stubJit,
- failureCases, scratchGPR);
- if (protoStructure->typeInfo().newImpurePropertyFiresWatchpoints())
- vm->registerWatchpointForImpureProperty(ident, stubInfo.addWatchpoint(codeBlock));
- currStructure = it->get();
- }
-
-#if USE(JSVALUE64)
- stubJit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(wasFound))), resultGPR);
+#if ENABLE(WEBASSEMBLY)
+ // The WebAssembly -> JS stub sets its caller frame's callee to a singleton that lives on the VM.
+ return callee == vm.webAssemblyToJSCallee.get();
#else
- stubJit.move(MacroAssembler::TrustedImm32(wasFound), resultGPR);
-#endif
-
- MacroAssembler::Jump success, fail;
-
- emitRestoreScratch(stubJit, needToRestoreScratch, scratchGPR, success, fail, failureCases);
-
- LinkBuffer patchBuffer(*vm, &stubJit, exec->codeBlock());
-
- linkRestoreScratch(patchBuffer, needToRestoreScratch, success, fail, failureCases, successLabel, slowCaseLabel);
-
- stubRoutine = FINALIZE_CODE_FOR_DFG_STUB(
- patchBuffer,
- ("DFG In (found = %s) stub for %s, return point %p",
- wasFound ? "yes" : "no", toCString(*exec->codeBlock()).data(),
- successLabel.executableAddress()));
- }
-
- polymorphicStructureList->list[listIndex].set(*vm, codeBlock->ownerExecutable(), stubRoutine, structure, true);
- stubInfo.u.inList.listSize++;
-
- RepatchBuffer repatchBuffer(codeBlock);
- repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), CodeLocationLabel(stubRoutine->code().code()));
-
- return listIndex < (POLYMORPHIC_LIST_CACHE_SIZE - 1);
+ UNUSED_PARAM(vm);
+ UNUSED_PARAM(callee);
+ return false;
+#endif // ENABLE(WEBASSEMBLY)
}
-void repatchIn(
- ExecState* exec, JSCell* base, const Identifier& ident, bool wasFound,
- const PropertySlot& slot, StructureStubInfo& stubInfo)
+static JSCell* webAssemblyOwner(VM& vm)
{
- if (tryRepatchIn(exec, base, ident, wasFound, slot, stubInfo))
- return;
- repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, operationIn);
+#if ENABLE(WEBASSEMBLY)
+ // Each WebAssembly.Instance shares the stubs from its WebAssembly.Module, which is therefore the appropriate owner.
+ return vm.topJSWebAssemblyInstance->module();
+#else
+ UNUSED_PARAM(vm);
+ RELEASE_ASSERT_NOT_REACHED();
+ return nullptr;
+#endif // ENABLE(WEBASSEMBLY)
}
-static void linkSlowFor(RepatchBuffer& repatchBuffer, VM* vm, CallLinkInfo& callLinkInfo, CodeSpecializationKind kind)
+void linkFor(
+ ExecState* exec, CallLinkInfo& callLinkInfo, CodeBlock* calleeCodeBlock,
+ JSFunction* callee, MacroAssemblerCodePtr codePtr)
{
- if (kind == CodeForCall) {
- repatchBuffer.relink(callLinkInfo.callReturnLocation, vm->getCTIStub(virtualCallThunkGenerator).code());
+ ASSERT(!callLinkInfo.stub());
+
+ CallFrame* callerFrame = exec->callerFrame();
+ VM& vm = callerFrame->vm();
+ CodeBlock* callerCodeBlock = callerFrame->codeBlock();
+
+ // WebAssembly -> JS stubs don't have a valid CodeBlock.
+ JSCell* owner = isWebAssemblyToJSCallee(vm, callerFrame->callee()) ? webAssemblyOwner(vm) : callerCodeBlock;
+ ASSERT(owner);
+
+ ASSERT(!callLinkInfo.isLinked());
+ callLinkInfo.setCallee(vm, owner, callee);
+ callLinkInfo.setLastSeenCallee(vm, owner, callee);
+ if (shouldDumpDisassemblyFor(callerCodeBlock))
+ dataLog("Linking call in ", *callerCodeBlock, " at ", callLinkInfo.codeOrigin(), " to ", pointerDump(calleeCodeBlock), ", entrypoint at ", codePtr, "\n");
+ MacroAssembler::repatchNearCall(callLinkInfo.hotPathOther(), CodeLocationLabel(codePtr));
+
+ if (calleeCodeBlock)
+ calleeCodeBlock->linkIncomingCall(callerFrame, &callLinkInfo);
+
+ if (callLinkInfo.specializationKind() == CodeForCall && callLinkInfo.allowStubs()) {
+ linkSlowFor(&vm, callLinkInfo, linkPolymorphicCallThunkGenerator);
return;
}
- ASSERT(kind == CodeForConstruct);
- repatchBuffer.relink(callLinkInfo.callReturnLocation, vm->getCTIStub(virtualConstructThunkGenerator).code());
+
+ linkSlowFor(&vm, callLinkInfo);
}
-void linkFor(ExecState* exec, CallLinkInfo& callLinkInfo, CodeBlock* calleeCodeBlock, JSFunction* callee, MacroAssemblerCodePtr codePtr, CodeSpecializationKind kind)
+void linkDirectFor(
+ ExecState* exec, CallLinkInfo& callLinkInfo, CodeBlock* calleeCodeBlock,
+ MacroAssemblerCodePtr codePtr)
{
- ASSERT(!callLinkInfo.stub);
-
- // If you're being call-linked from a DFG caller then you obviously didn't get inlined.
- if (calleeCodeBlock)
- calleeCodeBlock->m_shouldAlwaysBeInlined = false;
+ ASSERT(!callLinkInfo.stub());
- CodeBlock* callerCodeBlock = exec->callerFrame()->codeBlock();
+ CodeBlock* callerCodeBlock = exec->codeBlock();
+
VM* vm = callerCodeBlock->vm();
- RepatchBuffer repatchBuffer(callerCodeBlock);
-
ASSERT(!callLinkInfo.isLinked());
- callLinkInfo.callee.set(exec->callerFrame()->vm(), callLinkInfo.hotPathBegin, callerCodeBlock->ownerExecutable(), callee);
- callLinkInfo.lastSeenCallee.set(exec->callerFrame()->vm(), callerCodeBlock->ownerExecutable(), callee);
- repatchBuffer.relink(callLinkInfo.hotPathOther, codePtr);
+ callLinkInfo.setCodeBlock(*vm, callerCodeBlock, jsCast<FunctionCodeBlock*>(calleeCodeBlock));
+ if (shouldDumpDisassemblyFor(callerCodeBlock))
+ dataLog("Linking call in ", *callerCodeBlock, " at ", callLinkInfo.codeOrigin(), " to ", pointerDump(calleeCodeBlock), ", entrypoint at ", codePtr, "\n");
+ if (callLinkInfo.callType() == CallLinkInfo::DirectTailCall)
+ MacroAssembler::repatchJumpToNop(callLinkInfo.patchableJump());
+ MacroAssembler::repatchNearCall(callLinkInfo.hotPathOther(), CodeLocationLabel(codePtr));
if (calleeCodeBlock)
- calleeCodeBlock->linkIncomingCall(exec->callerFrame(), &callLinkInfo);
-
- if (kind == CodeForCall) {
- repatchBuffer.relink(callLinkInfo.callReturnLocation, vm->getCTIStub(linkClosureCallThunkGenerator).code());
- return;
- }
-
- ASSERT(kind == CodeForConstruct);
- linkSlowFor(repatchBuffer, vm, callLinkInfo, CodeForConstruct);
+ calleeCodeBlock->linkIncomingCall(exec, &callLinkInfo);
}
-void linkSlowFor(ExecState* exec, CallLinkInfo& callLinkInfo, CodeSpecializationKind kind)
+void linkSlowFor(
+ ExecState* exec, CallLinkInfo& callLinkInfo)
{
CodeBlock* callerCodeBlock = exec->callerFrame()->codeBlock();
VM* vm = callerCodeBlock->vm();
- RepatchBuffer repatchBuffer(callerCodeBlock);
+ linkSlowFor(vm, callLinkInfo);
+}
+
+static void revertCall(VM* vm, CallLinkInfo& callLinkInfo, MacroAssemblerCodeRef codeRef)
+{
+ if (callLinkInfo.isDirect()) {
+ callLinkInfo.clearCodeBlock();
+ if (callLinkInfo.callType() == CallLinkInfo::DirectTailCall)
+ MacroAssembler::repatchJump(callLinkInfo.patchableJump(), callLinkInfo.slowPathStart());
+ else
+ MacroAssembler::repatchNearCall(callLinkInfo.hotPathOther(), callLinkInfo.slowPathStart());
+ } else {
+ MacroAssembler::revertJumpReplacementToBranchPtrWithPatch(
+ MacroAssembler::startOfBranchPtrWithPatchOnRegister(callLinkInfo.hotPathBegin()),
+ static_cast<MacroAssembler::RegisterID>(callLinkInfo.calleeGPR()), 0);
+ linkSlowFor(vm, callLinkInfo, codeRef);
+ callLinkInfo.clearCallee();
+ }
+ callLinkInfo.clearSeen();
+ callLinkInfo.clearStub();
+ callLinkInfo.clearSlowStub();
+ if (callLinkInfo.isOnList())
+ callLinkInfo.remove();
+}
+
+void unlinkFor(VM& vm, CallLinkInfo& callLinkInfo)
+{
+ if (Options::dumpDisassembly())
+ dataLog("Unlinking call at ", callLinkInfo.hotPathOther(), "\n");
- linkSlowFor(repatchBuffer, vm, callLinkInfo, kind);
+ revertCall(&vm, callLinkInfo, vm.getCTIStub(linkCallThunkGenerator));
+}
+
+void linkVirtualFor(ExecState* exec, CallLinkInfo& callLinkInfo)
+{
+ CallFrame* callerFrame = exec->callerFrame();
+ VM& vm = callerFrame->vm();
+ CodeBlock* callerCodeBlock = callerFrame->codeBlock();
+
+ if (shouldDumpDisassemblyFor(callerCodeBlock))
+ dataLog("Linking virtual call at ", *callerCodeBlock, " ", callerFrame->codeOrigin(), "\n");
+
+ MacroAssemblerCodeRef virtualThunk = virtualThunkFor(&vm, callLinkInfo);
+ revertCall(&vm, callLinkInfo, virtualThunk);
+ callLinkInfo.setSlowStub(createJITStubRoutine(virtualThunk, vm, nullptr, true));
}
-void linkClosureCall(ExecState* exec, CallLinkInfo& callLinkInfo, CodeBlock* calleeCodeBlock, Structure* structure, ExecutableBase* executable, MacroAssemblerCodePtr codePtr)
+namespace {
+struct CallToCodePtr {
+ CCallHelpers::Call call;
+ MacroAssemblerCodePtr codePtr;
+};
+} // anonymous namespace
+
+void linkPolymorphicCall(
+ ExecState* exec, CallLinkInfo& callLinkInfo, CallVariant newVariant)
{
- ASSERT(!callLinkInfo.stub);
+ RELEASE_ASSERT(callLinkInfo.allowStubs());
- CodeBlock* callerCodeBlock = exec->callerFrame()->codeBlock();
- VM* vm = callerCodeBlock->vm();
+ // Currently we can't do anything for non-function callees.
+ // https://bugs.webkit.org/show_bug.cgi?id=140685
+ if (!newVariant || !newVariant.executable()) {
+ linkVirtualFor(exec, callLinkInfo);
+ return;
+ }
+
+ CallFrame* callerFrame = exec->callerFrame();
+ VM& vm = callerFrame->vm();
+ CodeBlock* callerCodeBlock = callerFrame->codeBlock();
+ bool isWebAssembly = isWebAssemblyToJSCallee(vm, callerFrame->callee());
+
+ // WebAssembly -> JS stubs don't have a valid CodeBlock.
+ JSCell* owner = isWebAssembly ? webAssemblyOwner(vm) : callerCodeBlock;
+ ASSERT(owner);
+
+ CallVariantList list;
+ if (PolymorphicCallStubRoutine* stub = callLinkInfo.stub())
+ list = stub->variants();
+ else if (JSFunction* oldCallee = callLinkInfo.callee())
+ list = CallVariantList{ CallVariant(oldCallee) };
+
+ list = variantListWithVariant(list, newVariant);
+
+ // If there are any closure calls then it makes sense to treat all of them as closure calls.
+ // This makes switching on callee cheaper. It also produces profiling that's easier on the DFG;
+ // the DFG doesn't really want to deal with a combination of closure and non-closure callees.
+ bool isClosureCall = false;
+ for (CallVariant variant : list) {
+ if (variant.isClosureCall()) {
+ list = despecifiedVariantList(list);
+ isClosureCall = true;
+ break;
+ }
+ }
- GPRReg calleeGPR = static_cast<GPRReg>(callLinkInfo.calleeGPR);
+ if (isClosureCall)
+ callLinkInfo.setHasSeenClosure();
+
+ Vector<PolymorphicCallCase> callCases;
+
+ // Figure out what our cases are.
+ for (CallVariant variant : list) {
+ CodeBlock* codeBlock;
+ if (isWebAssembly || variant.executable()->isHostFunction())
+ codeBlock = nullptr;
+ else {
+ ExecutableBase* executable = variant.executable();
+ codeBlock = jsCast<FunctionExecutable*>(executable)->codeBlockForCall();
+ // If we cannot handle a callee, either because we don't have a CodeBlock or because of an arity mismatch,
+ // assume that it's better for this whole thing to be a virtual call.
+ if (!codeBlock || exec->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters()) || callLinkInfo.isVarargs()) {
+ linkVirtualFor(exec, callLinkInfo);
+ return;
+ }
+ }
+
+ callCases.append(PolymorphicCallCase(variant, codeBlock));
+ }
- CCallHelpers stubJit(vm, callerCodeBlock);
+ // If we are over the limit, just use a normal virtual call.
+ unsigned maxPolymorphicCallVariantListSize;
+ if (isWebAssembly)
+ maxPolymorphicCallVariantListSize = Options::maxPolymorphicCallVariantListSizeForWebAssemblyToJS();
+ else if (callerCodeBlock->jitType() == JITCode::topTierJIT())
+ maxPolymorphicCallVariantListSize = Options::maxPolymorphicCallVariantListSizeForTopTier();
+ else
+ maxPolymorphicCallVariantListSize = Options::maxPolymorphicCallVariantListSize();
+
+ if (list.size() > maxPolymorphicCallVariantListSize) {
+ linkVirtualFor(exec, callLinkInfo);
+ return;
+ }
+
+ GPRReg calleeGPR = static_cast<GPRReg>(callLinkInfo.calleeGPR());
+
+ CCallHelpers stubJit(&vm, callerCodeBlock);
CCallHelpers::JumpList slowPath;
+ std::unique_ptr<CallFrameShuffler> frameShuffler;
+ if (callLinkInfo.frameShuffleData()) {
+ ASSERT(callLinkInfo.isTailCall());
+ frameShuffler = std::make_unique<CallFrameShuffler>(stubJit, *callLinkInfo.frameShuffleData());
+#if USE(JSVALUE32_64)
+ // We would have already checked that the callee is a cell, and we can
+ // use the additional register this buys us.
+ frameShuffler->assumeCalleeIsCell();
+#endif
+ frameShuffler->lockGPR(calleeGPR);
+ }
+ GPRReg comparisonValueGPR;
+
+ if (isClosureCall) {
+ GPRReg scratchGPR;
+ if (frameShuffler)
+ scratchGPR = frameShuffler->acquireGPR();
+ else
+ scratchGPR = AssemblyHelpers::selectScratchGPR(calleeGPR);
+ // Verify that we have a function and stash the executable in scratchGPR.
+
#if USE(JSVALUE64)
- // We can safely clobber everything except the calleeGPR. We can't rely on tagMaskRegister
- // being set. So we do this the hard way.
- GPRReg scratch = AssemblyHelpers::selectScratchGPR(calleeGPR);
- stubJit.move(MacroAssembler::TrustedImm64(TagMask), scratch);
- slowPath.append(stubJit.branchTest64(CCallHelpers::NonZero, calleeGPR, scratch));
+ slowPath.append(stubJit.branchTest64(CCallHelpers::NonZero, calleeGPR, GPRInfo::tagMaskRegister));
#else
- // We would have already checked that the callee is a cell.
+ // We would have already checked that the callee is a cell.
#endif
- slowPath.append(
- stubJit.branchPtr(
- CCallHelpers::NotEqual,
- CCallHelpers::Address(calleeGPR, JSCell::structureOffset()),
- CCallHelpers::TrustedImmPtr(structure)));
+ slowPath.append(
+ stubJit.branch8(
+ CCallHelpers::NotEqual,
+ CCallHelpers::Address(calleeGPR, JSCell::typeInfoTypeOffset()),
+ CCallHelpers::TrustedImm32(JSFunctionType)));
- slowPath.append(
- stubJit.branchPtr(
- CCallHelpers::NotEqual,
+ stubJit.loadPtr(
CCallHelpers::Address(calleeGPR, JSFunction::offsetOfExecutable()),
- CCallHelpers::TrustedImmPtr(executable)));
+ scratchGPR);
+
+ comparisonValueGPR = scratchGPR;
+ } else
+ comparisonValueGPR = calleeGPR;
+
+ Vector<int64_t> caseValues(callCases.size());
+ Vector<CallToCodePtr> calls(callCases.size());
+ std::unique_ptr<uint32_t[]> fastCounts;
+
+ if (!isWebAssembly && callerCodeBlock->jitType() != JITCode::topTierJIT())
+ fastCounts = std::make_unique<uint32_t[]>(callCases.size());
+
+ for (size_t i = 0; i < callCases.size(); ++i) {
+ if (fastCounts)
+ fastCounts[i] = 0;
+
+ CallVariant variant = callCases[i].variant();
+ int64_t newCaseValue;
+ if (isClosureCall)
+ newCaseValue = bitwise_cast<intptr_t>(variant.executable());
+ else
+ newCaseValue = bitwise_cast<intptr_t>(variant.function());
+
+ if (!ASSERT_DISABLED) {
+ for (size_t j = 0; j < i; ++j) {
+ if (caseValues[j] != newCaseValue)
+ continue;
+
+ dataLog("ERROR: Attempt to add duplicate case value.\n");
+ dataLog("Existing case values: ");
+ CommaPrinter comma;
+ for (size_t k = 0; k < i; ++k)
+ dataLog(comma, caseValues[k]);
+ dataLog("\n");
+ dataLog("Attempting to add: ", newCaseValue, "\n");
+ dataLog("Variant list: ", listDump(callCases), "\n");
+ RELEASE_ASSERT_NOT_REACHED();
+ }
+ }
+
+ caseValues[i] = newCaseValue;
+ }
- stubJit.loadPtr(
- CCallHelpers::Address(calleeGPR, JSFunction::offsetOfScopeChain()),
- GPRInfo::returnValueGPR);
+ GPRReg fastCountsBaseGPR;
+ if (frameShuffler)
+ fastCountsBaseGPR = frameShuffler->acquireGPR();
+ else {
+ fastCountsBaseGPR =
+ AssemblyHelpers::selectScratchGPR(calleeGPR, comparisonValueGPR, GPRInfo::regT3);
+ }
+ stubJit.move(CCallHelpers::TrustedImmPtr(fastCounts.get()), fastCountsBaseGPR);
+ if (!frameShuffler && callLinkInfo.isTailCall())
+ stubJit.emitRestoreCalleeSaves();
+ BinarySwitch binarySwitch(comparisonValueGPR, caseValues, BinarySwitch::IntPtr);
+ CCallHelpers::JumpList done;
+ while (binarySwitch.advance(stubJit)) {
+ size_t caseIndex = binarySwitch.caseIndex();
+
+ CallVariant variant = callCases[caseIndex].variant();
+
+ ASSERT(variant.executable()->hasJITCodeForCall());
+ MacroAssemblerCodePtr codePtr =
+ variant.executable()->generatedJITCodeForCall()->addressForCall(ArityCheckNotRequired);
+
+ if (fastCounts) {
+ stubJit.add32(
+ CCallHelpers::TrustedImm32(1),
+ CCallHelpers::Address(fastCountsBaseGPR, caseIndex * sizeof(uint32_t)));
+ }
+ if (frameShuffler) {
+ CallFrameShuffler(stubJit, frameShuffler->snapshot()).prepareForTailCall();
+ calls[caseIndex].call = stubJit.nearTailCall();
+ } else if (callLinkInfo.isTailCall()) {
+ stubJit.prepareForTailCallSlow();
+ calls[caseIndex].call = stubJit.nearTailCall();
+ } else
+ calls[caseIndex].call = stubJit.nearCall();
+ calls[caseIndex].codePtr = codePtr;
+ done.append(stubJit.jump());
+ }
+
+ slowPath.link(&stubJit);
+ binarySwitch.fallThrough().link(&stubJit);
-#if USE(JSVALUE64)
- stubJit.store64(
- GPRInfo::returnValueGPR,
- CCallHelpers::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register) * JSStack::ScopeChain)));
+ if (frameShuffler) {
+ frameShuffler->releaseGPR(calleeGPR);
+ frameShuffler->releaseGPR(comparisonValueGPR);
+ frameShuffler->releaseGPR(fastCountsBaseGPR);
+#if USE(JSVALUE32_64)
+ frameShuffler->setCalleeJSValueRegs(JSValueRegs(GPRInfo::regT1, GPRInfo::regT0));
#else
- stubJit.storePtr(
- GPRInfo::returnValueGPR,
- CCallHelpers::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register) * JSStack::ScopeChain) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
- stubJit.store32(
- CCallHelpers::TrustedImm32(JSValue::CellTag),
- CCallHelpers::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register) * JSStack::ScopeChain) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ frameShuffler->setCalleeJSValueRegs(JSValueRegs(GPRInfo::regT0));
#endif
-
- AssemblyHelpers::Call call = stubJit.nearCall();
- AssemblyHelpers::Jump done = stubJit.jump();
-
- slowPath.link(&stubJit);
- stubJit.move(calleeGPR, GPRInfo::regT0);
+ frameShuffler->prepareForSlowPath();
+ } else {
+ stubJit.move(calleeGPR, GPRInfo::regT0);
#if USE(JSVALUE32_64)
- stubJit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::regT1);
+ stubJit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::regT1);
#endif
- stubJit.move(CCallHelpers::TrustedImmPtr(callLinkInfo.callReturnLocation.executableAddress()), GPRInfo::nonArgGPR2);
- stubJit.restoreReturnAddressBeforeReturn(GPRInfo::nonArgGPR2);
- AssemblyHelpers::Jump slow = stubJit.jump();
-
- LinkBuffer patchBuffer(*vm, &stubJit, callerCodeBlock);
-
- patchBuffer.link(call, FunctionPtr(codePtr.executableAddress()));
- patchBuffer.link(done, callLinkInfo.callReturnLocation.labelAtOffset(0));
- patchBuffer.link(slow, CodeLocationLabel(vm->getCTIStub(virtualCallThunkGenerator).code()));
-
- RefPtr<ClosureCallStubRoutine> stubRoutine = adoptRef(new ClosureCallStubRoutine(
- FINALIZE_DFG_CODE(
- patchBuffer,
- ("DFG closure call stub for %s, return point %p, target %p (%s)",
- toCString(*callerCodeBlock).data(), callLinkInfo.callReturnLocation.labelAtOffset(0).executableAddress(),
- codePtr.executableAddress(), toCString(pointerDump(calleeCodeBlock)).data())),
- *vm, callerCodeBlock->ownerExecutable(), structure, executable, callLinkInfo.codeOrigin));
+ }
+ stubJit.move(CCallHelpers::TrustedImmPtr(&callLinkInfo), GPRInfo::regT2);
+ stubJit.move(CCallHelpers::TrustedImmPtr(callLinkInfo.callReturnLocation().executableAddress()), GPRInfo::regT4);
- RepatchBuffer repatchBuffer(callerCodeBlock);
+ stubJit.restoreReturnAddressBeforeReturn(GPRInfo::regT4);
+ AssemblyHelpers::Jump slow = stubJit.jump();
+
+ LinkBuffer patchBuffer(vm, stubJit, owner, JITCompilationCanFail);
+ if (patchBuffer.didFailToAllocate()) {
+ linkVirtualFor(exec, callLinkInfo);
+ return;
+ }
- repatchBuffer.replaceWithJump(
- RepatchBuffer::startOfBranchPtrWithPatchOnRegister(callLinkInfo.hotPathBegin),
+ RELEASE_ASSERT(callCases.size() == calls.size());
+ for (CallToCodePtr callToCodePtr : calls) {
+ // Tail call special-casing ensures proper linking on ARM Thumb2, where a tail call jumps to an address
+ // with a non-decorated bottom bit but a normal call calls an address with a decorated bottom bit.
+ bool isTailCall = callToCodePtr.call.isFlagSet(CCallHelpers::Call::Tail);
+ patchBuffer.link(
+ callToCodePtr.call, FunctionPtr(isTailCall ? callToCodePtr.codePtr.dataLocation() : callToCodePtr.codePtr.executableAddress()));
+ }
+ if (isWebAssembly || JITCode::isOptimizingJIT(callerCodeBlock->jitType()))
+ patchBuffer.link(done, callLinkInfo.callReturnLocation().labelAtOffset(0));
+ else
+ patchBuffer.link(done, callLinkInfo.hotPathOther().labelAtOffset(0));
+ patchBuffer.link(slow, CodeLocationLabel(vm.getCTIStub(linkPolymorphicCallThunkGenerator).code()));
+
+ auto stubRoutine = adoptRef(*new PolymorphicCallStubRoutine(
+ FINALIZE_CODE_FOR(
+ callerCodeBlock, patchBuffer,
+ ("Polymorphic call stub for %s, return point %p, targets %s",
+ isWebAssembly ? "WebAssembly" : toCString(*callerCodeBlock).data(), callLinkInfo.callReturnLocation().labelAtOffset(0).executableAddress(),
+ toCString(listDump(callCases)).data())),
+ vm, owner, exec->callerFrame(), callLinkInfo, callCases,
+ WTFMove(fastCounts)));
+
+ MacroAssembler::replaceWithJump(
+ MacroAssembler::startOfBranchPtrWithPatchOnRegister(callLinkInfo.hotPathBegin()),
CodeLocationLabel(stubRoutine->code().code()));
- linkSlowFor(repatchBuffer, vm, callLinkInfo, CodeForCall);
+ // The original slow path is unreachable on 64-bit platforms, but still
+ // reachable on 32-bit platforms, since a non-cell callee will always
+ // trigger the slow path.
+ linkSlowFor(&vm, callLinkInfo);
- callLinkInfo.stub = stubRoutine.release();
+ // If there had been a previous stub routine, that one will die as soon as the GC runs and sees
+ // that it's no longer on the stack.
+ callLinkInfo.setStub(WTFMove(stubRoutine));
- ASSERT(!calleeCodeBlock || calleeCodeBlock->isIncomingCallAlreadyLinked(&callLinkInfo));
+ // The call link info no longer has a call cache apart from the jump to the polymorphic call
+ // stub.
+ if (callLinkInfo.isOnList())
+ callLinkInfo.remove();
}
-void resetGetByID(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
+void resetGetByID(CodeBlock* codeBlock, StructureStubInfo& stubInfo, GetByIDKind kind)
{
- repatchCall(repatchBuffer, stubInfo.callReturnLocation, operationGetByIdOptimize);
- CodeLocationDataLabelPtr structureLabel = stubInfo.callReturnLocation.dataLabelPtrAtOffset(-(intptr_t)stubInfo.patch.deltaCheckImmToCall);
- if (MacroAssembler::canJumpReplacePatchableBranchPtrWithPatch()) {
- repatchBuffer.revertJumpReplacementToPatchableBranchPtrWithPatch(
- RepatchBuffer::startOfPatchableBranchPtrWithPatchOnAddress(structureLabel),
- MacroAssembler::Address(
- static_cast<MacroAssembler::RegisterID>(stubInfo.patch.baseGPR),
- JSCell::structureOffset()),
- reinterpret_cast<void*>(unusedPointer));
- }
- repatchBuffer.repatch(structureLabel, reinterpret_cast<void*>(unusedPointer));
-#if USE(JSVALUE64)
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToLoadOrStore), 0);
-#else
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToTagLoadOrStore), 0);
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToPayloadLoadOrStore), 0);
-#endif
- repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
+ ftlThunkAwareRepatchCall(codeBlock, stubInfo.slowPathCallLocation(), appropriateOptimizingGetByIdFunction(kind));
+ InlineAccess::rewireStubAsJump(*codeBlock->vm(), stubInfo, stubInfo.slowPathStartLocation());
}
-void resetPutByID(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
+void resetPutByID(CodeBlock* codeBlock, StructureStubInfo& stubInfo)
{
- V_JITOperation_ESsiJJI unoptimizedFunction = bitwise_cast<V_JITOperation_ESsiJJI>(readCallTarget(repatchBuffer, stubInfo.callReturnLocation).executableAddress());
+ V_JITOperation_ESsiJJI unoptimizedFunction = bitwise_cast<V_JITOperation_ESsiJJI>(readCallTarget(codeBlock, stubInfo.slowPathCallLocation()).executableAddress());
V_JITOperation_ESsiJJI optimizedFunction;
- if (unoptimizedFunction == operationPutByIdStrict || unoptimizedFunction == operationPutByIdStrictBuildList)
+ if (unoptimizedFunction == operationPutByIdStrict || unoptimizedFunction == operationPutByIdStrictOptimize)
optimizedFunction = operationPutByIdStrictOptimize;
- else if (unoptimizedFunction == operationPutByIdNonStrict || unoptimizedFunction == operationPutByIdNonStrictBuildList)
+ else if (unoptimizedFunction == operationPutByIdNonStrict || unoptimizedFunction == operationPutByIdNonStrictOptimize)
optimizedFunction = operationPutByIdNonStrictOptimize;
- else if (unoptimizedFunction == operationPutByIdDirectStrict || unoptimizedFunction == operationPutByIdDirectStrictBuildList)
+ else if (unoptimizedFunction == operationPutByIdDirectStrict || unoptimizedFunction == operationPutByIdDirectStrictOptimize)
optimizedFunction = operationPutByIdDirectStrictOptimize;
else {
- ASSERT(unoptimizedFunction == operationPutByIdDirectNonStrict || unoptimizedFunction == operationPutByIdDirectNonStrictBuildList);
+ ASSERT(unoptimizedFunction == operationPutByIdDirectNonStrict || unoptimizedFunction == operationPutByIdDirectNonStrictOptimize);
optimizedFunction = operationPutByIdDirectNonStrictOptimize;
}
- repatchCall(repatchBuffer, stubInfo.callReturnLocation, optimizedFunction);
- CodeLocationDataLabelPtr structureLabel = stubInfo.callReturnLocation.dataLabelPtrAtOffset(-(intptr_t)stubInfo.patch.deltaCheckImmToCall);
- if (MacroAssembler::canJumpReplacePatchableBranchPtrWithPatch()) {
- repatchBuffer.revertJumpReplacementToPatchableBranchPtrWithPatch(
- RepatchBuffer::startOfPatchableBranchPtrWithPatchOnAddress(structureLabel),
- MacroAssembler::Address(
- static_cast<MacroAssembler::RegisterID>(stubInfo.patch.baseGPR),
- JSCell::structureOffset()),
- reinterpret_cast<void*>(unusedPointer));
- }
- repatchBuffer.repatch(structureLabel, reinterpret_cast<void*>(unusedPointer));
-#if USE(JSVALUE64)
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToLoadOrStore), 0);
-#else
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToTagLoadOrStore), 0);
- repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToPayloadLoadOrStore), 0);
-#endif
- repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
+
+ ftlThunkAwareRepatchCall(codeBlock, stubInfo.slowPathCallLocation(), optimizedFunction);
+ InlineAccess::rewireStubAsJump(*codeBlock->vm(), stubInfo, stubInfo.slowPathStartLocation());
}
-void resetIn(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
+void resetIn(CodeBlock*, StructureStubInfo& stubInfo)
{
- repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
+ MacroAssembler::repatchJump(stubInfo.patchableJumpForIn(), stubInfo.slowPathStartLocation());
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/jit/Repatch.h b/Source/JavaScriptCore/jit/Repatch.h
index faa787613..52a060ce4 100644
--- a/Source/JavaScriptCore/jit/Repatch.h
+++ b/Source/JavaScriptCore/jit/Repatch.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2015 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,47 +23,39 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef Repatch_h
-#define Repatch_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(JIT)
#include "CCallHelpers.h"
+#include "CallVariant.h"
#include "JITOperations.h"
+#include "PutKind.h"
namespace JSC {
-void repatchGetByID(ExecState*, JSValue, const Identifier&, const PropertySlot&, StructureStubInfo&);
+enum class GetByIDKind {
+ Normal,
+ Try
+};
+
+void repatchGetByID(ExecState*, JSValue, const Identifier&, const PropertySlot&, StructureStubInfo&, GetByIDKind);
void buildGetByIDList(ExecState*, JSValue, const Identifier&, const PropertySlot&, StructureStubInfo&);
void buildGetByIDProtoList(ExecState*, JSValue, const Identifier&, const PropertySlot&, StructureStubInfo&);
-void repatchPutByID(ExecState*, JSValue, const Identifier&, const PutPropertySlot&, StructureStubInfo&, PutKind);
-void buildPutByIdList(ExecState*, JSValue, const Identifier&, const PutPropertySlot&, StructureStubInfo&, PutKind);
+void repatchPutByID(ExecState*, JSValue, Structure*, const Identifier&, const PutPropertySlot&, StructureStubInfo&, PutKind);
+void buildPutByIdList(ExecState*, JSValue, Structure*, const Identifier&, const PutPropertySlot&, StructureStubInfo&, PutKind);
void repatchIn(ExecState*, JSCell*, const Identifier&, bool wasFound, const PropertySlot&, StructureStubInfo&);
-void linkFor(ExecState*, CallLinkInfo&, CodeBlock*, JSFunction* callee, MacroAssemblerCodePtr, CodeSpecializationKind);
-void linkSlowFor(ExecState*, CallLinkInfo&, CodeSpecializationKind);
-void linkClosureCall(ExecState*, CallLinkInfo&, CodeBlock*, Structure*, ExecutableBase*, MacroAssemblerCodePtr);
-void resetGetByID(RepatchBuffer&, StructureStubInfo&);
-void resetPutByID(RepatchBuffer&, StructureStubInfo&);
-void resetIn(RepatchBuffer&, StructureStubInfo&);
-
-} // namespace JSC
-
-#else // ENABLE(JIT)
-
-#include <wtf/Assertions.h>
-
-namespace JSC {
-
-class RepatchBuffer;
-struct StructureStubInfo;
-
-inline NO_RETURN_DUE_TO_CRASH void resetGetByID(RepatchBuffer&, StructureStubInfo&) { RELEASE_ASSERT_NOT_REACHED(); }
-inline NO_RETURN_DUE_TO_CRASH void resetPutByID(RepatchBuffer&, StructureStubInfo&) { RELEASE_ASSERT_NOT_REACHED(); }
-inline NO_RETURN void resetIn(RepatchBuffer&, StructureStubInfo&) { RELEASE_ASSERT_NOT_REACHED(); }
+void linkFor(ExecState*, CallLinkInfo&, CodeBlock*, JSFunction* callee, MacroAssemblerCodePtr);
+void linkDirectFor(ExecState*, CallLinkInfo&, CodeBlock*, MacroAssemblerCodePtr);
+void linkSlowFor(ExecState*, CallLinkInfo&);
+void unlinkFor(VM&, CallLinkInfo&);
+void linkVirtualFor(ExecState*, CallLinkInfo&);
+void linkPolymorphicCall(ExecState*, CallLinkInfo&, CallVariant);
+void resetGetByID(CodeBlock*, StructureStubInfo&, GetByIDKind);
+void resetPutByID(CodeBlock*, StructureStubInfo&);
+void resetIn(CodeBlock*, StructureStubInfo&);
+void ftlThunkAwareRepatchCall(CodeBlock*, CodeLocationCall, FunctionPtr newCalleeFunction);
} // namespace JSC
#endif // ENABLE(JIT)
-#endif // Repatch_h
diff --git a/Source/JavaScriptCore/jit/ScratchRegisterAllocator.cpp b/Source/JavaScriptCore/jit/ScratchRegisterAllocator.cpp
new file mode 100644
index 000000000..93d670d6c
--- /dev/null
+++ b/Source/JavaScriptCore/jit/ScratchRegisterAllocator.cpp
@@ -0,0 +1,302 @@
+/*
+ * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "ScratchRegisterAllocator.h"
+
+#if ENABLE(JIT)
+
+#include "JSCInlines.h"
+#include "MaxFrameExtentForSlowPathCall.h"
+#include "VM.h"
+
+namespace JSC {
+
+ScratchRegisterAllocator::ScratchRegisterAllocator(const RegisterSet& usedRegisters)
+ : m_usedRegisters(usedRegisters)
+ , m_numberOfReusedRegisters(0)
+{
+}
+
+ScratchRegisterAllocator::~ScratchRegisterAllocator() { }
+
+void ScratchRegisterAllocator::lock(GPRReg reg)
+{
+ if (reg == InvalidGPRReg)
+ return;
+ unsigned index = GPRInfo::toIndex(reg);
+ if (index == GPRInfo::InvalidIndex)
+ return;
+ m_lockedRegisters.setGPRByIndex(index);
+}
+
+void ScratchRegisterAllocator::lock(FPRReg reg)
+{
+ if (reg == InvalidFPRReg)
+ return;
+ unsigned index = FPRInfo::toIndex(reg);
+ if (index == FPRInfo::InvalidIndex)
+ return;
+ m_lockedRegisters.setFPRByIndex(index);
+}
+
+void ScratchRegisterAllocator::lock(JSValueRegs regs)
+{
+ lock(regs.tagGPR());
+ lock(regs.payloadGPR());
+}
+
+template<typename BankInfo>
+typename BankInfo::RegisterType ScratchRegisterAllocator::allocateScratch()
+{
+ // First try to allocate a register that is totally free.
+ for (unsigned i = 0; i < BankInfo::numberOfRegisters; ++i) {
+ typename BankInfo::RegisterType reg = BankInfo::toRegister(i);
+ if (!m_lockedRegisters.get(reg)
+ && !m_usedRegisters.get(reg)
+ && !m_scratchRegisters.get(reg)) {
+ m_scratchRegisters.set(reg);
+ return reg;
+ }
+ }
+
+ // Since that failed, try to allocate a register that is not yet
+ // locked or used for scratch.
+ for (unsigned i = 0; i < BankInfo::numberOfRegisters; ++i) {
+ typename BankInfo::RegisterType reg = BankInfo::toRegister(i);
+ if (!m_lockedRegisters.get(reg) && !m_scratchRegisters.get(reg)) {
+ m_scratchRegisters.set(reg);
+ m_numberOfReusedRegisters++;
+ return reg;
+ }
+ }
+
+ // We failed.
+ CRASH();
+ // Make some silly compilers happy.
+ return static_cast<typename BankInfo::RegisterType>(-1);
+}
+
+GPRReg ScratchRegisterAllocator::allocateScratchGPR() { return allocateScratch<GPRInfo>(); }
+FPRReg ScratchRegisterAllocator::allocateScratchFPR() { return allocateScratch<FPRInfo>(); }
+
+ScratchRegisterAllocator::PreservedState ScratchRegisterAllocator::preserveReusedRegistersByPushing(MacroAssembler& jit, ExtraStackSpace extraStackSpace)
+{
+ if (!didReuseRegisters())
+ return PreservedState(0, extraStackSpace);
+
+ RegisterSet registersToSpill;
+ for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
+ FPRReg reg = FPRInfo::toRegister(i);
+ if (m_scratchRegisters.getFPRByIndex(i) && m_usedRegisters.get(reg))
+ registersToSpill.set(reg);
+ }
+ for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
+ GPRReg reg = GPRInfo::toRegister(i);
+ if (m_scratchRegisters.getGPRByIndex(i) && m_usedRegisters.get(reg))
+ registersToSpill.set(reg);
+ }
+
+ unsigned extraStackBytesAtTopOfStack = extraStackSpace == ExtraStackSpace::SpaceForCCall ? maxFrameExtentForSlowPathCall : 0;
+ unsigned stackAdjustmentSize = ScratchRegisterAllocator::preserveRegistersToStackForCall(jit, registersToSpill, extraStackBytesAtTopOfStack);
+
+ return PreservedState(stackAdjustmentSize, extraStackSpace);
+}
+
+void ScratchRegisterAllocator::restoreReusedRegistersByPopping(MacroAssembler& jit, const ScratchRegisterAllocator::PreservedState& preservedState)
+{
+ RELEASE_ASSERT(preservedState);
+ if (!didReuseRegisters())
+ return;
+
+ RegisterSet registersToFill;
+ for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
+ GPRReg reg = GPRInfo::toRegister(i);
+ if (m_scratchRegisters.getGPRByIndex(i) && m_usedRegisters.get(reg))
+ registersToFill.set(reg);
+ }
+ for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
+ FPRReg reg = FPRInfo::toRegister(i);
+ if (m_scratchRegisters.getFPRByIndex(i) && m_usedRegisters.get(reg))
+ registersToFill.set(reg);
+ }
+
+ unsigned extraStackBytesAtTopOfStack =
+ preservedState.extraStackSpaceRequirement == ExtraStackSpace::SpaceForCCall ? maxFrameExtentForSlowPathCall : 0;
+ RegisterSet dontRestore; // Empty set. We want to restore everything.
+ ScratchRegisterAllocator::restoreRegistersFromStackForCall(jit, registersToFill, dontRestore,
+ preservedState.numberOfBytesPreserved, extraStackBytesAtTopOfStack);
+}
+
+RegisterSet ScratchRegisterAllocator::usedRegistersForCall() const
+{
+ RegisterSet result = m_usedRegisters;
+ result.exclude(RegisterSet::registersToNotSaveForJSCall());
+ return result;
+}
+
+unsigned ScratchRegisterAllocator::desiredScratchBufferSizeForCall() const
+{
+ return usedRegistersForCall().numberOfSetRegisters() * sizeof(JSValue);
+}
+
+void ScratchRegisterAllocator::preserveUsedRegistersToScratchBufferForCall(MacroAssembler& jit, ScratchBuffer* scratchBuffer, GPRReg scratchGPR)
+{
+ RegisterSet usedRegisters = usedRegistersForCall();
+ if (!usedRegisters.numberOfSetRegisters())
+ return;
+
+ unsigned count = 0;
+ for (GPRReg reg = MacroAssembler::firstRegister(); reg <= MacroAssembler::lastRegister(); reg = MacroAssembler::nextRegister(reg)) {
+ if (usedRegisters.get(reg)) {
+ jit.storePtr(reg, static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + count);
+ count++;
+ }
+ if (GPRInfo::toIndex(reg) != GPRInfo::InvalidIndex
+ && scratchGPR == InvalidGPRReg
+ && !m_lockedRegisters.get(reg) && !m_scratchRegisters.get(reg))
+ scratchGPR = reg;
+ }
+ RELEASE_ASSERT(scratchGPR != InvalidGPRReg);
+ for (FPRReg reg = MacroAssembler::firstFPRegister(); reg <= MacroAssembler::lastFPRegister(); reg = MacroAssembler::nextFPRegister(reg)) {
+ if (usedRegisters.get(reg)) {
+ jit.move(MacroAssembler::TrustedImmPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + count), scratchGPR);
+ count++;
+ jit.storeDouble(reg, scratchGPR);
+ }
+ }
+ RELEASE_ASSERT(count * sizeof(JSValue) == desiredScratchBufferSizeForCall());
+
+ jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->activeLengthPtr()), scratchGPR);
+ jit.storePtr(MacroAssembler::TrustedImmPtr(static_cast<size_t>(count * sizeof(JSValue))), scratchGPR);
+}
+
+void ScratchRegisterAllocator::restoreUsedRegistersFromScratchBufferForCall(MacroAssembler& jit, ScratchBuffer* scratchBuffer, GPRReg scratchGPR)
+{
+ RegisterSet usedRegisters = usedRegistersForCall();
+ if (!usedRegisters.numberOfSetRegisters())
+ return;
+
+ if (scratchGPR == InvalidGPRReg) {
+ // Find a scratch register.
+ for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
+ if (m_lockedRegisters.getGPRByIndex(i) || m_scratchRegisters.getGPRByIndex(i))
+ continue;
+ scratchGPR = GPRInfo::toRegister(i);
+ break;
+ }
+ }
+ RELEASE_ASSERT(scratchGPR != InvalidGPRReg);
+
+ jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->activeLengthPtr()), scratchGPR);
+ jit.storePtr(MacroAssembler::TrustedImmPtr(0), scratchGPR);
+
+ // Restore double registers first.
+ unsigned count = usedRegisters.numberOfSetGPRs();
+ for (FPRReg reg = MacroAssembler::firstFPRegister(); reg <= MacroAssembler::lastFPRegister(); reg = MacroAssembler::nextFPRegister(reg)) {
+ if (usedRegisters.get(reg)) {
+ jit.move(MacroAssembler::TrustedImmPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++)), scratchGPR);
+ jit.loadDouble(scratchGPR, reg);
+ }
+ }
+
+ count = 0;
+ for (GPRReg reg = MacroAssembler::firstRegister(); reg <= MacroAssembler::lastRegister(); reg = MacroAssembler::nextRegister(reg)) {
+ if (usedRegisters.get(reg))
+ jit.loadPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++), reg);
+ }
+}
+
+unsigned ScratchRegisterAllocator::preserveRegistersToStackForCall(MacroAssembler& jit, const RegisterSet& usedRegisters, unsigned extraBytesAtTopOfStack)
+{
+ RELEASE_ASSERT(extraBytesAtTopOfStack % sizeof(void*) == 0);
+ if (!usedRegisters.numberOfSetRegisters())
+ return 0;
+
+ unsigned stackOffset = (usedRegisters.numberOfSetRegisters()) * sizeof(EncodedJSValue);
+ stackOffset += extraBytesAtTopOfStack;
+ stackOffset = WTF::roundUpToMultipleOf(stackAlignmentBytes(), stackOffset);
+ jit.subPtr(
+ MacroAssembler::TrustedImm32(stackOffset),
+ MacroAssembler::stackPointerRegister);
+
+ unsigned count = 0;
+ for (GPRReg reg = MacroAssembler::firstRegister(); reg <= MacroAssembler::lastRegister(); reg = MacroAssembler::nextRegister(reg)) {
+ if (usedRegisters.get(reg)) {
+ jit.storePtr(reg, MacroAssembler::Address(MacroAssembler::stackPointerRegister, extraBytesAtTopOfStack + (count * sizeof(EncodedJSValue))));
+ count++;
+ }
+ }
+ for (FPRReg reg = MacroAssembler::firstFPRegister(); reg <= MacroAssembler::lastFPRegister(); reg = MacroAssembler::nextFPRegister(reg)) {
+ if (usedRegisters.get(reg)) {
+ jit.storeDouble(reg, MacroAssembler::Address(MacroAssembler::stackPointerRegister, extraBytesAtTopOfStack + (count * sizeof(EncodedJSValue))));
+ count++;
+ }
+ }
+
+ RELEASE_ASSERT(count == usedRegisters.numberOfSetRegisters());
+
+ return stackOffset;
+}
+
+void ScratchRegisterAllocator::restoreRegistersFromStackForCall(MacroAssembler& jit, const RegisterSet& usedRegisters, const RegisterSet& ignore, unsigned numberOfStackBytesUsedForRegisterPreservation, unsigned extraBytesAtTopOfStack)
+{
+ RELEASE_ASSERT(extraBytesAtTopOfStack % sizeof(void*) == 0);
+ if (!usedRegisters.numberOfSetRegisters()) {
+ RELEASE_ASSERT(numberOfStackBytesUsedForRegisterPreservation == 0);
+ return;
+ }
+
+ unsigned count = 0;
+ for (GPRReg reg = MacroAssembler::firstRegister(); reg <= MacroAssembler::lastRegister(); reg = MacroAssembler::nextRegister(reg)) {
+ if (usedRegisters.get(reg)) {
+ if (!ignore.get(reg))
+ jit.loadPtr(MacroAssembler::Address(MacroAssembler::stackPointerRegister, extraBytesAtTopOfStack + (sizeof(EncodedJSValue) * count)), reg);
+ count++;
+ }
+ }
+ for (FPRReg reg = MacroAssembler::firstFPRegister(); reg <= MacroAssembler::lastFPRegister(); reg = MacroAssembler::nextFPRegister(reg)) {
+ if (usedRegisters.get(reg)) {
+ if (!ignore.get(reg))
+ jit.loadDouble(MacroAssembler::Address(MacroAssembler::stackPointerRegister, extraBytesAtTopOfStack + (sizeof(EncodedJSValue) * count)), reg);
+ count++;
+ }
+ }
+
+ unsigned stackOffset = (usedRegisters.numberOfSetRegisters()) * sizeof(EncodedJSValue);
+ stackOffset += extraBytesAtTopOfStack;
+ stackOffset = WTF::roundUpToMultipleOf(stackAlignmentBytes(), stackOffset);
+
+ RELEASE_ASSERT(count == usedRegisters.numberOfSetRegisters());
+ RELEASE_ASSERT(stackOffset == numberOfStackBytesUsedForRegisterPreservation);
+
+ jit.addPtr(
+ MacroAssembler::TrustedImm32(stackOffset),
+ MacroAssembler::stackPointerRegister);
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
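Taken together, the allocator's intended flow is: seed it with the registers live at the stub's entry, lock anything the stub must keep, take scratch registers, and bracket code that clobbers reused registers with the preserve/restore pair. A minimal usage sketch under those assumptions (the stub body and register choice are hypothetical):

// Hypothetical stub fragment showing the API introduced above.
static void emitExampleStub(CCallHelpers& jit, const RegisterSet& liveRegisters, GPRReg baseGPR)
{
    ScratchRegisterAllocator allocator(liveRegisters);
    allocator.lock(baseGPR); // never hand the base register out as scratch
    GPRReg scratchGPR = allocator.allocateScratchGPR();

    // If allocateScratchGPR() had to reuse a live register, push it before clobbering it.
    ScratchRegisterAllocator::PreservedState preservedState =
        allocator.preserveReusedRegistersByPushing(
            jit, ScratchRegisterAllocator::ExtraStackSpace::NoExtraSpace);

    jit.move(baseGPR, scratchGPR); // ... stub body that may clobber scratchGPR ...

    allocator.restoreReusedRegistersByPopping(jit, preservedState);
}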
diff --git a/Source/JavaScriptCore/jit/ScratchRegisterAllocator.h b/Source/JavaScriptCore/jit/ScratchRegisterAllocator.h
index 1967226c5..5805c2f29 100644
--- a/Source/JavaScriptCore/jit/ScratchRegisterAllocator.h
+++ b/Source/JavaScriptCore/jit/ScratchRegisterAllocator.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,76 +23,35 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef ScratchRegisterAllocator_h
-#define ScratchRegisterAllocator_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(JIT)
#include "MacroAssembler.h"
+#include "RegisterSet.h"
#include "TempRegisterSet.h"
namespace JSC {
+struct ScratchBuffer;
+
// This class provides a low-level register allocator for use in stubs.
class ScratchRegisterAllocator {
public:
- ScratchRegisterAllocator(const TempRegisterSet& usedRegisters)
- : m_usedRegisters(usedRegisters)
- , m_numberOfReusedRegisters(0)
- {
- }
+ ScratchRegisterAllocator() { }
+ ScratchRegisterAllocator(const RegisterSet& usedRegisters);
+ ~ScratchRegisterAllocator();
- void lock(GPRReg reg)
- {
- unsigned index = GPRInfo::toIndex(reg);
- if (index == GPRInfo::InvalidIndex)
- return;
- m_lockedRegisters.setGPRByIndex(index);
- }
- void lock(FPRReg reg)
- {
- unsigned index = FPRInfo::toIndex(reg);
- if (index == FPRInfo::InvalidIndex)
- return;
- m_lockedRegisters.setFPRByIndex(index);
- }
+ void lock(GPRReg);
+ void lock(FPRReg);
+ void lock(JSValueRegs);
template<typename BankInfo>
- typename BankInfo::RegisterType allocateScratch()
- {
- // First try to allocate a register that is totally free.
- for (unsigned i = 0; i < BankInfo::numberOfRegisters; ++i) {
- typename BankInfo::RegisterType reg = BankInfo::toRegister(i);
- if (!m_lockedRegisters.get(reg)
- && !m_usedRegisters.get(reg)
- && !m_scratchRegisters.get(reg)) {
- m_scratchRegisters.set(reg);
- return reg;
- }
- }
-
- // Since that failed, try to allocate a register that is not yet
- // locked or used for scratch.
- for (unsigned i = 0; i < BankInfo::numberOfRegisters; ++i) {
- typename BankInfo::RegisterType reg = BankInfo::toRegister(i);
- if (!m_lockedRegisters.get(reg) && !m_scratchRegisters.get(reg)) {
- m_scratchRegisters.set(reg);
- m_numberOfReusedRegisters++;
- return reg;
- }
- }
-
- // We failed.
- CRASH();
- // Make some silly compilers happy.
- return static_cast<typename BankInfo::RegisterType>(-1);
- }
+ typename BankInfo::RegisterType allocateScratch();
- GPRReg allocateScratchGPR() { return allocateScratch<GPRInfo>(); }
- FPRReg allocateScratchFPR() { return allocateScratch<FPRInfo>(); }
+ GPRReg allocateScratchGPR();
+ FPRReg allocateScratchFPR();
bool didReuseRegisters() const
{
@@ -103,105 +62,43 @@ public:
{
return m_numberOfReusedRegisters;
}
+
+ RegisterSet usedRegisters() const { return m_usedRegisters; }
- void preserveReusedRegistersByPushing(MacroAssembler& jit)
- {
- if (!didReuseRegisters())
- return;
-
- for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
- if (m_scratchRegisters.getFPRByIndex(i) && m_usedRegisters.getFPRByIndex(i))
- jit.pushToSave(FPRInfo::toRegister(i));
- }
- for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
- if (m_scratchRegisters.getGPRByIndex(i) && m_usedRegisters.getGPRByIndex(i))
- jit.pushToSave(GPRInfo::toRegister(i));
- }
- }
-
- void restoreReusedRegistersByPopping(MacroAssembler& jit)
- {
- if (!didReuseRegisters())
- return;
-
- for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
- if (m_scratchRegisters.getGPRByIndex(i) && m_usedRegisters.getGPRByIndex(i))
- jit.popToRestore(GPRInfo::toRegister(i));
- }
- for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
- if (m_scratchRegisters.getFPRByIndex(i) && m_usedRegisters.getFPRByIndex(i))
- jit.popToRestore(FPRInfo::toRegister(i));
- }
- }
-
- unsigned desiredScratchBufferSize() const { return m_usedRegisters.numberOfSetRegisters() * sizeof(JSValue); }
+ enum class ExtraStackSpace { SpaceForCCall, NoExtraSpace };
+
+ struct PreservedState {
+ PreservedState()
+ : numberOfBytesPreserved(std::numeric_limits<unsigned>::max())
+ , extraStackSpaceRequirement(ExtraStackSpace::SpaceForCCall)
+ { }
+
+ PreservedState(unsigned numberOfBytes, ExtraStackSpace extraStackSpace)
+ : numberOfBytesPreserved(numberOfBytes)
+ , extraStackSpaceRequirement(extraStackSpace)
+ { }
+
+ explicit operator bool() const { return numberOfBytesPreserved != std::numeric_limits<unsigned>::max(); }
+
+ unsigned numberOfBytesPreserved;
+ ExtraStackSpace extraStackSpaceRequirement;
+ };
+
+ PreservedState preserveReusedRegistersByPushing(MacroAssembler& jit, ExtraStackSpace);
+ void restoreReusedRegistersByPopping(MacroAssembler& jit, const PreservedState&);
- void preserveUsedRegistersToScratchBuffer(MacroAssembler& jit, ScratchBuffer* scratchBuffer, GPRReg scratchGPR = InvalidGPRReg)
- {
- unsigned count = 0;
- for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
- if (m_usedRegisters.getGPRByIndex(i)) {
-#if USE(JSVALUE64)
- jit.store64(GPRInfo::toRegister(i), static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++));
-#else
- jit.store32(GPRInfo::toRegister(i), static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++));
-#endif
- }
- if (scratchGPR == InvalidGPRReg && !m_lockedRegisters.getGPRByIndex(i) && !m_scratchRegisters.getGPRByIndex(i))
- scratchGPR = GPRInfo::toRegister(i);
- }
- RELEASE_ASSERT(scratchGPR != InvalidGPRReg);
- for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
- if (m_usedRegisters.getFPRByIndex(i)) {
- jit.move(MacroAssembler::TrustedImmPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++)), scratchGPR);
- jit.storeDouble(FPRInfo::toRegister(i), scratchGPR);
- }
- }
- RELEASE_ASSERT(count * sizeof(JSValue) == desiredScratchBufferSize());
-
- jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->activeLengthPtr()), scratchGPR);
- jit.storePtr(MacroAssembler::TrustedImmPtr(static_cast<size_t>(count * sizeof(JSValue))), scratchGPR);
- }
+ RegisterSet usedRegistersForCall() const;
- void restoreUsedRegistersFromScratchBuffer(MacroAssembler& jit, ScratchBuffer* scratchBuffer, GPRReg scratchGPR = InvalidGPRReg)
- {
- if (scratchGPR == InvalidGPRReg) {
- // Find a scratch register.
- for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
- if (m_lockedRegisters.getGPRByIndex(i) || m_scratchRegisters.getGPRByIndex(i))
- continue;
- scratchGPR = GPRInfo::toRegister(i);
- break;
- }
- }
- RELEASE_ASSERT(scratchGPR != InvalidGPRReg);
-
- jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->activeLengthPtr()), scratchGPR);
- jit.storePtr(MacroAssembler::TrustedImmPtr(0), scratchGPR);
-
- // Restore double registers first.
- unsigned count = m_usedRegisters.numberOfSetGPRs();
- for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
- if (m_usedRegisters.getFPRByIndex(i)) {
- jit.move(MacroAssembler::TrustedImmPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++)), scratchGPR);
- jit.loadDouble(scratchGPR, FPRInfo::toRegister(i));
- }
- }
-
- count = 0;
- for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
- if (m_usedRegisters.getGPRByIndex(i)) {
-#if USE(JSVALUE64)
- jit.load64(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++), GPRInfo::toRegister(i));
-#else
- jit.load32(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++), GPRInfo::toRegister(i));
-#endif
- }
- }
- }
+ unsigned desiredScratchBufferSizeForCall() const;
+ void preserveUsedRegistersToScratchBufferForCall(MacroAssembler& jit, ScratchBuffer* scratchBuffer, GPRReg scratchGPR = InvalidGPRReg);
+ void restoreUsedRegistersFromScratchBufferForCall(MacroAssembler& jit, ScratchBuffer* scratchBuffer, GPRReg scratchGPR = InvalidGPRReg);
+
+ static unsigned preserveRegistersToStackForCall(MacroAssembler& jit, const RegisterSet& usedRegisters, unsigned extraPaddingInBytes);
+ static void restoreRegistersFromStackForCall(MacroAssembler& jit, const RegisterSet& usedRegisters, const RegisterSet& ignore, unsigned numberOfStackBytesUsedForRegisterPreservation, unsigned extraPaddingInBytes);
+
private:
- TempRegisterSet m_usedRegisters;
+ RegisterSet m_usedRegisters;
TempRegisterSet m_lockedRegisters;
TempRegisterSet m_scratchRegisters;
unsigned m_numberOfReusedRegisters;
@@ -210,6 +107,3 @@ private:
} // namespace JSC
#endif // ENABLE(JIT)
-
-#endif // ScratchRegisterAllocator_h
-
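For preserveRegistersToStackForCall()/restoreRegistersFromStackForCall(), the stack adjustment is just the preserved register count times sizeof(EncodedJSValue), plus the extra padding, rounded up to the stack alignment. A small self-checking sketch of that arithmetic (the 8-byte register slots and 16-byte alignment are assumptions for a typical 64-bit target):

// Mirrors the computation in preserveRegistersToStackForCall(); not JSC code.
constexpr unsigned roundUp(unsigned multiple, unsigned x)
{
    return (x + multiple - 1) / multiple * multiple;
}

constexpr unsigned stackBytesForPreservedRegisters(unsigned numRegisters, unsigned extraBytesAtTopOfStack)
{
    return roundUp(16, numRegisters * 8 + extraBytesAtTopOfStack);
}

static_assert(stackBytesForPreservedRegisters(3, 0) == 32, "24 bytes of registers round up to 32");
static_assert(stackBytesForPreservedRegisters(4, 8) == 48, "32 + 8 bytes round up to 48");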
diff --git a/Source/JavaScriptCore/jit/SetupVarargsFrame.cpp b/Source/JavaScriptCore/jit/SetupVarargsFrame.cpp
new file mode 100644
index 000000000..3807ec9a3
--- /dev/null
+++ b/Source/JavaScriptCore/jit/SetupVarargsFrame.cpp
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2015-2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "SetupVarargsFrame.h"
+
+#if ENABLE(JIT)
+
+#include "Interpreter.h"
+#include "JSCInlines.h"
+#include "StackAlignment.h"
+
+namespace JSC {
+
+void emitSetVarargsFrame(CCallHelpers& jit, GPRReg lengthGPR, bool lengthIncludesThis, GPRReg numUsedSlotsGPR, GPRReg resultGPR)
+{
+ jit.move(numUsedSlotsGPR, resultGPR);
+ // We really want to make sure the size of the new call frame is a multiple of
+ // stackAlignmentRegisters(); however, it is easier to accomplish this by
+ // rounding numUsedSlotsGPR to the next multiple of stackAlignmentRegisters().
+ // Together with the rounding below, we will ensure that the new call frame is
+ // located on a stackAlignmentRegisters() boundary and a multiple of
+ // stackAlignmentRegisters() in size.
+ jit.addPtr(CCallHelpers::TrustedImm32(stackAlignmentRegisters() - 1), resultGPR);
+ jit.andPtr(CCallHelpers::TrustedImm32(~(stackAlignmentRegisters() - 1)), resultGPR);
+
+ jit.addPtr(lengthGPR, resultGPR);
+ jit.addPtr(CCallHelpers::TrustedImm32(CallFrame::headerSizeInRegisters + (lengthIncludesThis ? 0 : 1)), resultGPR);
+
+ // resultGPR now has the required frame size in Register units
+ // Round resultGPR to next multiple of stackAlignmentRegisters()
+ jit.addPtr(CCallHelpers::TrustedImm32(stackAlignmentRegisters() - 1), resultGPR);
+ jit.andPtr(CCallHelpers::TrustedImm32(~(stackAlignmentRegisters() - 1)), resultGPR);
+
+ // Now resultGPR has the right stack frame offset in Register units.
+ jit.negPtr(resultGPR);
+ jit.lshiftPtr(CCallHelpers::Imm32(3), resultGPR);
+ jit.addPtr(GPRInfo::callFrameRegister, resultGPR);
+}
+
+static void emitSetupVarargsFrameFastCase(CCallHelpers& jit, GPRReg numUsedSlotsGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, GPRReg scratchGPR3, ValueRecovery argCountRecovery, VirtualRegister firstArgumentReg, unsigned firstVarArgOffset, CCallHelpers::JumpList& slowCase)
+{
+ CCallHelpers::JumpList end;
+
+ if (argCountRecovery.isConstant()) {
+ // FIXME: We could constant-fold a lot of the computation below in this case.
+ // https://bugs.webkit.org/show_bug.cgi?id=141486
+ jit.move(CCallHelpers::TrustedImm32(argCountRecovery.constant().asInt32()), scratchGPR1);
+ } else
+ jit.load32(CCallHelpers::payloadFor(argCountRecovery.virtualRegister()), scratchGPR1);
+ if (firstVarArgOffset) {
+ CCallHelpers::Jump sufficientArguments = jit.branch32(CCallHelpers::GreaterThan, scratchGPR1, CCallHelpers::TrustedImm32(firstVarArgOffset + 1));
+ jit.move(CCallHelpers::TrustedImm32(1), scratchGPR1);
+ CCallHelpers::Jump endVarArgs = jit.jump();
+ sufficientArguments.link(&jit);
+ jit.sub32(CCallHelpers::TrustedImm32(firstVarArgOffset), scratchGPR1);
+ endVarArgs.link(&jit);
+ }
+ slowCase.append(jit.branch32(CCallHelpers::Above, scratchGPR1, CCallHelpers::TrustedImm32(JSC::maxArguments + 1)));
+
+ emitSetVarargsFrame(jit, scratchGPR1, true, numUsedSlotsGPR, scratchGPR2);
+
+ slowCase.append(jit.branchPtr(CCallHelpers::Above, CCallHelpers::AbsoluteAddress(jit.vm()->addressOfSoftStackLimit()), scratchGPR2));
+
+ // Before touching stack values, we should update the stack pointer to protect them from the signal stack.
+ jit.addPtr(CCallHelpers::TrustedImm32(sizeof(CallerFrameAndPC)), scratchGPR2, CCallHelpers::stackPointerRegister);
+
+ // Initialize ArgumentCount.
+ jit.store32(scratchGPR1, CCallHelpers::Address(scratchGPR2, CallFrameSlot::argumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset));
+
+ // Copy arguments.
+ jit.signExtend32ToPtr(scratchGPR1, scratchGPR1);
+ CCallHelpers::Jump done = jit.branchSubPtr(CCallHelpers::Zero, CCallHelpers::TrustedImm32(1), scratchGPR1);
+ // scratchGPR1: argumentCount
+
+ CCallHelpers::Label copyLoop = jit.label();
+ int argOffset = (firstArgumentReg.offset() - 1 + firstVarArgOffset) * static_cast<int>(sizeof(Register));
+#if USE(JSVALUE64)
+ jit.load64(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight, argOffset), scratchGPR3);
+ jit.store64(scratchGPR3, CCallHelpers::BaseIndex(scratchGPR2, scratchGPR1, CCallHelpers::TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
+#else // USE(JSVALUE64), so this begins the 32-bit case
+ jit.load32(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight, argOffset + TagOffset), scratchGPR3);
+ jit.store32(scratchGPR3, CCallHelpers::BaseIndex(scratchGPR2, scratchGPR1, CCallHelpers::TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)) + TagOffset));
+ jit.load32(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight, argOffset + PayloadOffset), scratchGPR3);
+ jit.store32(scratchGPR3, CCallHelpers::BaseIndex(scratchGPR2, scratchGPR1, CCallHelpers::TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)) + PayloadOffset));
+#endif // USE(JSVALUE64), end of 32-bit case
+ jit.branchSubPtr(CCallHelpers::NonZero, CCallHelpers::TrustedImm32(1), scratchGPR1).linkTo(copyLoop, &jit);
+
+ done.link(&jit);
+}
+
+void emitSetupVarargsFrameFastCase(CCallHelpers& jit, GPRReg numUsedSlotsGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, GPRReg scratchGPR3, InlineCallFrame* inlineCallFrame, unsigned firstVarArgOffset, CCallHelpers::JumpList& slowCase)
+{
+ ValueRecovery argumentCountRecovery;
+ VirtualRegister firstArgumentReg;
+ if (inlineCallFrame) {
+ if (inlineCallFrame->isVarargs()) {
+ argumentCountRecovery = ValueRecovery::displacedInJSStack(
+ inlineCallFrame->argumentCountRegister, DataFormatInt32);
+ } else {
+ argumentCountRecovery = ValueRecovery::constant(
+ jsNumber(inlineCallFrame->arguments.size()));
+ }
+ if (inlineCallFrame->arguments.size() > 1)
+ firstArgumentReg = inlineCallFrame->arguments[1].virtualRegister();
+ else
+ firstArgumentReg = VirtualRegister(0);
+ } else {
+ argumentCountRecovery = ValueRecovery::displacedInJSStack(
+ VirtualRegister(CallFrameSlot::argumentCount), DataFormatInt32);
+ firstArgumentReg = VirtualRegister(CallFrame::argumentOffset(0));
+ }
+ emitSetupVarargsFrameFastCase(jit, numUsedSlotsGPR, scratchGPR1, scratchGPR2, scratchGPR3, argumentCountRecovery, firstArgumentReg, firstVarArgOffset, slowCase);
+}
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
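The rounding dance in emitSetVarargsFrame() boils down to: round the caller's used-slot count up to the alignment, add the argument count (plus one for `this` if the length excludes it) and the call frame header, round again, and place the new frame that many registers below the current one. A compile-time restatement of that arithmetic, with assumed constants (2-register stack alignment, a 5-register call frame header):

// Not JSC code: a checkable restatement of the register count the emitted
// code computes before negating and scaling by sizeof(Register).
constexpr unsigned roundUpRegisters(unsigned n) { return (n + 1) & ~1u; } // stackAlignmentRegisters() == 2 assumed

constexpr unsigned varargsFrameOffsetInRegisters(unsigned numUsedSlots, unsigned length, bool lengthIncludesThis)
{
    unsigned size = roundUpRegisters(numUsedSlots);
    size += length + (lengthIncludesThis ? 0 : 1);
    size += 5; // CallFrame::headerSizeInRegisters, assumed value
    return roundUpRegisters(size);
}

static_assert(varargsFrameOffsetInRegisters(10, 3, false) == 20, "10 used slots + 3 args + this + header lands 20 registers down");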
diff --git a/Source/JavaScriptCore/jit/SetupVarargsFrame.h b/Source/JavaScriptCore/jit/SetupVarargsFrame.h
new file mode 100644
index 000000000..8639a2a4b
--- /dev/null
+++ b/Source/JavaScriptCore/jit/SetupVarargsFrame.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "CCallHelpers.h"
+#include "VirtualRegister.h"
+
+namespace JSC {
+
+void emitSetVarargsFrame(CCallHelpers&, GPRReg lengthGPR, bool lengthIncludesThis, GPRReg numUsedSlotsGPR, GPRReg resultGPR);
+
+// Assumes that SP refers to the last in-use stack location, and after this returns, SP will point to
+// the newly created frame plus the native header. scratchGPR2 may be the same as numUsedSlotsGPR.
+void emitSetupVarargsFrameFastCase(CCallHelpers&, GPRReg numUsedSlotsGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, GPRReg scratchGPR3, InlineCallFrame*, unsigned firstVarArgOffset, CCallHelpers::JumpList& slowCase);
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
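The header's contract is worth spelling out for callers: on the fast path the helper falls through with the varargs frame built and the stack pointer already moved, and every condition it cannot handle is appended to the slowCase jump list. A hedged sketch of a call site under those assumptions (register choices and the surrounding fast/slow paths are illustrative):

// Hypothetical call site for emitSetupVarargsFrameFastCase().
static void emitVarargsFramePrologue(CCallHelpers& jit)
{
    CCallHelpers::JumpList slowCase;
    emitSetupVarargsFrameFastCase(
        jit, GPRInfo::regT1 /* numUsedSlots */, GPRInfo::regT0, GPRInfo::regT2, GPRInfo::regT3,
        nullptr /* no InlineCallFrame: read the machine frame's argument count */,
        0 /* firstVarArgOffset */, slowCase);
    // ... fast path: finish setting up the callee and make the call ...
    slowCase.link(&jit);
    // ... slow path: fall back to the C++ varargs setup ...
}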
diff --git a/Source/JavaScriptCore/jit/SlowPathCall.h b/Source/JavaScriptCore/jit/SlowPathCall.h
index f0aa28e83..1dfc21a42 100644
--- a/Source/JavaScriptCore/jit/SlowPathCall.h
+++ b/Source/JavaScriptCore/jit/SlowPathCall.h
@@ -23,8 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef SlowPathCall_h
-#define SlowPathCall_h
+#pragma once
#include "CommonSlowPaths.h"
#include "MacroAssemblerCodeRef.h"
@@ -45,7 +44,7 @@ public:
JIT::Call call()
{
#if ENABLE(OPCODE_SAMPLING)
- if (m_jit->m_bytecodeOffset != (unsigned)-1)
+ if (m_jit->m_bytecodeOffset != std::numeric_limits<unsigned>::max())
m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeOffset, true);
#endif
m_jit->updateTopCallFrame();
@@ -73,7 +72,7 @@ public:
#endif
#if ENABLE(OPCODE_SAMPLING)
- if (m_jit->m_bytecodeOffset != (unsigned)-1)
+ if (m_jit->m_bytecodeOffset != std::numeric_limits<unsigned>::max())
m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeOffset, false);
#endif
@@ -90,5 +89,3 @@ private:
} // namespace JSC
#endif // ENABLE(JIT)
-
-#endif // SlowPathCall_h
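The sentinel change above is purely a spelling cleanup; both forms denote the same value, which a one-line check confirms:

// Standalone sanity check; not part of the patch.
#include <limits>
static_assert(static_cast<unsigned>(-1) == std::numeric_limits<unsigned>::max(),
    "old and new bytecode-offset sentinels are identical");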
diff --git a/Source/JavaScriptCore/jit/SnippetOperand.h b/Source/JavaScriptCore/jit/SnippetOperand.h
new file mode 100644
index 000000000..5da3ec018
--- /dev/null
+++ b/Source/JavaScriptCore/jit/SnippetOperand.h
@@ -0,0 +1,104 @@
+/*
+ * Copyright (C) 2015 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#if ENABLE(JIT)
+
+#include "ResultType.h"
+
+namespace JSC {
+
+class SnippetOperand {
+ enum ConstOrVarType {
+ Variable,
+ ConstInt32,
+ ConstDouble
+ };
+
+public:
+ SnippetOperand()
+ : m_resultType(ResultType::unknownType())
+ { }
+
+ SnippetOperand(ResultType resultType)
+ : m_resultType(resultType)
+ { }
+
+ bool mightBeNumber() const { return m_resultType.mightBeNumber(); }
+ bool definitelyIsNumber() const { return m_resultType.definitelyIsNumber(); }
+
+ bool isConst() const { return m_type != Variable; }
+ bool isConstInt32() const { return m_type == ConstInt32; }
+ bool isConstDouble() const { return m_type == ConstDouble; }
+ bool isPositiveConstInt32() const { return isConstInt32() && asConstInt32() > 0; }
+
+ int64_t asRawBits() const { return m_val.rawBits; }
+
+ int32_t asConstInt32() const
+ {
+ ASSERT(m_type == ConstInt32);
+ return m_val.int32Val;
+ }
+
+ double asConstDouble() const
+ {
+ ASSERT(m_type == ConstDouble);
+ return m_val.doubleVal;
+ }
+
+ double asConstNumber() const
+ {
+ if (isConstInt32())
+ return asConstInt32();
+ ASSERT(isConstDouble());
+ return asConstDouble();
+ }
+
+ void setConstInt32(int32_t value)
+ {
+ m_type = ConstInt32;
+ m_val.int32Val = value;
+ }
+
+ void setConstDouble(double value)
+ {
+ m_type = ConstDouble;
+ m_val.doubleVal = value;
+ }
+
+private:
+ ResultType m_resultType;
+ ConstOrVarType m_type { Variable };
+ union {
+ int32_t int32Val;
+ double doubleVal;
+ int64_t rawBits;
+ } m_val;
+};
+
+} // namespace JSC
+
+#endif // ENABLE(JIT)
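SnippetOperand is a small value type the arithmetic snippet generators use to describe each operand: its ResultType plus, when known, a constant payload. A short usage sketch (the generator that would consume it is hypothetical):

// Illustrative only: describe a right-hand operand that is a known positive int32.
SnippetOperand rightOperand(ResultType::unknownType());
rightOperand.setConstInt32(8);

if (rightOperand.isPositiveConstInt32()) {
    int32_t imm = rightOperand.asConstInt32();
    // ... emit the cheaper immediate path using imm instead of loading the operand ...
}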
diff --git a/Source/JavaScriptCore/jit/SpecializedThunkJIT.h b/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
index 6ec1e71a7..05f41f4dc 100644
--- a/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
+++ b/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010 Apple Inc. All rights reserved.
+ * Copyright (C) 2010, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -23,14 +23,13 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef SpecializedThunkJIT_h
-#define SpecializedThunkJIT_h
+#pragma once
#if ENABLE(JIT)
-#include "Executable.h"
+#include "JIT.h"
+#include "JITInlines.h"
#include "JSInterfaceJIT.h"
-#include "JSStack.h"
#include "LinkBuffer.h"
namespace JSC {
@@ -41,13 +40,17 @@ namespace JSC {
SpecializedThunkJIT(VM* vm, int expectedArgCount)
: JSInterfaceJIT(vm)
{
+ emitFunctionPrologue();
+ emitSaveThenMaterializeTagRegisters();
// Check that we have the expected number of arguments
- m_failures.append(branch32(NotEqual, payloadFor(JSStack::ArgumentCount), TrustedImm32(expectedArgCount + 1)));
+ m_failures.append(branch32(NotEqual, payloadFor(CallFrameSlot::argumentCount), TrustedImm32(expectedArgCount + 1)));
}
explicit SpecializedThunkJIT(VM* vm)
: JSInterfaceJIT(vm)
{
+ emitFunctionPrologue();
+ emitSaveThenMaterializeTagRegisters();
}
void loadDoubleArgument(int argument, FPRegisterID dst, RegisterID scratch)
@@ -65,14 +68,18 @@ namespace JSC {
void loadJSStringArgument(VM& vm, int argument, RegisterID dst)
{
loadCellArgument(argument, dst);
- m_failures.append(branchPtr(NotEqual, Address(dst, JSCell::structureOffset()), TrustedImmPtr(vm.stringStructure.get())));
+ m_failures.append(branchStructure(NotEqual,
+ Address(dst, JSCell::structureIDOffset()),
+ vm.stringStructure.get()));
}
void loadArgumentWithSpecificClass(const ClassInfo* classInfo, int argument, RegisterID dst, RegisterID scratch)
{
loadCellArgument(argument, dst);
- loadPtr(Address(dst, JSCell::structureOffset()), scratch);
+ emitLoadStructure(dst, scratch, dst);
appendFailure(branchPtr(NotEqual, Address(scratch, Structure::classInfoOffset()), TrustedImmPtr(classInfo)));
+ // We have to reload the argument since emitLoadStructure clobbered it.
+ loadCellArgument(argument, dst);
}
void loadInt32Argument(int argument, RegisterID dst, Jump& failTarget)
@@ -97,7 +104,9 @@ namespace JSC {
{
if (src != regT0)
move(src, regT0);
- loadPtr(Address(callFrameRegister, CallFrame::callerFrameOffset()), callFrameRegister);
+
+ emitRestoreSavedTagRegisters();
+ emitFunctionEpilogue();
ret();
}
#else
@@ -105,7 +114,8 @@ namespace JSC {
{
ASSERT_UNUSED(payload, payload == regT0);
ASSERT_UNUSED(tag, tag == regT1);
- loadPtr(Address(callFrameRegister, CallFrame::callerFrameOffset()), callFrameRegister);
+ emitRestoreSavedTagRegisters();
+ emitFunctionEpilogue();
ret();
}
#endif
@@ -121,14 +131,7 @@ namespace JSC {
move(tagTypeNumberRegister, regT0);
done.link(this);
#else
-#if !CPU(X86)
- // The src register is not clobbered by moveDoubleToInts with ARM, MIPS and SH4 macro assemblers, so let's use it.
moveDoubleToInts(src, regT0, regT1);
-#else
- storeDouble(src, Address(stackPointerRegister, -(int)sizeof(double)));
- loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(JSValue, u.asBits.tag) - sizeof(double)), regT1);
- loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(JSValue, u.asBits.payload) - sizeof(double)), regT0);
-#endif
Jump lowNonZero = branchTestPtr(NonZero, regT1);
Jump highNonZero = branchTestPtr(NonZero, regT0);
move(TrustedImm32(0), regT0);
@@ -136,7 +139,8 @@ namespace JSC {
lowNonZero.link(this);
highNonZero.link(this);
#endif
- loadPtr(Address(callFrameRegister, CallFrame::callerFrameOffset()), callFrameRegister);
+ emitRestoreSavedTagRegisters();
+ emitFunctionEpilogue();
ret();
}
@@ -145,7 +149,8 @@ namespace JSC {
if (src != regT0)
move(src, regT0);
tagReturnAsInt32();
- loadPtr(Address(callFrameRegister, CallFrame::callerFrameOffset()), callFrameRegister);
+ emitRestoreSavedTagRegisters();
+ emitFunctionEpilogue();
ret();
}
@@ -154,13 +159,14 @@ namespace JSC {
if (src != regT0)
move(src, regT0);
tagReturnAsJSCell();
- loadPtr(Address(callFrameRegister, CallFrame::callerFrameOffset()), callFrameRegister);
+ emitRestoreSavedTagRegisters();
+ emitFunctionEpilogue();
ret();
}
MacroAssemblerCodeRef finalize(MacroAssemblerCodePtr fallback, const char* thunkKind)
{
- LinkBuffer patchBuffer(*m_vm, this, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*m_vm, *this, GLOBAL_THUNK_ID);
patchBuffer.link(m_failures, CodeLocationLabel(fallback));
for (unsigned i = 0; i < m_calls.size(); i++)
patchBuffer.link(m_calls[i].first, m_calls[i].second);
@@ -184,7 +190,6 @@ namespace JSC {
}
private:
-
void tagReturnAsInt32()
{
#if USE(JSVALUE64)
@@ -208,5 +213,3 @@ namespace JSC {
}
#endif // ENABLE(JIT)
-
-#endif // SpecializedThunkJIT_h
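With the prologue/epilogue now emitted by the class itself, a thunk generator only has to load its arguments and pick a return helper. A hedged sketch of that flow (the thunk body, fallback, and name are illustrative, patterned on the existing specialized math thunks):

// Hypothetical generator showing the constructor/return pairing after this change.
static MacroAssemblerCodeRef exampleThunkGenerator(VM* vm, MacroAssemblerCodePtr fallback)
{
    SpecializedThunkJIT jit(vm, 1); // emits the prologue and saves the tag registers
    jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
    // ... compute into fpRegT0 ...
    jit.returnDouble(SpecializedThunkJIT::fpRegT0); // restores tag registers, emits the epilogue, returns
    return jit.finalize(fallback, "example");
}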
diff --git a/Source/JavaScriptCore/jit/SpillRegistersMode.h b/Source/JavaScriptCore/jit/SpillRegistersMode.h
new file mode 100644
index 000000000..4f95375a3
--- /dev/null
+++ b/Source/JavaScriptCore/jit/SpillRegistersMode.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2014 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+namespace JSC {
+
+enum SpillRegistersMode { NeedToSpill, DontSpill };
+
+}
diff --git a/Source/JavaScriptCore/jit/TagRegistersMode.cpp b/Source/JavaScriptCore/jit/TagRegistersMode.cpp
new file mode 100644
index 000000000..deb691318
--- /dev/null
+++ b/Source/JavaScriptCore/jit/TagRegistersMode.cpp
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+#include "TagRegistersMode.h"
+
+#include <wtf/PrintStream.h>
+
+namespace WTF {
+
+using namespace JSC;
+
+void printInternal(PrintStream& out, TagRegistersMode mode)
+{
+ switch (mode) {
+ case DoNotHaveTagRegisters:
+ out.print("DoNotHaveTagRegisters");
+ return;
+ case HaveTagRegisters:
+ out.print("HaveTagRegisters");
+ return;
+ }
+
+ RELEASE_ASSERT_NOT_REACHED();
+}
+
+} // namespace WTF
+
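The WTF::printInternal() overload is the hook that lets the enum flow straight through WTF's printing templates, so a mode can be handed to dataLog() (or any PrintStream) without manual formatting. A minimal sketch assuming the usual WTF logging entry point:

// Illustrative caller; dataLog() routes the enum to printInternal() above.
#include <wtf/DataLog.h>

static void logTagRegistersMode(JSC::TagRegistersMode mode)
{
    WTF::dataLog("tag registers mode: ", mode, "\n");
}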
diff --git a/Source/JavaScriptCore/jit/TagRegistersMode.h b/Source/JavaScriptCore/jit/TagRegistersMode.h
new file mode 100644
index 000000000..b46e78032
--- /dev/null
+++ b/Source/JavaScriptCore/jit/TagRegistersMode.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2016 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+namespace JSC {
+
+enum TagRegistersMode {
+ DoNotHaveTagRegisters,
+ HaveTagRegisters
+};
+
+} // namespace JSC
+
+namespace WTF {
+
+class PrintStream;
+void printInternal(PrintStream&, JSC::TagRegistersMode);
+
+} // namespace WTF
diff --git a/Source/JavaScriptCore/jit/TempRegisterSet.cpp b/Source/JavaScriptCore/jit/TempRegisterSet.cpp
index 9d80bbc57..9c2e73d43 100644
--- a/Source/JavaScriptCore/jit/TempRegisterSet.cpp
+++ b/Source/JavaScriptCore/jit/TempRegisterSet.cpp
@@ -28,12 +28,15 @@
#if ENABLE(JIT)
+#include "JSCInlines.h"
#include "RegisterSet.h"
namespace JSC {
TempRegisterSet::TempRegisterSet(const RegisterSet& other)
{
+ clearAll();
+
for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
GPRReg reg = GPRInfo::toRegister(i);
if (other.get(reg))
diff --git a/Source/JavaScriptCore/jit/TempRegisterSet.h b/Source/JavaScriptCore/jit/TempRegisterSet.h
index 0915e796a..9983229ef 100644
--- a/Source/JavaScriptCore/jit/TempRegisterSet.h
+++ b/Source/JavaScriptCore/jit/TempRegisterSet.h
@@ -23,10 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef TempRegisterSet_h
-#define TempRegisterSet_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(JIT)
@@ -41,8 +38,7 @@ class TempRegisterSet {
public:
TempRegisterSet()
{
- for (unsigned i = numberOfBytesInTempRegisterSet; i--;)
- m_set[i] = 0;
+ clearAll();
}
TempRegisterSet(const RegisterSet&);
@@ -118,6 +114,16 @@ public:
return getBit(GPRInfo::numberOfRegisters + index);
}
+ // Return the index'th free FPR.
+ FPRReg getFreeFPR(unsigned index = 0) const
+ {
+ for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
+ if (!getFPRByIndex(i) && !index--)
+ return FPRInfo::toRegister(i);
+ }
+ return InvalidFPRReg;
+ }
+
template<typename BankInfo>
void setByIndex(unsigned index)
{
@@ -164,6 +170,12 @@ public:
}
private:
+ void clearAll()
+ {
+ for (unsigned i = numberOfBytesInTempRegisterSet; i--;)
+ m_set[i] = 0;
+ }
+
void setBit(unsigned i)
{
ASSERT(i < totalNumberOfRegisters);
@@ -205,6 +217,3 @@ struct TempRegisterSet { };
} // namespace JSC
#endif // ENABLE(JIT)
-
-#endif // TempRegisterSet_h
-
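Besides the clearAll() refactoring, the header gains getFreeFPR(), which walks the FPR bank and returns the index'th register not present in the set, or InvalidFPRReg when every FPR is taken. A short sketch of the intended use (the caller is hypothetical):

// Illustrative only: pick a temporary FPR that the current set does not occupy.
static FPRReg chooseTemporaryFPR(const TempRegisterSet& inUse)
{
    FPRReg reg = inUse.getFreeFPR();
    if (reg == InvalidFPRReg) {
        // every FPR is occupied; a real caller would spill or take a slow path here
    }
    return reg;
}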
diff --git a/Source/JavaScriptCore/jit/ThunkGenerator.h b/Source/JavaScriptCore/jit/ThunkGenerator.h
index a9d7e04ee..d38ec2334 100644
--- a/Source/JavaScriptCore/jit/ThunkGenerator.h
+++ b/Source/JavaScriptCore/jit/ThunkGenerator.h
@@ -23,10 +23,7 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef ThunkGenerator_h
-#define ThunkGenerator_h
-
-#include <wtf/Platform.h>
+#pragma once
#if ENABLE(JIT)
@@ -39,6 +36,3 @@ typedef MacroAssemblerCodeRef (*ThunkGenerator)(VM*);
} // namespace JSC
#endif // ENABLE(JIT)
-
-#endif // ThunkGenerator_h
-
diff --git a/Source/JavaScriptCore/jit/ThunkGenerators.cpp b/Source/JavaScriptCore/jit/ThunkGenerators.cpp
index f8f5cbaf5..9a65506b3 100644
--- a/Source/JavaScriptCore/jit/ThunkGenerators.cpp
+++ b/Source/JavaScriptCore/jit/ThunkGenerators.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010, 2012, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2010, 2012-2014, 2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,12 +27,18 @@
#include "ThunkGenerators.h"
#include "CodeBlock.h"
+#include "DFGSpeculativeJIT.h"
+#include "JITExceptions.h"
#include "JITOperations.h"
#include "JSArray.h"
-#include "JSArrayIterator.h"
-#include "JSStack.h"
-#include "Operations.h"
+#include "JSBoundFunction.h"
+#include "MathCommon.h"
+#include "MaxFrameExtentForSlowPathCall.h"
+#include "JSCInlines.h"
+#include "JSWebAssemblyInstance.h"
+#include "JSWebAssemblyRuntimeError.h"
#include "SpecializedThunkJIT.h"
+#include "WasmExceptionType.h"
#include <wtf/InlineASM.h>
#include <wtf/StringPrintStream.h>
#include <wtf/text/StringImpl.h>
@@ -43,17 +49,14 @@ namespace JSC {
inline void emitPointerValidation(CCallHelpers& jit, GPRReg pointerGPR)
{
-#if !ASSERT_DISABLED
+ if (ASSERT_DISABLED)
+ return;
CCallHelpers::Jump isNonZero = jit.branchTestPtr(CCallHelpers::NonZero, pointerGPR);
- jit.breakpoint();
+ jit.abortWithReason(TGInvalidPointer);
isNonZero.link(&jit);
jit.pushToSave(pointerGPR);
jit.load8(pointerGPR, pointerGPR);
jit.popToRestore(pointerGPR);
-#else
- UNUSED_PARAM(jit);
- UNUSED_PARAM(pointerGPR);
-#endif
}
// We will jump here if the JIT code tries to make a call, but the
@@ -66,86 +69,100 @@ MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(VM* vm)
// even though we won't use it.
jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);
- // The CallFrame register points to the (failed) callee frame, so we need to pop back one frame.
- jit.emitGetCallerFrameFromCallFrameHeaderPtr(GPRInfo::callFrameRegister);
+ jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
- jit.setupArgumentsExecState();
+ jit.setupArguments(CCallHelpers::TrustedImmPtr(vm), GPRInfo::callFrameRegister);
jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(lookupExceptionHandler)), GPRInfo::nonArgGPR0);
emitPointerValidation(jit, GPRInfo::nonArgGPR0);
jit.call(GPRInfo::nonArgGPR0);
jit.jumpToExceptionHandler();
- LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
return FINALIZE_CODE(patchBuffer, ("Throw exception from call slow path thunk"));
}
static void slowPathFor(
- CCallHelpers& jit, VM* vm, P_JITOperation_E slowPathFunction)
+ CCallHelpers& jit, VM* vm, Sprt_JITOperation_ECli slowPathFunction)
{
- jit.preserveReturnAddressAfterCall(GPRInfo::nonArgGPR2);
- emitPointerValidation(jit, GPRInfo::nonArgGPR2);
- jit.emitPutReturnPCToCallFrameHeader(GPRInfo::nonArgGPR2);
+ jit.emitFunctionPrologue();
jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
- jit.setupArgumentsExecState();
+#if OS(WINDOWS) && CPU(X86_64)
+ // Windows X86_64 needs some space pointed to by arg0 for return types larger than 64 bits.
+ // Other argument values are shifted by 1. Use space on the stack for our two return values.
+ // Moving the stack down maxFrameExtentForSlowPathCall bytes gives us room for our 3 arguments
+ // and space for the 16 byte return area.
+ jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
+ jit.move(GPRInfo::regT2, GPRInfo::argumentGPR2);
+ jit.addPtr(CCallHelpers::TrustedImm32(32), CCallHelpers::stackPointerRegister, GPRInfo::argumentGPR0);
+ jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
emitPointerValidation(jit, GPRInfo::nonArgGPR0);
jit.call(GPRInfo::nonArgGPR0);
-
+ jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR, 8), GPRInfo::returnValueGPR2);
+ jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR), GPRInfo::returnValueGPR);
+ jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
+#else
+ if (maxFrameExtentForSlowPathCall)
+ jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
+ jit.setupArgumentsWithExecState(GPRInfo::regT2);
+ jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
+ emitPointerValidation(jit, GPRInfo::nonArgGPR0);
+ jit.call(GPRInfo::nonArgGPR0);
+ if (maxFrameExtentForSlowPathCall)
+ jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
+#endif
+
// This slow call will return the address of one of the following:
// 1) Exception throwing thunk.
// 2) Host call return value returner thingy.
// 3) The function to call.
- jit.emitGetReturnPCFromCallFrameHeaderPtr(GPRInfo::nonPreservedNonReturnGPR);
- jit.emitPutReturnPCToCallFrameHeader(CCallHelpers::TrustedImmPtr(0));
- emitPointerValidation(jit, GPRInfo::nonPreservedNonReturnGPR);
- jit.restoreReturnAddressBeforeReturn(GPRInfo::nonPreservedNonReturnGPR);
+ // The second return value GPR will hold a non-zero value for tail calls.
+
emitPointerValidation(jit, GPRInfo::returnValueGPR);
+ jit.emitFunctionEpilogue();
+
+ RELEASE_ASSERT(reinterpret_cast<void*>(KeepTheFrame) == reinterpret_cast<void*>(0));
+ CCallHelpers::Jump doNotTrash = jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::returnValueGPR2);
+
+ jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);
+ jit.prepareForTailCallSlow(GPRInfo::returnValueGPR);
+
+ doNotTrash.link(&jit);
jit.jump(GPRInfo::returnValueGPR);
}
-static MacroAssemblerCodeRef linkForThunkGenerator(
- VM* vm, CodeSpecializationKind kind)
+MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm)
{
// The return address is on the stack or in the link register. We will hence
// save the return address to the call frame while we make a C++ function call
// to perform linking and lazy compilation if necessary. We expect the callee
// to be in regT0/regT1 (payload/tag), the CallFrame to have already
// been adjusted, and all other registers to be available for use.
-
CCallHelpers jit(vm);
- slowPathFor(jit, vm, kind == CodeForCall ? operationLinkCall : operationLinkConstruct);
+ slowPathFor(jit, vm, operationLinkCall);
- LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
- return FINALIZE_CODE(
- patchBuffer,
- ("Link %s slow path thunk", kind == CodeForCall ? "call" : "construct"));
-}
-
-MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm)
-{
- return linkForThunkGenerator(vm, CodeForCall);
-}
-
-MacroAssemblerCodeRef linkConstructThunkGenerator(VM* vm)
-{
- return linkForThunkGenerator(vm, CodeForConstruct);
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
+ return FINALIZE_CODE(patchBuffer, ("Link call slow path thunk"));
}
// For closure optimizations, we only include calls, since if you're using closures for
// object construction then you're going to lose big time anyway.
-MacroAssemblerCodeRef linkClosureCallThunkGenerator(VM* vm)
+MacroAssemblerCodeRef linkPolymorphicCallThunkGenerator(VM* vm)
{
CCallHelpers jit(vm);
- slowPathFor(jit, vm, operationLinkClosureCall);
+ slowPathFor(jit, vm, operationLinkPolymorphicCall);
- LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
- return FINALIZE_CODE(patchBuffer, ("Link closure call slow path thunk"));
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
+ return FINALIZE_CODE(patchBuffer, ("Link polymorphic call slow path thunk"));
}
-static MacroAssemblerCodeRef virtualForThunkGenerator(
- VM* vm, CodeSpecializationKind kind)
+// FIXME: We should distinguish between a megamorphic virtual call vs. a slow
+// path virtual call so that we can enable fast tail calls for megamorphic
+// virtual calls by using the shuffler.
+// https://bugs.webkit.org/show_bug.cgi?id=148831
+MacroAssemblerCodeRef virtualThunkFor(VM* vm, CallLinkInfo& callLinkInfo)
{
// The callee is in regT0 (for JSVALUE32_64, the tag is in regT1).
// The return address is on the stack, or in the link register. We will hence
@@ -155,6 +172,12 @@ static MacroAssemblerCodeRef virtualForThunkGenerator(
CCallHelpers jit(vm);
CCallHelpers::JumpList slowCase;
+
+ // This is a slow path execution, and regT2 contains the CallLinkInfo. Count the
+ // slow path execution for the profiler.
+ jit.add32(
+ CCallHelpers::TrustedImm32(1),
+ CCallHelpers::Address(GPRInfo::regT2, CallLinkInfo::offsetOfSlowPathCount()));
// FIXME: we should have a story for eliminating these checks. In many cases,
// the DFG knows that the value is definitely a cell, or definitely a function.
@@ -169,189 +192,131 @@ static MacroAssemblerCodeRef virtualForThunkGenerator(
CCallHelpers::NotEqual, GPRInfo::regT1,
CCallHelpers::TrustedImm32(JSValue::CellTag)));
#endif
- jit.loadPtr(CCallHelpers::Address(GPRInfo::regT0, JSCell::structureOffset()), GPRInfo::nonArgGPR2);
- slowCase.append(
- jit.branchPtr(
- CCallHelpers::NotEqual,
- CCallHelpers::Address(GPRInfo::nonArgGPR2, Structure::classInfoOffset()),
- CCallHelpers::TrustedImmPtr(JSFunction::info())));
+ slowCase.append(jit.branchIfNotType(GPRInfo::regT0, JSFunctionType));
// Now we know we have a JSFunction.
jit.loadPtr(
CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfExecutable()),
- GPRInfo::nonArgGPR2);
- slowCase.append(
- jit.branch32(
- CCallHelpers::LessThan,
- CCallHelpers::Address(
- GPRInfo::nonArgGPR2, ExecutableBase::offsetOfNumParametersFor(kind)),
- CCallHelpers::TrustedImm32(0)));
-
- // Now we know that we have a CodeBlock, and we're committed to making a fast
- // call.
-
+ GPRInfo::regT4);
jit.loadPtr(
- CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfScopeChain()),
- GPRInfo::regT1);
-#if USE(JSVALUE64)
- jit.store64(
- GPRInfo::regT1,
- CCallHelpers::Address(
- GPRInfo::callFrameRegister,
- static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain));
-#else
- jit.storePtr(
- GPRInfo::regT1,
- CCallHelpers::Address(
- GPRInfo::callFrameRegister,
- static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain +
- OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
- jit.store32(
- CCallHelpers::TrustedImm32(JSValue::CellTag),
CCallHelpers::Address(
- GPRInfo::callFrameRegister,
- static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain +
- OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
-#endif
+ GPRInfo::regT4, ExecutableBase::offsetOfJITCodeWithArityCheckFor(
+ callLinkInfo.specializationKind())),
+ GPRInfo::regT4);
+ slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT4));
- jit.loadPtr(
- CCallHelpers::Address(GPRInfo::nonArgGPR2, ExecutableBase::offsetOfJITCodeWithArityCheckFor(kind)),
- GPRInfo::regT0);
+ // Now we know that we have a CodeBlock, and we're committed to making a fast
+ // call.
// Make a tail call. This will return back to JIT code.
- emitPointerValidation(jit, GPRInfo::regT0);
- jit.jump(GPRInfo::regT0);
+ emitPointerValidation(jit, GPRInfo::regT4);
+ if (callLinkInfo.isTailCall()) {
+ jit.preserveReturnAddressAfterCall(GPRInfo::regT0);
+ jit.prepareForTailCallSlow(GPRInfo::regT4);
+ }
+ jit.jump(GPRInfo::regT4);
slowCase.link(&jit);
// Here we don't know anything, so revert to the full slow path.
- slowPathFor(jit, vm, kind == CodeForCall ? operationVirtualCall : operationVirtualConstruct);
+ slowPathFor(jit, vm, operationVirtualCall);
- LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
return FINALIZE_CODE(
patchBuffer,
- ("Virtual %s slow path thunk", kind == CodeForCall ? "call" : "construct"));
+ ("Virtual %s slow path thunk",
+ callLinkInfo.callMode() == CallMode::Regular ? "call" : callLinkInfo.callMode() == CallMode::Tail ? "tail call" : "construct"));
}
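
For orientation, here is a rough C++ sketch of the dispatch the virtual-call thunk above emits. The mock types and field names are stand-ins invented for illustration; they do not correspond to real JSC declarations.

    #include <cstdint>

    // Mock types for illustration only; the real CallLinkInfo/ExecutableBase differ.
    enum class SpecializationKind { Call, Construct };
    struct MockExecutable { void* jitCodeWithArityCheck[2]; };
    struct MockFunction { MockExecutable* executable; };
    struct MockCallLinkInfo { uint32_t slowPathCount; SpecializationKind kind; };

    // Roughly what the generated virtual-call thunk does before tail-calling the target.
    void* virtualCallDispatch(MockFunction* callee, MockCallLinkInfo& info,
        void* (*slowPath)(MockCallLinkInfo&))
    {
        info.slowPathCount++;                       // profiler: count slow-path executions
        if (!callee)                                // stands in for the cell / JSFunctionType checks
            return slowPath(info);
        void* code = callee->executable->jitCodeWithArityCheck[static_cast<int>(info.kind)];
        if (!code)                                  // no JIT code with arity check yet
            return slowPath(info);                  // i.e. operationVirtualCall
        return code;                                // the thunk jumps (tail-calls) here
    }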
-MacroAssemblerCodeRef virtualCallThunkGenerator(VM* vm)
-{
- return virtualForThunkGenerator(vm, CodeForCall);
-}
-
-MacroAssemblerCodeRef virtualConstructThunkGenerator(VM* vm)
-{
- return virtualForThunkGenerator(vm, CodeForConstruct);
-}
+enum ThunkEntryType { EnterViaCall, EnterViaJumpWithSavedTags, EnterViaJumpWithoutSavedTags };
-static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind)
+static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind, ThunkEntryType entryType = EnterViaCall)
{
+ // FIXME: This should be able to log ShadowChicken prologue packets.
+ // https://bugs.webkit.org/show_bug.cgi?id=155689
+
int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind);
JSInterfaceJIT jit(vm);
-
- jit.emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
- jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
-#if CPU(X86)
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT0);
- jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
- jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
+ switch (entryType) {
+ case EnterViaCall:
+ jit.emitFunctionPrologue();
+ break;
+ case EnterViaJumpWithSavedTags:
+#if USE(JSVALUE64)
+ // We're coming from a specialized thunk that has saved the prior tag registers' contents.
+ // Restore them now.
+#if CPU(ARM64)
+ jit.popPair(JSInterfaceJIT::tagTypeNumberRegister, JSInterfaceJIT::tagMaskRegister);
+#else
+ jit.pop(JSInterfaceJIT::tagMaskRegister);
+ jit.pop(JSInterfaceJIT::tagTypeNumberRegister);
+#endif
+#endif
+ break;
+ case EnterViaJumpWithoutSavedTags:
+ jit.move(JSInterfaceJIT::framePointerRegister, JSInterfaceJIT::stackPointerRegister);
+ break;
+ }
- jit.peek(JSInterfaceJIT::regT1);
- jit.emitPutReturnPCToCallFrameHeader(JSInterfaceJIT::regT1);
+ jit.emitPutToCallFrameHeader(0, CallFrameSlot::codeBlock);
+ jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
+#if CPU(X86)
// Calling convention: f(ecx, edx, ...);
// Host function signature: f(ExecState*);
jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);
- jit.subPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(void*)), JSInterfaceJIT::stackPointerRegister); // Align stack after call.
+ jit.subPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister); // Align stack after prologue.
// call the function
- jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT1);
+ jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, JSInterfaceJIT::regT1);
jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1);
- jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction));
- jit.addPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(void*)), JSInterfaceJIT::stackPointerRegister);
+ jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister);
#elif CPU(X86_64)
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT0);
- jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
- jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
-
- jit.peek(JSInterfaceJIT::regT1);
- jit.emitPutReturnPCToCallFrameHeader(JSInterfaceJIT::regT1);
-
#if !OS(WINDOWS)
// Calling convention: f(edi, esi, edx, ecx, ...);
// Host function signature: f(ExecState*);
jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::edi);
- jit.subPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister); // Align stack after call.
-
- jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::esi);
+ jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, X86Registers::esi);
jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, JSFunction::offsetOfExecutable()), X86Registers::r9);
- jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));
- jit.addPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#else
// Calling convention: f(ecx, edx, r8, r9, ...);
// Host function signature: f(ExecState*);
jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);
- // Leave space for the callee parameter home addresses and align the stack.
- jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
+ // Leave space for the callee parameter home addresses.
+ // At this point the stack is aligned to 16 bytes, but if this changes at some point, we need to emit code to align it.
+ jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
- jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::edx);
+ jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, X86Registers::edx);
jit.loadPtr(JSInterfaceJIT::Address(X86Registers::edx, JSFunction::offsetOfExecutable()), X86Registers::r9);
- jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));
- jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
+ jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif
#elif CPU(ARM64)
- COMPILE_ASSERT(ARM64Registers::x3 != JSInterfaceJIT::regT1, prev_callframe_not_trampled_by_T1);
- COMPILE_ASSERT(ARM64Registers::x3 != JSInterfaceJIT::regT3, prev_callframe_not_trampled_by_T3);
COMPILE_ASSERT(ARM64Registers::x0 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_0);
COMPILE_ASSERT(ARM64Registers::x1 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_1);
COMPILE_ASSERT(ARM64Registers::x2 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_2);
- // Load caller frame's scope chain into this callframe so that whatever we call can
- // get to its global data.
- jit.emitGetCallerFrameFromCallFrameHeaderPtr(ARM64Registers::x3);
- jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, ARM64Registers::x3);
- jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
-
- jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
- jit.emitPutReturnPCToCallFrameHeader(ARM64Registers::lr);
-
// Host function signature: f(ExecState*);
jit.move(JSInterfaceJIT::callFrameRegister, ARM64Registers::x0);
- jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARM64Registers::x1);
+ jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, ARM64Registers::x1);
jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x1, JSFunction::offsetOfExecutable()), ARM64Registers::x2);
- jit.move(ARM64Registers::x3, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
jit.call(JSInterfaceJIT::Address(ARM64Registers::x2, executableOffsetToFunction));
-
- jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
-
-#elif CPU(ARM) || CPU(SH4) || CPU(MIPS)
- // Load caller frame's scope chain into this callframe so that whatever we call can get to its global data.
- jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT2);
- jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
- jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
-
- jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
- jit.emitPutReturnPCToCallFrameHeader(JSInterfaceJIT::regT3);
-
+#elif CPU(ARM) || CPU(MIPS)
#if CPU(MIPS)
// Allocate stack space for (unused) 16 bytes (8-byte aligned) for 4 arguments.
jit.subPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
@@ -361,8 +326,7 @@ static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind k
// Host function signature is f(ExecState*).
jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);
- jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::argumentGPR1);
- jit.move(JSInterfaceJIT::regT2, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
+ jit.emitGetFromCallFrameHeaderPtr(CallFrameSlot::callee, JSInterfaceJIT::argumentGPR1);
jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::argumentGPR1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));
@@ -370,12 +334,10 @@ static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind k
// Restore stack space
jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#endif
-
- jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
#else
#error "JIT not supported on this platform."
UNUSED_PARAM(executableOffsetToFunction);
- breakpoint();
+ abortWithReason(TGNotSupported);
#endif
// Check for an exception
@@ -385,40 +347,43 @@ static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind k
#else
JSInterfaceJIT::Jump exceptionHandler = jit.branch32(
JSInterfaceJIT::NotEqual,
- JSInterfaceJIT::AbsoluteAddress(reinterpret_cast<char*>(vm->addressOfException()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)),
- JSInterfaceJIT::TrustedImm32(JSValue::EmptyValueTag));
+ JSInterfaceJIT::AbsoluteAddress(vm->addressOfException()),
+ JSInterfaceJIT::TrustedImm32(0));
#endif
+ jit.emitFunctionEpilogue();
// Return.
jit.ret();
// Handle an exception
exceptionHandler.link(&jit);
- // Grab the return address.
- jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT1);
-
- jit.move(JSInterfaceJIT::TrustedImmPtr(&vm->exceptionLocation), JSInterfaceJIT::regT2);
- jit.storePtr(JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
-
+ jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
#if CPU(X86) && USE(JSVALUE32_64)
jit.addPtr(JSInterfaceJIT::TrustedImm32(-12), JSInterfaceJIT::stackPointerRegister);
- jit.push(JSInterfaceJIT::callFrameRegister);
+ jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT0);
+ jit.push(JSInterfaceJIT::regT0);
#else
+#if OS(WINDOWS)
+ // Allocate space on stack for the 4 parameter registers.
+ jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
+#endif
jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);
#endif
jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(operationVMHandleException).value()), JSInterfaceJIT::regT3);
jit.call(JSInterfaceJIT::regT3);
#if CPU(X86) && USE(JSVALUE32_64)
jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
+#elif OS(WINDOWS)
+ jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif
jit.jumpToExceptionHandler();
- LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
- return FINALIZE_CODE(patchBuffer, ("native %s trampoline", toCString(kind).data()));
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
+ return FINALIZE_CODE(patchBuffer, ("native %s%s trampoline", entryType == EnterViaJumpWithSavedTags ? "Tail With Saved Tags " : entryType == EnterViaJumpWithoutSavedTags ? "Tail Without Saved Tags " : "", toCString(kind).data()));
}
MacroAssemblerCodeRef nativeCallGenerator(VM* vm)
@@ -426,43 +391,80 @@ MacroAssemblerCodeRef nativeCallGenerator(VM* vm)
return nativeForGenerator(vm, CodeForCall);
}
+MacroAssemblerCodeRef nativeTailCallGenerator(VM* vm)
+{
+ return nativeForGenerator(vm, CodeForCall, EnterViaJumpWithSavedTags);
+}
+
+MacroAssemblerCodeRef nativeTailCallWithoutSavedTagsGenerator(VM* vm)
+{
+ return nativeForGenerator(vm, CodeForCall, EnterViaJumpWithoutSavedTags);
+}
+
MacroAssemblerCodeRef nativeConstructGenerator(VM* vm)
{
return nativeForGenerator(vm, CodeForConstruct);
}
-MacroAssemblerCodeRef arityFixup(VM* vm)
+MacroAssemblerCodeRef arityFixupGenerator(VM* vm)
{
JSInterfaceJIT jit(vm);
- // We enter with fixup count in regT0
+ // We enter with fixup count in argumentGPR0
+ // We have the guarantee that a0, a1, a2, t3, t4 and t5 (or t0 for Windows) are all distinct :-)
#if USE(JSVALUE64)
+#if OS(WINDOWS)
+ const GPRReg extraTemp = JSInterfaceJIT::regT0;
+#else
+ const GPRReg extraTemp = JSInterfaceJIT::regT5;
+#endif
# if CPU(X86_64)
jit.pop(JSInterfaceJIT::regT4);
# endif
- jit.neg64(JSInterfaceJIT::regT0);
jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
- jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * 8), JSInterfaceJIT::regT2);
- jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2);
-
- // Move current frame down regT0 number of slots
+ jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrameSlot::argumentCount * sizeof(Register)), JSInterfaceJIT::argumentGPR2);
+ jit.add32(JSInterfaceJIT::TrustedImm32(CallFrame::headerSizeInRegisters), JSInterfaceJIT::argumentGPR2);
+
+ // Check to see if we have extra slots we can use
+ jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR1);
+ jit.and32(JSInterfaceJIT::TrustedImm32(stackAlignmentRegisters() - 1), JSInterfaceJIT::argumentGPR1);
+ JSInterfaceJIT::Jump noExtraSlot = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR1);
+ jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), extraTemp);
+ JSInterfaceJIT::Label fillExtraSlots(jit.label());
+ jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight));
+ jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2);
+ jit.branchSub32(JSInterfaceJIT::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR1).linkTo(fillExtraSlots, &jit);
+ jit.and32(JSInterfaceJIT::TrustedImm32(-stackAlignmentRegisters()), JSInterfaceJIT::argumentGPR0);
+ JSInterfaceJIT::Jump done = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR0);
+ noExtraSlot.link(&jit);
+
+ jit.neg64(JSInterfaceJIT::argumentGPR0);
+
+ // Adjust call frame register and stack pointer to account for missing args.
+ // We need to change the stack pointer before performing the copy/fill loops.
+ // The stack space below the stack pointer is considered unused by the OS, so
+ // the OS may corrupt it when constructing a signal stack.
+ jit.move(JSInterfaceJIT::argumentGPR0, extraTemp);
+ jit.lshift64(JSInterfaceJIT::TrustedImm32(3), extraTemp);
+ jit.addPtr(extraTemp, JSInterfaceJIT::callFrameRegister);
+ jit.addPtr(extraTemp, JSInterfaceJIT::stackPointerRegister);
+
+ // Move current frame down argumentGPR0 number of slots
JSInterfaceJIT::Label copyLoop(jit.label());
- jit.load64(JSInterfaceJIT::regT3, JSInterfaceJIT::regT1);
- jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
+ jit.load64(JSInterfaceJIT::regT3, extraTemp);
+ jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight));
jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
- jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit);
+ jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(copyLoop, &jit);
- // Fill in regT0 missing arg slots with undefined
- jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2);
- jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), JSInterfaceJIT::regT1);
+ // Fill in argumentGPR0 missing arg slots with undefined
+ jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR2);
+ jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), extraTemp);
JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
- jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
+ jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight));
jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
- jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit);
-
- // Adjust call frame register to account for missing args
- jit.lshift64(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT0);
- jit.addPtr(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister);
+ jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(fillUndefinedLoop, &jit);
+
+ done.link(&jit);
# if CPU(X86_64)
jit.push(JSInterfaceJIT::regT4);
@@ -472,34 +474,54 @@ MacroAssemblerCodeRef arityFixup(VM* vm)
# if CPU(X86)
jit.pop(JSInterfaceJIT::regT4);
# endif
- jit.neg32(JSInterfaceJIT::regT0);
jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
- jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * 8), JSInterfaceJIT::regT2);
- jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2);
-
- // Move current frame down regT0 number of slots
+ jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, CallFrameSlot::argumentCount * sizeof(Register)), JSInterfaceJIT::argumentGPR2);
+ jit.add32(JSInterfaceJIT::TrustedImm32(CallFrame::headerSizeInRegisters), JSInterfaceJIT::argumentGPR2);
+
+ // Check to see if we have extra slots we can use
+ jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR1);
+ jit.and32(JSInterfaceJIT::TrustedImm32(stackAlignmentRegisters() - 1), JSInterfaceJIT::argumentGPR1);
+ JSInterfaceJIT::Jump noExtraSlot = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR1);
+ JSInterfaceJIT::Label fillExtraSlots(jit.label());
+ jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT5);
+ jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight, PayloadOffset));
+ jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT5);
+ jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight, TagOffset));
+ jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2);
+ jit.branchSub32(JSInterfaceJIT::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR1).linkTo(fillExtraSlots, &jit);
+ jit.and32(JSInterfaceJIT::TrustedImm32(-stackAlignmentRegisters()), JSInterfaceJIT::argumentGPR0);
+ JSInterfaceJIT::Jump done = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR0);
+ noExtraSlot.link(&jit);
+
+ jit.neg32(JSInterfaceJIT::argumentGPR0);
+
+ // Move current frame down argumentGPR0 number of slots
JSInterfaceJIT::Label copyLoop(jit.label());
- jit.load32(JSInterfaceJIT::regT3, JSInterfaceJIT::regT1);
- jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
- jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, 4), JSInterfaceJIT::regT1);
- jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight, 4));
+ jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, PayloadOffset), JSInterfaceJIT::regT5);
+ jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, PayloadOffset));
+ jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, TagOffset), JSInterfaceJIT::regT5);
+ jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, TagOffset));
jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
- jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit);
+ jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(copyLoop, &jit);
- // Fill in regT0 missing arg slots with undefined
- jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2);
+ // Fill in argumentGPR0 missing arg slots with undefined
+ jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR2);
JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
- jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT1);
- jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
- jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT1);
- jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight, 4));
+ jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT5);
+ jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, PayloadOffset));
+ jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT5);
+ jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, TagOffset));
jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
- jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit);
+ jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(fillUndefinedLoop, &jit);
+
+ // Adjust call frame register and stack pointer to account for missing args
+ jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::regT5);
+ jit.lshift32(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT5);
+ jit.addPtr(JSInterfaceJIT::regT5, JSInterfaceJIT::callFrameRegister);
+ jit.addPtr(JSInterfaceJIT::regT5, JSInterfaceJIT::stackPointerRegister);
- // Adjust call frame register to account for missing args
- jit.lshift32(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT0);
- jit.addPtr(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister);
+ done.link(&jit);
# if CPU(X86)
jit.push(JSInterfaceJIT::regT4);
@@ -507,10 +529,20 @@ MacroAssemblerCodeRef arityFixup(VM* vm)
jit.ret();
#endif
- LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
return FINALIZE_CODE(patchBuffer, ("fixup arity"));
}
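
The slot arithmetic in the arity fixup above is easier to see in scalar form. The sketch below mirrors the register logic under the assumption that stackAlignmentRegisters() is a power of two (e.g. 2 on 64-bit targets); the function and struct names are invented for illustration.

    #include <cstddef>

    struct ArityFixupPlan {
        size_t slotsFilledInPlace;   // missing args written as undefined into existing alignment padding
        size_t slotsFrameSlidesDown; // how many slots the whole frame (and stack pointer) moves down
    };

    // Mirrors the register arithmetic in arityFixupGenerator, assuming a power-of-two alignment.
    ArityFixupPlan planArityFixup(size_t fixupCount, size_t alignmentRegisters)
    {
        ArityFixupPlan plan;
        plan.slotsFilledInPlace = fixupCount & (alignmentRegisters - 1);
        plan.slotsFrameSlidesDown = fixupCount & ~(alignmentRegisters - 1);
        return plan;
    }
    // Example: fixupCount = 5, alignmentRegisters = 2 -> 1 slot filled in place, frame slides by 4 slots.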
+MacroAssemblerCodeRef unreachableGenerator(VM* vm)
+{
+ JSInterfaceJIT jit(vm);
+
+ jit.breakpoint();
+
+ LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
+ return FINALIZE_CODE(patchBuffer, ("unreachable thunk"));
+}
+
static void stringCharLoad(SpecializedThunkJIT& jit, VM* vm)
{
// load string
@@ -554,7 +586,7 @@ MacroAssemblerCodeRef charCodeAtThunkGenerator(VM* vm)
SpecializedThunkJIT jit(vm, 1);
stringCharLoad(jit, vm);
jit.returnInt32(SpecializedThunkJIT::regT0);
- return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "charCodeAt");
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charCodeAt");
}
MacroAssemblerCodeRef charAtThunkGenerator(VM* vm)
@@ -563,7 +595,7 @@ MacroAssemblerCodeRef charAtThunkGenerator(VM* vm)
stringCharLoad(jit, vm);
charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
jit.returnJSCell(SpecializedThunkJIT::regT0);
- return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "charAt");
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charAt");
}
MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM* vm)
@@ -573,7 +605,28 @@ MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM* vm)
jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
jit.returnJSCell(SpecializedThunkJIT::regT0);
- return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "fromCharCode");
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "fromCharCode");
+}
+
+MacroAssemblerCodeRef clz32ThunkGenerator(VM* vm)
+{
+ SpecializedThunkJIT jit(vm, 1);
+ MacroAssembler::Jump nonIntArgJump;
+ jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArgJump);
+
+ SpecializedThunkJIT::Label convertedArgumentReentry(&jit);
+ jit.countLeadingZeros32(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
+ jit.returnInt32(SpecializedThunkJIT::regT1);
+
+ if (jit.supportsFloatingPointTruncate()) {
+ nonIntArgJump.link(&jit);
+ jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
+ jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(convertedArgumentReentry, &jit);
+ jit.appendFailure(jit.jump());
+ } else
+ jit.appendFailure(nonIntArgJump);
+
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "clz32");
}
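
As a cross-check on what the specialized clz32 path computes: it only covers arguments that are already int32 or truncate cleanly to int32, leaving the spec's full ToUint32 conversion to the generic native call. A scalar sketch (names invented):

    #include <cstdint>

    // Leading-zero count over the 32-bit pattern, matching countLeadingZeros32 (returns 32 for 0).
    int32_t clz32FastPath(int32_t argument)
    {
        uint32_t bits = static_cast<uint32_t>(argument);
        int32_t count = 0;
        for (int32_t bit = 31; bit >= 0 && !((bits >> bit) & 1u); --bit)
            count++;
        return count;
    }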
MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm)
@@ -585,25 +638,15 @@ MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm)
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
- return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "sqrt");
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "sqrt");
}
#define UnaryDoubleOpWrapper(function) function##Wrapper
enum MathThunkCallingConvention { };
typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);
-extern "C" {
-
-double jsRound(double) REFERENCED_FROM_ASM;
-double jsRound(double d)
-{
- double integer = ceil(d);
- return integer - (integer - d > 0.5);
-}
-
-}
-#if CPU(X86_64) && COMPILER(GCC) && (OS(DARWIN) || OS(LINUX))
+#if CPU(X86_64) && COMPILER(GCC_OR_CLANG) && (OS(DARWIN) || OS(LINUX))
#define defineUnaryDoubleOpWrapper(function) \
asm( \
@@ -611,7 +654,9 @@ double jsRound(double d)
".globl " SYMBOL_STRING(function##Thunk) "\n" \
HIDE_SYMBOL(function##Thunk) "\n" \
SYMBOL_STRING(function##Thunk) ":" "\n" \
+ "pushq %rax\n" \
"call " GLOBAL_REFERENCE(function) "\n" \
+ "popq %rcx\n" \
"ret\n" \
);\
extern "C" { \
@@ -619,7 +664,7 @@ double jsRound(double d)
} \
static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
-#elif CPU(X86) && COMPILER(GCC) && OS(LINUX) && defined(__PIC__)
+#elif CPU(X86) && COMPILER(GCC_OR_CLANG) && OS(LINUX) && defined(__PIC__)
#define defineUnaryDoubleOpWrapper(function) \
asm( \
".text\n" \
@@ -643,19 +688,19 @@ double jsRound(double d)
} \
static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
-#elif CPU(X86) && COMPILER(GCC) && (OS(DARWIN) || OS(LINUX))
+#elif CPU(X86) && COMPILER(GCC_OR_CLANG) && (OS(DARWIN) || OS(LINUX))
#define defineUnaryDoubleOpWrapper(function) \
asm( \
".text\n" \
".globl " SYMBOL_STRING(function##Thunk) "\n" \
HIDE_SYMBOL(function##Thunk) "\n" \
SYMBOL_STRING(function##Thunk) ":" "\n" \
- "subl $8, %esp\n" \
+ "subl $20, %esp\n" \
"movsd %xmm0, (%esp) \n" \
"call " GLOBAL_REFERENCE(function) "\n" \
"fstpl (%esp) \n" \
"movsd (%esp), %xmm0 \n" \
- "addl $8, %esp\n" \
+ "addl $20, %esp\n" \
"ret\n" \
);\
extern "C" { \
@@ -663,7 +708,7 @@ double jsRound(double d)
} \
static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
-#elif CPU(ARM_THUMB2) && COMPILER(GCC) && PLATFORM(IOS)
+#elif CPU(ARM_THUMB2) && COMPILER(GCC_OR_CLANG) && PLATFORM(IOS)
#define defineUnaryDoubleOpWrapper(function) \
asm( \
@@ -696,12 +741,39 @@ double jsRound(double d)
HIDE_SYMBOL(function##Thunk) "\n" \
SYMBOL_STRING(function##Thunk) ":" "\n" \
"b " GLOBAL_REFERENCE(function) "\n" \
+ ".previous" \
); \
extern "C" { \
MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
} \
static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
+#elif CPU(X86) && COMPILER(MSVC) && OS(WINDOWS)
+
+// MSVC does not allow floor, etc., to be called directly from inline assembly, so we need to wrap these functions.
+static double (_cdecl *floorFunction)(double) = floor;
+static double (_cdecl *ceilFunction)(double) = ceil;
+static double (_cdecl *truncFunction)(double) = trunc;
+static double (_cdecl *expFunction)(double) = exp;
+static double (_cdecl *logFunction)(double) = log;
+static double (_cdecl *jsRoundFunction)(double) = jsRound;
+
+#define defineUnaryDoubleOpWrapper(function) \
+ extern "C" __declspec(naked) MathThunkCallingConvention function##Thunk(MathThunkCallingConvention) \
+ { \
+ __asm \
+ { \
+ __asm sub esp, 20 \
+ __asm movsd mmword ptr [esp], xmm0 \
+ __asm call function##Function \
+ __asm fstp qword ptr [esp] \
+ __asm movsd xmm0, mmword ptr [esp] \
+ __asm add esp, 20 \
+ __asm ret \
+ } \
+ } \
+ static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
+
#else
#define defineUnaryDoubleOpWrapper(function) \
@@ -713,10 +785,8 @@ defineUnaryDoubleOpWrapper(exp);
defineUnaryDoubleOpWrapper(log);
defineUnaryDoubleOpWrapper(floor);
defineUnaryDoubleOpWrapper(ceil);
+defineUnaryDoubleOpWrapper(trunc);
-static const double oneConstant = 1.0;
-static const double negativeHalfConstant = -0.5;
-static const double zeroConstant = 0.0;
static const double halfConstant = 0.5;
MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
@@ -729,18 +799,21 @@ MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
jit.returnInt32(SpecializedThunkJIT::regT0);
nonIntJump.link(&jit);
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
-#if CPU(ARM64)
- SpecializedThunkJIT::JumpList doubleResult;
- jit.floorDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
- jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
- jit.returnInt32(SpecializedThunkJIT::regT0);
- doubleResult.link(&jit);
- jit.returnDouble(SpecializedThunkJIT::fpRegT0);
-#else
+
+ if (jit.supportsFloatingPointRounding()) {
+ SpecializedThunkJIT::JumpList doubleResult;
+ jit.floorDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
+ jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
+ jit.returnInt32(SpecializedThunkJIT::regT0);
+ doubleResult.link(&jit);
+ jit.returnDouble(SpecializedThunkJIT::fpRegT0);
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "floor");
+ }
+
SpecializedThunkJIT::Jump intResult;
SpecializedThunkJIT::JumpList doubleResult;
if (jit.supportsFloatingPointTruncate()) {
- jit.loadDouble(&zeroConstant, SpecializedThunkJIT::fpRegT1);
+ jit.moveZeroToDouble(SpecializedThunkJIT::fpRegT1);
doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
SpecializedThunkJIT::JumpList slowPath;
// Handle the negative doubles in the slow path for now.
@@ -756,8 +829,7 @@ MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
jit.returnInt32(SpecializedThunkJIT::regT0);
doubleResult.link(&jit);
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
-#endif // CPU(ARM64)
- return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "floor");
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "floor");
}
MacroAssemblerCodeRef ceilThunkGenerator(VM* vm)
@@ -770,17 +842,40 @@ MacroAssemblerCodeRef ceilThunkGenerator(VM* vm)
jit.returnInt32(SpecializedThunkJIT::regT0);
nonIntJump.link(&jit);
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
-#if CPU(ARM64)
- jit.ceilDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
-#else
- jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil));
-#endif // CPU(ARM64)
+ if (jit.supportsFloatingPointRounding())
+ jit.ceilDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
+ else
+ jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil));
+
SpecializedThunkJIT::JumpList doubleResult;
jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
jit.returnInt32(SpecializedThunkJIT::regT0);
doubleResult.link(&jit);
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
- return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "ceil");
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "ceil");
+}
+
+MacroAssemblerCodeRef truncThunkGenerator(VM* vm)
+{
+ SpecializedThunkJIT jit(vm, 1);
+ if (!UnaryDoubleOpWrapper(trunc) || !jit.supportsFloatingPoint())
+ return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
+ MacroAssembler::Jump nonIntJump;
+ jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
+ jit.returnInt32(SpecializedThunkJIT::regT0);
+ nonIntJump.link(&jit);
+ jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
+ if (jit.supportsFloatingPointRounding())
+ jit.roundTowardZeroDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
+ else
+ jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(trunc));
+
+ SpecializedThunkJIT::JumpList doubleResult;
+ jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
+ jit.returnInt32(SpecializedThunkJIT::regT0);
+ doubleResult.link(&jit);
+ jit.returnDouble(SpecializedThunkJIT::fpRegT0);
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "trunc");
}
MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
@@ -796,12 +891,12 @@ MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
SpecializedThunkJIT::Jump intResult;
SpecializedThunkJIT::JumpList doubleResult;
if (jit.supportsFloatingPointTruncate()) {
- jit.loadDouble(&zeroConstant, SpecializedThunkJIT::fpRegT1);
+ jit.moveZeroToDouble(SpecializedThunkJIT::fpRegT1);
doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
SpecializedThunkJIT::JumpList slowPath;
// Handle the negative doubles in the slow path for now.
slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
- jit.loadDouble(&halfConstant, SpecializedThunkJIT::fpRegT1);
+ jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), SpecializedThunkJIT::fpRegT1);
jit.addDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0));
intResult = jit.jump();
@@ -814,7 +909,7 @@ MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
jit.returnInt32(SpecializedThunkJIT::regT0);
doubleResult.link(&jit);
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
- return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "round");
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "round");
}
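
The inline fast path in the round thunk above only handles positive doubles whose rounded value fits in int32; zero is returned as a double, and negative or NaN inputs fall through to the appended slow path. A scalar sketch of that fast path, where the overflow bound is an assumption standing in for branchTruncateDoubleToInt32's failure case:

    #include <cstdint>

    // Returns true and sets result only when the thunk's integer fast path applies.
    bool roundFastPath(double x, int32_t& result)
    {
        if (!(x > 0.0))
            return false;                             // zero, negative, or NaN: not handled here
        double shifted = x + 0.5;
        if (shifted >= 2147483648.0)
            return false;                             // truncation would not fit in int32
        result = static_cast<int32_t>(shifted);       // trunc(x + 0.5) == round-half-up for positive x
        return true;
    }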
MacroAssemblerCodeRef expThunkGenerator(VM* vm)
@@ -827,7 +922,7 @@ MacroAssemblerCodeRef expThunkGenerator(VM* vm)
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(exp));
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
- return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "exp");
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "exp");
}
MacroAssemblerCodeRef logThunkGenerator(VM* vm)
@@ -840,7 +935,7 @@ MacroAssemblerCodeRef logThunkGenerator(VM* vm)
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(log));
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
- return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "log");
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "log");
}
MacroAssemblerCodeRef absThunkGenerator(VM* vm)
@@ -848,71 +943,56 @@ MacroAssemblerCodeRef absThunkGenerator(VM* vm)
SpecializedThunkJIT jit(vm, 1);
if (!jit.supportsFloatingPointAbs())
return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
+
+#if USE(JSVALUE64)
+ unsigned virtualRegisterIndex = CallFrame::argumentOffset(0);
+ jit.load64(AssemblyHelpers::addressFor(virtualRegisterIndex), GPRInfo::regT0);
+ MacroAssembler::Jump notInteger = jit.branch64(MacroAssembler::Below, GPRInfo::regT0, GPRInfo::tagTypeNumberRegister);
+
+ // Abs Int32.
+ jit.rshift32(GPRInfo::regT0, MacroAssembler::TrustedImm32(31), GPRInfo::regT1);
+ jit.add32(GPRInfo::regT1, GPRInfo::regT0);
+ jit.xor32(GPRInfo::regT1, GPRInfo::regT0);
+
+ // IntMin cannot be inverted.
+ MacroAssembler::Jump integerIsIntMin = jit.branchTest32(MacroAssembler::Signed, GPRInfo::regT0);
+
+ // Box and finish.
+ jit.or64(GPRInfo::tagTypeNumberRegister, GPRInfo::regT0);
+ MacroAssembler::Jump doneWithIntegers = jit.jump();
+
+ // Handle Doubles.
+ notInteger.link(&jit);
+ jit.appendFailure(jit.branchTest64(MacroAssembler::Zero, GPRInfo::regT0, GPRInfo::tagTypeNumberRegister));
+ jit.unboxDoubleWithoutAssertions(GPRInfo::regT0, GPRInfo::regT0, FPRInfo::fpRegT0);
+ MacroAssembler::Label absFPR0Label = jit.label();
+ jit.absDouble(FPRInfo::fpRegT0, FPRInfo::fpRegT1);
+ jit.boxDouble(FPRInfo::fpRegT1, GPRInfo::regT0);
+
+ // Tail.
+ doneWithIntegers.link(&jit);
+ jit.returnJSValue(GPRInfo::regT0);
+
+ // We know the value of regT0 is IntMin. We could load that value from memory but
+ // it is simpler to just convert it.
+ integerIsIntMin.link(&jit);
+ jit.convertInt32ToDouble(GPRInfo::regT0, FPRInfo::fpRegT0);
+ jit.jump().linkTo(absFPR0Label, &jit);
+#else
MacroAssembler::Jump nonIntJump;
jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntJump);
jit.rshift32(SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(31), SpecializedThunkJIT::regT1);
jit.add32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
- jit.appendFailure(jit.branch32(MacroAssembler::Equal, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1 << 31)));
+ jit.appendFailure(jit.branchTest32(MacroAssembler::Signed, SpecializedThunkJIT::regT0));
jit.returnInt32(SpecializedThunkJIT::regT0);
nonIntJump.link(&jit);
// Shame about the double int conversion here.
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
jit.absDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
jit.returnDouble(SpecializedThunkJIT::fpRegT1);
- return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "abs");
-}
-
-MacroAssemblerCodeRef powThunkGenerator(VM* vm)
-{
- SpecializedThunkJIT jit(vm, 2);
- if (!jit.supportsFloatingPoint())
- return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
-
- jit.loadDouble(&oneConstant, SpecializedThunkJIT::fpRegT1);
- jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
- MacroAssembler::Jump nonIntExponent;
- jit.loadInt32Argument(1, SpecializedThunkJIT::regT0, nonIntExponent);
- jit.appendFailure(jit.branch32(MacroAssembler::LessThan, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(0)));
-
- MacroAssembler::Jump exponentIsZero = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0);
- MacroAssembler::Label startLoop(jit.label());
-
- MacroAssembler::Jump exponentIsEven = jit.branchTest32(MacroAssembler::Zero, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1));
- jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
- exponentIsEven.link(&jit);
- jit.mulDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
- jit.rshift32(MacroAssembler::TrustedImm32(1), SpecializedThunkJIT::regT0);
- jit.branchTest32(MacroAssembler::NonZero, SpecializedThunkJIT::regT0).linkTo(startLoop, &jit);
-
- exponentIsZero.link(&jit);
-
- {
- SpecializedThunkJIT::JumpList doubleResult;
- jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
- jit.returnInt32(SpecializedThunkJIT::regT0);
- doubleResult.link(&jit);
- jit.returnDouble(SpecializedThunkJIT::fpRegT1);
- }
-
- if (jit.supportsFloatingPointSqrt()) {
- nonIntExponent.link(&jit);
- jit.loadDouble(&negativeHalfConstant, SpecializedThunkJIT::fpRegT3);
- jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::regT0);
- jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleLessThanOrEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
- jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::fpRegT3));
- jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
- jit.divDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
-
- SpecializedThunkJIT::JumpList doubleResult;
- jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT0);
- jit.returnInt32(SpecializedThunkJIT::regT0);
- doubleResult.link(&jit);
- jit.returnDouble(SpecializedThunkJIT::fpRegT1);
- } else
- jit.appendFailure(nonIntExponent);
-
- return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "pow");
+#endif
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "abs");
}
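
The JSVALUE64 path above uses the classic branchless integer abs (mask, add, xor) and falls back to the double path only when the result is still negative, which happens solely for INT32_MIN. A scalar sketch follows; the unsigned casts mirror the wrapping 32-bit machine arithmetic without invoking signed-overflow undefined behavior.

    #include <cmath>
    #include <cstdint>

    double absLikeThunk(int32_t x)
    {
        int32_t mask = x >> 31;                       // 0 for non-negative, -1 for negative (arithmetic shift)
        uint32_t folded = (static_cast<uint32_t>(x) + static_cast<uint32_t>(mask)) ^ static_cast<uint32_t>(mask);
        int32_t absValue = static_cast<int32_t>(folded);
        if (absValue < 0)                             // only INT32_MIN survives as negative
            return std::fabs(static_cast<double>(x)); // the thunk converts to double and retries absDouble
        return absValue;                              // the thunk boxes this as an int32 JSValue
    }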
MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
@@ -931,8 +1011,7 @@ MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
nonIntArg0Jump.link(&jit);
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg0, &jit);
- jit.xor32(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0);
- jit.jump(doneLoadingArg0);
+ jit.appendFailure(jit.jump());
} else
jit.appendFailure(nonIntArg0Jump);
@@ -940,117 +1019,160 @@ MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
nonIntArg1Jump.link(&jit);
jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1);
jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg1, &jit);
- jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT1);
- jit.jump(doneLoadingArg1);
+ jit.appendFailure(jit.jump());
} else
jit.appendFailure(nonIntArg1Jump);
- return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "imul");
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "imul");
+}
+
+MacroAssemblerCodeRef randomThunkGenerator(VM* vm)
+{
+ SpecializedThunkJIT jit(vm, 0);
+ if (!jit.supportsFloatingPoint())
+ return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
+
+#if USE(JSVALUE64)
+ jit.emitRandomThunk(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT3, SpecializedThunkJIT::fpRegT0);
+ jit.returnDouble(SpecializedThunkJIT::fpRegT0);
+
+ return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "random");
+#else
+ return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
+#endif
}
-static MacroAssemblerCodeRef arrayIteratorNextThunkGenerator(VM* vm, ArrayIterationKind kind)
+MacroAssemblerCodeRef boundThisNoArgsFunctionCallGenerator(VM* vm)
{
- typedef SpecializedThunkJIT::TrustedImm32 TrustedImm32;
- typedef SpecializedThunkJIT::TrustedImmPtr TrustedImmPtr;
- typedef SpecializedThunkJIT::Address Address;
- typedef SpecializedThunkJIT::BaseIndex BaseIndex;
- typedef SpecializedThunkJIT::Jump Jump;
+ CCallHelpers jit(vm);
+
+ jit.emitFunctionPrologue();
- SpecializedThunkJIT jit(vm);
- // Make sure we're being called on an array iterator, and load m_iteratedObject, and m_nextIndex into regT0 and regT1 respectively
- jit.loadArgumentWithSpecificClass(JSArrayIterator::info(), SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT4, SpecializedThunkJIT::regT1);
+ // Set up our call frame.
+ jit.storePtr(CCallHelpers::TrustedImmPtr(nullptr), CCallHelpers::addressFor(CallFrameSlot::codeBlock));
+ jit.store32(CCallHelpers::TrustedImm32(0), CCallHelpers::tagFor(CallFrameSlot::argumentCount));
- // Early exit if we don't have a thunk for this form of iteration
- jit.appendFailure(jit.branch32(SpecializedThunkJIT::AboveOrEqual, Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfIterationKind()), TrustedImm32(ArrayIterateKeyValue)));
+ unsigned extraStackNeeded = 0;
+ if (unsigned stackMisalignment = sizeof(CallerFrameAndPC) % stackAlignmentBytes())
+ extraStackNeeded = stackAlignmentBytes() - stackMisalignment;
- jit.loadPtr(Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfIteratedObject()), SpecializedThunkJIT::regT0);
+ // We need to forward all of the arguments that we were passed. We aren't allowed to do a tail
+ // call here as far as I can tell. At least not so long as the generic path doesn't do a tail
+ // call, since that would be way too weird.
- jit.load32(Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()), SpecializedThunkJIT::regT1);
+ // The formula for the number of stack bytes needed given some number of parameters (including
+ // this) is:
+ //
+ // stackAlign((numParams + CallFrameHeaderSize) * sizeof(Register) - sizeof(CallerFrameAndPC))
+ //
+ // Probably we want to write this as:
+ //
+ // stackAlign((numParams + (CallFrameHeaderSize - CallerFrameAndPCSize)) * sizeof(Register))
+ //
+ // That's really all there is to this. We have all the registers we need to do it.
- // Pull out the butterfly from iteratedObject
- jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSCell::structureOffset()), SpecializedThunkJIT::regT2);
+ jit.load32(CCallHelpers::payloadFor(CallFrameSlot::argumentCount), GPRInfo::regT1);
+ jit.add32(CCallHelpers::TrustedImm32(CallFrame::headerSizeInRegisters - CallerFrameAndPC::sizeInRegisters), GPRInfo::regT1, GPRInfo::regT2);
+ jit.lshift32(CCallHelpers::TrustedImm32(3), GPRInfo::regT2);
+ jit.add32(CCallHelpers::TrustedImm32(stackAlignmentBytes() - 1), GPRInfo::regT2);
+ jit.and32(CCallHelpers::TrustedImm32(-stackAlignmentBytes()), GPRInfo::regT2);
- jit.load8(Address(SpecializedThunkJIT::regT2, Structure::indexingTypeOffset()), SpecializedThunkJIT::regT3);
- jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);
- Jump nullButterfly = jit.branchTestPtr(SpecializedThunkJIT::Zero, SpecializedThunkJIT::regT2);
+ if (extraStackNeeded)
+ jit.add32(CCallHelpers::TrustedImm32(extraStackNeeded), GPRInfo::regT2);
- Jump notDone = jit.branch32(SpecializedThunkJIT::Below, SpecializedThunkJIT::regT1, Address(SpecializedThunkJIT::regT2, Butterfly::offsetOfPublicLength()));
-
- nullButterfly.link(&jit);
-
- // Return the termination signal to indicate that we've finished
- jit.move(TrustedImmPtr(vm->iterationTerminator.get()), SpecializedThunkJIT::regT0);
- jit.returnJSCell(SpecializedThunkJIT::regT0);
+ // At this point regT1 has the actual argument count and regT2 has the amount of stack we will
+ // need.
- notDone.link(&jit);
+ jit.subPtr(GPRInfo::regT2, CCallHelpers::stackPointerRegister);
+
+ // Do basic callee frame setup, including 'this'.
- if (kind == ArrayIterateKey) {
- jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
- jit.returnInt32(SpecializedThunkJIT::regT1);
- return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "array-iterator-next-key");
-
- }
- ASSERT(kind == ArrayIterateValue);
+ jit.loadCell(CCallHelpers::addressFor(CallFrameSlot::callee), GPRInfo::regT3);
+
+ jit.store32(GPRInfo::regT1, CCallHelpers::calleeFramePayloadSlot(CallFrameSlot::argumentCount));
- // Okay, now we're returning a value so make sure we're inside the vector size
- jit.appendFailure(jit.branch32(SpecializedThunkJIT::AboveOrEqual, SpecializedThunkJIT::regT1, Address(SpecializedThunkJIT::regT2, Butterfly::offsetOfVectorLength())));
+ JSValueRegs valueRegs = JSValueRegs::withTwoAvailableRegs(GPRInfo::regT0, GPRInfo::regT2);
+ jit.loadValue(CCallHelpers::Address(GPRInfo::regT3, JSBoundFunction::offsetOfBoundThis()), valueRegs);
+ jit.storeValue(valueRegs, CCallHelpers::calleeArgumentSlot(0));
+
+ jit.loadPtr(CCallHelpers::Address(GPRInfo::regT3, JSBoundFunction::offsetOfTargetFunction()), GPRInfo::regT3);
+ jit.storeCell(GPRInfo::regT3, CCallHelpers::calleeFrameSlot(CallFrameSlot::callee));
- // So now we perform inline loads for int32, value/undecided, and double storage
- Jump undecidedStorage = jit.branch32(SpecializedThunkJIT::Equal, SpecializedThunkJIT::regT3, TrustedImm32(ArrayWithUndecided));
- Jump notContiguousStorage = jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(ArrayWithContiguous));
+ // OK, now we can start copying. This is a simple matter of copying parameters from the caller's
+ // frame to the callee's frame. Note that we know that regT1 (the argument count) must be at
+ // least 1.
+ jit.sub32(CCallHelpers::TrustedImm32(1), GPRInfo::regT1);
+ CCallHelpers::Jump done = jit.branchTest32(CCallHelpers::Zero, GPRInfo::regT1);
- undecidedStorage.link(&jit);
+ CCallHelpers::Label loop = jit.label();
+ jit.sub32(CCallHelpers::TrustedImm32(1), GPRInfo::regT1);
+ jit.loadValue(CCallHelpers::addressFor(virtualRegisterForArgument(1)).indexedBy(GPRInfo::regT1, CCallHelpers::TimesEight), valueRegs);
+ jit.storeValue(valueRegs, CCallHelpers::calleeArgumentSlot(1).indexedBy(GPRInfo::regT1, CCallHelpers::TimesEight));
+ jit.branchTest32(CCallHelpers::NonZero, GPRInfo::regT1).linkTo(loop, &jit);
- jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);
+ done.link(&jit);
-#if USE(JSVALUE64)
- jit.load64(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight), SpecializedThunkJIT::regT0);
- Jump notHole = jit.branchTest64(SpecializedThunkJIT::NonZero, SpecializedThunkJIT::regT0);
- jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), JSInterfaceJIT::regT0);
- notHole.link(&jit);
- jit.addPtr(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
- jit.returnJSValue(SpecializedThunkJIT::regT0);
-#else
- jit.load32(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight, JSValue::offsetOfTag()), SpecializedThunkJIT::regT3);
- Jump notHole = jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(JSValue::EmptyValueTag));
- jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT1);
- jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT0);
- jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
- jit.returnJSValue(SpecializedThunkJIT::regT0, JSInterfaceJIT::regT1);
- notHole.link(&jit);
- jit.load32(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight, JSValue::offsetOfPayload()), SpecializedThunkJIT::regT0);
- jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
- jit.move(SpecializedThunkJIT::regT3, SpecializedThunkJIT::regT1);
- jit.returnJSValue(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
-#endif
- notContiguousStorage.link(&jit);
+ jit.loadPtr(
+ CCallHelpers::Address(GPRInfo::regT3, JSFunction::offsetOfExecutable()),
+ GPRInfo::regT0);
+ jit.loadPtr(
+ CCallHelpers::Address(
+ GPRInfo::regT0, ExecutableBase::offsetOfJITCodeWithArityCheckFor(CodeForCall)),
+ GPRInfo::regT0);
+ CCallHelpers::Jump noCode = jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT0);
- Jump notInt32Storage = jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(ArrayWithInt32));
- jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);
- jit.load32(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight, JSValue::offsetOfPayload()), SpecializedThunkJIT::regT0);
- jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
- jit.returnInt32(SpecializedThunkJIT::regT0);
- notInt32Storage.link(&jit);
+ emitPointerValidation(jit, GPRInfo::regT0);
+ jit.call(GPRInfo::regT0);
- jit.appendFailure(jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(ArrayWithDouble)));
- jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);
- jit.loadDouble(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight), SpecializedThunkJIT::fpRegT0);
- jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
- jit.returnDouble(SpecializedThunkJIT::fpRegT0);
+ jit.emitFunctionEpilogue();
+ jit.ret();
- return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "array-iterator-next-value");
+ LinkBuffer linkBuffer(*vm, jit, GLOBAL_THUNK_ID);
+ linkBuffer.link(noCode, CodeLocationLabel(vm->jitStubs->ctiNativeTailCallWithoutSavedTags(vm)));
+ return FINALIZE_CODE(
+ linkBuffer, ("Specialized thunk for bound function calls with no arguments"));
}
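
To make the stack-size comment inside the generator above concrete, here is the formula evaluated with assumed constants (5 header registers, a 2-register CallerFrameAndPC, 8-byte registers, 16-byte alignment); the real values come from JSC's headers and may differ per platform.

    #include <cstddef>

    size_t calleeFrameStackBytes(size_t numParamsIncludingThis)
    {
        const size_t headerRegisters = 5;            // assumed CallFrame header size in registers
        const size_t callerFrameAndPCRegisters = 2;  // assumed CallerFrameAndPC size in registers
        const size_t registerSize = 8;
        const size_t stackAlignment = 16;

        size_t bytes = (numParamsIncludingThis + headerRegisters - callerFrameAndPCRegisters) * registerSize;
        return (bytes + stackAlignment - 1) & ~(stackAlignment - 1);  // round up to the alignment
    }
    // Example: a bound call with only 'this' -> (1 + 3) * 8 = 32 bytes, already 16-byte aligned.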
-MacroAssemblerCodeRef arrayIteratorNextKeyThunkGenerator(VM* vm)
+#if ENABLE(WEBASSEMBLY)
+MacroAssemblerCodeRef throwExceptionFromWasmThunkGenerator(VM* vm)
{
- return arrayIteratorNextThunkGenerator(vm, ArrayIterateKey);
-}
+ CCallHelpers jit(vm);
-MacroAssemblerCodeRef arrayIteratorNextValueThunkGenerator(VM* vm)
-{
- return arrayIteratorNextThunkGenerator(vm, ArrayIterateValue);
-}
-
+ // Whatever jumps here must first move the ExceptionType into argumentGPR1.
+ // We're allowed to use temp registers here, but not callee saves.
+ {
+ RegisterSet usedRegisters = RegisterSet::stubUnavailableRegisters();
+ usedRegisters.set(GPRInfo::argumentGPR1);
+ jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer(usedRegisters);
+ }
+
+ jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ CCallHelpers::Call call = jit.call();
+ jit.jumpToExceptionHandler();
+
+ void (*throwWasmException)(ExecState*, Wasm::ExceptionType) = [] (ExecState* exec, Wasm::ExceptionType type) {
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ {
+ auto throwScope = DECLARE_THROW_SCOPE(*vm);
+ JSGlobalObject* globalObject = vm->topJSWebAssemblyInstance->globalObject();
+
+ JSWebAssemblyRuntimeError* error = JSWebAssemblyRuntimeError::create(exec, *vm, globalObject->WebAssemblyRuntimeErrorStructure(), Wasm::errorMessageForExceptionType(type));
+ throwException(exec, throwScope, error);
+ }
+
+ genericUnwind(vm, exec);
+ ASSERT(!!vm->callFrameForCatch);
+ };
+
+ LinkBuffer linkBuffer(*vm, jit, GLOBAL_THUNK_ID);
+ linkBuffer.link(call, throwWasmException);
+ return FINALIZE_CODE(
+ linkBuffer, ("Throw exception from Wasm"));
}
+#endif // ENABLE(WEBASSEMBLY)
+
+} // namespace JSC
#endif // ENABLE(JIT)
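
[Editorial note: the Wasm thunk above funnels every failing fast path through one stub that receives only an exception type in argumentGPR1 and then calls a C++ helper to build the error and unwind. The standalone C++ sketch below illustrates that calling convention in miniature; it is not WebKit code, and all names (ExceptionType, throwFromThunk, sharedThrowThunk) are illustrative assumptions.]

    // Minimal sketch of the "single shared throw thunk" pattern, assuming a
    // plain C++ environment: each failing path passes only an error code to one
    // shared entry point, which materializes the error and starts unwinding.
    #include <cstdio>
    #include <stdexcept>

    enum class ExceptionType { OutOfBoundsMemoryAccess, Unreachable };

    // Plays the role of the C++ callback the generated thunk calls: turns the
    // raw exception code into a full error object and begins unwinding (modeled
    // here with a C++ exception instead of genericUnwind()).
    [[noreturn]] static void throwFromThunk(ExceptionType type)
    {
        const char* message = type == ExceptionType::OutOfBoundsMemoryAccess
            ? "out-of-bounds memory access"
            : "unreachable executed";
        throw std::runtime_error(message);
    }

    // Plays the role of the generated thunk: every failing fast path jumps here
    // with only the exception type, so the error-building code exists once.
    [[noreturn]] static void sharedThrowThunk(ExceptionType typeInArgumentSlot)
    {
        throwFromThunk(typeInArgumentSlot);
    }

    int main()
    {
        try {
            sharedThrowThunk(ExceptionType::Unreachable);
        } catch (const std::runtime_error& error) {
            std::printf("caught: %s\n", error.what());
        }
    }

The design point this models is that the per-call-site cost is tiny (load a constant, jump), while the expensive error construction and unwind logic is emitted exactly once.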
diff --git a/Source/JavaScriptCore/jit/ThunkGenerators.h b/Source/JavaScriptCore/jit/ThunkGenerators.h
index 0e2762890..90740c029 100644
--- a/Source/JavaScriptCore/jit/ThunkGenerators.h
+++ b/Source/JavaScriptCore/jit/ThunkGenerators.h
@@ -23,30 +23,34 @@
* THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef ThunkGenerators_h
-#define ThunkGenerators_h
+#pragma once
+#include "CodeSpecializationKind.h"
#include "ThunkGenerator.h"
#if ENABLE(JIT)
namespace JSC {
+class CallLinkInfo;
+
MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(VM*);
+MacroAssemblerCodeRef linkCallThunk(VM*, CallLinkInfo&, CodeSpecializationKind);
MacroAssemblerCodeRef linkCallThunkGenerator(VM*);
-MacroAssemblerCodeRef linkConstructThunkGenerator(VM*);
-
-MacroAssemblerCodeRef linkClosureCallThunkGenerator(VM*);
-MacroAssemblerCodeRef virtualCallThunkGenerator(VM*);
-MacroAssemblerCodeRef virtualConstructThunkGenerator(VM*);
+MacroAssemblerCodeRef linkPolymorphicCallThunkGenerator(VM*);
+MacroAssemblerCodeRef virtualThunkFor(VM*, CallLinkInfo&);
MacroAssemblerCodeRef nativeCallGenerator(VM*);
MacroAssemblerCodeRef nativeConstructGenerator(VM*);
-MacroAssemblerCodeRef arityFixup(VM*);
+MacroAssemblerCodeRef nativeTailCallGenerator(VM*);
+MacroAssemblerCodeRef nativeTailCallWithoutSavedTagsGenerator(VM*);
+MacroAssemblerCodeRef arityFixupGenerator(VM*);
+MacroAssemblerCodeRef unreachableGenerator(VM*);
MacroAssemblerCodeRef charCodeAtThunkGenerator(VM*);
MacroAssemblerCodeRef charAtThunkGenerator(VM*);
+MacroAssemblerCodeRef clz32ThunkGenerator(VM*);
MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM*);
MacroAssemblerCodeRef absThunkGenerator(VM*);
MacroAssemblerCodeRef ceilThunkGenerator(VM*);
@@ -55,12 +59,15 @@ MacroAssemblerCodeRef floorThunkGenerator(VM*);
MacroAssemblerCodeRef logThunkGenerator(VM*);
MacroAssemblerCodeRef roundThunkGenerator(VM*);
MacroAssemblerCodeRef sqrtThunkGenerator(VM*);
-MacroAssemblerCodeRef powThunkGenerator(VM*);
MacroAssemblerCodeRef imulThunkGenerator(VM*);
-MacroAssemblerCodeRef arrayIteratorNextKeyThunkGenerator(VM*);
-MacroAssemblerCodeRef arrayIteratorNextValueThunkGenerator(VM*);
+MacroAssemblerCodeRef randomThunkGenerator(VM*);
+MacroAssemblerCodeRef truncThunkGenerator(VM*);
+
+MacroAssemblerCodeRef boundThisNoArgsFunctionCallGenerator(VM*);
+
+#if ENABLE(WEBASSEMBLY)
+MacroAssemblerCodeRef throwExceptionFromWasmThunkGenerator(VM*);
+#endif
}
#endif // ENABLE(JIT)
-
-#endif // ThunkGenerator_h
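
[Editorial note: the header above declares many generators with the same uniform signature, MacroAssemblerCodeRef xxxGenerator(VM*). The standalone C++ sketch below shows why a uniform signature is convenient: generated code can be cached per VM, keyed by the generator function pointer, so each thunk is emitted at most once. This is an illustrative analogue, not WebKit's implementation; VM, CodeRef, and getStub are stand-in names.]

    // Minimal sketch, assuming stand-in types: a per-VM cache of thunks keyed by
    // their generator function pointer.
    #include <cstdio>
    #include <string>
    #include <unordered_map>

    struct VM;                                // stand-in for the real VM
    using CodeRef = std::string;              // stand-in for MacroAssemblerCodeRef
    using ThunkGenerator = CodeRef (*)(VM*);  // mirrors the uniform signature above

    struct VM {
        std::unordered_map<ThunkGenerator, CodeRef> thunkCache;

        // Run the generator only on the first request; reuse the cached code after.
        CodeRef getStub(ThunkGenerator generator)
        {
            auto it = thunkCache.find(generator);
            if (it != thunkCache.end())
                return it->second;
            CodeRef code = generator(this);
            thunkCache.emplace(generator, code);
            return code;
        }
    };

    static CodeRef charCodeAtThunk(VM*) { return "charCodeAt stub"; }

    int main()
    {
        VM vm;
        std::printf("%s\n", vm.getStub(charCodeAtThunk).c_str()); // generated
        std::printf("%s\n", vm.getStub(charCodeAtThunk).c_str()); // served from cache
    }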
diff --git a/Source/JavaScriptCore/jit/UnusedPointer.h b/Source/JavaScriptCore/jit/UnusedPointer.h
index af41248d6..631d7d63b 100644
--- a/Source/JavaScriptCore/jit/UnusedPointer.h
+++ b/Source/JavaScriptCore/jit/UnusedPointer.h
@@ -23,8 +23,7 @@
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef UnusedPointer_h
-#define UnusedPointer_h
+#pragma once
namespace JSC {
@@ -33,5 +32,3 @@ static const uintptr_t unusedPointer = 0xd1e7beef;
} // namespace JSC
using JSC::unusedPointer;
-
-#endif // UnusedPointer_h