author    Lorry Tar Creator <lorry-tar-importer@lorry>  2016-04-10 09:28:39 +0000
committer Lorry Tar Creator <lorry-tar-importer@lorry>  2016-04-10 09:28:39 +0000
commit    32761a6cee1d0dee366b885b7b9c777e67885688 (patch)
tree      d6bec92bebfb216f4126356e55518842c2f476a1 /Source/JavaScriptCore/jit
parent    a4e969f4965059196ca948db781e52f7cfebf19e (diff)
download  WebKitGtk-tarball-32761a6cee1d0dee366b885b7b9c777e67885688.tar.gz
tag       webkitgtk-2.4.11
Diffstat (limited to 'Source/JavaScriptCore/jit')
-rw-r--r--  Source/JavaScriptCore/jit/AssemblyHelpers.cpp | 402
-rw-r--r--  Source/JavaScriptCore/jit/AssemblyHelpers.h | 1032
-rw-r--r--  Source/JavaScriptCore/jit/BinarySwitch.cpp | 391
-rw-r--r--  Source/JavaScriptCore/jit/BinarySwitch.h | 147
-rw-r--r--  Source/JavaScriptCore/jit/CCallHelpers.h | 654
-rw-r--r--  Source/JavaScriptCore/jit/CachedRecovery.cpp | 71
-rw-r--r--  Source/JavaScriptCore/jit/CachedRecovery.h | 137
-rw-r--r--  Source/JavaScriptCore/jit/CallFrameShuffleData.cpp | 68
-rw-r--r--  Source/JavaScriptCore/jit/CallFrameShuffleData.h | 54
-rw-r--r--  Source/JavaScriptCore/jit/CallFrameShuffler.cpp | 774
-rw-r--r--  Source/JavaScriptCore/jit/CallFrameShuffler.h | 804
-rw-r--r--  Source/JavaScriptCore/jit/CallFrameShuffler32_64.cpp | 305
-rw-r--r--  Source/JavaScriptCore/jit/CallFrameShuffler64.cpp | 369
-rw-r--r--  Source/JavaScriptCore/jit/ClosureCallStubRoutine.cpp (renamed from Source/JavaScriptCore/jit/Reg.cpp) | 36
-rw-r--r--  Source/JavaScriptCore/jit/ClosureCallStubRoutine.h (renamed from Source/JavaScriptCore/jit/JITNegGenerator.h) | 52
-rw-r--r--  Source/JavaScriptCore/jit/CompactJITCodeMap.h | 36
-rw-r--r--  Source/JavaScriptCore/jit/ExecutableAllocationFuzz.cpp | 73
-rw-r--r--  Source/JavaScriptCore/jit/ExecutableAllocationFuzz.h | 51
-rw-r--r--  Source/JavaScriptCore/jit/ExecutableAllocator.cpp | 45
-rw-r--r--  Source/JavaScriptCore/jit/ExecutableAllocator.h | 68
-rw-r--r--  Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp | 79
-rw-r--r--  Source/JavaScriptCore/jit/FPRInfo.h | 46
-rw-r--r--  Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp | 61
-rw-r--r--  Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h | 37
-rw-r--r--  Source/JavaScriptCore/jit/GPRInfo.cpp | 42
-rw-r--r--  Source/JavaScriptCore/jit/GPRInfo.h | 335
-rw-r--r--  Source/JavaScriptCore/jit/HostCallReturnValue.cpp | 1
-rw-r--r--  Source/JavaScriptCore/jit/HostCallReturnValue.h | 7
-rw-r--r--  Source/JavaScriptCore/jit/IntrinsicEmitter.cpp | 134
-rw-r--r--  Source/JavaScriptCore/jit/JIT.cpp | 446
-rw-r--r--  Source/JavaScriptCore/jit/JIT.h | 348
-rw-r--r--  Source/JavaScriptCore/jit/JITAddGenerator.cpp | 136
-rw-r--r--  Source/JavaScriptCore/jit/JITAddGenerator.h | 80
-rw-r--r--  Source/JavaScriptCore/jit/JITArithmetic.cpp | 949
-rw-r--r--  Source/JavaScriptCore/jit/JITArithmetic32_64.cpp | 781
-rw-r--r--  Source/JavaScriptCore/jit/JITBitAndGenerator.cpp | 85
-rw-r--r--  Source/JavaScriptCore/jit/JITBitAndGenerator.h | 49
-rw-r--r--  Source/JavaScriptCore/jit/JITBitBinaryOpGenerator.h | 71
-rw-r--r--  Source/JavaScriptCore/jit/JITBitOrGenerator.cpp | 74
-rw-r--r--  Source/JavaScriptCore/jit/JITBitOrGenerator.h | 49
-rw-r--r--  Source/JavaScriptCore/jit/JITBitXorGenerator.cpp | 73
-rw-r--r--  Source/JavaScriptCore/jit/JITBitXorGenerator.h | 49
-rw-r--r--  Source/JavaScriptCore/jit/JITCall.cpp | 277
-rw-r--r--  Source/JavaScriptCore/jit/JITCall32_64.cpp | 299
-rw-r--r--  Source/JavaScriptCore/jit/JITCode.cpp | 169
-rw-r--r--  Source/JavaScriptCore/jit/JITCode.h | 73
-rw-r--r--  Source/JavaScriptCore/jit/JITCompilationEffort.h | 2
-rw-r--r--  Source/JavaScriptCore/jit/JITDisassembler.cpp | 6
-rw-r--r--  Source/JavaScriptCore/jit/JITDisassembler.h | 29
-rw-r--r--  Source/JavaScriptCore/jit/JITDivGenerator.cpp | 116
-rw-r--r--  Source/JavaScriptCore/jit/JITDivGenerator.h | 85
-rw-r--r--  Source/JavaScriptCore/jit/JITExceptions.cpp | 33
-rw-r--r--  Source/JavaScriptCore/jit/JITExceptions.h | 7
-rw-r--r--  Source/JavaScriptCore/jit/JITInlineCacheGenerator.cpp | 74
-rw-r--r--  Source/JavaScriptCore/jit/JITInlineCacheGenerator.h | 30
-rw-r--r--  Source/JavaScriptCore/jit/JITInlines.h | 468
-rw-r--r--  Source/JavaScriptCore/jit/JITLeftShiftGenerator.cpp | 84
-rw-r--r--  Source/JavaScriptCore/jit/JITLeftShiftGenerator.h | 49
-rw-r--r--  Source/JavaScriptCore/jit/JITMulGenerator.cpp | 198
-rw-r--r--  Source/JavaScriptCore/jit/JITMulGenerator.h | 83
-rw-r--r--  Source/JavaScriptCore/jit/JITNegGenerator.cpp | 72
-rw-r--r--  Source/JavaScriptCore/jit/JITOpcodes.cpp | 1100
-rw-r--r--  Source/JavaScriptCore/jit/JITOpcodes32_64.cpp | 773
-rw-r--r--  Source/JavaScriptCore/jit/JITOperationWrappers.h | 413
-rw-r--r--  Source/JavaScriptCore/jit/JITOperations.cpp | 1731
-rw-r--r--  Source/JavaScriptCore/jit/JITOperations.h | 258
-rw-r--r--  Source/JavaScriptCore/jit/JITOperationsMSVC64.cpp | 46
-rw-r--r--  Source/JavaScriptCore/jit/JITPropertyAccess.cpp | 1183
-rw-r--r--  Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp | 822
-rw-r--r--  Source/JavaScriptCore/jit/JITRightShiftGenerator.cpp | 140
-rw-r--r--  Source/JavaScriptCore/jit/JITRightShiftGenerator.h | 63
-rw-r--r--  Source/JavaScriptCore/jit/JITStubRoutine.cpp | 9
-rw-r--r--  Source/JavaScriptCore/jit/JITStubRoutine.h | 21
-rw-r--r--  Source/JavaScriptCore/jit/JITStubs.cpp | 54
-rw-r--r--  Source/JavaScriptCore/jit/JITStubs.h | 59
-rw-r--r--  Source/JavaScriptCore/jit/JITStubsARM.h | 302
-rw-r--r--  Source/JavaScriptCore/jit/JITStubsARMv7.h | 351
-rw-r--r--  Source/JavaScriptCore/jit/JITStubsMSVC64.asm | 44
-rw-r--r--  Source/JavaScriptCore/jit/JITStubsX86.h | 649
-rw-r--r--  Source/JavaScriptCore/jit/JITStubsX86Common.h | 148
-rw-r--r--  Source/JavaScriptCore/jit/JITStubsX86_64.h | 218
-rw-r--r--  Source/JavaScriptCore/jit/JITSubGenerator.cpp | 91
-rw-r--r--  Source/JavaScriptCore/jit/JITSubGenerator.h | 78
-rw-r--r--  Source/JavaScriptCore/jit/JITThunks.cpp | 59
-rw-r--r--  Source/JavaScriptCore/jit/JITThunks.h | 20
-rw-r--r--  Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.cpp | 15
-rw-r--r--  Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.h | 8
-rw-r--r--  Source/JavaScriptCore/jit/JITWriteBarrier.h | 8
-rw-r--r--  Source/JavaScriptCore/jit/JSInterfaceJIT.h | 35
-rw-r--r--  Source/JavaScriptCore/jit/PCToCodeOriginMap.cpp | 301
-rw-r--r--  Source/JavaScriptCore/jit/PCToCodeOriginMap.h | 104
-rw-r--r--  Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.cpp | 137
-rw-r--r--  Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.h | 115
-rw-r--r--  Source/JavaScriptCore/jit/Reg.h | 250
-rw-r--r--  Source/JavaScriptCore/jit/RegisterAtOffset.cpp | 45
-rw-r--r--  Source/JavaScriptCore/jit/RegisterAtOffset.h | 81
-rw-r--r--  Source/JavaScriptCore/jit/RegisterAtOffsetList.cpp | 80
-rw-r--r--  Source/JavaScriptCore/jit/RegisterAtOffsetList.h | 82
-rw-r--r--  Source/JavaScriptCore/jit/RegisterMap.h | 113
-rw-r--r--  Source/JavaScriptCore/jit/RegisterSet.cpp | 326
-rw-r--r--  Source/JavaScriptCore/jit/RegisterSet.h | 84
-rw-r--r--  Source/JavaScriptCore/jit/Repatch.cpp | 1968
-rw-r--r--  Source/JavaScriptCore/jit/Repatch.h | 39
-rw-r--r--  Source/JavaScriptCore/jit/ScratchRegisterAllocator.cpp | 302
-rw-r--r--  Source/JavaScriptCore/jit/ScratchRegisterAllocator.h | 193
-rw-r--r--  Source/JavaScriptCore/jit/SetupVarargsFrame.cpp | 143
-rw-r--r--  Source/JavaScriptCore/jit/SetupVarargsFrame.h | 53
-rw-r--r--  Source/JavaScriptCore/jit/SlowPathCall.h | 4
-rw-r--r--  Source/JavaScriptCore/jit/SnippetOperand.h | 101
-rw-r--r--  Source/JavaScriptCore/jit/SpecializedThunkJIT.h | 63
-rw-r--r--  Source/JavaScriptCore/jit/SpillRegistersMode.h | 35
-rw-r--r--  Source/JavaScriptCore/jit/TempRegisterSet.cpp | 3
-rw-r--r--  Source/JavaScriptCore/jit/TempRegisterSet.h | 21
-rw-r--r--  Source/JavaScriptCore/jit/ThunkGenerator.h | 2
-rw-r--r--  Source/JavaScriptCore/jit/ThunkGenerators.cpp | 662
-rw-r--r--  Source/JavaScriptCore/jit/ThunkGenerators.h | 20
116 files changed, 9054 insertions, 16626 deletions
diff --git a/Source/JavaScriptCore/jit/AssemblyHelpers.cpp b/Source/JavaScriptCore/jit/AssemblyHelpers.cpp
index c1be5932c..ddf1d6359 100644
--- a/Source/JavaScriptCore/jit/AssemblyHelpers.cpp
+++ b/Source/JavaScriptCore/jit/AssemblyHelpers.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,9 +28,6 @@
#if ENABLE(JIT)
-#include "JITOperations.h"
-#include "JSCInlines.h"
-
namespace JSC {
ExecutableBase* AssemblyHelpers::executableFor(const CodeOrigin& codeOrigin)
@@ -38,7 +35,7 @@ ExecutableBase* AssemblyHelpers::executableFor(const CodeOrigin& codeOrigin)
if (!codeOrigin.inlineCallFrame)
return m_codeBlock->ownerExecutable();
- return codeOrigin.inlineCallFrame->baselineCodeBlock->ownerExecutable();
+ return codeOrigin.inlineCallFrame->executable.get();
}
Vector<BytecodeAndMachineOffset>& AssemblyHelpers::decodedCodeMapFor(CodeBlock* codeBlock)
@@ -55,118 +52,6 @@ Vector<BytecodeAndMachineOffset>& AssemblyHelpers::decodedCodeMapFor(CodeBlock*
return result.iterator->value;
}
-AssemblyHelpers::JumpList AssemblyHelpers::branchIfNotType(
- JSValueRegs regs, GPRReg tempGPR, const InferredType::Descriptor& descriptor, TagRegistersMode mode)
-{
- AssemblyHelpers::JumpList result;
-
- switch (descriptor.kind()) {
- case InferredType::Bottom:
- result.append(jump());
- break;
-
- case InferredType::Boolean:
- result.append(branchIfNotBoolean(regs, tempGPR));
- break;
-
- case InferredType::Other:
- result.append(branchIfNotOther(regs, tempGPR));
- break;
-
- case InferredType::Int32:
- result.append(branchIfNotInt32(regs, mode));
- break;
-
- case InferredType::Number:
- result.append(branchIfNotNumber(regs, tempGPR, mode));
- break;
-
- case InferredType::String:
- result.append(branchIfNotCell(regs, mode));
- result.append(branchIfNotString(regs.payloadGPR()));
- break;
-
- case InferredType::Symbol:
- result.append(branchIfNotCell(regs, mode));
- result.append(branchIfNotSymbol(regs.payloadGPR()));
- break;
-
- case InferredType::ObjectWithStructure:
- result.append(branchIfNotCell(regs, mode));
- result.append(
- branchStructure(
- NotEqual,
- Address(regs.payloadGPR(), JSCell::structureIDOffset()),
- descriptor.structure()));
- break;
-
- case InferredType::ObjectWithStructureOrOther: {
- Jump ok = branchIfOther(regs, tempGPR);
- result.append(branchIfNotCell(regs, mode));
- result.append(
- branchStructure(
- NotEqual,
- Address(regs.payloadGPR(), JSCell::structureIDOffset()),
- descriptor.structure()));
- ok.link(this);
- break;
- }
-
- case InferredType::Object:
- result.append(branchIfNotCell(regs, mode));
- result.append(branchIfNotObject(regs.payloadGPR()));
- break;
-
- case InferredType::ObjectOrOther: {
- Jump ok = branchIfOther(regs, tempGPR);
- result.append(branchIfNotCell(regs, mode));
- result.append(branchIfNotObject(regs.payloadGPR()));
- ok.link(this);
- break;
- }
-
- case InferredType::Top:
- break;
- }
-
- return result;
-}
-
-AssemblyHelpers::Jump AssemblyHelpers::branchIfFastTypedArray(GPRReg baseGPR)
-{
- return branch32(
- Equal,
- Address(baseGPR, JSArrayBufferView::offsetOfMode()),
- TrustedImm32(FastTypedArray));
-}
-
-AssemblyHelpers::Jump AssemblyHelpers::branchIfNotFastTypedArray(GPRReg baseGPR)
-{
- return branch32(
- NotEqual,
- Address(baseGPR, JSArrayBufferView::offsetOfMode()),
- TrustedImm32(FastTypedArray));
-}
-
-AssemblyHelpers::Jump AssemblyHelpers::loadTypedArrayVector(GPRReg baseGPR, GPRReg resultGPR)
-{
- RELEASE_ASSERT(baseGPR != resultGPR);
-
- loadPtr(Address(baseGPR, JSArrayBufferView::offsetOfVector()), resultGPR);
- Jump ok = branchIfToSpace(resultGPR);
- Jump result = branchIfFastTypedArray(baseGPR);
- ok.link(this);
- return result;
-}
-
-void AssemblyHelpers::purifyNaN(FPRReg fpr)
-{
- MacroAssembler::Jump notNaN = branchDouble(DoubleEqual, fpr, fpr);
- static const double NaN = PNaN;
- loadDouble(TrustedImmPtr(&NaN), fpr);
- notNaN.link(this);
-}
-
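The purifyNaN helper removed above canonicalizes NaN bit patterns: under the NaN-boxed JSVALUE64 encoding, a double carrying an arbitrary NaN payload could be misread as a tagged value, so any NaN is rewritten to the canonical PNaN. A minimal scalar sketch of the same check, using std::numeric_limits' quiet NaN as a stand-in for JSC's PNaN:

    #include <limits>

    static const double PNaN = std::numeric_limits<double>::quiet_NaN();

    double purifyNaN(double value)
    {
        // Only NaN compares unequal to itself (the DoubleEqual branch above).
        if (value != value)
            return PNaN;
        return value;
    }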
#if ENABLE(SAMPLING_FLAGS)
void AssemblyHelpers::setSamplingFlag(int32_t flag)
{
@@ -189,7 +74,7 @@ void AssemblyHelpers::jitAssertIsInt32(GPRReg gpr)
{
#if CPU(X86_64)
Jump checkInt32 = branch64(BelowOrEqual, gpr, TrustedImm64(static_cast<uintptr_t>(0xFFFFFFFFu)));
- abortWithReason(AHIsNotInt32);
+ breakpoint();
checkInt32.link(this);
#else
UNUSED_PARAM(gpr);
@@ -199,14 +84,14 @@ void AssemblyHelpers::jitAssertIsInt32(GPRReg gpr)
void AssemblyHelpers::jitAssertIsJSInt32(GPRReg gpr)
{
Jump checkJSInt32 = branch64(AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
- abortWithReason(AHIsNotJSInt32);
+ breakpoint();
checkJSInt32.link(this);
}
void AssemblyHelpers::jitAssertIsJSNumber(GPRReg gpr)
{
Jump checkJSNumber = branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
- abortWithReason(AHIsNotJSNumber);
+ breakpoint();
checkJSNumber.link(this);
}
@@ -215,27 +100,15 @@ void AssemblyHelpers::jitAssertIsJSDouble(GPRReg gpr)
Jump checkJSInt32 = branch64(AboveOrEqual, gpr, GPRInfo::tagTypeNumberRegister);
Jump checkJSNumber = branchTest64(MacroAssembler::NonZero, gpr, GPRInfo::tagTypeNumberRegister);
checkJSInt32.link(this);
- abortWithReason(AHIsNotJSDouble);
+ breakpoint();
checkJSNumber.link(this);
}
void AssemblyHelpers::jitAssertIsCell(GPRReg gpr)
{
Jump checkCell = branchTest64(MacroAssembler::Zero, gpr, GPRInfo::tagMaskRegister);
- abortWithReason(AHIsNotCell);
- checkCell.link(this);
-}
-
-void AssemblyHelpers::jitAssertTagsInPlace()
-{
- Jump ok = branch64(Equal, GPRInfo::tagTypeNumberRegister, TrustedImm64(TagTypeNumber));
- abortWithReason(AHTagTypeNumberNotInPlace);
breakpoint();
- ok.link(this);
-
- ok = branch64(Equal, GPRInfo::tagMaskRegister, TrustedImm64(TagMask));
- abortWithReason(AHTagMaskNotInPlace);
- ok.link(this);
+ checkCell.link(this);
}
#elif USE(JSVALUE32_64)
void AssemblyHelpers::jitAssertIsInt32(GPRReg gpr)
@@ -246,7 +119,7 @@ void AssemblyHelpers::jitAssertIsInt32(GPRReg gpr)
void AssemblyHelpers::jitAssertIsJSInt32(GPRReg gpr)
{
Jump checkJSInt32 = branch32(Equal, gpr, TrustedImm32(JSValue::Int32Tag));
- abortWithReason(AHIsNotJSInt32);
+ breakpoint();
checkJSInt32.link(this);
}
@@ -254,7 +127,7 @@ void AssemblyHelpers::jitAssertIsJSNumber(GPRReg gpr)
{
Jump checkJSInt32 = branch32(Equal, gpr, TrustedImm32(JSValue::Int32Tag));
Jump checkJSDouble = branch32(Below, gpr, TrustedImm32(JSValue::LowestTag));
- abortWithReason(AHIsNotJSNumber);
+ breakpoint();
checkJSInt32.link(this);
checkJSDouble.link(this);
}
@@ -262,282 +135,33 @@ void AssemblyHelpers::jitAssertIsJSNumber(GPRReg gpr)
void AssemblyHelpers::jitAssertIsJSDouble(GPRReg gpr)
{
Jump checkJSDouble = branch32(Below, gpr, TrustedImm32(JSValue::LowestTag));
- abortWithReason(AHIsNotJSDouble);
+ breakpoint();
checkJSDouble.link(this);
}
void AssemblyHelpers::jitAssertIsCell(GPRReg gpr)
{
Jump checkCell = branch32(Equal, gpr, TrustedImm32(JSValue::CellTag));
- abortWithReason(AHIsNotCell);
+ breakpoint();
checkCell.link(this);
}
-
-void AssemblyHelpers::jitAssertTagsInPlace()
-{
-}
#endif // USE(JSVALUE32_64)
void AssemblyHelpers::jitAssertHasValidCallFrame()
{
Jump checkCFR = branchTestPtr(Zero, GPRInfo::callFrameRegister, TrustedImm32(7));
- abortWithReason(AHCallFrameMisaligned);
+ breakpoint();
checkCFR.link(this);
}
void AssemblyHelpers::jitAssertIsNull(GPRReg gpr)
{
Jump checkNull = branchTestPtr(Zero, gpr);
- abortWithReason(AHIsNotNull);
+ breakpoint();
checkNull.link(this);
}
-
-void AssemblyHelpers::jitAssertArgumentCountSane()
-{
- Jump ok = branch32(Below, payloadFor(JSStack::ArgumentCount), TrustedImm32(10000000));
- abortWithReason(AHInsaneArgumentCount);
- ok.link(this);
-}
-
#endif // !ASSERT_DISABLED
-void AssemblyHelpers::jitReleaseAssertNoException()
-{
- Jump noException;
-#if USE(JSVALUE64)
- noException = branchTest64(Zero, AbsoluteAddress(vm()->addressOfException()));
-#elif USE(JSVALUE32_64)
- noException = branch32(Equal, AbsoluteAddress(vm()->addressOfException()), TrustedImm32(0));
-#endif
- abortWithReason(JITUncoughtExceptionAfterCall);
- noException.link(this);
-}
-
-void AssemblyHelpers::callExceptionFuzz()
-{
- if (!Options::useExceptionFuzz())
- return;
-
- EncodedJSValue* buffer = vm()->exceptionFuzzingBuffer(sizeof(EncodedJSValue) * (GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters));
-
- for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
-#if USE(JSVALUE64)
- store64(GPRInfo::toRegister(i), buffer + i);
-#else
- store32(GPRInfo::toRegister(i), buffer + i);
-#endif
- }
- for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
- move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
- storeDouble(FPRInfo::toRegister(i), Address(GPRInfo::regT0));
- }
-
- // Set up one argument.
-#if CPU(X86)
- poke(GPRInfo::callFrameRegister, 0);
-#else
- move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
-#endif
- move(TrustedImmPtr(bitwise_cast<void*>(operationExceptionFuzz)), GPRInfo::nonPreservedNonReturnGPR);
- call(GPRInfo::nonPreservedNonReturnGPR);
-
- for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
- move(TrustedImmPtr(buffer + GPRInfo::numberOfRegisters + i), GPRInfo::regT0);
- loadDouble(Address(GPRInfo::regT0), FPRInfo::toRegister(i));
- }
- for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
-#if USE(JSVALUE64)
- load64(buffer + i, GPRInfo::toRegister(i));
-#else
- load32(buffer + i, GPRInfo::toRegister(i));
-#endif
- }
-}
-
-AssemblyHelpers::Jump AssemblyHelpers::emitExceptionCheck(ExceptionCheckKind kind, ExceptionJumpWidth width)
-{
- callExceptionFuzz();
-
- if (width == FarJumpWidth)
- kind = (kind == NormalExceptionCheck ? InvertedExceptionCheck : NormalExceptionCheck);
-
- Jump result;
-#if USE(JSVALUE64)
- result = branchTest64(kind == NormalExceptionCheck ? NonZero : Zero, AbsoluteAddress(vm()->addressOfException()));
-#elif USE(JSVALUE32_64)
- result = branch32(kind == NormalExceptionCheck ? NotEqual : Equal, AbsoluteAddress(vm()->addressOfException()), TrustedImm32(0));
-#endif
-
- if (width == NormalJumpWidth)
- return result;
-
- PatchableJump realJump = patchableJump();
- result.link(this);
-
- return realJump.m_jump;
-}
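In the removed emitExceptionCheck, the FarJumpWidth path exploits the fact that a patchable jump is unconditional: the check's sense is inverted, the inverted short branch is linked past the patchable jump, and the patchable jump itself is returned as the far-capable "exception pending" edge. A minimal sketch of just the sense inversion, with enum names as in the code above:

    enum ExceptionCheckKind { NormalExceptionCheck, InvertedExceptionCheck };
    enum ExceptionJumpWidth { NormalJumpWidth, FarJumpWidth };

    // For a far-width check, the emitted conditional branch tests the opposite
    // condition, because it only guards the fall-through around the
    // unconditional patchable jump that follows it.
    ExceptionCheckKind emittedKind(ExceptionCheckKind kind, ExceptionJumpWidth width)
    {
        if (width != FarJumpWidth)
            return kind;
        return kind == NormalExceptionCheck ? InvertedExceptionCheck : NormalExceptionCheck;
    }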
-
-AssemblyHelpers::Jump AssemblyHelpers::emitNonPatchableExceptionCheck()
-{
- callExceptionFuzz();
-
- Jump result;
-#if USE(JSVALUE64)
- result = branchTest64(NonZero, AbsoluteAddress(vm()->addressOfException()));
-#elif USE(JSVALUE32_64)
- result = branch32(NotEqual, AbsoluteAddress(vm()->addressOfException()), TrustedImm32(0));
-#endif
-
- return result;
-}
-
-void AssemblyHelpers::emitStoreStructureWithTypeInfo(AssemblyHelpers& jit, TrustedImmPtr structure, RegisterID dest)
-{
- const Structure* structurePtr = static_cast<const Structure*>(structure.m_value);
-#if USE(JSVALUE64)
- jit.store64(TrustedImm64(structurePtr->idBlob()), MacroAssembler::Address(dest, JSCell::structureIDOffset()));
- if (!ASSERT_DISABLED) {
- Jump correctStructure = jit.branch32(Equal, MacroAssembler::Address(dest, JSCell::structureIDOffset()), TrustedImm32(structurePtr->id()));
- jit.abortWithReason(AHStructureIDIsValid);
- correctStructure.link(&jit);
-
- Jump correctIndexingType = jit.branch8(Equal, MacroAssembler::Address(dest, JSCell::indexingTypeOffset()), TrustedImm32(structurePtr->indexingType()));
- jit.abortWithReason(AHIndexingTypeIsValid);
- correctIndexingType.link(&jit);
-
- Jump correctType = jit.branch8(Equal, MacroAssembler::Address(dest, JSCell::typeInfoTypeOffset()), TrustedImm32(structurePtr->typeInfo().type()));
- jit.abortWithReason(AHTypeInfoIsValid);
- correctType.link(&jit);
-
- Jump correctFlags = jit.branch8(Equal, MacroAssembler::Address(dest, JSCell::typeInfoFlagsOffset()), TrustedImm32(structurePtr->typeInfo().inlineTypeFlags()));
- jit.abortWithReason(AHTypeInfoInlineTypeFlagsAreValid);
- correctFlags.link(&jit);
- }
-#else
- // Do a 32-bit wide store to initialize the cell's fields.
- jit.store32(TrustedImm32(structurePtr->objectInitializationBlob()), MacroAssembler::Address(dest, JSCell::indexingTypeOffset()));
- jit.storePtr(structure, MacroAssembler::Address(dest, JSCell::structureIDOffset()));
-#endif
-}
-
-#if USE(JSVALUE64)
-template<typename LoadFromHigh, typename StoreToHigh, typename LoadFromLow, typename StoreToLow>
-void emitRandomThunkImpl(AssemblyHelpers& jit, GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, FPRReg result, const LoadFromHigh& loadFromHigh, const StoreToHigh& storeToHigh, const LoadFromLow& loadFromLow, const StoreToLow& storeToLow)
-{
- // Inlined WeakRandom::advance().
- // uint64_t x = m_low;
- loadFromLow(scratch0);
- // uint64_t y = m_high;
- loadFromHigh(scratch1);
- // m_low = y;
- storeToLow(scratch1);
-
- // x ^= x << 23;
- jit.move(scratch0, scratch2);
- jit.lshift64(AssemblyHelpers::TrustedImm32(23), scratch2);
- jit.xor64(scratch2, scratch0);
-
- // x ^= x >> 17;
- jit.move(scratch0, scratch2);
- jit.rshift64(AssemblyHelpers::TrustedImm32(17), scratch2);
- jit.xor64(scratch2, scratch0);
-
- // x ^= y ^ (y >> 26);
- jit.move(scratch1, scratch2);
- jit.rshift64(AssemblyHelpers::TrustedImm32(26), scratch2);
- jit.xor64(scratch1, scratch2);
- jit.xor64(scratch2, scratch0);
-
- // m_high = x;
- storeToHigh(scratch0);
-
- // return x + y;
- jit.add64(scratch1, scratch0);
-
- // Extract random 53bit. [0, 53] bit is safe integer number ranges in double representation.
- jit.move(AssemblyHelpers::TrustedImm64((1ULL << 53) - 1), scratch1);
- jit.and64(scratch1, scratch0);
- // Now, scratch0 is always in range of int64_t. Safe to convert it to double with cvtsi2sdq.
- jit.convertInt64ToDouble(scratch0, result);
-
- // Convert `(53bit double integer value) / (1 << 53)` to `(53bit double integer value) * (1.0 / (1 << 53))`.
- // In latter case, `1.0 / (1 << 53)` will become a double value represented as (mantissa = 0 & exp = 970, it means 1e-(2**54)).
- static const double scale = 1.0 / (1ULL << 53);
-
- // Multiplying 1e-(2**54) with the double integer does not change anything of the mantissa part of the double integer.
- // It just reduces the exp part of the given 53bit double integer.
- // (Except for 0.0. This is specially handled and in this case, exp just becomes 0.)
- // Now we get 53bit precision random double value in [0, 1).
- jit.move(AssemblyHelpers::TrustedImmPtr(&scale), scratch1);
- jit.mulDouble(AssemblyHelpers::Address(scratch1), result);
-}
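The removed emitRandomThunkImpl inlines WTF::WeakRandom::advance(), an xorshift-style generator, then converts the low 53 bits of the sum to a double in [0, 1). A scalar rendering of what the emitted code computes (the struct name here is hypothetical):

    #include <cstdint>

    struct WeakRandomState { uint64_t low; uint64_t high; };

    double advance(WeakRandomState& state)
    {
        // xorshift128+ core, exactly as annotated in the removed code.
        uint64_t x = state.low;
        uint64_t y = state.high;
        state.low = y;
        x ^= x << 23;
        x ^= x >> 17;
        x ^= y ^ (y >> 26);
        state.high = x;

        // Keep 53 bits so the integer is exactly representable as a double,
        // then multiply by 2^-53 to land in [0, 1).
        static const double scale = 1.0 / (1ULL << 53);
        return static_cast<double>((x + y) & ((1ULL << 53) - 1)) * scale;
    }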
-
-void AssemblyHelpers::emitRandomThunk(JSGlobalObject* globalObject, GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, FPRReg result)
-{
- void* lowAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::lowOffset();
- void* highAddress = reinterpret_cast<uint8_t*>(globalObject) + JSGlobalObject::weakRandomOffset() + WeakRandom::highOffset();
-
- auto loadFromHigh = [&](GPRReg high) {
- load64(highAddress, high);
- };
- auto storeToHigh = [&](GPRReg high) {
- store64(high, highAddress);
- };
- auto loadFromLow = [&](GPRReg low) {
- load64(lowAddress, low);
- };
- auto storeToLow = [&](GPRReg low) {
- store64(low, lowAddress);
- };
-
- emitRandomThunkImpl(*this, scratch0, scratch1, scratch2, result, loadFromHigh, storeToHigh, loadFromLow, storeToLow);
-}
-
-void AssemblyHelpers::emitRandomThunk(GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, GPRReg scratch3, FPRReg result)
-{
- emitGetFromCallFrameHeaderPtr(JSStack::Callee, scratch3);
- emitLoadStructure(scratch3, scratch3, scratch0);
- loadPtr(Address(scratch3, Structure::globalObjectOffset()), scratch3);
- // Now, scratch3 holds JSGlobalObject*.
-
- auto loadFromHigh = [&](GPRReg high) {
- load64(Address(scratch3, JSGlobalObject::weakRandomOffset() + WeakRandom::highOffset()), high);
- };
- auto storeToHigh = [&](GPRReg high) {
- store64(high, Address(scratch3, JSGlobalObject::weakRandomOffset() + WeakRandom::highOffset()));
- };
- auto loadFromLow = [&](GPRReg low) {
- load64(Address(scratch3, JSGlobalObject::weakRandomOffset() + WeakRandom::lowOffset()), low);
- };
- auto storeToLow = [&](GPRReg low) {
- store64(low, Address(scratch3, JSGlobalObject::weakRandomOffset() + WeakRandom::lowOffset()));
- };
-
- emitRandomThunkImpl(*this, scratch0, scratch1, scratch2, result, loadFromHigh, storeToHigh, loadFromLow, storeToLow);
-}
-#endif
-
-void AssemblyHelpers::restoreCalleeSavesFromVMCalleeSavesBuffer()
-{
-#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
- char* sourceBuffer = bitwise_cast<char*>(m_vm->calleeSaveRegistersBuffer);
-
- RegisterAtOffsetList* allCalleeSaves = m_vm->getAllCalleeSaveRegisterOffsets();
- RegisterSet dontRestoreRegisters = RegisterSet::stackRegisters();
- unsigned registerCount = allCalleeSaves->size();
-
- for (unsigned i = 0; i < registerCount; i++) {
- RegisterAtOffset entry = allCalleeSaves->at(i);
- if (dontRestoreRegisters.get(entry.reg()))
- continue;
- if (entry.reg().isGPR())
- loadPtr(static_cast<void*>(sourceBuffer + entry.offset()), entry.reg().gpr());
- else
- loadDouble(TrustedImmPtr(sourceBuffer + entry.offset()), entry.reg().fpr());
- }
-#endif
-}
-
} // namespace JSC
#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/AssemblyHelpers.h b/Source/JavaScriptCore/jit/AssemblyHelpers.h
index 918af7dca..36d583139 100644
--- a/Source/JavaScriptCore/jit/AssemblyHelpers.h
+++ b/Source/JavaScriptCore/jit/AssemblyHelpers.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,19 +26,15 @@
#ifndef AssemblyHelpers_h
#define AssemblyHelpers_h
+#include <wtf/Platform.h>
+
#if ENABLE(JIT)
#include "CodeBlock.h"
-#include "CopyBarrier.h"
#include "FPRInfo.h"
#include "GPRInfo.h"
-#include "InlineCallFrame.h"
#include "JITCode.h"
#include "MacroAssembler.h"
-#include "MaxFrameExtentForSlowPathCall.h"
-#include "RegisterAtOffsetList.h"
-#include "RegisterSet.h"
-#include "TypeofType.h"
#include "VM.h"
namespace JSC {
@@ -62,331 +58,8 @@ public:
CodeBlock* codeBlock() { return m_codeBlock; }
VM* vm() { return m_vm; }
AssemblerType_T& assembler() { return m_assembler; }
-
- void checkStackPointerAlignment()
- {
- // This check is both unneeded and harder to write correctly for ARM64
-#if !defined(NDEBUG) && !CPU(ARM64)
- Jump stackPointerAligned = branchTestPtr(Zero, stackPointerRegister, TrustedImm32(0xf));
- abortWithReason(AHStackPointerMisaligned);
- stackPointerAligned.link(this);
-#endif
- }
-
- template<typename T>
- void storeCell(T cell, Address address)
- {
-#if USE(JSVALUE64)
- store64(cell, address);
-#else
- store32(cell, address.withOffset(PayloadOffset));
- store32(TrustedImm32(JSValue::CellTag), address.withOffset(TagOffset));
-#endif
- }
-
- void storeValue(JSValueRegs regs, Address address)
- {
-#if USE(JSVALUE64)
- store64(regs.gpr(), address);
-#else
- store32(regs.payloadGPR(), address.withOffset(PayloadOffset));
- store32(regs.tagGPR(), address.withOffset(TagOffset));
-#endif
- }
-
- void storeValue(JSValueRegs regs, BaseIndex address)
- {
-#if USE(JSVALUE64)
- store64(regs.gpr(), address);
-#else
- store32(regs.payloadGPR(), address.withOffset(PayloadOffset));
- store32(regs.tagGPR(), address.withOffset(TagOffset));
-#endif
- }
-
- void storeValue(JSValueRegs regs, void* address)
- {
-#if USE(JSVALUE64)
- store64(regs.gpr(), address);
-#else
- store32(regs.payloadGPR(), bitwise_cast<void*>(bitwise_cast<uintptr_t>(address) + PayloadOffset));
- store32(regs.tagGPR(), bitwise_cast<void*>(bitwise_cast<uintptr_t>(address) + TagOffset));
-#endif
- }
-
- void loadValue(Address address, JSValueRegs regs)
- {
-#if USE(JSVALUE64)
- load64(address, regs.gpr());
-#else
- if (address.base == regs.payloadGPR()) {
- load32(address.withOffset(TagOffset), regs.tagGPR());
- load32(address.withOffset(PayloadOffset), regs.payloadGPR());
- } else {
- load32(address.withOffset(PayloadOffset), regs.payloadGPR());
- load32(address.withOffset(TagOffset), regs.tagGPR());
- }
-#endif
- }
-
- void loadValue(BaseIndex address, JSValueRegs regs)
- {
-#if USE(JSVALUE64)
- load64(address, regs.gpr());
-#else
- if (address.base == regs.payloadGPR() || address.index == regs.payloadGPR()) {
- // We actually could handle the case where the registers are aliased to both
- // tag and payload, but we don't for now.
- RELEASE_ASSERT(address.base != regs.tagGPR());
- RELEASE_ASSERT(address.index != regs.tagGPR());
-
- load32(address.withOffset(TagOffset), regs.tagGPR());
- load32(address.withOffset(PayloadOffset), regs.payloadGPR());
- } else {
- load32(address.withOffset(PayloadOffset), regs.payloadGPR());
- load32(address.withOffset(TagOffset), regs.tagGPR());
- }
-#endif
- }
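On JSVALUE32_64 a JSValue is two 32-bit words, so the removed loadValue overloads order their loads to avoid clobbering an address register that is still needed: if the payload register aliases the base (or index), the tag is read first. A toy register-file illustration of the hazard; the layout and indices are illustrative, not JSC's, and the real code asserts the tag register never aliases the address:

    #include <cstdint>

    void loadValue(uint32_t* regs, int base, int payloadReg, int tagReg, const uint32_t* memory)
    {
        if (base == payloadReg) {
            // Writing the payload would overwrite the address, so read the
            // tag word first while regs[base] is still intact.
            regs[tagReg] = memory[regs[base] + 1];     // TagOffset
            regs[payloadReg] = memory[regs[base] + 0]; // PayloadOffset
        } else {
            regs[payloadReg] = memory[regs[base] + 0];
            regs[tagReg] = memory[regs[base] + 1];
        }
    }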
-
- void moveValueRegs(JSValueRegs srcRegs, JSValueRegs destRegs)
- {
-#if USE(JSVALUE32_64)
- move(srcRegs.tagGPR(), destRegs.tagGPR());
-#endif
- move(srcRegs.payloadGPR(), destRegs.payloadGPR());
- }
-
- void moveValue(JSValue value, JSValueRegs regs)
- {
-#if USE(JSVALUE64)
- move(Imm64(JSValue::encode(value)), regs.gpr());
-#else
- move(Imm32(value.tag()), regs.tagGPR());
- move(Imm32(value.payload()), regs.payloadGPR());
-#endif
- }
-
- void moveTrustedValue(JSValue value, JSValueRegs regs)
- {
-#if USE(JSVALUE64)
- move(TrustedImm64(JSValue::encode(value)), regs.gpr());
-#else
- move(TrustedImm32(value.tag()), regs.tagGPR());
- move(TrustedImm32(value.payload()), regs.payloadGPR());
-#endif
- }
- void storeTrustedValue(JSValue value, Address address)
- {
-#if USE(JSVALUE64)
- store64(TrustedImm64(JSValue::encode(value)), address);
-#else
- store32(TrustedImm32(value.tag()), address.withOffset(TagOffset));
- store32(TrustedImm32(value.payload()), address.withOffset(PayloadOffset));
-#endif
- }
-
- void storeTrustedValue(JSValue value, BaseIndex address)
- {
-#if USE(JSVALUE64)
- store64(TrustedImm64(JSValue::encode(value)), address);
-#else
- store32(TrustedImm32(value.tag()), address.withOffset(TagOffset));
- store32(TrustedImm32(value.payload()), address.withOffset(PayloadOffset));
-#endif
- }
-
- void emitSaveCalleeSavesFor(CodeBlock* codeBlock)
- {
- ASSERT(codeBlock);
-
- RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters();
- RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());
- unsigned registerCount = calleeSaves->size();
-
- for (unsigned i = 0; i < registerCount; i++) {
- RegisterAtOffset entry = calleeSaves->at(i);
- if (dontSaveRegisters.get(entry.reg()))
- continue;
- storePtr(entry.reg().gpr(), Address(framePointerRegister, entry.offset()));
- }
- }
-
- enum RestoreTagRegisterMode { UseExistingTagRegisterContents, CopyBaselineCalleeSavedRegistersFromBaseFrame };
-
- void emitSaveOrCopyCalleeSavesFor(CodeBlock* codeBlock, VirtualRegister offsetVirtualRegister, RestoreTagRegisterMode tagRegisterMode, GPRReg temp)
- {
- ASSERT(codeBlock);
-
- RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters();
- RegisterSet dontSaveRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());
- unsigned registerCount = calleeSaves->size();
-
-#if USE(JSVALUE64)
- RegisterSet baselineCalleeSaves = RegisterSet::llintBaselineCalleeSaveRegisters();
-#endif
-
- for (unsigned i = 0; i < registerCount; i++) {
- RegisterAtOffset entry = calleeSaves->at(i);
- if (dontSaveRegisters.get(entry.reg()))
- continue;
-
- GPRReg registerToWrite;
-
-#if USE(JSVALUE32_64)
- UNUSED_PARAM(tagRegisterMode);
- UNUSED_PARAM(temp);
-#else
- if (tagRegisterMode == CopyBaselineCalleeSavedRegistersFromBaseFrame && baselineCalleeSaves.get(entry.reg())) {
- registerToWrite = temp;
- loadPtr(AssemblyHelpers::Address(GPRInfo::callFrameRegister, entry.offset()), registerToWrite);
- } else
-#endif
- registerToWrite = entry.reg().gpr();
-
- storePtr(registerToWrite, Address(framePointerRegister, offsetVirtualRegister.offsetInBytes() + entry.offset()));
- }
- }
-
- void emitRestoreCalleeSavesFor(CodeBlock* codeBlock)
- {
- ASSERT(codeBlock);
-
- RegisterAtOffsetList* calleeSaves = codeBlock->calleeSaveRegisters();
- RegisterSet dontRestoreRegisters = RegisterSet(RegisterSet::stackRegisters(), RegisterSet::allFPRs());
- unsigned registerCount = calleeSaves->size();
-
- for (unsigned i = 0; i < registerCount; i++) {
- RegisterAtOffset entry = calleeSaves->at(i);
- if (dontRestoreRegisters.get(entry.reg()))
- continue;
- loadPtr(Address(framePointerRegister, entry.offset()), entry.reg().gpr());
- }
- }
-
- void emitSaveCalleeSaves()
- {
- emitSaveCalleeSavesFor(codeBlock());
- }
-
- void emitRestoreCalleeSaves()
- {
- emitRestoreCalleeSavesFor(codeBlock());
- }
-
- void copyCalleeSavesToVMCalleeSavesBuffer(const TempRegisterSet& usedRegisters = { RegisterSet::stubUnavailableRegisters() })
- {
-#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
- GPRReg temp1 = usedRegisters.getFreeGPR(0);
-
- move(TrustedImmPtr(m_vm->calleeSaveRegistersBuffer), temp1);
-
- RegisterAtOffsetList* allCalleeSaves = m_vm->getAllCalleeSaveRegisterOffsets();
- RegisterSet dontCopyRegisters = RegisterSet::stackRegisters();
- unsigned registerCount = allCalleeSaves->size();
-
- for (unsigned i = 0; i < registerCount; i++) {
- RegisterAtOffset entry = allCalleeSaves->at(i);
- if (dontCopyRegisters.get(entry.reg()))
- continue;
- if (entry.reg().isGPR())
- storePtr(entry.reg().gpr(), Address(temp1, entry.offset()));
- else
- storeDouble(entry.reg().fpr(), Address(temp1, entry.offset()));
- }
-#else
- UNUSED_PARAM(usedRegisters);
-#endif
- }
-
- void restoreCalleeSavesFromVMCalleeSavesBuffer();
-
- void copyCalleeSavesFromFrameOrRegisterToVMCalleeSavesBuffer(const TempRegisterSet& usedRegisters = { RegisterSet::stubUnavailableRegisters() })
- {
-#if NUMBER_OF_CALLEE_SAVES_REGISTERS > 0
- GPRReg temp1 = usedRegisters.getFreeGPR(0);
- GPRReg temp2 = usedRegisters.getFreeGPR(1);
- FPRReg fpTemp = usedRegisters.getFreeFPR();
- ASSERT(temp2 != InvalidGPRReg);
-
- ASSERT(codeBlock());
-
- // Copy saved calleeSaves on stack or unsaved calleeSaves in register to vm calleeSave buffer
- move(TrustedImmPtr(m_vm->calleeSaveRegistersBuffer), temp1);
-
- RegisterAtOffsetList* allCalleeSaves = m_vm->getAllCalleeSaveRegisterOffsets();
- RegisterAtOffsetList* currentCalleeSaves = codeBlock()->calleeSaveRegisters();
- RegisterSet dontCopyRegisters = RegisterSet::stackRegisters();
- unsigned registerCount = allCalleeSaves->size();
-
- for (unsigned i = 0; i < registerCount; i++) {
- RegisterAtOffset vmEntry = allCalleeSaves->at(i);
- if (dontCopyRegisters.get(vmEntry.reg()))
- continue;
- RegisterAtOffset* currentFrameEntry = currentCalleeSaves->find(vmEntry.reg());
-
- if (vmEntry.reg().isGPR()) {
- GPRReg regToStore;
- if (currentFrameEntry) {
- // Load calleeSave from stack into temp register
- regToStore = temp2;
- loadPtr(Address(framePointerRegister, currentFrameEntry->offset()), regToStore);
- } else
- // Just store callee save directly
- regToStore = vmEntry.reg().gpr();
-
- storePtr(regToStore, Address(temp1, vmEntry.offset()));
- } else {
- FPRReg fpRegToStore;
- if (currentFrameEntry) {
- // Load calleeSave from stack into temp register
- fpRegToStore = fpTemp;
- loadDouble(Address(framePointerRegister, currentFrameEntry->offset()), fpRegToStore);
- } else
- // Just store callee save directly
- fpRegToStore = vmEntry.reg().fpr();
-
- storeDouble(fpRegToStore, Address(temp1, vmEntry.offset()));
- }
- }
-#else
- UNUSED_PARAM(usedRegisters);
-#endif
- }
-
- void emitMaterializeTagCheckRegisters()
- {
-#if USE(JSVALUE64)
- move(MacroAssembler::TrustedImm64(TagTypeNumber), GPRInfo::tagTypeNumberRegister);
- orPtr(MacroAssembler::TrustedImm32(TagBitTypeOther), GPRInfo::tagTypeNumberRegister, GPRInfo::tagMaskRegister);
-#endif
- }
-
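The two registers materialized by the removed helper hold the JSVALUE64 NaN-boxing constants, and most of the branchIf* predicates in this file reduce to comparisons against them. Scalar equivalents, with constant values as defined by JSC's JSValue encoding:

    #include <cstdint>

    static const uint64_t TagBitTypeOther = 0x2;
    static const uint64_t TagTypeNumber   = 0xffff000000000000ull;
    static const uint64_t TagMask         = TagTypeNumber | TagBitTypeOther;

    bool isInt32(uint64_t bits)  { return bits >= TagTypeNumber; }        // branch64(AboveOrEqual, gpr, tagTypeNumberRegister)
    bool isNumber(uint64_t bits) { return (bits & TagTypeNumber) != 0; }  // branchTest64(NonZero, ...)
    bool isCell(uint64_t bits)   { return (bits & TagMask) == 0; }        // branchTest64(Zero, gpr, tagMaskRegister)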
#if CPU(X86_64) || CPU(X86)
- static size_t prologueStackPointerDelta()
- {
- // Prologue only saves the framePointerRegister
- return sizeof(void*);
- }
-
- void emitFunctionPrologue()
- {
- push(framePointerRegister);
- move(stackPointerRegister, framePointerRegister);
- }
-
- void emitFunctionEpilogueWithEmptyFrame()
- {
- pop(framePointerRegister);
- }
-
- void emitFunctionEpilogue()
- {
- move(framePointerRegister, stackPointerRegister);
- pop(framePointerRegister);
- }
-
void preserveReturnAddressAfterCall(GPRReg reg)
{
pop(reg);
@@ -404,29 +77,6 @@ public:
#endif // CPU(X86_64) || CPU(X86)
#if CPU(ARM) || CPU(ARM64)
- static size_t prologueStackPointerDelta()
- {
- // Prologue saves the framePointerRegister and linkRegister
- return 2 * sizeof(void*);
- }
-
- void emitFunctionPrologue()
- {
- pushPair(framePointerRegister, linkRegister);
- move(stackPointerRegister, framePointerRegister);
- }
-
- void emitFunctionEpilogueWithEmptyFrame()
- {
- popPair(framePointerRegister, linkRegister);
- }
-
- void emitFunctionEpilogue()
- {
- move(framePointerRegister, stackPointerRegister);
- emitFunctionEpilogueWithEmptyFrame();
- }
-
ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg)
{
move(linkRegister, reg);
@@ -444,29 +94,6 @@ public:
#endif
#if CPU(MIPS)
- static size_t prologueStackPointerDelta()
- {
- // Prologue saves the framePointerRegister and returnAddressRegister
- return 2 * sizeof(void*);
- }
-
- void emitFunctionPrologue()
- {
- pushPair(framePointerRegister, returnAddressRegister);
- move(stackPointerRegister, framePointerRegister);
- }
-
- void emitFunctionEpilogueWithEmptyFrame()
- {
- popPair(framePointerRegister, returnAddressRegister);
- }
-
- void emitFunctionEpilogue()
- {
- move(framePointerRegister, stackPointerRegister);
- emitFunctionEpilogueWithEmptyFrame();
- }
-
ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg)
{
move(returnAddressRegister, reg);
@@ -484,26 +111,6 @@ public:
#endif
#if CPU(SH4)
- static size_t prologueStackPointerDelta()
- {
- // Prologue saves the framePointerRegister and link register
- return 2 * sizeof(void*);
- }
-
- void emitFunctionPrologue()
- {
- push(linkRegister);
- push(framePointerRegister);
- move(stackPointerRegister, framePointerRegister);
- }
-
- void emitFunctionEpilogue()
- {
- move(framePointerRegister, stackPointerRegister);
- pop(framePointerRegister);
- pop(linkRegister);
- }
-
ALWAYS_INLINE void preserveReturnAddressAfterCall(RegisterID reg)
{
m_assembler.stspr(reg);
@@ -520,26 +127,16 @@ public:
}
#endif
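The per-architecture prologue and epilogue helpers removed in the hunks above differ only in how many words sit between the incoming stack pointer and the newly established frame pointer, which is what the removed prologueStackPointerDelta() reported. A one-function summary (a sketch; the Arch enum is illustrative):

    #include <cstddef>

    enum class Arch { X86, ARM, ARM64, MIPS, SH4 };

    constexpr std::size_t prologueStackPointerDelta(Arch arch)
    {
        // x86 prologues push only the frame pointer (the return address is
        // already on the stack); the others also save a return-address
        // register (lr or ra), so two words are pushed.
        return arch == Arch::X86 ? sizeof(void*) : 2 * sizeof(void*);
    }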
- void emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry entry, GPRReg to, GPRReg from = GPRInfo::callFrameRegister)
+ void emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry entry, GPRReg to)
{
- loadPtr(Address(from, entry * sizeof(Register)), to);
+ loadPtr(Address(GPRInfo::callFrameRegister, entry * sizeof(Register)), to);
}
- void emitGetFromCallFrameHeader32(JSStack::CallFrameHeaderEntry entry, GPRReg to, GPRReg from = GPRInfo::callFrameRegister)
- {
- load32(Address(from, entry * sizeof(Register)), to);
- }
-#if USE(JSVALUE64)
- void emitGetFromCallFrameHeader64(JSStack::CallFrameHeaderEntry entry, GPRReg to, GPRReg from = GPRInfo::callFrameRegister)
- {
- load64(Address(from, entry * sizeof(Register)), to);
- }
-#endif // USE(JSVALUE64)
void emitPutToCallFrameHeader(GPRReg from, JSStack::CallFrameHeaderEntry entry)
{
storePtr(from, Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
}
- void emitPutToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry entry)
+ void emitPutImmediateToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry entry)
{
storePtr(TrustedImmPtr(value), Address(GPRInfo::callFrameRegister, entry * sizeof(Register)));
}
@@ -553,6 +150,10 @@ public:
storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::callerFrameOffset()));
}
+ void emitGetReturnPCFromCallFrameHeaderPtr(RegisterID to)
+ {
+ loadPtr(Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()), to);
+ }
void emitPutReturnPCToCallFrameHeader(RegisterID from)
{
storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()));
@@ -562,287 +163,14 @@ public:
storePtr(from, Address(GPRInfo::callFrameRegister, CallFrame::returnPCOffset()));
}
- // emitPutToCallFrameHeaderBeforePrologue() and related are used to access callee frame header
- // fields before the code from emitFunctionPrologue() has executed.
- // First, the access is via the stack pointer. Second, the address calculation must also take
- // into account that the stack pointer may not have been adjusted down for the return PC and/or
- // caller's frame pointer. On some platforms, the callee is responsible for pushing the
- // "link register" containing the return address in the function prologue.
-#if USE(JSVALUE64)
- void emitPutToCallFrameHeaderBeforePrologue(GPRReg from, JSStack::CallFrameHeaderEntry entry)
- {
- storePtr(from, Address(stackPointerRegister, entry * static_cast<ptrdiff_t>(sizeof(Register)) - prologueStackPointerDelta()));
- }
-#else
- void emitPutPayloadToCallFrameHeaderBeforePrologue(GPRReg from, JSStack::CallFrameHeaderEntry entry)
- {
- storePtr(from, Address(stackPointerRegister, entry * static_cast<ptrdiff_t>(sizeof(Register)) - prologueStackPointerDelta() + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
- }
-
- void emitPutTagToCallFrameHeaderBeforePrologue(TrustedImm32 tag, JSStack::CallFrameHeaderEntry entry)
- {
- storePtr(tag, Address(stackPointerRegister, entry * static_cast<ptrdiff_t>(sizeof(Register)) - prologueStackPointerDelta() + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
- }
-#endif
-
- JumpList branchIfNotEqual(JSValueRegs regs, JSValue value)
- {
-#if USE(JSVALUE64)
- return branch64(NotEqual, regs.gpr(), TrustedImm64(JSValue::encode(value)));
-#else
- JumpList result;
- result.append(branch32(NotEqual, regs.tagGPR(), TrustedImm32(value.tag())));
- if (value.isEmpty() || value.isUndefinedOrNull())
- return result; // These don't have anything interesting in the payload.
- result.append(branch32(NotEqual, regs.payloadGPR(), TrustedImm32(value.payload())));
- return result;
-#endif
- }
-
- Jump branchIfEqual(JSValueRegs regs, JSValue value)
+ Jump branchIfNotCell(GPRReg reg)
{
#if USE(JSVALUE64)
- return branch64(Equal, regs.gpr(), TrustedImm64(JSValue::encode(value)));
+ return branchTest64(MacroAssembler::NonZero, reg, GPRInfo::tagMaskRegister);
#else
- Jump notEqual;
- // These don't have anything interesting in the payload.
- if (!value.isEmpty() && !value.isUndefinedOrNull())
- notEqual = branch32(NotEqual, regs.payloadGPR(), TrustedImm32(value.payload()));
- Jump result = branch32(Equal, regs.tagGPR(), TrustedImm32(value.tag()));
- if (notEqual.isSet())
- notEqual.link(this);
- return result;
-#endif
- }
-
- enum TagRegistersMode {
- DoNotHaveTagRegisters,
- HaveTagRegisters
- };
-
- Jump branchIfNotCell(GPRReg reg, TagRegistersMode mode = HaveTagRegisters)
- {
-#if USE(JSVALUE64)
- if (mode == HaveTagRegisters)
- return branchTest64(NonZero, reg, GPRInfo::tagMaskRegister);
- return branchTest64(NonZero, reg, TrustedImm64(TagMask));
-#else
- UNUSED_PARAM(mode);
return branch32(MacroAssembler::NotEqual, reg, TrustedImm32(JSValue::CellTag));
#endif
}
- Jump branchIfNotCell(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters)
- {
-#if USE(JSVALUE64)
- return branchIfNotCell(regs.gpr(), mode);
-#else
- return branchIfNotCell(regs.tagGPR(), mode);
-#endif
- }
-
- Jump branchIfCell(GPRReg reg, TagRegistersMode mode = HaveTagRegisters)
- {
-#if USE(JSVALUE64)
- if (mode == HaveTagRegisters)
- return branchTest64(Zero, reg, GPRInfo::tagMaskRegister);
- return branchTest64(Zero, reg, TrustedImm64(TagMask));
-#else
- UNUSED_PARAM(mode);
- return branch32(MacroAssembler::Equal, reg, TrustedImm32(JSValue::CellTag));
-#endif
- }
- Jump branchIfCell(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters)
- {
-#if USE(JSVALUE64)
- return branchIfCell(regs.gpr(), mode);
-#else
- return branchIfCell(regs.tagGPR(), mode);
-#endif
- }
-
- Jump branchIfOther(JSValueRegs regs, GPRReg tempGPR)
- {
-#if USE(JSVALUE64)
- move(regs.gpr(), tempGPR);
- and64(TrustedImm32(~TagBitUndefined), tempGPR);
- return branch64(Equal, tempGPR, TrustedImm64(ValueNull));
-#else
- or32(TrustedImm32(1), regs.tagGPR(), tempGPR);
- return branch32(Equal, tempGPR, TrustedImm32(JSValue::NullTag));
-#endif
- }
-
- Jump branchIfNotOther(JSValueRegs regs, GPRReg tempGPR)
- {
-#if USE(JSVALUE64)
- move(regs.gpr(), tempGPR);
- and64(TrustedImm32(~TagBitUndefined), tempGPR);
- return branch64(NotEqual, tempGPR, TrustedImm64(ValueNull));
-#else
- or32(TrustedImm32(1), regs.tagGPR(), tempGPR);
- return branch32(NotEqual, tempGPR, TrustedImm32(JSValue::NullTag));
-#endif
- }
-
- Jump branchIfInt32(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters)
- {
-#if USE(JSVALUE64)
- if (mode == HaveTagRegisters)
- return branch64(AboveOrEqual, regs.gpr(), GPRInfo::tagTypeNumberRegister);
- return branch64(AboveOrEqual, regs.gpr(), TrustedImm64(TagTypeNumber));
-#else
- UNUSED_PARAM(mode);
- return branch32(Equal, regs.tagGPR(), TrustedImm32(JSValue::Int32Tag));
-#endif
- }
-
-#if USE(JSVALUE64)
- Jump branchIfNotInt32(GPRReg gpr, TagRegistersMode mode = HaveTagRegisters)
- {
- if (mode == HaveTagRegisters)
- return branch64(Below, gpr, GPRInfo::tagTypeNumberRegister);
- return branch64(Below, gpr, TrustedImm64(TagTypeNumber));
- }
-#endif
-
- Jump branchIfNotInt32(JSValueRegs regs, TagRegistersMode mode = HaveTagRegisters)
- {
-#if USE(JSVALUE64)
- return branchIfNotInt32(regs.gpr(), mode);
-#else
- UNUSED_PARAM(mode);
- return branch32(NotEqual, regs.tagGPR(), TrustedImm32(JSValue::Int32Tag));
-#endif
- }
-
- // Note that the tempGPR is not used in 64-bit mode.
- Jump branchIfNumber(JSValueRegs regs, GPRReg tempGPR, TagRegistersMode mode = HaveTagRegisters)
- {
-#if USE(JSVALUE64)
- UNUSED_PARAM(tempGPR);
- if (mode == HaveTagRegisters)
- return branchTest64(NonZero, regs.gpr(), GPRInfo::tagTypeNumberRegister);
- return branchTest64(NonZero, regs.gpr(), TrustedImm64(TagTypeNumber));
-#else
- UNUSED_PARAM(mode);
- add32(TrustedImm32(1), regs.tagGPR(), tempGPR);
- return branch32(Below, tempGPR, TrustedImm32(JSValue::LowestTag + 1));
-#endif
- }
-
- // Note that the tempGPR is not used in 64-bit mode.
- Jump branchIfNotNumber(JSValueRegs regs, GPRReg tempGPR, TagRegistersMode mode = HaveTagRegisters)
- {
-#if USE(JSVALUE64)
- UNUSED_PARAM(tempGPR);
- if (mode == HaveTagRegisters)
- return branchTest64(Zero, regs.gpr(), GPRInfo::tagTypeNumberRegister);
- return branchTest64(Zero, regs.gpr(), TrustedImm64(TagTypeNumber));
-#else
- UNUSED_PARAM(mode);
- add32(TrustedImm32(1), regs.tagGPR(), tempGPR);
- return branch32(AboveOrEqual, tempGPR, TrustedImm32(JSValue::LowestTag + 1));
-#endif
- }
-
- // Note that the tempGPR is not used in 32-bit mode.
- Jump branchIfBoolean(JSValueRegs regs, GPRReg tempGPR)
- {
-#if USE(JSVALUE64)
- move(regs.gpr(), tempGPR);
- xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), tempGPR);
- return branchTest64(Zero, tempGPR, TrustedImm32(static_cast<int32_t>(~1)));
-#else
- UNUSED_PARAM(tempGPR);
- return branch32(Equal, regs.tagGPR(), TrustedImm32(JSValue::BooleanTag));
-#endif
- }
-
- // Note that the tempGPR is not used in 32-bit mode.
- Jump branchIfNotBoolean(JSValueRegs regs, GPRReg tempGPR)
- {
-#if USE(JSVALUE64)
- move(regs.gpr(), tempGPR);
- xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), tempGPR);
- return branchTest64(NonZero, tempGPR, TrustedImm32(static_cast<int32_t>(~1)));
-#else
- UNUSED_PARAM(tempGPR);
- return branch32(NotEqual, regs.tagGPR(), TrustedImm32(JSValue::BooleanTag));
-#endif
- }
-
- Jump branchIfObject(GPRReg cellGPR)
- {
- return branch8(
- AboveOrEqual, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType));
- }
-
- Jump branchIfNotObject(GPRReg cellGPR)
- {
- return branch8(
- Below, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType));
- }
-
- Jump branchIfType(GPRReg cellGPR, JSType type)
- {
- return branch8(Equal, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(type));
- }
-
- Jump branchIfNotType(GPRReg cellGPR, JSType type)
- {
- return branch8(NotEqual, Address(cellGPR, JSCell::typeInfoTypeOffset()), TrustedImm32(type));
- }
-
- Jump branchIfString(GPRReg cellGPR) { return branchIfType(cellGPR, StringType); }
- Jump branchIfNotString(GPRReg cellGPR) { return branchIfNotType(cellGPR, StringType); }
- Jump branchIfSymbol(GPRReg cellGPR) { return branchIfType(cellGPR, SymbolType); }
- Jump branchIfNotSymbol(GPRReg cellGPR) { return branchIfNotType(cellGPR, SymbolType); }
- Jump branchIfFunction(GPRReg cellGPR) { return branchIfType(cellGPR, JSFunctionType); }
- Jump branchIfNotFunction(GPRReg cellGPR) { return branchIfNotType(cellGPR, JSFunctionType); }
-
- Jump branchIfEmpty(JSValueRegs regs)
- {
-#if USE(JSVALUE64)
- return branchTest64(Zero, regs.gpr());
-#else
- return branch32(Equal, regs.tagGPR(), TrustedImm32(JSValue::EmptyValueTag));
-#endif
- }
-
- JumpList branchIfNotType(
- JSValueRegs, GPRReg tempGPR, const InferredType::Descriptor&, TagRegistersMode);
-
- template<typename T>
- Jump branchStructure(RelationalCondition condition, T leftHandSide, Structure* structure)
- {
-#if USE(JSVALUE64)
- return branch32(condition, leftHandSide, TrustedImm32(structure->id()));
-#else
- return branchPtr(condition, leftHandSide, TrustedImmPtr(structure));
-#endif
- }
-
- Jump branchIfToSpace(GPRReg storageGPR)
- {
- return branchTest32(Zero, storageGPR, TrustedImm32(CopyBarrierBase::spaceBits));
- }
-
- Jump branchIfNotToSpace(GPRReg storageGPR)
- {
- return branchTest32(NonZero, storageGPR, TrustedImm32(CopyBarrierBase::spaceBits));
- }
-
- void removeSpaceBits(GPRReg storageGPR)
- {
- andPtr(TrustedImmPtr(~static_cast<uintptr_t>(CopyBarrierBase::spaceBits)), storageGPR);
- }
-
- Jump branchIfFastTypedArray(GPRReg baseGPR);
- Jump branchIfNotFastTypedArray(GPRReg baseGPR);
-
- // Returns a jump to slow path for when we need to execute the barrier. Note that baseGPR and
- // resultGPR must be different.
- Jump loadTypedArrayVector(GPRReg baseGPR, GPRReg resultGPR);
static Address addressForByteOffset(ptrdiff_t byteOffset)
{
@@ -855,10 +183,6 @@ public:
}
static Address addressFor(VirtualRegister virtualRegister)
{
- // NB. It's tempting on some architectures to sometimes use an offset from the stack
- // register because for some offsets that will encode to a smaller instruction. But we
- // cannot do this. We use this in places where the stack pointer has been moved to some
- // unpredictable location.
ASSERT(virtualRegister.isValid());
return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register));
}
@@ -870,7 +194,7 @@ public:
static Address tagFor(VirtualRegister virtualRegister)
{
ASSERT(virtualRegister.isValid());
- return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register) + TagOffset);
+ return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
}
static Address tagFor(int operand)
{
@@ -880,69 +204,33 @@ public:
static Address payloadFor(VirtualRegister virtualRegister)
{
ASSERT(virtualRegister.isValid());
- return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register) + PayloadOffset);
+ return Address(GPRInfo::callFrameRegister, virtualRegister.offset() * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
}
static Address payloadFor(int operand)
{
return payloadFor(static_cast<VirtualRegister>(operand));
}
- // Access to our fixed callee CallFrame.
- static Address calleeFrameSlot(int slot)
- {
- ASSERT(slot >= JSStack::CallerFrameAndPCSize);
- return Address(stackPointerRegister, sizeof(Register) * (slot - JSStack::CallerFrameAndPCSize));
- }
-
- // Access to our fixed callee CallFrame.
- static Address calleeArgumentSlot(int argument)
+ Jump branchIfNotObject(GPRReg structureReg)
{
- return calleeFrameSlot(virtualRegisterForArgument(argument).offset());
+ return branch8(Below, Address(structureReg, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType));
}
- static Address calleeFrameTagSlot(int slot)
+ static GPRReg selectScratchGPR(GPRReg preserve1 = InvalidGPRReg, GPRReg preserve2 = InvalidGPRReg, GPRReg preserve3 = InvalidGPRReg, GPRReg preserve4 = InvalidGPRReg)
{
- return calleeFrameSlot(slot).withOffset(TagOffset);
- }
-
- static Address calleeFramePayloadSlot(int slot)
- {
- return calleeFrameSlot(slot).withOffset(PayloadOffset);
- }
-
- static Address calleeArgumentTagSlot(int argument)
- {
- return calleeArgumentSlot(argument).withOffset(TagOffset);
- }
-
- static Address calleeArgumentPayloadSlot(int argument)
- {
- return calleeArgumentSlot(argument).withOffset(PayloadOffset);
- }
-
- static Address calleeFrameCallerFrame()
- {
- return calleeFrameSlot(0).withOffset(CallFrame::callerFrameOffset());
- }
-
- static GPRReg selectScratchGPR(GPRReg preserve1 = InvalidGPRReg, GPRReg preserve2 = InvalidGPRReg, GPRReg preserve3 = InvalidGPRReg, GPRReg preserve4 = InvalidGPRReg, GPRReg preserve5 = InvalidGPRReg)
- {
- if (preserve1 != GPRInfo::regT0 && preserve2 != GPRInfo::regT0 && preserve3 != GPRInfo::regT0 && preserve4 != GPRInfo::regT0 && preserve5 != GPRInfo::regT0)
+ if (preserve1 != GPRInfo::regT0 && preserve2 != GPRInfo::regT0 && preserve3 != GPRInfo::regT0 && preserve4 != GPRInfo::regT0)
return GPRInfo::regT0;
- if (preserve1 != GPRInfo::regT1 && preserve2 != GPRInfo::regT1 && preserve3 != GPRInfo::regT1 && preserve4 != GPRInfo::regT1 && preserve5 != GPRInfo::regT1)
+ if (preserve1 != GPRInfo::regT1 && preserve2 != GPRInfo::regT1 && preserve3 != GPRInfo::regT1 && preserve4 != GPRInfo::regT1)
return GPRInfo::regT1;
- if (preserve1 != GPRInfo::regT2 && preserve2 != GPRInfo::regT2 && preserve3 != GPRInfo::regT2 && preserve4 != GPRInfo::regT2 && preserve5 != GPRInfo::regT2)
+ if (preserve1 != GPRInfo::regT2 && preserve2 != GPRInfo::regT2 && preserve3 != GPRInfo::regT2 && preserve4 != GPRInfo::regT2)
return GPRInfo::regT2;
- if (preserve1 != GPRInfo::regT3 && preserve2 != GPRInfo::regT3 && preserve3 != GPRInfo::regT3 && preserve4 != GPRInfo::regT3 && preserve5 != GPRInfo::regT3)
+ if (preserve1 != GPRInfo::regT3 && preserve2 != GPRInfo::regT3 && preserve3 != GPRInfo::regT3 && preserve4 != GPRInfo::regT3)
return GPRInfo::regT3;
- if (preserve1 != GPRInfo::regT4 && preserve2 != GPRInfo::regT4 && preserve3 != GPRInfo::regT4 && preserve4 != GPRInfo::regT4 && preserve5 != GPRInfo::regT4)
- return GPRInfo::regT4;
-
- return GPRInfo::regT5;
+ return GPRInfo::regT4;
}
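The trimmed selectScratchGPR is a linear scan over regT0..regT3 with regT4 as the fallback; since at most four registers can be asked to be preserved, one of the five candidates is always free. A generic sketch of the same search:

    #include <algorithm>
    #include <array>

    using GPRReg = int; // stand-in for GPRInfo's register enum

    GPRReg selectScratchGPR(const std::array<GPRReg, 4>& preserve,
                            const std::array<GPRReg, 5>& candidates)
    {
        for (GPRReg reg : candidates) {
            // Return the first candidate not named in the preserve list.
            if (std::find(preserve.begin(), preserve.end(), reg) == preserve.end())
                return reg;
        }
        return candidates.back(); // unreachable: 4 preserves cannot exclude 5 candidates
    }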
// Add a debug call. This call has no effect on JIT code execution state.
@@ -1010,8 +298,6 @@ public:
void jitAssertIsCell(GPRReg);
void jitAssertHasValidCallFrame();
void jitAssertIsNull(GPRReg);
- void jitAssertTagsInPlace();
- void jitAssertArgumentCountSane();
#else
void jitAssertIsInt32(GPRReg) { }
void jitAssertIsJSInt32(GPRReg) { }
@@ -1020,14 +306,8 @@ public:
void jitAssertIsCell(GPRReg) { }
void jitAssertHasValidCallFrame() { }
void jitAssertIsNull(GPRReg) { }
- void jitAssertTagsInPlace() { }
- void jitAssertArgumentCountSane() { }
#endif
- void jitReleaseAssertNoException();
-
- void purifyNaN(FPRReg);
-
// These methods convert between doubles, and doubles boxed and JSValues.
#if USE(JSVALUE64)
GPRReg boxDouble(FPRReg fpr, GPRReg gpr)
@@ -1037,28 +317,14 @@ public:
jitAssertIsJSDouble(gpr);
return gpr;
}
- FPRReg unboxDoubleWithoutAssertions(GPRReg gpr, GPRReg resultGPR, FPRReg fpr)
- {
- add64(GPRInfo::tagTypeNumberRegister, gpr, resultGPR);
- move64ToDouble(resultGPR, fpr);
- return fpr;
- }
- FPRReg unboxDouble(GPRReg gpr, GPRReg resultGPR, FPRReg fpr)
+ FPRReg unboxDouble(GPRReg gpr, FPRReg fpr)
{
jitAssertIsJSDouble(gpr);
- return unboxDoubleWithoutAssertions(gpr, resultGPR, fpr);
+ add64(GPRInfo::tagTypeNumberRegister, gpr);
+ move64ToDouble(gpr, fpr);
+ return fpr;
}
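Both unboxDouble variants implement the JSVALUE64 double encoding: a boxed double is the raw IEEE 754 bit pattern offset by TagTypeNumber so it cannot collide with the cell and integer tag ranges, and adding the constant back recovers the original bits. Scalar form (a sketch):

    #include <cstdint>
    #include <cstring>

    static const uint64_t TagTypeNumber = 0xffff000000000000ull;

    double unboxDouble(uint64_t boxed)
    {
        uint64_t bits = boxed + TagTypeNumber; // add64(tagTypeNumberRegister, gpr)
        double d;
        std::memcpy(&d, &bits, sizeof d);      // move64ToDouble(gpr, fpr)
        return d;
    }

    uint64_t boxDouble(double d)
    {
        uint64_t bits;
        std::memcpy(&bits, &d, sizeof bits);
        return bits - TagTypeNumber;           // the inverse offset
    }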
- void boxDouble(FPRReg fpr, JSValueRegs regs)
- {
- boxDouble(fpr, regs.gpr());
- }
-
- void unboxDoubleNonDestructive(JSValueRegs regs, FPRReg destFPR, GPRReg resultGPR, FPRReg)
- {
- unboxDouble(regs.payloadGPR(), resultGPR, destFPR);
- }
-
// Here are possible arrangements of source, target, scratch:
// - source, target, scratch can all be separate registers.
// - source and target can be the same but scratch is separate.
@@ -1091,71 +357,18 @@ public:
{
moveIntsToDouble(payloadGPR, tagGPR, fpr, scratchFPR);
}
-
- void boxDouble(FPRReg fpr, JSValueRegs regs)
- {
- boxDouble(fpr, regs.tagGPR(), regs.payloadGPR());
- }
- void unboxDouble(JSValueRegs regs, FPRReg fpr, FPRReg scratchFPR)
- {
- unboxDouble(regs.tagGPR(), regs.payloadGPR(), fpr, scratchFPR);
- }
-
- void unboxDoubleNonDestructive(const JSValueRegs regs, FPRReg destFPR, GPRReg, FPRReg scratchFPR)
- {
- unboxDouble(regs, destFPR, scratchFPR);
- }
#endif
- void boxBooleanPayload(GPRReg boolGPR, GPRReg payloadGPR)
- {
-#if USE(JSVALUE64)
- add32(TrustedImm32(ValueFalse), boolGPR, payloadGPR);
-#else
- move(boolGPR, payloadGPR);
-#endif
- }
-
- void boxBooleanPayload(bool value, GPRReg payloadGPR)
+ enum ExceptionCheckKind { NormalExceptionCheck, InvertedExceptionCheck };
+ Jump emitExceptionCheck(ExceptionCheckKind kind = NormalExceptionCheck)
{
#if USE(JSVALUE64)
- move(TrustedImm32(ValueFalse + value), payloadGPR);
-#else
- move(TrustedImm32(value), payloadGPR);
+ return branchTest64(kind == NormalExceptionCheck ? NonZero : Zero, AbsoluteAddress(vm()->addressOfException()));
+#elif USE(JSVALUE32_64)
+ return branch32(kind == NormalExceptionCheck ? NotEqual : Equal, AbsoluteAddress(reinterpret_cast<char*>(vm()->addressOfException()) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
#endif
}
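
The two check kinds exist because callers sometimes want the opposite branch sense, taking the jump when no exception is pending. A minimal standalone model of the semantics, with a hypothetical VMModel standing in for VM:

#include <cassert>

struct VMModel { void* exception = nullptr; }; // hypothetical stand-in for VM
enum ExceptionCheckKind { NormalExceptionCheck, InvertedExceptionCheck };

bool branchTaken(const VMModel& vm, ExceptionCheckKind kind)
{
    bool pending = vm.exception != nullptr; // models the NonZero/Zero branch test
    return kind == NormalExceptionCheck ? pending : !pending;
}

int main()
{
    VMModel vm;
    assert(!branchTaken(vm, NormalExceptionCheck));  // no exception: fall through
    assert(branchTaken(vm, InvertedExceptionCheck)); // no exception: take branch
    int dummy;
    vm.exception = &dummy;
    assert(branchTaken(vm, NormalExceptionCheck));   // pending exception: take branch
}
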
- void boxBoolean(GPRReg boolGPR, JSValueRegs boxedRegs)
- {
- boxBooleanPayload(boolGPR, boxedRegs.payloadGPR());
-#if USE(JSVALUE32_64)
- move(TrustedImm32(JSValue::BooleanTag), boxedRegs.tagGPR());
-#endif
- }
-
- void boxInt32(GPRReg intGPR, JSValueRegs boxedRegs, TagRegistersMode mode = HaveTagRegisters)
- {
-#if USE(JSVALUE64)
- if (mode == DoNotHaveTagRegisters) {
- move(intGPR, boxedRegs.gpr());
- or64(TrustedImm64(TagTypeNumber), boxedRegs.gpr());
- } else
- or64(GPRInfo::tagTypeNumberRegister, intGPR, boxedRegs.gpr());
-#else
- UNUSED_PARAM(mode);
- move(intGPR, boxedRegs.payloadGPR());
- move(TrustedImm32(JSValue::Int32Tag), boxedRegs.tagGPR());
-#endif
- }
-
- void callExceptionFuzz();
-
- enum ExceptionCheckKind { NormalExceptionCheck, InvertedExceptionCheck };
- enum ExceptionJumpWidth { NormalJumpWidth, FarJumpWidth };
- Jump emitExceptionCheck(
- ExceptionCheckKind = NormalExceptionCheck, ExceptionJumpWidth = NormalJumpWidth);
- Jump emitNonPatchableExceptionCheck();
-
#if ENABLE(SAMPLING_COUNTERS)
static void emitCount(MacroAssembler& jit, AbstractSamplingCounter& counter, int32_t increment = 1)
{
@@ -1181,7 +394,7 @@ public:
{
if (!codeOrigin.inlineCallFrame)
return codeBlock()->isStrictMode();
- return codeOrigin.inlineCallFrame->isStrictMode();
+ return jsCast<FunctionExecutable*>(codeOrigin.inlineCallFrame->executable.get())->isStrictMode();
}
ECMAMode ecmaModeFor(CodeOrigin codeOrigin)
@@ -1208,175 +421,64 @@ public:
return m_baselineCodeBlock;
}
- static VirtualRegister argumentsStart(InlineCallFrame* inlineCallFrame)
+ VirtualRegister baselineArgumentsRegisterFor(InlineCallFrame* inlineCallFrame)
{
if (!inlineCallFrame)
- return VirtualRegister(CallFrame::argumentOffset(0));
- if (inlineCallFrame->arguments.size() <= 1)
- return virtualRegisterForLocal(0);
- ValueRecovery recovery = inlineCallFrame->arguments[1];
- RELEASE_ASSERT(recovery.technique() == DisplacedInJSStack);
- return recovery.virtualRegister();
+ return baselineCodeBlock()->argumentsRegister();
+
+ return VirtualRegister(baselineCodeBlockForInlineCallFrame(
+ inlineCallFrame)->argumentsRegister().offset() + inlineCallFrame->stackOffset);
}
- static VirtualRegister argumentsStart(const CodeOrigin& codeOrigin)
+ VirtualRegister baselineArgumentsRegisterFor(const CodeOrigin& codeOrigin)
{
- return argumentsStart(codeOrigin.inlineCallFrame);
+ return baselineArgumentsRegisterFor(codeOrigin.inlineCallFrame);
}
- void emitLoadStructure(RegisterID source, RegisterID dest, RegisterID scratch)
- {
-#if USE(JSVALUE64)
- load32(MacroAssembler::Address(source, JSCell::structureIDOffset()), dest);
- loadPtr(vm()->heap.structureIDTable().base(), scratch);
- loadPtr(MacroAssembler::BaseIndex(scratch, dest, MacroAssembler::TimesEight), dest);
-#else
- UNUSED_PARAM(scratch);
- loadPtr(MacroAssembler::Address(source, JSCell::structureIDOffset()), dest);
-#endif
- }
-
- static void emitLoadStructure(AssemblyHelpers& jit, RegisterID base, RegisterID dest, RegisterID scratch)
+ SymbolTable* symbolTableFor(const CodeOrigin& codeOrigin)
{
-#if USE(JSVALUE64)
- jit.load32(MacroAssembler::Address(base, JSCell::structureIDOffset()), dest);
- jit.loadPtr(jit.vm()->heap.structureIDTable().base(), scratch);
- jit.loadPtr(MacroAssembler::BaseIndex(scratch, dest, MacroAssembler::TimesEight), dest);
-#else
- UNUSED_PARAM(scratch);
- jit.loadPtr(MacroAssembler::Address(base, JSCell::structureIDOffset()), dest);
-#endif
+ return baselineCodeBlockFor(codeOrigin)->symbolTable();
}
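
On JSVALUE64, cells store a 32-bit StructureID rather than a Structure pointer, so the deleted emitLoadStructure above indexes an 8-byte-entry side table. A standalone model of that indexing (Structure is an opaque stand-in here, not the real class):

#include <cassert>
#include <cstdint>

struct Structure; // opaque stand-in

Structure* loadStructureModel(Structure* const* tableBase, uint32_t structureID)
{
    // Models BaseIndex(scratch, dest, TimesEight): 8-byte entries indexed by ID.
    return tableBase[structureID];
}

int main()
{
    Structure* fake = reinterpret_cast<Structure*>(0x1000);
    Structure* table[3] = { nullptr, fake, nullptr };
    assert(loadStructureModel(table, 1) == fake);
}
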
- void emitStoreStructureWithTypeInfo(TrustedImmPtr structure, RegisterID dest, RegisterID)
+ int offsetOfLocals(const CodeOrigin& codeOrigin)
{
- emitStoreStructureWithTypeInfo(*this, structure, dest);
+ if (!codeOrigin.inlineCallFrame)
+ return 0;
+ return codeOrigin.inlineCallFrame->stackOffset * sizeof(Register);
}
- void emitStoreStructureWithTypeInfo(RegisterID structure, RegisterID dest, RegisterID scratch)
+ int offsetOfArgumentsIncludingThis(InlineCallFrame* inlineCallFrame)
{
-#if USE(JSVALUE64)
- load64(MacroAssembler::Address(structure, Structure::structureIDOffset()), scratch);
- store64(scratch, MacroAssembler::Address(dest, JSCell::structureIDOffset()));
-#else
- // Store all the info flags using a single 32-bit wide load and store.
- load32(MacroAssembler::Address(structure, Structure::indexingTypeOffset()), scratch);
- store32(scratch, MacroAssembler::Address(dest, JSCell::indexingTypeOffset()));
-
- // Store the StructureID
- storePtr(structure, MacroAssembler::Address(dest, JSCell::structureIDOffset()));
-#endif
+ if (!inlineCallFrame)
+ return CallFrame::argumentOffsetIncludingThis(0) * sizeof(Register);
+ if (inlineCallFrame->arguments.size() <= 1)
+ return 0;
+ ValueRecovery recovery = inlineCallFrame->arguments[1];
+ RELEASE_ASSERT(recovery.technique() == DisplacedInJSStack);
+ return (recovery.virtualRegister().offset() - 1) * sizeof(Register);
}
-
- static void emitStoreStructureWithTypeInfo(AssemblyHelpers& jit, TrustedImmPtr structure, RegisterID dest);
-
- Jump jumpIfIsRememberedOrInEden(GPRReg cell)
+
+ int offsetOfArgumentsIncludingThis(const CodeOrigin& codeOrigin)
{
- return branchTest8(MacroAssembler::NonZero, MacroAssembler::Address(cell, JSCell::cellStateOffset()));
+ return offsetOfArgumentsIncludingThis(codeOrigin.inlineCallFrame);
}
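
These offsets are plain register-slot arithmetic: an inline call frame's locals live stackOffset register slots from the machine frame, so the byte offset is stackOffset * sizeof(Register). A worked standalone example, assuming 8-byte registers on a 64-bit target:

#include <cassert>

int offsetOfLocalsModel(int stackOffset, int registerSize = 8)
{
    return stackOffset * registerSize; // byte distance of the inline frame's locals
}

int main()
{
    assert(offsetOfLocalsModel(0) == 0);    // not inlined: no displacement
    assert(offsetOfLocalsModel(16) == 128); // 16 register slots away: 128 bytes
}
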
- Jump jumpIfIsRememberedOrInEden(JSCell* cell)
+ void writeBarrier(GPRReg owner, GPRReg scratch1, GPRReg scratch2, WriteBarrierUseKind useKind)
{
- uint8_t* address = reinterpret_cast<uint8_t*>(cell) + JSCell::cellStateOffset();
- return branchTest8(MacroAssembler::NonZero, MacroAssembler::AbsoluteAddress(address));
- }
-
- // Emits the branch structure for typeof. The code emitted by this doesn't fall through. The
- // functor is called at those points where we have pinpointed a type. One way to use this is to
- // have the functor emit the code to put the type string into an appropriate register and then
- // jump out. A secondary functor is used for the call trap and masquerades-as-undefined slow
- // case. It is passed the unlinked jump to the slow case.
- template<typename Functor, typename SlowPathFunctor>
- void emitTypeOf(
- JSValueRegs regs, GPRReg tempGPR, const Functor& functor,
- const SlowPathFunctor& slowPathFunctor)
- {
- // Implements the following branching structure:
- //
- // if (is cell) {
- // if (is object) {
- // if (is function) {
- // return function;
- // } else if (doesn't have call trap and doesn't masquerade as undefined) {
- // return object
- // } else {
- // return slowPath();
- // }
- // } else if (is string) {
- // return string
- // } else {
- // return symbol
- // }
- // } else if (is number) {
- // return number
- // } else if (is null) {
- // return object
- // } else if (is boolean) {
- // return boolean
- // } else {
- // return undefined
- // }
-
- Jump notCell = branchIfNotCell(regs);
-
- GPRReg cellGPR = regs.payloadGPR();
- Jump notObject = branchIfNotObject(cellGPR);
-
- Jump notFunction = branchIfNotFunction(cellGPR);
- functor(TypeofType::Function, false);
-
- notFunction.link(this);
- slowPathFunctor(
- branchTest8(
- NonZero,
- Address(cellGPR, JSCell::typeInfoFlagsOffset()),
- TrustedImm32(MasqueradesAsUndefined | TypeOfShouldCallGetCallData)));
- functor(TypeofType::Object, false);
-
- notObject.link(this);
-
- Jump notString = branchIfNotString(cellGPR);
- functor(TypeofType::String, false);
- notString.link(this);
- functor(TypeofType::Symbol, false);
-
- notCell.link(this);
-
- Jump notNumber = branchIfNotNumber(regs, tempGPR);
- functor(TypeofType::Number, false);
- notNumber.link(this);
-
- JumpList notNull = branchIfNotEqual(regs, jsNull());
- functor(TypeofType::Object, false);
- notNull.link(this);
+ UNUSED_PARAM(owner);
+ UNUSED_PARAM(scratch1);
+ UNUSED_PARAM(scratch2);
+ UNUSED_PARAM(useKind);
+ ASSERT(owner != scratch1);
+ ASSERT(owner != scratch2);
+ ASSERT(scratch1 != scratch2);
- Jump notBoolean = branchIfNotBoolean(regs, tempGPR);
- functor(TypeofType::Boolean, false);
- notBoolean.link(this);
-
- functor(TypeofType::Undefined, true);
+#if ENABLE(WRITE_BARRIER_PROFILING)
+ emitCount(WriteBarrierCounters::jitCounterFor(useKind));
+#endif
}
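
The deleted emitTypeOf above encodes the branching structure described in its comment. A standalone model of that decision tree (Value is a hypothetical stand-in for a tagged operand, not a JSC type):

#include <cassert>
#include <string>

struct Value { // hypothetical flags describing the operand
    bool isCell = false, isObject = false, isFunction = false, isString = false;
    bool hasCallTrap = false, masqueradesAsUndefined = false;
    bool isNumber = false, isNull = false, isBoolean = false;
};

std::string typeofModel(const Value& v)
{
    if (v.isCell) {
        if (v.isObject) {
            if (v.isFunction)
                return "function";
            if (!v.hasCallTrap && !v.masqueradesAsUndefined)
                return "object";
            return "slow path"; // the secondary functor's jump
        }
        return v.isString ? "string" : "symbol";
    }
    if (v.isNumber)
        return "number";
    if (v.isNull)
        return "object";
    if (v.isBoolean)
        return "boolean";
    return "undefined";
}

int main()
{
    Value null;
    null.isNull = true;
    assert(typeofModel(null) == "object"); // typeof null is "object"
}
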
Vector<BytecodeAndMachineOffset>& decodedCodeMapFor(CodeBlock*);
-
- void makeSpaceOnStackForCCall()
- {
- unsigned stackOffset = WTF::roundUpToMultipleOf(stackAlignmentBytes(), maxFrameExtentForSlowPathCall);
- if (stackOffset)
- subPtr(TrustedImm32(stackOffset), stackPointerRegister);
- }
-
- void reclaimSpaceOnStackForCCall()
- {
- unsigned stackOffset = WTF::roundUpToMultipleOf(stackAlignmentBytes(), maxFrameExtentForSlowPathCall);
- if (stackOffset)
- addPtr(TrustedImm32(stackOffset), stackPointerRegister);
- }
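-
- // Both helpers round the slow-path frame extent up to the stack alignment
- // before adjusting the stack pointer. A small worked example, assuming
- // stackAlignmentBytes() is 16 and an illustrative maxFrameExtentForSlowPathCall
- // of 40 bytes:
-
- // #include <cassert>
- //
- // unsigned roundUpToMultipleOf(unsigned divisor, unsigned x)
- // {
- //     return (x + divisor - 1) / divisor * divisor;
- // }
- //
- // int main()
- // {
- //     assert(roundUpToMultipleOf(16, 0) == 0);   // nothing to reserve
- //     assert(roundUpToMultipleOf(16, 40) == 48); // 40-byte extent reserves 48 bytes
- // }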
-
-#if USE(JSVALUE64)
- void emitRandomThunk(JSGlobalObject*, GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, FPRReg result);
- void emitRandomThunk(GPRReg scratch0, GPRReg scratch1, GPRReg scratch2, GPRReg scratch3, FPRReg result);
-#endif
protected:
VM* m_vm;
diff --git a/Source/JavaScriptCore/jit/BinarySwitch.cpp b/Source/JavaScriptCore/jit/BinarySwitch.cpp
deleted file mode 100644
index f3ddcfca9..000000000
--- a/Source/JavaScriptCore/jit/BinarySwitch.cpp
+++ /dev/null
@@ -1,391 +0,0 @@
-/*
- * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "BinarySwitch.h"
-
-#if ENABLE(JIT)
-
-#include "JSCInlines.h"
-#include <wtf/ListDump.h>
-
-namespace JSC {
-
-static const bool verbose = false;
-
-static unsigned globalCounter; // We use a different seed every time we are invoked.
-
-BinarySwitch::BinarySwitch(GPRReg value, const Vector<int64_t>& cases, Type type)
- : m_value(value)
- , m_weakRandom(globalCounter++)
- , m_index(0)
- , m_caseIndex(UINT_MAX)
- , m_type(type)
-{
- if (cases.isEmpty())
- return;
-
- if (verbose)
- dataLog("Original cases: ", listDump(cases), "\n");
-
- for (unsigned i = 0; i < cases.size(); ++i)
- m_cases.append(Case(cases[i], i));
-
- std::sort(m_cases.begin(), m_cases.end());
-
- if (verbose)
- dataLog("Sorted cases: ", listDump(m_cases), "\n");
-
- for (unsigned i = 1; i < m_cases.size(); ++i)
- RELEASE_ASSERT(m_cases[i - 1] < m_cases[i]);
-
- build(0, false, m_cases.size());
-}
-
-BinarySwitch::~BinarySwitch()
-{
-}
-
-bool BinarySwitch::advance(MacroAssembler& jit)
-{
- if (m_cases.isEmpty()) {
- m_fallThrough.append(jit.jump());
- return false;
- }
-
- if (m_index == m_branches.size()) {
- RELEASE_ASSERT(m_jumpStack.isEmpty());
- return false;
- }
-
- for (;;) {
- const BranchCode& code = m_branches[m_index++];
- switch (code.kind) {
- case NotEqualToFallThrough:
- switch (m_type) {
- case Int32:
- m_fallThrough.append(jit.branch32(
- MacroAssembler::NotEqual, m_value,
- MacroAssembler::Imm32(static_cast<int32_t>(m_cases[code.index].value))));
- break;
- case IntPtr:
- m_fallThrough.append(jit.branchPtr(
- MacroAssembler::NotEqual, m_value,
- MacroAssembler::ImmPtr(bitwise_cast<const void*>(static_cast<intptr_t>(m_cases[code.index].value)))));
- break;
- }
- break;
- case NotEqualToPush:
- switch (m_type) {
- case Int32:
- m_jumpStack.append(jit.branch32(
- MacroAssembler::NotEqual, m_value,
- MacroAssembler::Imm32(static_cast<int32_t>(m_cases[code.index].value))));
- break;
- case IntPtr:
- m_jumpStack.append(jit.branchPtr(
- MacroAssembler::NotEqual, m_value,
- MacroAssembler::ImmPtr(bitwise_cast<const void*>(static_cast<intptr_t>(m_cases[code.index].value)))));
- break;
- }
- break;
- case LessThanToPush:
- switch (m_type) {
- case Int32:
- m_jumpStack.append(jit.branch32(
- MacroAssembler::LessThan, m_value,
- MacroAssembler::Imm32(static_cast<int32_t>(m_cases[code.index].value))));
- break;
- case IntPtr:
- m_jumpStack.append(jit.branchPtr(
- MacroAssembler::LessThan, m_value,
- MacroAssembler::ImmPtr(bitwise_cast<const void*>(static_cast<intptr_t>(m_cases[code.index].value)))));
- break;
- }
- break;
- case Pop:
- m_jumpStack.takeLast().link(&jit);
- break;
- case ExecuteCase:
- m_caseIndex = code.index;
- return true;
- }
- }
-}
-
-void BinarySwitch::build(unsigned start, bool hardStart, unsigned end)
-{
- if (verbose)
- dataLog("Building with start = ", start, ", hardStart = ", hardStart, ", end = ", end, "\n");
-
- auto append = [&] (const BranchCode& code) {
- if (verbose)
- dataLog("==> ", code, "\n");
- m_branches.append(code);
- };
-
- unsigned size = end - start;
-
- RELEASE_ASSERT(size);
-
- // This code uses some random numbers to keep things balanced. It's important to keep in mind
- // that this does not improve average-case throughput under the assumption that all cases fire
- // with equal probability. It just ensures that there will not be some switch structure that
- // when combined with some input will always produce pathologically good or pathologically bad
- // performance.
-
- const unsigned leafThreshold = 3;
-
- if (size <= leafThreshold) {
- if (verbose)
- dataLog("It's a leaf.\n");
-
- // It turns out that for exactly three cases or fewer, it's better to just compare each
- // case individually. This saves 1/6 of a branch on average, and up to 1/3 of a branch in
- // extreme cases where the divide-and-conquer bottoms out in a lot of 3-case subswitches.
- //
- // This assumes that we care about the cost of hitting some case more than we care about
- // bottoming out in a default case. I believe that in most places where we use switch
- // statements, we are more likely to hit one of the cases than we are to fall through to
- // default. Intuitively, if we wanted to improve the performance of default, we would
- // reduce the value of leafThreshold to 2 or even to 1. See below for a deeper discussion.
-
- bool allConsecutive = false;
-
- if ((hardStart || (start && m_cases[start - 1].value == m_cases[start].value - 1))
- && start + size < m_cases.size()
- && m_cases[start + size - 1].value == m_cases[start + size].value - 1) {
- allConsecutive = true;
- for (unsigned i = 0; i < size - 1; ++i) {
- if (m_cases[start + i].value + 1 != m_cases[start + i + 1].value) {
- allConsecutive = false;
- break;
- }
- }
- }
-
- if (verbose)
- dataLog("allConsecutive = ", allConsecutive, "\n");
-
- Vector<unsigned, 3> localCaseIndices;
- for (unsigned i = 0; i < size; ++i)
- localCaseIndices.append(start + i);
-
- std::random_shuffle(
- localCaseIndices.begin(), localCaseIndices.end(),
- [this] (unsigned n) {
- // We use modulo to get a random number in the range we want fully knowing that
- // this introduces a tiny amount of bias, but we're fine with such tiny bias.
- return m_weakRandom.getUint32() % n;
- });
-
- for (unsigned i = 0; i < size - 1; ++i) {
- append(BranchCode(NotEqualToPush, localCaseIndices[i]));
- append(BranchCode(ExecuteCase, localCaseIndices[i]));
- append(BranchCode(Pop));
- }
-
- if (!allConsecutive)
- append(BranchCode(NotEqualToFallThrough, localCaseIndices.last()));
-
- append(BranchCode(ExecuteCase, localCaseIndices.last()));
- return;
- }
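
The allConsecutive test above lets the leaf skip its final not-equal check: if the values in and around the leaf form one consecutive run, any value that reached this leaf must match one of its cases. A standalone model of the test:

#include <cassert>
#include <vector>

bool allConsecutiveModel(const std::vector<long long>& sortedCases,
    unsigned start, unsigned size, bool hardStart)
{
    bool leftBounded = hardStart
        || (start && sortedCases[start - 1] == sortedCases[start] - 1);
    if (!leftBounded || start + size >= sortedCases.size()
        || sortedCases[start + size - 1] != sortedCases[start + size] - 1)
        return false;
    for (unsigned i = 0; i + 1 < size; ++i) {
        if (sortedCases[start + i] + 1 != sortedCases[start + i + 1])
            return false;
    }
    return true;
}

int main()
{
    std::vector<long long> dense = { 1, 2, 3, 4, 5 };
    assert(allConsecutiveModel(dense, 1, 3, false));  // 2,3,4 bounded by 1 and 5
    std::vector<long long> sparse = { 1, 2, 3, 4, 10 };
    assert(!allConsecutiveModel(sparse, 1, 3, false)); // 4 is not followed by 5
}
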
-
- if (verbose)
- dataLog("It's not a leaf.\n");
-
- // There are two different strategies we could consider here:
- //
- // Isolate median and split: pick a median and check if the comparison value is equal to it;
- // if so, execute the median case. Otherwise check if the value is less than the median, and
- // recurse left or right based on this. This has two subvariants: we could either first test
- // equality for the median and then do the less-than, or we could first do the less-than and
- // then check equality on the not-less-than path.
- //
- // Ignore median and split: do a less-than comparison on a value that splits the cases in two
- // equal-sized halves. Recurse left or right based on the comparison. Do not test for equality
- // against the median (or anything else); let the recursion handle those equality comparisons
- // once we bottom out in a list that has 3 cases or fewer (see above).
- //
- // I'll refer to these strategies as Isolate and Ignore. I initially believed that Isolate
- // would be faster since it leads to less branching for some lucky cases. It turns out that
- // Isolate is almost a total fail in the average, assuming all cases are equally likely. How
- // bad Isolate is depends on whether you believe that doing two consecutive branches based on
- // the same comparison is cheaper than doing the compare/branches separately. This is
- // difficult to evaluate. For small immediates that aren't blinded, we just care about
- // avoiding a second compare instruction. For large immediates or when blinding is in play, we
- // also care about the instructions used to materialize the immediate a second time. Isolate
- // can help with both costs since it involves first doing a < compare+branch on some value,
- // followed by a == compare+branch on the same exact value (or vice-versa). Ignore will do a <
- // compare+branch on some value, and then the == compare+branch on that same value will happen
- // much later.
- //
- // To evaluate these costs, I wrote the recurrence relation for Isolate and Ignore, assuming
- // that ComparisonCost is the cost of a compare+branch and ChainedComparisonCost is the cost
- // of a compare+branch on some value that you've just done another compare+branch for. These
- // recurrence relations compute the total cost incurred if you executed the switch statement
- // on each matching value. So the average cost of hitting some case can be computed as
- // Isolate[n]/n or Ignore[n]/n, respectively for the two relations.
- //
- // Isolate[1] = ComparisonCost
- // Isolate[2] = (2 + 1) * ComparisonCost
- // Isolate[3] = (3 + 2 + 1) * ComparisonCost
- // Isolate[n_] := With[
- // {medianIndex = Floor[n/2] + If[EvenQ[n], RandomInteger[], 1]},
- // ComparisonCost + ChainedComparisonCost +
- // (ComparisonCost * (medianIndex - 1) + Isolate[medianIndex - 1]) +
- // (2 * ComparisonCost * (n - medianIndex) + Isolate[n - medianIndex])]
- //
- // Ignore[1] = ComparisonCost
- // Ignore[2] = (2 + 1) * ComparisonCost
- // Ignore[3] = (3 + 2 + 1) * ComparisonCost
- // Ignore[n_] := With[
- // {medianIndex = If[EvenQ[n], n/2, Floor[n/2] + RandomInteger[]]},
- // (medianIndex * ComparisonCost + Ignore[medianIndex]) +
- // ((n - medianIndex) * ComparisonCost + Ignore[n - medianIndex])]
- //
- // This does not account for the average cost of hitting the default case. See further below
- // for a discussion of that.
- //
- // It turns out that for ComparisonCost = 1 and ChainedComparisonCost = 1, Ignore is always
- // better than Isolate. If we assume that ChainedComparisonCost = 0, then Isolate wins for
- // switch statements that have 20 cases or fewer, though the margin of victory is never large
- // - it might sometimes save an average of 0.3 ComparisonCost. For larger switch statements,
- // we see divergence between the two with Ignore winning. This is of course rather
- // unrealistic since the chained comparison is never free. For ChainedComparisonCost = 0.5, we
- // see Isolate winning for 10 cases or fewer, by maybe 0.2 ComparisonCost. Again we see
- // divergence for large switches with Ignore winning, for example if a switch statement has
- // 100 cases then Ignore saves one branch on average.
- //
- // Our current JIT backends don't provide for optimization for chained comparisons, except for
- // reducing the code for materializing the immediate if the immediates are large or blinding
- // comes into play. Probably our JIT backends live somewhere north of
- // ChainedComparisonCost = 0.5.
- //
- // This implies that using the Ignore strategy is likely better. If we wanted to incorporate
- // the Isolate strategy, we'd want to determine the switch size threshold at which the two
- // cross over and then use Isolate for switches that are smaller than that size.
- //
- // The average cost of hitting the default case is similar, but involves a different cost for
- // the base cases: you have to assume that you will always fail each branch. For the Ignore
- // strategy we would get this recurrence relation; the same kind of thing happens to the
- // Isolate strategy:
- //
- // Ignore[1] = ComparisonCost
- // Ignore[2] = (2 + 2) * ComparisonCost
- // Ignore[3] = (3 + 3 + 3) * ComparisonCost
- // Ignore[n_] := With[
- // {medianIndex = If[EvenQ[n], n/2, Floor[n/2] + RandomInteger[]]},
- // (medianIndex * ComparisonCost + Ignore[medianIndex]) +
- // ((n - medianIndex) * ComparisonCost + Ignore[n - medianIndex])]
- //
- // This means that if we cared about the default case more, we would likely reduce
- // leafThreshold. Reducing it to 2 would reduce the average cost of the default case by 1/3
- // in the most extreme cases (num switch cases = 3, 6, 12, 24, ...). But it would also
- // increase the average cost of taking one of the non-default cases by 1/3. Typically the
- // difference is 1/6 in either direction. This makes it a very simple trade-off: if we believe
- // that the default case is more important then we would want leafThreshold to be 2, and the
- // default case would become 1/6 faster on average. But we believe that most switch statements
- // are more likely to take one of the cases than the default, so we use leafThreshold = 3
- // and get a 1/6 speed-up on average for taking an explicit case.
-
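
The recurrences above are easy to evaluate numerically. A standalone sketch that computes the Ignore relation with ComparisonCost = 1 and a deterministic medianIndex = n/2 (dropping the randomization), so that Ignore[n]/n approximates the average number of branches to hit a case:

#include <cstdio>
#include <vector>

int main()
{
    const int maxN = 100;
    std::vector<double> ignore(maxN + 1, 0);
    ignore[1] = 1; // leaf costs taken from the comment above
    ignore[2] = 3;
    ignore[3] = 6;
    for (int n = 4; n <= maxN; ++n) {
        int m = n / 2;
        ignore[n] = (m + ignore[m]) + ((n - m) + ignore[n - m]);
    }
    for (int n : { 3, 10, 20, 100 })
        std::printf("n=%d average cost=%.2f\n", n, ignore[n] / n);
}
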
- unsigned medianIndex = (start + end) / 2;
-
- if (verbose)
- dataLog("medianIndex = ", medianIndex, "\n");
-
- // We want medianIndex to point to the thing we will do a less-than compare against. We want
- // this less-than compare to split the current sublist into equal-sized sublists, or
- // nearly-equal-sized with some randomness if we're in the odd case. With the above
- // calculation, in the odd case we will have medianIndex pointing at either the element we
- // want or the element to the left of the one we want. Consider the case of five elements:
- //
- // 0 1 2 3 4
- //
- // start will be 0, end will be 5. The average is 2.5, which rounds down to 2. If we do
- // value < 2, then we will split the list into 2 elements on the left and three on the right.
- // That's pretty good, but in this odd case we'd like to choose 3 at random instead to ensure
- // that we don't become unbalanced on the right. This does not improve throughput since one
- // side will always get shafted, and that side might still be odd, in which case it will also
- // have two sides and one of them will get shafted - and so on. We just want to avoid
- // deterministic pathologies.
- //
- // In the even case, we will always end up pointing at the element we want:
- //
- // 0 1 2 3
- //
- // start will be 0, end will be 4. So, the average is 2, which is what we'd like.
- if (size & 1) {
- RELEASE_ASSERT(medianIndex - start + 1 == end - medianIndex);
- medianIndex += m_weakRandom.getUint32() & 1;
- } else
- RELEASE_ASSERT(medianIndex - start == end - medianIndex);
-
- RELEASE_ASSERT(medianIndex > start);
- RELEASE_ASSERT(medianIndex + 1 < end);
-
- if (verbose)
- dataLog("fixed medianIndex = ", medianIndex, "\n");
-
- append(BranchCode(LessThanToPush, medianIndex));
- build(medianIndex, true, end);
- append(BranchCode(Pop));
- build(start, hardStart, medianIndex);
-}
-
-void BinarySwitch::Case::dump(PrintStream& out) const
-{
- out.print("<value: " , value, ", index: ", index, ">");
-}
-
-void BinarySwitch::BranchCode::dump(PrintStream& out) const
-{
- switch (kind) {
- case NotEqualToFallThrough:
- out.print("NotEqualToFallThrough");
- break;
- case NotEqualToPush:
- out.print("NotEqualToPush");
- break;
- case LessThanToPush:
- out.print("LessThanToPush");
- break;
- case Pop:
- out.print("Pop");
- break;
- case ExecuteCase:
- out.print("ExecuteCase");
- break;
- }
-
- if (index != UINT_MAX)
- out.print("(", index, ")");
-}
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
-
diff --git a/Source/JavaScriptCore/jit/BinarySwitch.h b/Source/JavaScriptCore/jit/BinarySwitch.h
deleted file mode 100644
index 3ac08b701..000000000
--- a/Source/JavaScriptCore/jit/BinarySwitch.h
+++ /dev/null
@@ -1,147 +0,0 @@
-/*
- * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef BinarySwitch_h
-#define BinarySwitch_h
-
-#if ENABLE(JIT)
-
-#include "GPRInfo.h"
-#include "MacroAssembler.h"
-#include <wtf/WeakRandom.h>
-
-namespace JSC {
-
-// The BinarySwitch class makes it easy to emit a switch statement over either
-// 32-bit integers or pointers, where the switch uses a tree of branches
-// rather than a jump table. This makes it particularly useful if the case
-// values are too far apart to make a jump table practical, or if there are
-// sufficiently few cases that the total cost of log(numCases) branches is
-// less than the cost of an indirected jump.
-//
-// In an effort to simplify the logic of emitting code for each case, this
-// uses an iterator style, rather than a functor callback style. This makes
-// sense because even the iterator implementation found herein is relatively
-// simple, whereas the code it's used from is usually quite complex - one
-// example being the trie-of-trees string switch implementation, where the
-// code emitted for each case involves recursing to emit code for a sub-trie.
-//
-// Use this like so:
-//
-// BinarySwitch binarySwitch(valueReg, casesVector, BinarySwitch::Int32);
-// while (binarySwitch.advance(jit)) {
-// int64_t value = binarySwitch.caseValue();
-// unsigned index = binarySwitch.caseIndex(); // index into casesVector, above
-// ... // generate code for this case
-// ... = jit.jump(); // you have to jump out yourself; falling through causes undefined behavior
-// }
-// binarySwitch.fallThrough().link(&jit);
-
-class BinarySwitch {
-public:
- enum Type {
- Int32,
- IntPtr
- };
-
- BinarySwitch(GPRReg value, const Vector<int64_t>& cases, Type);
- ~BinarySwitch();
-
- unsigned caseIndex() const { return m_cases[m_caseIndex].index; }
- int64_t caseValue() const { return m_cases[m_caseIndex].value; }
-
- bool advance(MacroAssembler&);
-
- MacroAssembler::JumpList& fallThrough() { return m_fallThrough; }
-
-private:
- void build(unsigned start, bool hardStart, unsigned end);
-
- GPRReg m_value;
-
- struct Case {
- Case() { }
-
- Case(int64_t value, unsigned index)
- : value(value)
- , index(index)
- {
- }
-
- bool operator<(const Case& other) const
- {
- return value < other.value;
- }
-
- void dump(PrintStream& out) const;
-
- int64_t value;
- unsigned index;
- };
-
- Vector<Case> m_cases;
-
- enum BranchKind {
- NotEqualToFallThrough,
- NotEqualToPush,
- LessThanToPush,
- Pop,
- ExecuteCase
- };
-
- struct BranchCode {
- BranchCode() { }
-
- BranchCode(BranchKind kind, unsigned index = UINT_MAX)
- : kind(kind)
- , index(index)
- {
- }
-
- void dump(PrintStream& out) const;
-
- BranchKind kind;
- unsigned index;
- };
-
- WeakRandom m_weakRandom;
-
- Vector<BranchCode> m_branches;
-
- unsigned m_index;
- unsigned m_caseIndex;
- Vector<MacroAssembler::Jump> m_jumpStack;
-
- MacroAssembler::JumpList m_fallThrough;
-
- Type m_type;
-};
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
-
-#endif // BinarySwitch_h
-
diff --git a/Source/JavaScriptCore/jit/CCallHelpers.h b/Source/JavaScriptCore/jit/CCallHelpers.h
index e649d39e6..afcccd1ca 100644
--- a/Source/JavaScriptCore/jit/CCallHelpers.h
+++ b/Source/JavaScriptCore/jit/CCallHelpers.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,44 +26,21 @@
#ifndef CCallHelpers_h
#define CCallHelpers_h
+#include <wtf/Platform.h>
+
#if ENABLE(JIT)
#include "AssemblyHelpers.h"
#include "GPRInfo.h"
-#include "StackAlignment.h"
namespace JSC {
-#if CPU(MIPS) || (OS(WINDOWS) && CPU(X86_64))
-#define POKE_ARGUMENT_OFFSET 4
-#else
-#define POKE_ARGUMENT_OFFSET 0
-#endif
-
class CCallHelpers : public AssemblyHelpers {
public:
CCallHelpers(VM* vm, CodeBlock* codeBlock = 0)
: AssemblyHelpers(vm, codeBlock)
{
}
-
- // The most general helper for setting arguments that fit in a GPR, if you can compute each
- // argument without using any argument registers. You usually want one of the setupArguments*()
- // methods below instead of this. This thing is most useful if you have *a lot* of arguments.
- template<typename Functor>
- void setupArgument(unsigned argumentIndex, const Functor& functor)
- {
- unsigned numberOfRegs = GPRInfo::numberOfArgumentRegisters; // Disguise the constant from clang's tautological compare warning.
- if (argumentIndex < numberOfRegs) {
- functor(GPRInfo::toArgumentRegister(argumentIndex));
- return;
- }
-
- functor(GPRInfo::nonArgGPR0);
- poke(GPRInfo::nonArgGPR0, POKE_ARGUMENT_OFFSET + argumentIndex - GPRInfo::numberOfArgumentRegisters);
- }
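
setupArgument's policy is simply registers first, then stack pokes. A standalone model of where each argument index lands (the register and offset counts are illustrative, not tied to any particular target):

#include <cassert>
#include <string>

std::string argumentLocationModel(unsigned argumentIndex,
    unsigned numberOfArgumentRegisters, unsigned pokeOffset)
{
    if (argumentIndex < numberOfArgumentRegisters)
        return "register " + std::to_string(argumentIndex);
    // Past the register file: poke to a stack slot, as setupArgument does.
    return "stack slot " + std::to_string(pokeOffset + argumentIndex - numberOfArgumentRegisters);
}

int main()
{
    // Assumed example values: 6 GPR argument registers, POKE_ARGUMENT_OFFSET 0.
    assert(argumentLocationModel(3, 6, 0) == "register 3");
    assert(argumentLocationModel(7, 6, 0) == "stack slot 1");
}
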
-
- void setupArgumentsWithExecState() { setupArgumentsExecState(); }
// These methods are used to sort arguments into the correct registers.
// On X86 we use cdecl calling conventions, which pass all arguments on the
@@ -118,13 +95,6 @@ public:
addCallArgument(arg2);
}
- ALWAYS_INLINE void setupArguments(TrustedImmPtr arg1, GPRReg arg2)
- {
- resetCallArguments();
- addCallArgument(arg1);
- addCallArgument(arg2);
- }
-
ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2, GPRReg arg3)
{
resetCallArguments();
@@ -212,15 +182,6 @@ public:
addCallArgument(arg2);
}
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3)
- {
- resetCallArguments();
- addCallArgument(GPRInfo::callFrameRegister);
- addCallArgument(arg1);
- addCallArgument(arg2);
- addCallArgument(arg3);
- }
-
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2)
{
resetCallArguments();
@@ -314,16 +275,6 @@ public:
addCallArgument(arg3);
}
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, TrustedImm32 arg4)
- {
- resetCallArguments();
- addCallArgument(GPRInfo::callFrameRegister);
- addCallArgument(arg1);
- addCallArgument(arg2);
- addCallArgument(arg3);
- addCallArgument(arg4);
- }
-
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, TrustedImmPtr arg3)
{
resetCallArguments();
@@ -333,58 +284,6 @@ public:
addCallArgument(arg3);
}
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, TrustedImm32 arg3)
- {
- resetCallArguments();
- addCallArgument(GPRInfo::callFrameRegister);
- addCallArgument(arg1);
- addCallArgument(arg2);
- addCallArgument(arg3);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3)
- {
- resetCallArguments();
- addCallArgument(GPRInfo::callFrameRegister);
- addCallArgument(arg1);
- addCallArgument(arg2);
- addCallArgument(arg3);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, TrustedImm32 arg5)
- {
- resetCallArguments();
- addCallArgument(GPRInfo::callFrameRegister);
- addCallArgument(arg1);
- addCallArgument(arg2);
- addCallArgument(arg3);
- addCallArgument(arg4);
- addCallArgument(arg5);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4, TrustedImm32 arg5)
- {
- resetCallArguments();
- addCallArgument(GPRInfo::callFrameRegister);
- addCallArgument(arg1);
- addCallArgument(arg2);
- addCallArgument(arg3);
- addCallArgument(arg4);
- addCallArgument(arg5);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, GPRReg arg5, TrustedImm32 arg6)
- {
- resetCallArguments();
- addCallArgument(GPRInfo::callFrameRegister);
- addCallArgument(arg1);
- addCallArgument(arg2);
- addCallArgument(arg3);
- addCallArgument(arg4);
- addCallArgument(arg5);
- addCallArgument(arg6);
- }
-
ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3)
{
resetCallArguments();
@@ -448,37 +347,6 @@ public:
addCallArgument(arg3);
addCallArgument(arg4);
}
-
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImm32 arg2, GPRReg arg3, TrustedImmPtr arg4)
- {
- resetCallArguments();
- addCallArgument(GPRInfo::callFrameRegister);
- addCallArgument(arg1);
- addCallArgument(arg2);
- addCallArgument(arg3);
- addCallArgument(arg4);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImmPtr arg4)
- {
- resetCallArguments();
- addCallArgument(GPRInfo::callFrameRegister);
- addCallArgument(arg1);
- addCallArgument(arg2);
- addCallArgument(arg3);
- addCallArgument(arg4);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5)
- {
- resetCallArguments();
- addCallArgument(GPRInfo::callFrameRegister);
- addCallArgument(arg1);
- addCallArgument(arg2);
- addCallArgument(arg3);
- addCallArgument(arg4);
- addCallArgument(arg5);
- }
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5)
{
@@ -491,27 +359,6 @@ public:
addCallArgument(arg5);
}
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4)
- {
- resetCallArguments();
- addCallArgument(GPRInfo::callFrameRegister);
- addCallArgument(arg1);
- addCallArgument(arg2);
- addCallArgument(arg3);
- addCallArgument(arg4);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4, TrustedImm32 arg5)
- {
- resetCallArguments();
- addCallArgument(GPRInfo::callFrameRegister);
- addCallArgument(arg1);
- addCallArgument(arg2);
- addCallArgument(arg3);
- addCallArgument(arg4);
- addCallArgument(arg5);
- }
-
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4)
{
resetCallArguments();
@@ -532,17 +379,6 @@ public:
addCallArgument(arg4);
}
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, GPRReg arg5)
- {
- resetCallArguments();
- addCallArgument(GPRInfo::callFrameRegister);
- addCallArgument(arg1);
- addCallArgument(arg2);
- addCallArgument(arg3);
- addCallArgument(arg4);
- addCallArgument(arg5);
- }
-
ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3)
{
resetCallArguments();
@@ -602,17 +438,6 @@ public:
addCallArgument(arg4);
}
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5)
- {
- resetCallArguments();
- addCallArgument(GPRInfo::callFrameRegister);
- addCallArgument(arg1);
- addCallArgument(arg2);
- addCallArgument(arg3);
- addCallArgument(arg4);
- addCallArgument(arg5);
- }
-
ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImmPtr arg5)
{
resetCallArguments();
@@ -678,16 +503,6 @@ public:
addCallArgument(arg4);
}
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, GPRReg arg3, GPRReg arg4)
- {
- resetCallArguments();
- addCallArgument(GPRInfo::callFrameRegister);
- addCallArgument(arg1);
- addCallArgument(arg2);
- addCallArgument(arg3);
- addCallArgument(arg4);
- }
-
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5)
{
resetCallArguments();
@@ -742,33 +557,8 @@ public:
addCallArgument(arg4);
addCallArgument(arg5);
}
-
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6)
- {
- resetCallArguments();
- addCallArgument(GPRInfo::callFrameRegister);
- addCallArgument(arg1);
- addCallArgument(arg2);
- addCallArgument(arg3);
- addCallArgument(arg4);
- addCallArgument(arg5);
- addCallArgument(arg6);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, TrustedImm32 arg6)
- {
- resetCallArguments();
- addCallArgument(GPRInfo::callFrameRegister);
- addCallArgument(arg1);
- addCallArgument(arg2);
- addCallArgument(arg3);
- addCallArgument(arg4);
- addCallArgument(arg5);
- addCallArgument(arg6);
- }
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6, TrustedImmPtr arg7)
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6)
{
resetCallArguments();
addCallArgument(GPRInfo::callFrameRegister);
@@ -778,7 +568,6 @@ public:
addCallArgument(arg4);
addCallArgument(arg5);
addCallArgument(arg6);
- addCallArgument(arg7);
}
ALWAYS_INLINE void setupArgumentsWithExecState(FPRReg arg1, GPRReg arg2)
@@ -941,6 +730,12 @@ public:
setupThreeStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2, GPRInfo::argumentGPR3>(arg1, arg2, arg3);
}
+#if CPU(MIPS) || (OS(WINDOWS) && CPU(X86_64))
+#define POKE_ARGUMENT_OFFSET 4
+#else
+#define POKE_ARGUMENT_OFFSET 0
+#endif
+
#if CPU(X86_64) || CPU(ARM64)
ALWAYS_INLINE void setupArguments(FPRReg arg1)
{
@@ -954,27 +749,14 @@ public:
ALWAYS_INLINE void setupArgumentsWithExecState(FPRReg arg1, GPRReg arg2)
{
-#if OS(WINDOWS) && CPU(X86_64)
- // On Windows, arguments map to designated registers based on the argument positions, even when there are interleaved scalar and floating point arguments.
- // See http://msdn.microsoft.com/en-us/library/zthk2dkh.aspx
- moveDouble(arg1, FPRInfo::argumentFPR1);
- move(arg2, GPRInfo::argumentGPR2);
-#else
moveDouble(arg1, FPRInfo::argumentFPR0);
move(arg2, GPRInfo::argumentGPR1);
-#endif
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
}
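
The deleted Windows branch above existed because the Win64 calling convention assigns registers by argument position, counting GPR and FP arguments together, whereas System V counts them independently. A standalone illustration of the positional mapping, with register names per the public Win64 ABI:

#include <cassert>
#include <string>

std::string win64ArgRegister(unsigned position, bool isFloat)
{
    // Win64: positions 0-3 map to RCX/RDX/R8/R9 or XMM0-XMM3 purely by position.
    static const char* gprs[] = { "rcx", "rdx", "r8", "r9" };
    static const char* fprs[] = { "xmm0", "xmm1", "xmm2", "xmm3" };
    assert(position < 4);
    return isFloat ? fprs[position] : gprs[position];
}

int main()
{
    // ExecState in position 0, a double in position 1, a GPR arg in position 2:
    assert(win64ArgRegister(0, false) == "rcx");
    assert(win64ArgRegister(1, true) == "xmm1"); // matches argumentFPR1 in the deleted code
    assert(win64ArgRegister(2, false) == "r8");  // matches argumentGPR2
}
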
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, FPRReg arg3)
{
-#if OS(WINDOWS) && CPU(X86_64)
- // On Windows, arguments map to designated registers based on the argument positions, even when there are interleaved scalar and floating point arguments.
- // See http://msdn.microsoft.com/en-us/library/zthk2dkh.aspx
- moveDouble(arg3, FPRInfo::argumentFPR3);
-#else
moveDouble(arg3, FPRInfo::argumentFPR0);
-#endif
setupStubArguments(arg1, arg2);
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
}
@@ -1108,12 +890,12 @@ public:
poke(arg3, 4);
}
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32, FPRReg arg2, GPRReg arg3)
+ ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, FPRReg arg2, GPRReg arg3)
{
setupArgumentsWithExecState(arg2, arg3);
}
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32, FPRReg arg4)
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, FPRReg arg4)
{
setupArgumentsWithExecState(arg1, arg2, arg4);
}
@@ -1157,12 +939,6 @@ public:
move(arg1, GPRInfo::argumentGPR0);
}
- ALWAYS_INLINE void setupArguments(TrustedImmPtr arg1, GPRReg arg2)
- {
- move(arg2, GPRInfo::argumentGPR1);
- move(arg1, GPRInfo::argumentGPR0);
- }
-
ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2)
{
setupTwoStubArgsGPR<GPRInfo::argumentGPR0, GPRInfo::argumentGPR1>(arg1, arg2);
@@ -1180,12 +956,6 @@ public:
move(arg4, GPRInfo::argumentGPR3);
}
- ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4)
- {
- setupThreeStubArgsGPR<GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, GPRInfo::argumentGPR2>(arg1, arg2, arg3);
- move(arg4, GPRInfo::argumentGPR3);
- }
-
ALWAYS_INLINE void setupArguments(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, TrustedImmPtr arg4)
{
setupTwoStubArgsGPR<GPRInfo::argumentGPR0, GPRInfo::argumentGPR2>(arg1, arg3);
@@ -1230,14 +1000,6 @@ public:
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
}
-#if OS(WINDOWS) && CPU(X86_64)
- ALWAYS_INLINE void setupArgumentsWithExecStateForCallWithSlowPathReturnType(TrustedImm32 arg1)
- {
- move(arg1, GPRInfo::argumentGPR2);
- move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
- }
-#endif
-
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2)
{
setupStubArguments(arg1, arg2);
@@ -1371,14 +1133,6 @@ public:
move(arg3, GPRInfo::argumentGPR3);
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
}
-
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, TrustedImm32 arg3)
- {
- move(arg1, GPRInfo::argumentGPR1);
- move(arg2, GPRInfo::argumentGPR2);
- move(arg3, GPRInfo::argumentGPR3);
- move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- }
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImmPtr arg3)
{
@@ -1403,14 +1157,6 @@ public:
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
}
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImmPtr arg2, GPRReg arg3)
- {
- move(arg3, GPRInfo::argumentGPR3);
- move(arg1, GPRInfo::argumentGPR1);
- move(arg2, GPRInfo::argumentGPR2);
- move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- }
-
ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3)
{
move(arg3, GPRInfo::argumentGPR3);
@@ -1419,14 +1165,6 @@ public:
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
}
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, TrustedImm32 arg3)
- {
- move(arg3, GPRInfo::argumentGPR3);
- move(arg1, GPRInfo::argumentGPR1);
- move(arg2, GPRInfo::argumentGPR2);
- move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- }
-
ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, TrustedImm32 arg3)
{
move(arg2, GPRInfo::argumentGPR2);
@@ -1506,20 +1244,6 @@ public:
setupArgumentsWithExecState(arg1, arg2, arg3);
}
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, GPRReg arg5)
- {
- poke(arg5, POKE_ARGUMENT_OFFSET + 1);
- poke(arg4, POKE_ARGUMENT_OFFSET);
- setupArgumentsWithExecState(arg1, arg2, arg3);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, TrustedImmPtr arg5)
- {
- poke(arg5, POKE_ARGUMENT_OFFSET + 1);
- poke(arg4, POKE_ARGUMENT_OFFSET);
- setupArgumentsWithExecState(arg1, arg2, arg3);
- }
-
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4)
{
poke(arg4, POKE_ARGUMENT_OFFSET);
@@ -1544,55 +1268,12 @@ public:
setupArgumentsWithExecState(arg1, arg2, arg3);
}
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImm32 arg4)
- {
- poke(arg4, POKE_ARGUMENT_OFFSET);
- setupArgumentsWithExecState(arg1, arg2, arg3);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4, TrustedImm32 arg5)
- {
- poke(arg5, POKE_ARGUMENT_OFFSET + 1);
- poke(arg4, POKE_ARGUMENT_OFFSET);
- setupArgumentsWithExecState(arg1, arg2, arg3);
- }
-
-#if CPU(X86_64)
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm64 arg4)
- {
- poke(arg4, POKE_ARGUMENT_OFFSET);
- setupArgumentsWithExecState(arg1, arg2, arg3);
- }
-#endif
-
ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImmPtr arg4)
{
poke(arg4, POKE_ARGUMENT_OFFSET);
setupArgumentsWithExecState(arg1, arg2, arg3);
}
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5)
- {
- poke(arg5, POKE_ARGUMENT_OFFSET + 1);
- poke(arg4, POKE_ARGUMENT_OFFSET);
- setupArgumentsWithExecState(arg1, arg2, arg3);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5, TrustedImm32 arg6)
- {
- poke(arg6, POKE_ARGUMENT_OFFSET + 2);
- poke(arg5, POKE_ARGUMENT_OFFSET + 1);
- poke(arg4, POKE_ARGUMENT_OFFSET);
- setupArgumentsWithExecState(arg1, arg2, arg3);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5)
- {
- poke(arg5, POKE_ARGUMENT_OFFSET + 1);
- poke(arg4, POKE_ARGUMENT_OFFSET);
- setupArgumentsWithExecState(arg1, arg2, arg3);
- }
-
ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImmPtr arg5)
{
poke(arg5, POKE_ARGUMENT_OFFSET + 1);
@@ -1675,24 +1356,6 @@ public:
poke(arg4, POKE_ARGUMENT_OFFSET);
setupArgumentsWithExecState(arg1, arg2, arg3);
}
-
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImmPtr arg4)
- {
- poke(arg4, POKE_ARGUMENT_OFFSET);
- setupArgumentsWithExecState(arg1, arg2, arg3);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4)
- {
- poke(arg4, POKE_ARGUMENT_OFFSET);
- setupArgumentsWithExecState(arg1, arg2, arg3);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImm32 arg2, GPRReg arg3, GPRReg arg4)
- {
- poke(arg4, POKE_ARGUMENT_OFFSET);
- setupArgumentsWithExecState(arg1, arg2, arg3);
- }
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5)
{
@@ -1722,40 +1385,6 @@ public:
setupArgumentsWithExecState(arg1, arg2, arg3);
}
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5)
- {
- poke(arg5, POKE_ARGUMENT_OFFSET + 1);
- poke(arg4, POKE_ARGUMENT_OFFSET);
- setupArgumentsWithExecState(arg1, arg2, arg3);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5)
- {
- poke(arg5, POKE_ARGUMENT_OFFSET + 1);
- poke(arg4, POKE_ARGUMENT_OFFSET);
- setupArgumentsWithExecState(arg1, arg2, arg3);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4)
- {
- poke(arg4, POKE_ARGUMENT_OFFSET);
- setupArgumentsWithExecState(arg1, arg2, arg3);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4, TrustedImm32 arg5)
- {
- poke(arg5, POKE_ARGUMENT_OFFSET + 1);
- poke(arg4, POKE_ARGUMENT_OFFSET);
- setupArgumentsWithExecState(arg1, arg2, arg3);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5)
- {
- poke(arg5, POKE_ARGUMENT_OFFSET + 1);
- poke(arg4, POKE_ARGUMENT_OFFSET);
- setupArgumentsWithExecState(arg1, arg2, arg3);
- }
-
ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImm32 arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5)
{
poke(arg5, POKE_ARGUMENT_OFFSET + 1);
@@ -1763,13 +1392,6 @@ public:
setupArgumentsWithExecState(arg1, arg2, arg3);
}
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, TrustedImmPtr arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5)
- {
- poke(arg5, POKE_ARGUMENT_OFFSET + 1);
- poke(arg4, POKE_ARGUMENT_OFFSET);
- setupArgumentsWithExecState(arg1, arg2, arg3);
- }
-
ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5)
{
poke(arg5, POKE_ARGUMENT_OFFSET + 1);
@@ -1792,14 +1414,6 @@ public:
setupArgumentsWithExecState(arg1, arg2, arg3);
}
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, TrustedImm32 arg6)
- {
- poke(arg6, POKE_ARGUMENT_OFFSET + 2);
- poke(arg5, POKE_ARGUMENT_OFFSET + 1);
- poke(arg4, POKE_ARGUMENT_OFFSET);
- setupArgumentsWithExecState(arg1, arg2, arg3);
- }
-
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5, TrustedImm32 arg6)
{
poke(arg6, POKE_ARGUMENT_OFFSET + 2);
@@ -1808,15 +1422,6 @@ public:
setupArgumentsWithExecState(arg1, arg2, arg3);
}
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6, TrustedImmPtr arg7)
- {
- poke(arg7, POKE_ARGUMENT_OFFSET + 3);
- poke(arg6, POKE_ARGUMENT_OFFSET + 2);
- poke(arg5, POKE_ARGUMENT_OFFSET + 1);
- poke(arg4, POKE_ARGUMENT_OFFSET);
- setupArgumentsWithExecState(arg1, arg2, arg3);
- }
-
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, GPRReg arg4, GPRReg arg5)
{
poke(arg5, POKE_ARGUMENT_OFFSET + 1);
@@ -1864,22 +1469,6 @@ public:
setupArgumentsWithExecState(arg1, arg2, arg3);
}
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, GPRReg arg5, TrustedImm32 arg6)
- {
- poke(arg6, POKE_ARGUMENT_OFFSET + 2);
- poke(arg5, POKE_ARGUMENT_OFFSET + 1);
- poke(arg4, POKE_ARGUMENT_OFFSET);
- setupArgumentsWithExecState(arg1, arg2, arg3);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, TrustedImmPtr arg6)
- {
- poke(arg6, POKE_ARGUMENT_OFFSET + 2);
- poke(arg5, POKE_ARGUMENT_OFFSET + 1);
- poke(arg4, POKE_ARGUMENT_OFFSET);
- setupArgumentsWithExecState(arg1, arg2, arg3);
- }
-
ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6, GPRReg arg7)
{
poke(arg7, POKE_ARGUMENT_OFFSET + 3);
@@ -1898,16 +1487,6 @@ public:
setupArgumentsWithExecState(arg1, arg2, arg3);
}
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, GPRReg arg5, GPRReg arg6, GPRReg arg7, TrustedImmPtr arg8)
- {
- poke(arg8, POKE_ARGUMENT_OFFSET + 4);
- poke(arg7, POKE_ARGUMENT_OFFSET + 3);
- poke(arg6, POKE_ARGUMENT_OFFSET + 2);
- poke(arg5, POKE_ARGUMENT_OFFSET + 1);
- poke(arg4, POKE_ARGUMENT_OFFSET);
- setupArgumentsWithExecState(arg1, arg2, arg3);
- }
-
ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5, GPRReg arg6, GPRReg arg7)
{
poke(arg7, POKE_ARGUMENT_OFFSET + 3);
@@ -1917,15 +1496,6 @@ public:
setupArgumentsWithExecState(arg1, arg2, arg3);
}
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4, TrustedImm32 arg5, GPRReg arg6, GPRReg arg7)
- {
- poke(arg7, POKE_ARGUMENT_OFFSET + 3);
- poke(arg6, POKE_ARGUMENT_OFFSET + 2);
- poke(arg5, POKE_ARGUMENT_OFFSET + 1);
- poke(arg4, POKE_ARGUMENT_OFFSET);
- setupArgumentsWithExecState(arg1, arg2, arg3);
- }
-
ALWAYS_INLINE void setupArguments(GPRReg arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4, GPRReg arg5)
{
poke(arg5, POKE_ARGUMENT_OFFSET);
@@ -1933,13 +1503,6 @@ public:
move(arg3, GPRInfo::argumentGPR2);
move(arg4, GPRInfo::argumentGPR3);
}
-
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, TrustedImm32 arg5)
- {
- poke(arg5, POKE_ARGUMENT_OFFSET + 1);
- poke(arg4, POKE_ARGUMENT_OFFSET);
- setupArgumentsWithExecState(arg1, arg2, arg3);
- }
#endif // NUMBER_OF_ARGUMENT_REGISTERS == 4
#if NUMBER_OF_ARGUMENT_REGISTERS >= 5
@@ -1948,13 +1511,6 @@ public:
setupThreeStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR3, GPRInfo::argumentGPR4>(arg1, arg3, arg4);
}
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImmPtr arg4)
- {
- setupThreeStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2, GPRInfo::argumentGPR3>(arg1, arg2, arg3);
- move(arg4, GPRInfo::argumentGPR4);
- move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- }
-
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4)
{
setupTwoStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR4>(arg1, arg4);
@@ -1963,14 +1519,6 @@ public:
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
}
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5)
- {
- setupThreeStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR4, GPRInfo::argumentGPR5>(arg1, arg4, arg5);
- move(arg2, GPRInfo::argumentGPR2);
- move(arg3, GPRInfo::argumentGPR3);
- move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- }
-
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4)
{
setupStubArguments134(arg1, arg3, arg4);
@@ -1985,76 +1533,6 @@ public:
move(arg4, GPRInfo::argumentGPR4);
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
}
-
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImm32 arg4)
- {
- move(arg2, GPRInfo::argumentGPR2); // In case arg2 is argumentGPR1.
- move(arg1, GPRInfo::argumentGPR1);
- move(arg3, GPRInfo::argumentGPR3);
- move(arg4, GPRInfo::argumentGPR4);
- move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm64 arg4)
- {
- move(arg2, GPRInfo::argumentGPR2); // In case arg2 is argumentGPR1.
- move(arg1, GPRInfo::argumentGPR1);
- move(arg3, GPRInfo::argumentGPR3);
- move(arg4, GPRInfo::argumentGPR4);
- move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, TrustedImmPtr arg3, TrustedImm32 arg4, TrustedImm32 arg5)
- {
- move(arg2, GPRInfo::argumentGPR2); // In case arg2 is argumentGPR1.
- move(arg1, GPRInfo::argumentGPR1);
- move(arg3, GPRInfo::argumentGPR3);
- move(arg4, GPRInfo::argumentGPR4);
- move(arg5, GPRInfo::argumentGPR5);
- move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, TrustedImmPtr arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5)
- {
- move(arg3, GPRInfo::argumentGPR3);
- move(arg1, GPRInfo::argumentGPR1);
- move(arg2, GPRInfo::argumentGPR2);
- move(arg4, GPRInfo::argumentGPR4);
- move(arg5, GPRInfo::argumentGPR5);
- move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImm32 arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4, TrustedImm32 arg5)
- {
- setupTwoStubArgsGPR<GPRInfo::argumentGPR2, GPRInfo::argumentGPR4>(arg2, arg4);
- move(arg1, GPRInfo::argumentGPR1);
- move(arg3, GPRInfo::argumentGPR3);
- move(arg5, GPRInfo::argumentGPR5);
- move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(TrustedImmPtr arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4, TrustedImm32 arg5)
- {
- setupTwoStubArgsGPR<GPRInfo::argumentGPR2, GPRInfo::argumentGPR3>(arg2, arg3);
- move(arg1, GPRInfo::argumentGPR1);
- move(arg4, GPRInfo::argumentGPR4);
- move(arg5, GPRInfo::argumentGPR5);
- move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, TrustedImm32 arg4)
- {
- setupThreeStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2, GPRInfo::argumentGPR3>(arg1, arg2, arg3);
- move(arg4, GPRInfo::argumentGPR4);
- move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- }
-
- ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, GPRReg arg4)
- {
- setupThreeStubArgsGPR<GPRInfo::argumentGPR1, GPRInfo::argumentGPR2, GPRInfo::argumentGPR4>(arg1, arg2, arg4);
- move(arg3, GPRInfo::argumentGPR3);
- move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- }
ALWAYS_INLINE void setupArguments(GPRReg arg1, TrustedImmPtr arg2, GPRReg arg3, GPRReg arg4, TrustedImmPtr arg5)
{
@@ -2070,15 +1548,6 @@ public:
move(arg4, GPRInfo::argumentGPR3);
}
#endif
-
- void setupArguments(JSValueRegs arg1)
- {
-#if USE(JSVALUE64)
- setupArguments(arg1.gpr());
-#else
- setupArguments(arg1.payloadGPR(), arg1.tagGPR());
-#endif
- }
void setupResults(GPRReg destA, GPRReg destB)
{
@@ -2101,110 +1570,13 @@ public:
swap(destA, destB);
}
- void setupResults(JSValueRegs regs)
- {
-#if USE(JSVALUE64)
- move(GPRInfo::returnValueGPR, regs.gpr());
-#else
- setupResults(regs.payloadGPR(), regs.tagGPR());
-#endif
- }
-
void jumpToExceptionHandler()
{
- // genericUnwind() leaves the handler CallFrame* in vm->callFrameForCatch,
+ // genericUnwind() leaves the handler CallFrame* in vm->callFrameForThrow,
// and the address of the handler in vm->targetMachinePCForThrow.
loadPtr(&vm()->targetMachinePCForThrow, GPRInfo::regT1);
jump(GPRInfo::regT1);
}
-
- void prepareForTailCallSlow(GPRReg calleeGPR = InvalidGPRReg)
- {
- GPRReg temp1 = calleeGPR == GPRInfo::regT0 ? GPRInfo::regT3 : GPRInfo::regT0;
- GPRReg temp2 = calleeGPR == GPRInfo::regT1 ? GPRInfo::regT3 : GPRInfo::regT1;
- GPRReg temp3 = calleeGPR == GPRInfo::regT2 ? GPRInfo::regT3 : GPRInfo::regT2;
-
- GPRReg newFramePointer = temp1;
- GPRReg newFrameSizeGPR = temp2;
- {
- // The old frame size is its number of arguments (or number of
- // parameters in case of arity fixup), plus the frame header size,
- // aligned
- GPRReg oldFrameSizeGPR = temp2;
- {
- GPRReg argCountGPR = oldFrameSizeGPR;
- load32(Address(framePointerRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset), argCountGPR);
-
- {
- GPRReg numParametersGPR = temp1;
- {
- GPRReg codeBlockGPR = numParametersGPR;
- loadPtr(Address(framePointerRegister, JSStack::CodeBlock * static_cast<int>(sizeof(Register))), codeBlockGPR);
- load32(Address(codeBlockGPR, CodeBlock::offsetOfNumParameters()), numParametersGPR);
- }
-
- ASSERT(numParametersGPR != argCountGPR);
- Jump argumentCountWasNotFixedUp = branch32(BelowOrEqual, numParametersGPR, argCountGPR);
- move(numParametersGPR, argCountGPR);
- argumentCountWasNotFixedUp.link(this);
- }
-
- add32(TrustedImm32(stackAlignmentRegisters() + JSStack::CallFrameHeaderSize - 1), argCountGPR, oldFrameSizeGPR);
- and32(TrustedImm32(-stackAlignmentRegisters()), oldFrameSizeGPR);
- // We assume < 2^28 arguments
- mul32(TrustedImm32(sizeof(Register)), oldFrameSizeGPR, oldFrameSizeGPR);
- }
-
- // The new frame pointer is at framePointer + oldFrameSize - newFrameSize
- ASSERT(newFramePointer != oldFrameSizeGPR);
- addPtr(framePointerRegister, oldFrameSizeGPR, newFramePointer);
-
- // The new frame size is just the number of arguments plus the
- // frame header size, aligned
- ASSERT(newFrameSizeGPR != newFramePointer);
- load32(Address(stackPointerRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)),
- newFrameSizeGPR);
- add32(TrustedImm32(stackAlignmentRegisters() + JSStack::CallFrameHeaderSize - 1), newFrameSizeGPR);
- and32(TrustedImm32(-stackAlignmentRegisters()), newFrameSizeGPR);
- // We assume < 2^28 arguments
- mul32(TrustedImm32(sizeof(Register)), newFrameSizeGPR, newFrameSizeGPR);
- }
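-
- // Editorial sketch (not in the original file): a worked example of the
- // rounding above, assuming 8-byte Registers, a 5-slot call frame header
- // and stackAlignmentRegisters() == 2. For 3 arguments:
- //
- //     (3 + 2 + 5 - 1) & -2 == 9 & -2 == 8 slots, i.e. 64 bytes,
- //
- // which is argCount plus the header size rounded up to a 2-slot boundary.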
-
- GPRReg tempGPR = temp3;
- ASSERT(tempGPR != newFramePointer && tempGPR != newFrameSizeGPR);
-
- // We don't need the current frame beyond this point. Masquerade as our
- // caller.
-#if CPU(ARM) || CPU(SH4) || CPU(ARM64)
- loadPtr(Address(framePointerRegister, sizeof(void*)), linkRegister);
- subPtr(TrustedImm32(2 * sizeof(void*)), newFrameSizeGPR);
-#elif CPU(MIPS)
- loadPtr(Address(framePointerRegister, sizeof(void*)), returnAddressRegister);
- subPtr(TrustedImm32(2 * sizeof(void*)), newFrameSizeGPR);
-#elif CPU(X86) || CPU(X86_64)
- loadPtr(Address(framePointerRegister, sizeof(void*)), tempGPR);
- push(tempGPR);
- subPtr(TrustedImm32(sizeof(void*)), newFrameSizeGPR);
-#else
- UNREACHABLE_FOR_PLATFORM();
-#endif
- subPtr(newFrameSizeGPR, newFramePointer);
- loadPtr(Address(framePointerRegister), framePointerRegister);
-
- // We now copy the new frame's contents - newFrameSizeGPR bytes
- // starting at the stack pointer - up to newFramePointer, working in
- // pointer-sized chunks from the top down.
- MacroAssembler::Label copyLoop(label());
-
- subPtr(TrustedImm32(sizeof(void*)), newFrameSizeGPR);
- loadPtr(BaseIndex(stackPointerRegister, newFrameSizeGPR, TimesOne), tempGPR);
- storePtr(tempGPR, BaseIndex(newFramePointer, newFrameSizeGPR, TimesOne));
-
- branchTest32(MacroAssembler::NonZero, newFrameSizeGPR).linkTo(copyLoop, this);
-
- // Ready for a jump!
- move(newFramePointer, stackPointerRegister);
- }
};
} // namespace JSC
diff --git a/Source/JavaScriptCore/jit/CachedRecovery.cpp b/Source/JavaScriptCore/jit/CachedRecovery.cpp
deleted file mode 100644
index f4aacc6c8..000000000
--- a/Source/JavaScriptCore/jit/CachedRecovery.cpp
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "CachedRecovery.h"
-
-#if ENABLE(JIT)
-
-namespace JSC {
-
-// We prefer loading doubles and undetermined JSValues into FPRs
-// because they would otherwise use up GPRs - two each in JSVALUE32_64.
-bool CachedRecovery::loadsIntoFPR() const
-{
- switch (recovery().technique()) {
- case DoubleDisplacedInJSStack:
- case DisplacedInJSStack:
-#if USE(JSVALUE64)
- case CellDisplacedInJSStack:
-#endif
- return true;
-
- default:
- return false;
- }
-}
-
-// Integers, booleans and cells can be loaded into GPRs
-bool CachedRecovery::loadsIntoGPR() const
-{
- switch (recovery().technique()) {
- case Int32DisplacedInJSStack:
-#if USE(JSVALUE64)
- case Int52DisplacedInJSStack:
- case StrictInt52DisplacedInJSStack:
- case DisplacedInJSStack:
-#endif
- case BooleanDisplacedInJSStack:
- case CellDisplacedInJSStack:
- return true;
-
- default:
- return false;
- }
-}
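-
-// Editorial sketch (not in the original file): a recovery can satisfy both
-// predicates. On JSVALUE64, an undetermined DisplacedInJSStack JSValue may be
-// loaded either way, so the shuffler can pick whichever register class is
-// under less pressure ('slot' is a hypothetical VirtualRegister):
-//
-//     CachedRecovery cached { ValueRecovery::displacedInJSStack(slot, DataFormatJS) };
-//     ASSERT(cached.loadsIntoGPR() && cached.loadsIntoFPR());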
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/CachedRecovery.h b/Source/JavaScriptCore/jit/CachedRecovery.h
deleted file mode 100644
index 5fe39dee7..000000000
--- a/Source/JavaScriptCore/jit/CachedRecovery.h
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef CachedRecovery_h
-#define CachedRecovery_h
-
-#if ENABLE(JIT)
-
-#include "ValueRecovery.h"
-#include "VirtualRegister.h"
-#include <wtf/Vector.h>
-
-namespace JSC {
-
-// A CachedRecovery is a wrapper around a ValueRecovery that records where said
-// value should go on the stack and/or in registers. Whenever we perform an
-// operation changing the ValueRecovery, we update the CachedRecovery's member
-// in place.
-class CachedRecovery {
-public:
- CachedRecovery(ValueRecovery recovery)
- : m_recovery { recovery }
- {
- }
-
- CachedRecovery(CachedRecovery&) = delete;
- CachedRecovery(CachedRecovery&&) = delete;
- CachedRecovery& operator=(CachedRecovery&) = delete;
- CachedRecovery& operator=(CachedRecovery&&) = delete;
-
- const Vector<VirtualRegister, 1>& targets() const { return m_targets; }
-
- void addTarget(VirtualRegister reg)
- {
- ASSERT(m_targets.isEmpty() || m_targets.last() < reg);
- m_targets.append(reg);
- }
-
- void removeTarget(VirtualRegister reg)
- {
- ASSERT_UNUSED(reg, m_targets.last() == reg);
- m_targets.shrink(m_targets.size() - 1);
- }
-
- void clearTargets()
- {
- m_targets.clear();
- }
-
- void setWantedJSValueRegs(JSValueRegs jsValueRegs)
- {
- ASSERT(m_wantedFPR == InvalidFPRReg);
- m_wantedJSValueRegs = jsValueRegs;
- }
-
- void setWantedFPR(FPRReg fpr)
- {
- ASSERT(!m_wantedJSValueRegs);
- m_wantedFPR = fpr;
- }
-
- // Determine whether converting this recovery into a JSValue will
- // require additional GPRs and/or FPRs.
- // This is guaranteed to only depend on the DataFormat, and the
- // result of these calls will stay valid after loads and/or stores.
- bool boxingRequiresGPR() const
- {
-#if USE(JSVALUE64)
- return recovery().dataFormat() == DataFormatDouble;
-#else
- return false;
-#endif
- }
- bool boxingRequiresFPR() const
- {
-#if USE(JSVALUE64)
- switch (recovery().dataFormat()) {
- case DataFormatInt52:
- case DataFormatStrictInt52:
- return true;
-
- default:
- return false;
- }
-#else
- return false;
-#endif
- }
-
- // This is used to determine what kind of register we need to be
- // able to load a recovery. We only use it when a direct load is
- // currently impossible, to determine whether we should spill a
- // GPR or an FPR for loading this value.
- bool loadsIntoGPR() const;
- bool loadsIntoFPR() const;
-
- ValueRecovery recovery() const { return m_recovery; }
-
- void setRecovery(ValueRecovery recovery) { m_recovery = recovery; }
-
- JSValueRegs wantedJSValueRegs() const { return m_wantedJSValueRegs; }
-
- FPRReg wantedFPR() const { return m_wantedFPR; }
-private:
- ValueRecovery m_recovery;
- JSValueRegs m_wantedJSValueRegs;
- FPRReg m_wantedFPR { InvalidFPRReg };
- Vector<VirtualRegister, 1> m_targets;
-};
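-
-// Editorial sketch (not in the original file) of the intended protocol, using
-// only the members declared above; 'target' is a hypothetical VirtualRegister
-// in the new frame:
-//
-//     CachedRecovery cached { ValueRecovery::inGPR(GPRInfo::regT0, DataFormatJS) };
-//     cached.addTarget(target);   // this value must end up in 'target'
-//     // ... once the value has been stored there, the shuffler would do:
-//     cached.removeTarget(target);
-//     cached.setRecovery(ValueRecovery::displacedInJSStack(target, DataFormatJS));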
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
-
-#endif // CachedRecovery_h
diff --git a/Source/JavaScriptCore/jit/CallFrameShuffleData.cpp b/Source/JavaScriptCore/jit/CallFrameShuffleData.cpp
deleted file mode 100644
index 567202c15..000000000
--- a/Source/JavaScriptCore/jit/CallFrameShuffleData.cpp
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "CallFrameShuffleData.h"
-
-#if ENABLE(JIT)
-
-#include "CCallHelpers.h"
-#include "CodeBlock.h"
-
-namespace JSC {
-
-#if USE(JSVALUE64)
-
-void CallFrameShuffleData::setupCalleeSaveRegisters(CodeBlock* codeBlock)
-{
- RegisterSet calleeSaveRegisters { RegisterSet::vmCalleeSaveRegisters() };
- RegisterAtOffsetList* registerSaveLocations = codeBlock->calleeSaveRegisters();
-
- for (size_t i = 0; i < registerSaveLocations->size(); ++i) {
- RegisterAtOffset entry { registerSaveLocations->at(i) };
- if (!calleeSaveRegisters.get(entry.reg()))
- continue;
-
- VirtualRegister saveSlot { entry.offsetAsIndex() };
- registers[entry.reg()]
- = ValueRecovery::displacedInJSStack(saveSlot, DataFormatJS);
- }
-
- for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
- if (!calleeSaveRegisters.get(reg))
- continue;
-
- if (registers[reg])
- continue;
-
- registers[reg] = ValueRecovery::inRegister(reg, DataFormatJS);
- }
-}
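-
-// Editorial sketch (not in the original file): after the loops above, every
-// VM callee-save register has a recovery - either the stack slot the code
-// block saved it in, or "already live in that register":
-//
-//     CallFrameShuffleData data;
-//     data.setupCalleeSaveRegisters(codeBlock); // 'codeBlock' from the caller
-//     // data.registers[reg].isSet() now holds for each VM callee-save reg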
-
-#endif // USE(JSVALUE64)
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/CallFrameShuffleData.h b/Source/JavaScriptCore/jit/CallFrameShuffleData.h
deleted file mode 100644
index d85e55b3e..000000000
--- a/Source/JavaScriptCore/jit/CallFrameShuffleData.h
+++ /dev/null
@@ -1,54 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef CallFrameShuffleData_h
-#define CallFrameShuffleData_h
-
-#if ENABLE(JIT)
-
-#include "RegisterMap.h"
-#include "ValueRecovery.h"
-
-namespace JSC {
-
-struct CallFrameShuffleData {
- WTF_MAKE_FAST_ALLOCATED;
-public:
- unsigned numLocals;
- ValueRecovery callee;
- Vector<ValueRecovery> args;
-#if USE(JSVALUE64)
- RegisterMap<ValueRecovery> registers;
- GPRReg tagTypeNumber { InvalidGPRReg };
-
- void setupCalleeSaveRegisters(CodeBlock*);
-#endif
-};
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
-
-#endif // CallFrameShuffleData_h
diff --git a/Source/JavaScriptCore/jit/CallFrameShuffler.cpp b/Source/JavaScriptCore/jit/CallFrameShuffler.cpp
deleted file mode 100644
index 45af55dd6..000000000
--- a/Source/JavaScriptCore/jit/CallFrameShuffler.cpp
+++ /dev/null
@@ -1,774 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "CallFrameShuffler.h"
-
-#if ENABLE(JIT)
-
-#include "CachedRecovery.h"
-#include "CCallHelpers.h"
-#include "CodeBlock.h"
-
-namespace JSC {
-
-CallFrameShuffler::CallFrameShuffler(CCallHelpers& jit, const CallFrameShuffleData& data)
- : m_jit(jit)
- , m_oldFrame(data.numLocals + JSStack::CallerFrameAndPCSize, nullptr)
- , m_newFrame(data.args.size() + JSStack::CallFrameHeaderSize, nullptr)
- , m_alignedOldFrameSize(JSStack::CallFrameHeaderSize
- + roundArgumentCountToAlignFrame(jit.codeBlock()->numParameters()))
- , m_alignedNewFrameSize(JSStack::CallFrameHeaderSize
- + roundArgumentCountToAlignFrame(data.args.size()))
- , m_frameDelta(m_alignedNewFrameSize - m_alignedOldFrameSize)
- , m_lockedRegisters(RegisterSet::allRegisters())
-{
- // We are allowed all the usual registers...
- for (unsigned i = GPRInfo::numberOfRegisters; i--; )
- m_lockedRegisters.clear(GPRInfo::toRegister(i));
- for (unsigned i = FPRInfo::numberOfRegisters; i--; )
- m_lockedRegisters.clear(FPRInfo::toRegister(i));
- // ... as well as the runtime registers.
- m_lockedRegisters.exclude(RegisterSet::vmCalleeSaveRegisters());
-
- ASSERT(!data.callee.isInJSStack() || data.callee.virtualRegister().isLocal());
- addNew(VirtualRegister(JSStack::Callee), data.callee);
-
- for (size_t i = 0; i < data.args.size(); ++i) {
- ASSERT(!data.args[i].isInJSStack() || data.args[i].virtualRegister().isLocal());
- addNew(virtualRegisterForArgument(i), data.args[i]);
- }
-
-#if USE(JSVALUE64)
- for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
- if (!data.registers[reg].isSet())
- continue;
-
- if (reg.isGPR())
- addNew(JSValueRegs(reg.gpr()), data.registers[reg]);
- else
- addNew(reg.fpr(), data.registers[reg]);
- }
-
- m_tagTypeNumber = data.tagTypeNumber;
- if (m_tagTypeNumber != InvalidGPRReg)
- lockGPR(m_tagTypeNumber);
-#endif
-}
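-
-// Editorial sketch (not in the original file) of typical construction; 'jit'
-// and 'data' are assumed to come from the surrounding call site:
-//
-//     CallFrameShuffler shuffler { jit, data };
-//     shuffler.prepareForTailCall(); // or prepareForSlowPath()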
-
-void CallFrameShuffler::dump(PrintStream& out) const
-{
- static const char* delimiter = " +-------------------------------+ ";
- static const char* dangerDelimiter = " X-------------------------------X ";
- static const char* dangerBoundsDelimiter = " XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX ";
- static const char* emptySpace = " ";
- out.print(" ");
- out.print(" Old frame ");
- out.print(" New frame ");
- out.print("\n");
- int totalSize = m_alignedOldFrameSize + std::max(numLocals(), m_alignedNewFrameSize) + 3;
- for (int i = 0; i < totalSize; ++i) {
- VirtualRegister old { m_alignedOldFrameSize - i - 1 };
- VirtualRegister newReg { old + m_frameDelta };
-
- if (!isValidOld(old) && old != firstOld() - 1
- && !isValidNew(newReg) && newReg != firstNew() - 1)
- continue;
-
- out.print(" ");
- if (dangerFrontier() >= firstNew()
- && (newReg == dangerFrontier() || newReg == firstNew() - 1))
- out.print(dangerBoundsDelimiter);
- else if (isValidOld(old))
- out.print(isValidNew(newReg) && isDangerNew(newReg) ? dangerDelimiter : delimiter);
- else if (old == firstOld() - 1)
- out.print(delimiter);
- else
- out.print(emptySpace);
- if (dangerFrontier() >= firstNew()
- && (newReg == dangerFrontier() || newReg == firstNew() - 1))
- out.print(dangerBoundsDelimiter);
- else if (isValidNew(newReg) || newReg == firstNew() - 1)
- out.print(isDangerNew(newReg) ? dangerDelimiter : delimiter);
- else
- out.print(emptySpace);
- out.print("\n");
- if (old == firstOld())
- out.print(" sp --> ");
- else if (!old.offset())
- out.print(" fp --> ");
- else
- out.print(" ");
- if (isValidOld(old)) {
- if (getOld(old)) {
- auto str = toCString(old);
- if (isValidNew(newReg) && isDangerNew(newReg))
- out.printf(" X %18s X ", str.data());
- else
- out.printf(" | %18s | ", str.data());
- } else if (isValidNew(newReg) && isDangerNew(newReg))
- out.printf(" X%30s X ", "");
- else
- out.printf(" |%30s | ", "");
- } else
- out.print(emptySpace);
- if (isValidNew(newReg)) {
- const char d = isDangerNew(newReg) ? 'X' : '|';
- auto str = toCString(newReg);
- if (getNew(newReg)) {
- if (getNew(newReg)->recovery().isConstant())
- out.printf(" %c%8s <- constant %c ", d, str.data(), d);
- else {
- auto recoveryStr = toCString(getNew(newReg)->recovery());
- out.printf(" %c%8s <- %18s %c ", d, str.data(),
- recoveryStr.data(), d);
- }
- } else if (newReg == VirtualRegister { JSStack::ArgumentCount })
- out.printf(" %c%8s <- %18zu %c ", d, str.data(), argCount(), d);
- else
- out.printf(" %c%30s %c ", d, "", d);
- } else
- out.print(emptySpace);
- if (newReg == firstNew() - m_newFrameOffset && !isSlowPath())
- out.print(" <-- new sp before jump (current ", m_newFrameBase, ") ");
- if (newReg == firstNew())
- out.print(" <-- new fp after prologue");
- out.print("\n");
- }
- out.print(" ");
- out.print(" Live registers ");
- out.print(" Wanted registers ");
- out.print("\n");
- for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
- CachedRecovery* oldCachedRecovery { m_registers[reg] };
- CachedRecovery* newCachedRecovery { m_newRegisters[reg] };
- if (!oldCachedRecovery && !newCachedRecovery)
- continue;
- out.print(" ");
- if (oldCachedRecovery) {
- auto str = toCString(reg);
- out.printf(" %8s ", str.data());
- } else
- out.print(emptySpace);
-#if USE(JSVALUE32_64)
- if (newCachedRecovery) {
- JSValueRegs wantedJSValueRegs { newCachedRecovery->wantedJSValueRegs() };
- if (reg.isFPR())
- out.print(reg, " <- ", newCachedRecovery->recovery());
- else {
- if (reg.gpr() == wantedJSValueRegs.tagGPR())
- out.print(reg.gpr(), " <- tag(", newCachedRecovery->recovery(), ")");
- else
- out.print(reg.gpr(), " <- payload(", newCachedRecovery->recovery(), ")");
- }
- }
-#else
- if (newCachedRecovery)
- out.print(" ", reg, " <- ", newCachedRecovery->recovery());
-#endif
- out.print("\n");
- }
- out.print(" Locked registers: ");
- bool firstLocked { true };
- for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
- if (m_lockedRegisters.get(reg)) {
- out.print(firstLocked ? "" : ", ", reg);
- firstLocked = false;
- }
- }
- out.print("\n");
-
- if (isSlowPath())
- out.print(" Using fp-relative addressing for slow path call\n");
- else
- out.print(" Using sp-relative addressing for jump (using ", m_newFrameBase, " as new sp)\n");
- if (m_oldFrameOffset)
- out.print(" Old frame offset is ", m_oldFrameOffset, "\n");
- if (m_newFrameOffset)
- out.print(" New frame offset is ", m_newFrameOffset, "\n");
-#if USE(JSVALUE64)
- if (m_tagTypeNumber != InvalidGPRReg)
- out.print(" TagTypeNumber is currently in ", m_tagTypeNumber, "\n");
-#endif
-}
-
-CachedRecovery* CallFrameShuffler::getCachedRecovery(ValueRecovery recovery)
-{
- ASSERT(!recovery.isConstant());
- if (recovery.isInGPR())
- return m_registers[recovery.gpr()];
- if (recovery.isInFPR())
- return m_registers[recovery.fpr()];
-#if USE(JSVALUE32_64)
- if (recovery.technique() == InPair) {
- ASSERT(m_registers[recovery.tagGPR()] == m_registers[recovery.payloadGPR()]);
- return m_registers[recovery.payloadGPR()];
- }
-#endif
- ASSERT(recovery.isInJSStack());
- return getOld(recovery.virtualRegister());
-}
-
-CachedRecovery* CallFrameShuffler::setCachedRecovery(ValueRecovery recovery, CachedRecovery* cachedRecovery)
-{
- ASSERT(!recovery.isConstant());
- if (recovery.isInGPR())
- return m_registers[recovery.gpr()] = cachedRecovery;
- if (recovery.isInFPR())
- return m_registers[recovery.fpr()] = cachedRecovery;
-#if USE(JSVALUE32_64)
- if (recovery.technique() == InPair) {
- m_registers[recovery.tagGPR()] = cachedRecovery;
- return m_registers[recovery.payloadGPR()] = cachedRecovery;
- }
-#endif
- ASSERT(recovery.isInJSStack());
- setOld(recovery.virtualRegister(), cachedRecovery);
- return cachedRecovery;
-}
-
-void CallFrameShuffler::spill(CachedRecovery& cachedRecovery)
-{
- ASSERT(!isSlowPath());
- ASSERT(cachedRecovery.recovery().isInRegisters());
-
- VirtualRegister spillSlot { 0 };
- for (VirtualRegister slot = firstOld(); slot <= lastOld(); slot += 1) {
- if (slot >= newAsOld(firstNew()))
- break;
-
- if (getOld(slot))
- continue;
-
- spillSlot = slot;
- break;
- }
- // We must have enough slots to be able to fit the whole callee's
- // frame for the slow path - unless we are in the FTL. In that
- // case, we are allowed to extend the frame *once*, since we are
- // guaranteed to have enough available space for that.
- if (spillSlot >= newAsOld(firstNew()) || !spillSlot.isLocal()) {
- RELEASE_ASSERT(!m_didExtendFrame);
- extendFrameIfNeeded();
- spill(cachedRecovery);
- return;
- }
-
- if (verbose)
- dataLog(" * Spilling ", cachedRecovery.recovery(), " into ", spillSlot, "\n");
- auto format = emitStore(cachedRecovery, addressForOld(spillSlot));
- ASSERT(format != DataFormatNone);
- updateRecovery(cachedRecovery, ValueRecovery::displacedInJSStack(spillSlot, format));
-}
-
-void CallFrameShuffler::emitDeltaCheck()
-{
- if (ASSERT_DISABLED)
- return;
-
- GPRReg scratchGPR { getFreeGPR() };
- if (scratchGPR != InvalidGPRReg) {
- if (verbose)
- dataLog(" Using ", scratchGPR, " for the fp-sp delta check\n");
- m_jit.move(MacroAssembler::stackPointerRegister, scratchGPR);
- m_jit.subPtr(GPRInfo::callFrameRegister, scratchGPR);
- MacroAssembler::Jump ok = m_jit.branch32(
- MacroAssembler::Equal, scratchGPR,
- MacroAssembler::TrustedImm32(-numLocals() * sizeof(Register)));
- m_jit.abortWithReason(JITUnexpectedCallFrameSize);
- ok.link(&m_jit);
- } else if (verbose)
- dataLog(" Skipping the fp-sp delta check since there is too much pressure\n");
-}
-
-void CallFrameShuffler::extendFrameIfNeeded()
-{
- ASSERT(!m_didExtendFrame);
-
- VirtualRegister firstRead { firstOld() };
- for (; firstRead <= virtualRegisterForLocal(0); firstRead += 1) {
- if (getOld(firstRead))
- break;
- }
- size_t availableSize = static_cast<size_t>(firstRead.offset() - firstOld().offset());
- size_t wantedSize = m_newFrame.size() + m_newFrameOffset;
-
- if (availableSize < wantedSize) {
- size_t delta = WTF::roundUpToMultipleOf(stackAlignmentRegisters(), wantedSize - availableSize);
- m_oldFrame.grow(m_oldFrame.size() + delta);
- for (size_t i = 0; i < delta; ++i)
- m_oldFrame[m_oldFrame.size() - i - 1] = nullptr;
- m_jit.subPtr(MacroAssembler::TrustedImm32(delta * sizeof(Register)), MacroAssembler::stackPointerRegister);
-
- if (isSlowPath())
- m_frameDelta = numLocals() + JSStack::CallerFrameAndPCSize;
- else
- m_oldFrameOffset = numLocals();
-
- if (verbose)
- dataLogF(" Not enough space - extending the old frame %zu slots\n", delta);
- }
-
- m_didExtendFrame = true;
-}
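-
-// Editorial sketch (not in the original file): if wantedSize exceeds
-// availableSize by 3 slots and stackAlignmentRegisters() == 2, then
-//
-//     delta == WTF::roundUpToMultipleOf(2, 3) == 4 slots,
-//
-// i.e. a 32-byte subPtr with 8-byte Registers.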
-
-void CallFrameShuffler::prepareForSlowPath()
-{
- ASSERT(isUndecided());
- emitDeltaCheck();
-
- m_frameDelta = numLocals() + JSStack::CallerFrameAndPCSize;
- m_newFrameBase = MacroAssembler::stackPointerRegister;
- m_newFrameOffset = -JSStack::CallerFrameAndPCSize;
-
- if (verbose)
- dataLog("\n\nPreparing frame for slow path call:\n");
-
- // When coming from the FTL, we need to extend the frame. In other
- // cases, we may end up extending the frame if we previously
- // spilled things (e.g. in a polymorphic cache).
- extendFrameIfNeeded();
-
- if (verbose)
- dataLog(*this);
-
- prepareAny();
-
- if (verbose)
- dataLog("Ready for slow path call!\n");
-}
-
-void CallFrameShuffler::prepareForTailCall()
-{
- ASSERT(isUndecided());
- emitDeltaCheck();
-
- // We'll use sp-based indexing so that we can load the
- // caller's frame pointer into the frame pointer register immediately.
- m_oldFrameBase = MacroAssembler::stackPointerRegister;
- m_oldFrameOffset = numLocals();
- m_newFrameBase = acquireGPR();
-#if CPU(X86)
- // We load the frame pointer manually, but we need to ask the
- // algorithm to move the return PC for us (it'd probably
- // require a write to the danger zone). Since it'd be awkward
- // to ask for half a value move, we ask that the whole thing
- // be moved for us.
- addNew(VirtualRegister { 0 },
- ValueRecovery::displacedInJSStack(VirtualRegister(0), DataFormatJS));
-
- // sp will point to head0 and we will move it up half a slot
- // manually
- m_newFrameOffset = 0;
-#elif CPU(ARM) || CPU(SH4) || CPU(MIPS)
- // We load the frame pointer and link register
- // manually. We could ask the algorithm to load them for us,
- // and it would allow us to use the link register as an extra
- // temporary - but it'd mean that the frame pointer can also
- // be used as an extra temporary, so we keep the link register
- // locked instead.
-
- // sp will point to head1 since the callee's prologue pushes
- // the call frame and link register.
- m_newFrameOffset = -1;
-#elif CPU(ARM64)
- // We load the frame pointer and link register manually. We
- // could ask the algorithm to load the link register for us
- // (which would allow for its use as an extra temporary), but
- // since it's not in GPRInfo, we can't do it.
-
- // sp will point to head2 since the callee's prologue pushes the
- // call frame and link register
- m_newFrameOffset = -2;
-#elif CPU(X86_64)
- // We load the frame pointer manually, but we ask the
- // algorithm to move the return PC for us (it'd probably
- // require a write in the danger zone)
- addNew(VirtualRegister { 1 },
- ValueRecovery::displacedInJSStack(VirtualRegister(1), DataFormatJS));
-
- // sp will point to head1 since the callee's prologue pushes
- // the call frame register
- m_newFrameOffset = -1;
-#else
- UNREACHABLE_FOR_PLATFORM();
-#endif
-
- if (verbose)
- dataLog(" Emitting code for computing the new frame base\n");
-
- // We compute the new frame base by first computing the top of the
- // old frame (taking into account an argument count higher than
- // the number of parameters), then subtracting from it the aligned
- // new frame size (adjusted by the new frame offset).
- m_jit.load32(MacroAssembler::Address(GPRInfo::callFrameRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset), m_newFrameBase);
- MacroAssembler::Jump argumentCountOK =
- m_jit.branch32(MacroAssembler::BelowOrEqual, m_newFrameBase,
- MacroAssembler::TrustedImm32(m_jit.codeBlock()->numParameters()));
- m_jit.add32(MacroAssembler::TrustedImm32(stackAlignmentRegisters() - 1 + JSStack::CallFrameHeaderSize), m_newFrameBase);
- m_jit.and32(MacroAssembler::TrustedImm32(-stackAlignmentRegisters()), m_newFrameBase);
- m_jit.mul32(MacroAssembler::TrustedImm32(sizeof(Register)), m_newFrameBase, m_newFrameBase);
- MacroAssembler::Jump done = m_jit.jump();
- argumentCountOK.link(&m_jit);
- m_jit.move(
- MacroAssembler::TrustedImm32(m_alignedOldFrameSize * sizeof(Register)),
- m_newFrameBase);
- done.link(&m_jit);
-
- m_jit.addPtr(GPRInfo::callFrameRegister, m_newFrameBase);
- m_jit.subPtr(
- MacroAssembler::TrustedImm32(
- (m_alignedNewFrameSize + m_newFrameOffset) * sizeof(Register)),
- m_newFrameBase);
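-
- // Editorial sketch (not in the original file): suppose the caller was
- // invoked with ArgumentCount == 7 but numParameters() == 2, assuming a
- // 5-slot header and stackAlignmentRegisters() == 2. Since 7 > 2, the
- // fall-through path computes (7 + (2 - 1) + 5) & -2 == 12 slots above
- // fp, and the new base is that minus (m_alignedNewFrameSize +
- // m_newFrameOffset) slots, all scaled by sizeof(Register).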
-
- // We load the link register manually for architectures that have one
-#if CPU(ARM) || CPU(SH4) || CPU(ARM64)
- m_jit.loadPtr(MacroAssembler::Address(MacroAssembler::framePointerRegister, sizeof(void*)),
- MacroAssembler::linkRegister);
-#elif CPU(MIPS)
- m_jit.loadPtr(MacroAssembler::Address(MacroAssembler::framePointerRegister, sizeof(void*)),
- MacroAssembler::returnAddressRegister);
-#endif
-
- // We want the frame pointer to always point to a valid frame, and
- // we are going to trash the current one. Let's make it point to
- // our caller's frame, since that's what we want to end up with.
- m_jit.loadPtr(MacroAssembler::Address(MacroAssembler::framePointerRegister),
- MacroAssembler::framePointerRegister);
-
- if (verbose)
- dataLog("Preparing frame for tail call:\n", *this);
-
- prepareAny();
-
-#if CPU(X86)
- if (verbose)
- dataLog(" Simulating pop of the call frame register\n");
- m_jit.addPtr(MacroAssembler::TrustedImm32(sizeof(void*)), MacroAssembler::stackPointerRegister);
-#endif
-
- if (verbose)
- dataLog("Ready for tail call!\n");
-}
-
-bool CallFrameShuffler::tryWrites(CachedRecovery& cachedRecovery)
-{
- ASSERT(m_newFrameBase != InvalidGPRReg);
-
- // If the value is already set up correctly, we don't have
- // anything to do.
- if (isSlowPath() && cachedRecovery.recovery().isInJSStack()
- && cachedRecovery.targets().size() == 1
- && newAsOld(cachedRecovery.targets()[0]) == cachedRecovery.recovery().virtualRegister()) {
- cachedRecovery.clearTargets();
- if (!cachedRecovery.wantedJSValueRegs() && cachedRecovery.wantedFPR() == InvalidFPRReg)
- clearCachedRecovery(cachedRecovery.recovery());
- return true;
- }
-
- if (!canLoadAndBox(cachedRecovery))
- return false;
-
- emitLoad(cachedRecovery);
- emitBox(cachedRecovery);
- ASSERT(cachedRecovery.recovery().isInRegisters()
- || cachedRecovery.recovery().isConstant());
-
- if (verbose)
- dataLog(" * Storing ", cachedRecovery.recovery());
- for (size_t i = 0; i < cachedRecovery.targets().size(); ++i) {
- VirtualRegister target { cachedRecovery.targets()[i] };
- ASSERT(!isDangerNew(target));
- if (verbose)
- dataLog(!i ? " into " : ", and ", "NEW ", target);
- emitStore(cachedRecovery, addressForNew(target));
- setNew(target, nullptr);
- }
- if (verbose)
- dataLog("\n");
- cachedRecovery.clearTargets();
- if (!cachedRecovery.wantedJSValueRegs() && cachedRecovery.wantedFPR() == InvalidFPRReg)
- clearCachedRecovery(cachedRecovery.recovery());
-
- return true;
-}
-
-bool CallFrameShuffler::performSafeWrites()
-{
- VirtualRegister firstSafe;
- VirtualRegister end { lastNew() + 1 };
- Vector<VirtualRegister> failures;
-
- // For each cachedRecovery that writes to the safe zone and does
- // not also write to the danger zone, we try to perform the
- // writes. This may free up danger slots, so we iterate again
- // until no further slots are freed.
- //
- // Note that even though we have a while block, we look at
- // each slot of the new call frame at most once since in each
- // iteration beyond the first, we only load up the portion of
- // the new call frame that was dangerous and became safe due
- // to the previous iteration.
- do {
- firstSafe = dangerFrontier() + 1;
- if (verbose)
- dataLog(" Trying safe writes (between NEW ", firstSafe, " and NEW ", end - 1, ")\n");
- bool didProgress = false;
- for (VirtualRegister reg = firstSafe; reg < end; reg += 1) {
- CachedRecovery* cachedRecovery = getNew(reg);
- if (!cachedRecovery) {
- if (verbose)
- dataLog(" + ", reg, " is OK.\n");
- continue;
- }
- if (!hasOnlySafeWrites(*cachedRecovery)) {
- if (verbose) {
- dataLog(" - ", cachedRecovery->recovery(), " writes to NEW ", reg,
- " but also has dangerous writes.\n");
- }
- continue;
- }
- if (cachedRecovery->wantedJSValueRegs()) {
- if (verbose) {
- dataLog(" - ", cachedRecovery->recovery(), " writes to NEW ", reg,
- " but is also needed in registers.\n");
- }
- continue;
- }
- if (cachedRecovery->wantedFPR() != InvalidFPRReg) {
- if (verbose) {
- dataLog(" - ", cachedRecovery->recovery(), " writes to NEW ", reg,
- " but is also needed in an FPR.\n");
- }
- continue;
- }
- if (!tryWrites(*cachedRecovery)) {
- if (verbose)
- dataLog(" - Unable to write to NEW ", reg, " from ", cachedRecovery->recovery(), "\n");
- failures.append(reg);
- }
- didProgress = true;
- }
- end = firstSafe;
-
- // If we have cachedRecoveries that failed to write, it is
- // because they are on the stack and we didn't have enough
- // registers available at the time to load them into. If
- // we have a free register, we should try again because it
- // could free up some danger slots.
- if (didProgress && hasFreeRegister()) {
- Vector<VirtualRegister> stillFailing;
- for (VirtualRegister failed : failures) {
- CachedRecovery* cachedRecovery = getNew(failed);
- // It could have been handled later if it had
- // several targets
- if (!cachedRecovery)
- continue;
-
- ASSERT(hasOnlySafeWrites(*cachedRecovery)
- && !cachedRecovery->wantedJSValueRegs()
- && cachedRecovery->wantedFPR() == InvalidFPRReg);
- if (!tryWrites(*cachedRecovery))
- stillFailing.append(failed);
- }
- failures = WTFMove(stillFailing);
- }
- if (verbose && firstSafe != dangerFrontier() + 1)
- dataLog(" We freed up danger slots!\n");
- } while (firstSafe != dangerFrontier() + 1);
-
- return failures.isEmpty();
-}
-
-void CallFrameShuffler::prepareAny()
-{
- ASSERT(!isUndecided());
-
- updateDangerFrontier();
-
- // First, we try to store any value that goes above the danger
- // frontier. This will never use more registers since we are only
- // loading+storing if we ensure that any register used for the load
- // will be freed up after the stores (i.e., all stores are above
- // the danger frontier, and there is no wanted register).
- performSafeWrites();
-
- // At this point, we couldn't have more available registers than
- // we have without spilling: all values currently in registers
- // either require a write to the danger zone, or have a wanted
- // register, which means that in any case they will have to go
- // through registers again.
-
- // We now slowly free up the danger zone by first loading the old
- // value on the danger frontier, spilling as many registers as
- // needed to do so and ensuring that the corresponding slot in the
- // new frame is now ready to be written. Then, we store the old
- // value to its target location if possible (we could have failed
- // to load it previously due to high pressure). Finally, we write
- // to any of the newly safe slots that we can, which could free up
- // registers (hence why we do it eagerly).
- for (VirtualRegister reg = dangerFrontier(); reg >= firstNew(); reg -= 1) {
- if (reg == dangerFrontier()) {
- if (verbose)
- dataLog(" Next slot (NEW ", reg, ") is the danger frontier\n");
- CachedRecovery* cachedRecovery { getOld(newAsOld(dangerFrontier())) };
- ASSERT(cachedRecovery);
- ensureLoad(*cachedRecovery);
- emitLoad(*cachedRecovery);
- ensureBox(*cachedRecovery);
- emitBox(*cachedRecovery);
- if (hasOnlySafeWrites(*cachedRecovery))
- tryWrites(*cachedRecovery);
- } else if (verbose)
- dataLog(" Next slot is NEW ", reg, "\n");
-
- ASSERT(!isDangerNew(reg));
- CachedRecovery* cachedRecovery = getNew(reg);
- // This could be one of the header slots we don't care about.
- if (!cachedRecovery) {
- if (verbose)
- dataLog(" + ", reg, " is OK\n");
- continue;
- }
-
- if (canLoadAndBox(*cachedRecovery) && hasOnlySafeWrites(*cachedRecovery)
- && !cachedRecovery->wantedJSValueRegs()
- && cachedRecovery->wantedFPR() == InvalidFPRReg) {
- emitLoad(*cachedRecovery);
- emitBox(*cachedRecovery);
- bool writesOK = tryWrites(*cachedRecovery);
- ASSERT_UNUSED(writesOK, writesOK);
- } else if (verbose)
- dataLog(" - ", cachedRecovery->recovery(), " can't be handled just yet.\n");
- }
- ASSERT(dangerFrontier() < firstNew());
-
- // Now, the danger zone is empty, but we still have a couple of
- // things to do:
- //
- // 1) There could be remaining safe writes that failed earlier due
- // to high register pressure and had nothing to do with the
- // danger zone whatsoever.
- //
- // 2) Some wanted registers could have to be loaded (this could
- // happen either when making a call to a new function with a
- // lower number of arguments - since above here, we only load
- // wanted registers when they are at the danger frontier - or
- // if a wanted register got spilled).
- //
- // 3) Some wanted registers could have been loaded in the wrong
- // registers
- //
- // 4) We have to take care of some bookkeeping - namely, storing
- // the argument count and updating the stack pointer.
-
- // At this point, we must have enough registers available for
- // handling 1). None of the loads can fail because we have been
- // eagerly freeing up registers in all the previous phases - so
- // the only values that are in registers at this point must have
- // wanted registers.
- if (verbose)
- dataLog(" Danger zone is clear, performing remaining writes.\n");
- for (VirtualRegister reg = firstNew(); reg <= lastNew(); reg += 1) {
- CachedRecovery* cachedRecovery { getNew(reg) };
- if (!cachedRecovery)
- continue;
-
- emitLoad(*cachedRecovery);
- emitBox(*cachedRecovery);
- bool writesOK = tryWrites(*cachedRecovery);
- ASSERT_UNUSED(writesOK, writesOK);
- }
-
-#if USE(JSVALUE64)
- if (m_tagTypeNumber != InvalidGPRReg && m_newRegisters[m_tagTypeNumber])
- releaseGPR(m_tagTypeNumber);
-#endif
-
- // Handle 2) by loading all registers. We don't have to do any
- // writes, since they have been taken care of above.
- if (verbose)
- dataLog(" Loading wanted registers into registers\n");
- for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
- CachedRecovery* cachedRecovery { m_newRegisters[reg] };
- if (!cachedRecovery)
- continue;
-
- emitLoad(*cachedRecovery);
- emitBox(*cachedRecovery);
- ASSERT(cachedRecovery->targets().isEmpty());
- }
-
-#if USE(JSVALUE64)
- if (m_tagTypeNumber != InvalidGPRReg)
- releaseGPR(m_tagTypeNumber);
-#endif
-
- // At this point, we have read everything we cared about from the
- // stack, and written everything we had to to the stack.
- if (verbose)
- dataLog(" Callee frame is fully set up\n");
- if (!ASSERT_DISABLED) {
- for (VirtualRegister reg = firstNew(); reg <= lastNew(); reg += 1)
- ASSERT_UNUSED(reg, !getNew(reg));
-
- for (CachedRecovery* cachedRecovery : m_cachedRecoveries) {
- ASSERT_UNUSED(cachedRecovery, cachedRecovery->targets().isEmpty());
- ASSERT(!cachedRecovery->recovery().isInJSStack());
- }
- }
-
- // We need to handle 4) first because it implies releasing
- // m_newFrameBase, which could be a wanted register.
- if (verbose)
- dataLog(" * Storing the argument count into ", VirtualRegister { JSStack::ArgumentCount }, "\n");
- m_jit.store32(MacroAssembler::TrustedImm32(0),
- addressForNew(VirtualRegister { JSStack::ArgumentCount }).withOffset(TagOffset));
- m_jit.store32(MacroAssembler::TrustedImm32(argCount()),
- addressForNew(VirtualRegister { JSStack::ArgumentCount }).withOffset(PayloadOffset));
-
- if (!isSlowPath()) {
- ASSERT(m_newFrameBase != MacroAssembler::stackPointerRegister);
- if (verbose)
- dataLog(" Releasing the new frame base pointer\n");
- m_jit.move(m_newFrameBase, MacroAssembler::stackPointerRegister);
- releaseGPR(m_newFrameBase);
- }
-
- // Finally we handle 3)
- if (verbose)
- dataLog(" Ensuring wanted registers are in the right register\n");
- for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
- CachedRecovery* cachedRecovery { m_newRegisters[reg] };
- if (!cachedRecovery)
- continue;
-
- emitDisplace(*cachedRecovery);
- }
-}
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/CallFrameShuffler.h b/Source/JavaScriptCore/jit/CallFrameShuffler.h
deleted file mode 100644
index d5e6f4253..000000000
--- a/Source/JavaScriptCore/jit/CallFrameShuffler.h
+++ /dev/null
@@ -1,804 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef CallFrameShuffler_h
-#define CallFrameShuffler_h
-
-#if ENABLE(JIT)
-
-#include "CachedRecovery.h"
-#include "CallFrameShuffleData.h"
-#include "MacroAssembler.h"
-#include "RegisterSet.h"
-#include "StackAlignment.h"
-#include <wtf/Vector.h>
-
-namespace JSC {
-
-class CallFrameShuffler {
- WTF_MAKE_FAST_ALLOCATED;
-public:
- CallFrameShuffler(CCallHelpers&, const CallFrameShuffleData&);
-
- void dump(PrintStream&) const;
-
- // Any register that has been locked or acquired must be released
- // before calling prepareForTailCall() or prepareForSlowPath().
- void lockGPR(GPRReg gpr)
- {
- ASSERT(!m_lockedRegisters.get(gpr));
- m_lockedRegisters.set(gpr);
- if (verbose)
- dataLog(" * Locking ", gpr, "\n");
- }
-
- GPRReg acquireGPR()
- {
- ensureGPR();
- GPRReg gpr { getFreeGPR() };
- ASSERT(!m_registers[gpr]);
- lockGPR(gpr);
- return gpr;
- }
-
- void releaseGPR(GPRReg gpr)
- {
- if (verbose) {
- if (m_lockedRegisters.get(gpr))
- dataLog(" * Releasing ", gpr, "\n");
- else
- dataLog(" * ", gpr, " was not locked\n");
- }
- m_lockedRegisters.clear(gpr);
- }
-
- void restoreGPR(GPRReg gpr)
- {
- if (!m_newRegisters[gpr])
- return;
-
- ensureGPR();
-#if USE(JSVALUE32_64)
- GPRReg tempGPR { getFreeGPR() };
- lockGPR(tempGPR);
- ensureGPR();
- releaseGPR(tempGPR);
-#endif
- emitDisplace(*m_newRegisters[gpr]);
- }
-
- // You can only take a snapshot if the recovery has not started
- // yet. The only operations that are valid before taking a
- // snapshot are lockGPR(), acquireGPR() and releaseGPR().
- //
- // Locking status is *NOT* preserved by the snapshot: it only
- // contains information about where the
- // arguments/callee/callee-save registers are by taking into
- // account any spilling that acquireGPR() could have done.
- CallFrameShuffleData snapshot() const
- {
- ASSERT(isUndecided());
-
- CallFrameShuffleData data;
- data.numLocals = numLocals();
- data.callee = getNew(VirtualRegister { JSStack::Callee })->recovery();
- data.args.resize(argCount());
- for (size_t i = 0; i < argCount(); ++i)
- data.args[i] = getNew(virtualRegisterForArgument(i))->recovery();
- for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
- CachedRecovery* cachedRecovery { m_newRegisters[reg] };
- if (!cachedRecovery)
- continue;
-
-#if USE(JSVALUE64)
- data.registers[reg] = cachedRecovery->recovery();
-#else
- RELEASE_ASSERT_NOT_REACHED();
-#endif
- }
- return data;
- }
-
- // Ask the shuffler to put the callee into some registers once the
- // shuffling is done. You should call this before any of the
- // prepare() methods, and must not take a snapshot afterwards, as
- // this would crash on 32-bit platforms.
- void setCalleeJSValueRegs(JSValueRegs jsValueRegs)
- {
- ASSERT(isUndecided());
- ASSERT(!getNew(jsValueRegs));
- CachedRecovery* cachedRecovery { getNew(VirtualRegister(JSStack::Callee)) };
- ASSERT(cachedRecovery);
- addNew(jsValueRegs, cachedRecovery->recovery());
- }
-
- // Ask the shuffler to assume the callee has already been checked
- // to be a cell. This is a no-op on 64-bit platforms, but allows us
- // to free up a GPR on 32-bit platforms.
- // You obviously must have ensured that this is the case before
- // running any of the prepare methods.
- void assumeCalleeIsCell()
- {
-#if USE(JSVALUE32_64)
- CachedRecovery& calleeCachedRecovery = *getNew(VirtualRegister(JSStack::Callee));
- switch (calleeCachedRecovery.recovery().technique()) {
- case InPair:
- updateRecovery(
- calleeCachedRecovery,
- ValueRecovery::inGPR(
- calleeCachedRecovery.recovery().payloadGPR(),
- DataFormatCell));
- break;
- case DisplacedInJSStack:
- updateRecovery(
- calleeCachedRecovery,
- ValueRecovery::displacedInJSStack(
- calleeCachedRecovery.recovery().virtualRegister(),
- DataFormatCell));
- break;
- case InFPR:
- case UnboxedCellInGPR:
- case CellDisplacedInJSStack:
- break;
- case Constant:
- ASSERT(calleeCachedRecovery.recovery().constant().isCell());
- break;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
-#endif
- }
-
- // This will emit code to build the new frame over the old one.
- void prepareForTailCall();
-
- // This will emit code to build the new frame as if performing a
- // regular call. However, the callee save registers will be
- // restored, and any locals (not the header or arguments) of the
- // current frame can be overwritten.
- //
- // A frame built using prepareForSlowPath() should be used either
- // to throw an exception in, or destroyed using
- // CCallHelpers::prepareForTailCallSlow() followed by a tail call.
- void prepareForSlowPath();
-
-private:
- static const bool verbose = false;
-
- CCallHelpers& m_jit;
-
- void prepareAny();
-
- void spill(CachedRecovery&);
-
- // "box" is arguably a bad name here. The meaning is that after
- // calling emitBox(), your ensure that subsequently calling
- // emitStore() will be able to store the value without additional
- // transformation. In particular, this is a no-op for constants,
- // and is a complete no-op on 32bits since any unboxed value can
- // still be stored by storing the payload and a statically known
- // tag.
- void emitBox(CachedRecovery&);
-
- bool canBox(CachedRecovery& cachedRecovery)
- {
- if (cachedRecovery.boxingRequiresGPR() && getFreeGPR() == InvalidGPRReg)
- return false;
-
- if (cachedRecovery.boxingRequiresFPR() && getFreeFPR() == InvalidFPRReg)
- return false;
-
- return true;
- }
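-
- // Editorial sketch (not in the original file): on JSVALUE64, a
- // DataFormatDouble recovery has boxingRequiresGPR() == true, so
- //
- //     canBox(doubleRecovery) // 'doubleRecovery' is hypothetical
- //
- // fails exactly when every GPR is locked or holds a live value.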
-
- void ensureBox(CachedRecovery& cachedRecovery)
- {
- if (canBox(cachedRecovery))
- return;
-
- if (cachedRecovery.boxingRequiresGPR())
- ensureGPR();
-
- if (cachedRecovery.boxingRequiresFPR())
- ensureFPR();
- }
-
- void emitLoad(CachedRecovery&);
-
- bool canLoad(CachedRecovery&);
-
- void ensureLoad(CachedRecovery& cachedRecovery)
- {
- if (canLoad(cachedRecovery))
- return;
-
- ASSERT(cachedRecovery.loadsIntoGPR() || cachedRecovery.loadsIntoFPR());
-
- if (cachedRecovery.loadsIntoFPR()) {
- if (cachedRecovery.loadsIntoGPR())
- ensureRegister();
- else
- ensureFPR();
- } else
- ensureGPR();
- }
-
- bool canLoadAndBox(CachedRecovery& cachedRecovery)
- {
- // We don't have interfering loads & boxes
- ASSERT(!cachedRecovery.loadsIntoFPR() || !cachedRecovery.boxingRequiresFPR());
- ASSERT(!cachedRecovery.loadsIntoGPR() || !cachedRecovery.boxingRequiresGPR());
-
- return canLoad(cachedRecovery) && canBox(cachedRecovery);
- }
-
- DataFormat emitStore(CachedRecovery&, MacroAssembler::Address);
-
- void emitDisplace(CachedRecovery&);
-
- void emitDeltaCheck();
-
- Bag<CachedRecovery> m_cachedRecoveries;
-
- void updateRecovery(CachedRecovery& cachedRecovery, ValueRecovery recovery)
- {
- clearCachedRecovery(cachedRecovery.recovery());
- cachedRecovery.setRecovery(recovery);
- setCachedRecovery(recovery, &cachedRecovery);
- }
-
- CachedRecovery* getCachedRecovery(ValueRecovery);
-
- CachedRecovery* setCachedRecovery(ValueRecovery, CachedRecovery*);
-
- void clearCachedRecovery(ValueRecovery recovery)
- {
- if (!recovery.isConstant())
- setCachedRecovery(recovery, nullptr);
- }
-
- CachedRecovery* addCachedRecovery(ValueRecovery recovery)
- {
- if (recovery.isConstant())
- return m_cachedRecoveries.add(recovery);
- CachedRecovery* cachedRecovery = getCachedRecovery(recovery);
- if (!cachedRecovery)
- return setCachedRecovery(recovery, m_cachedRecoveries.add(recovery));
- return cachedRecovery;
- }
-
- // These are the current recoveries present in the old frame's
- // slots. A null CachedRecovery means we can trash the current
- // value as we don't care about it.
- Vector<CachedRecovery*> m_oldFrame;
-
- int numLocals() const
- {
- return m_oldFrame.size() - JSStack::CallerFrameAndPCSize;
- }
-
- CachedRecovery* getOld(VirtualRegister reg) const
- {
- return m_oldFrame[JSStack::CallerFrameAndPCSize - reg.offset() - 1];
- }
-
- void setOld(VirtualRegister reg, CachedRecovery* cachedRecovery)
- {
- m_oldFrame[JSStack::CallerFrameAndPCSize - reg.offset() - 1] = cachedRecovery;
- }
-
- VirtualRegister firstOld() const
- {
- return VirtualRegister { static_cast<int>(-numLocals()) };
- }
-
- VirtualRegister lastOld() const
- {
- return VirtualRegister { JSStack::CallerFrameAndPCSize - 1 };
- }
-
- bool isValidOld(VirtualRegister reg) const
- {
- return reg >= firstOld() && reg <= lastOld();
- }
-
- bool m_didExtendFrame { false };
-
- void extendFrameIfNeeded();
-
- // This stores, for each slot in the new frame, information about
- // the recovery for the value that should eventually go into that
- // slot.
- //
- // Once the slot has been written, the corresponding entry in
- // m_newFrame will be empty.
- Vector<CachedRecovery*> m_newFrame;
-
- size_t argCount() const
- {
- return m_newFrame.size() - JSStack::CallFrameHeaderSize;
- }
-
- CachedRecovery* getNew(VirtualRegister newRegister) const
- {
- return m_newFrame[newRegister.offset()];
- }
-
- void setNew(VirtualRegister newRegister, CachedRecovery* cachedRecovery)
- {
- m_newFrame[newRegister.offset()] = cachedRecovery;
- }
-
- void addNew(VirtualRegister newRegister, ValueRecovery recovery)
- {
- CachedRecovery* cachedRecovery = addCachedRecovery(recovery);
- cachedRecovery->addTarget(newRegister);
- setNew(newRegister, cachedRecovery);
- }
-
- VirtualRegister firstNew() const
- {
- return VirtualRegister { 0 };
- }
-
- VirtualRegister lastNew() const
- {
- return VirtualRegister { static_cast<int>(m_newFrame.size()) - 1 };
- }
-
- bool isValidNew(VirtualRegister reg) const
- {
- return reg >= firstNew() && reg <= lastNew();
- }
-
-
- int m_alignedOldFrameSize;
- int m_alignedNewFrameSize;
-
- // This is the distance, in slots, between the base of the new
- // frame and the base of the old frame. It could be negative when
- // preparing for a tail call to a function with smaller argument
- // count.
- //
- // We will overwrite this appropriately for slow path calls, but
- // we initialize it as if doing a fast path for the spills we
- // could do while undecided (typically while calling acquireGPR()
- // for a polymorphic call).
- int m_frameDelta;
-
- VirtualRegister newAsOld(VirtualRegister reg) const
- {
- return reg - m_frameDelta;
- }
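-
- // Editorial sketch (not in the original file): with an aligned old
- // frame of 8 slots and an aligned new frame of 6 (a tail call passing
- // fewer arguments), m_frameDelta == -2 and
- //
- //     newAsOld(VirtualRegister { 0 }) == VirtualRegister { 2 },
- //
- // i.e. the new frame's slot 0 maps to old-frame coordinate 2.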
-
- // This stores the set of locked registers, i.e. registers for
- // which we have an implicit requirement that they are not changed.
- //
- // This will usually contain the link register on architectures
- // that have one, any scratch register used by the macro assembler
- // (e.g. r11 on X86_64), as well as any register that we use for
- // addressing (see m_oldFrameBase and m_newFrameBase).
- //
-    // We also use this to lock registers temporarily, for instance
-    // to ensure that we have at least two available registers for
-    // loading a pair on 32-bit platforms.
- mutable RegisterSet m_lockedRegisters;
-
- // This stores the current recoveries present in registers. A null
- // CachedRecovery means we can trash the current value as we don't
- // care about it.
- RegisterMap<CachedRecovery*> m_registers;
-
-#if USE(JSVALUE64)
- mutable GPRReg m_tagTypeNumber;
-
- bool tryAcquireTagTypeNumber();
-#endif
-
- // This stores, for each register, information about the recovery
- // for the value that should eventually go into that register. The
- // only registers that have a target recovery will be callee-save
- // registers, as well as possibly one JSValueRegs for holding the
- // callee.
- //
- // Once the correct value has been put into the registers, and
- // contrary to what we do with m_newFrame, we keep the entry in
- // m_newRegisters to simplify spilling.
- RegisterMap<CachedRecovery*> m_newRegisters;
-
- template<typename CheckFunctor>
- Reg getFreeRegister(const CheckFunctor& check) const
- {
- Reg nonTemp { };
- for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
- if (m_lockedRegisters.get(reg))
- continue;
-
- if (!check(reg))
- continue;
-
- if (!m_registers[reg]) {
- if (!m_newRegisters[reg])
- return reg;
- if (!nonTemp)
- nonTemp = reg;
- }
- }
-
-#if USE(JSVALUE64)
- if (!nonTemp && m_tagTypeNumber != InvalidGPRReg && check(Reg { m_tagTypeNumber })) {
- ASSERT(m_lockedRegisters.get(m_tagTypeNumber));
- m_lockedRegisters.clear(m_tagTypeNumber);
- nonTemp = Reg { m_tagTypeNumber };
- m_tagTypeNumber = InvalidGPRReg;
- }
-#endif
- return nonTemp;
- }
-
- GPRReg getFreeTempGPR() const
- {
- Reg freeTempGPR { getFreeRegister([this] (Reg reg) { return reg.isGPR() && !m_newRegisters[reg]; }) };
- if (!freeTempGPR)
- return InvalidGPRReg;
- return freeTempGPR.gpr();
- }
-
- GPRReg getFreeGPR() const
- {
- Reg freeGPR { getFreeRegister([] (Reg reg) { return reg.isGPR(); }) };
- if (!freeGPR)
- return InvalidGPRReg;
- return freeGPR.gpr();
- }
-
- FPRReg getFreeFPR() const
- {
- Reg freeFPR { getFreeRegister([] (Reg reg) { return reg.isFPR(); }) };
- if (!freeFPR)
- return InvalidFPRReg;
- return freeFPR.fpr();
- }
-
- bool hasFreeRegister() const
- {
- return static_cast<bool>(getFreeRegister([] (Reg) { return true; }));
- }
-
-    // This frees up a register satisfying the check functor (this
-    // functor could theoretically have any kind of logic, but it
-    // must ensure that it only returns true for recoveries stored in
-    // registers - spill() assumes, and asserts, that it is passed a
-    // cachedRecovery stored in a register).
- template<typename CheckFunctor>
- void ensureRegister(const CheckFunctor& check)
- {
-        // If we can spill a callee-save, that's best, because it
-        // will free up a register that would otherwise have been
-        // taken for the longest amount of time.
-        //
-        // We could try to bias towards those that are not in their
-        // target registers yet, but the gain is probably super
-        // small. Unless you have a huge number of arguments (at
-        // least around twice the number of available registers on
-        // your architecture), no spilling is going to take place
-        // anyway.
- for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
- if (m_lockedRegisters.get(reg))
- continue;
-
- CachedRecovery* cachedRecovery { m_newRegisters[reg] };
- if (!cachedRecovery)
- continue;
-
- if (check(*cachedRecovery)) {
- if (verbose)
- dataLog(" ", cachedRecovery->recovery(), " looks like a good spill candidate\n");
- spill(*cachedRecovery);
- return;
- }
- }
-
-        // We spill the cachedRecovery associated with the first new
-        // slot we can, because that is the one whose write will
-        // become possible last, i.e. the one we would otherwise have
-        // had to retain in registers for the longest.
- for (VirtualRegister reg = firstNew(); reg <= lastNew(); reg += 1) {
- CachedRecovery* cachedRecovery { getNew(reg) };
- if (!cachedRecovery)
- continue;
-
- if (check(*cachedRecovery)) {
- spill(*cachedRecovery);
- return;
- }
- }
-
- RELEASE_ASSERT_NOT_REACHED();
- }
-
- void ensureRegister()
- {
- if (hasFreeRegister())
- return;
-
- if (verbose)
- dataLog(" Finding a register to spill\n");
- ensureRegister(
- [this] (const CachedRecovery& cachedRecovery) {
- if (cachedRecovery.recovery().isInGPR())
- return !m_lockedRegisters.get(cachedRecovery.recovery().gpr());
- if (cachedRecovery.recovery().isInFPR())
- return !m_lockedRegisters.get(cachedRecovery.recovery().fpr());
-#if USE(JSVALUE32_64)
- if (cachedRecovery.recovery().technique() == InPair) {
- return !m_lockedRegisters.get(cachedRecovery.recovery().tagGPR())
- && !m_lockedRegisters.get(cachedRecovery.recovery().payloadGPR());
- }
-#endif
- return false;
- });
- }
-
- void ensureTempGPR()
- {
- if (getFreeTempGPR() != InvalidGPRReg)
- return;
-
- if (verbose)
- dataLog(" Finding a temp GPR to spill\n");
- ensureRegister(
- [this] (const CachedRecovery& cachedRecovery) {
- if (cachedRecovery.recovery().isInGPR()) {
- return !m_lockedRegisters.get(cachedRecovery.recovery().gpr())
- && !m_newRegisters[cachedRecovery.recovery().gpr()];
- }
-#if USE(JSVALUE32_64)
- if (cachedRecovery.recovery().technique() == InPair) {
- return !m_lockedRegisters.get(cachedRecovery.recovery().tagGPR())
- && !m_lockedRegisters.get(cachedRecovery.recovery().payloadGPR())
- && !m_newRegisters[cachedRecovery.recovery().tagGPR()]
- && !m_newRegisters[cachedRecovery.recovery().payloadGPR()];
- }
-#endif
- return false;
- });
- }
-
- void ensureGPR()
- {
- if (getFreeGPR() != InvalidGPRReg)
- return;
-
- if (verbose)
- dataLog(" Finding a GPR to spill\n");
- ensureRegister(
- [this] (const CachedRecovery& cachedRecovery) {
- if (cachedRecovery.recovery().isInGPR())
- return !m_lockedRegisters.get(cachedRecovery.recovery().gpr());
-#if USE(JSVALUE32_64)
- if (cachedRecovery.recovery().technique() == InPair) {
- return !m_lockedRegisters.get(cachedRecovery.recovery().tagGPR())
- && !m_lockedRegisters.get(cachedRecovery.recovery().payloadGPR());
- }
-#endif
- return false;
- });
- }
-
- void ensureFPR()
- {
- if (getFreeFPR() != InvalidFPRReg)
- return;
-
- if (verbose)
- dataLog(" Finding an FPR to spill\n");
- ensureRegister(
- [this] (const CachedRecovery& cachedRecovery) {
- if (cachedRecovery.recovery().isInFPR())
- return !m_lockedRegisters.get(cachedRecovery.recovery().fpr());
- return false;
- });
- }
-
- CachedRecovery* getNew(JSValueRegs jsValueRegs) const
- {
-#if USE(JSVALUE64)
- return m_newRegisters[jsValueRegs.gpr()];
-#else
- ASSERT(
- jsValueRegs.tagGPR() == InvalidGPRReg || jsValueRegs.payloadGPR() == InvalidGPRReg
- || m_newRegisters[jsValueRegs.payloadGPR()] == m_newRegisters[jsValueRegs.tagGPR()]);
- if (jsValueRegs.payloadGPR() == InvalidGPRReg)
- return m_newRegisters[jsValueRegs.tagGPR()];
- return m_newRegisters[jsValueRegs.payloadGPR()];
-#endif
- }
-
- void addNew(JSValueRegs jsValueRegs, ValueRecovery recovery)
- {
- ASSERT(jsValueRegs && !getNew(jsValueRegs));
- CachedRecovery* cachedRecovery = addCachedRecovery(recovery);
-#if USE(JSVALUE64)
- if (cachedRecovery->wantedJSValueRegs())
- m_newRegisters[cachedRecovery->wantedJSValueRegs().gpr()] = nullptr;
- m_newRegisters[jsValueRegs.gpr()] = cachedRecovery;
-#else
- if (JSValueRegs oldRegs { cachedRecovery->wantedJSValueRegs() }) {
-            if (oldRegs.payloadGPR() != InvalidGPRReg)
- m_newRegisters[oldRegs.payloadGPR()] = nullptr;
-            if (oldRegs.tagGPR() != InvalidGPRReg)
- m_newRegisters[oldRegs.tagGPR()] = nullptr;
- }
- if (jsValueRegs.payloadGPR() != InvalidGPRReg)
- m_newRegisters[jsValueRegs.payloadGPR()] = cachedRecovery;
- if (jsValueRegs.tagGPR() != InvalidGPRReg)
- m_newRegisters[jsValueRegs.tagGPR()] = cachedRecovery;
-#endif
- ASSERT(!cachedRecovery->wantedJSValueRegs());
- cachedRecovery->setWantedJSValueRegs(jsValueRegs);
- }
-
- void addNew(FPRReg fpr, ValueRecovery recovery)
- {
- ASSERT(fpr != InvalidFPRReg && !m_newRegisters[fpr]);
- CachedRecovery* cachedRecovery = addCachedRecovery(recovery);
- m_newRegisters[fpr] = cachedRecovery;
- ASSERT(cachedRecovery->wantedFPR() == InvalidFPRReg);
- cachedRecovery->setWantedFPR(fpr);
- }
-
- // m_oldFrameBase is the register relative to which we access
- // slots in the old call frame, with an additional offset of
- // m_oldFrameOffset.
- //
- // - For an actual tail call, m_oldFrameBase is the stack
- // pointer, and m_oldFrameOffset is the number of locals of the
- // tail caller's frame. We use such stack pointer-based
- // addressing because it allows us to load the tail caller's
- // caller's frame pointer in the frame pointer register
- // immediately instead of awkwardly keeping it around on the
- // stack.
- //
- // - For a slow path call, m_oldFrameBase is just the frame
- // pointer, and m_oldFrameOffset is 0.
- GPRReg m_oldFrameBase { MacroAssembler::framePointerRegister };
- int m_oldFrameOffset { 0 };
-
- MacroAssembler::Address addressForOld(VirtualRegister reg) const
- {
- return MacroAssembler::Address(m_oldFrameBase,
- (m_oldFrameOffset + reg.offset()) * sizeof(Register));
- }
-
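-    // Illustrative only: with m_oldFrameOffset == 3 (a tail caller
-    // with three locals) and 8-byte Registers, addressForOld on
-    // VirtualRegister { -1 } yields m_oldFrameBase + (3 - 1) * 8,
-    // i.e. an offset of 16 from the stack pointer.
-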
- // m_newFrameBase is the register relative to which we access
- // slots in the new call frame, and we always make it point to
- // wherever the stack pointer will be right before making the
- // actual call/jump. The actual base of the new frame is at offset
- // m_newFrameOffset relative to m_newFrameBase.
- //
- // - For an actual tail call, m_newFrameBase is computed
- // dynamically, and m_newFrameOffset varies between 0 and -2
- // depending on the architecture's calling convention (see
- // prepareForTailCall).
- //
- // - For a slow path call, m_newFrameBase is the actual stack
-    //   pointer, and m_newFrameOffset is -CallerFrameAndPCSize,
- // following the convention for a regular call.
- GPRReg m_newFrameBase { InvalidGPRReg };
-    int m_newFrameOffset { 0 };
-
- bool isUndecided() const
- {
- return m_newFrameBase == InvalidGPRReg;
- }
-
- bool isSlowPath() const
- {
- return m_newFrameBase == MacroAssembler::stackPointerRegister;
- }
-
- MacroAssembler::Address addressForNew(VirtualRegister reg) const
- {
- return MacroAssembler::Address(m_newFrameBase,
- (m_newFrameOffset + reg.offset()) * sizeof(Register));
- }
-
-    // We use a concept of "danger zone". The danger zone consists of
-    // all the writes to the new frame that could overlap with reads
-    // from the old frame.
-    //
-    // Because the actual number of arguments can be higher than the
-    // number of parameters, when preparing a tail call we must
-    // assume that writing to a slot in the new frame could overlap
-    // not only with the corresponding slot in the old frame, but
-    // also with any slot above it. The danger zone therefore
-    // consists of all the slots between the first slot of the new
-    // frame and the "danger frontier": the highest slot in the new
-    // frame whose aliased old slot we still care about. Because the
-    // danger frontier is relative to the new frame, it is stored as
-    // a virtual register *in the new frame*.
- VirtualRegister m_dangerFrontier;
-
- VirtualRegister dangerFrontier() const
- {
- ASSERT(!isUndecided());
-
- return m_dangerFrontier;
- }
-
- bool isDangerNew(VirtualRegister reg) const
- {
- ASSERT(!isUndecided() && isValidNew(reg));
- return reg <= dangerFrontier();
- }
-
- void updateDangerFrontier()
- {
- ASSERT(!isUndecided());
-
- m_dangerFrontier = firstNew() - 1;
- for (VirtualRegister reg = lastNew(); reg >= firstNew(); reg -= 1) {
- if (!getNew(reg) || !isValidOld(newAsOld(reg)) || !getOld(newAsOld(reg)))
- continue;
-
- m_dangerFrontier = reg;
- if (verbose)
- dataLog(" Danger frontier now at NEW ", m_dangerFrontier, "\n");
- break;
- }
-        if (verbose && m_dangerFrontier < firstNew())
-            dataLog("  All clear! Danger zone is empty.\n");
- }
-
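-    // Worked example (hypothetical): say lastNew() == 3 and the scan
-    // finds that NEW 2 is the highest slot whose aliased old slot
-    // still holds a live recovery. The frontier lands at NEW 2, so
-    // writes to NEW 0..2 are dangerous and NEW 3 can be written
-    // immediately.
-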
- // A safe write is a write that never writes into the danger zone.
- bool hasOnlySafeWrites(CachedRecovery& cachedRecovery) const
- {
- for (VirtualRegister target : cachedRecovery.targets()) {
- if (isDangerNew(target))
- return false;
- }
- return true;
- }
-
-    // You must ensure that there are no dangerous writes left before
-    // calling this function.
- bool tryWrites(CachedRecovery&);
-
-    // This function tries to ensure that there is no longer any
-    // possible safe write, i.e. that all remaining writes are either
-    // to the danger zone or callee-save restorations.
- //
- // It returns false if it was unable to perform some safe writes
- // due to high register pressure.
- bool performSafeWrites();
-};
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
-
-#endif // CallFrameShuffler_h
diff --git a/Source/JavaScriptCore/jit/CallFrameShuffler32_64.cpp b/Source/JavaScriptCore/jit/CallFrameShuffler32_64.cpp
deleted file mode 100644
index 5dfe96e81..000000000
--- a/Source/JavaScriptCore/jit/CallFrameShuffler32_64.cpp
+++ /dev/null
@@ -1,305 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "CallFrameShuffler.h"
-
-#if ENABLE(JIT) && USE(JSVALUE32_64)
-
-#include "CCallHelpers.h"
-#include "DataFormat.h"
-#include "JSCInlines.h"
-
-namespace JSC {
-
-DataFormat CallFrameShuffler::emitStore(CachedRecovery& location, MacroAssembler::Address address)
-{
- ASSERT(!location.recovery().isInJSStack());
-
- switch (location.recovery().technique()) {
- case UnboxedInt32InGPR:
- m_jit.store32(MacroAssembler::TrustedImm32(JSValue::Int32Tag),
- address.withOffset(TagOffset));
- m_jit.store32(location.recovery().gpr(), address.withOffset(PayloadOffset));
- return DataFormatInt32;
- case UnboxedCellInGPR:
- m_jit.store32(MacroAssembler::TrustedImm32(JSValue::CellTag),
- address.withOffset(TagOffset));
- m_jit.store32(location.recovery().gpr(), address.withOffset(PayloadOffset));
- return DataFormatCell;
- case Constant:
- m_jit.storeTrustedValue(location.recovery().constant(), address);
- return DataFormatJS;
- case InPair:
- m_jit.storeValue(location.recovery().jsValueRegs(), address);
- return DataFormatJS;
- case UnboxedBooleanInGPR:
- m_jit.store32(MacroAssembler::TrustedImm32(JSValue::BooleanTag),
- address.withOffset(TagOffset));
- m_jit.store32(location.recovery().gpr(), address.withOffset(PayloadOffset));
- return DataFormatBoolean;
- case InFPR:
- case UnboxedDoubleInFPR:
- m_jit.storeDouble(location.recovery().fpr(), address);
- return DataFormatJS;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- }
-}
-
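-// Illustrative note (not part of the original file): on a
-// little-endian JSVALUE32_64 target, PayloadOffset is 0 and TagOffset
-// is 4, so storing an unboxed int32 writes Int32Tag to [address + 4]
-// and the 32-bit value to [address + 0].
-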
-void CallFrameShuffler::emitBox(CachedRecovery& location)
-{
-    // Nothing to do, we're good! JSValues and doubles can be stored
-    // immediately, and the other formats need no transformation;
-    // they just store a constant tag separately.
- ASSERT_UNUSED(location, canBox(location));
-}
-
-void CallFrameShuffler::emitLoad(CachedRecovery& location)
-{
- if (!location.recovery().isInJSStack())
- return;
-
- if (verbose)
- dataLog(" * Loading ", location.recovery(), " into ");
- VirtualRegister reg { location.recovery().virtualRegister() };
- MacroAssembler::Address address { addressForOld(reg) };
-
- bool tryFPR { true };
- JSValueRegs wantedJSValueRegs { location.wantedJSValueRegs() };
- if (wantedJSValueRegs) {
- if (wantedJSValueRegs.payloadGPR() != InvalidGPRReg
- && !m_registers[wantedJSValueRegs.payloadGPR()]
- && !m_lockedRegisters.get(wantedJSValueRegs.payloadGPR()))
- tryFPR = false;
- if (wantedJSValueRegs.tagGPR() != InvalidGPRReg
- && !m_registers[wantedJSValueRegs.tagGPR()]
- && !m_lockedRegisters.get(wantedJSValueRegs.tagGPR()))
- tryFPR = false;
- }
-
- if (tryFPR && location.loadsIntoFPR()) {
- FPRReg resultFPR = location.wantedFPR();
- if (resultFPR == InvalidFPRReg || m_registers[resultFPR] || m_lockedRegisters.get(resultFPR))
- resultFPR = getFreeFPR();
- if (resultFPR != InvalidFPRReg) {
- m_jit.loadDouble(address, resultFPR);
- DataFormat dataFormat = DataFormatJS;
- if (location.recovery().dataFormat() == DataFormatDouble)
- dataFormat = DataFormatDouble;
- updateRecovery(location,
- ValueRecovery::inFPR(resultFPR, dataFormat));
- if (verbose)
- dataLog(location.recovery(), "\n");
- if (reg == newAsOld(dangerFrontier()))
- updateDangerFrontier();
- return;
- }
- }
-
- if (location.loadsIntoGPR()) {
- GPRReg resultGPR { wantedJSValueRegs.payloadGPR() };
- if (resultGPR == InvalidGPRReg || m_registers[resultGPR] || m_lockedRegisters.get(resultGPR))
- resultGPR = getFreeGPR();
- ASSERT(resultGPR != InvalidGPRReg);
- m_jit.loadPtr(address.withOffset(PayloadOffset), resultGPR);
- updateRecovery(location,
- ValueRecovery::inGPR(resultGPR, location.recovery().dataFormat()));
- if (verbose)
- dataLog(location.recovery(), "\n");
- if (reg == newAsOld(dangerFrontier()))
- updateDangerFrontier();
- return;
- }
-
- ASSERT(location.recovery().technique() == DisplacedInJSStack);
- GPRReg payloadGPR { wantedJSValueRegs.payloadGPR() };
- GPRReg tagGPR { wantedJSValueRegs.tagGPR() };
- if (payloadGPR == InvalidGPRReg || m_registers[payloadGPR] || m_lockedRegisters.get(payloadGPR))
- payloadGPR = getFreeGPR();
- m_lockedRegisters.set(payloadGPR);
- if (tagGPR == InvalidGPRReg || m_registers[tagGPR] || m_lockedRegisters.get(tagGPR))
- tagGPR = getFreeGPR();
- m_lockedRegisters.clear(payloadGPR);
- ASSERT(payloadGPR != InvalidGPRReg && tagGPR != InvalidGPRReg && tagGPR != payloadGPR);
- m_jit.loadPtr(address.withOffset(PayloadOffset), payloadGPR);
- m_jit.loadPtr(address.withOffset(TagOffset), tagGPR);
- updateRecovery(location,
- ValueRecovery::inPair(tagGPR, payloadGPR));
- if (verbose)
- dataLog(location.recovery(), "\n");
- if (reg == newAsOld(dangerFrontier()))
- updateDangerFrontier();
-}
-
-bool CallFrameShuffler::canLoad(CachedRecovery& location)
-{
- if (!location.recovery().isInJSStack())
- return true;
-
- if (location.loadsIntoFPR() && getFreeFPR() != InvalidFPRReg)
- return true;
-
- if (location.loadsIntoGPR() && getFreeGPR() != InvalidGPRReg)
- return true;
-
- if (location.recovery().technique() == DisplacedInJSStack) {
- GPRReg payloadGPR { getFreeGPR() };
- if (payloadGPR == InvalidGPRReg)
- return false;
- m_lockedRegisters.set(payloadGPR);
- GPRReg tagGPR { getFreeGPR() };
- m_lockedRegisters.clear(payloadGPR);
- return tagGPR != InvalidGPRReg;
- }
-
- return false;
-}
-
-void CallFrameShuffler::emitDisplace(CachedRecovery& location)
-{
- ASSERT(location.recovery().isInRegisters());
- JSValueRegs wantedJSValueRegs { location.wantedJSValueRegs() };
-    ASSERT(wantedJSValueRegs); // We don't support wanted FPRs on 32-bit platforms
-
- GPRReg wantedTagGPR { wantedJSValueRegs.tagGPR() };
- GPRReg wantedPayloadGPR { wantedJSValueRegs.payloadGPR() };
-
- if (wantedTagGPR != InvalidGPRReg) {
- ASSERT(!m_lockedRegisters.get(wantedTagGPR));
- if (CachedRecovery* currentTag { m_registers[wantedTagGPR] }) {
- if (currentTag == &location) {
- if (verbose)
- dataLog(" + ", wantedTagGPR, " is OK\n");
- } else {
-                // This can never happen on 32-bit platforms since we
-                // have at most one wanted JSValueRegs, for the
-                // callee, and no callee-save registers.
- RELEASE_ASSERT_NOT_REACHED();
- }
- }
- }
-
- if (wantedPayloadGPR != InvalidGPRReg) {
- ASSERT(!m_lockedRegisters.get(wantedPayloadGPR));
- if (CachedRecovery* currentPayload { m_registers[wantedPayloadGPR] }) {
- if (currentPayload == &location) {
- if (verbose)
- dataLog(" + ", wantedPayloadGPR, " is OK\n");
- } else {
- // See above
- RELEASE_ASSERT_NOT_REACHED();
- }
- }
- }
-
- if (location.recovery().technique() == InPair
- || location.recovery().isInGPR()) {
- GPRReg payloadGPR;
- if (location.recovery().technique() == InPair)
- payloadGPR = location.recovery().payloadGPR();
- else
- payloadGPR = location.recovery().gpr();
-
- if (wantedPayloadGPR == InvalidGPRReg)
- wantedPayloadGPR = payloadGPR;
-
- if (payloadGPR != wantedPayloadGPR) {
- if (location.recovery().technique() == InPair
- && wantedPayloadGPR == location.recovery().tagGPR()) {
- if (verbose)
- dataLog(" * Swapping ", payloadGPR, " and ", wantedPayloadGPR, "\n");
- m_jit.swap(payloadGPR, wantedPayloadGPR);
- updateRecovery(location,
- ValueRecovery::inPair(payloadGPR, wantedPayloadGPR));
- } else {
- if (verbose)
- dataLog(" * Moving ", payloadGPR, " into ", wantedPayloadGPR, "\n");
- m_jit.move(payloadGPR, wantedPayloadGPR);
- if (location.recovery().technique() == InPair) {
- updateRecovery(location,
- ValueRecovery::inPair(location.recovery().tagGPR(),
- wantedPayloadGPR));
- } else {
- updateRecovery(location,
- ValueRecovery::inGPR(wantedPayloadGPR, location.recovery().dataFormat()));
- }
- }
- }
-
- if (wantedTagGPR == InvalidGPRReg)
- wantedTagGPR = getFreeGPR();
- switch (location.recovery().dataFormat()) {
- case DataFormatInt32:
- if (verbose)
- dataLog(" * Moving int32 tag into ", wantedTagGPR, "\n");
- m_jit.move(MacroAssembler::TrustedImm32(JSValue::Int32Tag),
- wantedTagGPR);
- break;
- case DataFormatCell:
- if (verbose)
- dataLog(" * Moving cell tag into ", wantedTagGPR, "\n");
- m_jit.move(MacroAssembler::TrustedImm32(JSValue::CellTag),
- wantedTagGPR);
- break;
- case DataFormatBoolean:
- if (verbose)
- dataLog(" * Moving boolean tag into ", wantedTagGPR, "\n");
- m_jit.move(MacroAssembler::TrustedImm32(JSValue::BooleanTag),
- wantedTagGPR);
- break;
- case DataFormatJS:
- ASSERT(wantedTagGPR != location.recovery().payloadGPR());
- if (wantedTagGPR != location.recovery().tagGPR()) {
- if (verbose)
- dataLog(" * Moving ", location.recovery().tagGPR(), " into ", wantedTagGPR, "\n");
- m_jit.move(location.recovery().tagGPR(), wantedTagGPR);
- }
- break;
-
- default:
- RELEASE_ASSERT_NOT_REACHED();
- }
- } else {
- ASSERT(location.recovery().isInFPR());
- if (wantedTagGPR == InvalidGPRReg) {
- ASSERT(wantedPayloadGPR != InvalidGPRReg);
- m_lockedRegisters.set(wantedPayloadGPR);
- wantedTagGPR = getFreeGPR();
- m_lockedRegisters.clear(wantedPayloadGPR);
- }
- if (wantedPayloadGPR == InvalidGPRReg) {
- m_lockedRegisters.set(wantedTagGPR);
- wantedPayloadGPR = getFreeGPR();
- m_lockedRegisters.clear(wantedTagGPR);
- }
- m_jit.boxDouble(location.recovery().fpr(), wantedTagGPR, wantedPayloadGPR);
- }
- updateRecovery(location, ValueRecovery::inPair(wantedTagGPR, wantedPayloadGPR));
-}
-
-} // namespace JSC
-
-#endif // ENABLE(JIT) && USE(JSVALUE32_64)
diff --git a/Source/JavaScriptCore/jit/CallFrameShuffler64.cpp b/Source/JavaScriptCore/jit/CallFrameShuffler64.cpp
deleted file mode 100644
index 2ef6ed111..000000000
--- a/Source/JavaScriptCore/jit/CallFrameShuffler64.cpp
+++ /dev/null
@@ -1,369 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "CallFrameShuffler.h"
-
-#if ENABLE(JIT) && USE(JSVALUE64)
-
-#include "CCallHelpers.h"
-#include "DataFormat.h"
-#include "JSCInlines.h"
-
-namespace JSC {
-
-DataFormat CallFrameShuffler::emitStore(
- CachedRecovery& cachedRecovery, MacroAssembler::Address address)
-{
- ASSERT(!cachedRecovery.recovery().isInJSStack());
-
- switch (cachedRecovery.recovery().technique()) {
- case InGPR:
- m_jit.storePtr(cachedRecovery.recovery().gpr(), address);
- return DataFormatJS;
- case UnboxedInt32InGPR:
- m_jit.store32(cachedRecovery.recovery().gpr(), address.withOffset(PayloadOffset));
- return DataFormatInt32;
- case UnboxedInt52InGPR:
- m_jit.rshift64(MacroAssembler::TrustedImm32(JSValue::int52ShiftAmount),
- cachedRecovery.recovery().gpr());
- FALLTHROUGH;
- case UnboxedStrictInt52InGPR:
- m_jit.storePtr(cachedRecovery.recovery().gpr(), address);
- return DataFormatStrictInt52;
- case UnboxedBooleanInGPR:
- m_jit.storePtr(cachedRecovery.recovery().gpr(), address);
- return DataFormatBoolean;
- case UnboxedCellInGPR:
- m_jit.storePtr(cachedRecovery.recovery().gpr(), address);
- return DataFormatCell;
- case UnboxedDoubleInFPR:
- m_jit.storeDouble(cachedRecovery.recovery().fpr(), address);
- return DataFormatDouble;
- case InFPR:
- m_jit.storeDouble(cachedRecovery.recovery().fpr(), address);
- return DataFormatJS;
- case Constant:
- m_jit.storeTrustedValue(cachedRecovery.recovery().constant(), address);
- return DataFormatJS;
- default:
- RELEASE_ASSERT_NOT_REACHED();
- }
-}
-
-void CallFrameShuffler::emitBox(CachedRecovery& cachedRecovery)
-{
- ASSERT(canBox(cachedRecovery));
- if (cachedRecovery.recovery().isConstant())
- return;
-
- if (cachedRecovery.recovery().isInGPR()) {
- switch (cachedRecovery.recovery().dataFormat()) {
- case DataFormatInt32:
- if (verbose)
- dataLog(" * Boxing ", cachedRecovery.recovery());
- m_jit.zeroExtend32ToPtr(
- cachedRecovery.recovery().gpr(),
- cachedRecovery.recovery().gpr());
- m_lockedRegisters.set(cachedRecovery.recovery().gpr());
- if (tryAcquireTagTypeNumber())
- m_jit.or64(m_tagTypeNumber, cachedRecovery.recovery().gpr());
- else {
- // We have to do this the hard way
- m_jit.or64(MacroAssembler::TrustedImm64(TagTypeNumber),
- cachedRecovery.recovery().gpr());
- }
- m_lockedRegisters.clear(cachedRecovery.recovery().gpr());
- cachedRecovery.setRecovery(
- ValueRecovery::inGPR(cachedRecovery.recovery().gpr(), DataFormatJS));
- if (verbose)
- dataLog(" into ", cachedRecovery.recovery(), "\n");
- return;
- case DataFormatInt52:
- if (verbose)
- dataLog(" * Boxing ", cachedRecovery.recovery());
- m_jit.rshift64(MacroAssembler::TrustedImm32(JSValue::int52ShiftAmount),
- cachedRecovery.recovery().gpr());
- cachedRecovery.setRecovery(
- ValueRecovery::inGPR(cachedRecovery.recovery().gpr(), DataFormatStrictInt52));
- if (verbose)
- dataLog(" into ", cachedRecovery.recovery(), "\n");
- FALLTHROUGH;
- case DataFormatStrictInt52: {
- if (verbose)
- dataLog(" * Boxing ", cachedRecovery.recovery());
- FPRReg resultFPR = getFreeFPR();
- ASSERT(resultFPR != InvalidFPRReg);
- m_jit.convertInt64ToDouble(cachedRecovery.recovery().gpr(), resultFPR);
- updateRecovery(cachedRecovery, ValueRecovery::inFPR(resultFPR, DataFormatDouble));
- if (verbose)
- dataLog(" into ", cachedRecovery.recovery(), "\n");
- break;
- }
- case DataFormatBoolean:
- if (verbose)
- dataLog(" * Boxing ", cachedRecovery.recovery());
- m_jit.add32(MacroAssembler::TrustedImm32(ValueFalse),
- cachedRecovery.recovery().gpr());
- cachedRecovery.setRecovery(
- ValueRecovery::inGPR(cachedRecovery.recovery().gpr(), DataFormatJS));
- if (verbose)
- dataLog(" into ", cachedRecovery.recovery(), "\n");
- return;
- default:
- return;
- }
- }
-
- if (cachedRecovery.recovery().isInFPR()) {
- if (cachedRecovery.recovery().dataFormat() == DataFormatDouble) {
- if (verbose)
- dataLog(" * Boxing ", cachedRecovery.recovery());
- GPRReg resultGPR = cachedRecovery.wantedJSValueRegs().gpr();
- if (resultGPR == InvalidGPRReg || m_registers[resultGPR])
- resultGPR = getFreeGPR();
- ASSERT(resultGPR != InvalidGPRReg);
- m_jit.purifyNaN(cachedRecovery.recovery().fpr());
- m_jit.moveDoubleTo64(cachedRecovery.recovery().fpr(), resultGPR);
- m_lockedRegisters.set(resultGPR);
- if (tryAcquireTagTypeNumber())
- m_jit.sub64(m_tagTypeNumber, resultGPR);
- else
- m_jit.sub64(MacroAssembler::TrustedImm64(TagTypeNumber), resultGPR);
- m_lockedRegisters.clear(resultGPR);
- updateRecovery(cachedRecovery, ValueRecovery::inGPR(resultGPR, DataFormatJS));
- if (verbose)
- dataLog(" into ", cachedRecovery.recovery(), "\n");
- return;
- }
- ASSERT(cachedRecovery.recovery().dataFormat() == DataFormatJS);
- return;
- }
-
- RELEASE_ASSERT_NOT_REACHED();
-}
-
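-// Illustrative note (not part of the original file; constants from
-// JSCJSValue.h): TagTypeNumber == 0xffff000000000000. Boxing an int32
-// ORs the tag in, e.g. 5 becomes 0xffff000000000005. Boxing a double
-// reinterprets its bits and subtracts TagTypeNumber (equivalent to
-// adding 2^48), e.g. 1.0 (bits 0x3ff0000000000000) becomes
-// 0x3ff1000000000000.
-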
-void CallFrameShuffler::emitLoad(CachedRecovery& cachedRecovery)
-{
- if (!cachedRecovery.recovery().isInJSStack())
- return;
-
- if (verbose)
- dataLog(" * Loading ", cachedRecovery.recovery(), " into ");
-
- VirtualRegister reg = cachedRecovery.recovery().virtualRegister();
- MacroAssembler::Address address { addressForOld(reg) };
- bool tryFPR { true };
- GPRReg resultGPR { cachedRecovery.wantedJSValueRegs().gpr() };
-
- // If we want a GPR and it's available, that's better than loading
- // into an FPR.
- if (resultGPR != InvalidGPRReg && !m_registers[resultGPR]
- && !m_lockedRegisters.get(resultGPR) && cachedRecovery.loadsIntoGPR())
- tryFPR = false;
-
- // Otherwise, we prefer loading into FPRs if possible
- if (tryFPR && cachedRecovery.loadsIntoFPR()) {
- FPRReg resultFPR { cachedRecovery.wantedFPR() };
- if (resultFPR == InvalidFPRReg || m_registers[resultFPR] || m_lockedRegisters.get(resultFPR))
- resultFPR = getFreeFPR();
- if (resultFPR != InvalidFPRReg) {
- m_jit.loadDouble(address, resultFPR);
- DataFormat dataFormat = DataFormatJS;
- // We could be transforming a DataFormatCell into a
- // DataFormatJS here - but that's OK.
- if (cachedRecovery.recovery().dataFormat() == DataFormatDouble)
- dataFormat = DataFormatDouble;
- updateRecovery(cachedRecovery,
- ValueRecovery::inFPR(resultFPR, dataFormat));
- if (verbose)
- dataLog(cachedRecovery.recovery(), "\n");
- if (reg == newAsOld(dangerFrontier()))
- updateDangerFrontier();
- return;
- }
- }
-
- ASSERT(cachedRecovery.loadsIntoGPR());
- if (resultGPR == InvalidGPRReg || m_registers[resultGPR] || m_lockedRegisters.get(resultGPR))
- resultGPR = getFreeGPR();
- ASSERT(resultGPR != InvalidGPRReg);
- m_jit.loadPtr(address, resultGPR);
- updateRecovery(cachedRecovery,
- ValueRecovery::inGPR(resultGPR, cachedRecovery.recovery().dataFormat()));
- if (verbose)
- dataLog(cachedRecovery.recovery(), "\n");
- if (reg == newAsOld(dangerFrontier()))
- updateDangerFrontier();
-}
-
-bool CallFrameShuffler::canLoad(CachedRecovery& cachedRecovery)
-{
- if (!cachedRecovery.recovery().isInJSStack())
- return true;
-
- ASSERT(cachedRecovery.loadsIntoFPR() || cachedRecovery.loadsIntoGPR());
-
- if (cachedRecovery.loadsIntoFPR() && getFreeFPR() != InvalidFPRReg)
- return true;
-
- if (cachedRecovery.loadsIntoGPR() && getFreeGPR() != InvalidGPRReg)
- return true;
-
- return false;
-}
-
-void CallFrameShuffler::emitDisplace(CachedRecovery& cachedRecovery)
-{
- Reg wantedReg;
- if (!(wantedReg = Reg { cachedRecovery.wantedJSValueRegs().gpr() }))
- wantedReg = Reg { cachedRecovery.wantedFPR() };
- ASSERT(wantedReg);
- ASSERT(!m_lockedRegisters.get(wantedReg));
-
- if (CachedRecovery* current = m_registers[wantedReg]) {
- if (current == &cachedRecovery) {
- if (verbose)
- dataLog(" + ", wantedReg, " is OK\n");
- return;
- }
-        // We could handle this case more elaborately, e.g. by
-        // finding and resolving move cycles. However, ending up in
-        // this situation will be super rare, and should actually be
-        // outright impossible for non-FTL tiers, since:
- // (a) All doubles have been converted into JSValues with
- // ValueRep nodes, so FPRs are initially free
- //
- // (b) The only recoveries with wanted registers are the
- // callee (which always starts out in a register) and
- // the callee-save registers
- //
- // (c) The callee-save registers are the first things we
- // load (after the return PC), and they are loaded as JSValues
- //
- // (d) We prefer loading JSValues into FPRs if their
- // wanted GPR is not available
- //
- // (e) If we end up spilling some registers with a
- // target, we won't load them again before the very
- // end of the algorithm
- //
- // Combined, this means that we will never load a recovery
- // with a wanted GPR into any GPR other than its wanted
- // GPR. The callee could however have been initially in
- // one of the callee-save registers - but since the wanted
- // GPR for the callee is always regT0, it will be the
- // first one to be displaced, and we won't see it when
- // handling any of the callee-save registers.
- //
- // Thus, the only way we could ever reach this path is in
- // the FTL, when there is so much pressure that we
- // absolutely need to load the callee-save registers into
- // different GPRs initially but not enough pressure to
- // then have to spill all of them. And even in that case,
- // depending on the order in which B3 saves the
- // callee-saves, we will probably still be safe. Anyway,
- // the couple extra move instructions compared to an
- // efficient cycle-based algorithm are not going to hurt
- // us.
- if (wantedReg.isFPR()) {
- FPRReg tempFPR = getFreeFPR();
- if (verbose)
- dataLog(" * Moving ", wantedReg, " into ", tempFPR, "\n");
- m_jit.moveDouble(wantedReg.fpr(), tempFPR);
- updateRecovery(*current,
- ValueRecovery::inFPR(tempFPR, current->recovery().dataFormat()));
- } else {
- GPRReg tempGPR = getFreeGPR();
- if (verbose)
- dataLog(" * Moving ", wantedReg.gpr(), " into ", tempGPR, "\n");
- m_jit.move(wantedReg.gpr(), tempGPR);
- updateRecovery(*current,
- ValueRecovery::inGPR(tempGPR, current->recovery().dataFormat()));
- }
- }
- ASSERT(!m_registers[wantedReg]);
-
- if (cachedRecovery.recovery().isConstant()) {
- // We only care about callee saves for wanted FPRs, and those are never constants
- ASSERT(wantedReg.isGPR());
- if (verbose)
- dataLog(" * Loading ", cachedRecovery.recovery().constant(), " into ", wantedReg, "\n");
- m_jit.moveTrustedValue(cachedRecovery.recovery().constant(), JSValueRegs { wantedReg.gpr() });
- updateRecovery(
- cachedRecovery,
- ValueRecovery::inRegister(wantedReg, DataFormatJS));
- } else if (cachedRecovery.recovery().isInGPR()) {
- if (verbose)
- dataLog(" * Moving ", cachedRecovery.recovery(), " into ", wantedReg, "\n");
- if (wantedReg.isGPR())
- m_jit.move(cachedRecovery.recovery().gpr(), wantedReg.gpr());
- else
- m_jit.move64ToDouble(cachedRecovery.recovery().gpr(), wantedReg.fpr());
- RELEASE_ASSERT(cachedRecovery.recovery().dataFormat() == DataFormatJS);
- updateRecovery(cachedRecovery,
- ValueRecovery::inRegister(wantedReg, DataFormatJS));
- } else {
- ASSERT(cachedRecovery.recovery().isInFPR());
- if (cachedRecovery.recovery().dataFormat() == DataFormatDouble) {
- // We only care about callee saves for wanted FPRs, and those are always DataFormatJS
- ASSERT(wantedReg.isGPR());
- // This will automatically pick the wanted GPR
- emitBox(cachedRecovery);
- } else {
- if (verbose)
- dataLog(" * Moving ", cachedRecovery.recovery().fpr(), " into ", wantedReg, "\n");
- if (wantedReg.isGPR())
- m_jit.moveDoubleTo64(cachedRecovery.recovery().fpr(), wantedReg.gpr());
- else
- m_jit.moveDouble(cachedRecovery.recovery().fpr(), wantedReg.fpr());
- RELEASE_ASSERT(cachedRecovery.recovery().dataFormat() == DataFormatJS);
- updateRecovery(cachedRecovery,
- ValueRecovery::inRegister(wantedReg, DataFormatJS));
- }
- }
-
- ASSERT(m_registers[wantedReg] == &cachedRecovery);
-}
-
-bool CallFrameShuffler::tryAcquireTagTypeNumber()
-{
- if (m_tagTypeNumber != InvalidGPRReg)
- return true;
-
- m_tagTypeNumber = getFreeGPR();
-
- if (m_tagTypeNumber == InvalidGPRReg)
- return false;
-
- m_lockedRegisters.set(m_tagTypeNumber);
- m_jit.move(MacroAssembler::TrustedImm64(TagTypeNumber), m_tagTypeNumber);
- return true;
-}
-
-} // namespace JSC
-
-#endif // ENABLE(JIT) && USE(JSVALUE64)
diff --git a/Source/JavaScriptCore/jit/Reg.cpp b/Source/JavaScriptCore/jit/ClosureCallStubRoutine.cpp
index 6c0258061..1588f7fea 100644
--- a/Source/JavaScriptCore/jit/Reg.cpp
+++ b/Source/JavaScriptCore/jit/ClosureCallStubRoutine.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -24,23 +24,37 @@
*/
#include "config.h"
-#include "Reg.h"
+#include "ClosureCallStubRoutine.h"
#if ENABLE(JIT)
-#include "FPRInfo.h"
-#include "GPRInfo.h"
+#include "Executable.h"
+#include "Heap.h"
+#include "VM.h"
+#include "Operations.h"
+#include "SlotVisitor.h"
+#include "Structure.h"
namespace JSC {
-void Reg::dump(PrintStream& out) const
+ClosureCallStubRoutine::ClosureCallStubRoutine(
+ const MacroAssemblerCodeRef& code, VM& vm, const JSCell* owner,
+ Structure* structure, ExecutableBase* executable, const CodeOrigin& codeOrigin)
+ : GCAwareJITStubRoutine(code, vm, true)
+ , m_structure(vm, owner, structure)
+ , m_executable(vm, owner, executable)
+ , m_codeOrigin(codeOrigin)
{
- if (!*this)
- out.print("<none>");
- else if (isGPR())
- out.print(gpr());
- else
- out.print(fpr());
+}
+
+ClosureCallStubRoutine::~ClosureCallStubRoutine()
+{
+}
+
+void ClosureCallStubRoutine::markRequiredObjectsInternal(SlotVisitor& visitor)
+{
+ visitor.append(&m_structure);
+ visitor.append(&m_executable);
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/jit/JITNegGenerator.h b/Source/JavaScriptCore/jit/ClosureCallStubRoutine.h
index 6dc2c85a6..ad61ed514 100644
--- a/Source/JavaScriptCore/jit/JITNegGenerator.h
+++ b/Source/JavaScriptCore/jit/ClosureCallStubRoutine.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -20,45 +20,47 @@
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
-#ifndef JITNegGenerator_h
-#define JITNegGenerator_h
+#ifndef ClosureCallStubRoutine_h
+#define ClosureCallStubRoutine_h
+
+#include <wtf/Platform.h>
#if ENABLE(JIT)
-#include "CCallHelpers.h"
-#include "SnippetOperand.h"
+#include "CodeOrigin.h"
+#include "GCAwareJITStubRoutine.h"
namespace JSC {
-class JITNegGenerator {
+class ClosureCallStubRoutine : public GCAwareJITStubRoutine {
public:
- JITNegGenerator(JSValueRegs result, JSValueRegs src, GPRReg scratchGPR)
- : m_result(result)
- , m_src(src)
- , m_scratchGPR(scratchGPR)
- { }
-
- void generateFastPath(CCallHelpers&);
+ ClosureCallStubRoutine(
+ const MacroAssemblerCodeRef&, VM&, const JSCell* owner,
+ Structure*, ExecutableBase*, const CodeOrigin&);
+
+ virtual ~ClosureCallStubRoutine();
+
+ Structure* structure() const { return m_structure.get(); }
+ ExecutableBase* executable() const { return m_executable.get(); }
+ const CodeOrigin& codeOrigin() const { return m_codeOrigin; }
- bool didEmitFastPath() const { return m_didEmitFastPath; }
- CCallHelpers::JumpList& endJumpList() { return m_endJumpList; }
- CCallHelpers::JumpList& slowPathJumpList() { return m_slowPathJumpList; }
+protected:
+ virtual void markRequiredObjectsInternal(SlotVisitor&) override;
private:
- JSValueRegs m_result;
- JSValueRegs m_src;
- GPRReg m_scratchGPR;
- bool m_didEmitFastPath { false };
-
- CCallHelpers::JumpList m_endJumpList;
- CCallHelpers::JumpList m_slowPathJumpList;
+ WriteBarrier<Structure> m_structure;
+ WriteBarrier<ExecutableBase> m_executable;
+ // This allows us to figure out who a call is linked to by searching through
+ // stub routines.
+ CodeOrigin m_codeOrigin;
};
} // namespace JSC
#endif // ENABLE(JIT)
-#endif // JITNegGenerator_h
+#endif // ClosureCallStubRoutine_h
+
diff --git a/Source/JavaScriptCore/jit/CompactJITCodeMap.h b/Source/JavaScriptCore/jit/CompactJITCodeMap.h
index d5eaa4072..b09f2f6cd 100644
--- a/Source/JavaScriptCore/jit/CompactJITCodeMap.h
+++ b/Source/JavaScriptCore/jit/CompactJITCodeMap.h
@@ -10,7 +10,7 @@
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Inc. ("Apple") nor the names of
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
* its contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
@@ -32,6 +32,8 @@
#include <wtf/Assertions.h>
#include <wtf/FastMalloc.h>
+#include <wtf/OwnPtr.h>
+#include <wtf/PassOwnPtr.h>
#include <wtf/Vector.h>
namespace JSC {
@@ -45,7 +47,7 @@ namespace JSC {
// CompactJITCodeMap::Encoder encoder(map);
// encoder.append(a, b);
// encoder.append(c, d); // preconditions: c >= a, d >= b
-// auto map = encoder.finish();
+// OwnPtr<CompactJITCodeMap> map = encoder.finish();
//
// At some later time:
//
@@ -78,16 +80,6 @@ struct BytecodeAndMachineOffset {
class CompactJITCodeMap {
WTF_MAKE_FAST_ALLOCATED;
public:
- CompactJITCodeMap(uint8_t* buffer, unsigned size, unsigned numberOfEntries)
- : m_buffer(buffer)
-#if !ASSERT_DISABLED
- , m_size(size)
-#endif
- , m_numberOfEntries(numberOfEntries)
- {
- UNUSED_PARAM(size);
- }
-
~CompactJITCodeMap()
{
if (m_buffer)
@@ -102,6 +94,16 @@ public:
void decode(Vector<BytecodeAndMachineOffset>& result) const;
private:
+ CompactJITCodeMap(uint8_t* buffer, unsigned size, unsigned numberOfEntries)
+ : m_buffer(buffer)
+#if !ASSERT_DISABLED
+ , m_size(size)
+#endif
+ , m_numberOfEntries(numberOfEntries)
+ {
+ UNUSED_PARAM(size);
+ }
+
uint8_t at(unsigned index) const
{
ASSERT(index < m_size);
@@ -136,8 +138,8 @@ public:
void ensureCapacityFor(unsigned numberOfEntriesToAdd);
void append(unsigned bytecodeIndex, unsigned machineCodeOffset);
- std::unique_ptr<CompactJITCodeMap> finish();
-
+ PassOwnPtr<CompactJITCodeMap> finish();
+
private:
void appendByte(uint8_t value);
void encodeNumber(uint32_t value);
@@ -210,18 +212,18 @@ inline void CompactJITCodeMap::Encoder::append(unsigned bytecodeIndex, unsigned
m_numberOfEntries++;
}
-inline std::unique_ptr<CompactJITCodeMap> CompactJITCodeMap::Encoder::finish()
+inline PassOwnPtr<CompactJITCodeMap> CompactJITCodeMap::Encoder::finish()
{
m_capacity = m_size;
m_buffer = static_cast<uint8_t*>(fastRealloc(m_buffer, m_capacity));
- auto result = std::make_unique<CompactJITCodeMap>(m_buffer, m_size, m_numberOfEntries);
+ OwnPtr<CompactJITCodeMap> result = adoptPtr(new CompactJITCodeMap(m_buffer, m_size, m_numberOfEntries));
m_buffer = 0;
m_size = 0;
m_capacity = 0;
m_numberOfEntries = 0;
m_previousBytecodeIndex = 0;
m_previousMachineCodeOffset = 0;
- return result;
+ return result.release();
}
inline void CompactJITCodeMap::Encoder::appendByte(uint8_t value)
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.cpp b/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.cpp
deleted file mode 100644
index b4f56650b..000000000
--- a/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.cpp
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "ExecutableAllocationFuzz.h"
-
-#include "TestRunnerUtils.h"
-#include <wtf/Atomics.h>
-#include <wtf/DataLog.h>
-
-namespace JSC {
-
-static Atomic<unsigned> s_numberOfExecutableAllocationFuzzChecks;
-unsigned numberOfExecutableAllocationFuzzChecks()
-{
- return s_numberOfExecutableAllocationFuzzChecks.load();
-}
-
-ExecutableAllocationFuzzResult doExecutableAllocationFuzzing()
-{
- ASSERT(Options::useExecutableAllocationFuzz());
-
- unsigned oldValue;
- unsigned newValue;
- do {
- oldValue = s_numberOfExecutableAllocationFuzzChecks.load();
- newValue = oldValue + 1;
- } while (!s_numberOfExecutableAllocationFuzzChecks.compareExchangeWeak(oldValue, newValue));
-
- if (newValue == Options::fireExecutableAllocationFuzzAt()) {
- if (Options::verboseExecutableAllocationFuzz()) {
- dataLog("Will pretend to fail executable allocation.\n");
- WTFReportBacktrace();
- }
- return PretendToFailExecutableAllocation;
- }
-
- if (Options::fireExecutableAllocationFuzzAtOrAfter()
- && newValue >= Options::fireExecutableAllocationFuzzAtOrAfter()) {
- if (Options::verboseExecutableAllocationFuzz()) {
- dataLog("Will pretend to fail executable allocation.\n");
- WTFReportBacktrace();
- }
- return PretendToFailExecutableAllocation;
- }
-
- return AllowNormalExecutableAllocation;
-}
-
-} // namespace JSC
-
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.h b/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.h
deleted file mode 100644
index b15cdef44..000000000
--- a/Source/JavaScriptCore/jit/ExecutableAllocationFuzz.h
+++ /dev/null
@@ -1,51 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef ExecutableAllocationFuzz_h
-#define ExecutableAllocationFuzz_h
-
-#include "Options.h"
-
-namespace JSC {
-
-enum ExecutableAllocationFuzzResult {
- AllowNormalExecutableAllocation,
- PretendToFailExecutableAllocation
-};
-
-ExecutableAllocationFuzzResult doExecutableAllocationFuzzing();
-
-inline ExecutableAllocationFuzzResult doExecutableAllocationFuzzingIfEnabled()
-{
- if (LIKELY(!Options::useExecutableAllocationFuzz()))
- return AllowNormalExecutableAllocation;
-
- return doExecutableAllocationFuzzing();
-}
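-
-// Hedged usage sketch (call-site shape only; the real call sites live
-// in the executable allocators):
-//
-//     if (doExecutableAllocationFuzzingIfEnabled() == PretendToFailExecutableAllocation)
-//         return nullptr; // pretend executable memory is exhausted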
-
-} // namespace JSC
-
-#endif // ExecutableAllocationFuzz_h
-
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocator.cpp b/Source/JavaScriptCore/jit/ExecutableAllocator.cpp
index 4ede23531..5ac6cc412 100644
--- a/Source/JavaScriptCore/jit/ExecutableAllocator.cpp
+++ b/Source/JavaScriptCore/jit/ExecutableAllocator.cpp
@@ -24,17 +24,18 @@
*/
#include "config.h"
-#include "ExecutableAllocator.h"
-#include "JSCInlines.h"
+#include "ExecutableAllocator.h"
#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
#include "CodeProfiling.h"
#include <wtf/HashSet.h>
-#include <wtf/Lock.h>
#include <wtf/MetaAllocator.h>
-#include <wtf/NeverDestroyed.h>
#include <wtf/PageReservation.h>
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+#include <wtf/PassOwnPtr.h>
+#endif
+#include <wtf/ThreadingPrimitives.h>
#include <wtf/VMTags.h>
#endif
@@ -56,7 +57,7 @@ public:
DemandExecutableAllocator()
: MetaAllocator(jitAllocationGranule)
{
- std::lock_guard<StaticLock> lock(allocatorsMutex());
+ MutexLocker lock(allocatorsMutex());
allocators().add(this);
// Don't preallocate any memory here.
}
@@ -64,7 +65,7 @@ public:
virtual ~DemandExecutableAllocator()
{
{
- std::lock_guard<StaticLock> lock(allocatorsMutex());
+ MutexLocker lock(allocatorsMutex());
allocators().remove(this);
}
for (unsigned i = 0; i < reservations.size(); ++i)
@@ -74,7 +75,7 @@ public:
static size_t bytesAllocatedByAllAllocators()
{
size_t total = 0;
- std::lock_guard<StaticLock> lock(allocatorsMutex());
+ MutexLocker lock(allocatorsMutex());
for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator)
total += (*allocator)->bytesAllocated();
return total;
@@ -83,7 +84,7 @@ public:
static size_t bytesCommittedByAllocactors()
{
size_t total = 0;
- std::lock_guard<StaticLock> lock(allocatorsMutex());
+ MutexLocker lock(allocatorsMutex());
for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator)
total += (*allocator)->bytesCommitted();
return total;
@@ -92,7 +93,7 @@ public:
#if ENABLE(META_ALLOCATOR_PROFILE)
static void dumpProfileFromAllAllocators()
{
- std::lock_guard<StaticLock> lock(allocatorsMutex());
+ MutexLocker lock(allocatorsMutex());
for (HashSet<DemandExecutableAllocator*>::const_iterator allocator = allocators().begin(); allocator != allocators().end(); ++allocator)
(*allocator)->dumpProfile();
}
@@ -134,14 +135,12 @@ private:
Vector<PageReservation, 16> reservations;
static HashSet<DemandExecutableAllocator*>& allocators()
{
- static NeverDestroyed<HashSet<DemandExecutableAllocator*>> set;
- return set;
+ DEFINE_STATIC_LOCAL(HashSet<DemandExecutableAllocator*>, sAllocators, ());
+ return sAllocators;
}
-
- static StaticLock& allocatorsMutex()
+ static Mutex& allocatorsMutex()
{
- static StaticLock mutex;
-
+ DEFINE_STATIC_LOCAL(Mutex, mutex, ());
return mutex;
}
};
@@ -170,7 +169,7 @@ void ExecutableAllocator::initializeAllocator()
ExecutableAllocator::ExecutableAllocator(VM&)
#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
- : m_allocator(std::make_unique<DemandExecutableAllocator>())
+ : m_allocator(adoptPtr(new DemandExecutableAllocator()))
#endif
{
ASSERT(allocator());
@@ -213,11 +212,11 @@ double ExecutableAllocator::memoryPressureMultiplier(size_t addedMemoryUsage)
}
-RefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
+PassRefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
{
RefPtr<ExecutableMemoryHandle> result = allocator()->allocate(sizeInBytes, ownerUID);
RELEASE_ASSERT(result || effort != JITCompilationMustSucceed);
- return result;
+ return result.release();
}
size_t ExecutableAllocator::committedByteCount()
@@ -232,16 +231,6 @@ void ExecutableAllocator::dumpProfile()
}
#endif
-Lock& ExecutableAllocator::getLock() const
-{
- return gAllocator->getLock();
-}
-
-bool ExecutableAllocator::isValidExecutableMemory(const LockHolder& locker, void* address)
-{
- return gAllocator->isInAllocatedMemory(locker, address);
-}
-
#endif // ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocator.h b/Source/JavaScriptCore/jit/ExecutableAllocator.h
index 09b768bed..01be7c1aa 100644
--- a/Source/JavaScriptCore/jit/ExecutableAllocator.h
+++ b/Source/JavaScriptCore/jit/ExecutableAllocator.h
@@ -29,10 +29,10 @@
#include <stddef.h> // for ptrdiff_t
#include <limits>
#include <wtf/Assertions.h>
-#include <wtf/Lock.h>
#include <wtf/MetaAllocatorHandle.h>
#include <wtf/MetaAllocator.h>
#include <wtf/PageAllocation.h>
+#include <wtf/PassRefPtr.h>
#include <wtf/RefCounted.h>
#include <wtf/Vector.h>
@@ -55,16 +55,44 @@
#include <unistd.h>
#endif
+#if OS(WINCE)
+// From pkfuncs.h (private header file from the Platform Builder)
+#define CACHE_SYNC_ALL 0x07F
+extern "C" __declspec(dllimport) void CacheRangeFlush(LPVOID pAddr, DWORD dwLength, DWORD dwFlags);
+#endif
+
#define JIT_ALLOCATOR_LARGE_ALLOC_SIZE (pageSize() * 4)
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+#define PROTECTION_FLAGS_RW (PROT_READ | PROT_WRITE)
+#define PROTECTION_FLAGS_RX (PROT_READ | PROT_EXEC)
+#define EXECUTABLE_POOL_WRITABLE false
+#else
#define EXECUTABLE_POOL_WRITABLE true
+#endif
namespace JSC {
class VM;
+void releaseExecutableMemory(VM&);
static const unsigned jitAllocationGranule = 32;
+inline size_t roundUpAllocationSize(size_t request, size_t granularity)
+{
+ RELEASE_ASSERT((std::numeric_limits<size_t>::max() - granularity) > request);
+
+    // Round up to the next multiple of the allocation granularity.
+ size_t size = request + (granularity - 1);
+ size = size & ~(granularity - 1);
+ ASSERT(size >= request);
+ return size;
+}
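+// For example, roundUpAllocationSize(5000, 4096) == 8192, and with the
+// 32-byte jitAllocationGranule, roundUpAllocationSize(33, 32) == 64.
+// (Illustrative values only.)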
+
+}
+
+namespace JSC {
+
typedef WTF::MetaAllocatorHandle ExecutableMemoryHandle;
#if ENABLE(ASSEMBLER)
@@ -74,20 +102,13 @@ class DemandExecutableAllocator;
#endif
#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED)
-#if CPU(ARM)
+#if CPU(ARM) || CPU(ARM64)
static const size_t fixedExecutableMemoryPoolSize = 16 * 1024 * 1024;
-#elif CPU(ARM64)
-static const size_t fixedExecutableMemoryPoolSize = 32 * 1024 * 1024;
#elif CPU(X86_64)
static const size_t fixedExecutableMemoryPoolSize = 1024 * 1024 * 1024;
#else
static const size_t fixedExecutableMemoryPoolSize = 32 * 1024 * 1024;
#endif
-#if CPU(ARM)
-static const double executablePoolReservationFraction = 0.15;
-#else
-static const double executablePoolReservationFraction = 0.25;
-#endif
extern uintptr_t startOfFixedExecutableMemoryPool;
#endif
@@ -113,13 +134,36 @@ public:
static void dumpProfile() { }
#endif
- RefPtr<ExecutableMemoryHandle> allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort);
+ PassRefPtr<ExecutableMemoryHandle> allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort);
+
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+ static void makeWritable(void* start, size_t size)
+ {
+ reprotectRegion(start, size, Writable);
+ }
- bool isValidExecutableMemory(const LockHolder&, void* address);
+ static void makeExecutable(void* start, size_t size)
+ {
+ reprotectRegion(start, size, Executable);
+ }
+#else
+ static void makeWritable(void*, size_t) {}
+ static void makeExecutable(void*, size_t) {}
+#endif
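
On POSIX systems, makeWritable/makeExecutable reduce to page-granular protection flips between RW and RX, so a region is never writable and executable at the same time. A hedged sketch of that reprotection, assuming 4 KiB pages and mprotect (the real reprotectRegion lives elsewhere in this file):

    #include <sys/mman.h>
    #include <cstddef>
    #include <cstdint>

    // Round [start, start + size) out to page boundaries, then reprotect.
    static bool reprotect(void* start, size_t size, int prot, size_t pageSize = 4096)
    {
        uintptr_t begin = reinterpret_cast<uintptr_t>(start) & ~(pageSize - 1);
        uintptr_t end = (reinterpret_cast<uintptr_t>(start) + size + pageSize - 1)
            & ~(pageSize - 1);
        return mprotect(reinterpret_cast<void*>(begin), end - begin, prot) == 0;
    }
    // reprotect(code, len, PROT_READ | PROT_WRITE) before patching code;
    // reprotect(code, len, PROT_READ | PROT_EXEC) before running it.
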
static size_t committedByteCount();
- Lock& getLock() const;
+private:
+
+#if ENABLE(ASSEMBLER_WX_EXCLUSIVE)
+ static void reprotectRegion(void*, size_t, ProtectionSetting);
+#if ENABLE(EXECUTABLE_ALLOCATOR_DEMAND)
+ // We create a MetaAllocator for each JS global object.
+ OwnPtr<DemandExecutableAllocator> m_allocator;
+ DemandExecutableAllocator* allocator() { return m_allocator.get(); }
+#endif
+#endif
+
};
#endif // ENABLE(JIT) && ENABLE(ASSEMBLER)
diff --git a/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp b/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
index 8d5018b6c..8e0b77cfc 100644
--- a/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
+++ b/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2009 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -24,18 +24,14 @@
*/
#include "config.h"
-#include "ExecutableAllocator.h"
-#include "JSCInlines.h"
+#include "ExecutableAllocator.h"
#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED)
#include "CodeProfiling.h"
-#include "ExecutableAllocationFuzz.h"
#include <errno.h>
-#if !PLATFORM(WIN)
#include <unistd.h>
-#endif
#include <wtf/MetaAllocator.h>
#include <wtf/PageReservation.h>
#include <wtf/VMTags.h>
@@ -48,6 +44,11 @@
#include <stdio.h>
#endif
+#if !PLATFORM(IOS) && PLATFORM(MAC) && __MAC_OS_X_VERSION_MIN_REQUIRED < 1090
+// MADV_FREE_REUSABLE does not work for JIT memory on older OSes, so use MADV_FREE in that case.
+#define WTF_USE_MADV_FREE_FOR_JIT_MEMORY 1
+#endif
+
using namespace WTF;
namespace JSC {
@@ -60,15 +61,12 @@ public:
FixedVMPoolExecutableAllocator()
: MetaAllocator(jitAllocationGranule) // round up all allocations to 32 bytes
{
- size_t reservationSize;
- if (Options::jitMemoryReservationSize())
- reservationSize = Options::jitMemoryReservationSize();
- else
- reservationSize = fixedExecutableMemoryPoolSize;
- reservationSize = roundUpToMultipleOf(pageSize(), reservationSize);
- m_reservation = PageReservation::reserveWithGuardPages(reservationSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
+ m_reservation = PageReservation::reserveWithGuardPages(fixedExecutableMemoryPoolSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true);
+#if !ENABLE(LLINT)
+ RELEASE_ASSERT(m_reservation);
+#endif
if (m_reservation) {
- ASSERT(m_reservation.size() == reservationSize);
+ ASSERT(m_reservation.size() == fixedExecutableMemoryPoolSize);
addFreshFreeSpace(m_reservation.base(), m_reservation.size());
startOfFixedExecutableMemoryPool = reinterpret_cast<uintptr_t>(m_reservation.base());
@@ -154,59 +152,28 @@ double ExecutableAllocator::memoryPressureMultiplier(size_t addedMemoryUsage)
MetaAllocator::Statistics statistics = allocator->currentStatistics();
ASSERT(statistics.bytesAllocated <= statistics.bytesReserved);
size_t bytesAllocated = statistics.bytesAllocated + addedMemoryUsage;
- size_t bytesAvailable = static_cast<size_t>(
- statistics.bytesReserved * (1 - executablePoolReservationFraction));
- if (bytesAllocated >= bytesAvailable)
- bytesAllocated = bytesAvailable;
+ if (bytesAllocated >= statistics.bytesReserved)
+ bytesAllocated = statistics.bytesReserved;
double result = 1.0;
- size_t divisor = bytesAvailable - bytesAllocated;
+ size_t divisor = statistics.bytesReserved - bytesAllocated;
if (divisor)
- result = static_cast<double>(bytesAvailable) / divisor;
+ result = static_cast<double>(statistics.bytesReserved) / divisor;
if (result < 1.0)
result = 1.0;
return result;
}
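
With the reservation fraction removed, the multiplier is simply bytesReserved divided by the remaining headroom, clamped to at least 1.0: with a 16 MB pool and 12 MB allocated the divisor is 4 MB, so the multiplier is 16/4 = 4.0, and it grows without bound as the pool fills. A self-contained restatement of the patched arithmetic:

    #include <cstddef>
    #include <cstdio>

    double multiplier(size_t bytesReserved, size_t bytesAllocated)
    {
        if (bytesAllocated >= bytesReserved)
            bytesAllocated = bytesReserved;
        double result = 1.0;
        size_t divisor = bytesReserved - bytesAllocated;
        if (divisor)
            result = static_cast<double>(bytesReserved) / divisor;
        return result < 1.0 ? 1.0 : result;
    }

    int main()
    {
        printf("%g\n", multiplier(16u << 20, 12u << 20)); // prints 4
        return 0;
    }
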
-RefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(VM&, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
+PassRefPtr<ExecutableMemoryHandle> ExecutableAllocator::allocate(VM& vm, size_t sizeInBytes, void* ownerUID, JITCompilationEffort effort)
{
- if (effort != JITCompilationCanFail && Options::reportMustSucceedExecutableAllocations()) {
- dataLog("Allocating ", sizeInBytes, " bytes of executable memory with JITCompilationMustSucceed.\n");
- WTFReportBacktrace();
- }
-
- if (effort == JITCompilationCanFail
- && doExecutableAllocationFuzzingIfEnabled() == PretendToFailExecutableAllocation)
- return nullptr;
-
- if (effort == JITCompilationCanFail) {
- // Don't allow allocations if we are down to reserve.
- MetaAllocator::Statistics statistics = allocator->currentStatistics();
- size_t bytesAllocated = statistics.bytesAllocated + sizeInBytes;
- size_t bytesAvailable = static_cast<size_t>(
- statistics.bytesReserved * (1 - executablePoolReservationFraction));
- if (bytesAllocated > bytesAvailable)
- return nullptr;
- }
-
RefPtr<ExecutableMemoryHandle> result = allocator->allocate(sizeInBytes, ownerUID);
if (!result) {
- if (effort != JITCompilationCanFail) {
- dataLog("Ran out of executable memory while allocating ", sizeInBytes, " bytes.\n");
- CRASH();
- }
- return nullptr;
+ if (effort == JITCompilationCanFail)
+ return result;
+ releaseExecutableMemory(vm);
+ result = allocator->allocate(sizeInBytes, ownerUID);
+ RELEASE_ASSERT(result);
}
- return result;
-}
-
-bool ExecutableAllocator::isValidExecutableMemory(const LockHolder& locker, void* address)
-{
- return allocator->isInAllocatedMemory(locker, address);
-}
-
-Lock& ExecutableAllocator::getLock() const
-{
- return allocator->getLock();
+ return result.release();
}
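
The patched allocate() embodies a simple last-resort policy: try once; on failure either report it (JITCompilationCanFail) or jettison executable memory VM-wide and retry, asserting that the retry succeeds. A hedged sketch of that shape — Pool, Handle, and releaseEverything() are hypothetical stand-ins, not JSC API:

    #include <cassert>
    #include <cstddef>

    template<typename Pool>
    typename Pool::Handle allocateWithRetry(Pool& pool, size_t bytes, bool canFail)
    {
        typename Pool::Handle result = pool.allocate(bytes);
        if (!result) {
            if (canFail)
                return result;             // caller copes with the failure
            pool.releaseEverything();      // e.g. throw away compiled code...
            result = pool.allocate(bytes); // ...then retry
            assert(result);                // the retry must succeed
        }
        return result;
    }
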
size_t ExecutableAllocator::committedByteCount()
diff --git a/Source/JavaScriptCore/jit/FPRInfo.h b/Source/JavaScriptCore/jit/FPRInfo.h
index a19a1ac38..5bb0e16cc 100644
--- a/Source/JavaScriptCore/jit/FPRInfo.h
+++ b/Source/JavaScriptCore/jit/FPRInfo.h
@@ -42,7 +42,6 @@ class FPRInfo {
public:
typedef FPRReg RegisterType;
static const unsigned numberOfRegisters = 6;
- static const unsigned numberOfArgumentRegisters = 8;
// Temporary registers.
static const FPRReg fpRegT0 = X86Registers::xmm0;
@@ -57,10 +56,6 @@ public:
static const FPRReg argumentFPR1 = X86Registers::xmm1; // fpRegT1
static const FPRReg argumentFPR2 = X86Registers::xmm2; // fpRegT2
static const FPRReg argumentFPR3 = X86Registers::xmm3; // fpRegT3
- static const FPRReg argumentFPR4 = X86Registers::xmm4; // fpRegT4
- static const FPRReg argumentFPR5 = X86Registers::xmm5; // fpRegT5
- static const FPRReg argumentFPR6 = X86Registers::xmm6;
- static const FPRReg argumentFPR7 = X86Registers::xmm7;
#endif
// On X86 the return will actually be on the x87 stack,
// so we'll copy to xmm0 for sanity!
@@ -187,7 +182,6 @@ class FPRInfo {
public:
typedef FPRReg RegisterType;
static const unsigned numberOfRegisters = 23;
- static const unsigned numberOfArgumentRegisters = 8;
// Temporary registers.
// q8-q15 are callee-saved; q31 is used by the MacroAssembler as fpTempRegister.
@@ -214,14 +208,6 @@ public:
static const FPRReg fpRegT20 = ARM64Registers::q28;
static const FPRReg fpRegT21 = ARM64Registers::q29;
static const FPRReg fpRegT22 = ARM64Registers::q30;
- static const FPRReg fpRegCS0 = ARM64Registers::q8;
- static const FPRReg fpRegCS1 = ARM64Registers::q9;
- static const FPRReg fpRegCS2 = ARM64Registers::q10;
- static const FPRReg fpRegCS3 = ARM64Registers::q11;
- static const FPRReg fpRegCS4 = ARM64Registers::q12;
- static const FPRReg fpRegCS5 = ARM64Registers::q13;
- static const FPRReg fpRegCS6 = ARM64Registers::q14;
- static const FPRReg fpRegCS7 = ARM64Registers::q15;
static const FPRReg argumentFPR0 = ARM64Registers::q0; // fpRegT0
static const FPRReg argumentFPR1 = ARM64Registers::q1; // fpRegT1
@@ -256,15 +242,10 @@ public:
16, 17, 18, 19, 20, 21, 22, InvalidIndex
};
unsigned result = indexForRegister[reg];
+ ASSERT(result != InvalidIndex);
return result;
}
- static FPRReg toArgumentRegister(unsigned index)
- {
- ASSERT(index < 8);
- return static_cast<FPRReg>(index);
- }
-
static const char* debugName(FPRReg reg)
{
ASSERT(reg != InvalidFPRReg);
@@ -288,16 +269,15 @@ public:
class FPRInfo {
public:
typedef FPRReg RegisterType;
- static const unsigned numberOfRegisters = 7;
+ static const unsigned numberOfRegisters = 6;
// Temporary registers.
static const FPRReg fpRegT0 = MIPSRegisters::f0;
- static const FPRReg fpRegT1 = MIPSRegisters::f2;
- static const FPRReg fpRegT2 = MIPSRegisters::f4;
- static const FPRReg fpRegT3 = MIPSRegisters::f6;
- static const FPRReg fpRegT4 = MIPSRegisters::f8;
- static const FPRReg fpRegT5 = MIPSRegisters::f10;
- static const FPRReg fpRegT6 = MIPSRegisters::f18;
+ static const FPRReg fpRegT1 = MIPSRegisters::f4;
+ static const FPRReg fpRegT2 = MIPSRegisters::f6;
+ static const FPRReg fpRegT3 = MIPSRegisters::f8;
+ static const FPRReg fpRegT4 = MIPSRegisters::f10;
+ static const FPRReg fpRegT5 = MIPSRegisters::f18;
static const FPRReg returnValueFPR = MIPSRegisters::f0;
@@ -307,7 +287,7 @@ public:
static FPRReg toRegister(unsigned index)
{
static const FPRReg registerForIndex[numberOfRegisters] = {
- fpRegT0, fpRegT1, fpRegT2, fpRegT3, fpRegT4, fpRegT5, fpRegT6 };
+ fpRegT0, fpRegT1, fpRegT2, fpRegT3, fpRegT4, fpRegT5 };
ASSERT(index < numberOfRegisters);
return registerForIndex[index];
@@ -318,13 +298,14 @@ public:
ASSERT(reg != InvalidFPRReg);
ASSERT(reg < 20);
static const unsigned indexForRegister[20] = {
- 0, InvalidIndex, 1, InvalidIndex,
- 2, InvalidIndex, 3, InvalidIndex,
- 4, InvalidIndex, 5, InvalidIndex,
+ 0, InvalidIndex, InvalidIndex, InvalidIndex,
+ 1, InvalidIndex, 2, InvalidIndex,
+ 3, InvalidIndex, 4, InvalidIndex,
InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex,
- InvalidIndex, InvalidIndex, 6, InvalidIndex,
+ InvalidIndex, InvalidIndex, 5, InvalidIndex,
};
unsigned result = indexForRegister[reg];
+ ASSERT(result != InvalidIndex);
return result;
}
@@ -390,6 +371,7 @@ public:
InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex
};
unsigned result = indexForRegister[reg];
+ ASSERT(result != InvalidIndex);
return result;
}
diff --git a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp
index 60c0c5514..f681dd847 100644
--- a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp
+++ b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp
@@ -28,25 +28,24 @@
#if ENABLE(JIT)
-#include "CodeBlock.h"
-#include "DFGCommonData.h"
#include "Heap.h"
#include "VM.h"
-#include "JSCInlines.h"
+#include "Operations.h"
#include "SlotVisitor.h"
#include "Structure.h"
namespace JSC {
GCAwareJITStubRoutine::GCAwareJITStubRoutine(
- const MacroAssemblerCodeRef& code, VM& vm)
+ const MacroAssemblerCodeRef& code, VM& vm, bool isClosureCall)
: JITStubRoutine(code)
, m_mayBeExecuting(false)
, m_isJettisoned(false)
+ , m_isClosureCall(isClosureCall)
{
vm.heap.m_jitStubRoutines.add(this);
}
-
+
GCAwareJITStubRoutine::~GCAwareJITStubRoutine() { }
void GCAwareJITStubRoutine::observeZeroRefCount()
@@ -96,61 +95,29 @@ void MarkingGCAwareJITStubRoutineWithOneObject::markRequiredObjectsInternal(Slot
visitor.append(&m_object);
}
-
-GCAwareJITStubRoutineWithExceptionHandler::GCAwareJITStubRoutineWithExceptionHandler(
- const MacroAssemblerCodeRef& code, VM& vm,
- CodeBlock* codeBlockForExceptionHandlers, CallSiteIndex exceptionHandlerCallSiteIndex)
- : GCAwareJITStubRoutine(code, vm)
- , m_codeBlockWithExceptionHandler(codeBlockForExceptionHandlers)
- , m_exceptionHandlerCallSiteIndex(exceptionHandlerCallSiteIndex)
-{
- RELEASE_ASSERT(m_codeBlockWithExceptionHandler);
- ASSERT(!!m_codeBlockWithExceptionHandler->handlerForIndex(exceptionHandlerCallSiteIndex.bits()));
-}
-
-void GCAwareJITStubRoutineWithExceptionHandler::aboutToDie()
-{
- m_codeBlockWithExceptionHandler = nullptr;
-}
-
-void GCAwareJITStubRoutineWithExceptionHandler::observeZeroRefCount()
+PassRefPtr<JITStubRoutine> createJITStubRoutine(
+ const MacroAssemblerCodeRef& code,
+ VM& vm,
+ const JSCell*,
+ bool makesCalls)
{
-#if ENABLE(DFG_JIT)
- if (m_codeBlockWithExceptionHandler) {
- m_codeBlockWithExceptionHandler->jitCode()->dfgCommon()->removeCallSiteIndex(m_exceptionHandlerCallSiteIndex);
- m_codeBlockWithExceptionHandler->removeExceptionHandlerForCallSite(m_exceptionHandlerCallSiteIndex);
- m_codeBlockWithExceptionHandler = nullptr;
- }
-#endif
+ if (!makesCalls)
+ return adoptRef(new JITStubRoutine(code));
- Base::observeZeroRefCount();
+ return static_pointer_cast<JITStubRoutine>(
+ adoptRef(new GCAwareJITStubRoutine(code, vm)));
}
-
PassRefPtr<JITStubRoutine> createJITStubRoutine(
const MacroAssemblerCodeRef& code,
VM& vm,
const JSCell* owner,
bool makesCalls,
- JSCell* object,
- CodeBlock* codeBlockForExceptionHandlers,
- CallSiteIndex exceptionHandlerCallSiteIndex)
+ JSCell* object)
{
if (!makesCalls)
return adoptRef(new JITStubRoutine(code));
- if (codeBlockForExceptionHandlers) {
- RELEASE_ASSERT(!object); // We're not a marking stub routine.
- RELEASE_ASSERT(JITCode::isOptimizingJIT(codeBlockForExceptionHandlers->jitType()));
- return static_pointer_cast<JITStubRoutine>(
- adoptRef(new GCAwareJITStubRoutineWithExceptionHandler(code, vm, codeBlockForExceptionHandlers, exceptionHandlerCallSiteIndex)));
- }
-
- if (!object) {
- return static_pointer_cast<JITStubRoutine>(
- adoptRef(new GCAwareJITStubRoutine(code, vm)));
- }
-
return static_pointer_cast<JITStubRoutine>(
adoptRef(new MarkingGCAwareJITStubRoutineWithOneObject(code, vm, owner, object)));
}
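
After this patch the factory is a three-way dispatch: a plain JITStubRoutine when the stub makes no calls (the GC can never run while it executes), a GCAwareJITStubRoutine when it may be on the stack during a collection, and the marking variant when one extra cell must be kept alive. A toy, self-contained model of that choice, with placeholder types standing in for the JSC classes:

    #include <memory>

    struct Stub { virtual ~Stub() = default; };
    struct GCAwareStub : Stub {};        // registers itself with the heap
    struct MarkingStub : GCAwareStub {}; // additionally marks one object

    std::unique_ptr<Stub> pickStub(bool makesCalls, bool hasObjectToMark)
    {
        if (!makesCalls)
            return std::make_unique<Stub>();
        if (!hasObjectToMark)
            return std::make_unique<GCAwareStub>();
        return std::make_unique<MarkingStub>();
    }

    int main() { return pickStub(true, false) ? 0 : 1; }
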
diff --git a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h
index 97d9016d6..03045c5d1 100644
--- a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h
+++ b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,6 +26,8 @@
#ifndef GCAwareJITStubRoutine_h
#define GCAwareJITStubRoutine_h
+#include <wtf/Platform.h>
+
#if ENABLE(JIT)
#include "JITStubRoutine.h"
@@ -52,7 +54,7 @@ class JITStubRoutineSet;
// list which does not get reclaimed all at once).
class GCAwareJITStubRoutine : public JITStubRoutine {
public:
- GCAwareJITStubRoutine(const MacroAssemblerCodeRef&, VM&);
+ GCAwareJITStubRoutine(const MacroAssemblerCodeRef&, VM&, bool isClosureCall = false);
virtual ~GCAwareJITStubRoutine();
void markRequiredObjects(SlotVisitor& visitor)
@@ -62,6 +64,8 @@ public:
void deleteFromGC();
+ bool isClosureCall() const { return m_isClosureCall; }
+
protected:
virtual void observeZeroRefCount() override;
@@ -72,6 +76,7 @@ private:
bool m_mayBeExecuting;
bool m_isJettisoned;
+ bool m_isClosureCall;
};
// Use this if you want to mark one additional object during GC if your stub
@@ -89,24 +94,6 @@ private:
WriteBarrier<JSCell> m_object;
};
-
-// The stub has exception handlers in it. So it clears itself from exception
-// handling table when it dies. It also frees space in CodeOrigin table
-// for new exception handlers to use the same CallSiteIndex.
-class GCAwareJITStubRoutineWithExceptionHandler : public GCAwareJITStubRoutine {
-public:
- typedef GCAwareJITStubRoutine Base;
-
- GCAwareJITStubRoutineWithExceptionHandler(const MacroAssemblerCodeRef&, VM&, CodeBlock*, CallSiteIndex);
-
- void aboutToDie() override;
- void observeZeroRefCount() override;
-
-private:
- CodeBlock* m_codeBlockWithExceptionHandler;
- CallSiteIndex m_exceptionHandlerCallSiteIndex;
-};
-
// Helper for easily creating a GC-aware JIT stub routine. For the varargs,
// pass zero or more JSCell*'s. This will either create a JITStubRoutine, a
// GCAwareJITStubRoutine, or an ObjectMarkingGCAwareJITStubRoutine as
@@ -127,14 +114,10 @@ private:
// way.
PassRefPtr<JITStubRoutine> createJITStubRoutine(
+ const MacroAssemblerCodeRef&, VM&, const JSCell* owner, bool makesCalls);
+PassRefPtr<JITStubRoutine> createJITStubRoutine(
const MacroAssemblerCodeRef&, VM&, const JSCell* owner, bool makesCalls,
- JSCell* = nullptr,
- CodeBlock* codeBlockForExceptionHandlers = nullptr, CallSiteIndex exceptionHandlingCallSiteIndex = CallSiteIndex(std::numeric_limits<unsigned>::max()));
-
-// Helper for the creation of simple stub routines that need no help from the GC. Note
-// that codeBlock gets "executed" more than once.
-#define FINALIZE_CODE_FOR_GC_AWARE_STUB(codeBlock, patchBuffer, makesCalls, cell, dataLogFArguments) \
- (createJITStubRoutine(FINALIZE_CODE_FOR((codeBlock), (patchBuffer), dataLogFArguments), *(codeBlock)->vm(), (codeBlock), (makesCalls), (cell)))
+ JSCell*);
} // namespace JSC
diff --git a/Source/JavaScriptCore/jit/GPRInfo.cpp b/Source/JavaScriptCore/jit/GPRInfo.cpp
deleted file mode 100644
index 849354854..000000000
--- a/Source/JavaScriptCore/jit/GPRInfo.cpp
+++ /dev/null
@@ -1,42 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "GPRInfo.h"
-
-#if ENABLE(JIT)
-
-namespace JSC {
-
-// This is in the .cpp file to work around clang issues.
-#if CPU(X86_64)
-const GPRReg GPRInfo::patchpointScratchRegister = MacroAssembler::s_scratchRegister;
-#elif CPU(ARM64)
-const GPRReg GPRInfo::patchpointScratchRegister = ARM64Registers::ip0;
-#endif
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/GPRInfo.h b/Source/JavaScriptCore/jit/GPRInfo.h
index 14a2ebd3d..393a56b50 100644
--- a/Source/JavaScriptCore/jit/GPRInfo.h
+++ b/Source/JavaScriptCore/jit/GPRInfo.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,16 +27,10 @@
#define GPRInfo_h
#include "MacroAssembler.h"
-#include <array>
#include <wtf/PrintStream.h>
namespace JSC {
-// We use the same conventions in the baseline JIT as in the LLint. If you
-// change mappings in the GPRInfo, you should change them in the offlineasm
-// compiler adequately. The register naming conventions are described at the
-// top of the LowLevelInterpreter.asm file.
-
typedef MacroAssembler::RegisterID GPRReg;
#define InvalidGPRReg ((::JSC::GPRReg)-1)
@@ -60,23 +54,12 @@ public:
return JSValueRegs(gpr);
}
- static JSValueRegs withTwoAvailableRegs(GPRReg gpr, GPRReg)
- {
- return JSValueRegs(gpr);
- }
-
bool operator!() const { return m_gpr == InvalidGPRReg; }
- explicit operator bool() const { return m_gpr != InvalidGPRReg; }
-
- bool operator==(JSValueRegs other) { return m_gpr == other.m_gpr; }
- bool operator!=(JSValueRegs other) { return !(*this == other); }
GPRReg gpr() const { return m_gpr; }
GPRReg tagGPR() const { return InvalidGPRReg; }
GPRReg payloadGPR() const { return m_gpr; }
- bool uses(GPRReg gpr) const { return m_gpr == gpr; }
-
private:
GPRReg m_gpr;
};
@@ -115,7 +98,6 @@ public:
}
bool operator!() const { return m_base == InvalidGPRReg; }
- explicit operator bool() const { return m_base != InvalidGPRReg; }
bool isAddress() const { return m_offset != notAddress(); }
@@ -162,29 +144,16 @@ public:
{
}
- static JSValueRegs withTwoAvailableRegs(GPRReg gpr1, GPRReg gpr2)
- {
- return JSValueRegs(gpr1, gpr2);
- }
-
static JSValueRegs payloadOnly(GPRReg gpr)
{
return JSValueRegs(InvalidGPRReg, gpr);
}
- bool operator!() const { return !static_cast<bool>(*this); }
- explicit operator bool() const
+ bool operator!() const
{
- return static_cast<GPRReg>(m_tagGPR) != InvalidGPRReg
- || static_cast<GPRReg>(m_payloadGPR) != InvalidGPRReg;
+ return static_cast<GPRReg>(m_tagGPR) == InvalidGPRReg
+ && static_cast<GPRReg>(m_payloadGPR) == InvalidGPRReg;
}
-
- bool operator==(JSValueRegs other) const
- {
- return m_tagGPR == other.m_tagGPR
- && m_payloadGPR == other.m_payloadGPR;
- }
- bool operator!=(JSValueRegs other) const { return !(*this == other); }
GPRReg tagGPR() const { return static_cast<GPRReg>(m_tagGPR); }
GPRReg payloadGPR() const { return static_cast<GPRReg>(m_payloadGPR); }
@@ -200,8 +169,6 @@ public:
return tagGPR();
}
- bool uses(GPRReg gpr) const { return m_tagGPR == gpr || m_payloadGPR == gpr; }
-
private:
int8_t m_tagGPR;
int8_t m_payloadGPR;
@@ -252,12 +219,11 @@ public:
result.m_tagType = static_cast<int8_t>(JSValue::CellTag);
return result;
}
-
- bool operator!() const { return !static_cast<bool>(*this); }
- explicit operator bool() const
+
+ bool operator!() const
{
- return static_cast<GPRReg>(m_baseOrTag) != InvalidGPRReg
- || static_cast<GPRReg>(m_payload) != InvalidGPRReg;
+ return static_cast<GPRReg>(m_baseOrTag) == InvalidGPRReg
+ && static_cast<GPRReg>(m_payload) == InvalidGPRReg;
}
bool isAddress() const
@@ -314,9 +280,10 @@ private:
};
#endif // USE(JSVALUE32_64)
+// The baseline JIT requires that regT3 be callee-preserved.
+
#if CPU(X86)
#define NUMBER_OF_ARGUMENT_REGISTERS 0u
-#define NUMBER_OF_CALLEE_SAVES_REGISTERS 0u
class GPRInfo {
public:
@@ -324,20 +291,25 @@ public:
static const unsigned numberOfRegisters = 6;
static const unsigned numberOfArgumentRegisters = NUMBER_OF_ARGUMENT_REGISTERS;
+ // Note: regT3 is required to be callee-preserved.
+
// Temporary registers.
static const GPRReg regT0 = X86Registers::eax;
static const GPRReg regT1 = X86Registers::edx;
static const GPRReg regT2 = X86Registers::ecx;
- static const GPRReg regT3 = X86Registers::ebx; // Callee-save
- static const GPRReg regT4 = X86Registers::esi; // Callee-save
- static const GPRReg regT5 = X86Registers::edi; // Callee-save
+ static const GPRReg regT3 = X86Registers::ebx;
+ static const GPRReg regT4 = X86Registers::edi;
+ static const GPRReg regT5 = X86Registers::esi;
+ // These registers match the baseline JIT.
+ static const GPRReg cachedResultRegister = regT0;
+ static const GPRReg cachedResultRegister2 = regT1;
static const GPRReg callFrameRegister = X86Registers::ebp;
// These constants provide the names for the general purpose argument & return value registers.
static const GPRReg argumentGPR0 = X86Registers::ecx; // regT2
static const GPRReg argumentGPR1 = X86Registers::edx; // regT1
- static const GPRReg argumentGPR2 = X86Registers::eax; // regT0
- static const GPRReg argumentGPR3 = X86Registers::ebx; // regT3
static const GPRReg nonArgGPR0 = X86Registers::esi; // regT4
+ static const GPRReg nonArgGPR1 = X86Registers::eax; // regT0
+ static const GPRReg nonArgGPR2 = X86Registers::ebx; // regT3
static const GPRReg returnValueGPR = X86Registers::eax; // regT0
static const GPRReg returnValueGPR2 = X86Registers::edx; // regT1
static const GPRReg nonPreservedNonReturnGPR = X86Registers::ecx;
@@ -349,18 +321,13 @@ public:
return registerForIndex[index];
}
- static GPRReg toArgumentRegister(unsigned)
- {
- UNREACHABLE_FOR_PLATFORM();
- return InvalidGPRReg;
- }
-
static unsigned toIndex(GPRReg reg)
{
ASSERT(reg != InvalidGPRReg);
ASSERT(static_cast<int>(reg) < 8);
- static const unsigned indexForRegister[8] = { 0, 2, 1, 3, InvalidIndex, InvalidIndex, 4, 5 };
+ static const unsigned indexForRegister[8] = { 0, 2, 1, 3, InvalidIndex, InvalidIndex, 5, 4 };
unsigned result = indexForRegister[reg];
+ ASSERT(result != InvalidIndex);
return result;
}
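
Every GPRInfo/FPRInfo class in this file repeats the same scheme: toRegister maps a dense allocator index onto a sparse machine register file, toIndex inverts it, and InvalidIndex marks registers the allocator must never hand out (the added ASSERTs catch lookups of such registers). A self-contained round-trip check, with tables derived from the X86 regT definitions above:

    #include <cassert>

    static const unsigned InvalidIndex = 0xffffffff;
    // Machine registers 0..7; esp/ebp (4, 5) are never allocatable.
    static const unsigned indexForRegister[8] =
        { 0, 2, 1, 3, InvalidIndex, InvalidIndex, 5, 4 };
    static const unsigned registerForIndex[6] = { 0, 2, 1, 3, 7, 6 };

    int main()
    {
        for (unsigned i = 0; i < 6; ++i)
            assert(indexForRegister[registerForIndex[i]] == i); // inverse maps
        return 0;
    }
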
@@ -383,10 +350,8 @@ public:
#if CPU(X86_64)
#if !OS(WINDOWS)
#define NUMBER_OF_ARGUMENT_REGISTERS 6u
-#define NUMBER_OF_CALLEE_SAVES_REGISTERS 5u
#else
#define NUMBER_OF_ARGUMENT_REGISTERS 4u
-#define NUMBER_OF_CALLEE_SAVES_REGISTERS 7u
#endif
class GPRInfo {
@@ -395,78 +360,50 @@ public:
static const unsigned numberOfRegisters = 11;
static const unsigned numberOfArgumentRegisters = NUMBER_OF_ARGUMENT_REGISTERS;
+ // Note: regT3 is required to be callee-preserved.
+
// These registers match the baseline JIT.
+ static const GPRReg cachedResultRegister = X86Registers::eax;
static const GPRReg callFrameRegister = X86Registers::ebp;
static const GPRReg tagTypeNumberRegister = X86Registers::r14;
static const GPRReg tagMaskRegister = X86Registers::r15;
-
// Temporary registers.
static const GPRReg regT0 = X86Registers::eax;
-#if !OS(WINDOWS)
- static const GPRReg regT1 = X86Registers::esi;
- static const GPRReg regT2 = X86Registers::edx;
- static const GPRReg regT3 = X86Registers::ecx;
- static const GPRReg regT4 = X86Registers::r8;
- static const GPRReg regT5 = X86Registers::r10;
- static const GPRReg regT6 = X86Registers::edi;
- static const GPRReg regT7 = X86Registers::r9;
-#else
static const GPRReg regT1 = X86Registers::edx;
- static const GPRReg regT2 = X86Registers::r8;
- static const GPRReg regT3 = X86Registers::r9;
- static const GPRReg regT4 = X86Registers::r10;
- static const GPRReg regT5 = X86Registers::ecx;
-#endif
-
- static const GPRReg regCS0 = X86Registers::ebx;
-
-#if !OS(WINDOWS)
- static const GPRReg regCS1 = X86Registers::r12;
- static const GPRReg regCS2 = X86Registers::r13;
- static const GPRReg regCS3 = X86Registers::r14;
- static const GPRReg regCS4 = X86Registers::r15;
-#else
- static const GPRReg regCS1 = X86Registers::esi;
- static const GPRReg regCS2 = X86Registers::edi;
- static const GPRReg regCS3 = X86Registers::r12;
- static const GPRReg regCS4 = X86Registers::r13;
- static const GPRReg regCS5 = X86Registers::r14;
- static const GPRReg regCS6 = X86Registers::r15;
-#endif
-
+ static const GPRReg regT2 = X86Registers::ecx;
+ static const GPRReg regT3 = X86Registers::ebx;
+ static const GPRReg regT4 = X86Registers::edi;
+ static const GPRReg regT5 = X86Registers::esi;
+ static const GPRReg regT6 = X86Registers::r8;
+ static const GPRReg regT7 = X86Registers::r9;
+ static const GPRReg regT8 = X86Registers::r10;
+ static const GPRReg regT9 = X86Registers::r12;
+ static const GPRReg regT10 = X86Registers::r13;
// These constants provide the names for the general purpose argument & return value registers.
#if !OS(WINDOWS)
- static const GPRReg argumentGPR0 = X86Registers::edi; // regT6
- static const GPRReg argumentGPR1 = X86Registers::esi; // regT1
- static const GPRReg argumentGPR2 = X86Registers::edx; // regT2
- static const GPRReg argumentGPR3 = X86Registers::ecx; // regT3
- static const GPRReg argumentGPR4 = X86Registers::r8; // regT4
- static const GPRReg argumentGPR5 = X86Registers::r9; // regT7
+ static const GPRReg argumentGPR0 = X86Registers::edi; // regT4
+ static const GPRReg argumentGPR1 = X86Registers::esi; // regT5
+ static const GPRReg argumentGPR2 = X86Registers::edx; // regT1
+ static const GPRReg argumentGPR3 = X86Registers::ecx; // regT2
+ static const GPRReg argumentGPR4 = X86Registers::r8; // regT6
+ static const GPRReg argumentGPR5 = X86Registers::r9; // regT7
#else
- static const GPRReg argumentGPR0 = X86Registers::ecx; // regT5
- static const GPRReg argumentGPR1 = X86Registers::edx; // regT1
- static const GPRReg argumentGPR2 = X86Registers::r8; // regT2
- static const GPRReg argumentGPR3 = X86Registers::r9; // regT3
+ static const GPRReg argumentGPR0 = X86Registers::ecx;
+ static const GPRReg argumentGPR1 = X86Registers::edx;
+ static const GPRReg argumentGPR2 = X86Registers::r8; // regT6
+ static const GPRReg argumentGPR3 = X86Registers::r9; // regT7
#endif
- static const GPRReg nonArgGPR0 = X86Registers::r10; // regT5 (regT4 on Windows)
+ static const GPRReg nonArgGPR0 = X86Registers::r10; // regT8
+ static const GPRReg nonArgGPR1 = X86Registers::ebx; // regT3
+ static const GPRReg nonArgGPR2 = X86Registers::r12; // regT9
static const GPRReg returnValueGPR = X86Registers::eax; // regT0
- static const GPRReg returnValueGPR2 = X86Registers::edx; // regT1 or regT2
- static const GPRReg nonPreservedNonReturnGPR = X86Registers::r10; // regT5 (regT4 on Windows)
- static const GPRReg nonPreservedNonArgumentGPR = X86Registers::r10; // regT5 (regT4 on Windows)
-
- // FIXME: I believe that all uses of this are dead in the sense that it just causes the scratch
- // register allocator to select a different register and potentially spill things. It would be better
- // if we instead had a more explicit way of saying that we don't have a scratch register.
- static const GPRReg patchpointScratchRegister;
+ static const GPRReg returnValueGPR2 = X86Registers::edx; // regT1
+ static const GPRReg nonPreservedNonReturnGPR = X86Registers::esi;
static GPRReg toRegister(unsigned index)
{
ASSERT(index < numberOfRegisters);
-#if !OS(WINDOWS)
- static const GPRReg registerForIndex[numberOfRegisters] = { regT0, regT1, regT2, regT3, regT4, regT5, regT6, regT7, regCS0, regCS1, regCS2 };
-#else
- static const GPRReg registerForIndex[numberOfRegisters] = { regT0, regT1, regT2, regT3, regT4, regT5, regCS0, regCS1, regCS2, regCS3, regCS4 };
-#endif
+ static const GPRReg registerForIndex[numberOfRegisters] = { regT0, regT1, regT2, regT3, regT4, regT5, regT6, regT7, regT8, regT9, regT10 };
return registerForIndex[index];
}
@@ -485,11 +422,7 @@ public:
{
ASSERT(reg != InvalidGPRReg);
ASSERT(static_cast<int>(reg) < 16);
-#if !OS(WINDOWS)
- static const unsigned indexForRegister[16] = { 0, 3, 2, 8, InvalidIndex, InvalidIndex, 1, 6, 4, 7, 5, InvalidIndex, 9, 10, InvalidIndex, InvalidIndex };
-#else
- static const unsigned indexForRegister[16] = { 0, 5, 1, 6, InvalidIndex, InvalidIndex, 7, 8, 2, 3, 4, InvalidIndex, 9, 10, InvalidIndex, InvalidIndex };
-#endif
+ static const unsigned indexForRegister[16] = { 0, 2, 1, 3, InvalidIndex, InvalidIndex, 5, 4, 6, 7, 8, InvalidIndex, 9, 10, InvalidIndex, InvalidIndex };
return indexForRegister[reg];
}
@@ -506,16 +439,6 @@ public:
return nameForRegister[reg];
}
- static const std::array<GPRReg, 3>& reservedRegisters()
- {
- static const std::array<GPRReg, 3> reservedRegisters { {
- MacroAssembler::s_scratchRegister,
- tagTypeNumberRegister,
- tagMaskRegister,
- } };
- return reservedRegisters;
- }
-
static const unsigned InvalidIndex = 0xffffffff;
};
@@ -523,7 +446,6 @@ public:
#if CPU(ARM)
#define NUMBER_OF_ARGUMENT_REGISTERS 4u
-#define NUMBER_OF_CALLEE_SAVES_REGISTERS 0u
class GPRInfo {
public:
@@ -531,11 +453,13 @@ public:
static const unsigned numberOfRegisters = 9;
static const unsigned numberOfArgumentRegisters = NUMBER_OF_ARGUMENT_REGISTERS;
+ // Note: regT3 is required to be callee-preserved.
+
// Temporary registers.
static const GPRReg regT0 = ARMRegisters::r0;
static const GPRReg regT1 = ARMRegisters::r1;
static const GPRReg regT2 = ARMRegisters::r2;
- static const GPRReg regT3 = ARMRegisters::r3;
+ static const GPRReg regT3 = ARMRegisters::r4;
static const GPRReg regT4 = ARMRegisters::r8;
static const GPRReg regT5 = ARMRegisters::r9;
static const GPRReg regT6 = ARMRegisters::r10;
@@ -544,19 +468,22 @@ public:
#else
static const GPRReg regT7 = ARMRegisters::r7;
#endif
- static const GPRReg regT8 = ARMRegisters::r4;
+ static const GPRReg regT8 = ARMRegisters::r3;
// These registers match the baseline JIT.
+ static const GPRReg cachedResultRegister = regT0;
+ static const GPRReg cachedResultRegister2 = regT1;
static const GPRReg callFrameRegister = ARMRegisters::fp;
// These constants provide the names for the general purpose argument & return value registers.
static const GPRReg argumentGPR0 = ARMRegisters::r0; // regT0
static const GPRReg argumentGPR1 = ARMRegisters::r1; // regT1
static const GPRReg argumentGPR2 = ARMRegisters::r2; // regT2
- static const GPRReg argumentGPR3 = ARMRegisters::r3; // regT3
- static const GPRReg nonArgGPR0 = ARMRegisters::r4; // regT8
+ static const GPRReg argumentGPR3 = ARMRegisters::r3; // regT8
+ static const GPRReg nonArgGPR0 = ARMRegisters::r4; // regT3
static const GPRReg nonArgGPR1 = ARMRegisters::r8; // regT4
+ static const GPRReg nonArgGPR2 = ARMRegisters::r9; // regT5
static const GPRReg returnValueGPR = ARMRegisters::r0; // regT0
static const GPRReg returnValueGPR2 = ARMRegisters::r1; // regT1
- static const GPRReg nonPreservedNonReturnGPR = ARMRegisters::r5;
+ static const GPRReg nonPreservedNonReturnGPR = ARMRegisters::r5; // regT7
static GPRReg toRegister(unsigned index)
{
@@ -565,24 +492,18 @@ public:
return registerForIndex[index];
}
- static GPRReg toArgumentRegister(unsigned index)
- {
- ASSERT(index < numberOfArgumentRegisters);
- static const GPRReg registerForIndex[numberOfArgumentRegisters] = { argumentGPR0, argumentGPR1, argumentGPR2, argumentGPR3 };
- return registerForIndex[index];
- }
-
static unsigned toIndex(GPRReg reg)
{
ASSERT(reg != InvalidGPRReg);
ASSERT(static_cast<int>(reg) < 16);
static const unsigned indexForRegister[16] =
#if CPU(ARM_THUMB2)
- { 0, 1, 2, 3, 8, InvalidIndex, InvalidIndex, InvalidIndex, 4, 5, 6, 7, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex };
+ { 0, 1, 2, 8, 3, 9, InvalidIndex, InvalidIndex, 4, 5, 6, 7, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex };
#else
- { 0, 1, 2, 3, 8, InvalidIndex, InvalidIndex, 7, 4, 5, 6, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex };
+ { 0, 1, 2, 8, 3, 9, InvalidIndex, 7, 4, 5, 6, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex };
#endif
unsigned result = indexForRegister[reg];
+ ASSERT(result != InvalidIndex);
return result;
}
@@ -606,27 +527,26 @@ public:
#if CPU(ARM64)
#define NUMBER_OF_ARGUMENT_REGISTERS 8u
-// Callee Saves includes x19..x28 and FP registers q8..q15
-#define NUMBER_OF_CALLEE_SAVES_REGISTERS 18u
class GPRInfo {
public:
typedef GPRReg RegisterType;
static const unsigned numberOfRegisters = 16;
- static const unsigned numberOfArgumentRegisters = 8;
+
+ // Note: regT3 is required to be callee-preserved.
// These registers match the baseline JIT.
+ static const GPRReg cachedResultRegister = ARM64Registers::x0;
+ static const GPRReg timeoutCheckRegister = ARM64Registers::x26;
static const GPRReg callFrameRegister = ARM64Registers::fp;
static const GPRReg tagTypeNumberRegister = ARM64Registers::x27;
static const GPRReg tagMaskRegister = ARM64Registers::x28;
- static const GPRReg dataTempRegister = MacroAssembler::dataTempRegister;
- static const GPRReg memoryTempRegister = MacroAssembler::memoryTempRegister;
// Temporary registers.
static const GPRReg regT0 = ARM64Registers::x0;
static const GPRReg regT1 = ARM64Registers::x1;
static const GPRReg regT2 = ARM64Registers::x2;
- static const GPRReg regT3 = ARM64Registers::x3;
- static const GPRReg regT4 = ARM64Registers::x4;
+ static const GPRReg regT3 = ARM64Registers::x23;
+ static const GPRReg regT4 = ARM64Registers::x24;
static const GPRReg regT5 = ARM64Registers::x5;
static const GPRReg regT6 = ARM64Registers::x6;
static const GPRReg regT7 = ARM64Registers::x7;
@@ -638,16 +558,6 @@ public:
static const GPRReg regT13 = ARM64Registers::x13;
static const GPRReg regT14 = ARM64Registers::x14;
static const GPRReg regT15 = ARM64Registers::x15;
- static const GPRReg regCS0 = ARM64Registers::x19; // Used by FTL only
- static const GPRReg regCS1 = ARM64Registers::x20; // Used by FTL only
- static const GPRReg regCS2 = ARM64Registers::x21; // Used by FTL only
- static const GPRReg regCS3 = ARM64Registers::x22; // Used by FTL only
- static const GPRReg regCS4 = ARM64Registers::x23; // Used by FTL only
- static const GPRReg regCS5 = ARM64Registers::x24; // Used by FTL only
- static const GPRReg regCS6 = ARM64Registers::x25; // Used by FTL only
- static const GPRReg regCS7 = ARM64Registers::x26;
- static const GPRReg regCS8 = ARM64Registers::x27; // tagTypeNumber
- static const GPRReg regCS9 = ARM64Registers::x28; // tagMask
// These constants provide the names for the general purpose argument & return value registers.
static const GPRReg argumentGPR0 = ARM64Registers::x0; // regT0
static const GPRReg argumentGPR1 = ARM64Registers::x1; // regT1
@@ -659,13 +569,12 @@ public:
static const GPRReg argumentGPR7 = ARM64Registers::x7; // regT7
static const GPRReg nonArgGPR0 = ARM64Registers::x8; // regT8
static const GPRReg nonArgGPR1 = ARM64Registers::x9; // regT9
+ static const GPRReg nonArgGPR2 = ARM64Registers::x10; // regT10
static const GPRReg returnValueGPR = ARM64Registers::x0; // regT0
static const GPRReg returnValueGPR2 = ARM64Registers::x1; // regT1
static const GPRReg nonPreservedNonReturnGPR = ARM64Registers::x2;
- static const GPRReg nonPreservedNonArgumentGPR = ARM64Registers::x8;
- static const GPRReg patchpointScratchRegister;
- // GPRReg mapping is direct, the machine register numbers can
+    // GPRReg mapping is direct, the machine register numbers can
// be used directly as indices into the GPR RegisterBank.
COMPILE_ASSERT(ARM64Registers::q0 == 0, q0_is_0);
COMPILE_ASSERT(ARM64Registers::q1 == 1, q1_is_1);
@@ -689,20 +598,12 @@ public:
}
static unsigned toIndex(GPRReg reg)
{
- if (reg > regT15)
- return InvalidIndex;
return (unsigned)reg;
}
- static GPRReg toArgumentRegister(unsigned index)
- {
- ASSERT(index < numberOfArgumentRegisters);
- return toRegister(index);
- }
-
static const char* debugName(GPRReg reg)
{
- ASSERT(reg != InvalidGPRReg);
+ ASSERT(static_cast<unsigned>(reg) != InvalidGPRReg);
ASSERT(static_cast<unsigned>(reg) < 32);
static const char* nameForRegister[32] = {
"r0", "r1", "r2", "r3", "r4", "r5", "r6", "r7",
@@ -713,17 +614,6 @@ public:
return nameForRegister[reg];
}
- static const std::array<GPRReg, 4>& reservedRegisters()
- {
- static const std::array<GPRReg, 4> reservedRegisters { {
- dataTempRegister,
- memoryTempRegister,
- tagTypeNumberRegister,
- tagMaskRegister,
- } };
- return reservedRegisters;
- }
-
static const unsigned InvalidIndex = 0xffffffff;
};
@@ -731,7 +621,6 @@ public:
#if CPU(MIPS)
#define NUMBER_OF_ARGUMENT_REGISTERS 4u
-#define NUMBER_OF_CALLEE_SAVES_REGISTERS 0u
class GPRInfo {
public:
@@ -741,26 +630,31 @@ public:
// regT0 must be v0 for returning a 32-bit value.
// regT1 must be v1 for returning a pair of 32-bit values.
+ // regT3 must be saved in the callee, so use an S register.
// Temporary registers.
static const GPRReg regT0 = MIPSRegisters::v0;
static const GPRReg regT1 = MIPSRegisters::v1;
- static const GPRReg regT2 = MIPSRegisters::t2;
- static const GPRReg regT3 = MIPSRegisters::t3;
- static const GPRReg regT4 = MIPSRegisters::t4;
- static const GPRReg regT5 = MIPSRegisters::t5;
- static const GPRReg regT6 = MIPSRegisters::t6;
+ static const GPRReg regT2 = MIPSRegisters::t4;
+ static const GPRReg regT3 = MIPSRegisters::s2;
+ static const GPRReg regT4 = MIPSRegisters::t5;
+ static const GPRReg regT5 = MIPSRegisters::t6;
+ static const GPRReg regT6 = MIPSRegisters::s0;
// These registers match the baseline JIT.
+ static const GPRReg cachedResultRegister = regT0;
+ static const GPRReg cachedResultRegister2 = regT1;
static const GPRReg callFrameRegister = MIPSRegisters::fp;
// These constants provide the names for the general purpose argument & return value registers.
static const GPRReg argumentGPR0 = MIPSRegisters::a0;
static const GPRReg argumentGPR1 = MIPSRegisters::a1;
static const GPRReg argumentGPR2 = MIPSRegisters::a2;
static const GPRReg argumentGPR3 = MIPSRegisters::a3;
- static const GPRReg nonArgGPR0 = regT0;
+ static const GPRReg nonArgGPR0 = regT2;
+ static const GPRReg nonArgGPR1 = regT3;
+ static const GPRReg nonArgGPR2 = regT4;
static const GPRReg returnValueGPR = regT0;
static const GPRReg returnValueGPR2 = regT1;
- static const GPRReg nonPreservedNonReturnGPR = regT2;
+ static const GPRReg nonPreservedNonReturnGPR = regT5;
static GPRReg toRegister(unsigned index)
{
@@ -769,24 +663,17 @@ public:
return registerForIndex[index];
}
- static GPRReg toArgumentRegister(unsigned index)
- {
- ASSERT(index < numberOfArgumentRegisters);
- static const GPRReg registerForIndex[numberOfArgumentRegisters] = { argumentGPR0, argumentGPR1, argumentGPR2, argumentGPR3 };
- return registerForIndex[index];
- }
-
static unsigned toIndex(GPRReg reg)
{
ASSERT(reg != InvalidGPRReg);
- ASSERT(reg < 32);
- static const unsigned indexForRegister[32] = {
+ ASSERT(reg < 24);
+ static const unsigned indexForRegister[24] = {
InvalidIndex, InvalidIndex, 0, 1, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex,
- InvalidIndex, InvalidIndex, 2, 3, 4, 5, 6, InvalidIndex,
- InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex,
- InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex
+ InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, 2, 4, 5, InvalidIndex,
+ 6, InvalidIndex, 3, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex
};
unsigned result = indexForRegister[reg];
+ ASSERT(result != InvalidIndex);
return result;
}
@@ -810,7 +697,6 @@ public:
#if CPU(SH4)
#define NUMBER_OF_ARGUMENT_REGISTERS 4u
-#define NUMBER_OF_CALLEE_SAVES_REGISTERS 0u
class GPRInfo {
public:
@@ -822,12 +708,12 @@ public:
// Temporary registers.
static const GPRReg regT0 = SH4Registers::r0;
static const GPRReg regT1 = SH4Registers::r1;
- static const GPRReg regT2 = SH4Registers::r6;
- static const GPRReg regT3 = SH4Registers::r7;
- static const GPRReg regT4 = SH4Registers::r2;
- static const GPRReg regT5 = SH4Registers::r3;
- static const GPRReg regT6 = SH4Registers::r4;
- static const GPRReg regT7 = SH4Registers::r5;
+ static const GPRReg regT2 = SH4Registers::r2;
+ static const GPRReg regT3 = SH4Registers::r10;
+ static const GPRReg regT4 = SH4Registers::r4;
+ static const GPRReg regT5 = SH4Registers::r5;
+ static const GPRReg regT6 = SH4Registers::r6;
+ static const GPRReg regT7 = SH4Registers::r7;
static const GPRReg regT8 = SH4Registers::r8;
static const GPRReg regT9 = SH4Registers::r9;
// These registers match the baseline JIT.
@@ -835,11 +721,13 @@ public:
static const GPRReg cachedResultRegister2 = regT1;
static const GPRReg callFrameRegister = SH4Registers::fp;
// These constants provide the names for the general purpose argument & return value registers.
- static const GPRReg argumentGPR0 = SH4Registers::r4; // regT6
- static const GPRReg argumentGPR1 = SH4Registers::r5; // regT7
- static const GPRReg argumentGPR2 = SH4Registers::r6; // regT2
- static const GPRReg argumentGPR3 = SH4Registers::r7; // regT3
- static const GPRReg nonArgGPR0 = regT4;
+ static const GPRReg argumentGPR0 = regT4;
+ static const GPRReg argumentGPR1 = regT5;
+ static const GPRReg argumentGPR2 = regT6;
+ static const GPRReg argumentGPR3 = regT7;
+ static const GPRReg nonArgGPR0 = regT3;
+ static const GPRReg nonArgGPR1 = regT8;
+ static const GPRReg nonArgGPR2 = regT9;
static const GPRReg returnValueGPR = regT0;
static const GPRReg returnValueGPR2 = regT1;
static const GPRReg nonPreservedNonReturnGPR = regT2;
@@ -855,8 +743,9 @@ public:
{
ASSERT(reg != InvalidGPRReg);
ASSERT(reg < 14);
- static const unsigned indexForRegister[14] = { 0, 1, 4, 5, 6, 7, 2, 3, 8, 9, InvalidIndex, InvalidIndex, InvalidIndex, InvalidIndex };
+ static const unsigned indexForRegister[14] = { 0, 1, 2, InvalidIndex, 4, 5, 6, 7, 8, 9, 3, InvalidIndex, InvalidIndex, InvalidIndex };
unsigned result = indexForRegister[reg];
+ ASSERT(result != InvalidIndex);
return result;
}
@@ -876,20 +765,6 @@ public:
#endif // CPU(SH4)
-inline GPRReg argumentRegisterFor(unsigned argumentIndex)
-{
-#if USE(JSVALUE64)
- if (argumentIndex >= NUMBER_OF_ARGUMENT_REGISTERS)
- return InvalidGPRReg;
-
- return GPRInfo::toArgumentRegister(argumentIndex);
-#else
- UNUSED_PARAM(argumentIndex);
-
- return InvalidGPRReg;
-#endif
-}
-
// The baseline JIT uses "accumulator" style execution with regT0 (for 64-bit)
// and regT0 + regT1 (for 32-bit) serving as the accumulator register(s) for
// passing results of one opcode to the next. Hence:
diff --git a/Source/JavaScriptCore/jit/HostCallReturnValue.cpp b/Source/JavaScriptCore/jit/HostCallReturnValue.cpp
index e8d01916b..528fb2bc4 100644
--- a/Source/JavaScriptCore/jit/HostCallReturnValue.cpp
+++ b/Source/JavaScriptCore/jit/HostCallReturnValue.cpp
@@ -29,7 +29,6 @@
#include "CallFrame.h"
#include "JSCJSValueInlines.h"
#include "JSObject.h"
-#include "JSCInlines.h"
#include <wtf/InlineASM.h>
diff --git a/Source/JavaScriptCore/jit/HostCallReturnValue.h b/Source/JavaScriptCore/jit/HostCallReturnValue.h
index 71ff4e5bd..f4c8bc703 100644
--- a/Source/JavaScriptCore/jit/HostCallReturnValue.h
+++ b/Source/JavaScriptCore/jit/HostCallReturnValue.h
@@ -28,6 +28,7 @@
#include "JSCJSValue.h"
#include "MacroAssemblerCodeRef.h"
+#include <wtf/Platform.h>
#if ENABLE(JIT)
@@ -41,7 +42,7 @@ namespace JSC {
extern "C" EncodedJSValue HOST_CALL_RETURN_VALUE_OPTION getHostCallReturnValue() REFERENCED_FROM_ASM WTF_INTERNAL;
-#if COMPILER(GCC_OR_CLANG)
+#if COMPILER(GCC)
// This is a public declaration only to convince CLANG not to elide it.
extern "C" EncodedJSValue HOST_CALL_RETURN_VALUE_OPTION getHostCallReturnValueWithExecState(ExecState*) REFERENCED_FROM_ASM WTF_INTERNAL;
@@ -51,11 +52,11 @@ inline void initializeHostCallReturnValue()
getHostCallReturnValueWithExecState(0);
}
-#else // COMPILER(GCC_OR_CLANG)
+#else // COMPILER(GCC)
inline void initializeHostCallReturnValue() { }
-#endif // COMPILER(GCC_OR_CLANG)
+#endif // COMPILER(GCC)
} // namespace JSC
diff --git a/Source/JavaScriptCore/jit/IntrinsicEmitter.cpp b/Source/JavaScriptCore/jit/IntrinsicEmitter.cpp
deleted file mode 100644
index 5243b49ea..000000000
--- a/Source/JavaScriptCore/jit/IntrinsicEmitter.cpp
+++ /dev/null
@@ -1,134 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- *
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. Neither the name of Apple Inc. ("Apple") nor the names of
- * its contributors may be used to endorse or promote products derived
- * from this software without specific prior written permission.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
- * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
- * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
- * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
- * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
- * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
- * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-
-#if ENABLE(JIT)
-
-#include "CCallHelpers.h"
-#include "CallFrame.h"
-#include "CodeBlock.h"
-#include "JSArrayBufferView.h"
-#include "JSCJSValueInlines.h"
-#include "JSCellInlines.h"
-#include "PolymorphicAccess.h"
-
-namespace JSC {
-
-typedef CCallHelpers::TrustedImm32 TrustedImm32;
-typedef CCallHelpers::Imm32 Imm32;
-typedef CCallHelpers::TrustedImmPtr TrustedImmPtr;
-typedef CCallHelpers::ImmPtr ImmPtr;
-typedef CCallHelpers::TrustedImm64 TrustedImm64;
-typedef CCallHelpers::Imm64 Imm64;
-
-bool AccessCase::canEmitIntrinsicGetter(JSFunction* getter, Structure* structure)
-{
-
- switch (getter->intrinsic()) {
- case TypedArrayByteOffsetIntrinsic:
- case TypedArrayByteLengthIntrinsic:
- case TypedArrayLengthIntrinsic: {
- TypedArrayType type = structure->classInfo()->typedArrayStorageType;
-
- if (!isTypedView(type))
- return false;
-
- return true;
- }
- default:
- return false;
- }
- RELEASE_ASSERT_NOT_REACHED();
-}
-
-void AccessCase::emitIntrinsicGetter(AccessGenerationState& state)
-{
- CCallHelpers& jit = *state.jit;
- JSValueRegs valueRegs = state.valueRegs;
- GPRReg baseGPR = state.baseGPR;
- GPRReg valueGPR = valueRegs.payloadGPR();
-
- switch (intrinsic()) {
- case TypedArrayLengthIntrinsic: {
- jit.load32(MacroAssembler::Address(state.baseGPR, JSArrayBufferView::offsetOfLength()), valueGPR);
- jit.boxInt32(valueGPR, valueRegs, CCallHelpers::DoNotHaveTagRegisters);
- state.succeed();
- return;
- }
-
- case TypedArrayByteLengthIntrinsic: {
- TypedArrayType type = structure()->classInfo()->typedArrayStorageType;
-
- jit.load32(MacroAssembler::Address(state.baseGPR, JSArrayBufferView::offsetOfLength()), valueGPR);
-
- if (elementSize(type) > 1) {
-            // We can use a bitshift here since TypedArrays cannot have a byteLength that overflows an int32.
- jit.lshift32(valueGPR, Imm32(logElementSize(type)), valueGPR);
- }
-
- jit.boxInt32(valueGPR, valueRegs, CCallHelpers::DoNotHaveTagRegisters);
- state.succeed();
- return;
- }
-
- case TypedArrayByteOffsetIntrinsic: {
- GPRReg scratchGPR = state.scratchGPR;
-
- CCallHelpers::Jump emptyByteOffset = jit.branch32(
- MacroAssembler::NotEqual,
- MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfMode()),
- TrustedImm32(WastefulTypedArray));
-
- jit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
- jit.loadPtr(MacroAssembler::Address(baseGPR, JSArrayBufferView::offsetOfVector()), valueGPR);
- jit.loadPtr(MacroAssembler::Address(scratchGPR, Butterfly::offsetOfArrayBuffer()), scratchGPR);
- jit.loadPtr(MacroAssembler::Address(scratchGPR, ArrayBuffer::offsetOfData()), scratchGPR);
- jit.subPtr(scratchGPR, valueGPR);
-
- CCallHelpers::Jump done = jit.jump();
-
- emptyByteOffset.link(&jit);
- jit.move(TrustedImmPtr(0), valueGPR);
-
- done.link(&jit);
-
- jit.boxInt32(valueGPR, valueRegs, CCallHelpers::DoNotHaveTagRegisters);
- state.succeed();
- return;
- }
-
- default:
- break;
- }
- RELEASE_ASSERT_NOT_REACHED();
-}
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JIT.cpp b/Source/JavaScriptCore/jit/JIT.cpp
index ac8c132aa..c3508b01d 100644
--- a/Source/JavaScriptCore/jit/JIT.cpp
+++ b/Source/JavaScriptCore/jit/JIT.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2009, 2012-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,11 +26,15 @@
#include "config.h"
#if ENABLE(JIT)
-
#include "JIT.h"
+// This probably does not belong here; it is added here for now as a quick Windows build fix.
+#if ENABLE(ASSEMBLER) && CPU(X86) && !OS(MAC_OS_X)
+#include "MacroAssembler.h"
+JSC::MacroAssemblerX86Common::SSE2CheckState JSC::MacroAssemblerX86Common::s_sse2CheckState = NotCheckedSSE2;
+#endif
+
#include "CodeBlock.h"
-#include "CodeBlockWithJITType.h"
#include "DFGCapabilities.h"
#include "Interpreter.h"
#include "JITInlines.h"
@@ -38,39 +42,45 @@
#include "JSArray.h"
#include "JSFunction.h"
#include "LinkBuffer.h"
-#include "MaxFrameExtentForSlowPathCall.h"
-#include "JSCInlines.h"
-#include "PCToCodeOriginMap.h"
-#include "ProfilerDatabase.h"
+#include "Operations.h"
+#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
#include "SlowPathCall.h"
-#include "StackAlignment.h"
-#include "TypeProfilerLog.h"
#include <wtf/CryptographicallyRandomNumber.h>
using namespace std;
namespace JSC {
-void ctiPatchCallByReturnAddress(ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
+void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
+{
+ RepatchBuffer repatchBuffer(codeblock);
+ repatchBuffer.relinkNearCallerToTrampoline(returnAddress, newCalleeFunction);
+}
+
+void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction)
{
- MacroAssembler::repatchCall(
- CodeLocationCall(MacroAssemblerCodePtr(returnAddress)),
- newCalleeFunction);
+ RepatchBuffer repatchBuffer(codeblock);
+ repatchBuffer.relinkCallerToTrampoline(returnAddress, newCalleeFunction);
+}
+
+void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction)
+{
+ RepatchBuffer repatchBuffer(codeblock);
+ repatchBuffer.relinkCallerToFunction(returnAddress, newCalleeFunction);
}
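
All variants of ctiPatchCallByReturnAddress do the same job: given the address a call will return to, locate that call instruction and rewrite its target in place. A toy model that replaces instruction rewriting with a map keyed by return address — every name here is illustrative only:

    #include <cassert>
    #include <map>

    using Target = int (*)();
    static std::map<const void*, Target> callSites; // returnAddress -> callee

    int oldCallee() { return 1; }
    int newCallee() { return 2; }

    void patchCallByReturnAddress(const void* returnAddress, Target newTarget)
    {
        callSites[returnAddress] = newTarget; // real code rewrites the call
    }

    int main()
    {
        static const char siteTag = 0; // stands in for a code address
        callSites[&siteTag] = oldCallee;
        patchCallByReturnAddress(&siteTag, newCallee);
        assert(callSites[&siteTag]() == 2);
        return 0;
    }
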
JIT::JIT(VM* vm, CodeBlock* codeBlock)
: JSInterfaceJIT(vm, codeBlock)
, m_interpreter(vm->interpreter)
, m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0)
- , m_bytecodeOffset(std::numeric_limits<unsigned>::max())
+ , m_bytecodeOffset((unsigned)-1)
, m_getByIdIndex(UINT_MAX)
, m_putByIdIndex(UINT_MAX)
, m_byValInstructionIndex(UINT_MAX)
, m_callLinkInfoIndex(UINT_MAX)
, m_randomGenerator(cryptographicallyRandomNumber())
- , m_pcToCodeOriginMapBuilder(*vm)
, m_canBeOptimized(false)
, m_shouldEmitProfiling(false)
{
@@ -86,41 +96,13 @@ void JIT::emitEnterOptimizationCheck()
skipOptimize.append(branchAdd32(Signed, TrustedImm32(Options::executionCounterIncrementForEntry()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
ASSERT(!m_bytecodeOffset);
-
- copyCalleeSavesFromFrameOrRegisterToVMCalleeSavesBuffer();
-
callOperation(operationOptimize, m_bytecodeOffset);
skipOptimize.append(branchTestPtr(Zero, returnValueGPR));
- move(returnValueGPR2, stackPointerRegister);
jump(returnValueGPR);
skipOptimize.link(this);
}
#endif
-void JIT::emitNotifyWrite(WatchpointSet* set)
-{
- if (!set || set->state() == IsInvalidated)
- return;
-
- addSlowCase(branch8(NotEqual, AbsoluteAddress(set->addressOfState()), TrustedImm32(IsInvalidated)));
-}
-
-void JIT::emitNotifyWrite(GPRReg pointerToSet)
-{
- addSlowCase(branch8(NotEqual, Address(pointerToSet, WatchpointSet::offsetOfState()), TrustedImm32(IsInvalidated)));
-}
-
-void JIT::assertStackPointerOffset()
-{
- if (ASSERT_DISABLED)
- return;
-
- addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT0);
- Jump ok = branchPtr(Equal, regT0, stackPointerRegister);
- breakpoint();
- ok.link(this);
-}
-
#define NEXT_OPCODE(name) \
m_bytecodeOffset += OPCODE_LENGTH(name); \
break;
@@ -146,9 +128,6 @@ void JIT::assertStackPointerOffset()
void JIT::privateCompileMainPass()
{
- jitAssertTagsInPlace();
- jitAssertArgumentCountSane();
-
Instruction* instructionsBegin = m_codeBlock->instructions().begin();
unsigned instructionCount = m_codeBlock->instructions().size();
@@ -160,8 +139,6 @@ void JIT::privateCompileMainPass()
Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
ASSERT_WITH_MESSAGE(m_interpreter->isOpcode(currentInstruction->u.opcode), "privateCompileMainPass gone bad @ %d", m_bytecodeOffset);
- m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeOffset));
-
#if ENABLE(OPCODE_SAMPLING)
if (m_bytecodeOffset > 0) // Avoid the overhead of sampling op_enter twice.
sampleInstruction(currentInstruction);
@@ -181,12 +158,7 @@ void JIT::privateCompileMainPass()
AbsoluteAddress(m_compilation->executionCounterFor(Profiler::OriginStack(Profiler::Origin(
m_compilation->bytecodes(), m_bytecodeOffset)))->address()));
}
-
- if (Options::eagerlyUpdateTopCallFrame())
- updateTopCallFrame();
- unsigned bytecodeOffset = m_bytecodeOffset;
-
switch (opcodeID) {
DEFINE_SLOW_OP(del_by_val)
DEFINE_SLOW_OP(in)
@@ -195,51 +167,46 @@ void JIT::privateCompileMainPass()
DEFINE_SLOW_OP(greater)
DEFINE_SLOW_OP(greatereq)
DEFINE_SLOW_OP(is_function)
- DEFINE_SLOW_OP(is_object_or_null)
+ DEFINE_SLOW_OP(is_object)
DEFINE_SLOW_OP(typeof)
+ DEFINE_OP(op_touch_entry)
DEFINE_OP(op_add)
DEFINE_OP(op_bitand)
DEFINE_OP(op_bitor)
DEFINE_OP(op_bitxor)
DEFINE_OP(op_call)
- DEFINE_OP(op_tail_call)
DEFINE_OP(op_call_eval)
DEFINE_OP(op_call_varargs)
- DEFINE_OP(op_tail_call_varargs)
- DEFINE_OP(op_construct_varargs)
DEFINE_OP(op_catch)
DEFINE_OP(op_construct)
+ DEFINE_OP(op_get_callee)
DEFINE_OP(op_create_this)
DEFINE_OP(op_to_this)
- DEFINE_OP(op_create_direct_arguments)
- DEFINE_OP(op_create_scoped_arguments)
- DEFINE_OP(op_create_out_of_band_arguments)
- DEFINE_OP(op_copy_rest)
- DEFINE_OP(op_get_rest_length)
- DEFINE_OP(op_check_tdz)
- DEFINE_OP(op_assert)
- DEFINE_OP(op_save)
- DEFINE_OP(op_resume)
+ DEFINE_OP(op_init_lazy_reg)
+ DEFINE_OP(op_create_arguments)
DEFINE_OP(op_debug)
DEFINE_OP(op_del_by_id)
DEFINE_OP(op_div)
DEFINE_OP(op_end)
DEFINE_OP(op_enter)
- DEFINE_OP(op_get_scope)
+ DEFINE_OP(op_create_activation)
DEFINE_OP(op_eq)
DEFINE_OP(op_eq_null)
+ case op_get_by_id_out_of_line:
case op_get_array_length:
DEFINE_OP(op_get_by_id)
+ DEFINE_OP(op_get_arguments_length)
DEFINE_OP(op_get_by_val)
- DEFINE_OP(op_overrides_has_instance)
+ DEFINE_OP(op_get_argument_by_val)
+ DEFINE_OP(op_get_by_pname)
+ DEFINE_OP(op_get_pnames)
+ DEFINE_OP(op_check_has_instance)
DEFINE_OP(op_instanceof)
- DEFINE_OP(op_instanceof_custom)
DEFINE_OP(op_is_undefined)
DEFINE_OP(op_is_boolean)
DEFINE_OP(op_is_number)
DEFINE_OP(op_is_string)
- DEFINE_OP(op_is_object)
DEFINE_OP(op_jeq_null)
DEFINE_OP(op_jfalse)
DEFINE_OP(op_jmp)
@@ -255,9 +222,9 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_jngreatereq)
DEFINE_OP(op_jtrue)
DEFINE_OP(op_loop_hint)
- DEFINE_OP(op_watchdog)
DEFINE_OP(op_lshift)
DEFINE_OP(op_mod)
+ DEFINE_OP(op_captured_mov)
DEFINE_OP(op_mov)
DEFINE_OP(op_mul)
DEFINE_OP(op_negate)
@@ -267,34 +234,36 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_new_array_with_size)
DEFINE_OP(op_new_array_buffer)
DEFINE_OP(op_new_func)
+ DEFINE_OP(op_new_captured_func)
DEFINE_OP(op_new_func_exp)
- DEFINE_OP(op_new_generator_func)
- DEFINE_OP(op_new_generator_func_exp)
- DEFINE_OP(op_new_arrow_func_exp)
DEFINE_OP(op_new_object)
DEFINE_OP(op_new_regexp)
+ DEFINE_OP(op_next_pname)
DEFINE_OP(op_not)
DEFINE_OP(op_nstricteq)
+ DEFINE_OP(op_pop_scope)
DEFINE_OP(op_dec)
DEFINE_OP(op_inc)
DEFINE_OP(op_profile_did_call)
DEFINE_OP(op_profile_will_call)
- DEFINE_OP(op_profile_type)
- DEFINE_OP(op_profile_control_flow)
+ DEFINE_OP(op_push_name_scope)
DEFINE_OP(op_push_with_scope)
- DEFINE_OP(op_create_lexical_environment)
- DEFINE_OP(op_get_parent_scope)
+ case op_put_by_id_out_of_line:
+ case op_put_by_id_transition_direct:
+ case op_put_by_id_transition_normal:
+ case op_put_by_id_transition_direct_out_of_line:
+ case op_put_by_id_transition_normal_out_of_line:
DEFINE_OP(op_put_by_id)
DEFINE_OP(op_put_by_index)
case op_put_by_val_direct:
DEFINE_OP(op_put_by_val)
- DEFINE_OP(op_put_getter_by_id)
- DEFINE_OP(op_put_setter_by_id)
- DEFINE_OP(op_put_getter_setter_by_id)
- DEFINE_OP(op_put_getter_by_val)
- DEFINE_OP(op_put_setter_by_val)
+ DEFINE_OP(op_put_getter_setter)
+ case op_init_global_const_nop:
+ NEXT_OPCODE(op_init_global_const_nop);
+ DEFINE_OP(op_init_global_const)
DEFINE_OP(op_ret)
+ DEFINE_OP(op_ret_object_or_this)
DEFINE_OP(op_rshift)
DEFINE_OP(op_unsigned)
DEFINE_OP(op_urshift)
@@ -304,40 +273,40 @@ void JIT::privateCompileMainPass()
DEFINE_OP(op_switch_char)
DEFINE_OP(op_switch_imm)
DEFINE_OP(op_switch_string)
+ DEFINE_OP(op_tear_off_activation)
+ DEFINE_OP(op_tear_off_arguments)
DEFINE_OP(op_throw)
DEFINE_OP(op_throw_static_error)
DEFINE_OP(op_to_number)
- DEFINE_OP(op_to_string)
DEFINE_OP(op_to_primitive)
DEFINE_OP(op_resolve_scope)
DEFINE_OP(op_get_from_scope)
DEFINE_OP(op_put_to_scope)
- DEFINE_OP(op_get_from_arguments)
- DEFINE_OP(op_put_to_arguments)
-
- DEFINE_OP(op_get_enumerable_length)
- DEFINE_OP(op_has_generic_property)
- DEFINE_OP(op_has_structure_property)
- DEFINE_OP(op_has_indexed_property)
- DEFINE_OP(op_get_direct_pname)
- DEFINE_OP(op_get_property_enumerator)
- DEFINE_OP(op_enumerator_structure_pname)
- DEFINE_OP(op_enumerator_generic_pname)
- DEFINE_OP(op_to_index_string)
- default:
+
+ case op_get_by_id_chain:
+ case op_get_by_id_generic:
+ case op_get_by_id_proto:
+ case op_get_by_id_self:
+ case op_get_by_id_getter_chain:
+ case op_get_by_id_getter_proto:
+ case op_get_by_id_getter_self:
+ case op_get_by_id_custom_chain:
+ case op_get_by_id_custom_proto:
+ case op_get_by_id_custom_self:
+ case op_get_string_length:
+ case op_put_by_id_generic:
+ case op_put_by_id_replace:
+ case op_put_by_id_transition:
RELEASE_ASSERT_NOT_REACHED();
}
-
- if (false)
- dataLog("At ", bytecodeOffset, ": ", m_slowCases.size(), "\n");
}
- RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());
+ RELEASE_ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());
#ifndef NDEBUG
// Reset this, in order to guard its use with ASSERTs.
- m_bytecodeOffset = std::numeric_limits<unsigned>::max();
+ m_bytecodeOffset = (unsigned)-1;
#endif
}
@@ -369,8 +338,6 @@ void JIT::privateCompileSlowCases()
for (Vector<SlowCaseEntry>::iterator iter = m_slowCases.begin(); iter != m_slowCases.end();) {
m_bytecodeOffset = iter->to;
- m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(m_bytecodeOffset));
-
unsigned firstTo = m_bytecodeOffset;
Instruction* currentInstruction = instructionsBegin + m_bytecodeOffset;
@@ -392,22 +359,24 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_bitor)
DEFINE_SLOWCASE_OP(op_bitxor)
DEFINE_SLOWCASE_OP(op_call)
- DEFINE_SLOWCASE_OP(op_tail_call)
DEFINE_SLOWCASE_OP(op_call_eval)
DEFINE_SLOWCASE_OP(op_call_varargs)
- DEFINE_SLOWCASE_OP(op_tail_call_varargs)
- DEFINE_SLOWCASE_OP(op_construct_varargs)
DEFINE_SLOWCASE_OP(op_construct)
DEFINE_SLOWCASE_OP(op_to_this)
- DEFINE_SLOWCASE_OP(op_check_tdz)
DEFINE_SLOWCASE_OP(op_create_this)
+ DEFINE_SLOWCASE_OP(op_captured_mov)
DEFINE_SLOWCASE_OP(op_div)
DEFINE_SLOWCASE_OP(op_eq)
+ DEFINE_SLOWCASE_OP(op_get_callee)
+ case op_get_by_id_out_of_line:
case op_get_array_length:
DEFINE_SLOWCASE_OP(op_get_by_id)
+ DEFINE_SLOWCASE_OP(op_get_arguments_length)
DEFINE_SLOWCASE_OP(op_get_by_val)
+ DEFINE_SLOWCASE_OP(op_get_argument_by_val)
+ DEFINE_SLOWCASE_OP(op_get_by_pname)
+ DEFINE_SLOWCASE_OP(op_check_has_instance)
DEFINE_SLOWCASE_OP(op_instanceof)
- DEFINE_SLOWCASE_OP(op_instanceof_custom)
DEFINE_SLOWCASE_OP(op_jfalse)
DEFINE_SLOWCASE_OP(op_jless)
DEFINE_SLOWCASE_OP(op_jlesseq)
@@ -419,7 +388,6 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_jngreatereq)
DEFINE_SLOWCASE_OP(op_jtrue)
DEFINE_SLOWCASE_OP(op_loop_hint)
- DEFINE_SLOWCASE_OP(op_watchdog)
DEFINE_SLOWCASE_OP(op_lshift)
DEFINE_SLOWCASE_OP(op_mod)
DEFINE_SLOWCASE_OP(op_mul)
@@ -430,6 +398,11 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_nstricteq)
DEFINE_SLOWCASE_OP(op_dec)
DEFINE_SLOWCASE_OP(op_inc)
+ case op_put_by_id_out_of_line:
+ case op_put_by_id_transition_direct:
+ case op_put_by_id_transition_normal:
+ case op_put_by_id_transition_direct_out_of_line:
+ case op_put_by_id_transition_normal_out_of_line:
DEFINE_SLOWCASE_OP(op_put_by_id)
case op_put_by_val_direct:
DEFINE_SLOWCASE_OP(op_put_by_val)
@@ -439,11 +412,7 @@ void JIT::privateCompileSlowCases()
DEFINE_SLOWCASE_OP(op_stricteq)
DEFINE_SLOWCASE_OP(op_sub)
DEFINE_SLOWCASE_OP(op_to_number)
- DEFINE_SLOWCASE_OP(op_to_string)
DEFINE_SLOWCASE_OP(op_to_primitive)
- DEFINE_SLOWCASE_OP(op_has_indexed_property)
- DEFINE_SLOWCASE_OP(op_has_structure_property)
- DEFINE_SLOWCASE_OP(op_get_direct_pname)
DEFINE_SLOWCASE_OP(op_resolve_scope)
DEFINE_SLOWCASE_OP(op_get_from_scope)
@@ -453,9 +422,6 @@ void JIT::privateCompileSlowCases()
RELEASE_ASSERT_NOT_REACHED();
}
- if (false)
- dataLog("At ", firstTo, " slow: ", iter - m_slowCases.begin(), "\n");
-
RELEASE_ASSERT_WITH_MESSAGE(iter == m_slowCases.end() || firstTo != iter->to, "Not enough jumps linked in slow case codegen.");
RELEASE_ASSERT_WITH_MESSAGE(firstTo == (iter - 1)->to, "Too many jumps linked in slow case codegen.");
@@ -467,12 +433,12 @@ void JIT::privateCompileSlowCases()
RELEASE_ASSERT(m_getByIdIndex == m_getByIds.size());
RELEASE_ASSERT(m_putByIdIndex == m_putByIds.size());
- RELEASE_ASSERT(m_callLinkInfoIndex == m_callCompilationInfo.size());
+ RELEASE_ASSERT(m_callLinkInfoIndex == m_callStructureStubCompilationInfo.size());
RELEASE_ASSERT(numberOfValueProfiles == m_codeBlock->numberOfValueProfiles());
#ifndef NDEBUG
// Reset this, in order to guard its use with ASSERTs.
- m_bytecodeOffset = std::numeric_limits<unsigned>::max();
+ m_bytecodeOffset = (unsigned)-1;
#endif
}
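
The two RELEASE_ASSERTs above pin down the slow-case protocol: each emitSlow_op_* handler must link exactly the SlowCaseEntry records owned by its bytecode offset, no more and no fewer. A stand-alone sketch of that grouping invariant; here the consuming loop satisfies it by construction, whereas the real asserts catch handlers that link too few or too many jumps:

    #include <cassert>
    #include <vector>

    struct SlowCaseEntry { unsigned to; }; // bytecode offset that owns this jump

    int main() {
        std::vector<SlowCaseEntry> slowCases { {0}, {0}, {4}, {7}, {7}, {7} };
        auto iter = slowCases.begin();
        while (iter != slowCases.end()) {
            unsigned firstTo = iter->to;
            // Each emitSlow_op_* body calls linkSlowCase() once per entry it owns;
            // this loop consumes them all, so the asserts below hold.
            while (iter != slowCases.end() && iter->to == firstTo)
                ++iter;
            // "Not enough jumps linked": the next entry must belong to a later offset.
            assert(iter == slowCases.end() || iter->to != firstTo);
            // "Too many jumps linked": the previous entry must still be ours.
            assert((iter - 1)->to == firstTo);
        }
    }
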
@@ -485,6 +451,11 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
m_canBeOptimizedOrInlined = false;
m_shouldEmitProfiling = false;
break;
+ case DFG::CanInline:
+ m_canBeOptimized = false;
+ m_canBeOptimizedOrInlined = true;
+ m_shouldEmitProfiling = true;
+ break;
case DFG::CanCompile:
case DFG::CanCompileAndInline:
m_canBeOptimized = true;
@@ -498,7 +469,6 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
switch (m_codeBlock->codeType()) {
case GlobalCode:
- case ModuleCode:
case EvalCode:
m_codeBlock->m_shouldAlwaysBeInlined = false;
break;
@@ -508,15 +478,9 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
m_codeBlock->m_shouldAlwaysBeInlined &= canInline(level) && DFG::mightInlineFunction(m_codeBlock);
break;
}
-
- m_codeBlock->setCalleeSaveRegisters(RegisterSet::llintBaselineCalleeSaveRegisters()); // Might be able to remove as this is probably already set to this value.
-
- // This ensures that we have the most up to date type information when performing typecheck optimizations for op_profile_type.
- if (m_vm->typeProfiler())
- m_vm->typeProfilerLog()->processLogEntries(ASCIILiteral("Preparing for JIT compilation."));
- if (Options::dumpDisassembly() || m_vm->m_perBytecodeProfiler)
- m_disassembler = std::make_unique<JITDisassembler>(m_codeBlock);
+ if (Options::showDisassembly() || m_vm->m_perBytecodeProfiler)
+ m_disassembler = adoptPtr(new JITDisassembler(m_codeBlock));
if (m_vm->m_perBytecodeProfiler) {
m_compilation = adoptRef(
new Profiler::Compilation(
@@ -525,8 +489,6 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
m_compilation->addProfiledBytecodes(*m_vm->m_perBytecodeProfiler, m_codeBlock);
}
- m_pcToCodeOriginMapBuilder.appendItem(label(), CodeOrigin(0, nullptr));
-
if (m_disassembler)
m_disassembler->setStartOfCode(label());
@@ -534,8 +496,9 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
if (m_randomGenerator.getUint32() & 1)
nop();
- emitFunctionPrologue();
- emitPutToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
+ preserveReturnAddressAfterCall(regT2);
+ emitPutReturnPCToCallFrameHeader(regT2);
+ emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
Label beginLabel(this);
@@ -544,8 +507,9 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
sampleInstruction(m_codeBlock->instructions().begin());
#endif
+ Jump stackCheck;
if (m_codeBlock->codeType() == FunctionCode) {
- ASSERT(m_bytecodeOffset == std::numeric_limits<unsigned>::max());
+ ASSERT(m_bytecodeOffset == (unsigned)-1);
if (shouldEmitProfiling()) {
for (int argument = 0; argument < m_codeBlock->numParameters(); ++argument) {
// If this is a constructor, then we want to put in a dummy profiling site (to
@@ -562,54 +526,49 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
emitValueProfilingSite(m_codeBlock->valueProfileForArgument(argument));
}
}
- }
-
- addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, regT1);
- Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfStackLimit()), regT1);
- move(regT1, stackPointerRegister);
- checkStackPointerAlignment();
-
- emitSaveCalleeSaves();
- emitMaterializeTagCheckRegisters();
+ addPtr(TrustedImm32(virtualRegisterForLocal(frameRegisterCountFor(m_codeBlock)).offset() * sizeof(Register)), callFrameRegister, regT1);
+ stackCheck = branchPtr(Above, AbsoluteAddress(m_vm->addressOfJSStackLimit()), regT1);
+ }
+ Label functionBody = label();
+
privateCompileMainPass();
privateCompileLinkPass();
privateCompileSlowCases();
if (m_disassembler)
m_disassembler->setEndOfSlowPath(label());
- m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());
-
- stackOverflow.link(this);
- m_bytecodeOffset = 0;
- if (maxFrameExtentForSlowPathCall)
- addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
- callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);
Label arityCheck;
if (m_codeBlock->codeType() == FunctionCode) {
+ stackCheck.link(this);
+ m_bytecodeOffset = 0;
+ callOperationWithCallFrameRollbackOnException(operationStackCheck, m_codeBlock);
+#ifndef NDEBUG
+ m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
+#endif
+ jump(functionBody);
+
arityCheck = label();
store8(TrustedImm32(0), &m_codeBlock->m_shouldAlwaysBeInlined);
- emitFunctionPrologue();
- emitPutToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
+ preserveReturnAddressAfterCall(regT2);
+ emitPutReturnPCToCallFrameHeader(regT2);
+ emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
load32(payloadFor(JSStack::ArgumentCount), regT1);
branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);
m_bytecodeOffset = 0;
- if (maxFrameExtentForSlowPathCall)
- addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck);
- if (maxFrameExtentForSlowPathCall)
- addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
- branchTest32(Zero, returnValueGPR).linkTo(beginLabel, this);
- move(returnValueGPR, GPRInfo::argumentGPR0);
- emitNakedCall(m_vm->getCTIStub(arityFixupGenerator).code());
+ if (returnValueGPR != regT0)
+ move(returnValueGPR, regT0);
+ branchTest32(Zero, regT0).linkTo(beginLabel, this);
+ emitNakedCall(m_vm->getCTIStub(arityFixup).code());
#if !ASSERT_DISABLED
- m_bytecodeOffset = std::numeric_limits<unsigned>::max(); // Reset this, in order to guard its use with ASSERTs.
+ m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
#endif
jump(beginLabel);
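
This is the arity-check entry point: calls that pass enough arguments branch straight to beginLabel, while short calls go through operationCallArityCheck/operationConstructArityCheck and the arityFixup thunk before re-entering. A behavioral model, with undefined-padding standing in for the fixup thunk:

    #include <cstddef>
    #include <cstdio>
    #include <vector>

    struct JSValue { bool isUndefined = true; double number = 0; };

    void runBody(const std::vector<JSValue>& args) {
        std::printf("body sees %zu args\n", args.size());
    }

    void callWithArityCheck(std::vector<JSValue> args, std::size_t numParameters) {
        if (args.size() < numParameters)           // branch32(AboveOrEqual, ...) not taken
            args.resize(numParameters, JSValue{}); // arityFixup: pad with undefined
        runBody(args);                             // jump(beginLabel)
    }

    int main() {
        callWithArityCheck({ JSValue{ false, 1 }, JSValue{ false, 2 } }, 4);
    }
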
@@ -621,10 +580,8 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
if (m_disassembler)
m_disassembler->setEndOfCode(label());
- m_pcToCodeOriginMapBuilder.appendItem(label(), PCToCodeOriginMapBuilder::defaultCodeOrigin());
-
- LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock, effort);
+ LinkBuffer patchBuffer(*m_vm, this, m_codeBlock, effort);
if (patchBuffer.didFailToAllocate())
return CompilationFailed;
@@ -671,69 +628,67 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
for (unsigned i = m_putByIds.size(); i--;)
m_putByIds[i].finalize(patchBuffer);
- for (const auto& byValCompilationInfo : m_byValCompilationInfo) {
- PatchableJump patchableNotIndexJump = byValCompilationInfo.notIndexJump;
- CodeLocationJump notIndexJump = CodeLocationJump();
- if (Jump(patchableNotIndexJump).isSet())
- notIndexJump = CodeLocationJump(patchBuffer.locationOf(patchableNotIndexJump));
- CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(byValCompilationInfo.badTypeJump));
- CodeLocationLabel doneTarget = patchBuffer.locationOf(byValCompilationInfo.doneTarget);
- CodeLocationLabel nextHotPathTarget = patchBuffer.locationOf(byValCompilationInfo.nextHotPathTarget);
- CodeLocationLabel slowPathTarget = patchBuffer.locationOf(byValCompilationInfo.slowPathTarget);
- CodeLocationCall returnAddress = patchBuffer.locationOf(byValCompilationInfo.returnAddress);
-
- *byValCompilationInfo.byValInfo = ByValInfo(
- byValCompilationInfo.bytecodeIndex,
- notIndexJump,
+ m_codeBlock->setNumberOfByValInfos(m_byValCompilationInfo.size());
+ for (unsigned i = 0; i < m_byValCompilationInfo.size(); ++i) {
+ CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(m_byValCompilationInfo[i].badTypeJump));
+ CodeLocationLabel doneTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].doneTarget);
+ CodeLocationLabel slowPathTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].slowPathTarget);
+ CodeLocationCall returnAddress = patchBuffer.locationOf(m_byValCompilationInfo[i].returnAddress);
+
+ m_codeBlock->byValInfo(i) = ByValInfo(
+ m_byValCompilationInfo[i].bytecodeIndex,
badTypeJump,
- byValCompilationInfo.arrayMode,
- byValCompilationInfo.arrayProfile,
+ m_byValCompilationInfo[i].arrayMode,
differenceBetweenCodePtr(badTypeJump, doneTarget),
- differenceBetweenCodePtr(badTypeJump, nextHotPathTarget),
differenceBetweenCodePtr(returnAddress, slowPathTarget));
}
- for (unsigned i = 0; i < m_callCompilationInfo.size(); ++i) {
- CallCompilationInfo& compilationInfo = m_callCompilationInfo[i];
- CallLinkInfo& info = *compilationInfo.callLinkInfo;
- info.setCallLocations(patchBuffer.locationOfNearCall(compilationInfo.callReturnLocation),
- patchBuffer.locationOf(compilationInfo.hotPathBegin),
- patchBuffer.locationOfNearCall(compilationInfo.hotPathOther));
+ m_codeBlock->setNumberOfCallLinkInfos(m_callStructureStubCompilationInfo.size());
+ for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) {
+ CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
+ info.callType = m_callStructureStubCompilationInfo[i].callType;
+ info.codeOrigin = CodeOrigin(m_callStructureStubCompilationInfo[i].bytecodeIndex);
+ info.callReturnLocation = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].callReturnLocation);
+ info.hotPathBegin = patchBuffer.locationOf(m_callStructureStubCompilationInfo[i].hotPathBegin);
+ info.hotPathOther = patchBuffer.locationOfNearCall(m_callStructureStubCompilationInfo[i].hotPathOther);
+ info.calleeGPR = regT0;
}
- CompactJITCodeMap::Encoder jitCodeMapEncoder;
- for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
- if (m_labels[bytecodeOffset].isSet())
- jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset]));
+#if ENABLE(DFG_JIT) || ENABLE(LLINT)
+ if (canBeOptimizedOrInlined()
+#if ENABLE(LLINT)
+ || true
+#endif
+ ) {
+ CompactJITCodeMap::Encoder jitCodeMapEncoder;
+ for (unsigned bytecodeOffset = 0; bytecodeOffset < m_labels.size(); ++bytecodeOffset) {
+ if (m_labels[bytecodeOffset].isSet())
+ jitCodeMapEncoder.append(bytecodeOffset, patchBuffer.offsetOf(m_labels[bytecodeOffset]));
+ }
+ m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish());
}
- m_codeBlock->setJITCodeMap(jitCodeMapEncoder.finish());
+#endif
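
The encoder above records, for every bytecode offset whose label was bound, the corresponding machine-code offset, so the tiering machinery can map between the two representations. A minimal stand-in; the real CompactJITCodeMap delta-compresses, while this sketch just keeps pairs:

    #include <cstdio>
    #include <optional>
    #include <utility>
    #include <vector>

    int main() {
        // m_labels[bytecodeOffset]; nullopt models a label that was never bound.
        std::vector<std::optional<unsigned>> labels { 0u, std::nullopt, 14u, 30u };
        std::vector<std::pair<unsigned, unsigned>> jitCodeMap;
        for (unsigned bytecodeOffset = 0; bytecodeOffset < labels.size(); ++bytecodeOffset) {
            if (labels[bytecodeOffset]) // m_labels[bytecodeOffset].isSet()
                jitCodeMap.emplace_back(bytecodeOffset, *labels[bytecodeOffset]);
        }
        for (auto [bytecode, machine] : jitCodeMap)
            std::printf("bytecode %u -> machine offset %u\n", bytecode, machine);
    }
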
MacroAssemblerCodePtr withArityCheck;
if (m_codeBlock->codeType() == FunctionCode)
withArityCheck = patchBuffer.locationOf(arityCheck);
- if (Options::dumpDisassembly()) {
+ if (Options::showDisassembly())
m_disassembler->dump(patchBuffer);
- patchBuffer.didAlreadyDisassemble();
- }
if (m_compilation) {
m_disassembler->reportToProfiler(m_compilation.get(), patchBuffer);
m_vm->m_perBytecodeProfiler->addCompilation(m_compilation);
}
-
- if (m_pcToCodeOriginMapBuilder.didBuildMapping())
- m_codeBlock->setPCToCodeOriginMap(std::make_unique<PCToCodeOriginMap>(WTFMove(m_pcToCodeOriginMapBuilder), patchBuffer));
- CodeRef result = FINALIZE_CODE(
- patchBuffer,
- ("Baseline JIT code for %s", toCString(CodeBlockWithJITType(m_codeBlock, JITCode::BaselineJIT)).data()));
+ CodeRef result = patchBuffer.finalizeCodeWithoutDisassembly();
m_vm->machineCodeBytesPerBytecodeWordForBaselineJIT.add(
static_cast<double>(result.size()) /
static_cast<double>(m_codeBlock->instructions().size()));
-
+
m_codeBlock->shrinkToFit(CodeBlock::LateShrink);
m_codeBlock->setJITCode(
- adoptRef(new DirectJITCode(result, withArityCheck, JITCode::BaselineJIT)));
+ adoptRef(new DirectJITCode(result, JITCode::BaselineJIT)),
+ withArityCheck);
#if ENABLE(JIT_VERBOSE)
dataLogF("JIT generated code for %p at [%p, %p).\n", m_codeBlock, result.executableMemory()->start(), result.executableMemory()->end());
@@ -742,57 +697,72 @@ CompilationResult JIT::privateCompile(JITCompilationEffort effort)
return CompilationSuccessful;
}
-void JIT::privateCompileExceptionHandlers()
+void JIT::linkFor(ExecState* exec, JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, JIT::CodePtr code, CallLinkInfo* callLinkInfo, VM* vm, CodeSpecializationKind kind)
{
- if (!m_exceptionChecksWithCallFrameRollback.empty()) {
- m_exceptionChecksWithCallFrameRollback.link(this);
+ RepatchBuffer repatchBuffer(callerCodeBlock);
+
+ ASSERT(!callLinkInfo->isLinked());
+ callLinkInfo->callee.set(*vm, callLinkInfo->hotPathBegin, callerCodeBlock->ownerExecutable(), callee);
+ callLinkInfo->lastSeenCallee.set(*vm, callerCodeBlock->ownerExecutable(), callee);
+ repatchBuffer.relink(callLinkInfo->hotPathOther, code);
+
+ if (calleeCodeBlock)
+ calleeCodeBlock->linkIncomingCall(exec, callLinkInfo);
+
+ // Patch the slow path so we do not continue to try to link.
+ if (kind == CodeForCall) {
+ ASSERT(callLinkInfo->callType == CallLinkInfo::Call
+ || callLinkInfo->callType == CallLinkInfo::CallVarargs);
+ if (callLinkInfo->callType == CallLinkInfo::Call) {
+ repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(linkClosureCallThunkGenerator).code());
+ return;
+ }
- copyCalleeSavesToVMCalleeSavesBuffer();
+ repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(virtualCallThunkGenerator).code());
+ return;
+ }
- // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).
+ ASSERT(kind == CodeForConstruct);
+ repatchBuffer.relink(callLinkInfo->callReturnLocation, vm->getCTIStub(virtualConstructThunkGenerator).code());
+}
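
linkFor implements one-shot call linking: the first trip through the slow path binds the callee, repatches the near call to jump directly to its code, and relinks the slow path to a thunk so the site is not linked twice. A behavioral sketch in the style of a monomorphic inline cache, with invented types and thunks:

    #include <cstdio>

    using Entry = void (*)();
    void virtualCallThunk() { std::puts("unlinked: virtual call path"); }
    void compiledCallee()   { std::puts("linked: direct call to callee"); }

    struct CallLinkInfo {
        Entry target = virtualCallThunk;         // hotPathOther before linking
        bool isLinked() const { return target != virtualCallThunk; }
        void link(Entry code) { target = code; } // repatchBuffer.relink(hotPathOther, code)
    };

    int main() {
        CallLinkInfo site;
        site.target();             // first call: would go through the link stub
        site.link(compiledCallee); // linkFor binds the callee once
        site.target();             // subsequent calls: patched direct call
    }
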
- move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
- move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
+void JIT::linkSlowCall(CodeBlock* callerCodeBlock, CallLinkInfo* callLinkInfo)
+{
+ RepatchBuffer repatchBuffer(callerCodeBlock);
-#if CPU(X86)
- // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
- poke(GPRInfo::argumentGPR0);
- poke(GPRInfo::argumentGPR1, 1);
-#endif
- m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandlerFromCallerFrame).value()));
- jumpToExceptionHandler();
+ repatchBuffer.relink(callLinkInfo->callReturnLocation, callerCodeBlock->vm()->getCTIStub(virtualCallThunkGenerator).code());
+}
+
+void JIT::privateCompileExceptionHandlers()
+{
+ if (m_exceptionChecks.empty() && m_exceptionChecksWithCallFrameRollback.empty())
+ return;
+
+ Jump doLookup;
+
+ if (!m_exceptionChecksWithCallFrameRollback.empty()) {
+ m_exceptionChecksWithCallFrameRollback.link(this);
+ emitGetCallerFrameFromCallFrameHeaderPtr(GPRInfo::argumentGPR0);
+ doLookup = jump();
}
- if (!m_exceptionChecks.empty()) {
+ if (!m_exceptionChecks.empty())
m_exceptionChecks.link(this);
+
+ // lookupExceptionHandler is passed one argument, the exec (the CallFrame*).
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
- copyCalleeSavesToVMCalleeSavesBuffer();
-
- // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
- move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
- move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
+ if (doLookup.isSet())
+ doLookup.link(this);
#if CPU(X86)
- // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
- poke(GPRInfo::argumentGPR0);
- poke(GPRInfo::argumentGPR1, 1);
+ // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
+ poke(GPRInfo::argumentGPR0);
#endif
- m_calls.append(CallRecord(call(), std::numeric_limits<unsigned>::max(), FunctionPtr(lookupExceptionHandler).value()));
- jumpToExceptionHandler();
- }
-}
-
-unsigned JIT::frameRegisterCountFor(CodeBlock* codeBlock)
-{
- ASSERT(static_cast<unsigned>(codeBlock->m_numCalleeLocals) == WTF::roundUpToMultipleOf(stackAlignmentRegisters(), static_cast<unsigned>(codeBlock->m_numCalleeLocals)));
-
- return roundLocalRegisterCountForFramePointerOffset(codeBlock->m_numCalleeLocals + maxFrameExtentForSlowPathCallInRegisters);
+ m_calls.append(CallRecord(call(), (unsigned)-1, FunctionPtr(lookupExceptionHandler).value()));
+ jumpToExceptionHandler();
}
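
Both exception-check lists now funnel into one lookupExceptionHandler call; the rollback variant first loads the caller frame into argumentGPR0, while the plain variant passes the current frame. A rough model of that merge:

    #include <cstdio>

    struct CallFrame { const char* name; CallFrame* caller; };

    void lookupExceptionHandler(CallFrame* frame) {
        std::printf("looking up handler from frame '%s'\n", frame->name);
    }

    void raiseException(CallFrame* current, bool rollbackToCaller) {
        // Rollback checks pass the caller frame; plain checks pass the current one.
        CallFrame* argumentGPR0 = rollbackToCaller ? current->caller : current;
        lookupExceptionHandler(argumentGPR0); // jumpToExceptionHandler() follows
    }

    int main() {
        CallFrame outer { "outer", nullptr };
        CallFrame inner { "inner", &outer };
        raiseException(&inner, false); // m_exceptionChecks path
        raiseException(&inner, true);  // m_exceptionChecksWithCallFrameRollback path
    }
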
-int JIT::stackPointerOffsetFor(CodeBlock* codeBlock)
-{
- return virtualRegisterForLocal(frameRegisterCountFor(codeBlock) - 1).offset();
-}
} // namespace JSC
diff --git a/Source/JavaScriptCore/jit/JIT.h b/Source/JavaScriptCore/jit/JIT.h
index e81824268..298075706 100644
--- a/Source/JavaScriptCore/jit/JIT.h
+++ b/Source/JavaScriptCore/jit/JIT.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2012-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,7 +30,7 @@
// We've run into some problems where changing the size of the class JIT leads to
// performance fluctuations. Try forcing alignment in an attempt to stabilize this.
-#if COMPILER(GCC_OR_CLANG)
+#if COMPILER(GCC)
#define JIT_CLASS_ALIGNMENT __attribute__ ((aligned (32)))
#else
#define JIT_CLASS_ALIGNMENT
@@ -44,8 +44,8 @@
#include "JITDisassembler.h"
#include "JITInlineCacheGenerator.h"
#include "JSInterfaceJIT.h"
+#include "LegacyProfiler.h"
#include "Opcode.h"
-#include "PCToCodeOriginMap.h"
#include "ResultType.h"
#include "SamplingTool.h"
#include "UnusedPointer.h"
@@ -53,10 +53,10 @@
namespace JSC {
class ArrayAllocationProfile;
- class CallLinkInfo;
class CodeBlock;
class FunctionExecutable;
class JIT;
+ class JSPropertyNameIterator;
class Identifier;
class Interpreter;
class JSScope;
@@ -64,12 +64,14 @@ namespace JSC {
class MarkedAllocator;
class Register;
class StructureChain;
- class StructureStubInfo;
+ struct CallLinkInfo;
struct Instruction;
struct OperandTypes;
+ struct PolymorphicAccessStructureList;
struct SimpleJumpTable;
struct StringJumpTable;
+ struct StructureStubInfo;
struct CallRecord {
MacroAssembler::Call from;
@@ -149,38 +151,34 @@ namespace JSC {
struct ByValCompilationInfo {
ByValCompilationInfo() { }
- ByValCompilationInfo(ByValInfo* byValInfo, unsigned bytecodeIndex, MacroAssembler::PatchableJump notIndexJump, MacroAssembler::PatchableJump badTypeJump, JITArrayMode arrayMode, ArrayProfile* arrayProfile, MacroAssembler::Label doneTarget, MacroAssembler::Label nextHotPathTarget)
- : byValInfo(byValInfo)
- , bytecodeIndex(bytecodeIndex)
- , notIndexJump(notIndexJump)
+ ByValCompilationInfo(unsigned bytecodeIndex, MacroAssembler::PatchableJump badTypeJump, JITArrayMode arrayMode, MacroAssembler::Label doneTarget)
+ : bytecodeIndex(bytecodeIndex)
, badTypeJump(badTypeJump)
, arrayMode(arrayMode)
- , arrayProfile(arrayProfile)
, doneTarget(doneTarget)
- , nextHotPathTarget(nextHotPathTarget)
{
}
-
- ByValInfo* byValInfo;
+
unsigned bytecodeIndex;
- MacroAssembler::PatchableJump notIndexJump;
MacroAssembler::PatchableJump badTypeJump;
JITArrayMode arrayMode;
- ArrayProfile* arrayProfile;
MacroAssembler::Label doneTarget;
- MacroAssembler::Label nextHotPathTarget;
MacroAssembler::Label slowPathTarget;
MacroAssembler::Call returnAddress;
};
- struct CallCompilationInfo {
+ struct StructureStubCompilationInfo {
MacroAssembler::DataLabelPtr hotPathBegin;
MacroAssembler::Call hotPathOther;
MacroAssembler::Call callReturnLocation;
- CallLinkInfo* callLinkInfo;
+ CallLinkInfo::CallType callType;
+ unsigned bytecodeIndex;
};
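
These compilation-info structs hold MacroAssembler labels precisely because absolute code addresses do not exist until link time; the LinkBuffer later turns each label into a real location. A small illustration of that two-phase resolution, with an invented LinkBuffer mirroring patchBuffer.locationOf:

    #include <cstdint>
    #include <cstdio>

    struct Label { std::uint32_t offset; }; // position inside the assembly buffer

    struct LinkBuffer {
        std::uintptr_t base; // chosen when the executable memory is allocated
        std::uintptr_t locationOf(Label label) const { return base + label.offset; }
    };

    int main() {
        Label hotPathBegin { 0x40 }, callReturnLocation { 0x58 };
        LinkBuffer patchBuffer { 0x100000 }; // invented base address
        std::printf("hotPathBegin at %#lx, call return at %#lx\n",
            static_cast<unsigned long>(patchBuffer.locationOf(hotPathBegin)),
            static_cast<unsigned long>(patchBuffer.locationOf(callReturnLocation)));
    }
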
- void ctiPatchCallByReturnAddress(ReturnAddressPtr, FunctionPtr newCalleeFunction);
+ // Near calls can only be patched to other JIT code; regular calls can be patched to JIT code or relinked to stub functions.
+ void ctiPatchNearCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction);
+ void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, MacroAssemblerCodePtr newCalleeFunction);
+ void ctiPatchCallByReturnAddress(CodeBlock* codeblock, ReturnAddressPtr returnAddress, FunctionPtr newCalleeFunction);
class JIT : private JSInterfaceJIT {
friend class JITSlowPathCall;
@@ -202,18 +200,18 @@ namespace JSC {
return JIT(vm, codeBlock).privateCompile(effort);
}
- static void compileGetByVal(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
+ static void compileClosureCall(VM* vm, CallLinkInfo* callLinkInfo, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, Structure* expectedStructure, ExecutableBase* expectedExecutable, MacroAssemblerCodePtr codePtr)
{
- JIT jit(vm, codeBlock);
- jit.m_bytecodeOffset = byValInfo->bytecodeIndex;
- jit.privateCompileGetByVal(byValInfo, returnAddress, arrayMode);
+ JIT jit(vm, callerCodeBlock);
+ jit.m_bytecodeOffset = callLinkInfo->codeOrigin.bytecodeIndex;
+ jit.privateCompileClosureCall(callLinkInfo, calleeCodeBlock, expectedStructure, expectedExecutable, codePtr);
}
- static void compileGetByValWithCachedId(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, const Identifier& propertyName)
+ static void compileGetByVal(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
{
JIT jit(vm, codeBlock);
jit.m_bytecodeOffset = byValInfo->bytecodeIndex;
- jit.privateCompileGetByValWithCachedId(byValInfo, returnAddress, propertyName);
+ jit.privateCompileGetByVal(byValInfo, returnAddress, arrayMode);
}
static void compilePutByVal(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
@@ -230,31 +228,26 @@ namespace JSC {
jit.privateCompilePutByVal(byValInfo, returnAddress, arrayMode);
}
- static void compilePutByValWithCachedId(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, PutKind putKind, const Identifier& propertyName)
- {
- JIT jit(vm, codeBlock);
- jit.m_bytecodeOffset = byValInfo->bytecodeIndex;
- jit.privateCompilePutByValWithCachedId(byValInfo, returnAddress, putKind, propertyName);
- }
-
- static void compileHasIndexedProperty(VM* vm, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
- {
- JIT jit(vm, codeBlock);
- jit.m_bytecodeOffset = byValInfo->bytecodeIndex;
- jit.privateCompileHasIndexedProperty(byValInfo, returnAddress, arrayMode);
- }
-
static CodeRef compileCTINativeCall(VM* vm, NativeFunction func)
{
if (!vm->canUseJIT()) {
+#if ENABLE(LLINT)
return CodeRef::createLLIntCodeRef(llint_native_call_trampoline);
+#else
+ return CodeRef();
+#endif
}
JIT jit(vm, 0);
return jit.privateCompileCTINativeCall(vm, func);
}
- static unsigned frameRegisterCountFor(CodeBlock*);
- static int stackPointerOffsetFor(CodeBlock*);
+ static void linkFor(ExecState*, JSFunction* callee, CodeBlock* callerCodeBlock, CodeBlock* calleeCodeBlock, CodePtr, CallLinkInfo*, VM*, CodeSpecializationKind);
+ static void linkSlowCall(CodeBlock* callerCodeBlock, CallLinkInfo*);
+
+ static unsigned frameRegisterCountFor(CodeBlock* codeBlock)
+ {
+ return codeBlock->m_numCalleeRegisters;
+ }
private:
JIT(VM*, CodeBlock* = 0);
@@ -264,12 +257,10 @@ namespace JSC {
void privateCompileSlowCases();
CompilationResult privateCompile(JITCompilationEffort);
+ void privateCompileClosureCall(CallLinkInfo*, CodeBlock* calleeCodeBlock, Structure*, ExecutableBase*, MacroAssemblerCodePtr);
+
void privateCompileGetByVal(ByValInfo*, ReturnAddressPtr, JITArrayMode);
- void privateCompileGetByValWithCachedId(ByValInfo*, ReturnAddressPtr, const Identifier&);
void privateCompilePutByVal(ByValInfo*, ReturnAddressPtr, JITArrayMode);
- void privateCompilePutByValWithCachedId(ByValInfo*, ReturnAddressPtr, PutKind, const Identifier&);
-
- void privateCompileHasIndexedProperty(ByValInfo*, ReturnAddressPtr, JITArrayMode);
Label privateCompileCTINativeCall(VM*, bool isConstruct = false);
CodeRef privateCompileCTINativeCall(VM*, NativeFunction);
@@ -283,15 +274,6 @@ namespace JSC {
return functionCall;
}
-#if OS(WINDOWS) && CPU(X86_64)
- Call appendCallWithSlowPathReturnType(const FunctionPtr& function)
- {
- Call functionCall = callWithSlowPathReturnType();
- m_calls.append(CallRecord(functionCall, m_bytecodeOffset, function.value()));
- return functionCall;
- }
-#endif
-
void exceptionCheck(Jump jumpToHandler)
{
m_exceptionChecks.append(jumpToHandler);
@@ -317,26 +299,34 @@ namespace JSC {
void compileOpCall(OpcodeID, Instruction*, unsigned callLinkInfoIndex);
void compileOpCallSlowCase(OpcodeID, Instruction*, Vector<SlowCaseEntry>::iterator&, unsigned callLinkInfoIndex);
- void compileSetupVarargsFrame(Instruction*, CallLinkInfo*);
+ void compileLoadVarargs(Instruction*);
void compileCallEval(Instruction*);
void compileCallEvalSlowCase(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitPutCallResult(Instruction*);
enum CompileOpStrictEqType { OpStrictEq, OpNStrictEq };
void compileOpStrictEq(Instruction* instruction, CompileOpStrictEqType type);
- bool isOperandConstantDouble(int src);
+ bool isOperandConstantImmediateDouble(int src);
void emitLoadDouble(int index, FPRegisterID value);
void emitLoadInt32ToDouble(int index, FPRegisterID value);
- Jump emitJumpIfCellObject(RegisterID cellReg);
- Jump emitJumpIfCellNotObject(RegisterID cellReg);
+ Jump emitJumpIfNotObject(RegisterID structureReg);
- enum WriteBarrierMode { UnconditionalWriteBarrier, ShouldFilterBase, ShouldFilterValue, ShouldFilterBaseAndValue };
+ Jump addStructureTransitionCheck(JSCell*, Structure*, StructureStubInfo*, RegisterID scratch);
+ void addStructureTransitionCheck(JSCell*, Structure*, StructureStubInfo*, JumpList& failureCases, RegisterID scratch);
+ void testPrototype(JSValue, JumpList& failureCases, StructureStubInfo*);
+
+ enum WriteBarrierMode { UnconditionalWriteBarrier, ShouldFilterValue, ShouldFilterBaseAndValue };
// The value register in the write barrier is used before any scratch registers,
// so it may safely be the same as either of the scratch registers.
+ Jump checkMarkWord(RegisterID owner, RegisterID scratch1, RegisterID scratch2);
+ Jump checkMarkWord(JSCell* owner);
void emitWriteBarrier(unsigned owner, unsigned value, WriteBarrierMode);
void emitWriteBarrier(JSCell* owner, unsigned value, WriteBarrierMode);
- void emitWriteBarrier(JSCell* owner);
+/*
+ void emitWriteBarrier(RegisterID owner, RegisterID valueTag, RegisterID scratch1, RegisterID scratch2, WriteBarrierMode);
+ void emitWriteBarrier(JSCell* owner, RegisterID value, WriteBarrierMode);
+*/
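
The write-barrier declarations above come in filtering flavors because the barrier only matters when a possibly-unmarked value is stored into an already-marked owner. A hedged sketch of that generational check, with a plain bool standing in for JSC's mark state:

    #include <cstdio>
    #include <vector>

    struct JSCell { bool marked = false; }; // bool stands in for the GC mark state

    std::vector<JSCell*> rememberedSet;

    void writeBarrier(JSCell* owner, JSCell* value) {
        // ShouldFilterValue flavor: the barrier only fires when a marked owner
        // is about to point at an unmarked value the GC has not yet seen.
        if (owner->marked && value && !value->marked)
            rememberedSet.push_back(owner); // slow path records the owner
    }

    int main() {
        JSCell oldCell { true }, youngCell { false };
        writeBarrier(&oldCell, &youngCell);
        std::printf("remembered %zu owner(s)\n", rememberedSet.size());
    }
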
template<typename StructureType> // StructureType can be RegisterID or ImmPtr.
void emitAllocateJSObject(RegisterID allocator, StructureType, RegisterID result, RegisterID scratch);
@@ -346,8 +336,8 @@ namespace JSC {
void emitValueProfilingSite(ValueProfile*);
void emitValueProfilingSite(unsigned bytecodeOffset);
void emitValueProfilingSite();
- void emitArrayProfilingSiteWithCell(RegisterID cell, RegisterID indexingType, ArrayProfile*);
- void emitArrayProfilingSiteForBytecodeIndexWithCell(RegisterID cell, RegisterID indexingType, unsigned bytecodeIndex);
+ void emitArrayProfilingSite(RegisterID structureAndIndexingType, RegisterID scratch, ArrayProfile*);
+ void emitArrayProfilingSiteForBytecodeIndex(RegisterID structureAndIndexingType, RegisterID scratch, unsigned bytecodeIndex);
void emitArrayProfileStoreToHoleSpecialCase(ArrayProfile*);
void emitArrayProfileOutOfBoundsSpecialCase(ArrayProfile*);
@@ -357,22 +347,14 @@ namespace JSC {
// Property is int-checked and zero extended. Base is cell checked.
// Structure is already profiled. Returns the slow cases. Fall-through
// case contains result in regT0, and it is not yet profiled.
- JumpList emitInt32Load(Instruction* instruction, PatchableJump& badType) { return emitContiguousLoad(instruction, badType, Int32Shape); }
- JumpList emitDoubleLoad(Instruction*, PatchableJump& badType);
- JumpList emitContiguousLoad(Instruction*, PatchableJump& badType, IndexingType expectedShape = ContiguousShape);
- JumpList emitArrayStorageLoad(Instruction*, PatchableJump& badType);
- JumpList emitLoadForArrayMode(Instruction*, JITArrayMode, PatchableJump& badType);
-
JumpList emitInt32GetByVal(Instruction* instruction, PatchableJump& badType) { return emitContiguousGetByVal(instruction, badType, Int32Shape); }
JumpList emitDoubleGetByVal(Instruction*, PatchableJump& badType);
JumpList emitContiguousGetByVal(Instruction*, PatchableJump& badType, IndexingType expectedShape = ContiguousShape);
JumpList emitArrayStorageGetByVal(Instruction*, PatchableJump& badType);
- JumpList emitDirectArgumentsGetByVal(Instruction*, PatchableJump& badType);
- JumpList emitScopedArgumentsGetByVal(Instruction*, PatchableJump& badType);
JumpList emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badType, TypedArrayType);
JumpList emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badType, TypedArrayType);
- // Property is in regT1, base is in regT0. regT2 contains indexing type.
+ // Property is in regT0, base is in regT0. regT2 contains indexing type.
// The value to store is not yet loaded. Property is int-checked and
// zero-extended. Base is cell checked. Structure is already profiled.
// returns the slow cases.
@@ -392,23 +374,11 @@ namespace JSC {
JumpList emitArrayStoragePutByVal(Instruction*, PatchableJump& badType);
JumpList emitIntTypedArrayPutByVal(Instruction*, PatchableJump& badType, TypedArrayType);
JumpList emitFloatTypedArrayPutByVal(Instruction*, PatchableJump& badType, TypedArrayType);
-
- // Identifier check helper for GetByVal and PutByVal.
- void emitIdentifierCheck(RegisterID cell, RegisterID scratch, const Identifier&, JumpList& slowCases);
-
- JITGetByIdGenerator emitGetByValWithCachedId(Instruction*, const Identifier&, Jump& fastDoneCase, Jump& slowDoneCase, JumpList& slowCases);
- JITPutByIdGenerator emitPutByValWithCachedId(Instruction*, PutKind, const Identifier&, JumpList& doneCases, JumpList& slowCases);
-
+
enum FinalObjectMode { MayBeFinal, KnownNotFinal };
- void emitGetVirtualRegister(int src, JSValueRegs dst);
- void emitPutVirtualRegister(int dst, JSValueRegs src);
-
- int32_t getOperandConstantInt(int src);
- double getOperandConstantDouble(int src);
-
#if USE(JSVALUE32_64)
- bool getOperandConstantInt(int op1, int op2, int& op, int32_t& constant);
+ bool getOperandConstantImmediateInt(int op1, int op2, int& op, int32_t& constant);
void emitLoadTag(int index, RegisterID tag);
void emitLoadPayload(int index, RegisterID payload);
@@ -429,8 +399,13 @@ namespace JSC {
void emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex, RegisterID tag);
void compileGetByIdHotPath(const Identifier*);
+ void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, PropertyOffset cachedOffset);
+ void compileGetDirectOffset(JSObject* base, RegisterID resultTag, RegisterID resultPayload, PropertyOffset cachedOffset);
+ void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID offset, FinalObjectMode = MayBeFinal);
+ void compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, PropertyOffset cachedOffset);
// Arithmetic opcode helpers
+ void emitAdd32Constant(int dst, int op, int32_t constant, ResultType opType);
void emitSub32Constant(int dst, int op, int32_t constant, ResultType opType);
void emitBinaryDoubleOp(OpcodeID, int dst, int op1, int op2, OperandTypes, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters = true, bool op2IsInRegisters = true);
@@ -450,74 +425,72 @@ namespace JSC {
emitPutVirtualRegister(dst, payload);
}
+ int32_t getConstantOperandImmediateInt(int src);
+
Jump emitJumpIfJSCell(RegisterID);
Jump emitJumpIfBothJSCells(RegisterID, RegisterID, RegisterID);
void emitJumpSlowCaseIfJSCell(RegisterID);
void emitJumpSlowCaseIfNotJSCell(RegisterID);
void emitJumpSlowCaseIfNotJSCell(RegisterID, int VReg);
- Jump emitJumpIfInt(RegisterID);
- Jump emitJumpIfNotInt(RegisterID);
- Jump emitJumpIfNotInt(RegisterID, RegisterID, RegisterID scratch);
- PatchableJump emitPatchableJumpIfNotInt(RegisterID);
- void emitJumpSlowCaseIfNotInt(RegisterID);
- void emitJumpSlowCaseIfNotNumber(RegisterID);
- void emitJumpSlowCaseIfNotInt(RegisterID, RegisterID, RegisterID scratch);
+ Jump emitJumpIfImmediateInteger(RegisterID);
+ Jump emitJumpIfNotImmediateInteger(RegisterID);
+ Jump emitJumpIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);
+ void emitJumpSlowCaseIfNotImmediateInteger(RegisterID);
+ void emitJumpSlowCaseIfNotImmediateNumber(RegisterID);
+ void emitJumpSlowCaseIfNotImmediateIntegers(RegisterID, RegisterID, RegisterID);
- void emitTagBool(RegisterID);
+ void emitFastArithReTagImmediate(RegisterID src, RegisterID dest);
+
+ void emitTagAsBoolImmediate(RegisterID reg);
+ void compileBinaryArithOp(OpcodeID, int dst, int src1, int src2, OperandTypes opi);
+ void compileBinaryArithOpSlowCase(Instruction*, OpcodeID, Vector<SlowCaseEntry>::iterator&, int dst, int src1, int src2, OperandTypes, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase);
void compileGetByIdHotPath(int baseVReg, const Identifier*);
+ void compileGetDirectOffset(RegisterID base, RegisterID result, PropertyOffset cachedOffset);
+ void compileGetDirectOffset(JSObject* base, RegisterID result, PropertyOffset cachedOffset);
+ void compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch, FinalObjectMode = MayBeFinal);
+ void compilePutDirectOffset(RegisterID base, RegisterID value, PropertyOffset cachedOffset);
#endif // USE(JSVALUE32_64)
void emit_compareAndJump(OpcodeID, int op1, int op2, unsigned target, RelationalCondition);
void emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondition, size_t (JIT_OPERATION *operation)(ExecState*, EncodedJSValue, EncodedJSValue), bool invert, Vector<SlowCaseEntry>::iterator&);
-
- void assertStackPointerOffset();
+ void emit_op_touch_entry(Instruction*);
void emit_op_add(Instruction*);
void emit_op_bitand(Instruction*);
void emit_op_bitor(Instruction*);
void emit_op_bitxor(Instruction*);
void emit_op_call(Instruction*);
- void emit_op_tail_call(Instruction*);
void emit_op_call_eval(Instruction*);
void emit_op_call_varargs(Instruction*);
- void emit_op_tail_call_varargs(Instruction*);
- void emit_op_construct_varargs(Instruction*);
+ void emit_op_captured_mov(Instruction*);
void emit_op_catch(Instruction*);
void emit_op_construct(Instruction*);
+ void emit_op_get_callee(Instruction*);
void emit_op_create_this(Instruction*);
void emit_op_to_this(Instruction*);
- void emit_op_create_direct_arguments(Instruction*);
- void emit_op_create_scoped_arguments(Instruction*);
- void emit_op_create_out_of_band_arguments(Instruction*);
- void emit_op_copy_rest(Instruction*);
- void emit_op_get_rest_length(Instruction*);
- void emit_op_check_tdz(Instruction*);
- void emit_op_assert(Instruction*);
- void emit_op_save(Instruction*);
- void emit_op_resume(Instruction*);
+ void emit_op_create_arguments(Instruction*);
void emit_op_debug(Instruction*);
void emit_op_del_by_id(Instruction*);
void emit_op_div(Instruction*);
void emit_op_end(Instruction*);
void emit_op_enter(Instruction*);
- void emit_op_get_scope(Instruction*);
+ void emit_op_create_activation(Instruction*);
void emit_op_eq(Instruction*);
void emit_op_eq_null(Instruction*);
void emit_op_get_by_id(Instruction*);
void emit_op_get_arguments_length(Instruction*);
void emit_op_get_by_val(Instruction*);
void emit_op_get_argument_by_val(Instruction*);
+ void emit_op_get_by_pname(Instruction*);
void emit_op_init_lazy_reg(Instruction*);
- void emit_op_overrides_has_instance(Instruction*);
+ void emit_op_check_has_instance(Instruction*);
void emit_op_instanceof(Instruction*);
- void emit_op_instanceof_custom(Instruction*);
void emit_op_is_undefined(Instruction*);
void emit_op_is_boolean(Instruction*);
void emit_op_is_number(Instruction*);
void emit_op_is_string(Instruction*);
- void emit_op_is_object(Instruction*);
void emit_op_jeq_null(Instruction*);
void emit_op_jfalse(Instruction*);
void emit_op_jmp(Instruction*);
@@ -533,7 +506,6 @@ namespace JSC {
void emit_op_jngreatereq(Instruction*);
void emit_op_jtrue(Instruction*);
void emit_op_loop_hint(Instruction*);
- void emit_op_watchdog(Instruction*);
void emit_op_lshift(Instruction*);
void emit_op_mod(Instruction*);
void emit_op_mov(Instruction*);
@@ -545,32 +517,28 @@ namespace JSC {
void emit_op_new_array_with_size(Instruction*);
void emit_op_new_array_buffer(Instruction*);
void emit_op_new_func(Instruction*);
+ void emit_op_new_captured_func(Instruction*);
void emit_op_new_func_exp(Instruction*);
- void emit_op_new_generator_func(Instruction*);
- void emit_op_new_generator_func_exp(Instruction*);
- void emit_op_new_arrow_func_exp(Instruction*);
void emit_op_new_object(Instruction*);
void emit_op_new_regexp(Instruction*);
+ void emit_op_get_pnames(Instruction*);
+ void emit_op_next_pname(Instruction*);
void emit_op_not(Instruction*);
void emit_op_nstricteq(Instruction*);
+ void emit_op_pop_scope(Instruction*);
void emit_op_dec(Instruction*);
void emit_op_inc(Instruction*);
void emit_op_profile_did_call(Instruction*);
void emit_op_profile_will_call(Instruction*);
- void emit_op_profile_type(Instruction*);
- void emit_op_profile_control_flow(Instruction*);
+ void emit_op_push_name_scope(Instruction*);
void emit_op_push_with_scope(Instruction*);
- void emit_op_create_lexical_environment(Instruction*);
- void emit_op_get_parent_scope(Instruction*);
void emit_op_put_by_id(Instruction*);
void emit_op_put_by_index(Instruction*);
void emit_op_put_by_val(Instruction*);
- void emit_op_put_getter_by_id(Instruction*);
- void emit_op_put_setter_by_id(Instruction*);
- void emit_op_put_getter_setter_by_id(Instruction*);
- void emit_op_put_getter_by_val(Instruction*);
- void emit_op_put_setter_by_val(Instruction*);
+ void emit_op_put_getter_setter(Instruction*);
+ void emit_op_init_global_const(Instruction*);
void emit_op_ret(Instruction*);
+ void emit_op_ret_object_or_this(Instruction*);
void emit_op_rshift(Instruction*);
void emit_op_strcat(Instruction*);
void emit_op_stricteq(Instruction*);
@@ -578,39 +546,27 @@ namespace JSC {
void emit_op_switch_char(Instruction*);
void emit_op_switch_imm(Instruction*);
void emit_op_switch_string(Instruction*);
+ void emit_op_tear_off_activation(Instruction*);
void emit_op_tear_off_arguments(Instruction*);
void emit_op_throw(Instruction*);
void emit_op_throw_static_error(Instruction*);
void emit_op_to_number(Instruction*);
- void emit_op_to_string(Instruction*);
void emit_op_to_primitive(Instruction*);
void emit_op_unexpected_load(Instruction*);
void emit_op_unsigned(Instruction*);
void emit_op_urshift(Instruction*);
- void emit_op_get_enumerable_length(Instruction*);
- void emit_op_has_generic_property(Instruction*);
- void emit_op_has_structure_property(Instruction*);
- void emit_op_has_indexed_property(Instruction*);
- void emit_op_get_direct_pname(Instruction*);
- void emit_op_get_property_enumerator(Instruction*);
- void emit_op_enumerator_structure_pname(Instruction*);
- void emit_op_enumerator_generic_pname(Instruction*);
- void emit_op_to_index_string(Instruction*);
void emitSlow_op_add(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_bitand(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_bitor(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_bitxor(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_call(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_tail_call(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_call_eval(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_call_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_tail_call_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_construct_varargs(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_captured_mov(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_construct(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_to_this(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_create_this(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_check_tdz(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_div(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_eq(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_get_callee(Instruction*, Vector<SlowCaseEntry>::iterator&);
@@ -618,8 +574,9 @@ namespace JSC {
void emitSlow_op_get_arguments_length(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_get_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_get_argument_by_val(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_get_by_pname(Instruction*, Vector<SlowCaseEntry>::iterator&);
+ void emitSlow_op_check_has_instance(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_instanceof(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_instanceof_custom(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_jfalse(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_jless(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_jlesseq(Instruction*, Vector<SlowCaseEntry>::iterator&);
@@ -631,7 +588,6 @@ namespace JSC {
void emitSlow_op_jngreatereq(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_jtrue(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_loop_hint(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_watchdog(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_lshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_mul(Instruction*, Vector<SlowCaseEntry>::iterator&);
@@ -648,19 +604,13 @@ namespace JSC {
void emitSlow_op_stricteq(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_sub(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_to_number(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_to_string(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_to_primitive(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_unsigned(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_urshift(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_has_indexed_property(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_has_structure_property(Instruction*, Vector<SlowCaseEntry>::iterator&);
- void emitSlow_op_get_direct_pname(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emit_op_resolve_scope(Instruction*);
void emit_op_get_from_scope(Instruction*);
void emit_op_put_to_scope(Instruction*);
- void emit_op_get_from_arguments(Instruction*);
- void emit_op_put_to_arguments(Instruction*);
void emitSlow_op_resolve_scope(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_get_from_scope(Instruction*, Vector<SlowCaseEntry>::iterator&);
void emitSlow_op_put_to_scope(Instruction*, Vector<SlowCaseEntry>::iterator&);
@@ -668,32 +618,33 @@ namespace JSC {
void emitRightShift(Instruction*, bool isUnsigned);
void emitRightShiftSlowCase(Instruction*, Vector<SlowCaseEntry>::iterator&, bool isUnsigned);
- void emitNewFuncCommon(Instruction*);
- void emitNewFuncExprCommon(Instruction*);
void emitVarInjectionCheck(bool needsVarInjectionChecks);
- void emitResolveClosure(int dst, int scope, bool needsVarInjectionChecks, unsigned depth);
+ void emitResolveClosure(int dst, bool needsVarInjectionChecks, unsigned depth);
void emitLoadWithStructureCheck(int scope, Structure** structureSlot);
+ void emitGetGlobalProperty(uintptr_t* operandSlot);
+ void emitGetGlobalVar(uintptr_t operand);
+ void emitGetClosureVar(int scope, uintptr_t operand);
+ void emitPutGlobalProperty(uintptr_t* operandSlot, int value);
#if USE(JSVALUE64)
- void emitGetVarFromPointer(JSValue* operand, GPRReg);
- void emitGetVarFromIndirectPointer(JSValue** operand, GPRReg);
+ void emitNotifyWrite(RegisterID value, RegisterID scratch, VariableWatchpointSet*);
#else
- void emitGetVarFromIndirectPointer(JSValue** operand, GPRReg tag, GPRReg payload);
- void emitGetVarFromPointer(JSValue* operand, GPRReg tag, GPRReg payload);
+ void emitNotifyWrite(RegisterID tag, RegisterID payload, RegisterID scratch, VariableWatchpointSet*);
#endif
- void emitGetClosureVar(int scope, uintptr_t operand);
- void emitNotifyWrite(WatchpointSet*);
- void emitNotifyWrite(GPRReg pointerToSet);
- void emitPutGlobalVariable(JSValue* operand, int value, WatchpointSet*);
- void emitPutGlobalVariableIndirect(JSValue** addressOfOperand, int value, WatchpointSet**);
- void emitPutClosureVar(int scope, uintptr_t operand, int value, WatchpointSet*);
+ void emitPutGlobalVar(uintptr_t operand, int value, VariableWatchpointSet*);
+ void emitPutClosureVar(int scope, uintptr_t operand, int value);
void emitInitRegister(int dst);
void emitPutIntToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry);
+ void emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister);
+ void emitGetFromCallFrameHeader32(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister);
+#if USE(JSVALUE64)
+ void emitGetFromCallFrameHeader64(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister);
+#endif
JSValue getConstantOperand(int src);
- bool isOperandConstantInt(int src);
- bool isOperandConstantChar(int src);
+ bool isOperandConstantImmediateInt(int src);
+ bool isOperandConstantImmediateChar(int src);
Jump getSlowCase(Vector<SlowCaseEntry>::iterator& iter)
{
@@ -710,13 +661,8 @@ namespace JSC {
++iter;
}
void linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&, int virtualRegisterIndex);
- void linkAllSlowCasesForBytecodeOffset(Vector<SlowCaseEntry>& slowCases,
- Vector<SlowCaseEntry>::iterator&, unsigned bytecodeOffset);
MacroAssembler::Call appendCallWithExceptionCheck(const FunctionPtr&);
-#if OS(WINDOWS) && CPU(X86_64)
- MacroAssembler::Call appendCallWithExceptionCheckAndSlowPathReturnType(const FunctionPtr&);
-#endif
MacroAssembler::Call appendCallWithCallFrameRollbackOnException(const FunctionPtr&);
MacroAssembler::Call appendCallWithExceptionCheckSetJSValueResult(const FunctionPtr&, int);
MacroAssembler::Call appendCallWithExceptionCheckSetJSValueResultWithProfile(const FunctionPtr&, int);
@@ -725,11 +671,9 @@ namespace JSC {
MacroAssembler::Call callOperation(C_JITOperation_E);
MacroAssembler::Call callOperation(C_JITOperation_EO, GPRReg);
- MacroAssembler::Call callOperation(C_JITOperation_EL, GPRReg);
- MacroAssembler::Call callOperation(C_JITOperation_EL, TrustedImmPtr);
MacroAssembler::Call callOperation(C_JITOperation_ESt, Structure*);
MacroAssembler::Call callOperation(C_JITOperation_EZ, int32_t);
- MacroAssembler::Call callOperation(Z_JITOperation_EJZZ, GPRReg, int32_t, int32_t);
+ MacroAssembler::Call callOperation(F_JITOperation_EJZ, GPRReg, int32_t);
MacroAssembler::Call callOperation(J_JITOperation_E, int);
MacroAssembler::Call callOperation(J_JITOperation_EAapJ, int, ArrayAllocationProfile*, GPRReg);
MacroAssembler::Call callOperation(J_JITOperation_EAapJcpZ, int, ArrayAllocationProfile*, GPRReg, int32_t);
@@ -738,20 +682,12 @@ namespace JSC {
MacroAssembler::Call callOperation(V_JITOperation_EC, JSCell*);
MacroAssembler::Call callOperation(J_JITOperation_EJ, int, GPRReg);
#if USE(JSVALUE64)
- MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_ESsiJI, int, StructureStubInfo*, GPRReg, UniquedStringImpl*);
+ MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_ESsiJI, int, StructureStubInfo*, GPRReg, StringImpl*);
#else
- MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_ESsiJI, int, StructureStubInfo*, GPRReg, GPRReg, UniquedStringImpl*);
+ MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_ESsiJI, int, StructureStubInfo*, GPRReg, GPRReg, StringImpl*);
#endif
MacroAssembler::Call callOperation(J_JITOperation_EJIdc, int, GPRReg, const Identifier*);
MacroAssembler::Call callOperation(J_JITOperation_EJJ, int, GPRReg, GPRReg);
- MacroAssembler::Call callOperation(J_JITOperation_EJJAp, int, GPRReg, GPRReg, ArrayProfile*);
- MacroAssembler::Call callOperation(J_JITOperation_EJJBy, int, GPRReg, GPRReg, ByValInfo*);
- MacroAssembler::Call callOperation(Z_JITOperation_EJOJ, GPRReg, GPRReg, GPRReg);
- MacroAssembler::Call callOperation(C_JITOperation_EJsc, GPRReg);
- MacroAssembler::Call callOperation(J_JITOperation_EJscC, int, GPRReg, JSCell*);
- MacroAssembler::Call callOperation(J_JITOperation_EJscCJ, int, GPRReg, JSCell*, GPRReg);
- MacroAssembler::Call callOperation(C_JITOperation_EJscZ, GPRReg, int32_t);
- MacroAssembler::Call callOperation(C_JITOperation_EJscZ, int, GPRReg, int32_t);
#if USE(JSVALUE64)
MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_EJJ, int, GPRReg, GPRReg);
#else
@@ -759,89 +695,61 @@ namespace JSC {
#endif
MacroAssembler::Call callOperation(J_JITOperation_EP, int, void*);
MacroAssembler::Call callOperation(WithProfileTag, J_JITOperation_EPc, int, Instruction*);
- MacroAssembler::Call callOperation(J_JITOperation_EPc, int, Instruction*);
MacroAssembler::Call callOperation(J_JITOperation_EZ, int, int32_t);
- MacroAssembler::Call callOperation(J_JITOperation_EZZ, int, int32_t, int32_t);
- MacroAssembler::Call callOperation(P_JITOperation_E);
MacroAssembler::Call callOperation(P_JITOperation_EJS, GPRReg, size_t);
+ MacroAssembler::Call callOperation(P_JITOperation_EZ, int32_t);
MacroAssembler::Call callOperation(S_JITOperation_ECC, RegisterID, RegisterID);
MacroAssembler::Call callOperation(S_JITOperation_EJ, RegisterID);
MacroAssembler::Call callOperation(S_JITOperation_EJJ, RegisterID, RegisterID);
MacroAssembler::Call callOperation(S_JITOperation_EOJss, RegisterID, RegisterID);
- MacroAssembler::Call callOperation(Sprt_JITOperation_EZ, int32_t);
MacroAssembler::Call callOperation(V_JITOperation_E);
MacroAssembler::Call callOperation(V_JITOperation_EC, RegisterID);
MacroAssembler::Call callOperation(V_JITOperation_ECC, RegisterID, RegisterID);
- MacroAssembler::Call callOperation(V_JITOperation_ECIZC, RegisterID, UniquedStringImpl*, int32_t, RegisterID);
- MacroAssembler::Call callOperation(V_JITOperation_ECIZCC, RegisterID, UniquedStringImpl*, int32_t, RegisterID, RegisterID);
-#if USE(JSVALUE64)
- MacroAssembler::Call callOperation(V_JITOperation_ECJZC, RegisterID, RegisterID, int32_t, RegisterID);
-#else
- MacroAssembler::Call callOperation(V_JITOperation_ECJZC, RegisterID, RegisterID, RegisterID, int32_t, RegisterID);
-#endif
- MacroAssembler::Call callOperation(J_JITOperation_EE, RegisterID);
- MacroAssembler::Call callOperation(V_JITOperation_EZSymtabJ, int, SymbolTable*, RegisterID);
- MacroAssembler::Call callOperation(J_JITOperation_EZSymtabJ, int, SymbolTable*, RegisterID);
+ MacroAssembler::Call callOperation(V_JITOperation_ECICC, RegisterID, const Identifier*, RegisterID, RegisterID);
+ MacroAssembler::Call callOperation(V_JITOperation_EIdJZ, const Identifier*, RegisterID, int32_t);
MacroAssembler::Call callOperation(V_JITOperation_EJ, RegisterID);
- MacroAssembler::Call callOperationNoExceptionCheck(Z_JITOperation_E);
#if USE(JSVALUE64)
MacroAssembler::Call callOperationNoExceptionCheck(V_JITOperation_EJ, RegisterID);
#else
MacroAssembler::Call callOperationNoExceptionCheck(V_JITOperation_EJ, RegisterID, RegisterID);
#endif
+ MacroAssembler::Call callOperation(V_JITOperation_EJIdJJ, RegisterID, const Identifier*, RegisterID, RegisterID);
#if USE(JSVALUE64)
- MacroAssembler::Call callOperation(F_JITOperation_EFJZZ, RegisterID, RegisterID, int32_t, RegisterID);
- MacroAssembler::Call callOperation(V_JITOperation_ESsiJJI, StructureStubInfo*, RegisterID, RegisterID, UniquedStringImpl*);
- MacroAssembler::Call callOperation(V_JITOperation_ECIZJJ, RegisterID, UniquedStringImpl*, int32_t, RegisterID, RegisterID);
+ MacroAssembler::Call callOperation(F_JITOperation_EFJJ, RegisterID, RegisterID, RegisterID);
+ MacroAssembler::Call callOperation(V_JITOperation_ESsiJJI, StructureStubInfo*, RegisterID, RegisterID, StringImpl*);
#else
- MacroAssembler::Call callOperation(V_JITOperation_ESsiJJI, StructureStubInfo*, RegisterID, RegisterID, RegisterID, RegisterID, UniquedStringImpl*);
+ MacroAssembler::Call callOperation(V_JITOperation_ESsiJJI, StructureStubInfo*, RegisterID, RegisterID, RegisterID, RegisterID, StringImpl*);
#endif
MacroAssembler::Call callOperation(V_JITOperation_EJJJ, RegisterID, RegisterID, RegisterID);
- MacroAssembler::Call callOperation(V_JITOperation_EJJJAp, RegisterID, RegisterID, RegisterID, ArrayProfile*);
- MacroAssembler::Call callOperation(V_JITOperation_EJJJBy, RegisterID, RegisterID, RegisterID, ByValInfo*);
MacroAssembler::Call callOperation(V_JITOperation_EJZJ, RegisterID, int32_t, RegisterID);
MacroAssembler::Call callOperation(V_JITOperation_EJZ, RegisterID, int32_t);
MacroAssembler::Call callOperation(V_JITOperation_EPc, Instruction*);
MacroAssembler::Call callOperation(V_JITOperation_EZ, int32_t);
- MacroAssembler::Call callOperation(V_JITOperation_EZJ, int, GPRReg);
MacroAssembler::Call callOperationWithCallFrameRollbackOnException(J_JITOperation_E);
MacroAssembler::Call callOperationWithCallFrameRollbackOnException(V_JITOperation_ECb, CodeBlock*);
MacroAssembler::Call callOperationWithCallFrameRollbackOnException(Z_JITOperation_E);
#if USE(JSVALUE32_64)
- MacroAssembler::Call callOperation(F_JITOperation_EFJZZ, RegisterID, RegisterID, RegisterID, int32_t, RegisterID);
- MacroAssembler::Call callOperation(Z_JITOperation_EJZZ, GPRReg, GPRReg, int32_t, int32_t);
+ MacroAssembler::Call callOperation(F_JITOperation_EFJJ, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID);
+ MacroAssembler::Call callOperation(F_JITOperation_EJZ, GPRReg, GPRReg, int32_t);
MacroAssembler::Call callOperation(J_JITOperation_EAapJ, int, ArrayAllocationProfile*, GPRReg, GPRReg);
MacroAssembler::Call callOperation(J_JITOperation_EJ, int, GPRReg, GPRReg);
MacroAssembler::Call callOperation(J_JITOperation_EJIdc, int, GPRReg, GPRReg, const Identifier*);
MacroAssembler::Call callOperation(J_JITOperation_EJJ, int, GPRReg, GPRReg, GPRReg, GPRReg);
- MacroAssembler::Call callOperation(Z_JITOperation_EJOJ, GPRReg, GPRReg, GPRReg, GPRReg, GPRReg);
- MacroAssembler::Call callOperation(J_JITOperation_EJJAp, int, GPRReg, GPRReg, GPRReg, GPRReg, ArrayProfile*);
- MacroAssembler::Call callOperation(J_JITOperation_EJJBy, int, GPRReg, GPRReg, GPRReg, GPRReg, ByValInfo*);
MacroAssembler::Call callOperation(P_JITOperation_EJS, GPRReg, GPRReg, size_t);
MacroAssembler::Call callOperation(S_JITOperation_EJ, RegisterID, RegisterID);
MacroAssembler::Call callOperation(S_JITOperation_EJJ, RegisterID, RegisterID, RegisterID, RegisterID);
- MacroAssembler::Call callOperation(V_JITOperation_EZSymtabJ, int, SymbolTable*, RegisterID, RegisterID);
+ MacroAssembler::Call callOperation(V_JITOperation_EIdJZ, const Identifier*, RegisterID, RegisterID, int32_t);
MacroAssembler::Call callOperation(V_JITOperation_EJ, RegisterID, RegisterID);
MacroAssembler::Call callOperation(V_JITOperation_EJJJ, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID);
- MacroAssembler::Call callOperation(V_JITOperation_EJJJAp, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID, ArrayProfile*);
- MacroAssembler::Call callOperation(V_JITOperation_EJJJBy, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID, RegisterID, ByValInfo*);
MacroAssembler::Call callOperation(V_JITOperation_EJZ, RegisterID, RegisterID, int32_t);
MacroAssembler::Call callOperation(V_JITOperation_EJZJ, RegisterID, RegisterID, int32_t, RegisterID, RegisterID);
- MacroAssembler::Call callOperation(V_JITOperation_EZJ, int32_t, RegisterID, RegisterID);
- MacroAssembler::Call callOperation(J_JITOperation_EJscCJ, int, GPRReg, JSCell*, GPRReg, GPRReg);
#endif
- template<typename SnippetGenerator>
- void emitBitBinaryOpFastPath(Instruction* currentInstruction);
-
- void emitRightShiftFastPath(Instruction* currentInstruction, OpcodeID);
-
Jump checkStructure(RegisterID reg, Structure* structure);
void updateTopCallFrame();
Call emitNakedCall(CodePtr function = CodePtr());
- Call emitNakedTailCall(CodePtr function = CodePtr());
// Loads the character value of a single character string into dst.
void emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures);
@@ -894,7 +802,7 @@ namespace JSC {
Vector<JITGetByIdGenerator> m_getByIds;
Vector<JITPutByIdGenerator> m_putByIds;
Vector<ByValCompilationInfo> m_byValCompilationInfo;
- Vector<CallCompilationInfo> m_callCompilationInfo;
+ Vector<StructureStubCompilationInfo> m_callStructureStubCompilationInfo;
Vector<JumpTable> m_jmpTable;
unsigned m_bytecodeOffset;
@@ -909,13 +817,11 @@ namespace JSC {
unsigned m_byValInstructionIndex;
unsigned m_callLinkInfoIndex;
- std::unique_ptr<JITDisassembler> m_disassembler;
+ OwnPtr<JITDisassembler> m_disassembler;
RefPtr<Profiler::Compilation> m_compilation;
WeakRandom m_randomGenerator;
static CodeRef stringGetByValStubGenerator(VM*);
- PCToCodeOriginMapBuilder m_pcToCodeOriginMapBuilder;
-
bool m_canBeOptimized;
bool m_canBeOptimizedOrInlined;
bool m_shouldEmitProfiling;
diff --git a/Source/JavaScriptCore/jit/JITAddGenerator.cpp b/Source/JavaScriptCore/jit/JITAddGenerator.cpp
deleted file mode 100644
index 5d91a516a..000000000
--- a/Source/JavaScriptCore/jit/JITAddGenerator.cpp
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "JITAddGenerator.h"
-
-#if ENABLE(JIT)
-
-namespace JSC {
-
-void JITAddGenerator::generateFastPath(CCallHelpers& jit)
-{
- ASSERT(m_scratchGPR != InvalidGPRReg);
- ASSERT(m_scratchGPR != m_left.payloadGPR());
- ASSERT(m_scratchGPR != m_right.payloadGPR());
-#if USE(JSVALUE32_64)
- ASSERT(m_scratchGPR != m_left.tagGPR());
- ASSERT(m_scratchGPR != m_right.tagGPR());
- ASSERT(m_scratchFPR != InvalidFPRReg);
-#endif
-
- ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());
-
- if (!m_leftOperand.mightBeNumber() || !m_rightOperand.mightBeNumber()) {
- ASSERT(!m_didEmitFastPath);
- return;
- }
-
- m_didEmitFastPath = true;
-
- if (m_leftOperand.isConstInt32() || m_rightOperand.isConstInt32()) {
- JSValueRegs var = m_leftOperand.isConstInt32() ? m_right : m_left;
- SnippetOperand& varOpr = m_leftOperand.isConstInt32() ? m_rightOperand : m_leftOperand;
- SnippetOperand& constOpr = m_leftOperand.isConstInt32() ? m_leftOperand : m_rightOperand;
-
- // Try to do intVar + intConstant.
- CCallHelpers::Jump notInt32 = jit.branchIfNotInt32(var);
-
- m_slowPathJumpList.append(jit.branchAdd32(CCallHelpers::Overflow, var.payloadGPR(), CCallHelpers::Imm32(constOpr.asConstInt32()), m_scratchGPR));
-
- jit.boxInt32(m_scratchGPR, m_result);
- m_endJumpList.append(jit.jump());
-
- if (!jit.supportsFloatingPoint()) {
- m_slowPathJumpList.append(notInt32);
- return;
- }
-
- // Try to do doubleVar + double(intConstant).
- notInt32.link(&jit);
- if (!varOpr.definitelyIsNumber())
- m_slowPathJumpList.append(jit.branchIfNotNumber(var, m_scratchGPR));
-
- jit.unboxDoubleNonDestructive(var, m_leftFPR, m_scratchGPR, m_scratchFPR);
-
- jit.move(CCallHelpers::Imm32(constOpr.asConstInt32()), m_scratchGPR);
- jit.convertInt32ToDouble(m_scratchGPR, m_rightFPR);
-
- // Fall thru to doubleVar + doubleVar.
-
- } else {
- ASSERT(!m_leftOperand.isConstInt32() && !m_rightOperand.isConstInt32());
- CCallHelpers::Jump leftNotInt;
- CCallHelpers::Jump rightNotInt;
-
- // Try to do intVar + intVar.
- leftNotInt = jit.branchIfNotInt32(m_left);
- rightNotInt = jit.branchIfNotInt32(m_right);
-
- m_slowPathJumpList.append(jit.branchAdd32(CCallHelpers::Overflow, m_right.payloadGPR(), m_left.payloadGPR(), m_scratchGPR));
-
- jit.boxInt32(m_scratchGPR, m_result);
- m_endJumpList.append(jit.jump());
-
- if (!jit.supportsFloatingPoint()) {
- m_slowPathJumpList.append(leftNotInt);
- m_slowPathJumpList.append(rightNotInt);
- return;
- }
-
- leftNotInt.link(&jit);
- if (!m_leftOperand.definitelyIsNumber())
- m_slowPathJumpList.append(jit.branchIfNotNumber(m_left, m_scratchGPR));
- if (!m_rightOperand.definitelyIsNumber())
- m_slowPathJumpList.append(jit.branchIfNotNumber(m_right, m_scratchGPR));
-
- jit.unboxDoubleNonDestructive(m_left, m_leftFPR, m_scratchGPR, m_scratchFPR);
- CCallHelpers::Jump rightIsDouble = jit.branchIfNotInt32(m_right);
-
- jit.convertInt32ToDouble(m_right.payloadGPR(), m_rightFPR);
- CCallHelpers::Jump rightWasInteger = jit.jump();
-
- rightNotInt.link(&jit);
- if (!m_rightOperand.definitelyIsNumber())
- m_slowPathJumpList.append(jit.branchIfNotNumber(m_right, m_scratchGPR));
-
- jit.convertInt32ToDouble(m_left.payloadGPR(), m_leftFPR);
-
- rightIsDouble.link(&jit);
- jit.unboxDoubleNonDestructive(m_right, m_rightFPR, m_scratchGPR, m_scratchFPR);
-
- rightWasInteger.link(&jit);
-
- // Fall thru to doubleVar + doubleVar.
- }
-
- // Do doubleVar + doubleVar.
- jit.addDouble(m_rightFPR, m_leftFPR);
- jit.boxDouble(m_leftFPR, m_result);
-}
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITAddGenerator.h b/Source/JavaScriptCore/jit/JITAddGenerator.h
deleted file mode 100644
index c28db7209..000000000
--- a/Source/JavaScriptCore/jit/JITAddGenerator.h
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef JITAddGenerator_h
-#define JITAddGenerator_h
-
-#if ENABLE(JIT)
-
-#include "CCallHelpers.h"
-#include "SnippetOperand.h"
-
-namespace JSC {
-
-class JITAddGenerator {
-public:
- JITAddGenerator(SnippetOperand leftOperand, SnippetOperand rightOperand,
- JSValueRegs result, JSValueRegs left, JSValueRegs right,
- FPRReg leftFPR, FPRReg rightFPR, GPRReg scratchGPR, FPRReg scratchFPR)
- : m_leftOperand(leftOperand)
- , m_rightOperand(rightOperand)
- , m_result(result)
- , m_left(left)
- , m_right(right)
- , m_leftFPR(leftFPR)
- , m_rightFPR(rightFPR)
- , m_scratchGPR(scratchGPR)
- , m_scratchFPR(scratchFPR)
- {
- ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());
- }
-
- void generateFastPath(CCallHelpers&);
-
- bool didEmitFastPath() const { return m_didEmitFastPath; }
- CCallHelpers::JumpList& endJumpList() { return m_endJumpList; }
- CCallHelpers::JumpList& slowPathJumpList() { return m_slowPathJumpList; }
-
-private:
- SnippetOperand m_leftOperand;
- SnippetOperand m_rightOperand;
- JSValueRegs m_result;
- JSValueRegs m_left;
- JSValueRegs m_right;
- FPRReg m_leftFPR;
- FPRReg m_rightFPR;
- GPRReg m_scratchGPR;
- FPRReg m_scratchFPR;
- bool m_didEmitFastPath { false };
-
- CCallHelpers::JumpList m_endJumpList;
- CCallHelpers::JumpList m_slowPathJumpList;
-};
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
-
-#endif // JITAddGenerator_h
diff --git a/Source/JavaScriptCore/jit/JITArithmetic.cpp b/Source/JavaScriptCore/jit/JITArithmetic.cpp
index 2751720b7..b9c70570c 100644
--- a/Source/JavaScriptCore/jit/JITArithmetic.cpp
+++ b/Source/JavaScriptCore/jit/JITArithmetic.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,22 +29,13 @@
#include "JIT.h"
#include "CodeBlock.h"
-#include "JITAddGenerator.h"
-#include "JITBitAndGenerator.h"
-#include "JITBitOrGenerator.h"
-#include "JITBitXorGenerator.h"
-#include "JITDivGenerator.h"
#include "JITInlines.h"
-#include "JITLeftShiftGenerator.h"
-#include "JITMulGenerator.h"
-#include "JITNegGenerator.h"
#include "JITOperations.h"
-#include "JITRightShiftGenerator.h"
-#include "JITSubGenerator.h"
+#include "JITStubs.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "Interpreter.h"
-#include "JSCInlines.h"
+#include "Operations.h"
#include "ResultType.h"
#include "SamplingTool.h"
#include "SlowPathCall.h"
@@ -198,15 +189,187 @@ void JIT::emitSlow_op_jngreatereq(Instruction* currentInstruction, Vector<SlowCa
#if USE(JSVALUE64)
+void JIT::emit_op_negate(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int src = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(src, regT0);
+
+ Jump srcNotInt = emitJumpIfNotImmediateInteger(regT0);
+ addSlowCase(branchTest32(Zero, regT0, TrustedImm32(0x7fffffff)));
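+ // (The mask test routes both 0 and 0x80000000 (INT_MIN) to the slow path:
+ // neg32(0) would produce +0 where JS requires -0, and neg32(INT_MIN)
+ // overflows; every other int32 has at least one bit set in 0x7fffffff.)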
+ neg32(regT0);
+ emitFastArithReTagImmediate(regT0, regT0);
+
+ Jump end = jump();
+
+ srcNotInt.link(this);
+ emitJumpSlowCaseIfNotImmediateNumber(regT0);
+
+ move(TrustedImm64((int64_t)0x8000000000000000ull), regT1);
+ xor64(regT1, regT0);
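+ // (Double negation sketch: flipping bit 63 toggles only the IEEE-754 sign
+ // bit, e.g. bits(1.5) ^ 0x8000000000000000 == bits(-1.5); this assumes the
+ // JSVALUE64 boxing leaves the double's bit layout intact.)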
+
+ end.link(this);
+ emitPutVirtualRegister(dst);
+}
+
+void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter); // 0x7fffffff check
+ linkSlowCase(iter); // double check
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_negate);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_lshift(Instruction* currentInstruction)
+{
+ int result = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
+
+ emitGetVirtualRegisters(op1, regT0, op2, regT2);
+ // FIXME: would we be better off using 'emitJumpSlowCaseIfNotImmediateIntegers'? We *probably* ought to be consistent.
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT2);
+ emitFastArithImmToInt(regT0);
+ emitFastArithImmToInt(regT2);
+ lshift32(regT2, regT0);
+ emitFastArithReTagImmediate(regT0, regT0);
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_lshift);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_rshift(Instruction* currentInstruction)
+{
+ int result = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateInt(op2)) {
+ // isOperandConstantImmediateInt(op2) => 1 SlowCase
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ // Mask with 0x1f as per ecma-262 11.7.2 step 7.
+ rshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
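+ // (E.g. a constant shift amount of 33 behaves as a shift by 1, since the
+ // spec reduces the count modulo 32: 33 & 0x1f == 1.)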
+ } else {
+ emitGetVirtualRegisters(op1, regT0, op2, regT2);
+ if (supportsFloatingPointTruncate()) {
+ Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
+ // supportsFloatingPoint() && USE(JSVALUE64) => 3 SlowCases
+ addSlowCase(emitJumpIfNotImmediateNumber(regT0));
+ add64(tagTypeNumberRegister, regT0);
+ move64ToDouble(regT0, fpRegT0);
+ addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
+ lhsIsInt.link(this);
+ emitJumpSlowCaseIfNotImmediateInteger(regT2);
+ } else {
+ // !supportsFloatingPoint() => 2 SlowCases
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT2);
+ }
+ emitFastArithImmToInt(regT2);
+ rshift32(regT2, regT0);
+ }
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int op2 = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter);
+ else {
+ if (supportsFloatingPointTruncate()) {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ } else {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ }
+ }
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_rshift);
+ slowPathCall.call();
+}
+
+void JIT::emit_op_urshift(Instruction* currentInstruction)
+{
+ int result = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateInt(op2)) {
+ // isOperandConstantImmediateInt(op2) => 1 SlowCase
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ // Mask with 0x1f as per ecma-262 11.7.2 step 7.
+ urshift32(Imm32(getConstantOperandImmediateInt(op2) & 0x1f), regT0);
+ } else {
+ emitGetVirtualRegisters(op1, regT0, op2, regT2);
+ if (supportsFloatingPointTruncate()) {
+ Jump lhsIsInt = emitJumpIfImmediateInteger(regT0);
+ // supportsFloatingPoint() && USE(JSVALUE64) => 3 SlowCases
+ addSlowCase(emitJumpIfNotImmediateNumber(regT0));
+ add64(tagTypeNumberRegister, regT0);
+ move64ToDouble(regT0, fpRegT0);
+ addSlowCase(branchTruncateDoubleToInt32(fpRegT0, regT0));
+ lhsIsInt.link(this);
+ emitJumpSlowCaseIfNotImmediateInteger(regT2);
+ } else {
+ // !supportsFloatingPoint() => 2 SlowCases
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT2);
+ }
+ emitFastArithImmToInt(regT2);
+ urshift32(regT2, regT0);
+ }
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int op2 = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter);
+ else {
+ if (supportsFloatingPointTruncate()) {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ } else {
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ }
+ }
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_urshift);
+ slowPathCall.call();
+}
+
void JIT::emit_op_unsigned(Instruction* currentInstruction)
{
int result = currentInstruction[1].u.operand;
int op1 = currentInstruction[2].u.operand;
emitGetVirtualRegister(op1, regT0);
- emitJumpSlowCaseIfNotInt(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
addSlowCase(branch32(LessThan, regT0, TrustedImm32(0)));
- emitTagInt(regT0, regT0);
+ emitFastArithReTagImmediate(regT0, regT0);
emitPutVirtualRegister(result, regT0);
}
@@ -226,7 +389,7 @@ void JIT::emit_compareAndJump(OpcodeID, int op1, int op2, unsigned target, Relat
// - constant int immediate to int immediate
// - int immediate to int immediate
- if (isOperandConstantChar(op1)) {
+ if (isOperandConstantImmediateChar(op1)) {
emitGetVirtualRegister(op2, regT0);
addSlowCase(emitJumpIfNotJSCell(regT0));
JumpList failures;
@@ -235,7 +398,7 @@ void JIT::emit_compareAndJump(OpcodeID, int op1, int op2, unsigned target, Relat
addJump(branch32(commute(condition), regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
return;
}
- if (isOperandConstantChar(op2)) {
+ if (isOperandConstantImmediateChar(op2)) {
emitGetVirtualRegister(op1, regT0);
addSlowCase(emitJumpIfNotJSCell(regT0));
JumpList failures;
@@ -244,20 +407,20 @@ void JIT::emit_compareAndJump(OpcodeID, int op1, int op2, unsigned target, Relat
addJump(branch32(condition, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
return;
}
- if (isOperandConstantInt(op2)) {
+ if (isOperandConstantImmediateInt(op2)) {
emitGetVirtualRegister(op1, regT0);
- emitJumpSlowCaseIfNotInt(regT0);
- int32_t op2imm = getOperandConstantInt(op2);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ int32_t op2imm = getConstantOperandImmediateInt(op2);
addJump(branch32(condition, regT0, Imm32(op2imm)), target);
- } else if (isOperandConstantInt(op1)) {
+ } else if (isOperandConstantImmediateInt(op1)) {
emitGetVirtualRegister(op2, regT1);
- emitJumpSlowCaseIfNotInt(regT1);
- int32_t op1imm = getOperandConstantInt(op1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+ int32_t op1imm = getConstantOperandImmediateInt(op1);
addJump(branch32(commute(condition), regT1, Imm32(op1imm)), target);
} else {
emitGetVirtualRegisters(op1, regT0, op2, regT1);
- emitJumpSlowCaseIfNotInt(regT0);
- emitJumpSlowCaseIfNotInt(regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
addJump(branch32(condition, regT0, regT1), target);
}
@@ -277,7 +440,7 @@ void JIT::emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondi
// - floating-point number to constant int immediate
// - constant int immediate to floating-point number
// - floating-point number to floating-point number.
- if (isOperandConstantChar(op1) || isOperandConstantChar(op2)) {
+ if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
@@ -290,11 +453,11 @@ void JIT::emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondi
return;
}
- if (isOperandConstantInt(op2)) {
+ if (isOperandConstantImmediateInt(op2)) {
linkSlowCase(iter);
if (supportsFloatingPoint()) {
- Jump fail1 = emitJumpIfNotNumber(regT0);
+ Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
add64(tagTypeNumberRegister, regT0);
move64ToDouble(regT0, fpRegT0);
@@ -313,11 +476,11 @@ void JIT::emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondi
emitGetVirtualRegister(op2, regT1);
callOperation(operation, regT0, regT1);
emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
- } else if (isOperandConstantInt(op1)) {
+ } else if (isOperandConstantImmediateInt(op1)) {
linkSlowCase(iter);
if (supportsFloatingPoint()) {
- Jump fail1 = emitJumpIfNotNumber(regT1);
+ Jump fail1 = emitJumpIfNotImmediateNumber(regT1);
add64(tagTypeNumberRegister, regT1);
move64ToDouble(regT1, fpRegT1);
@@ -340,9 +503,9 @@ void JIT::emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondi
linkSlowCase(iter);
if (supportsFloatingPoint()) {
- Jump fail1 = emitJumpIfNotNumber(regT0);
- Jump fail2 = emitJumpIfNotNumber(regT1);
- Jump fail3 = emitJumpIfInt(regT1);
+ Jump fail1 = emitJumpIfNotImmediateNumber(regT0);
+ Jump fail2 = emitJumpIfNotImmediateNumber(regT1);
+ Jump fail3 = emitJumpIfImmediateInteger(regT1);
add64(tagTypeNumberRegister, regT0);
add64(tagTypeNumberRegister, regT1);
move64ToDouble(regT0, fpRegT0);
@@ -363,14 +526,50 @@ void JIT::emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondi
}
}
+void JIT::emit_op_bitand(Instruction* currentInstruction)
+{
+ int result = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateInt(op1)) {
+ emitGetVirtualRegister(op2, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ int32_t imm = getConstantOperandImmediateInt(op1);
+ and64(Imm32(imm), regT0);
+ if (imm >= 0)
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+ } else if (isOperandConstantImmediateInt(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ int32_t imm = getConstantOperandImmediateInt(op2);
+ and64(Imm32(imm), regT0);
+ if (imm >= 0)
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+ } else {
+ emitGetVirtualRegisters(op1, regT0, op2, regT1);
+ and64(regT1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ }
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitand);
+ slowPathCall.call();
+}
+
void JIT::emit_op_inc(Instruction* currentInstruction)
{
int srcDst = currentInstruction[1].u.operand;
emitGetVirtualRegister(srcDst, regT0);
- emitJumpSlowCaseIfNotInt(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
addSlowCase(branchAdd32(Overflow, TrustedImm32(1), regT0));
- emitTagInt(regT0, regT0);
+ emitFastArithIntToImmNoCheck(regT0, regT0);
emitPutVirtualRegister(srcDst);
}
@@ -387,9 +586,9 @@ void JIT::emit_op_dec(Instruction* currentInstruction)
int srcDst = currentInstruction[1].u.operand;
emitGetVirtualRegister(srcDst, regT0);
- emitJumpSlowCaseIfNotInt(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
addSlowCase(branchSub32(Overflow, TrustedImm32(1), regT0));
- emitTagInt(regT0, regT0);
+ emitFastArithIntToImmNoCheck(regT0, regT0);
emitPutVirtualRegister(srcDst);
}
@@ -413,26 +612,24 @@ void JIT::emit_op_mod(Instruction* currentInstruction)
// Make sure registers are correct for x86 IDIV instructions.
ASSERT(regT0 == X86Registers::eax);
- auto edx = X86Registers::edx;
- auto ecx = X86Registers::ecx;
- ASSERT(regT4 != edx);
- ASSERT(regT4 != ecx);
-
- emitGetVirtualRegisters(op1, regT4, op2, ecx);
- emitJumpSlowCaseIfNotInt(regT4);
- emitJumpSlowCaseIfNotInt(ecx);
-
- move(regT4, regT0);
- addSlowCase(branchTest32(Zero, ecx));
- Jump denominatorNotNeg1 = branch32(NotEqual, ecx, TrustedImm32(-1));
+ ASSERT(regT1 == X86Registers::edx);
+ ASSERT(regT2 == X86Registers::ecx);
+
+ emitGetVirtualRegisters(op1, regT3, op2, regT2);
+ emitJumpSlowCaseIfNotImmediateInteger(regT3);
+ emitJumpSlowCaseIfNotImmediateInteger(regT2);
+
+ move(regT3, regT0);
+ addSlowCase(branchTest32(Zero, regT2));
+ Jump denominatorNotNeg1 = branch32(NotEqual, regT2, TrustedImm32(-1));
addSlowCase(branch32(Equal, regT0, TrustedImm32(-2147483647-1)));
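+ // (Guard sketch: INT_MIN / -1 needs a quotient of 2^31, which int32 cannot
+ // represent and which would raise #DE in idivl, so that operand pair takes
+ // the slow path before the cdq/idivl sequence below.)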
denominatorNotNeg1.link(this);
- x86ConvertToDoubleWord32();
- x86Div32(ecx);
- Jump numeratorPositive = branch32(GreaterThanOrEqual, regT4, TrustedImm32(0));
- addSlowCase(branchTest32(Zero, edx));
+ m_assembler.cdq();
+ m_assembler.idivl_r(regT2);
+ Jump numeratorPositive = branch32(GreaterThanOrEqual, regT3, TrustedImm32(0));
+ addSlowCase(branchTest32(Zero, regT1));
numeratorPositive.link(this);
- emitTagInt(edx, regT0);
+ emitFastArithReTagImmediate(regT1, regT0);
emitPutVirtualRegister(result);
}
@@ -464,417 +661,315 @@ void JIT::emitSlow_op_mod(Instruction*, Vector<SlowCaseEntry>::iterator&)
/* ------------------------------ END: OP_MOD ------------------------------ */
-#endif // USE(JSVALUE64)
+/* ------------------------------ BEGIN: USE(JSVALUE64) (OP_ADD, OP_SUB, OP_MUL) ------------------------------ */
+
+void JIT::compileBinaryArithOp(OpcodeID opcodeID, int, int op1, int op2, OperandTypes)
+{
+ emitGetVirtualRegisters(op1, regT0, op2, regT1);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
+ RareCaseProfile* profile = m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
+ if (opcodeID == op_add)
+ addSlowCase(branchAdd32(Overflow, regT1, regT0));
+ else if (opcodeID == op_sub)
+ addSlowCase(branchSub32(Overflow, regT1, regT0));
+ else {
+ ASSERT(opcodeID == op_mul);
+ if (shouldEmitProfiling()) {
+ // We want to be able to measure if this is taking the slow case just
+ // because of negative zero. If this produces positive zero, then we
+ // don't want the slow case to be taken because that will throw off
+ // speculative compilation.
+ move(regT0, regT2);
+ addSlowCase(branchMul32(Overflow, regT1, regT2));
+ JumpList done;
+ done.append(branchTest32(NonZero, regT2));
+ Jump negativeZero = branch32(LessThan, regT0, TrustedImm32(0));
+ done.append(branch32(GreaterThanOrEqual, regT1, TrustedImm32(0)));
+ negativeZero.link(this);
+ // We only get here if we have a genuine negative zero. Record this,
+ // so that the speculative JIT knows that we failed speculation
+ // because of a negative zero.
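+ // (E.g. (-1) * 0: branchMul32 yields integer 0, but JS requires -0, so
+ // the LessThan check on regT0 above routes that case here.)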
+ add32(TrustedImm32(1), AbsoluteAddress(&profile->m_counter));
+ addSlowCase(jump());
+ done.link(this);
+ move(regT2, regT0);
+ } else {
+ addSlowCase(branchMul32(Overflow, regT1, regT0));
+ addSlowCase(branchTest32(Zero, regT0));
+ }
+ }
+ emitFastArithIntToImmNoCheck(regT0, regT0);
+}
-void JIT::emit_op_negate(Instruction* currentInstruction)
+void JIT::compileBinaryArithOpSlowCase(Instruction* currentInstruction, OpcodeID opcodeID, Vector<SlowCaseEntry>::iterator& iter, int result, int op1, int op2, OperandTypes types, bool op1HasImmediateIntFastCase, bool op2HasImmediateIntFastCase)
{
- int result = currentInstruction[1].u.operand;
- int src = currentInstruction[2].u.operand;
-
-#if USE(JSVALUE64)
- JSValueRegs srcRegs = JSValueRegs(regT0);
- JSValueRegs resultRegs = srcRegs;
- GPRReg scratchGPR = regT2;
-#else
- JSValueRegs srcRegs = JSValueRegs(regT1, regT0);
- JSValueRegs resultRegs = srcRegs;
- GPRReg scratchGPR = regT4;
-#endif
+ // We assume that subtracting TagTypeNumber is equivalent to adding DoubleEncodeOffset.
+ COMPILE_ASSERT(((TagTypeNumber + DoubleEncodeOffset) == 0), TagTypeNumber_PLUS_DoubleEncodeOffset_EQUALS_0);
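+ // Concretely (assuming the usual JSVALUE64 constants), TagTypeNumber is
+ // 0xFFFF000000000000 and DoubleEncodeOffset is 0x0001000000000000, whose
+ // 64-bit sum wraps to 0; this is why add64(tagTypeNumberRegister, reg)
+ // recovers the raw IEEE-754 bits of a boxed double below.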
- emitGetVirtualRegister(src, srcRegs);
+ Jump notImm1;
+ Jump notImm2;
+ if (op1HasImmediateIntFastCase) {
+ notImm2 = getSlowCase(iter);
+ } else if (op2HasImmediateIntFastCase) {
+ notImm1 = getSlowCase(iter);
+ } else {
+ notImm1 = getSlowCase(iter);
+ notImm2 = getSlowCase(iter);
+ }
- JITNegGenerator gen(resultRegs, srcRegs, scratchGPR);
- gen.generateFastPath(*this);
+ linkSlowCase(iter); // Integer overflow case - we could handle this in JIT code, but this is likely rare.
+ if (opcodeID == op_mul && !op1HasImmediateIntFastCase && !op2HasImmediateIntFastCase) // op_mul has an extra slow case to handle 0 * negative number.
+ linkSlowCase(iter);
- ASSERT(gen.didEmitFastPath());
- gen.endJumpList().link(this);
- emitPutVirtualRegister(result, resultRegs);
+ Label stubFunctionCall(this);
- addSlowCase(gen.slowPathJumpList());
-}
+ JITSlowPathCall slowPathCall(this, currentInstruction, opcodeID == op_add ? slow_path_add : opcodeID == op_sub ? slow_path_sub : slow_path_mul);
+ slowPathCall.call();
+ Jump end = jump();
+
+ if (op1HasImmediateIntFastCase) {
+ notImm2.link(this);
+ if (!types.second().definitelyIsNumber())
+ emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
+ emitGetVirtualRegister(op1, regT1);
+ convertInt32ToDouble(regT1, fpRegT1);
+ add64(tagTypeNumberRegister, regT0);
+ move64ToDouble(regT0, fpRegT2);
+ } else if (op2HasImmediateIntFastCase) {
+ notImm1.link(this);
+ if (!types.first().definitelyIsNumber())
+ emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
+ emitGetVirtualRegister(op2, regT1);
+ convertInt32ToDouble(regT1, fpRegT1);
+ add64(tagTypeNumberRegister, regT0);
+ move64ToDouble(regT0, fpRegT2);
+ } else {
+ // If we get here, regT0 (eax) is not an int32; regT1 (edx) has not been checked yet.
+ notImm1.link(this);
+ if (!types.first().definitelyIsNumber())
+ emitJumpIfNotImmediateNumber(regT0).linkTo(stubFunctionCall, this);
+ if (!types.second().definitelyIsNumber())
+ emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
+ add64(tagTypeNumberRegister, regT0);
+ move64ToDouble(regT0, fpRegT1);
+ Jump op2isDouble = emitJumpIfNotImmediateInteger(regT1);
+ convertInt32ToDouble(regT1, fpRegT2);
+ Jump op2wasInteger = jump();
+
+ // If we get here, regT0 (eax) IS an int32, but regT1 (edx) is not.
+ notImm2.link(this);
+ if (!types.second().definitelyIsNumber())
+ emitJumpIfNotImmediateNumber(regT1).linkTo(stubFunctionCall, this);
+ convertInt32ToDouble(regT0, fpRegT1);
+ op2isDouble.link(this);
+ add64(tagTypeNumberRegister, regT1);
+ move64ToDouble(regT1, fpRegT2);
+ op2wasInteger.link(this);
+ }
-void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
+ if (opcodeID == op_add)
+ addDouble(fpRegT2, fpRegT1);
+ else if (opcodeID == op_sub)
+ subDouble(fpRegT2, fpRegT1);
+ else if (opcodeID == op_mul)
+ mulDouble(fpRegT2, fpRegT1);
+ else {
+ ASSERT(opcodeID == op_div);
+ divDouble(fpRegT2, fpRegT1);
+ }
+ moveDoubleTo64(fpRegT1, regT0);
+ sub64(tagTypeNumberRegister, regT0);
+ emitPutVirtualRegister(result, regT0);
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_negate);
- slowPathCall.call();
+ end.link(this);
}
-template<typename SnippetGenerator>
-void JIT::emitBitBinaryOpFastPath(Instruction* currentInstruction)
+void JIT::emit_op_add(Instruction* currentInstruction)
{
int result = currentInstruction[1].u.operand;
int op1 = currentInstruction[2].u.operand;
int op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-#if USE(JSVALUE64)
- JSValueRegs leftRegs = JSValueRegs(regT0);
- JSValueRegs rightRegs = JSValueRegs(regT1);
- JSValueRegs resultRegs = leftRegs;
- GPRReg scratchGPR = regT2;
-#else
- JSValueRegs leftRegs = JSValueRegs(regT1, regT0);
- JSValueRegs rightRegs = JSValueRegs(regT3, regT2);
- JSValueRegs resultRegs = leftRegs;
- GPRReg scratchGPR = regT4;
-#endif
-
- SnippetOperand leftOperand;
- SnippetOperand rightOperand;
-
- if (isOperandConstantInt(op1))
- leftOperand.setConstInt32(getOperandConstantInt(op1));
- else if (isOperandConstantInt(op2))
- rightOperand.setConstInt32(getOperandConstantInt(op2));
-
- RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
-
- if (!leftOperand.isConst())
- emitGetVirtualRegister(op1, leftRegs);
- if (!rightOperand.isConst())
- emitGetVirtualRegister(op2, rightRegs);
-
- SnippetGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs, scratchGPR);
-
- gen.generateFastPath(*this);
-
- ASSERT(gen.didEmitFastPath());
- gen.endJumpList().link(this);
- emitPutVirtualRegister(result, resultRegs);
-
- addSlowCase(gen.slowPathJumpList());
-}
-
-void JIT::emit_op_bitand(Instruction* currentInstruction)
-{
- emitBitBinaryOpFastPath<JITBitAndGenerator>(currentInstruction);
-}
-
-void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
-
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitand);
- slowPathCall.call();
-}
-
-void JIT::emit_op_bitor(Instruction* currentInstruction)
-{
- emitBitBinaryOpFastPath<JITBitOrGenerator>(currentInstruction);
-}
-
-void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
-
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitor);
- slowPathCall.call();
-}
-
-void JIT::emit_op_bitxor(Instruction* currentInstruction)
-{
- emitBitBinaryOpFastPath<JITBitXorGenerator>(currentInstruction);
-}
-
-void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
-
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitxor);
- slowPathCall.call();
-}
-
-void JIT::emit_op_lshift(Instruction* currentInstruction)
-{
- emitBitBinaryOpFastPath<JITLeftShiftGenerator>(currentInstruction);
-}
+ if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
+ addSlowCase();
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_add);
+ slowPathCall.call();
+ return;
+ }
-void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
+ if (isOperandConstantImmediateInt(op1)) {
+ emitGetVirtualRegister(op2, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ addSlowCase(branchAdd32(Overflow, regT0, Imm32(getConstantOperandImmediateInt(op1)), regT1));
+ emitFastArithIntToImmNoCheck(regT1, regT0);
+ } else if (isOperandConstantImmediateInt(op2)) {
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ addSlowCase(branchAdd32(Overflow, regT0, Imm32(getConstantOperandImmediateInt(op2)), regT1));
+ emitFastArithIntToImmNoCheck(regT1, regT0);
+ } else
+ compileBinaryArithOp(op_add, result, op1, op2, types);
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_lshift);
- slowPathCall.call();
+ emitPutVirtualRegister(result);
}
-void JIT::emitRightShiftFastPath(Instruction* currentInstruction, OpcodeID opcodeID)
+void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- ASSERT(opcodeID == op_rshift || opcodeID == op_urshift);
-
- JITRightShiftGenerator::ShiftType snippetShiftType = opcodeID == op_rshift ?
- JITRightShiftGenerator::SignedShift : JITRightShiftGenerator::UnsignedShift;
-
int result = currentInstruction[1].u.operand;
int op1 = currentInstruction[2].u.operand;
int op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-#if USE(JSVALUE64)
- JSValueRegs leftRegs = JSValueRegs(regT0);
- JSValueRegs rightRegs = JSValueRegs(regT1);
- JSValueRegs resultRegs = leftRegs;
- GPRReg scratchGPR = regT2;
- FPRReg scratchFPR = InvalidFPRReg;
-#else
- JSValueRegs leftRegs = JSValueRegs(regT1, regT0);
- JSValueRegs rightRegs = JSValueRegs(regT3, regT2);
- JSValueRegs resultRegs = leftRegs;
- GPRReg scratchGPR = regT4;
- FPRReg scratchFPR = fpRegT2;
-#endif
-
- SnippetOperand leftOperand;
- SnippetOperand rightOperand;
-
- if (isOperandConstantInt(op1))
- leftOperand.setConstInt32(getOperandConstantInt(op1));
- else if (isOperandConstantInt(op2))
- rightOperand.setConstInt32(getOperandConstantInt(op2));
-
- RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
-
- if (!leftOperand.isConst())
- emitGetVirtualRegister(op1, leftRegs);
- if (!rightOperand.isConst())
- emitGetVirtualRegister(op2, rightRegs);
-
- JITRightShiftGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
- fpRegT0, scratchGPR, scratchFPR, snippetShiftType);
-
- gen.generateFastPath(*this);
-
- ASSERT(gen.didEmitFastPath());
- gen.endJumpList().link(this);
- emitPutVirtualRegister(result, resultRegs);
-
- addSlowCase(gen.slowPathJumpList());
-}
-
-void JIT::emit_op_rshift(Instruction* currentInstruction)
-{
- emitRightShiftFastPath(currentInstruction, op_rshift);
-}
-
-void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
-
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_rshift);
- slowPathCall.call();
-}
-
-void JIT::emit_op_urshift(Instruction* currentInstruction)
-{
- emitRightShiftFastPath(currentInstruction, op_urshift);
-}
-
-void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
+ if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
+ linkDummySlowCase(iter);
+ return;
+ }
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_urshift);
- slowPathCall.call();
+ bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1);
+ bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2);
+ compileBinaryArithOpSlowCase(currentInstruction, op_add, iter, result, op1, op2, types, op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
}
-void JIT::emit_op_add(Instruction* currentInstruction)
+void JIT::emit_op_mul(Instruction* currentInstruction)
{
int result = currentInstruction[1].u.operand;
int op1 = currentInstruction[2].u.operand;
int op2 = currentInstruction[3].u.operand;
OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-#if USE(JSVALUE64)
- JSValueRegs leftRegs = JSValueRegs(regT0);
- JSValueRegs rightRegs = JSValueRegs(regT1);
- JSValueRegs resultRegs = leftRegs;
- GPRReg scratchGPR = regT2;
- FPRReg scratchFPR = InvalidFPRReg;
-#else
- JSValueRegs leftRegs = JSValueRegs(regT1, regT0);
- JSValueRegs rightRegs = JSValueRegs(regT3, regT2);
- JSValueRegs resultRegs = leftRegs;
- GPRReg scratchGPR = regT4;
- FPRReg scratchFPR = fpRegT2;
-#endif
-
- SnippetOperand leftOperand(types.first());
- SnippetOperand rightOperand(types.second());
-
- if (isOperandConstantInt(op1))
- leftOperand.setConstInt32(getOperandConstantInt(op1));
- else if (isOperandConstantInt(op2))
- rightOperand.setConstInt32(getOperandConstantInt(op2));
-
- RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
-
- if (!leftOperand.isConst())
- emitGetVirtualRegister(op1, leftRegs);
- if (!rightOperand.isConst())
- emitGetVirtualRegister(op2, rightRegs);
-
- JITAddGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
- fpRegT0, fpRegT1, scratchGPR, scratchFPR);
-
- gen.generateFastPath(*this);
+ // For now, only plant a fast int case if the constant operand is greater than zero.
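+ // (A non-positive constant could yield -0, e.g. -2 * 0, which the int32
+ // fast path cannot represent, so those shapes fall through to
+ // compileBinaryArithOp and its negative-zero handling.)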
+ int32_t value;
+ if (isOperandConstantImmediateInt(op1) && ((value = getConstantOperandImmediateInt(op1)) > 0)) {
+ // Add a special fast case profile because the DFG JIT will expect one.
+ m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
+ emitGetVirtualRegister(op2, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT1));
+ emitFastArithReTagImmediate(regT1, regT0);
+ } else if (isOperandConstantImmediateInt(op2) && ((value = getConstantOperandImmediateInt(op2)) > 0)) {
+ // Add a special fast case profile because the DFG JIT will expect one.
+ m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
+ emitGetVirtualRegister(op1, regT0);
+ emitJumpSlowCaseIfNotImmediateInteger(regT0);
+ addSlowCase(branchMul32(Overflow, Imm32(value), regT0, regT1));
+ emitFastArithReTagImmediate(regT1, regT0);
+ } else
+ compileBinaryArithOp(op_mul, result, op1, op2, types);
- if (gen.didEmitFastPath()) {
- gen.endJumpList().link(this);
- emitPutVirtualRegister(result, resultRegs);
-
- addSlowCase(gen.slowPathJumpList());
- } else {
- ASSERT(gen.endJumpList().empty());
- ASSERT(gen.slowPathJumpList().empty());
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_add);
- slowPathCall.call();
- }
+ emitPutVirtualRegister(result);
}
-void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
+ int result = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_add);
- slowPathCall.call();
+ bool op1HasImmediateIntFastCase = isOperandConstantImmediateInt(op1) && getConstantOperandImmediateInt(op1) > 0;
+ bool op2HasImmediateIntFastCase = !op1HasImmediateIntFastCase && isOperandConstantImmediateInt(op2) && getConstantOperandImmediateInt(op2) > 0;
+ compileBinaryArithOpSlowCase(currentInstruction, op_mul, iter, result, op1, op2, types, op1HasImmediateIntFastCase, op2HasImmediateIntFastCase);
}
void JIT::emit_op_div(Instruction* currentInstruction)
{
- int result = currentInstruction[1].u.operand;
+ int dst = currentInstruction[1].u.operand;
int op1 = currentInstruction[2].u.operand;
int op2 = currentInstruction[3].u.operand;
OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-#if USE(JSVALUE64)
- JSValueRegs leftRegs = JSValueRegs(regT0);
- JSValueRegs rightRegs = JSValueRegs(regT1);
- JSValueRegs resultRegs = leftRegs;
- GPRReg scratchGPR = regT2;
-#else
- JSValueRegs leftRegs = JSValueRegs(regT1, regT0);
- JSValueRegs rightRegs = JSValueRegs(regT3, regT2);
- JSValueRegs resultRegs = leftRegs;
- GPRReg scratchGPR = regT4;
-#endif
- FPRReg scratchFPR = fpRegT2;
-
- ResultProfile* resultProfile = nullptr;
- if (shouldEmitProfiling())
- resultProfile = m_codeBlock->ensureResultProfile(m_bytecodeOffset);
-
- SnippetOperand leftOperand(types.first());
- SnippetOperand rightOperand(types.second());
-
- if (isOperandConstantInt(op1))
- leftOperand.setConstInt32(getOperandConstantInt(op1));
-#if USE(JSVALUE64)
- else if (isOperandConstantDouble(op1))
- leftOperand.setConstDouble(getOperandConstantDouble(op1));
-#endif
-
- if (isOperandConstantInt(op2))
- rightOperand.setConstInt32(getOperandConstantInt(op2));
-#if USE(JSVALUE64)
- else if (isOperandConstantDouble(op2))
- rightOperand.setConstDouble(getOperandConstantDouble(op2));
-#endif
-
- RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
-
- if (!leftOperand.isConst())
- emitGetVirtualRegister(op1, leftRegs);
- if (!rightOperand.isConst())
- emitGetVirtualRegister(op2, rightRegs);
-
- JITDivGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
- fpRegT0, fpRegT1, scratchGPR, scratchFPR, resultProfile);
-
- gen.generateFastPath(*this);
-
- if (gen.didEmitFastPath()) {
- gen.endJumpList().link(this);
- emitPutVirtualRegister(result, resultRegs);
+ if (isOperandConstantImmediateDouble(op1)) {
+ emitGetVirtualRegister(op1, regT0);
+ add64(tagTypeNumberRegister, regT0);
+ move64ToDouble(regT0, fpRegT0);
+ } else if (isOperandConstantImmediateInt(op1)) {
+ emitLoadInt32ToDouble(op1, fpRegT0);
+ } else {
+ emitGetVirtualRegister(op1, regT0);
+ if (!types.first().definitelyIsNumber())
+ emitJumpSlowCaseIfNotImmediateNumber(regT0);
+ Jump notInt = emitJumpIfNotImmediateInteger(regT0);
+ convertInt32ToDouble(regT0, fpRegT0);
+ Jump skipDoubleLoad = jump();
+ notInt.link(this);
+ add64(tagTypeNumberRegister, regT0);
+ move64ToDouble(regT0, fpRegT0);
+ skipDoubleLoad.link(this);
+ }
- addSlowCase(gen.slowPathJumpList());
+ if (isOperandConstantImmediateDouble(op2)) {
+ emitGetVirtualRegister(op2, regT1);
+ add64(tagTypeNumberRegister, regT1);
+ move64ToDouble(regT1, fpRegT1);
+ } else if (isOperandConstantImmediateInt(op2)) {
+ emitLoadInt32ToDouble(op2, fpRegT1);
} else {
- ASSERT(gen.endJumpList().empty());
- ASSERT(gen.slowPathJumpList().empty());
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_div);
- slowPathCall.call();
+ emitGetVirtualRegister(op2, regT1);
+ if (!types.second().definitelyIsNumber())
+ emitJumpSlowCaseIfNotImmediateNumber(regT1);
+ Jump notInt = emitJumpIfNotImmediateInteger(regT1);
+ convertInt32ToDouble(regT1, fpRegT1);
+ Jump skipDoubleLoad = jump();
+ notInt.link(this);
+ add64(tagTypeNumberRegister, regT1);
+ move64ToDouble(regT1, fpRegT1);
+ skipDoubleLoad.link(this);
}
+ divDouble(fpRegT1, fpRegT0);
+
+ // Is the result actually an integer? The DFG JIT would really like to know. If it's
+ // not an integer, we increment a count. If this count, together with the
+ // slow case counter, is below threshold then the DFG JIT will compile this
+ // division with a speculation that the remainder is zero.
+
+ // As well, there are cases where a double result here would cause an important field
+ // in the heap to sometimes have doubles in it, resulting in double predictions getting
+ // propagated to a use site where it might cause damage (such as the index to an array
+ // access). So if we are DFG compiling anything in the program, we want this code to
+ // ensure that it produces integers whenever possible.
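+ // (Hedged example: 6 / 2 converts back to int32 3 via the check below,
+ // while 1 / 3 stays a double and bumps the special fast case counter,
+ // steering the DFG away from an integer-division speculation.)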
+
+ JumpList notInteger;
+ branchConvertDoubleToInt32(fpRegT0, regT0, notInteger, fpRegT1);
+ // If we've got an integer, we might as well make that the result of the division.
+ emitFastArithReTagImmediate(regT0, regT0);
+ Jump isInteger = jump();
+ notInteger.link(this);
+ moveDoubleTo64(fpRegT0, regT0);
+ Jump doubleZero = branchTest64(Zero, regT0);
+ add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset)->m_counter));
+ sub64(tagTypeNumberRegister, regT0);
+ Jump trueDouble = jump();
+ doubleZero.link(this);
+ move(tagTypeNumberRegister, regT0);
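+ // (tagTypeNumberRegister by itself is the boxed int32 0, since integers
+ // are encoded as TagTypeNumber | value; +0.0 can thus be returned as an
+ // integer without bumping the counter, while -0 has a nonzero bit pattern
+ // and takes the double path above.)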
+ trueDouble.link(this);
+ isInteger.link(this);
+
+ emitPutVirtualRegister(dst, regT0);
}
void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
-
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_div);
- slowPathCall.call();
-}
-
-void JIT::emit_op_mul(Instruction* currentInstruction)
-{
- int result = currentInstruction[1].u.operand;
int op1 = currentInstruction[2].u.operand;
int op2 = currentInstruction[3].u.operand;
OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-
-#if USE(JSVALUE64)
- JSValueRegs leftRegs = JSValueRegs(regT0);
- JSValueRegs rightRegs = JSValueRegs(regT1);
- JSValueRegs resultRegs = JSValueRegs(regT2);
- GPRReg scratchGPR = regT3;
- FPRReg scratchFPR = InvalidFPRReg;
-#else
- JSValueRegs leftRegs = JSValueRegs(regT1, regT0);
- JSValueRegs rightRegs = JSValueRegs(regT3, regT2);
- JSValueRegs resultRegs = leftRegs;
- GPRReg scratchGPR = regT4;
- FPRReg scratchFPR = fpRegT2;
+ if (types.first().definitelyIsNumber() && types.second().definitelyIsNumber()) {
+#ifndef NDEBUG
+ breakpoint();
#endif
-
- ResultProfile* resultProfile = nullptr;
- if (shouldEmitProfiling())
- resultProfile = m_codeBlock->ensureResultProfile(m_bytecodeOffset);
-
- SnippetOperand leftOperand(types.first());
- SnippetOperand rightOperand(types.second());
-
- if (isOperandConstantInt(op1))
- leftOperand.setConstInt32(getOperandConstantInt(op1));
- else if (isOperandConstantInt(op2))
- rightOperand.setConstInt32(getOperandConstantInt(op2));
-
- RELEASE_ASSERT(!leftOperand.isConst() || !rightOperand.isConst());
-
- if (!leftOperand.isPositiveConstInt32())
- emitGetVirtualRegister(op1, leftRegs);
- if (!rightOperand.isPositiveConstInt32())
- emitGetVirtualRegister(op2, rightRegs);
-
- JITMulGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
- fpRegT0, fpRegT1, scratchGPR, scratchFPR, resultProfile);
-
- gen.generateFastPath(*this);
-
- if (gen.didEmitFastPath()) {
- gen.endJumpList().link(this);
- emitPutVirtualRegister(result, resultRegs);
-
- addSlowCase(gen.slowPathJumpList());
- } else {
- ASSERT(gen.endJumpList().empty());
- ASSERT(gen.slowPathJumpList().empty());
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mul);
- slowPathCall.call();
+ return;
}
-}
-
-void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
-
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mul);
+ if (!isOperandConstantImmediateDouble(op1) && !isOperandConstantImmediateInt(op1)) {
+ if (!types.first().definitelyIsNumber())
+ linkSlowCase(iter);
+ }
+ if (!isOperandConstantImmediateDouble(op2) && !isOperandConstantImmediateInt(op2)) {
+ if (!types.second().definitelyIsNumber())
+ linkSlowCase(iter);
+ }
+ // (op_mul has an extra slow case for (op1 * -N) or (-N * op2) to catch a -0 result; div has no such extra case to link here.)
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_div);
slowPathCall.call();
}
@@ -885,48 +980,24 @@ void JIT::emit_op_sub(Instruction* currentInstruction)
int op2 = currentInstruction[3].u.operand;
OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
-#if USE(JSVALUE64)
- JSValueRegs leftRegs = JSValueRegs(regT0);
- JSValueRegs rightRegs = JSValueRegs(regT1);
- JSValueRegs resultRegs = leftRegs;
- GPRReg scratchGPR = regT2;
- FPRReg scratchFPR = InvalidFPRReg;
-#else
- JSValueRegs leftRegs = JSValueRegs(regT1, regT0);
- JSValueRegs rightRegs = JSValueRegs(regT3, regT2);
- JSValueRegs resultRegs = leftRegs;
- GPRReg scratchGPR = regT4;
- FPRReg scratchFPR = fpRegT2;
-#endif
-
- SnippetOperand leftOperand(types.first());
- SnippetOperand rightOperand(types.second());
-
- emitGetVirtualRegister(op1, leftRegs);
- emitGetVirtualRegister(op2, rightRegs);
-
- JITSubGenerator gen(leftOperand, rightOperand, resultRegs, leftRegs, rightRegs,
- fpRegT0, fpRegT1, scratchGPR, scratchFPR);
-
- gen.generateFastPath(*this);
-
- ASSERT(gen.didEmitFastPath());
- gen.endJumpList().link(this);
- emitPutVirtualRegister(result, resultRegs);
-
- addSlowCase(gen.slowPathJumpList());
+ compileBinaryArithOp(op_sub, result, op1, op2, types);
+ emitPutVirtualRegister(result);
}
void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- linkAllSlowCasesForBytecodeOffset(m_slowCases, iter, m_bytecodeOffset);
+ int result = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_sub);
- slowPathCall.call();
+ compileBinaryArithOpSlowCase(currentInstruction, op_sub, iter, result, op1, op2, types, false, false);
}
/* ------------------------------ END: OP_ADD, OP_SUB, OP_MUL ------------------------------ */
+#endif // USE(JSVALUE64)
+
} // namespace JSC
#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp b/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
index 1fa14563a..53ac73894 100644
--- a/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITArithmetic32_64.cpp
@@ -1,5 +1,5 @@
/*
-* Copyright (C) 2008, 2015 Apple Inc. All rights reserved.
+* Copyright (C) 2008 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -31,10 +31,11 @@
#include "CodeBlock.h"
#include "JITInlines.h"
+#include "JITStubs.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "Interpreter.h"
-#include "JSCInlines.h"
+#include "Operations.h"
#include "ResultType.h"
#include "SamplingTool.h"
#include "SlowPathCall.h"
@@ -42,13 +43,47 @@
namespace JSC {
+void JIT::emit_op_negate(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+
+ Jump srcNotInt = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
+ addSlowCase(branchTest32(Zero, regT0, TrustedImm32(0x7fffffff)));
+ neg32(regT0);
+ emitStoreInt32(dst, regT0, (dst == src));
+
+ Jump end = jump();
+
+ srcNotInt.link(this);
+ addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
+
+ xor32(TrustedImm32(1 << 31), regT1);
+ store32(regT1, tagFor(dst));
+ if (dst != src)
+ store32(regT0, payloadFor(dst));
+
+ end.link(this);
+}
+
+void JIT::emitSlow_op_negate(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter); // 0x7fffffff check
+ linkSlowCase(iter); // double check
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_negate);
+ slowPathCall.call();
+}
+
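
The two guards in emit_op_negate are worth spelling out: branchTest32(Zero, regT0, TrustedImm32(0x7fffffff)) is taken exactly when the payload is 0 or INT32_MIN, the two int32s whose negation is not an int32 (-0 must be a double, and -INT32_MIN overflows), while the double path negates by flipping bit 31 of the tag word, i.e. the sign bit of the boxed IEEE double. A stand-alone sketch of both ideas (plain C++, not JSC code):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    static bool int32NegateNeedsSlowPath(int32_t payload) {
        // payload & 0x7fffffff == 0 exactly when payload is 0 or INT32_MIN:
        // negating 0 must yield the double -0, and negating INT32_MIN overflows.
        return (payload & 0x7fffffff) == 0;
    }

    static double negateViaSignBitFlip(double d) {
        uint64_t bits;
        std::memcpy(&bits, &d, sizeof bits);
        bits ^= uint64_t(1) << 63; // xor32(1 << 31, tag) on the high word
        std::memcpy(&d, &bits, sizeof d);
        return d;
    }

    int main() {
        assert(int32NegateNeedsSlowPath(0));
        assert(int32NegateNeedsSlowPath(INT32_MIN));
        assert(!int32NegateNeedsSlowPath(42));
        assert(negateViaSignBitFlip(2.5) == -2.5);
        return 0;
    }
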
void JIT::emit_compareAndJump(OpcodeID opcode, int op1, int op2, unsigned target, RelationalCondition condition)
{
JumpList notInt32Op1;
JumpList notInt32Op2;
// Character less.
- if (isOperandConstantChar(op1)) {
+ if (isOperandConstantImmediateChar(op1)) {
emitLoad(op2, regT1, regT0);
addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
JumpList failures;
@@ -57,7 +92,7 @@ void JIT::emit_compareAndJump(OpcodeID opcode, int op1, int op2, unsigned target
addJump(branch32(commute(condition), regT0, Imm32(asString(getConstantOperand(op1))->tryGetValue()[0])), target);
return;
}
- if (isOperandConstantChar(op2)) {
+ if (isOperandConstantImmediateChar(op2)) {
emitLoad(op1, regT1, regT0);
addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
JumpList failures;
@@ -66,11 +101,11 @@ void JIT::emit_compareAndJump(OpcodeID opcode, int op1, int op2, unsigned target
addJump(branch32(condition, regT0, Imm32(asString(getConstantOperand(op2))->tryGetValue()[0])), target);
return;
}
- if (isOperandConstantInt(op1)) {
+ if (isOperandConstantImmediateInt(op1)) {
emitLoad(op2, regT3, regT2);
notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
addJump(branch32(commute(condition), regT2, Imm32(getConstantOperand(op1).asInt32())), target);
- } else if (isOperandConstantInt(op2)) {
+ } else if (isOperandConstantImmediateInt(op2)) {
emitLoad(op1, regT1, regT0);
notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
addJump(branch32(condition, regT0, Imm32(getConstantOperand(op2).asInt32())), target);
@@ -89,28 +124,28 @@ void JIT::emit_compareAndJump(OpcodeID opcode, int op1, int op2, unsigned target
Jump end = jump();
// Double less.
- emitBinaryDoubleOp(opcode, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantInt(op1), isOperandConstantInt(op1) || !isOperandConstantInt(op2));
+ emitBinaryDoubleOp(opcode, target, op1, op2, OperandTypes(), notInt32Op1, notInt32Op2, !isOperandConstantImmediateInt(op1), isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2));
end.link(this);
}
void JIT::emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondition, size_t (JIT_OPERATION *operation)(ExecState*, EncodedJSValue, EncodedJSValue), bool invert, Vector<SlowCaseEntry>::iterator& iter)
{
- if (isOperandConstantChar(op1) || isOperandConstantChar(op2)) {
+ if (isOperandConstantImmediateChar(op1) || isOperandConstantImmediateChar(op2)) {
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
} else {
if (!supportsFloatingPoint()) {
- if (!isOperandConstantInt(op1) && !isOperandConstantInt(op2))
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
linkSlowCase(iter); // int32 check
linkSlowCase(iter); // int32 check
} else {
- if (!isOperandConstantInt(op1)) {
+ if (!isOperandConstantImmediateInt(op1)) {
linkSlowCase(iter); // double check
linkSlowCase(iter); // int32 check
}
- if (isOperandConstantInt(op1) || !isOperandConstantInt(op2))
+ if (isOperandConstantImmediateInt(op1) || !isOperandConstantImmediateInt(op2))
linkSlowCase(iter); // double check
}
}
@@ -120,6 +155,155 @@ void JIT::emit_compareAndJumpSlow(int op1, int op2, unsigned target, DoubleCondi
emitJumpSlowToHot(branchTest32(invert ? Zero : NonZero, returnValueGPR), target);
}
+// LeftShift (<<)
+
+void JIT::emit_op_lshift(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
+
+ if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ lshift32(Imm32(getConstantOperand(op2).asInt32()), regT0);
+ emitStoreInt32(dst, regT0, dst == op1);
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ if (!isOperandConstantImmediateInt(op1))
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+ lshift32(regT2, regT0);
+ emitStoreInt32(dst, regT0, dst == op1 || dst == op2);
+}
+
+void JIT::emitSlow_op_lshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_lshift);
+ slowPathCall.call();
+}
+
+// RightShift (>>) and UnsignedRightShift (>>>) helper
+
+void JIT::emitRightShift(Instruction* currentInstruction, bool isUnsigned)
+{
+ int dst = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
+
+ // The slow case of rshift makes assumptions about which registers hold the
+ // shift arguments, so any changes here must be mirrored there as well.
+ if (isOperandConstantImmediateInt(op2)) {
+ emitLoad(op1, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ int shift = getConstantOperand(op2).asInt32() & 0x1f;
+ if (shift) {
+ if (isUnsigned)
+ urshift32(Imm32(shift), regT0);
+ else
+ rshift32(Imm32(shift), regT0);
+ }
+ emitStoreInt32(dst, regT0, dst == op1);
+ } else {
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ if (!isOperandConstantImmediateInt(op1))
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+ if (isUnsigned)
+ urshift32(regT2, regT0);
+ else
+ rshift32(regT2, regT0);
+ emitStoreInt32(dst, regT0, dst == op1);
+ }
+}
+
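
The `& 0x1f` mask in the constant case implements ECMAScript's rule that a shift count is taken modulo 32, and the `if (shift)` test simply skips the shift instruction for a zero count (the value is still re-tagged by emitStoreInt32). Storing an int32 even for `>>>` is fine at this point; results with the sign bit set are evidently fixed up by the separate op_unsigned opcode handled further down. A sketch of the semantics being compiled (plain C++, not JSC code):

    #include <cassert>
    #include <cstdint>

    static int32_t jsRshift(int32_t lhs, int32_t count) {
        return lhs >> (count & 0x1f); // signed (arithmetic) shift
    }

    static int32_t jsUrshift(int32_t lhs, int32_t count) {
        return static_cast<int32_t>(static_cast<uint32_t>(lhs) >> (count & 0x1f));
    }

    int main() {
        assert(jsRshift(-8, 1) == -4);          // sign-extending
        assert(jsUrshift(-8, 1) == 0x7ffffffc); // zero-filling
        assert(jsRshift(1, 33) == 0);           // 33 & 0x1f == 1, so 1 >> 1
        return 0;
    }
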
+void JIT::emitRightShiftSlowCase(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter, bool isUnsigned)
+{
+ int dst = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
+ if (isOperandConstantImmediateInt(op2)) {
+ int shift = getConstantOperand(op2).asInt32() & 0x1f;
+ // op1 = regT1:regT0
+ linkSlowCase(iter); // int32 check
+ if (supportsFloatingPointTruncate()) {
+ JumpList failures;
+ failures.append(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::LowestTag)));
+ emitLoadDouble(op1, fpRegT0);
+ failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
+ if (shift) {
+ if (isUnsigned)
+ urshift32(Imm32(shift), regT0);
+ else
+ rshift32(Imm32(shift), regT0);
+ }
+ move(TrustedImm32(JSValue::Int32Tag), regT1);
+ emitStoreInt32(dst, regT0, false);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
+ failures.link(this);
+ }
+ } else {
+ // op1 = regT1:regT0
+ // op2 = regT3:regT2
+ if (!isOperandConstantImmediateInt(op1)) {
+ linkSlowCase(iter); // int32 check -- op1 is not an int
+ if (supportsFloatingPointTruncate()) {
+ JumpList failures;
+ failures.append(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag))); // op1 is not a double
+ emitLoadDouble(op1, fpRegT0);
+ failures.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag))); // op2 is not an int
+ failures.append(branchTruncateDoubleToInt32(fpRegT0, regT0));
+ if (isUnsigned)
+ urshift32(regT2, regT0);
+ else
+ rshift32(regT2, regT0);
+ move(TrustedImm32(JSValue::Int32Tag), regT1);
+ emitStoreInt32(dst, regT0, false);
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_rshift));
+ failures.link(this);
+ }
+ }
+
+ linkSlowCase(iter); // int32 check - op2 is not an int
+ }
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, isUnsigned ? slow_path_urshift : slow_path_rshift);
+ slowPathCall.call();
+}
+
+// RightShift (>>)
+
+void JIT::emit_op_rshift(Instruction* currentInstruction)
+{
+ emitRightShift(currentInstruction, false);
+}
+
+void JIT::emitSlow_op_rshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ emitRightShiftSlowCase(currentInstruction, iter, false);
+}
+
+// UnsignedRightShift (>>>)
+
+void JIT::emit_op_urshift(Instruction* currentInstruction)
+{
+ emitRightShift(currentInstruction, true);
+}
+
+void JIT::emitSlow_op_urshift(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ emitRightShiftSlowCase(currentInstruction, iter, true);
+}
+
void JIT::emit_op_unsigned(Instruction* currentInstruction)
{
int result = currentInstruction[1].u.operand;
@@ -141,6 +325,120 @@ void JIT::emitSlow_op_unsigned(Instruction* currentInstruction, Vector<SlowCaseE
slowPathCall.call();
}
+// BitAnd (&)
+
+void JIT::emit_op_bitand(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
+
+ int op;
+ int32_t constant;
+ if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
+ emitLoad(op, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ and32(Imm32(constant), regT0);
+ emitStoreInt32(dst, regT0, dst == op);
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+ and32(regT2, regT0);
+ emitStoreInt32(dst, regT0, op1 == dst || op2 == dst);
+}
+
+void JIT::emitSlow_op_bitand(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitand);
+ slowPathCall.call();
+}
+
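
All three bitwise ops share the same shape: getOperandConstantImmediateInt picks out whichever operand is a constant int32, so only the other operand needs a load and a tag check. A sketch of that helper's contract, with a fake constant table standing in for the CodeBlock's constant pool (illustrative only; the call sites above match this shape, but the real helper lives on JIT):

    #include <cassert>
    #include <cstdint>
    #include <optional>

    static std::optional<int32_t> constantInt(int operand) {
        // Hypothetical stand-in for isOperandConstantImmediateInt/getConstantOperand.
        if (operand == 100) return 7;
        return std::nullopt;
    }

    // Returns true if one side is a constant int32; 'op' is then the
    // non-constant operand and 'constant' the folded immediate.
    static bool getOperandConstantImmediateInt(int op1, int op2, int& op, int32_t& constant) {
        if (auto c1 = constantInt(op1)) { op = op2; constant = *c1; return true; }
        if (auto c2 = constantInt(op2)) { op = op1; constant = *c2; return true; }
        return false;
    }

    int main() {
        int op; int32_t k;
        assert(getOperandConstantImmediateInt(100, 3, op, k) && op == 3 && k == 7);
        assert(!getOperandConstantImmediateInt(1, 2, op, k));
        return 0;
    }
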
+// BitOr (|)
+
+void JIT::emit_op_bitor(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
+
+ int op;
+ int32_t constant;
+ if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
+ emitLoad(op, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ or32(Imm32(constant), regT0);
+ emitStoreInt32(dst, regT0, op == dst);
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+ or32(regT2, regT0);
+ emitStoreInt32(dst, regT0, op1 == dst || op2 == dst);
+}
+
+void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitor);
+ slowPathCall.call();
+}
+
+// BitXor (^)
+
+void JIT::emit_op_bitxor(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
+
+ int op;
+ int32_t constant;
+ if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
+ emitLoad(op, regT1, regT0);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ xor32(Imm32(constant), regT0);
+ emitStoreInt32(dst, regT0, op == dst);
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+ xor32(regT2, regT0);
+ emitStoreInt32(dst, regT0, op1 == dst || op2 == dst);
+}
+
+void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
+
+ if (!isOperandConstantImmediateInt(op1) && !isOperandConstantImmediateInt(op2))
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitxor);
+ slowPathCall.call();
+}
+
void JIT::emit_op_inc(Instruction* currentInstruction)
{
int srcDst = currentInstruction[1].u.operand;
@@ -181,6 +479,218 @@ void JIT::emitSlow_op_dec(Instruction* currentInstruction, Vector<SlowCaseEntry>
slowPathCall.call();
}
+// Addition (+)
+
+void JIT::emit_op_add(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
+ addSlowCase();
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_add);
+ slowPathCall.call();
+ return;
+ }
+
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ int op;
+ int32_t constant;
+ if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
+ emitAdd32Constant(dst, op, constant, op == op1 ? types.first() : types.second());
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+
+ // Int32 case.
+ addSlowCase(branchAdd32(Overflow, regT2, regT0));
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double case.
+ emitBinaryDoubleOp(op_add, dst, op1, op2, types, notInt32Op1, notInt32Op2);
+ end.link(this);
+}
+
+void JIT::emitAdd32Constant(int dst, int op, int32_t constant, ResultType opType)
+{
+ // Int32 case.
+ emitLoad(op, regT1, regT2);
+ Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
+ addSlowCase(branchAdd32(Overflow, regT2, Imm32(constant), regT0));
+ emitStoreInt32(dst, regT0, (op == dst));
+
+ // Double case.
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32);
+ return;
+ }
+ Jump end = jump();
+
+ notInt32.link(this);
+ if (!opType.definitelyIsNumber())
+ addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
+ move(Imm32(constant), regT2);
+ convertInt32ToDouble(regT2, fpRegT0);
+ emitLoadDouble(op, fpRegT1);
+ addDouble(fpRegT1, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+
+ end.link(this);
+}
+
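
branchAdd32(Overflow, regT2, Imm32(constant), regT0) performs the add and branches on the CPU overflow flag; that overflow entry links to the generic slow path, while the inline double path below it only handles an operand that is already boxed as a double. The overflow behaviour being guarded, sketched portably with a GCC/Clang builtin (not JSC code):

    #include <cassert>
    #include <cstdint>

    static double addWithInt32FastPath(int32_t a, int32_t b, bool& tookFastPath) {
        int32_t sum;
        if (!__builtin_add_overflow(a, b, &sum)) { // branchAdd32(Overflow) not taken
            tookFastPath = true;
            return sum;
        }
        tookFastPath = false; // slow case: redo the add as double arithmetic
        return static_cast<double>(a) + static_cast<double>(b);
    }

    int main() {
        bool fast;
        assert(addWithInt32FastPath(1, 2, fast) == 3 && fast);
        assert(addWithInt32FastPath(INT32_MAX, 1, fast) == 2147483648.0 && !fast);
        return 0;
    }
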
+void JIT::emitSlow_op_add(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (!types.first().mightBeNumber() || !types.second().mightBeNumber()) {
+ linkDummySlowCase(iter);
+ return;
+ }
+
+ int op;
+ int32_t constant;
+ if (getOperandConstantImmediateInt(op1, op2, op, constant)) {
+ linkSlowCase(iter); // overflow check
+
+ if (!supportsFloatingPoint())
+ linkSlowCase(iter); // non-sse case
+ else {
+ ResultType opType = op == op1 ? types.first() : types.second();
+ if (!opType.definitelyIsNumber())
+ linkSlowCase(iter); // double check
+ }
+ } else {
+ linkSlowCase(iter); // overflow check
+
+ if (!supportsFloatingPoint()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ } else {
+ if (!types.first().definitelyIsNumber())
+ linkSlowCase(iter); // double check
+
+ if (!types.second().definitelyIsNumber()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // double check
+ }
+ }
+ }
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_add);
+ slowPathCall.call();
+}
+
+// Subtraction (-)
+
+void JIT::emit_op_sub(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ if (isOperandConstantImmediateInt(op2)) {
+ emitSub32Constant(dst, op1, getConstantOperand(op2).asInt32(), types.first());
+ return;
+ }
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+
+ // Int32 case.
+ addSlowCase(branchSub32(Overflow, regT2, regT0));
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double case.
+ emitBinaryDoubleOp(op_sub, dst, op1, op2, types, notInt32Op1, notInt32Op2);
+ end.link(this);
+}
+
+void JIT::emitSub32Constant(int dst, int op, int32_t constant, ResultType opType)
+{
+ // Int32 case.
+ emitLoad(op, regT1, regT0);
+ Jump notInt32 = branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag));
+ addSlowCase(branchSub32(Overflow, regT0, Imm32(constant), regT2, regT3));
+ emitStoreInt32(dst, regT2, (op == dst));
+
+ // Double case.
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32);
+ return;
+ }
+ Jump end = jump();
+
+ notInt32.link(this);
+ if (!opType.definitelyIsNumber())
+ addSlowCase(branch32(Above, regT1, TrustedImm32(JSValue::LowestTag)));
+ move(Imm32(constant), regT2);
+ convertInt32ToDouble(regT2, fpRegT0);
+ emitLoadDouble(op, fpRegT1);
+ subDouble(fpRegT0, fpRegT1);
+ emitStoreDouble(dst, fpRegT1);
+
+ end.link(this);
+}
+
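
One MacroAssembler convention is easy to misread here: the two-operand subDouble(op, dest) computes dest -= op, which is why emitSub32Constant converts the constant into fpRegT0, loads the variable operand into fpRegT1, and stores fpRegT1 = op - constant. A trivial stand-in making the operand order explicit (not JSC code):

    #include <cassert>

    static void subDouble(double op, double& dest) { dest -= op; }

    int main() {
        double fpRegT0 = 5.0;  // the constant, converted from int32
        double fpRegT1 = 12.0; // the variable operand, loaded from 'op'
        subDouble(fpRegT0, fpRegT1); // fpRegT1 = op - constant
        assert(fpRegT1 == 7.0);
        return 0;
    }
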
+void JIT::emitSlow_op_sub(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (isOperandConstantImmediateInt(op2)) {
+ linkSlowCase(iter); // overflow check
+
+ if (!supportsFloatingPoint() || !types.first().definitelyIsNumber())
+ linkSlowCase(iter); // int32 or double check
+ } else {
+ linkSlowCase(iter); // overflow check
+
+ if (!supportsFloatingPoint()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ } else {
+ if (!types.first().definitelyIsNumber())
+ linkSlowCase(iter); // double check
+
+ if (!types.second().definitelyIsNumber()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // double check
+ }
+ }
+ }
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_sub);
+ slowPathCall.call();
+}
+
void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, int dst, int op1, int op2, OperandTypes types, JumpList& notInt32Op1, JumpList& notInt32Op2, bool op1IsInRegisters, bool op2IsInRegisters)
{
JumpList end;
@@ -213,6 +723,50 @@ void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, int dst, int op1, int op2, Opera
// Do the math.
doTheMath.link(this);
switch (opcodeID) {
+ case op_mul:
+ emitLoadDouble(op1, fpRegT2);
+ mulDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_add:
+ emitLoadDouble(op1, fpRegT2);
+ addDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_sub:
+ emitLoadDouble(op1, fpRegT1);
+ subDouble(fpRegT0, fpRegT1);
+ emitStoreDouble(dst, fpRegT1);
+ break;
+ case op_div: {
+ emitLoadDouble(op1, fpRegT1);
+ divDouble(fpRegT0, fpRegT1);
+
+ // Is the result actually an integer? The DFG JIT would really like to know. If it's
+ // not an integer, we increment a count. If this count together with the slow
+ // case counter is below threshold then the DFG JIT will compile this division
+ // with a speculation that the remainder is zero.
+
+ // As well, there are cases where a double result here would cause an important field
+ // in the heap to sometimes have doubles in it, resulting in double predictions getting
+ // propagated to a use site where it might cause damage (such as the index to an array
+ // access). So if we are DFG compiling anything in the program, we want this code to
+ // ensure that it produces integers whenever possible.
+
+ // FIXME: This will fail to convert to integer if the result is zero. We should
+ // distinguish between positive zero and negative zero here.
+
+ JumpList notInteger;
+ branchConvertDoubleToInt32(fpRegT1, regT2, notInteger, fpRegT0);
+ // If we've got an integer, we might as well make that the result of the division.
+ emitStoreInt32(dst, regT2);
+ Jump isInteger = jump();
+ notInteger.link(this);
+ add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
+ emitStoreDouble(dst, fpRegT1);
+ isInteger.link(this);
+ break;
+ }
case op_jless:
emitLoadDouble(op1, fpRegT2);
addJump(branchDouble(DoubleLessThan, fpRegT2, fpRegT0), dst);
@@ -270,6 +824,49 @@ void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, int dst, int op1, int op2, Opera
// Do the math.
switch (opcodeID) {
+ case op_mul:
+ emitLoadDouble(op2, fpRegT2);
+ mulDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_add:
+ emitLoadDouble(op2, fpRegT2);
+ addDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_sub:
+ emitLoadDouble(op2, fpRegT2);
+ subDouble(fpRegT2, fpRegT0);
+ emitStoreDouble(dst, fpRegT0);
+ break;
+ case op_div: {
+ emitLoadDouble(op2, fpRegT2);
+ divDouble(fpRegT2, fpRegT0);
+ // Is the result actually an integer? The DFG JIT would really like to know. If it's
+ // not an integer, we increment a count. If this count together with the slow
+ // case counter is below threshold then the DFG JIT will compile this division
+ // with a speculation that the remainder is zero.
+
+ // As well, there are cases where a double result here would cause an important field
+ // in the heap to sometimes have doubles in it, resulting in double predictions getting
+ // propagated to a use site where it might cause damage (such as the index to an array
+ // access). So if we are DFG compiling anything in the program, we want this code to
+ // ensure that it produces integers whenever possible.
+
+ // FIXME: This will fail to convert to integer if the result is zero. We should
+ // distinguish between positive zero and negative zero here.
+
+ JumpList notInteger;
+ branchConvertDoubleToInt32(fpRegT0, regT2, notInteger, fpRegT1);
+ // If we've got an integer, we might as well make that the result of the division.
+ emitStoreInt32(dst, regT2);
+ Jump isInteger = jump();
+ notInteger.link(this);
+ add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
+ emitStoreDouble(dst, fpRegT0);
+ isInteger.link(this);
+ break;
+ }
case op_jless:
emitLoadDouble(op2, fpRegT1);
addJump(branchDouble(DoubleLessThan, fpRegT0, fpRegT1), dst);
@@ -310,13 +907,169 @@ void JIT::emitBinaryDoubleOp(OpcodeID opcodeID, int dst, int op1, int op2, Opera
end.link(this);
}
+// Multiplication (*)
+
+void JIT::emit_op_mul(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
+
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+ notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+
+ // Int32 case.
+ move(regT0, regT3);
+ addSlowCase(branchMul32(Overflow, regT2, regT0));
+ addSlowCase(branchTest32(Zero, regT0));
+ emitStoreInt32(dst, regT0, (op1 == dst || op2 == dst));
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(notInt32Op1);
+ addSlowCase(notInt32Op2);
+ return;
+ }
+ Jump end = jump();
+
+ // Double case.
+ emitBinaryDoubleOp(op_mul, dst, op1, op2, types, notInt32Op1, notInt32Op2);
+ end.link(this);
+}
+
+void JIT::emitSlow_op_mul(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int dst = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ Jump overflow = getSlowCase(iter); // overflow check
+ linkSlowCase(iter); // zero result check
+
+ Jump negZero = branchOr32(Signed, regT2, regT3);
+ emitStoreInt32(dst, TrustedImm32(0), (op1 == dst || op2 == dst));
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_mul));
+
+ negZero.link(this);
+ // We only get here if we have a genuine negative zero. Record this,
+ // so that the speculative JIT knows that we failed speculation
+ // because of a negative zero.
+ add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
+ overflow.link(this);
+
+ if (!supportsFloatingPoint()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // int32 check
+ }
+
+ if (supportsFloatingPoint()) {
+ if (!types.first().definitelyIsNumber())
+ linkSlowCase(iter); // double check
+
+ if (!types.second().definitelyIsNumber()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // double check
+ }
+ }
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_mul);
+ slowPathCall.call();
+}
+
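
The zero-result slow case exists because an int32 multiply cannot represent JavaScript's -0: 0 * -5 must produce the double -0. emit_op_mul saves op1 into regT3 before the multiply clobbers regT0, and the slow path's branchOr32(Signed, regT2, regT3) is taken iff either operand is negative, i.e. exactly when the true result is -0 rather than +0. A sketch (plain C++, not JSC code):

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    // For an int32 product of zero: may it be stored as int32 +0?
    static bool zeroProductIsPositiveZero(int32_t a, int32_t b) {
        // branchOr32(Signed, regT2, regT3) not taken <=> both sign bits clear.
        return (a | b) >= 0;
    }

    int main() {
        assert(zeroProductIsPositiveZero(0, 5));    // 0 * 5 == +0, fast path
        assert(!zeroProductIsPositiveZero(0, -5));  // 0 * -5 == -0, profile it
        assert(std::signbit(0.0 * -5.0));           // the double JS requires
        return 0;
    }
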
+// Division (/)
+
+void JIT::emit_op_div(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int op1 = currentInstruction[2].u.operand;
+ int op2 = currentInstruction[3].u.operand;
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ m_codeBlock->addSpecialFastCaseProfile(m_bytecodeOffset);
+
+ if (!supportsFloatingPoint()) {
+ addSlowCase(jump());
+ return;
+ }
+
+ // Int32 divide.
+ JumpList notInt32Op1;
+ JumpList notInt32Op2;
+
+ JumpList end;
+
+ emitLoad2(op1, regT1, regT0, op2, regT3, regT2);
+
+ notInt32Op1.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ notInt32Op2.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
+
+ convertInt32ToDouble(regT0, fpRegT0);
+ convertInt32ToDouble(regT2, fpRegT1);
+ divDouble(fpRegT1, fpRegT0);
+ // Is the result actually an integer? The DFG JIT would really like to know. If it's
+ // not an integer, we increment a count. If this count together with the slow
+ // case counter is below threshold then the DFG JIT will compile this division
+ // with a speculation that the remainder is zero.
+
+ // As well, there are cases where a double result here would cause an important field
+ // in the heap to sometimes have doubles in it, resulting in double predictions getting
+ // propagated to a use site where it might cause damage (such as the index to an array
+ // access). So if we are DFG compiling anything in the program, we want this code to
+ // ensure that it produces integers whenever possible.
+
+ // FIXME: This will fail to convert to integer if the result is zero. We should
+ // distinguish between positive zero and negative zero here.
+
+ JumpList notInteger;
+ branchConvertDoubleToInt32(fpRegT0, regT2, notInteger, fpRegT1);
+ // If we've got an integer, we might as well make that the result of the division.
+ emitStoreInt32(dst, regT2);
+ end.append(jump());
+ notInteger.link(this);
+ add32(TrustedImm32(1), AbsoluteAddress(&m_codeBlock->specialFastCaseProfileForBytecodeOffset(m_bytecodeOffset)->m_counter));
+ emitStoreDouble(dst, fpRegT0);
+ end.append(jump());
+
+ // Double divide.
+ emitBinaryDoubleOp(op_div, dst, op1, op2, types, notInt32Op1, notInt32Op2);
+ end.link(this);
+}
+
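
branchConvertDoubleToInt32 is assumed here to truncate the quotient to an int32, convert back, and compare, branching to notInteger when the round trip is lossy; as the FIXME says, a zero quotient also takes the notInteger path, because -0 cannot round-trip through int32. A sketch of that idiom (plain C++; truncation shown only for in-range values):

    #include <cassert>
    #include <cstdint>

    static bool doubleIsInt32(double d, int32_t& out) {
        int32_t truncated = static_cast<int32_t>(d); // cvttsd2si-style truncation
        if (static_cast<double>(truncated) != d)
            return false;           // lossy round trip: keep the double result
        if (truncated == 0)
            return false;           // can't tell +0 from -0 here; stay double
        out = truncated;
        return true;                // store an int32, which the DFG prefers
    }

    int main() {
        int32_t i;
        assert(doubleIsInt32(6.0 / 2.0, i) && i == 3);
        assert(!doubleIsInt32(1.0 / 2.0, i)); // 0.5 stays a double
        assert(!doubleIsInt32(0.0 / 2.0, i)); // zero stays a double (the FIXME)
        return 0;
    }
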
+void JIT::emitSlow_op_div(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ OperandTypes types = OperandTypes::fromInt(currentInstruction[4].u.operand);
+
+ if (!supportsFloatingPoint())
+ linkSlowCase(iter);
+ else {
+ if (!types.first().definitelyIsNumber())
+ linkSlowCase(iter); // double check
+
+ if (!types.second().definitelyIsNumber()) {
+ linkSlowCase(iter); // int32 check
+ linkSlowCase(iter); // double check
+ }
+ }
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_div);
+ slowPathCall.call();
+}
+
// Mod (%)
/* ------------------------------ BEGIN: OP_MOD ------------------------------ */
void JIT::emit_op_mod(Instruction* currentInstruction)
{
-#if CPU(X86)
+#if CPU(X86) || CPU(X86_64)
int dst = currentInstruction[1].u.operand;
int op1 = currentInstruction[2].u.operand;
int op2 = currentInstruction[3].u.operand;
@@ -336,8 +1089,8 @@ void JIT::emit_op_mod(Instruction* currentInstruction)
Jump denominatorNotNeg1 = branch32(NotEqual, regT2, TrustedImm32(-1));
addSlowCase(branch32(Equal, regT0, TrustedImm32(-2147483647-1)));
denominatorNotNeg1.link(this);
- x86ConvertToDoubleWord32();
- x86Div32(regT2);
+ m_assembler.cdq();
+ m_assembler.idivl_r(regT2);
Jump numeratorPositive = branch32(GreaterThanOrEqual, regT3, TrustedImm32(0));
addSlowCase(branchTest32(Zero, regT1));
numeratorPositive.link(this);
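
On x86, idiv divides the 64-bit pair edx:eax, so cdq must first sign-extend eax into edx; the quotient lands in eax and the remainder in edx (regT1 here). The guards around it mirror JS semantics: INT32_MIN / -1 would fault in idiv, and a zero remainder with a negative numerator is -0 in JavaScript (a zero denominator is presumably rejected earlier in the function, outside this hunk). A sketch of the slow-path predicate (plain C++, not JSC code):

    #include <cassert>
    #include <cmath>
    #include <cstdint>

    static bool modNeedsSlowPath(int32_t num, int32_t den) {
        if (den == 0) return true;                       // division by zero
        if (den == -1 && num == INT32_MIN) return true;  // idiv would fault
        if (num < 0 && num % den == 0) return true;      // JS result is -0
        return false;
    }

    int main() {
        assert(!modNeedsSlowPath(7, 3));             // 7 % 3 == 1, fast path
        assert(modNeedsSlowPath(-4, 2));             // -4 % 2 == -0 in JS
        assert(modNeedsSlowPath(INT32_MIN, -1));     // overflow case
        assert(std::signbit(std::fmod(-4.0, 2.0))); // the JS -0 result
        return 0;
    }
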
diff --git a/Source/JavaScriptCore/jit/JITBitAndGenerator.cpp b/Source/JavaScriptCore/jit/JITBitAndGenerator.cpp
deleted file mode 100644
index 715b503d2..000000000
--- a/Source/JavaScriptCore/jit/JITBitAndGenerator.cpp
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "JITBitAndGenerator.h"
-
-#if ENABLE(JIT)
-
-namespace JSC {
-
-void JITBitAndGenerator::generateFastPath(CCallHelpers& jit)
-{
-#if USE(JSVALUE64)
- ASSERT(m_scratchGPR != InvalidGPRReg);
- ASSERT(m_scratchGPR != m_left.payloadGPR());
- ASSERT(m_scratchGPR != m_right.payloadGPR());
-#else
- UNUSED_PARAM(m_scratchGPR);
-#endif
-
- ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());
-
- m_didEmitFastPath = true;
-
- if (m_leftOperand.isConstInt32() || m_rightOperand.isConstInt32()) {
- JSValueRegs var = m_leftOperand.isConstInt32() ? m_right : m_left;
- SnippetOperand& constOpr = m_leftOperand.isConstInt32() ? m_leftOperand : m_rightOperand;
-
- // Try to do intVar & intConstant.
- m_slowPathJumpList.append(jit.branchIfNotInt32(var));
-
- jit.moveValueRegs(var, m_result);
- if (constOpr.asConstInt32() != static_cast<int32_t>(0xffffffff)) {
-#if USE(JSVALUE64)
- jit.and64(CCallHelpers::Imm32(constOpr.asConstInt32()), m_result.payloadGPR());
- if (constOpr.asConstInt32() >= 0)
- jit.or64(GPRInfo::tagTypeNumberRegister, m_result.payloadGPR());
-#else
- jit.and32(CCallHelpers::Imm32(constOpr.asConstInt32()), m_result.payloadGPR());
-#endif
- }
-
- } else {
- ASSERT(!m_leftOperand.isConstInt32() && !m_rightOperand.isConstInt32());
-
- // Try to do intVar & intVar.
-#if USE(JSVALUE64)
- jit.move(m_left.payloadGPR(), m_scratchGPR);
- jit.and64(m_right.payloadGPR(), m_scratchGPR);
- m_slowPathJumpList.append(jit.branchIfNotInt32(m_scratchGPR));
- jit.move(m_scratchGPR, m_result.payloadGPR());
-#else
- m_slowPathJumpList.append(jit.branchIfNotInt32(m_left));
- m_slowPathJumpList.append(jit.branchIfNotInt32(m_right));
- jit.moveValueRegs(m_left, m_result);
- jit.and32(m_right.payloadGPR(), m_result.payloadGPR());
-#endif
- }
-}
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
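
The JSVALUE64 branch of this deleted generator depends on JSC's NaN-boxing of this era: an int32 is boxed by OR-ing the TagTypeNumber pattern into the high bits, and and64 with a sign-extended non-negative immediate clears that tag, which is why the code re-tags with or64(tagTypeNumberRegister) only when the constant is >= 0. A sketch (the 0xffff... constant matches JSC's TagTypeNumber; the rest is illustrative, not JSC code):

    #include <cassert>
    #include <cstdint>

    static const uint64_t TagTypeNumber = 0xffff000000000000ull;

    static uint64_t boxInt32(int32_t i) {
        return TagTypeNumber | static_cast<uint32_t>(i);
    }

    // Mirrors the deleted intVar & intConstant path.
    static uint64_t andBoxedWithConstant(uint64_t boxed, int32_t k) {
        uint64_t imm = static_cast<uint64_t>(static_cast<int64_t>(k)); // and64 Imm32 sign-extends
        uint64_t result = boxed & imm;
        if (k >= 0)
            result |= TagTypeNumber; // a non-negative imm zeroed the tag bits
        return result;
    }

    int main() {
        assert(andBoxedWithConstant(boxInt32(0x0f0f), 0x00ff) == boxInt32(0x000f));
        assert(andBoxedWithConstant(boxInt32(7), -1) == boxInt32(7)); // tag survives
        return 0;
    }
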
diff --git a/Source/JavaScriptCore/jit/JITBitAndGenerator.h b/Source/JavaScriptCore/jit/JITBitAndGenerator.h
deleted file mode 100644
index de2436a30..000000000
--- a/Source/JavaScriptCore/jit/JITBitAndGenerator.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef JITBitAndGenerator_h
-#define JITBitAndGenerator_h
-
-#if ENABLE(JIT)
-
-#include "JITBitBinaryOpGenerator.h"
-
-namespace JSC {
-
-class JITBitAndGenerator : public JITBitBinaryOpGenerator {
-public:
- JITBitAndGenerator(const SnippetOperand& leftOperand, const SnippetOperand& rightOperand,
- JSValueRegs result, JSValueRegs left, JSValueRegs right, GPRReg scratchGPR)
- : JITBitBinaryOpGenerator(leftOperand, rightOperand, result, left, right, scratchGPR)
- { }
-
- void generateFastPath(CCallHelpers&);
-};
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
-
-#endif // JITBitAndGenerator_h
diff --git a/Source/JavaScriptCore/jit/JITBitBinaryOpGenerator.h b/Source/JavaScriptCore/jit/JITBitBinaryOpGenerator.h
deleted file mode 100644
index e6ffbd68e..000000000
--- a/Source/JavaScriptCore/jit/JITBitBinaryOpGenerator.h
+++ /dev/null
@@ -1,71 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef JITBitBinaryOpGenerator_h
-#define JITBitBinaryOpGenerator_h
-
-#if ENABLE(JIT)
-
-#include "CCallHelpers.h"
-#include "SnippetOperand.h"
-
-namespace JSC {
-
-class JITBitBinaryOpGenerator {
-public:
- JITBitBinaryOpGenerator(const SnippetOperand& leftOperand, const SnippetOperand& rightOperand,
- JSValueRegs result, JSValueRegs left, JSValueRegs right, GPRReg scratchGPR)
- : m_leftOperand(leftOperand)
- , m_rightOperand(rightOperand)
- , m_result(result)
- , m_left(left)
- , m_right(right)
- , m_scratchGPR(scratchGPR)
- {
- ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());
- }
-
- bool didEmitFastPath() const { return m_didEmitFastPath; }
- CCallHelpers::JumpList& endJumpList() { return m_endJumpList; }
- CCallHelpers::JumpList& slowPathJumpList() { return m_slowPathJumpList; }
-
-protected:
- SnippetOperand m_leftOperand;
- SnippetOperand m_rightOperand;
- JSValueRegs m_result;
- JSValueRegs m_left;
- JSValueRegs m_right;
- GPRReg m_scratchGPR;
- bool m_didEmitFastPath { false };
-
- CCallHelpers::JumpList m_endJumpList;
- CCallHelpers::JumpList m_slowPathJumpList;
-};
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
-
-#endif // JITBitBinaryOpGenerator_h
diff --git a/Source/JavaScriptCore/jit/JITBitOrGenerator.cpp b/Source/JavaScriptCore/jit/JITBitOrGenerator.cpp
deleted file mode 100644
index 9f843c135..000000000
--- a/Source/JavaScriptCore/jit/JITBitOrGenerator.cpp
+++ /dev/null
@@ -1,74 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "JITBitOrGenerator.h"
-
-#if ENABLE(JIT)
-
-namespace JSC {
-
-void JITBitOrGenerator::generateFastPath(CCallHelpers& jit)
-{
- ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());
-
- m_didEmitFastPath = true;
-
- if (m_leftOperand.isConstInt32() || m_rightOperand.isConstInt32()) {
- JSValueRegs var = m_leftOperand.isConstInt32() ? m_right : m_left;
- SnippetOperand& constOpr = m_leftOperand.isConstInt32() ? m_leftOperand : m_rightOperand;
-
- // Try to do intVar | intConstant.
- m_slowPathJumpList.append(jit.branchIfNotInt32(var));
-
- jit.moveValueRegs(var, m_result);
- if (constOpr.asConstInt32()) {
-#if USE(JSVALUE64)
- jit.or32(CCallHelpers::Imm32(constOpr.asConstInt32()), m_result.payloadGPR());
- jit.or64(GPRInfo::tagTypeNumberRegister, m_result.payloadGPR());
-#else
- jit.or32(CCallHelpers::Imm32(constOpr.asConstInt32()), m_result.payloadGPR());
-#endif
- }
-
- } else {
- ASSERT(!m_leftOperand.isConstInt32() && !m_rightOperand.isConstInt32());
-
- // Try to do intVar | intVar.
- m_slowPathJumpList.append(jit.branchIfNotInt32(m_left));
- m_slowPathJumpList.append(jit.branchIfNotInt32(m_right));
-
- jit.moveValueRegs(m_left, m_result);
-#if USE(JSVALUE64)
- jit.or64(m_right.payloadGPR(), m_result.payloadGPR());
-#else
- jit.or32(m_right.payloadGPR(), m_result.payloadGPR());
-#endif
- }
-}
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITBitOrGenerator.h b/Source/JavaScriptCore/jit/JITBitOrGenerator.h
deleted file mode 100644
index a78df48e0..000000000
--- a/Source/JavaScriptCore/jit/JITBitOrGenerator.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef JITBitOrGenerator_h
-#define JITBitOrGenerator_h
-
-#if ENABLE(JIT)
-
-#include "JITBitBinaryOpGenerator.h"
-
-namespace JSC {
-
-class JITBitOrGenerator : public JITBitBinaryOpGenerator {
-public:
- JITBitOrGenerator(const SnippetOperand& leftOperand, const SnippetOperand& rightOperand,
- JSValueRegs result, JSValueRegs left, JSValueRegs right, GPRReg unused = InvalidGPRReg)
- : JITBitBinaryOpGenerator(leftOperand, rightOperand, result, left, right, unused)
- { }
-
- void generateFastPath(CCallHelpers&);
-};
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
-
-#endif // JITBitOrGenerator_h
diff --git a/Source/JavaScriptCore/jit/JITBitXorGenerator.cpp b/Source/JavaScriptCore/jit/JITBitXorGenerator.cpp
deleted file mode 100644
index 8ccf1b5d6..000000000
--- a/Source/JavaScriptCore/jit/JITBitXorGenerator.cpp
+++ /dev/null
@@ -1,73 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "JITBitXorGenerator.h"
-
-#if ENABLE(JIT)
-
-namespace JSC {
-
-void JITBitXorGenerator::generateFastPath(CCallHelpers& jit)
-{
- ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());
-
- m_didEmitFastPath = true;
-
- if (m_leftOperand.isConstInt32() || m_rightOperand.isConstInt32()) {
- JSValueRegs var = m_leftOperand.isConstInt32() ? m_right : m_left;
- SnippetOperand& constOpr = m_leftOperand.isConstInt32() ? m_leftOperand : m_rightOperand;
-
- // Try to do intVar ^ intConstant.
- m_slowPathJumpList.append(jit.branchIfNotInt32(var));
-
- jit.moveValueRegs(var, m_result);
-#if USE(JSVALUE64)
- jit.xor32(CCallHelpers::Imm32(constOpr.asConstInt32()), m_result.payloadGPR());
- jit.or64(GPRInfo::tagTypeNumberRegister, m_result.payloadGPR());
-#else
- jit.xor32(CCallHelpers::Imm32(constOpr.asConstInt32()), m_result.payloadGPR());
-#endif
-
- } else {
- ASSERT(!m_leftOperand.isConstInt32() && !m_rightOperand.isConstInt32());
-
- // Try to do intVar ^ intVar.
- m_slowPathJumpList.append(jit.branchIfNotInt32(m_left));
- m_slowPathJumpList.append(jit.branchIfNotInt32(m_right));
-
- jit.moveValueRegs(m_left, m_result);
-#if USE(JSVALUE64)
- jit.xor64(m_right.payloadGPR(), m_result.payloadGPR());
- jit.or64(GPRInfo::tagTypeNumberRegister, m_result.payloadGPR());
-#else
- jit.xor32(m_right.payloadGPR(), m_result.payloadGPR());
-#endif
- }
-}
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITBitXorGenerator.h b/Source/JavaScriptCore/jit/JITBitXorGenerator.h
deleted file mode 100644
index 81bbd15c2..000000000
--- a/Source/JavaScriptCore/jit/JITBitXorGenerator.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef JITBitXorGenerator_h
-#define JITBitXorGenerator_h
-
-#if ENABLE(JIT)
-
-#include "JITBitBinaryOpGenerator.h"
-
-namespace JSC {
-
-class JITBitXorGenerator : public JITBitBinaryOpGenerator {
-public:
- JITBitXorGenerator(const SnippetOperand& leftOperand, const SnippetOperand& rightOperand,
- JSValueRegs result, JSValueRegs left, JSValueRegs right, GPRReg unused = InvalidGPRReg)
- : JITBitBinaryOpGenerator(leftOperand, rightOperand, result, left, right, unused)
- { }
-
- void generateFastPath(CCallHelpers&);
-};
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
-
-#endif // JITBitOrGenerator_h
diff --git a/Source/JavaScriptCore/jit/JITCall.cpp b/Source/JavaScriptCore/jit/JITCall.cpp
index d406d5b6f..90c2e4fb9 100644
--- a/Source/JavaScriptCore/jit/JITCall.cpp
+++ b/Source/JavaScriptCore/jit/JITCall.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,18 +29,16 @@
#if USE(JSVALUE64)
#include "JIT.h"
-#include "CallFrameShuffler.h"
+#include "Arguments.h"
#include "CodeBlock.h"
#include "JITInlines.h"
#include "JSArray.h"
#include "JSFunction.h"
#include "Interpreter.h"
-#include "JSCInlines.h"
-#include "LinkBuffer.h"
+#include "Operations.h"
+#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
-#include "SetupVarargsFrame.h"
-#include "StackAlignment.h"
#include "ThunkGenerators.h"
#include <wtf/StringPrintStream.h>
@@ -54,50 +52,74 @@ void JIT::emitPutCallResult(Instruction* instruction)
emitPutVirtualRegister(dst);
}
-void JIT::compileSetupVarargsFrame(Instruction* instruction, CallLinkInfo* info)
+void JIT::compileLoadVarargs(Instruction* instruction)
{
int thisValue = instruction[3].u.operand;
int arguments = instruction[4].u.operand;
int firstFreeRegister = instruction[5].u.operand;
- int firstVarArgOffset = instruction[6].u.operand;
+
+ JumpList slowCase;
+ JumpList end;
+ bool canOptimize = m_codeBlock->usesArguments()
+ && arguments == m_codeBlock->argumentsRegister().offset()
+ && !m_codeBlock->symbolTable()->slowArguments();
+
+ if (canOptimize) {
+ emitGetVirtualRegister(arguments, regT0);
+ slowCase.append(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(JSValue()))));
+
+ emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT0);
+ slowCase.append(branch32(Above, regT0, TrustedImm32(Arguments::MaxArguments + 1)));
+ // regT0: argumentCountIncludingThis
+
+ move(regT0, regT1);
+ neg64(regT1);
+ add64(TrustedImm32(firstFreeRegister - JSStack::CallFrameHeaderSize), regT1);
+ lshift64(TrustedImm32(3), regT1);
+ addPtr(callFrameRegister, regT1);
+ // regT1: newCallFrame
+
+ slowCase.append(branchPtr(Above, AbsoluteAddress(m_vm->addressOfJSStackLimit()), regT1));
+
+ // Initialize ArgumentCount.
+ store32(regT0, Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+
+ // Initialize 'this'.
+ emitGetVirtualRegister(thisValue, regT2);
+ store64(regT2, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
+
+ // Copy arguments.
+ signExtend32ToPtr(regT0, regT0);
+ end.append(branchSub64(Zero, TrustedImm32(1), regT0));
+ // regT0: argumentCount
+
+ Label copyLoop = label();
+ load64(BaseIndex(callFrameRegister, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT2);
+ store64(regT2, BaseIndex(regT1, regT0, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
+ branchSub64(NonZero, TrustedImm32(1), regT0).linkTo(copyLoop, this);
+
+ end.append(jump());
+ }
+
+ if (canOptimize)
+ slowCase.link(this);
emitGetVirtualRegister(arguments, regT1);
- callOperation(operationSizeFrameForVarargs, regT1, -firstFreeRegister, firstVarArgOffset);
- move(TrustedImm32(-firstFreeRegister), regT1);
- emitSetVarargsFrame(*this, returnValueGPR, false, regT1, regT1);
- addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 5 * sizeof(void*)))), regT1, stackPointerRegister);
+ callOperation(operationSizeAndAllocFrameForVarargs, regT1, firstFreeRegister);
+ emitGetVirtualRegister(thisValue, regT1);
emitGetVirtualRegister(arguments, regT2);
- callOperation(operationSetupVarargsFrame, regT1, regT2, firstVarArgOffset, regT0);
+ callOperation(operationLoadVarargs, returnValueGPR, regT1, regT2);
move(returnValueGPR, regT1);
- // Profile the argument count.
- load32(Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset), regT2);
- load8(info->addressOfMaxNumArguments(), regT0);
- Jump notBiggest = branch32(Above, regT0, regT2);
- Jump notSaturated = branch32(BelowOrEqual, regT2, TrustedImm32(255));
- move(TrustedImm32(255), regT2);
- notSaturated.link(this);
- store8(regT2, info->addressOfMaxNumArguments());
- notBiggest.link(this);
-
- // Initialize 'this'.
- emitGetVirtualRegister(thisValue, regT0);
- store64(regT0, Address(regT1, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
-
- addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), regT1, stackPointerRegister);
+ if (canOptimize)
+ end.link(this);
}
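
The canOptimize test gates an inlined f.apply(this, arguments) path: the CodeBlock must use a lazily-created arguments object in the expected register with no slow (aliased) arguments, and even then the emitted code re-checks at run time that the object was never materialized (the branch64 against the empty JSValue) and that the count fits under Arguments::MaxArguments. A sketch of the compile-time predicate, with stand-in fields for the CodeBlock queries (illustrative, not JSC API):

    #include <cassert>

    struct CodeBlockInfo {
        bool usesArguments;
        int argumentsRegister;
        bool hasSlowArguments;
    };

    static bool canInlineVarargs(const CodeBlockInfo& cb, int argumentsOperand) {
        // Mirrors: m_codeBlock->usesArguments()
        //       && arguments == m_codeBlock->argumentsRegister().offset()
        //       && !m_codeBlock->symbolTable()->slowArguments()
        return cb.usesArguments
            && argumentsOperand == cb.argumentsRegister
            && !cb.hasSlowArguments;
    }

    int main() {
        CodeBlockInfo cb{true, 12, false};
        assert(canInlineVarargs(cb, 12));
        assert(!canInlineVarargs(cb, 13)); // 'arguments' lives elsewhere: slow path
        return 0;
    }
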
void JIT::compileCallEval(Instruction* instruction)
{
- addPtr(TrustedImm32(-static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))), stackPointerRegister, regT1);
- storePtr(callFrameRegister, Address(regT1, CallFrame::callerFrameOffset()));
-
- addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
- checkStackPointerAlignment();
-
- callOperation(operationCallEval, regT1);
-
+ callOperationWithCallFrameRollbackOnException(operationCallEval);
addSlowCase(branch64(Equal, regT0, TrustedImm64(JSValue::encode(JSValue()))));
+ emitGetCallerFrameFromCallFrameHeaderPtr(callFrameRegister);
sampleCodeBlock(m_codeBlock);
@@ -106,21 +128,10 @@ void JIT::compileCallEval(Instruction* instruction)
void JIT::compileCallEvalSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
{
- CallLinkInfo* info = m_codeBlock->addCallLinkInfo();
- info->setUpCall(CallLinkInfo::Call, CodeOrigin(m_bytecodeOffset), regT0);
-
linkSlowCase(iter);
- int registerOffset = -instruction[4].u.operand;
- addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
-
- load64(Address(stackPointerRegister, sizeof(Register) * JSStack::Callee - sizeof(CallerFrameAndPC)), regT0);
- move(TrustedImmPtr(info), regT2);
- MacroAssemblerCodeRef virtualThunk = virtualThunkFor(m_vm, *info);
- info->setSlowStub(createJITStubRoutine(virtualThunk, *m_vm, nullptr, true));
- emitNakedCall(virtualThunk.code());
- addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
- checkStackPointerAlignment();
+ emitGetFromCallFrameHeader64(JSStack::Callee, regT0);
+ emitNakedCall(m_vm->getCTIStub(virtualCallThunkGenerator).code());
sampleCodeBlock(m_codeBlock);
@@ -136,23 +147,17 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
- Initializes ArgumentCount; CallerFrame; Callee.
For a JS call:
+ - Caller initializes ScopeChain.
- Callee initializes ReturnPC; CodeBlock.
- Callee restores callFrameRegister before return.
For a non-JS call:
- - Caller initializes ReturnPC; CodeBlock.
+ - Caller initializes ScopeChain; ReturnPC; CodeBlock.
- Caller restores callFrameRegister after return.
*/
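/* A sketch of the header writes the fast path below performs (64-bit value
   layout; regT1 holds the new frame, slot names follow JSStack):
     newFrame[ArgumentCount].payload = argCount;
     callerFrame[ArgumentCount].tag  = locationBits;     // return-site marker
     newFrame[CallerFrame]           = callerFrame;
     newFrame[Callee]                = callee;
     newFrame[ScopeChain]            = callee->m_scope;  // JS calls only
*/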
- COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct), call_and_construct_opcodes_must_be_same_length);
- COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_call_varargs), call_and_call_varargs_opcodes_must_be_same_length);
- COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_construct_varargs), call_and_construct_varargs_opcodes_must_be_same_length);
- COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call), call_and_tail_call_opcodes_must_be_same_length);
- COMPILE_ASSERT(OPCODE_LENGTH(op_call) == OPCODE_LENGTH(op_tail_call_varargs), call_and_tail_call_varargs_opcodes_must_be_same_length);
- CallLinkInfo* info;
- if (opcodeID != op_call_eval)
- info = m_codeBlock->addCallLinkInfo();
- if (opcodeID == op_call_varargs || opcodeID == op_construct_varargs || opcodeID == op_tail_call_varargs)
- compileSetupVarargsFrame(instruction, info);
+
+ if (opcodeID == op_call_varargs)
+ compileLoadVarargs(instruction);
else {
int argCount = instruction[3].u.operand;
int registerOffset = -instruction[4].u.operand;
@@ -160,21 +165,23 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
if (opcodeID == op_call && shouldEmitProfiling()) {
emitGetVirtualRegister(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0);
Jump done = emitJumpIfNotJSCell(regT0);
- load32(Address(regT0, JSCell::structureIDOffset()), regT0);
- store32(regT0, instruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile->addressOfLastSeenStructureID());
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
+ storePtr(regT0, instruction[6].u.arrayProfile->addressOfLastSeenStructure());
done.link(this);
}
- addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
- store32(TrustedImm32(argCount), Address(stackPointerRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
- } // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized.
+ addPtr(TrustedImm32(registerOffset * sizeof(Register)), callFrameRegister, regT1);
+ store32(TrustedImm32(argCount), Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ } // regT1 holds newCallFrame with ArgumentCount initialized.
uint32_t bytecodeOffset = instruction - m_codeBlock->instructions().begin();
- uint32_t locationBits = CallSiteIndex(bytecodeOffset).bits();
- store32(TrustedImm32(locationBits), Address(callFrameRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + TagOffset));
+ uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(bytecodeOffset);
+ store32(TrustedImm32(locationBits), Address(callFrameRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
emitGetVirtualRegister(callee, regT0); // regT0 holds callee.
- store64(regT0, Address(stackPointerRegister, JSStack::Callee * static_cast<int>(sizeof(Register)) - sizeof(CallerFrameAndPC)));
+ store64(callFrameRegister, Address(regT1, CallFrame::callerFrameOffset()));
+ store64(regT0, Address(regT1, JSStack::Callee * static_cast<int>(sizeof(Register))));
+ move(regT1, callFrameRegister);
if (opcodeID == op_call_eval) {
compileCallEval(instruction);
@@ -185,44 +192,15 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));
addSlowCase(slowCase);
- ASSERT(m_callCompilationInfo.size() == callLinkInfoIndex);
- info->setUpCall(CallLinkInfo::callTypeFor(opcodeID), CodeOrigin(m_bytecodeOffset), regT0);
- m_callCompilationInfo.append(CallCompilationInfo());
- m_callCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
- m_callCompilationInfo[callLinkInfoIndex].callLinkInfo = info;
-
- if (opcodeID == op_tail_call) {
- CallFrameShuffleData shuffleData;
- shuffleData.tagTypeNumber = GPRInfo::tagTypeNumberRegister;
- shuffleData.numLocals =
- instruction[4].u.operand - sizeof(CallerFrameAndPC) / sizeof(Register);
- shuffleData.args.resize(instruction[3].u.operand);
- for (int i = 0; i < instruction[3].u.operand; ++i) {
- shuffleData.args[i] =
- ValueRecovery::displacedInJSStack(
- virtualRegisterForArgument(i) - instruction[4].u.operand,
- DataFormatJS);
- }
- shuffleData.callee =
- ValueRecovery::inGPR(regT0, DataFormatJS);
- shuffleData.setupCalleeSaveRegisters(m_codeBlock);
- info->setFrameShuffleData(shuffleData);
- CallFrameShuffler(*this, shuffleData).prepareForTailCall();
- m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedTailCall();
- return;
- }
+ ASSERT(m_callStructureStubCompilationInfo.size() == callLinkInfoIndex);
+ m_callStructureStubCompilationInfo.append(StructureStubCompilationInfo());
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].callType = CallLinkInfo::callTypeFor(opcodeID);
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].bytecodeIndex = m_bytecodeOffset;
- if (opcodeID == op_tail_call_varargs) {
- emitRestoreCalleeSaves();
- prepareForTailCallSlow();
- m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedTailCall();
- return;
- }
-
- m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
-
- addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
- checkStackPointerAlignment();
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT1);
+ emitPutToCallFrameHeader(regT1, JSStack::ScopeChain);
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
sampleCodeBlock(m_codeBlock);
@@ -238,34 +216,62 @@ void JIT::compileOpCallSlowCase(OpcodeID opcodeID, Instruction* instruction, Vec
linkSlowCase(iter);
- if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs)
- emitRestoreCalleeSaves();
-
- move(TrustedImmPtr(m_callCompilationInfo[callLinkInfoIndex].callLinkInfo), regT2);
-
- m_callCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_vm->getCTIStub(linkCallThunkGenerator).code());
-
- if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs) {
- abortWithReason(JITDidReturnFromTailCall);
- return;
- }
-
- addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
- checkStackPointerAlignment();
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? m_vm->getCTIStub(linkConstructThunkGenerator).code() : m_vm->getCTIStub(linkCallThunkGenerator).code());
sampleCodeBlock(m_codeBlock);
emitPutCallResult(instruction);
}
-void JIT::emit_op_call(Instruction* currentInstruction)
+void JIT::privateCompileClosureCall(CallLinkInfo* callLinkInfo, CodeBlock* calleeCodeBlock, Structure* expectedStructure, ExecutableBase* expectedExecutable, MacroAssemblerCodePtr codePtr)
{
- compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
+ JumpList slowCases;
+
+ slowCases.append(branchTestPtr(NonZero, regT0, tagMaskRegister));
+ slowCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(expectedStructure)));
+ slowCases.append(branchPtr(NotEqual, Address(regT0, JSFunction::offsetOfExecutable()), TrustedImmPtr(expectedExecutable)));
+
+ loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT1);
+ emitPutToCallFrameHeader(regT1, JSStack::ScopeChain);
+
+ Call call = nearCall();
+ Jump done = jump();
+
+ slowCases.link(this);
+ move(TrustedImmPtr(callLinkInfo->callReturnLocation.executableAddress()), regT2);
+ restoreReturnAddressBeforeReturn(regT2);
+ Jump slow = jump();
+
+ LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
+
+ patchBuffer.link(call, FunctionPtr(codePtr.executableAddress()));
+ patchBuffer.link(done, callLinkInfo->hotPathOther.labelAtOffset(0));
+ patchBuffer.link(slow, CodeLocationLabel(m_vm->getCTIStub(virtualCallThunkGenerator).code()));
+
+ RefPtr<ClosureCallStubRoutine> stubRoutine = adoptRef(new ClosureCallStubRoutine(
+ FINALIZE_CODE(
+ patchBuffer,
+ ("Baseline closure call stub for %s, return point %p, target %p (%s)",
+ toCString(*m_codeBlock).data(),
+ callLinkInfo->hotPathOther.labelAtOffset(0).executableAddress(),
+ codePtr.executableAddress(),
+ toCString(pointerDump(calleeCodeBlock)).data())),
+ *m_vm, m_codeBlock->ownerExecutable(), expectedStructure, expectedExecutable,
+ callLinkInfo->codeOrigin));
+
+ RepatchBuffer repatchBuffer(m_codeBlock);
+
+ repatchBuffer.replaceWithJump(
+ RepatchBuffer::startOfBranchPtrWithPatchOnRegister(callLinkInfo->hotPathBegin),
+ CodeLocationLabel(stubRoutine->code().code()));
+ repatchBuffer.relink(callLinkInfo->callReturnLocation, m_vm->getCTIStub(virtualCallThunkGenerator).code());
+
+ callLinkInfo->stub = stubRoutine.release();
}
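// In effect the stub generated above guards three properties of the callee
// before taking the near call (a sketch of the fast path; helper names are
// illustrative, not the actual accessors):
//
//   if (callee & tagMask)                           goto virtualCallThunk; // not a cell
//   if (callee->structure()  != expectedStructure)  goto virtualCallThunk;
//   if (callee->executable() != expectedExecutable) goto virtualCallThunk;
//   callFrame[ScopeChain] = callee->scopeChain();   // then nearCall(codePtr)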
-void JIT::emit_op_tail_call(Instruction* currentInstruction)
+void JIT::emit_op_call(Instruction* currentInstruction)
{
- compileOpCall(op_tail_call, currentInstruction, m_callLinkInfoIndex++);
+ compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
}
void JIT::emit_op_call_eval(Instruction* currentInstruction)
@@ -278,16 +284,6 @@ void JIT::emit_op_call_varargs(Instruction* currentInstruction)
compileOpCall(op_call_varargs, currentInstruction, m_callLinkInfoIndex++);
}
-void JIT::emit_op_tail_call_varargs(Instruction* currentInstruction)
-{
- compileOpCall(op_tail_call_varargs, currentInstruction, m_callLinkInfoIndex++);
-}
-
-void JIT::emit_op_construct_varargs(Instruction* currentInstruction)
-{
- compileOpCall(op_construct_varargs, currentInstruction, m_callLinkInfoIndex++);
-}
-
void JIT::emit_op_construct(Instruction* currentInstruction)
{
compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
@@ -298,11 +294,6 @@ void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry
compileOpCallSlowCase(op_call, currentInstruction, iter, m_callLinkInfoIndex++);
}
-void JIT::emitSlow_op_tail_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallSlowCase(op_tail_call, currentInstruction, iter, m_callLinkInfoIndex++);
-}
-
void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
compileOpCallSlowCase(op_call_eval, currentInstruction, iter, m_callLinkInfoIndex);
@@ -313,16 +304,6 @@ void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowC
compileOpCallSlowCase(op_call_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
}
-void JIT::emitSlow_op_tail_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallSlowCase(op_tail_call_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
-}
-
-void JIT::emitSlow_op_construct_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallSlowCase(op_construct_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
-}
-
void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
compileOpCallSlowCase(op_construct, currentInstruction, iter, m_callLinkInfoIndex++);
diff --git a/Source/JavaScriptCore/jit/JITCall32_64.cpp b/Source/JavaScriptCore/jit/JITCall32_64.cpp
index 4d163c2d4..6086038a2 100644
--- a/Source/JavaScriptCore/jit/JITCall32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITCall32_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,17 +29,16 @@
#if USE(JSVALUE32_64)
#include "JIT.h"
+#include "Arguments.h"
#include "CodeBlock.h"
#include "Interpreter.h"
#include "JITInlines.h"
#include "JSArray.h"
#include "JSFunction.h"
-#include "JSCInlines.h"
-#include "LinkBuffer.h"
+#include "Operations.h"
+#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
-#include "SetupVarargsFrame.h"
-#include "StackAlignment.h"
#include <wtf/StringPrintStream.h>
@@ -57,21 +56,43 @@ void JIT::emit_op_ret(Instruction* currentInstruction)
unsigned dst = currentInstruction[1].u.operand;
emitLoad(dst, regT1, regT0);
+ emitGetReturnPCFromCallFrameHeaderPtr(regT2);
+ emitGetCallerFrameFromCallFrameHeaderPtr(callFrameRegister);
- checkStackPointerAlignment();
- emitRestoreCalleeSaves();
- emitFunctionEpilogue();
+ restoreReturnAddressBeforeReturn(regT2);
ret();
}
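// The replacement epilogue amounts to, in pseudo-C (a sketch, not the
// removed code):
//
//   void* returnPC = callFrame[ReturnPC];
//   callFrame      = callFrame[CallerFrame];
//   jumpTo(returnPC);   // restoreReturnAddressBeforeReturn + ret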
-void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction)
{
- compileOpCallSlowCase(op_call, currentInstruction, iter, m_callLinkInfoIndex++);
+ unsigned result = currentInstruction[1].u.operand;
+ unsigned thisReg = currentInstruction[2].u.operand;
+
+ emitLoad(result, regT1, regT0);
+ Jump notJSCell = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ Jump notObject = emitJumpIfNotObject(regT2);
+
+ emitGetReturnPCFromCallFrameHeaderPtr(regT2);
+ emitGetCallerFrameFromCallFrameHeaderPtr(callFrameRegister);
+
+ restoreReturnAddressBeforeReturn(regT2);
+ ret();
+
+ notJSCell.link(this);
+ notObject.link(this);
+ emitLoad(thisReg, regT1, regT0);
+
+ emitGetReturnPCFromCallFrameHeaderPtr(regT2);
+ emitGetCallerFrameFromCallFrameHeaderPtr(callFrameRegister);
+
+ restoreReturnAddressBeforeReturn(regT2);
+ ret();
}
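// This opcode implements the construct-return rule: a constructor's
// non-object return value is discarded in favour of 'this'. For example:
//
//   function F() { this.x = 1; return 42; }
//   new F();   // yields the object { x: 1 }, not 42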
-void JIT::emitSlow_op_tail_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_call(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- compileOpCallSlowCase(op_tail_call, currentInstruction, iter, m_callLinkInfoIndex++);
+ compileOpCallSlowCase(op_call, currentInstruction, iter, m_callLinkInfoIndex++);
}
void JIT::emitSlow_op_call_eval(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -84,16 +105,6 @@ void JIT::emitSlow_op_call_varargs(Instruction* currentInstruction, Vector<SlowC
compileOpCallSlowCase(op_call_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
}
-void JIT::emitSlow_op_tail_call_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallSlowCase(op_tail_call_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
-}
-
-void JIT::emitSlow_op_construct_varargs(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- compileOpCallSlowCase(op_construct_varargs, currentInstruction, iter, m_callLinkInfoIndex++);
-}
-
void JIT::emitSlow_op_construct(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
compileOpCallSlowCase(op_construct, currentInstruction, iter, m_callLinkInfoIndex++);
@@ -104,11 +115,6 @@ void JIT::emit_op_call(Instruction* currentInstruction)
compileOpCall(op_call, currentInstruction, m_callLinkInfoIndex++);
}
-void JIT::emit_op_tail_call(Instruction* currentInstruction)
-{
- compileOpCall(op_tail_call, currentInstruction, m_callLinkInfoIndex++);
-}
-
void JIT::emit_op_call_eval(Instruction* currentInstruction)
{
compileOpCall(op_call_eval, currentInstruction, m_callLinkInfoIndex);
@@ -119,65 +125,82 @@ void JIT::emit_op_call_varargs(Instruction* currentInstruction)
compileOpCall(op_call_varargs, currentInstruction, m_callLinkInfoIndex++);
}
-void JIT::emit_op_tail_call_varargs(Instruction* currentInstruction)
-{
- compileOpCall(op_tail_call_varargs, currentInstruction, m_callLinkInfoIndex++);
-}
-
-void JIT::emit_op_construct_varargs(Instruction* currentInstruction)
-{
- compileOpCall(op_construct_varargs, currentInstruction, m_callLinkInfoIndex++);
-}
-
void JIT::emit_op_construct(Instruction* currentInstruction)
{
compileOpCall(op_construct, currentInstruction, m_callLinkInfoIndex++);
}
-void JIT::compileSetupVarargsFrame(Instruction* instruction, CallLinkInfo* info)
+void JIT::compileLoadVarargs(Instruction* instruction)
{
int thisValue = instruction[3].u.operand;
int arguments = instruction[4].u.operand;
int firstFreeRegister = instruction[5].u.operand;
- int firstVarArgOffset = instruction[6].u.operand;
+
+ JumpList slowCase;
+ JumpList end;
+ bool canOptimize = m_codeBlock->usesArguments()
+ && VirtualRegister(arguments) == m_codeBlock->argumentsRegister()
+ && !m_codeBlock->symbolTable()->slowArguments();
+
+ if (canOptimize) {
+ emitLoadTag(arguments, regT1);
+ slowCase.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag)));
+
+ load32(payloadFor(JSStack::ArgumentCount), regT2);
+ slowCase.append(branch32(Above, regT2, TrustedImm32(Arguments::MaxArguments + 1)));
+ // regT2: argumentCountIncludingThis
+
+ move(regT2, regT3);
+ neg32(regT3);
+ add32(TrustedImm32(firstFreeRegister - JSStack::CallFrameHeaderSize), regT3);
+ lshift32(TrustedImm32(3), regT3);
+ addPtr(callFrameRegister, regT3);
+ // regT3: newCallFrame
+
+ slowCase.append(branchPtr(Above, AbsoluteAddress(m_vm->addressOfJSStackLimit()), regT3));
+
+ // Initialize ArgumentCount.
+ store32(regT2, payloadFor(JSStack::ArgumentCount, regT3));
+
+ // Initialize 'this'.
+ emitLoad(thisValue, regT1, regT0);
+ store32(regT0, Address(regT3, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
+ store32(regT1, Address(regT3, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
+
+ // Copy arguments.
+ end.append(branchSub32(Zero, TrustedImm32(1), regT2));
+ // regT2: argumentCount;
+
+ Label copyLoop = label();
+ load32(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))), regT0);
+ load32(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))), regT1);
+ store32(regT0, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
+ store32(regT1, BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
+ branchSub32(NonZero, TrustedImm32(1), regT2).linkTo(copyLoop, this);
+
+ end.append(jump());
+ }
+
+ if (canOptimize)
+ slowCase.link(this);
emitLoad(arguments, regT1, regT0);
- callOperation(operationSizeFrameForVarargs, regT1, regT0, -firstFreeRegister, firstVarArgOffset);
- move(TrustedImm32(-firstFreeRegister), regT1);
- emitSetVarargsFrame(*this, returnValueGPR, false, regT1, regT1);
- addPtr(TrustedImm32(-(sizeof(CallerFrameAndPC) + WTF::roundUpToMultipleOf(stackAlignmentBytes(), 6 * sizeof(void*)))), regT1, stackPointerRegister);
- emitLoad(arguments, regT2, regT4);
- callOperation(operationSetupVarargsFrame, regT1, regT2, regT4, firstVarArgOffset, regT0);
- move(returnValueGPR, regT1);
-
- // Profile the argument count.
- load32(Address(regT1, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset), regT2);
- load8(info->addressOfMaxNumArguments(), regT0);
- Jump notBiggest = branch32(Above, regT0, regT2);
- Jump notSaturated = branch32(BelowOrEqual, regT2, TrustedImm32(255));
- move(TrustedImm32(255), regT2);
- notSaturated.link(this);
- store8(regT2, info->addressOfMaxNumArguments());
- notBiggest.link(this);
-
- // Initialize 'this'.
- emitLoad(thisValue, regT2, regT0);
- store32(regT0, Address(regT1, PayloadOffset + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
- store32(regT2, Address(regT1, TagOffset + (CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)))));
-
- addPtr(TrustedImm32(sizeof(CallerFrameAndPC)), regT1, stackPointerRegister);
+ callOperation(operationSizeAndAllocFrameForVarargs, regT1, regT0, firstFreeRegister);
+ move(returnValueGPR, regT5);
+ emitLoad(thisValue, regT1, regT0);
+ emitLoad(arguments, regT3, regT2);
+ callOperation(operationLoadVarargs, regT5, regT1, regT0, regT3, regT2);
+ move(returnValueGPR, regT3);
+
+ if (canOptimize)
+ end.link(this);
}
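// A C-level sketch of the fast-path frame math above (assuming
// sizeof(Register) == 8, which is what the TimesEight scaling encodes):
//
//   Register* newCallFrame = callFrame + firstFreeRegister
//       - JSStack::CallFrameHeaderSize - argumentCountIncludingThis;
//   if (vm->jsStackLimit > newCallFrame)
//       goto slowCase;   // not enough room; fall back to the C helpers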
void JIT::compileCallEval(Instruction* instruction)
{
- addPtr(TrustedImm32(-static_cast<ptrdiff_t>(sizeof(CallerFrameAndPC))), stackPointerRegister, regT1);
- storePtr(callFrameRegister, Address(regT1, CallFrame::callerFrameOffset()));
-
- addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
-
- callOperation(operationCallEval, regT1);
-
+ callOperationWithCallFrameRollbackOnException(operationCallEval);
addSlowCase(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));
+ emitGetCallerFrameFromCallFrameHeaderPtr(callFrameRegister);
sampleCodeBlock(m_codeBlock);
@@ -186,25 +209,10 @@ void JIT::compileCallEval(Instruction* instruction)
void JIT::compileCallEvalSlowCase(Instruction* instruction, Vector<SlowCaseEntry>::iterator& iter)
{
- CallLinkInfo* info = m_codeBlock->addCallLinkInfo();
- info->setUpCall(CallLinkInfo::Call, CodeOrigin(m_bytecodeOffset), regT0);
-
linkSlowCase(iter);
- int registerOffset = -instruction[4].u.operand;
-
- addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
-
- loadPtr(Address(stackPointerRegister, sizeof(Register) * JSStack::Callee - sizeof(CallerFrameAndPC)), regT0);
- loadPtr(Address(stackPointerRegister, sizeof(Register) * JSStack::Callee - sizeof(CallerFrameAndPC)), regT1);
- move(TrustedImmPtr(info), regT2);
-
emitLoad(JSStack::Callee, regT1, regT0);
- MacroAssemblerCodeRef virtualThunk = virtualThunkFor(m_vm, *info);
- info->setSlowStub(createJITStubRoutine(virtualThunk, *m_vm, nullptr, true));
- emitNakedCall(virtualThunk.code());
- addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
- checkStackPointerAlignment();
+ emitNakedCall(m_vm->getCTIStub(virtualCallThunkGenerator).code());
sampleCodeBlock(m_codeBlock);
@@ -220,18 +228,17 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
- Initializes ArgumentCount; CallerFrame; Callee.
For a JS call:
+ - Caller initializes ScopeChain.
- Callee initializes ReturnPC; CodeBlock.
- Callee restores callFrameRegister before return.
For a non-JS call:
- - Caller initializes ReturnPC; CodeBlock.
+ - Caller initializes ScopeChain; ReturnPC; CodeBlock.
- Caller restores callFrameRegister after return.
*/
- CallLinkInfo* info;
- if (opcodeID != op_call_eval)
- info = m_codeBlock->addCallLinkInfo();
- if (opcodeID == op_call_varargs || opcodeID == op_construct_varargs || opcodeID == op_tail_call_varargs)
- compileSetupVarargsFrame(instruction, info);
+
+ if (opcodeID == op_call_varargs)
+ compileLoadVarargs(instruction);
else {
int argCount = instruction[3].u.operand;
int registerOffset = -instruction[4].u.operand;
@@ -239,55 +246,44 @@ void JIT::compileOpCall(OpcodeID opcodeID, Instruction* instruction, unsigned ca
if (opcodeID == op_call && shouldEmitProfiling()) {
emitLoad(registerOffset + CallFrame::argumentOffsetIncludingThis(0), regT0, regT1);
Jump done = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
- loadPtr(Address(regT1, JSCell::structureIDOffset()), regT1);
- storePtr(regT1, instruction[OPCODE_LENGTH(op_call) - 2].u.arrayProfile->addressOfLastSeenStructureID());
+ loadPtr(Address(regT1, JSCell::structureOffset()), regT1);
+ storePtr(regT1, instruction[6].u.arrayProfile->addressOfLastSeenStructure());
done.link(this);
}
- addPtr(TrustedImm32(registerOffset * sizeof(Register) + sizeof(CallerFrameAndPC)), callFrameRegister, stackPointerRegister);
+ addPtr(TrustedImm32(registerOffset * sizeof(Register)), callFrameRegister, regT3);
- store32(TrustedImm32(argCount), Address(stackPointerRegister, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
- } // SP holds newCallFrame + sizeof(CallerFrameAndPC), with ArgumentCount initialized.
+ store32(TrustedImm32(argCount), payloadFor(JSStack::ArgumentCount, regT3));
+ } // regT3 holds newCallFrame with ArgumentCount initialized.
- uint32_t locationBits = CallSiteIndex(instruction).bits();
+ uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
store32(TrustedImm32(locationBits), tagFor(JSStack::ArgumentCount, callFrameRegister));
emitLoad(callee, regT1, regT0); // regT1, regT0 holds callee.
- store32(regT0, Address(stackPointerRegister, JSStack::Callee * static_cast<int>(sizeof(Register)) + PayloadOffset - sizeof(CallerFrameAndPC)));
- store32(regT1, Address(stackPointerRegister, JSStack::Callee * static_cast<int>(sizeof(Register)) + TagOffset - sizeof(CallerFrameAndPC)));
+ storePtr(callFrameRegister, Address(regT3, CallFrame::callerFrameOffset()));
+ emitStore(JSStack::Callee, regT1, regT0, regT3);
+ move(regT3, callFrameRegister);
if (opcodeID == op_call_eval) {
compileCallEval(instruction);
return;
}
- if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs)
- emitRestoreCalleeSaves();
-
- addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
-
DataLabelPtr addressOfLinkedFunctionCheck;
Jump slowCase = branchPtrWithPatch(NotEqual, regT0, addressOfLinkedFunctionCheck, TrustedImmPtr(0));
addSlowCase(slowCase);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
- ASSERT(m_callCompilationInfo.size() == callLinkInfoIndex);
- info->setUpCall(CallLinkInfo::callTypeFor(opcodeID), CodeOrigin(m_bytecodeOffset), regT0);
- m_callCompilationInfo.append(CallCompilationInfo());
- m_callCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
- m_callCompilationInfo[callLinkInfoIndex].callLinkInfo = info;
-
- checkStackPointerAlignment();
- if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs) {
- prepareForTailCallSlow();
- m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedTailCall();
- return;
- }
-
- m_callCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
+ ASSERT(m_callStructureStubCompilationInfo.size() == callLinkInfoIndex);
+ m_callStructureStubCompilationInfo.append(StructureStubCompilationInfo());
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathBegin = addressOfLinkedFunctionCheck;
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].callType = CallLinkInfo::callTypeFor(opcodeID);
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].bytecodeIndex = m_bytecodeOffset;
- addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
- checkStackPointerAlignment();
+ loadPtr(Address(regT0, OBJECT_OFFSETOF(JSFunction, m_scope)), regT1);
+ emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].hotPathOther = emitNakedCall();
sampleCodeBlock(m_codeBlock);
emitPutCallResult(instruction);
@@ -303,25 +299,58 @@ void JIT::compileOpCallSlowCase(OpcodeID opcodeID, Instruction* instruction, Vec
linkSlowCase(iter);
linkSlowCase(iter);
- move(TrustedImmPtr(m_callCompilationInfo[callLinkInfoIndex].callLinkInfo), regT2);
-
- if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs)
- emitRestoreCalleeSaves();
-
- m_callCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(m_vm->getCTIStub(linkCallThunkGenerator).code());
-
- if (opcodeID == op_tail_call || opcodeID == op_tail_call_varargs) {
- abortWithReason(JITDidReturnFromTailCall);
- return;
- }
-
- addPtr(TrustedImm32(stackPointerOffsetFor(m_codeBlock) * sizeof(Register)), callFrameRegister, stackPointerRegister);
- checkStackPointerAlignment();
+ m_callStructureStubCompilationInfo[callLinkInfoIndex].callReturnLocation = emitNakedCall(opcodeID == op_construct ? m_vm->getCTIStub(linkConstructThunkGenerator).code() : m_vm->getCTIStub(linkCallThunkGenerator).code());
sampleCodeBlock(m_codeBlock);
emitPutCallResult(instruction);
}
+void JIT::privateCompileClosureCall(CallLinkInfo* callLinkInfo, CodeBlock* calleeCodeBlock, Structure* expectedStructure, ExecutableBase* expectedExecutable, MacroAssemblerCodePtr codePtr)
+{
+ JumpList slowCases;
+
+ slowCases.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
+ slowCases.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(expectedStructure)));
+ slowCases.append(branchPtr(NotEqual, Address(regT0, JSFunction::offsetOfExecutable()), TrustedImmPtr(expectedExecutable)));
+
+ loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT1);
+ emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
+
+ Call call = nearCall();
+ Jump done = jump();
+
+ slowCases.link(this);
+ move(TrustedImmPtr(callLinkInfo->callReturnLocation.executableAddress()), regT2);
+ restoreReturnAddressBeforeReturn(regT2);
+ Jump slow = jump();
+
+ LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
+
+ patchBuffer.link(call, FunctionPtr(codePtr.executableAddress()));
+ patchBuffer.link(done, callLinkInfo->hotPathOther.labelAtOffset(0));
+ patchBuffer.link(slow, CodeLocationLabel(m_vm->getCTIStub(virtualCallThunkGenerator).code()));
+
+ RefPtr<ClosureCallStubRoutine> stubRoutine = adoptRef(new ClosureCallStubRoutine(
+ FINALIZE_CODE(
+ patchBuffer,
+ ("Baseline closure call stub for %s, return point %p, target %p (%s)",
+ toCString(*m_codeBlock).data(),
+ callLinkInfo->hotPathOther.labelAtOffset(0).executableAddress(),
+ codePtr.executableAddress(),
+ toCString(pointerDump(calleeCodeBlock)).data())),
+ *m_vm, m_codeBlock->ownerExecutable(), expectedStructure, expectedExecutable,
+ callLinkInfo->codeOrigin));
+
+ RepatchBuffer repatchBuffer(m_codeBlock);
+
+ repatchBuffer.replaceWithJump(
+ RepatchBuffer::startOfBranchPtrWithPatchOnRegister(callLinkInfo->hotPathBegin),
+ CodeLocationLabel(stubRoutine->code().code()));
+ repatchBuffer.relink(callLinkInfo->callReturnLocation, m_vm->getCTIStub(virtualCallThunkGenerator).code());
+
+ callLinkInfo->stub = stubRoutine.release();
+}
+
} // namespace JSC
#endif // USE(JSVALUE32_64)
diff --git a/Source/JavaScriptCore/jit/JITCode.cpp b/Source/JavaScriptCore/jit/JITCode.cpp
index 26b69c542..213b7000c 100644
--- a/Source/JavaScriptCore/jit/JITCode.cpp
+++ b/Source/JavaScriptCore/jit/JITCode.cpp
@@ -27,8 +27,7 @@
#include "JITCode.h"
#include "LLIntThunks.h"
-#include "JSCInlines.h"
-#include "ProtoCallFrame.h"
+#include "Operations.h"
#include <wtf/PrintStream.h>
namespace JSC {
@@ -42,42 +41,11 @@ JITCode::~JITCode()
{
}
-const char* JITCode::typeName(JITType jitType)
+JSValue JITCode::execute(VM* vm, ProtoCallFrame* protoCallFrame, Register* topOfStack)
{
- switch (jitType) {
- case None:
- return "None";
- case HostCallThunk:
- return "Host";
- case InterpreterThunk:
- return "LLInt";
- case BaselineJIT:
- return "Baseline";
- case DFGJIT:
- return "DFG";
- case FTLJIT:
- return "FTL";
- default:
- CRASH();
- return "";
- }
-}
-
-void JITCode::validateReferences(const TrackedReferences&)
-{
-}
-
-JSValue JITCode::execute(VM* vm, ProtoCallFrame* protoCallFrame)
-{
- void* entryAddress;
- JSFunction* function = jsDynamicCast<JSFunction*>(protoCallFrame->callee());
+ ASSERT(!vm->topCallFrame || ((Register*)(vm->topCallFrame) >= topOfStack));
- if (!function || !protoCallFrame->needArityCheck()) {
- ASSERT(!protoCallFrame->needArityCheck());
- entryAddress = executableAddress();
- } else
- entryAddress = addressForCall(MustCheckArity).executableAddress();
- JSValue result = JSValue::decode(vmEntryToJavaScript(entryAddress, vm, protoCallFrame));
+ JSValue result = JSValue::decode(callToJavaScript(executableAddress(), &vm->topCallFrame, protoCallFrame, topOfStack));
return vm->exception() ? jsNull() : result;
}
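// A hypothetical call site (identifiers illustrative, not from this patch):
//
//   JSValue result = jitCode->execute(&vm, &protoCallFrame, topOfStack);
//   if (result.isNull())   // jsNull() here means vm.exception() holds the error
//       handleException(vm);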
@@ -105,38 +73,52 @@ FTL::ForOSREntryJITCode* JITCode::ftlForOSREntry()
return 0;
}
-JITCodeWithCodeRef::JITCodeWithCodeRef(JITType jitType)
+PassRefPtr<JITCode> JITCode::hostFunction(JITCode::CodeRef code)
+{
+ return adoptRef(new DirectJITCode(code, HostCallThunk));
+}
+
+DirectJITCode::DirectJITCode(JITType jitType)
: JITCode(jitType)
{
}
-JITCodeWithCodeRef::JITCodeWithCodeRef(CodeRef ref, JITType jitType)
+DirectJITCode::DirectJITCode(const JITCode::CodeRef ref, JITType jitType)
: JITCode(jitType)
, m_ref(ref)
{
}
-JITCodeWithCodeRef::~JITCodeWithCodeRef()
+DirectJITCode::~DirectJITCode()
+{
+}
+
+void DirectJITCode::initializeCodeRef(const JITCode::CodeRef ref)
{
- if ((Options::dumpDisassembly() || (isOptimizingJIT(jitType()) && Options::dumpDFGDisassembly()))
- && m_ref.executableMemory())
- dataLog("Destroying JIT code at ", pointerDump(m_ref.executableMemory()), "\n");
+ RELEASE_ASSERT(!m_ref);
+ m_ref = ref;
}
-void* JITCodeWithCodeRef::executableAddressAtOffset(size_t offset)
+JITCode::CodePtr DirectJITCode::addressForCall()
+{
+ RELEASE_ASSERT(m_ref);
+ return m_ref.code();
+}
+
+void* DirectJITCode::executableAddressAtOffset(size_t offset)
{
RELEASE_ASSERT(m_ref);
return reinterpret_cast<char*>(m_ref.code().executableAddress()) + offset;
}
-void* JITCodeWithCodeRef::dataAddressAtOffset(size_t offset)
+void* DirectJITCode::dataAddressAtOffset(size_t offset)
{
RELEASE_ASSERT(m_ref);
ASSERT(offset <= size()); // use <= instead of < because it is valid to ask for an address at the exclusive end of the code.
return reinterpret_cast<char*>(m_ref.code().dataLocation()) + offset;
}
-unsigned JITCodeWithCodeRef::offsetOf(void* pointerIntoCode)
+unsigned DirectJITCode::offsetOf(void* pointerIntoCode)
{
RELEASE_ASSERT(m_ref);
intptr_t result = reinterpret_cast<intptr_t>(pointerIntoCode) - reinterpret_cast<intptr_t>(m_ref.code().executableAddress());
@@ -144,94 +126,47 @@ unsigned JITCodeWithCodeRef::offsetOf(void* pointerIntoCode)
return static_cast<unsigned>(result);
}
-size_t JITCodeWithCodeRef::size()
+size_t DirectJITCode::size()
{
RELEASE_ASSERT(m_ref);
return m_ref.size();
}
-bool JITCodeWithCodeRef::contains(void* address)
+bool DirectJITCode::contains(void* address)
{
RELEASE_ASSERT(m_ref);
return m_ref.executableMemory()->contains(address);
}
-DirectJITCode::DirectJITCode(JITType jitType)
- : JITCodeWithCodeRef(jitType)
-{
-}
-
-DirectJITCode::DirectJITCode(JITCode::CodeRef ref, JITCode::CodePtr withArityCheck, JITType jitType)
- : JITCodeWithCodeRef(ref, jitType)
- , m_withArityCheck(withArityCheck)
-{
-}
-
-DirectJITCode::~DirectJITCode()
-{
-}
-
-void DirectJITCode::initializeCodeRef(JITCode::CodeRef ref, JITCode::CodePtr withArityCheck)
-{
- RELEASE_ASSERT(!m_ref);
- m_ref = ref;
- m_withArityCheck = withArityCheck;
-}
-
-JITCode::CodePtr DirectJITCode::addressForCall(ArityCheckMode arity)
-{
- switch (arity) {
- case ArityCheckNotRequired:
- RELEASE_ASSERT(m_ref);
- return m_ref.code();
- case MustCheckArity:
- RELEASE_ASSERT(m_withArityCheck);
- return m_withArityCheck;
- }
- RELEASE_ASSERT_NOT_REACHED();
- return CodePtr();
-}
-
-NativeJITCode::NativeJITCode(JITType jitType)
- : JITCodeWithCodeRef(jitType)
-{
-}
-
-NativeJITCode::NativeJITCode(CodeRef ref, JITType jitType)
- : JITCodeWithCodeRef(ref, jitType)
-{
-}
-
-NativeJITCode::~NativeJITCode()
-{
-}
-
-void NativeJITCode::initializeCodeRef(CodeRef ref)
-{
- ASSERT(!m_ref);
- m_ref = ref;
-}
-
-JITCode::CodePtr NativeJITCode::addressForCall(ArityCheckMode)
-{
- RELEASE_ASSERT(!!m_ref);
- return m_ref.code();
-}
-
-#if ENABLE(JIT)
-RegisterSet JITCode::liveRegistersToPreserveAtExceptionHandlingCallSite(CodeBlock*, CallSiteIndex)
-{
- return RegisterSet();
-}
-#endif
-
} // namespace JSC
namespace WTF {
void printInternal(PrintStream& out, JSC::JITCode::JITType type)
{
- out.print(JSC::JITCode::typeName(type));
+ switch (type) {
+ case JSC::JITCode::None:
+ out.print("None");
+ return;
+ case JSC::JITCode::HostCallThunk:
+ out.print("Host");
+ return;
+ case JSC::JITCode::InterpreterThunk:
+ out.print("LLInt");
+ return;
+ case JSC::JITCode::BaselineJIT:
+ out.print("Baseline");
+ return;
+ case JSC::JITCode::DFGJIT:
+ out.print("DFG");
+ return;
+ case JSC::JITCode::FTLJIT:
+ out.print("FTL");
+ return;
+ default:
+ CRASH();
+ return;
+ }
}
} // namespace WTF
diff --git a/Source/JavaScriptCore/jit/JITCode.h b/Source/JavaScriptCore/jit/JITCode.h
index 7fb7b3134..52c78111a 100644
--- a/Source/JavaScriptCore/jit/JITCode.h
+++ b/Source/JavaScriptCore/jit/JITCode.h
@@ -26,14 +26,14 @@
#ifndef JITCode_h
#define JITCode_h
-#include "ArityCheckMode.h"
+#if ENABLE(JIT) || ENABLE(LLINT)
#include "CallFrame.h"
-#include "CodeOrigin.h"
#include "Disassembler.h"
+#include "JITStubs.h"
#include "JSCJSValue.h"
+#include "LegacyProfiler.h"
#include "MacroAssemblerCodeRef.h"
-#include "RegisterSet.h"
-#include <wtf/Optional.h>
+#endif
namespace JSC {
@@ -47,7 +47,6 @@ class JITCode;
}
struct ProtoCallFrame;
-class TrackedReferences;
class VM;
class JITCode : public ThreadSafeRefCounted<JITCode> {
@@ -55,17 +54,8 @@ public:
typedef MacroAssemblerCodeRef CodeRef;
typedef MacroAssemblerCodePtr CodePtr;
- enum JITType : uint8_t {
- None,
- HostCallThunk,
- InterpreterThunk,
- BaselineJIT,
- DFGJIT,
- FTLJIT
- };
+ enum JITType { None, HostCallThunk, InterpreterThunk, BaselineJIT, DFGJIT, FTLJIT };
- static const char* typeName(JITType);
-
static JITType bottomTierJIT()
{
return BaselineJIT;
@@ -122,7 +112,7 @@ public:
return false;
}
}
-
+
static bool isLowerTier(JITType expectedLower, JITType expectedHigher)
{
RELEASE_ASSERT(isExecutableScript(expectedLower));
@@ -174,7 +164,7 @@ public:
return jitCode->jitType();
}
- virtual CodePtr addressForCall(ArityCheckMode) = 0;
+ virtual CodePtr addressForCall() = 0;
virtual void* executableAddressAtOffset(size_t offset) = 0;
void* executableAddress() { return executableAddressAtOffset(0); }
virtual void* dataAddressAtOffset(size_t offset) = 0;
@@ -185,9 +175,7 @@ public:
virtual FTL::JITCode* ftl();
virtual FTL::ForOSREntryJITCode* ftlForOSREntry();
- virtual void validateReferences(const TrackedReferences&);
-
- JSValue execute(VM*, ProtoCallFrame*);
+ JSValue execute(VM*, ProtoCallFrame*, Register*);
void* start() { return dataAddressAtOffset(0); }
virtual size_t size() = 0;
@@ -195,56 +183,29 @@ public:
virtual bool contains(void*) = 0;
-#if ENABLE(JIT)
- virtual RegisterSet liveRegistersToPreserveAtExceptionHandlingCallSite(CodeBlock*, CallSiteIndex);
- virtual Optional<CodeOrigin> findPC(CodeBlock*, void* pc) { UNUSED_PARAM(pc); return Nullopt; }
-#endif
+ static PassRefPtr<JITCode> hostFunction(CodeRef);
private:
JITType m_jitType;
};
-class JITCodeWithCodeRef : public JITCode {
-protected:
- JITCodeWithCodeRef(JITType);
- JITCodeWithCodeRef(CodeRef, JITType);
-
+class DirectJITCode : public JITCode {
public:
- virtual ~JITCodeWithCodeRef();
+ DirectJITCode(JITType);
+ DirectJITCode(const CodeRef, JITType);
+ virtual ~DirectJITCode();
+
+ void initializeCodeRef(CodeRef ref);
+ virtual CodePtr addressForCall() override;
virtual void* executableAddressAtOffset(size_t offset) override;
virtual void* dataAddressAtOffset(size_t offset) override;
virtual unsigned offsetOf(void* pointerIntoCode) override;
virtual size_t size() override;
virtual bool contains(void*) override;
-protected:
- CodeRef m_ref;
-};
-
-class DirectJITCode : public JITCodeWithCodeRef {
-public:
- DirectJITCode(JITType);
- DirectJITCode(CodeRef, CodePtr withArityCheck, JITType);
- virtual ~DirectJITCode();
-
- void initializeCodeRef(CodeRef, CodePtr withArityCheck);
-
- virtual CodePtr addressForCall(ArityCheckMode) override;
-
private:
- CodePtr m_withArityCheck;
-};
-
-class NativeJITCode : public JITCodeWithCodeRef {
-public:
- NativeJITCode(JITType);
- NativeJITCode(CodeRef, JITType);
- virtual ~NativeJITCode();
-
- void initializeCodeRef(CodeRef);
-
- virtual CodePtr addressForCall(ArityCheckMode) override;
+ CodeRef m_ref;
};
} // namespace JSC
diff --git a/Source/JavaScriptCore/jit/JITCompilationEffort.h b/Source/JavaScriptCore/jit/JITCompilationEffort.h
index 29e95426a..5eb680178 100644
--- a/Source/JavaScriptCore/jit/JITCompilationEffort.h
+++ b/Source/JavaScriptCore/jit/JITCompilationEffort.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
diff --git a/Source/JavaScriptCore/jit/JITDisassembler.cpp b/Source/JavaScriptCore/jit/JITDisassembler.cpp
index 04e1b4d49..2d91a6466 100644
--- a/Source/JavaScriptCore/jit/JITDisassembler.cpp
+++ b/Source/JavaScriptCore/jit/JITDisassembler.cpp
@@ -26,13 +26,11 @@
#include "config.h"
#include "JITDisassembler.h"
-#if ENABLE(JIT)
+#if ENABLE(JIT) && ENABLE(DISASSEMBLER)
#include "CodeBlock.h"
#include "CodeBlockWithJITType.h"
#include "JIT.h"
-#include "JSCInlines.h"
-#include "LinkBuffer.h"
#include <wtf/StringPrintStream.h>
namespace JSC {
@@ -166,5 +164,5 @@ void JITDisassembler::dumpDisassembly(PrintStream& out, LinkBuffer& linkBuffer,
} // namespace JSC
-#endif // ENABLE(JIT)
+#endif // ENABLE(JIT) && ENABLE(DISASSEMBLER)
diff --git a/Source/JavaScriptCore/jit/JITDisassembler.h b/Source/JavaScriptCore/jit/JITDisassembler.h
index 6655de893..7ea13f47d 100644
--- a/Source/JavaScriptCore/jit/JITDisassembler.h
+++ b/Source/JavaScriptCore/jit/JITDisassembler.h
@@ -26,20 +26,20 @@
#ifndef JITDisassembler_h
#define JITDisassembler_h
+#include <wtf/Platform.h>
+
#if ENABLE(JIT)
+#include "LinkBuffer.h"
#include "MacroAssembler.h"
+#include "ProfilerDatabase.h"
#include <wtf/Vector.h>
-#include <wtf/text/CString.h>
namespace JSC {
class CodeBlock;
-class LinkBuffer;
-namespace Profiler {
-class Compilation;
-}
+#if ENABLE(DISASSEMBLER)
class JITDisassembler {
WTF_MAKE_FAST_ALLOCATED;
@@ -86,6 +86,25 @@ private:
MacroAssembler::Label m_endOfCode;
};
+#else // ENABLE(DISASSEMBLER)
+
+class JITDisassembler {
+ WTF_MAKE_FAST_ALLOCATED;
+public:
+ JITDisassembler(CodeBlock*) { }
+
+ void setStartOfCode(MacroAssembler::Label) { }
+ void setForBytecodeMainPath(unsigned, MacroAssembler::Label) { }
+ void setForBytecodeSlowPath(unsigned, MacroAssembler::Label) { }
+ void setEndOfSlowPath(MacroAssembler::Label) { }
+ void setEndOfCode(MacroAssembler::Label) { }
+
+ void dump(LinkBuffer&) { }
+ void reportToProfiler(Profiler::Compilation*, LinkBuffer&) { }
+};
+
+#endif // ENABLE(DISASSEMBLER)
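// With the no-op stub above, JIT call sites can drive the disassembler
// unconditionally, with no #if ENABLE(DISASSEMBLER) guards (illustrative):
//
//   JITDisassembler disassembler(codeBlock);
//   disassembler.setStartOfCode(label);   // compiles away in this configuration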
+
} // namespace JSC
#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITDivGenerator.cpp b/Source/JavaScriptCore/jit/JITDivGenerator.cpp
deleted file mode 100644
index 6b2a7f286..000000000
--- a/Source/JavaScriptCore/jit/JITDivGenerator.cpp
+++ /dev/null
@@ -1,116 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "JITDivGenerator.h"
-
-#if ENABLE(JIT)
-
-#include "JSCJSValueInlines.h"
-
-namespace JSC {
-
-void JITDivGenerator::loadOperand(CCallHelpers& jit, SnippetOperand& opr, JSValueRegs oprRegs, FPRReg destFPR)
-{
- if (opr.isConstInt32()) {
- jit.move(CCallHelpers::Imm32(opr.asConstInt32()), m_scratchGPR);
- jit.convertInt32ToDouble(m_scratchGPR, destFPR);
-#if USE(JSVALUE64)
- } else if (opr.isConstDouble()) {
- jit.move(CCallHelpers::Imm64(opr.asRawBits()), m_scratchGPR);
- jit.move64ToDouble(m_scratchGPR, destFPR);
-#endif
- } else {
- if (!opr.definitelyIsNumber())
- m_slowPathJumpList.append(jit.branchIfNotNumber(oprRegs, m_scratchGPR));
- CCallHelpers::Jump notInt32 = jit.branchIfNotInt32(oprRegs);
- jit.convertInt32ToDouble(oprRegs.payloadGPR(), destFPR);
- CCallHelpers::Jump oprIsLoaded = jit.jump();
- notInt32.link(&jit);
- jit.unboxDoubleNonDestructive(oprRegs, destFPR, m_scratchGPR, m_scratchFPR);
- oprIsLoaded.link(&jit);
- }
-}
-
-void JITDivGenerator::generateFastPath(CCallHelpers& jit)
-{
- ASSERT(m_scratchGPR != InvalidGPRReg);
- ASSERT(m_scratchGPR != m_left.payloadGPR());
- ASSERT(m_scratchGPR != m_right.payloadGPR());
-#if USE(JSVALUE32_64)
- ASSERT(m_scratchGPR != m_left.tagGPR());
- ASSERT(m_scratchGPR != m_right.tagGPR());
- ASSERT(m_scratchFPR != InvalidFPRReg);
-#endif
-
- ASSERT(!m_didEmitFastPath);
- if (!jit.supportsFloatingPoint())
- return;
- if (!m_leftOperand.mightBeNumber() || !m_rightOperand.mightBeNumber())
- return;
-
- ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());
- m_didEmitFastPath = true;
- loadOperand(jit, m_leftOperand, m_left, m_leftFPR);
- loadOperand(jit, m_rightOperand, m_right, m_rightFPR);
-
- jit.divDouble(m_rightFPR, m_leftFPR);
-
- // Is the result actually an integer? The DFG JIT would really like to know. If it's
- // not an integer, we increment a count. If this together with the slow case counter
- // are below threshold then the DFG JIT will compile this division with a speculation
- // that the remainder is zero.
-
- // As well, there are cases where a double result here would cause an important field
- // in the heap to sometimes have doubles in it, resulting in double predictions getting
- // propagated to a use site where it might cause damage (such as the index to an array
- // access). So if we are DFG compiling anything in the program, we want this code to
- // ensure that it produces integers whenever possible.
-
- CCallHelpers::JumpList notInt32;
- jit.branchConvertDoubleToInt32(m_leftFPR, m_scratchGPR, notInt32, m_scratchFPR);
-
- // If we've got an integer, we might as well make that the result of the division.
- jit.boxInt32(m_scratchGPR, m_result);
- m_endJumpList.append(jit.jump());
-
- notInt32.link(&jit);
-#if USE(JSVALUE64)
- jit.moveDoubleTo64(m_leftFPR, m_scratchGPR);
- CCallHelpers::Jump notDoubleZero = jit.branchTest64(CCallHelpers::NonZero, m_scratchGPR);
-
- jit.move(GPRInfo::tagTypeNumberRegister, m_result.payloadGPR());
- m_endJumpList.append(jit.jump());
-
- notDoubleZero.link(&jit);
-#endif
- if (m_resultProfile)
- jit.add32(CCallHelpers::TrustedImm32(1), CCallHelpers::AbsoluteAddress(m_resultProfile->addressOfSpecialFastPathCount()));
- jit.boxDouble(m_leftFPR, m_result);
-}
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITDivGenerator.h b/Source/JavaScriptCore/jit/JITDivGenerator.h
deleted file mode 100644
index f9911f347..000000000
--- a/Source/JavaScriptCore/jit/JITDivGenerator.h
+++ /dev/null
@@ -1,85 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef JITDivGenerator_h
-#define JITDivGenerator_h
-
-#if ENABLE(JIT)
-
-#include "CCallHelpers.h"
-#include "SnippetOperand.h"
-
-namespace JSC {
-
-class JITDivGenerator {
-public:
- JITDivGenerator(SnippetOperand leftOperand, SnippetOperand rightOperand,
- JSValueRegs result, JSValueRegs left, JSValueRegs right,
- FPRReg leftFPR, FPRReg rightFPR, GPRReg scratchGPR, FPRReg scratchFPR,
- ResultProfile* resultProfile = nullptr)
- : m_leftOperand(leftOperand)
- , m_rightOperand(rightOperand)
- , m_result(result)
- , m_left(left)
- , m_right(right)
- , m_leftFPR(leftFPR)
- , m_rightFPR(rightFPR)
- , m_scratchGPR(scratchGPR)
- , m_scratchFPR(scratchFPR)
- , m_resultProfile(resultProfile)
- {
- ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());
- }
-
- void generateFastPath(CCallHelpers&);
-
- bool didEmitFastPath() const { return m_didEmitFastPath; }
- CCallHelpers::JumpList& endJumpList() { return m_endJumpList; }
- CCallHelpers::JumpList& slowPathJumpList() { return m_slowPathJumpList; }
-
-private:
- void loadOperand(CCallHelpers&, SnippetOperand&, JSValueRegs opRegs, FPRReg destFPR);
-
- SnippetOperand m_leftOperand;
- SnippetOperand m_rightOperand;
- JSValueRegs m_result;
- JSValueRegs m_left;
- JSValueRegs m_right;
- FPRReg m_leftFPR;
- FPRReg m_rightFPR;
- GPRReg m_scratchGPR;
- FPRReg m_scratchFPR;
- ResultProfile* m_resultProfile;
- bool m_didEmitFastPath { false };
-
- CCallHelpers::JumpList m_endJumpList;
- CCallHelpers::JumpList m_slowPathJumpList;
-};
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
-
-#endif // JITDivGenerator_h
diff --git a/Source/JavaScriptCore/jit/JITExceptions.cpp b/Source/JavaScriptCore/jit/JITExceptions.cpp
index cf2ea28af..8084f773b 100644
--- a/Source/JavaScriptCore/jit/JITExceptions.cpp
+++ b/Source/JavaScriptCore/jit/JITExceptions.cpp
@@ -27,53 +27,38 @@
#include "JITExceptions.h"
#include "CallFrame.h"
+#include "CallFrameInlines.h"
#include "CodeBlock.h"
#include "Interpreter.h"
+#include "JITStubs.h"
#include "JSCJSValue.h"
#include "LLIntData.h"
#include "LLIntOpcode.h"
#include "LLIntThunks.h"
#include "Opcode.h"
-#include "JSCInlines.h"
+#include "Operations.h"
#include "VM.h"
namespace JSC {
-void genericUnwind(VM* vm, ExecState* callFrame, UnwindStart unwindStart)
+void genericUnwind(VM* vm, ExecState* callFrame, JSValue exceptionValue)
{
- if (Options::breakOnThrow()) {
- CodeBlock* codeBlock = callFrame->codeBlock();
- if (codeBlock)
- dataLog("In call frame ", RawPointer(callFrame), " for code block ", *codeBlock, "\n");
- else
- dataLog("In call frame ", RawPointer(callFrame), " with null CodeBlock\n");
- CRASH();
- }
-
- Exception* exception = vm->exception();
- RELEASE_ASSERT(exception);
- HandlerInfo* handler = vm->interpreter->unwind(*vm, callFrame, exception, unwindStart); // This may update callFrame.
+ RELEASE_ASSERT(exceptionValue);
+ HandlerInfo* handler = vm->interpreter->unwind(callFrame, exceptionValue); // This may update callFrame.
void* catchRoutine;
Instruction* catchPCForInterpreter = 0;
if (handler) {
- // handler->target is meaningless for getting a code offset when catching
- // the exception in a DFG/FTL frame. This bytecode target offset could be
- // something that's in an inlined frame, which means an array access
- // with this bytecode offset in the machine frame is utterly meaningless
- // and can cause an overflow. OSR exit properly exits to handler->target
- // in the proper frame.
- if (!JITCode::isOptimizingJIT(callFrame->codeBlock()->jitType()))
- catchPCForInterpreter = &callFrame->codeBlock()->instructions()[handler->target];
+ catchPCForInterpreter = &callFrame->codeBlock()->instructions()[handler->target];
#if ENABLE(JIT)
catchRoutine = handler->nativeCode.executableAddress();
#else
catchRoutine = catchPCForInterpreter->u.pointer;
#endif
} else
- catchRoutine = LLInt::getCodePtr(handleUncaughtException);
+ catchRoutine = LLInt::getCodePtr(returnFromJavaScript);
- vm->callFrameForCatch = callFrame;
+ vm->callFrameForThrow = callFrame;
vm->targetMachinePCForThrow = catchRoutine;
vm->targetInterpreterPCForThrow = catchPCForInterpreter;
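// The dispatch above reduces to (a sketch for the ENABLE(JIT) configuration):
//
//   vm->targetMachinePCForThrow = handler
//       ? handler->nativeCode.executableAddress()
//       : LLInt::getCodePtr(returnFromJavaScript);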
diff --git a/Source/JavaScriptCore/jit/JITExceptions.h b/Source/JavaScriptCore/jit/JITExceptions.h
index 3ccac84c5..376e269f1 100644
--- a/Source/JavaScriptCore/jit/JITExceptions.h
+++ b/Source/JavaScriptCore/jit/JITExceptions.h
@@ -26,17 +26,20 @@
#ifndef JITExceptions_h
#define JITExceptions_h
-#include "Interpreter.h"
#include "JSCJSValue.h"
+#if ENABLE(JIT) || ENABLE(LLINT)
+
namespace JSC {
class ExecState;
class VM;
-void genericUnwind(VM*, ExecState*, UnwindStart = UnwindFromCurrentFrame);
+void genericUnwind(VM*, ExecState*, JSValue exceptionValue);
} // namespace JSC
+#endif // ENABLE(JIT) || ENABLE(LLINT)
+
#endif // JITExceptions_h
diff --git a/Source/JavaScriptCore/jit/JITInlineCacheGenerator.cpp b/Source/JavaScriptCore/jit/JITInlineCacheGenerator.cpp
index 3ca56f65d..74b086a7c 100644
--- a/Source/JavaScriptCore/jit/JITInlineCacheGenerator.cpp
+++ b/Source/JavaScriptCore/jit/JITInlineCacheGenerator.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,38 +30,42 @@
#include "CodeBlock.h"
#include "LinkBuffer.h"
-#include "JSCInlines.h"
+#include "Operations.h"
namespace JSC {
static StructureStubInfo* garbageStubInfo()
{
- static StructureStubInfo* stubInfo = new StructureStubInfo(AccessType::Get);
+ static StructureStubInfo* stubInfo = new StructureStubInfo();
return stubInfo;
}
-JITInlineCacheGenerator::JITInlineCacheGenerator(
- CodeBlock* codeBlock, CodeOrigin codeOrigin, CallSiteIndex callSite, AccessType accessType)
+JITInlineCacheGenerator::JITInlineCacheGenerator(CodeBlock* codeBlock, CodeOrigin codeOrigin)
: m_codeBlock(codeBlock)
{
- m_stubInfo = m_codeBlock ? m_codeBlock->addStubInfo(accessType) : garbageStubInfo();
+ m_stubInfo = m_codeBlock ? m_codeBlock->addStubInfo() : garbageStubInfo();
m_stubInfo->codeOrigin = codeOrigin;
- m_stubInfo->callSiteIndex = callSite;
}
JITByIdGenerator::JITByIdGenerator(
- CodeBlock* codeBlock, CodeOrigin codeOrigin, CallSiteIndex callSite, AccessType accessType,
- const RegisterSet& usedRegisters, JSValueRegs base, JSValueRegs value)
- : JITInlineCacheGenerator(codeBlock, codeOrigin, callSite, accessType)
+ CodeBlock* codeBlock, CodeOrigin codeOrigin, const RegisterSet& usedRegisters,
+ GPRReg callFrameRegister, JSValueRegs base, JSValueRegs value, bool registersFlushed)
+ : JITInlineCacheGenerator(codeBlock, codeOrigin)
, m_base(base)
, m_value(value)
{
+ m_stubInfo->patch.registersFlushed = registersFlushed;
m_stubInfo->patch.usedRegisters = usedRegisters;
+ // This is a convenience - in cases where the only registers you're using are base/value,
+ // it allows you to pass RegisterSet() as the usedRegisters argument.
+ m_stubInfo->patch.usedRegisters.set(base);
+ m_stubInfo->patch.usedRegisters.set(value);
+
+ m_stubInfo->patch.callFrameRegister = static_cast<int8_t>(callFrameRegister);
m_stubInfo->patch.baseGPR = static_cast<int8_t>(base.payloadGPR());
m_stubInfo->patch.valueGPR = static_cast<int8_t>(value.payloadGPR());
#if USE(JSVALUE32_64)
- m_stubInfo->patch.baseTagGPR = static_cast<int8_t>(base.tagGPR());
m_stubInfo->patch.valueTagGPR = static_cast<int8_t>(value.tagGPR());
#endif
}
@@ -87,6 +91,8 @@ void JITByIdGenerator::finalize(LinkBuffer& fastPath, LinkBuffer& slowPath)
callReturnLocation, slowPath.locationOf(m_slowPathBegin));
m_stubInfo->patch.deltaCallToDone = MacroAssembler::differenceBetweenCodePtr(
callReturnLocation, fastPath.locationOf(m_done));
+ m_stubInfo->patch.deltaCallToStorageLoad = MacroAssembler::differenceBetweenCodePtr(
+ callReturnLocation, fastPath.locationOf(m_propertyStorageLoad));
}
void JITByIdGenerator::finalize(LinkBuffer& linkBuffer)
@@ -94,46 +100,42 @@ void JITByIdGenerator::finalize(LinkBuffer& linkBuffer)
finalize(linkBuffer, linkBuffer);
}
-void JITByIdGenerator::generateFastPathChecks(MacroAssembler& jit)
+void JITByIdGenerator::generateFastPathChecks(MacroAssembler& jit, GPRReg butterfly)
{
- m_structureCheck = jit.patchableBranch32WithPatch(
+ m_structureCheck = jit.patchableBranchPtrWithPatch(
MacroAssembler::NotEqual,
- MacroAssembler::Address(m_base.payloadGPR(), JSCell::structureIDOffset()),
- m_structureImm, MacroAssembler::TrustedImm32(0));
-}
-
-JITGetByIdGenerator::JITGetByIdGenerator(
- CodeBlock* codeBlock, CodeOrigin codeOrigin, CallSiteIndex callSite, const RegisterSet& usedRegisters,
- JSValueRegs base, JSValueRegs value)
- : JITByIdGenerator(
- codeBlock, codeOrigin, callSite, AccessType::Get, usedRegisters, base, value)
-{
- RELEASE_ASSERT(base.payloadGPR() != value.tagGPR());
+ MacroAssembler::Address(m_base.payloadGPR(), JSCell::structureOffset()),
+ m_structureImm, MacroAssembler::TrustedImmPtr(reinterpret_cast<void*>(unusedPointer)));
+
+ m_propertyStorageLoad = jit.convertibleLoadPtr(
+ MacroAssembler::Address(m_base.payloadGPR(), JSObject::butterflyOffset()), butterfly);
}
void JITGetByIdGenerator::generateFastPath(MacroAssembler& jit)
{
- generateFastPathChecks(jit);
+ generateFastPathChecks(jit, m_value.payloadGPR());
#if USE(JSVALUE64)
m_loadOrStore = jit.load64WithCompactAddressOffsetPatch(
- MacroAssembler::Address(m_base.payloadGPR(), 0), m_value.payloadGPR()).label();
+ MacroAssembler::Address(m_value.payloadGPR(), 0), m_value.payloadGPR()).label();
#else
m_tagLoadOrStore = jit.load32WithCompactAddressOffsetPatch(
- MacroAssembler::Address(m_base.payloadGPR(), 0), m_value.tagGPR()).label();
+ MacroAssembler::Address(m_value.payloadGPR(), 0), m_value.tagGPR()).label();
m_loadOrStore = jit.load32WithCompactAddressOffsetPatch(
- MacroAssembler::Address(m_base.payloadGPR(), 0), m_value.payloadGPR()).label();
+ MacroAssembler::Address(m_value.payloadGPR(), 0), m_value.payloadGPR()).label();
#endif
m_done = jit.label();
}
JITPutByIdGenerator::JITPutByIdGenerator(
- CodeBlock* codeBlock, CodeOrigin codeOrigin, CallSiteIndex callSite, const RegisterSet& usedRegisters,
- JSValueRegs base, JSValueRegs value, GPRReg scratch,
- ECMAMode ecmaMode, PutKind putKind)
+ CodeBlock* codeBlock, CodeOrigin codeOrigin, const RegisterSet& usedRegisters,
+ GPRReg callFrameRegister, JSValueRegs base, JSValueRegs value, GPRReg scratch,
+ bool registersFlushed, ECMAMode ecmaMode, PutKind putKind)
: JITByIdGenerator(
- codeBlock, codeOrigin, callSite, AccessType::Put, usedRegisters, base, value)
+ codeBlock, codeOrigin, usedRegisters, callFrameRegister, base, value,
+ registersFlushed)
+ , m_scratch(scratch)
, m_ecmaMode(ecmaMode)
, m_putKind(putKind)
{
@@ -142,16 +144,16 @@ JITPutByIdGenerator::JITPutByIdGenerator(
void JITPutByIdGenerator::generateFastPath(MacroAssembler& jit)
{
- generateFastPathChecks(jit);
+ generateFastPathChecks(jit, m_scratch);
#if USE(JSVALUE64)
m_loadOrStore = jit.store64WithAddressOffsetPatch(
- m_value.payloadGPR(), MacroAssembler::Address(m_base.payloadGPR(), 0)).label();
+ m_value.payloadGPR(), MacroAssembler::Address(m_scratch, 0)).label();
#else
m_tagLoadOrStore = jit.store32WithAddressOffsetPatch(
- m_value.tagGPR(), MacroAssembler::Address(m_base.payloadGPR(), 0)).label();
+ m_value.tagGPR(), MacroAssembler::Address(m_scratch, 0)).label();
m_loadOrStore = jit.store32WithAddressOffsetPatch(
- m_value.payloadGPR(), MacroAssembler::Address(m_base.payloadGPR(), 0)).label();
+ m_value.payloadGPR(), MacroAssembler::Address(m_scratch, 0)).label();
#endif
m_done = jit.label();
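Taken together, the two generateFastPath variants emit a fixed-shape stanza that
the repatching machinery later edits in place; a comment sketch of that shape
(illustrative, not literal assembly):

    // 1. m_structureCheck: patchable pointer compare against a structure
    //    immediate (seeded with unusedPointer, patched on the first cache hit):
    //        if (base->structure != PATCHED_STRUCTURE) goto slowPath;
    // 2. m_propertyStorageLoad: convertible butterfly load, which can later be
    //    converted to a no-op when the property turns out to be stored inline:
    //        storage = base->butterfly;
    // 3. m_loadOrStore: offset-patchable access, offset initially 0:
    //        value = storage[PATCHED_OFFSET];  // a store, for put_by_id
    // 4. m_done: fast-path fall-through.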
diff --git a/Source/JavaScriptCore/jit/JITInlineCacheGenerator.h b/Source/JavaScriptCore/jit/JITInlineCacheGenerator.h
index 0ada0f8b4..6ff0c09b5 100644
--- a/Source/JavaScriptCore/jit/JITInlineCacheGenerator.h
+++ b/Source/JavaScriptCore/jit/JITInlineCacheGenerator.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -33,7 +33,6 @@
#include "JSCJSValue.h"
#include "PutKind.h"
#include "RegisterSet.h"
-#include "StructureStubInfo.h"
namespace JSC {
@@ -42,7 +41,7 @@ class CodeBlock;
class JITInlineCacheGenerator {
protected:
JITInlineCacheGenerator() { }
- JITInlineCacheGenerator(CodeBlock*, CodeOrigin, CallSiteIndex, AccessType);
+ JITInlineCacheGenerator(CodeBlock*, CodeOrigin);
public:
StructureStubInfo* stubInfo() const { return m_stubInfo; }
@@ -57,8 +56,8 @@ protected:
JITByIdGenerator() { }
JITByIdGenerator(
- CodeBlock*, CodeOrigin, CallSiteIndex, AccessType, const RegisterSet&, JSValueRegs base,
- JSValueRegs value);
+ CodeBlock*, CodeOrigin, const RegisterSet&, GPRReg callFrameRegister,
+ JSValueRegs base, JSValueRegs value, bool registersFlushed);
public:
void reportSlowPathCall(MacroAssembler::Label slowPathBegin, MacroAssembler::Call call)
@@ -74,13 +73,14 @@ public:
void finalize(LinkBuffer&);
protected:
- void generateFastPathChecks(MacroAssembler&);
+ void generateFastPathChecks(MacroAssembler&, GPRReg butterfly);
JSValueRegs m_base;
JSValueRegs m_value;
- MacroAssembler::DataLabel32 m_structureImm;
+ MacroAssembler::DataLabelPtr m_structureImm;
MacroAssembler::PatchableJump m_structureCheck;
+ MacroAssembler::ConvertibleLoadLabel m_propertyStorageLoad;
AssemblerLabel m_loadOrStore;
#if USE(JSVALUE32_64)
AssemblerLabel m_tagLoadOrStore;
@@ -95,8 +95,14 @@ public:
JITGetByIdGenerator() { }
JITGetByIdGenerator(
- CodeBlock*, CodeOrigin, CallSiteIndex, const RegisterSet& usedRegisters, JSValueRegs base,
- JSValueRegs value);
+ CodeBlock* codeBlock, CodeOrigin codeOrigin, const RegisterSet& usedRegisters,
+ GPRReg callFrameRegister, JSValueRegs base, JSValueRegs value,
+ bool registersFlushed)
+ : JITByIdGenerator(
+ codeBlock, codeOrigin, usedRegisters, callFrameRegister, base, value,
+ registersFlushed)
+ {
+ }
void generateFastPath(MacroAssembler&);
};
@@ -106,14 +112,16 @@ public:
JITPutByIdGenerator() { }
JITPutByIdGenerator(
- CodeBlock*, CodeOrigin, CallSiteIndex, const RegisterSet& usedRegisters, JSValueRegs base,
- JSValueRegs, GPRReg scratch, ECMAMode, PutKind);
+ CodeBlock*, CodeOrigin, const RegisterSet& usedRegisters, GPRReg callFrameRegister,
+ JSValueRegs base, JSValueRegs value, GPRReg scratch, bool registersFlushed,
+ ECMAMode, PutKind);
void generateFastPath(MacroAssembler&);
V_JITOperation_ESsiJJI slowPathFunction();
private:
+ GPRReg m_scratch;
ECMAMode m_ecmaMode;
PutKind m_putKind;
};
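A usage sketch for the generator API above, assuming a 64-bit build; the
register choices and surrounding variables are illustrative:

    // Sketch: emitting a get_by_id inline cache.
    JSC::JITGetByIdGenerator gen(
        codeBlock, codeOrigin, usedRegisters, GPRInfo::callFrameRegister,
        JSC::JSValueRegs(baseGPR), JSC::JSValueRegs(resultGPR),
        /* registersFlushed */ true);
    gen.generateFastPath(jit);
    // ... emit the out-of-line slow-path call, then:
    gen.reportSlowPathCall(slowPathBegin, slowPathCall);
    // ... and once the LinkBuffer exists:
    gen.finalize(linkBuffer);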
diff --git a/Source/JavaScriptCore/jit/JITInlines.h b/Source/JavaScriptCore/jit/JITInlines.h
index 77adc02df..9330e773e 100644
--- a/Source/JavaScriptCore/jit/JITInlines.h
+++ b/Source/JavaScriptCore/jit/JITInlines.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2012, 2013, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,58 +26,14 @@
#ifndef JITInlines_h
#define JITInlines_h
+
#if ENABLE(JIT)
-#include "JSCInlines.h"
+#include "CallFrameInlines.h"
namespace JSC {
-#if USE(JSVALUE64)
-inline MacroAssembler::JumpList JIT::emitDoubleGetByVal(Instruction* instruction, PatchableJump& badType)
-{
- JumpList slowCases = emitDoubleLoad(instruction, badType);
- moveDoubleTo64(fpRegT0, regT0);
- sub64(tagTypeNumberRegister, regT0);
- return slowCases;
-}
-#else
-inline MacroAssembler::JumpList JIT::emitDoubleGetByVal(Instruction* instruction, PatchableJump& badType)
-{
- JumpList slowCases = emitDoubleLoad(instruction, badType);
- moveDoubleToInts(fpRegT0, regT0, regT1);
- return slowCases;
-}
-#endif // USE(JSVALUE64)
-
-ALWAYS_INLINE MacroAssembler::JumpList JIT::emitLoadForArrayMode(Instruction* currentInstruction, JITArrayMode arrayMode, PatchableJump& badType)
-{
- switch (arrayMode) {
- case JITInt32:
- return emitInt32Load(currentInstruction, badType);
- case JITDouble:
- return emitDoubleLoad(currentInstruction, badType);
- case JITContiguous:
- return emitContiguousLoad(currentInstruction, badType);
- case JITArrayStorage:
- return emitArrayStorageLoad(currentInstruction, badType);
- default:
- break;
- }
- RELEASE_ASSERT_NOT_REACHED();
- return MacroAssembler::JumpList();
-}
-
-inline MacroAssembler::JumpList JIT::emitContiguousGetByVal(Instruction* instruction, PatchableJump& badType, IndexingType expectedShape)
-{
- return emitContiguousLoad(instruction, badType, expectedShape);
-}
-
-inline MacroAssembler::JumpList JIT::emitArrayStorageGetByVal(Instruction* instruction, PatchableJump& badType)
-{
- return emitArrayStorageLoad(instruction, badType);
-}
-
-ALWAYS_INLINE bool JIT::isOperandConstantDouble(int src)
+ALWAYS_INLINE bool JIT::isOperandConstantImmediateDouble(int src)
{
return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isDouble();
}
@@ -98,9 +54,26 @@ ALWAYS_INLINE void JIT::emitPutIntToCallFrameHeader(RegisterID from, JSStack::Ca
#endif
}
+ALWAYS_INLINE void JIT::emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
+{
+ loadPtr(Address(from, entry * sizeof(Register)), to);
+}
+
+ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader32(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
+{
+ load32(Address(from, entry * sizeof(Register)), to);
+}
+
+#if USE(JSVALUE64)
+ALWAYS_INLINE void JIT::emitGetFromCallFrameHeader64(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
+{
+ load64(Address(from, entry * sizeof(Register)), to);
+}
+#endif
+
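The three helpers above differ only in load width; each indexes a fixed header
slot at from + entry * sizeof(Register). For example (the slot and register
choices are illustrative):

    // Sketch: load the CodeBlock* header slot of the frame in regT1 into regT0.
    emitGetFromCallFrameHeaderPtr(JSStack::CodeBlock, regT0, regT1);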
ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst, JumpList& failures)
{
- failures.append(branchStructure(NotEqual, Address(src, JSCell::structureIDOffset()), m_vm->stringStructure.get()));
+ failures.append(branchPtr(NotEqual, Address(src, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
failures.append(branch32(NotEqual, MacroAssembler::Address(src, ThunkHelpers::jsStringLengthOffset()), TrustedImm32(1)));
loadPtr(MacroAssembler::Address(src, ThunkHelpers::jsStringValueOffset()), dst);
failures.append(branchTest32(Zero, dst));
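In other words, emitLoadCharacterString only succeeds for a resolved,
single-character JSString: the structure compare filters out non-strings, the
length check filters longer strings, and the final branchTest32(Zero, dst)
rejects ropes, whose value pointer stays null until they are resolved.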
@@ -119,16 +92,9 @@ ALWAYS_INLINE void JIT::emitLoadCharacterString(RegisterID src, RegisterID dst,
ALWAYS_INLINE JIT::Call JIT::emitNakedCall(CodePtr function)
{
- ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
- Call nakedCall = nearCall();
- m_calls.append(CallRecord(nakedCall, m_bytecodeOffset, function.executableAddress()));
- return nakedCall;
-}
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
-ALWAYS_INLINE JIT::Call JIT::emitNakedTailCall(CodePtr function)
-{
- ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
- Call nakedCall = nearTailCall();
+ Call nakedCall = nearCall();
m_calls.append(CallRecord(nakedCall, m_bytecodeOffset, function.executableAddress()));
return nakedCall;
}
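The CallRecord appended here is resolved when the code is linked; the pattern in
JIT::link looks roughly like this (sketch):

    // Bind every recorded near call that has a known target.
    for (unsigned i = 0; i < m_calls.size(); ++i) {
        if (m_calls[i].to)
            patchBuffer.link(m_calls[i].from, FunctionPtr(m_calls[i].to));
    }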
@@ -138,9 +104,9 @@ ALWAYS_INLINE void JIT::updateTopCallFrame()
ASSERT(static_cast<int>(m_bytecodeOffset) >= 0);
#if USE(JSVALUE32_64)
Instruction* instruction = m_codeBlock->instructions().begin() + m_bytecodeOffset + 1;
- uint32_t locationBits = CallSiteIndex(instruction).bits();
+ uint32_t locationBits = CallFrame::Location::encodeAsBytecodeInstruction(instruction);
#else
- uint32_t locationBits = CallSiteIndex(m_bytecodeOffset + 1).bits();
+ uint32_t locationBits = CallFrame::Location::encodeAsBytecodeOffset(m_bytecodeOffset + 1);
#endif
store32(TrustedImm32(locationBits), intTagFor(JSStack::ArgumentCount));
storePtr(callFrameRegister, &m_vm->topCallFrame);
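updateTopCallFrame therefore leaves two breadcrumbs for the runtime: the current
bytecode location, packed into the tag half of the ArgumentCount header slot,
and vm->topCallFrame itself. A comment sketch of the encoding split:

    // 32-bit builds: encode the Instruction* directly; the interpreter PC is
    //                what runtime code wants to recover.
    // 64-bit builds: encode the bytecode offset plus one (the +1 keeps the
    //                encoded value non-zero).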
@@ -154,16 +120,6 @@ ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithExceptionCheck(const Funct
return call;
}
-#if OS(WINDOWS) && CPU(X86_64)
-ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithExceptionCheckAndSlowPathReturnType(const FunctionPtr& function)
-{
- updateTopCallFrame();
- MacroAssembler::Call call = appendCallWithSlowPathReturnType(function);
- exceptionCheck();
- return call;
-}
-#endif
-
ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithCallFrameRollbackOnException(const FunctionPtr& function)
{
updateTopCallFrame(); // The callee is responsible for setting topCallFrame to their caller
@@ -195,49 +151,12 @@ ALWAYS_INLINE MacroAssembler::Call JIT::appendCallWithExceptionCheckSetJSValueRe
return call;
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(P_JITOperation_E operation)
-{
- setupArgumentsExecState();
- return appendCallWithExceptionCheck(operation);
-}
-
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperationNoExceptionCheck(Z_JITOperation_E operation)
-{
- setupArgumentsExecState();
- updateTopCallFrame();
- return appendCall(operation);
-}
-
ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(C_JITOperation_E operation)
{
setupArgumentsExecState();
return appendCallWithExceptionCheck(operation);
}
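The callOperation overloads here are kept type-safe by the operation typedefs,
whose names encode the C signature (per the key in JITOperations.h): the letter
group before "_JITOperation_" is the return type and the groups after it are the
arguments, e.g. V = void, E = ExecState*, J = EncodedJSValue, Z = int32_t,
C = JSCell*, P = void*, I = StringImpl*, Ssi = StructureStubInfo*,
Pc = Instruction* (bytecode PC). Decoding one signature used below:

    // V_JITOperation_ESsiJJI:
    //   void op(ExecState*, StructureStubInfo*, EncodedJSValue, EncodedJSValue, StringImpl*)
    // which is the shape of the put_by_id slow-path operations.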
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(C_JITOperation_EJsc operation, GPRReg arg1)
-{
- setupArgumentsWithExecState(arg1);
- return appendCallWithExceptionCheck(operation);
-}
-
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(C_JITOperation_EJscZ operation, GPRReg arg1, int32_t arg2)
-{
- setupArgumentsWithExecState(arg1, TrustedImm32(arg2));
- return appendCallWithExceptionCheck(operation);
-}
-
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(C_JITOperation_EL operation, GPRReg arg1)
-{
- setupArgumentsWithExecState(arg1);
- return appendCallWithExceptionCheck(operation);
-}
-
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(C_JITOperation_EL operation, TrustedImmPtr arg1)
-{
- setupArgumentsWithExecState(arg1);
- return appendCallWithExceptionCheck(operation);
-}
-
ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(C_JITOperation_EO operation, GPRReg arg)
{
setupArgumentsWithExecState(arg);
@@ -286,18 +205,6 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EC operatio
return appendCallWithExceptionCheck(operation);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJscC operation, int dst, GPRReg arg1, JSCell* cell)
-{
- setupArgumentsWithExecState(arg1, TrustedImmPtr(cell));
- return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
-}
-
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJscCJ operation, int dst, GPRReg arg1, JSCell* cell, GPRReg arg2)
-{
- setupArgumentsWithExecState(arg1, TrustedImmPtr(cell), arg2);
- return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
-}
-
ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EP operation, int dst, void* pointer)
{
setupArgumentsWithExecState(TrustedImmPtr(pointer));
@@ -310,22 +217,16 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(WithProfileTag, J_JITOpera
return appendCallWithExceptionCheckSetJSValueResultWithProfile(operation, dst);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EPc operation, int dst, Instruction* bytecodePC)
-{
- setupArgumentsWithExecState(TrustedImmPtr(bytecodePC));
- return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
-}
-
ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EZ operation, int dst, int32_t arg)
{
setupArgumentsWithExecState(TrustedImm32(arg));
return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EZZ operation, int dst, int32_t arg1, int32_t arg2)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(P_JITOperation_EZ operation, int32_t op)
{
- setupArgumentsWithExecState(TrustedImm32(arg1), TrustedImm32(arg2));
- return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
+ setupArgumentsWithExecState(TrustedImm32(op));
+ return appendCallWithExceptionCheck(operation);
}
ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(S_JITOperation_ECC operation, RegisterID regOp1, RegisterID regOp2)
@@ -340,17 +241,6 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(S_JITOperation_EOJss opera
return appendCallWithExceptionCheck(operation);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(Sprt_JITOperation_EZ operation, int32_t op)
-{
-#if OS(WINDOWS) && CPU(X86_64)
- setupArgumentsWithExecStateForCallWithSlowPathReturnType(TrustedImm32(op));
- return appendCallWithExceptionCheckAndSlowPathReturnType(operation);
-#else
- setupArgumentsWithExecState(TrustedImm32(op));
- return appendCallWithExceptionCheck(operation);
-#endif
-}
-
ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_E operation)
{
setupArgumentsExecState();
@@ -369,13 +259,6 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECC operati
return appendCallWithExceptionCheck(operation);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EE operation, RegisterID regOp)
-{
- setupArgumentsWithExecState(regOp);
- updateTopCallFrame();
- return appendCallWithExceptionCheck(operation);
-}
-
ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EPc operation, Instruction* bytecodePC)
{
setupArgumentsWithExecState(TrustedImmPtr(bytecodePC));
@@ -406,26 +289,21 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperationWithCallFrameRollbackOnExce
return appendCallWithCallFrameRollbackOnException(operation);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECIZC operation, RegisterID regOp1, UniquedStringImpl* identOp2, int32_t op3, RegisterID regOp4)
-{
- setupArgumentsWithExecState(regOp1, TrustedImmPtr(identOp2), TrustedImm32(op3), regOp4);
- return appendCallWithExceptionCheck(operation);
-}
#if USE(JSVALUE64)
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(Z_JITOperation_EJZZ operation, GPRReg arg1, int32_t arg2, int32_t arg3)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(F_JITOperation_EJZ operation, GPRReg arg1, int32_t arg3)
{
- setupArgumentsWithExecState(arg1, TrustedImm32(arg2), TrustedImm32(arg3));
+ setupArgumentsWithExecState(arg1, TrustedImm32(arg3));
return appendCallWithExceptionCheck(operation);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(F_JITOperation_EFJZZ operation, GPRReg arg1, GPRReg arg2, int32_t arg3, GPRReg arg4)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(F_JITOperation_EFJJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
{
- setupArgumentsWithExecState(arg1, arg2, TrustedImm32(arg3), arg4);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
return appendCallWithExceptionCheck(operation);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ESsiJJI operation, StructureStubInfo* stubInfo, RegisterID regOp1, RegisterID regOp2, UniquedStringImpl* uid)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ESsiJJI operation, StructureStubInfo* stubInfo, RegisterID regOp1, RegisterID regOp2, StringImpl* uid)
{
setupArgumentsWithExecState(TrustedImmPtr(stubInfo), regOp1, regOp2, TrustedImmPtr(uid));
return appendCallWithExceptionCheck(operation);
@@ -437,25 +315,7 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJJJ operat
return appendCallWithExceptionCheck(operation);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJJJAp operation, RegisterID regOp1, RegisterID regOp2, RegisterID regOp3, ArrayProfile* arrayProfile)
-{
- setupArgumentsWithExecState(regOp1, regOp2, regOp3, TrustedImmPtr(arrayProfile));
- return appendCallWithExceptionCheck(operation);
-}
-
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJJJBy operation, RegisterID regOp1, RegisterID regOp2, RegisterID regOp3, ByValInfo* byValInfo)
-{
- setupArgumentsWithExecState(regOp1, regOp2, regOp3, TrustedImmPtr(byValInfo));
- return appendCallWithExceptionCheck(operation);
-}
-
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EZJ operation, int dst, GPRReg arg)
-{
- setupArgumentsWithExecState(TrustedImm32(dst), arg);
- return appendCallWithExceptionCheck(operation);
-}
-
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(JIT::WithProfileTag, J_JITOperation_ESsiJI operation, int dst, StructureStubInfo* stubInfo, GPRReg arg1, UniquedStringImpl* uid)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(JIT::WithProfileTag, J_JITOperation_ESsiJI operation, int dst, StructureStubInfo* stubInfo, GPRReg arg1, StringImpl* uid)
{
setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1, TrustedImmPtr(uid));
return appendCallWithExceptionCheckSetJSValueResultWithProfile(operation, dst);
@@ -491,24 +351,6 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJ operati
return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJAp operation, int dst, GPRReg arg1, GPRReg arg2, ArrayProfile* arrayProfile)
-{
- setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(arrayProfile));
- return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
-}
-
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJBy operation, int dst, GPRReg arg1, GPRReg arg2, ByValInfo* byValInfo)
-{
- setupArgumentsWithExecState(arg1, arg2, TrustedImmPtr(byValInfo));
- return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
-}
-
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(Z_JITOperation_EJOJ operation, GPRReg arg1, GPRReg arg2, GPRReg arg3)
-{
- setupArgumentsWithExecState(arg1, arg2, arg3);
- return appendCallWithExceptionCheck(operation);
-}
-
ALWAYS_INLINE MacroAssembler::Call JIT::callOperationNoExceptionCheck(V_JITOperation_EJ operation, GPRReg arg1)
{
setupArgumentsWithExecState(arg1);
@@ -534,15 +376,9 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(S_JITOperation_EJJ operati
return appendCallWithExceptionCheck(operation);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EZSymtabJ operation, int op1, SymbolTable* symbolTable, RegisterID regOp3)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EIdJZ operation, const Identifier* identOp1, RegisterID regOp2, int32_t op3)
{
- setupArgumentsWithExecState(TrustedImm32(op1), TrustedImmPtr(symbolTable), regOp3);
- return appendCallWithExceptionCheck(operation);
-}
-
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EZSymtabJ operation, int op1, SymbolTable* symbolTable, RegisterID regOp3)
-{
- setupArgumentsWithExecState(TrustedImm32(op1), TrustedImmPtr(symbolTable), regOp3);
+ setupArgumentsWithExecState(TrustedImmPtr(identOp1), regOp2, TrustedImm32(op3));
return appendCallWithExceptionCheck(operation);
}
@@ -552,15 +388,9 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJ operatio
return appendCallWithExceptionCheck(operation);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECIZJJ operation, RegisterID regOp1, UniquedStringImpl* identOp2, int32_t op3, RegisterID regOp4, RegisterID regOp5)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJIdJJ operation, RegisterID regOp1, const Identifier* identOp2, RegisterID regOp3, RegisterID regOp4)
{
- setupArgumentsWithExecState(regOp1, TrustedImmPtr(identOp2), TrustedImm32(op3), regOp4, regOp5);
- return appendCallWithExceptionCheck(operation);
-}
-
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECJZC operation, RegisterID regOp1, RegisterID regOp2, int32_t op3, RegisterID regOp4)
-{
- setupArgumentsWithExecState(regOp1, regOp2, TrustedImm32(op3), regOp4);
+ setupArgumentsWithExecState(regOp1, TrustedImmPtr(identOp2), regOp3, regOp4);
return appendCallWithExceptionCheck(operation);
}
@@ -602,21 +432,20 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperationNoExceptionCheck(V_JITOpera
return appendCall(operation);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(Z_JITOperation_EJOJ operation, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2, GPRReg arg3Tag, GPRReg arg3Payload)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(F_JITOperation_EJZ operation, GPRReg arg1Tag, GPRReg arg1Payload, int32_t arg2)
{
- setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, arg2, EABI_32BIT_DUMMY_ARG arg3Payload, arg3Tag);
- return appendCallWithExceptionCheck(operation);
-}
-
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(Z_JITOperation_EJZZ operation, GPRReg arg1Tag, GPRReg arg1Payload, int32_t arg2, int32_t arg3)
-{
- setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, TrustedImm32(arg2), TrustedImm32(arg3));
+#if CPU(SH4)
+ // We have to put arg3 in the 4th argument register (r7), as the 64-bit value arg2 will be passed on the stack on the SH4 architecture.
+ setupArgumentsWithExecState(arg1Payload, arg1Tag, TrustedImm32(arg2));
+#else
+ setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, TrustedImm32(arg2));
+#endif
return appendCallWithExceptionCheck(operation);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(F_JITOperation_EFJZZ operation, GPRReg arg1, GPRReg arg2Tag, GPRReg arg2Payload, int32_t arg3, GPRReg arg4)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(F_JITOperation_EFJJ operation, GPRReg arg1, GPRReg arg2Tag, GPRReg arg2Payload, GPRReg arg3Tag, GPRReg arg3Payload)
{
- setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag, TrustedImm32(arg3), arg4);
+ setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag, arg3Payload, arg3Tag);
return appendCallWithExceptionCheck(operation);
}
@@ -632,7 +461,7 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJ operatio
return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(JIT::WithProfileTag, J_JITOperation_ESsiJI operation, int dst, StructureStubInfo* stubInfo, GPRReg arg1Tag, GPRReg arg1Payload, UniquedStringImpl* uid)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(JIT::WithProfileTag, J_JITOperation_ESsiJI operation, int dst, StructureStubInfo* stubInfo, GPRReg arg1Tag, GPRReg arg1Payload, StringImpl* uid)
{
setupArgumentsWithExecState(TrustedImmPtr(stubInfo), arg1Payload, arg1Tag, TrustedImmPtr(uid));
return appendCallWithExceptionCheckSetJSValueResultWithProfile(operation, dst);
@@ -650,18 +479,6 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJ operati
return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJAp operation, int dst, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload, ArrayProfile* arrayProfile)
-{
- setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, SH4_32BIT_DUMMY_ARG arg2Payload, arg2Tag, TrustedImmPtr(arrayProfile));
- return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
-}
-
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJJBy operation, int dst, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload, ByValInfo* byValInfo)
-{
- setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, SH4_32BIT_DUMMY_ARG arg2Payload, arg2Tag, TrustedImmPtr(byValInfo));
- return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
-}
-
ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(JIT::WithProfileTag, J_JITOperation_EJJ operation, int dst, GPRReg arg1Tag, GPRReg arg1Payload, GPRReg arg2Tag, GPRReg arg2Payload)
{
setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG arg1Payload, arg1Tag, SH4_32BIT_DUMMY_ARG arg2Payload, arg2Tag);
@@ -686,15 +503,9 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(S_JITOperation_EJJ operati
return appendCallWithExceptionCheck(operation);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECIZCC operation, RegisterID regOp1, UniquedStringImpl* identOp2, int32_t op3, RegisterID regOp4, RegisterID regOp5)
-{
- setupArgumentsWithExecState(regOp1, TrustedImmPtr(identOp2), TrustedImm32(op3), regOp4, regOp5);
- return appendCallWithExceptionCheck(operation);
-}
-
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECJZC operation, RegisterID arg1, RegisterID arg2Tag, RegisterID arg2Payload, int32_t arg3, RegisterID arg4)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ECICC operation, RegisterID regOp1, const Identifier* identOp2, RegisterID regOp3, RegisterID regOp4)
{
- setupArgumentsWithExecState(arg1, arg2Payload, arg2Tag, TrustedImm32(arg3), arg4);
+ setupArgumentsWithExecState(regOp1, TrustedImmPtr(identOp2), regOp3, regOp4);
return appendCallWithExceptionCheck(operation);
}
@@ -704,13 +515,13 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJ operatio
return appendCallWithExceptionCheck(operation);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EZSymtabJ operation, int32_t op1, SymbolTable* symbolTable, RegisterID regOp3Tag, RegisterID regOp3Payload)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EIdJZ operation, const Identifier* identOp1, RegisterID regOp2Tag, RegisterID regOp2Payload, int32_t op3)
{
- setupArgumentsWithExecState(TrustedImm32(op1), TrustedImmPtr(symbolTable), EABI_32BIT_DUMMY_ARG regOp3Payload, regOp3Tag);
+ setupArgumentsWithExecState(TrustedImmPtr(identOp1), regOp2Payload, regOp2Tag, TrustedImm32(op3));
return appendCallWithExceptionCheck(operation);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ESsiJJI operation, StructureStubInfo* stubInfo, RegisterID regOp1Tag, RegisterID regOp1Payload, RegisterID regOp2Tag, RegisterID regOp2Payload, UniquedStringImpl* uid)
+ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_ESsiJJI operation, StructureStubInfo* stubInfo, RegisterID regOp1Tag, RegisterID regOp1Payload, RegisterID regOp2Tag, RegisterID regOp2Payload, StringImpl* uid)
{
setupArgumentsWithExecState(TrustedImmPtr(stubInfo), regOp1Payload, regOp1Tag, regOp2Payload, regOp2Tag, TrustedImmPtr(uid));
return appendCallWithExceptionCheck(operation);
@@ -722,24 +533,6 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJJJ operat
return appendCallWithExceptionCheck(operation);
}
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJJJAp operation, RegisterID regOp1Tag, RegisterID regOp1Payload, RegisterID regOp2Tag, RegisterID regOp2Payload, RegisterID regOp3Tag, RegisterID regOp3Payload, ArrayProfile* arrayProfile)
-{
- setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG regOp1Payload, regOp1Tag, SH4_32BIT_DUMMY_ARG regOp2Payload, regOp2Tag, regOp3Payload, regOp3Tag, TrustedImmPtr(arrayProfile));
- return appendCallWithExceptionCheck(operation);
-}
-
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJJJBy operation, RegisterID regOp1Tag, RegisterID regOp1Payload, RegisterID regOp2Tag, RegisterID regOp2Payload, RegisterID regOp3Tag, RegisterID regOp3Payload, ByValInfo* byValInfo)
-{
- setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG regOp1Payload, regOp1Tag, SH4_32BIT_DUMMY_ARG regOp2Payload, regOp2Tag, regOp3Payload, regOp3Tag, TrustedImmPtr(byValInfo));
- return appendCallWithExceptionCheck(operation);
-}
-
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EZJ operation, int dst, RegisterID regOp1Tag, RegisterID regOp1Payload)
-{
- setupArgumentsWithExecState(TrustedImm32(dst), regOp1Payload, regOp1Tag);
- return appendCallWithExceptionCheck(operation);
-}
-
ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJZ operation, RegisterID regOp1Tag, RegisterID regOp1Payload, int32_t op2)
{
setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG regOp1Payload, regOp1Tag, TrustedImm32(op2));
@@ -751,12 +544,6 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(V_JITOperation_EJZJ operat
setupArgumentsWithExecState(EABI_32BIT_DUMMY_ARG regOp1Payload, regOp1Tag, TrustedImm32(op2), EABI_32BIT_DUMMY_ARG regOp3Payload, regOp3Tag);
return appendCallWithExceptionCheck(operation);
}
-
-ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJscCJ operation, int dst, GPRReg arg1, JSCell* cell, GPRReg arg2Tag, GPRReg arg2Payload)
-{
- setupArgumentsWithExecState(arg1, TrustedImmPtr(cell), EABI_32BIT_DUMMY_ARG arg2Payload, arg2Tag);
- return appendCallWithExceptionCheckSetJSValueResult(operation, dst);
-}
#undef EABI_32BIT_DUMMY_ARG
#undef SH4_32BIT_DUMMY_ARG
@@ -765,7 +552,7 @@ ALWAYS_INLINE MacroAssembler::Call JIT::callOperation(J_JITOperation_EJscCJ oper
ALWAYS_INLINE JIT::Jump JIT::checkStructure(RegisterID reg, Structure* structure)
{
- return branchStructure(NotEqual, Address(reg, JSCell::structureIDOffset()), structure);
+ return branchPtr(NotEqual, Address(reg, JSCell::structureOffset()), TrustedImmPtr(structure));
}
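A usage sketch (expectedStructure and the fast-path body are illustrative):

    // Guard a fast path on a known Structure; this code compares the full
    // Structure* in the cell header rather than a 32-bit structure ID.
    addSlowCase(checkStructure(regT0, expectedStructure));
    // ... fast path assuming regT0 points at an object of expectedStructure ...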
ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator& iter, int vReg)
@@ -774,24 +561,16 @@ ALWAYS_INLINE void JIT::linkSlowCaseIfNotJSCell(Vector<SlowCaseEntry>::iterator&
linkSlowCase(iter);
}
-ALWAYS_INLINE void JIT::linkAllSlowCasesForBytecodeOffset(Vector<SlowCaseEntry>& slowCases, Vector<SlowCaseEntry>::iterator& iter, unsigned bytecodeOffset)
-{
- while (iter != slowCases.end() && iter->to == bytecodeOffset) {
- iter->from.link(this);
- ++iter;
- }
-}
-
ALWAYS_INLINE void JIT::addSlowCase(Jump jump)
{
- ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
m_slowCases.append(SlowCaseEntry(jump, m_bytecodeOffset));
}
ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)
{
- ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
const JumpList::JumpVector& jumpVector = jumpList.jumps();
size_t size = jumpVector.size();
@@ -801,7 +580,7 @@ ALWAYS_INLINE void JIT::addSlowCase(JumpList jumpList)
ALWAYS_INLINE void JIT::addSlowCase()
{
- ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
Jump emptyJump; // Doing it this way to make Windows happy.
m_slowCases.append(SlowCaseEntry(emptyJump, m_bytecodeOffset));
@@ -809,26 +588,21 @@ ALWAYS_INLINE void JIT::addSlowCase()
ALWAYS_INLINE void JIT::addJump(Jump jump, int relativeOffset)
{
- ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
m_jmpTable.append(JumpTable(jump, m_bytecodeOffset + relativeOffset));
}
ALWAYS_INLINE void JIT::emitJumpSlowToHot(Jump jump, int relativeOffset)
{
- ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
jump.linkTo(m_labels[m_bytecodeOffset + relativeOffset], this);
}
-ALWAYS_INLINE JIT::Jump JIT::emitJumpIfCellObject(RegisterID cellReg)
-{
- return branch8(AboveOrEqual, Address(cellReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType));
-}
-
-ALWAYS_INLINE JIT::Jump JIT::emitJumpIfCellNotObject(RegisterID cellReg)
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotObject(RegisterID structureReg)
{
- return branch8(Below, Address(cellReg, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType));
+ return branch8(Below, Address(structureReg, Structure::typeInfoTypeOffset()), TrustedImm32(ObjectType));
}
#if ENABLE(SAMPLING_FLAGS)
@@ -884,7 +658,7 @@ ALWAYS_INLINE void JIT::sampleCodeBlock(CodeBlock* codeBlock)
#endif
#endif
-ALWAYS_INLINE bool JIT::isOperandConstantChar(int src)
+ALWAYS_INLINE bool JIT::isOperandConstantImmediateChar(int src)
{
return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isString() && asString(getConstantOperand(src).asCell())->length() == 1;
}
@@ -892,22 +666,18 @@ ALWAYS_INLINE bool JIT::isOperandConstantChar(int src)
template<typename StructureType>
inline void JIT::emitAllocateJSObject(RegisterID allocator, StructureType structure, RegisterID result, RegisterID scratch)
{
- if (Options::forceGCSlowPaths())
- addSlowCase(jump());
- else {
- loadPtr(Address(allocator, MarkedAllocator::offsetOfFreeListHead()), result);
- addSlowCase(branchTestPtr(Zero, result));
- }
+ loadPtr(Address(allocator, MarkedAllocator::offsetOfFreeListHead()), result);
+ addSlowCase(branchTestPtr(Zero, result));
// remove the object from the free list
loadPtr(Address(result), scratch);
storePtr(scratch, Address(allocator, MarkedAllocator::offsetOfFreeListHead()));
+ // initialize the object's structure
+ storePtr(structure, Address(result, JSCell::structureOffset()));
+
// initialize the object's property storage pointer
storePtr(TrustedImmPtr(0), Address(result, JSObject::butterflyOffset()));
-
- // initialize the object's structure
- emitStoreStructureWithTypeInfo(structure, result, scratch);
}
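The fast path above is a free-list pop plus header initialization; in
pseudo-C++, what the emitted code does (sketch):

    // result = allocator->freeListHead;     // loadPtr(offsetOfFreeListHead)
    // if (!result) goto slowCase;           // branchTestPtr(Zero, result)
    // allocator->freeListHead = *result;    // a free cell's first word links
    //                                       // to the next free cell
    // result->structure = structure;        // storePtr(structureOffset)
    // result->butterfly = nullptr;          // no out-of-line properties yet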
inline void JIT::emitValueProfilingSite(ValueProfile* valueProfile)
@@ -943,19 +713,22 @@ inline void JIT::emitValueProfilingSite()
emitValueProfilingSite(m_bytecodeOffset);
}
-inline void JIT::emitArrayProfilingSiteWithCell(RegisterID cell, RegisterID indexingType, ArrayProfile* arrayProfile)
+inline void JIT::emitArrayProfilingSite(RegisterID structureAndIndexingType, RegisterID scratch, ArrayProfile* arrayProfile)
{
- if (shouldEmitProfiling()) {
- load32(MacroAssembler::Address(cell, JSCell::structureIDOffset()), indexingType);
- store32(indexingType, arrayProfile->addressOfLastSeenStructureID());
- }
+ UNUSED_PARAM(scratch); // This scratch register has proven useful here before, so keep it around for now.
+
+ RegisterID structure = structureAndIndexingType;
+ RegisterID indexingType = structureAndIndexingType;
+
+ if (shouldEmitProfiling())
+ storePtr(structure, arrayProfile->addressOfLastSeenStructure());
- load8(Address(cell, JSCell::indexingTypeOffset()), indexingType);
+ load8(Address(structure, Structure::indexingTypeOffset()), indexingType);
}
-inline void JIT::emitArrayProfilingSiteForBytecodeIndexWithCell(RegisterID cell, RegisterID indexingType, unsigned bytecodeIndex)
+inline void JIT::emitArrayProfilingSiteForBytecodeIndex(RegisterID structureAndIndexingType, RegisterID scratch, unsigned bytecodeIndex)
{
- emitArrayProfilingSiteWithCell(cell, indexingType, m_codeBlock->getOrAddArrayProfile(bytecodeIndex));
+ emitArrayProfilingSite(structureAndIndexingType, scratch, m_codeBlock->getOrAddArrayProfile(bytecodeIndex));
}
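Net effect of the profiling site (sketch): record the last-seen Structure so a
later compile can choose a specialized JITArrayMode via chooseArrayMode, then
leave the indexing type in the same register for the array checks that follow:

    // if (shouldEmitProfiling())
    //     arrayProfile->lastSeenStructure = structure;  // storePtr
    // indexingType = structure->indexingType();         // load8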
inline void JIT::emitArrayProfileStoreToHoleSpecialCase(ArrayProfile* arrayProfile)
@@ -987,16 +760,6 @@ inline JITArrayMode JIT::chooseArrayMode(ArrayProfile* profile)
return JITContiguous;
}
-ALWAYS_INLINE int32_t JIT::getOperandConstantInt(int src)
-{
- return getConstantOperand(src).asInt32();
-}
-
-ALWAYS_INLINE double JIT::getOperandConstantDouble(int src)
-{
- return getConstantOperand(src).asDouble();
-}
-
#if USE(JSVALUE32_64)
inline void JIT::emitLoadTag(int index, RegisterID tag)
@@ -1025,16 +788,6 @@ inline void JIT::emitLoad(const JSValue& v, RegisterID tag, RegisterID payload)
move(Imm32(v.tag()), tag);
}
-ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, JSValueRegs dst)
-{
- emitLoad(src, dst.tagGPR(), dst.payloadGPR());
-}
-
-ALWAYS_INLINE void JIT::emitPutVirtualRegister(int dst, JSValueRegs from)
-{
- emitStore(dst, from.tagGPR(), from.payloadGPR());
-}
-
inline void JIT::emitLoad(int index, RegisterID tag, RegisterID payload, RegisterID base)
{
RELEASE_ASSERT(tag != payload);
@@ -1066,7 +819,7 @@ inline void JIT::emitLoadDouble(int index, FPRegisterID value)
{
if (m_codeBlock->isConstantRegisterIndex(index)) {
WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
- loadDouble(TrustedImmPtr(&inConstantPool), value);
+ loadDouble(&inConstantPool, value);
} else
loadDouble(addressFor(index), value);
}
@@ -1151,20 +904,20 @@ inline void JIT::emitJumpSlowCaseIfNotJSCell(int virtualRegisterIndex, RegisterI
}
}
-ALWAYS_INLINE bool JIT::isOperandConstantInt(int src)
+ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(int src)
{
return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}
-ALWAYS_INLINE bool JIT::getOperandConstantInt(int op1, int op2, int& op, int32_t& constant)
+ALWAYS_INLINE bool JIT::getOperandConstantImmediateInt(int op1, int op2, int& op, int32_t& constant)
{
- if (isOperandConstantInt(op1)) {
+ if (isOperandConstantImmediateInt(op1)) {
constant = getConstantOperand(op1).asInt32();
op = op2;
return true;
}
- if (isOperandConstantInt(op2)) {
+ if (isOperandConstantImmediateInt(op2)) {
constant = getConstantOperand(op2).asInt32();
op = op1;
return true;
@@ -1178,7 +931,7 @@ ALWAYS_INLINE bool JIT::getOperandConstantInt(int op1, int op2, int& op, int32_t
// get arg puts an arg from the SF register array into a h/w register
ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
{
- ASSERT(m_bytecodeOffset != std::numeric_limits<unsigned>::max()); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
+ ASSERT(m_bytecodeOffset != (unsigned)-1); // This method should only be called during hot/cold path generation, so that m_bytecodeOffset is set.
// TODO: we want to reuse values that are already in registers if we can - add a register allocator!
if (m_codeBlock->isConstantRegisterIndex(src)) {
@@ -1193,11 +946,6 @@ ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, RegisterID dst)
load64(Address(callFrameRegister, src * sizeof(Register)), dst);
}
-ALWAYS_INLINE void JIT::emitGetVirtualRegister(int src, JSValueRegs dst)
-{
- emitGetVirtualRegister(src, dst.payloadGPR());
-}
-
ALWAYS_INLINE void JIT::emitGetVirtualRegister(VirtualRegister src, RegisterID dst)
{
emitGetVirtualRegister(src.offset(), dst);
@@ -1214,19 +962,19 @@ ALWAYS_INLINE void JIT::emitGetVirtualRegisters(VirtualRegister src1, RegisterID
emitGetVirtualRegisters(src1.offset(), dst1, src2.offset(), dst2);
}
-ALWAYS_INLINE bool JIT::isOperandConstantInt(int src)
+ALWAYS_INLINE int32_t JIT::getConstantOperandImmediateInt(int src)
{
- return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
+ return getConstantOperand(src).asInt32();
}
-ALWAYS_INLINE void JIT::emitPutVirtualRegister(int dst, RegisterID from)
+ALWAYS_INLINE bool JIT::isOperandConstantImmediateInt(int src)
{
- store64(from, Address(callFrameRegister, dst * sizeof(Register)));
+ return m_codeBlock->isConstantRegisterIndex(src) && getConstantOperand(src).isInt32();
}
-ALWAYS_INLINE void JIT::emitPutVirtualRegister(int dst, JSValueRegs from)
+ALWAYS_INLINE void JIT::emitPutVirtualRegister(int dst, RegisterID from)
{
- emitPutVirtualRegister(dst, from.payloadGPR());
+ store64(from, Address(callFrameRegister, dst * sizeof(Register)));
}
ALWAYS_INLINE void JIT::emitPutVirtualRegister(VirtualRegister dst, RegisterID from)
@@ -1271,7 +1019,7 @@ inline void JIT::emitLoadDouble(int index, FPRegisterID value)
{
if (m_codeBlock->isConstantRegisterIndex(index)) {
WriteBarrier<Unknown>& inConstantPool = m_codeBlock->constantRegister(index);
- loadDouble(TrustedImmPtr(&inConstantPool), value);
+ loadDouble(&inConstantPool, value);
} else
loadDouble(addressFor(index), value);
}
@@ -1279,50 +1027,50 @@ inline void JIT::emitLoadDouble(int index, FPRegisterID value)
inline void JIT::emitLoadInt32ToDouble(int index, FPRegisterID value)
{
if (m_codeBlock->isConstantRegisterIndex(index)) {
- ASSERT(isOperandConstantInt(index));
+ ASSERT(isOperandConstantImmediateInt(index));
convertInt32ToDouble(Imm32(getConstantOperand(index).asInt32()), value);
} else
convertInt32ToDouble(addressFor(index), value);
}
-ALWAYS_INLINE JIT::Jump JIT::emitJumpIfInt(RegisterID reg)
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfImmediateInteger(RegisterID reg)
{
return branch64(AboveOrEqual, reg, tagTypeNumberRegister);
}
-ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotInt(RegisterID reg)
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateInteger(RegisterID reg)
{
return branch64(Below, reg, tagTypeNumberRegister);
}
-ALWAYS_INLINE JIT::PatchableJump JIT::emitPatchableJumpIfNotInt(RegisterID reg)
+ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
- return patchableBranch64(Below, reg, tagTypeNumberRegister);
+ move(reg1, scratch);
+ and64(reg2, scratch);
+ return emitJumpIfNotImmediateInteger(scratch);
}
-ALWAYS_INLINE JIT::Jump JIT::emitJumpIfNotInt(RegisterID reg1, RegisterID reg2, RegisterID scratch)
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateInteger(RegisterID reg)
{
- move(reg1, scratch);
- and64(reg2, scratch);
- return emitJumpIfNotInt(scratch);
+ addSlowCase(emitJumpIfNotImmediateInteger(reg));
}
-ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotInt(RegisterID reg)
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateIntegers(RegisterID reg1, RegisterID reg2, RegisterID scratch)
{
- addSlowCase(emitJumpIfNotInt(reg));
+ addSlowCase(emitJumpIfNotImmediateIntegers(reg1, reg2, scratch));
}
-ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotInt(RegisterID reg1, RegisterID reg2, RegisterID scratch)
+ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotImmediateNumber(RegisterID reg)
{
- addSlowCase(emitJumpIfNotInt(reg1, reg2, scratch));
+ addSlowCase(emitJumpIfNotImmediateNumber(reg));
}
-ALWAYS_INLINE void JIT::emitJumpSlowCaseIfNotNumber(RegisterID reg)
+ALWAYS_INLINE void JIT::emitFastArithReTagImmediate(RegisterID src, RegisterID dest)
{
- addSlowCase(emitJumpIfNotNumber(reg));
+ emitFastArithIntToImmNoCheck(src, dest);
}
-ALWAYS_INLINE void JIT::emitTagBool(RegisterID reg)
+ALWAYS_INLINE void JIT::emitTagAsBoolImmediate(RegisterID reg)
{
or32(TrustedImm32(static_cast<int32_t>(ValueFalse)), reg);
}
diff --git a/Source/JavaScriptCore/jit/JITLeftShiftGenerator.cpp b/Source/JavaScriptCore/jit/JITLeftShiftGenerator.cpp
deleted file mode 100644
index 1ddaa6ab1..000000000
--- a/Source/JavaScriptCore/jit/JITLeftShiftGenerator.cpp
+++ /dev/null
@@ -1,84 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "JITLeftShiftGenerator.h"
-
-#if ENABLE(JIT)
-
-namespace JSC {
-
-void JITLeftShiftGenerator::generateFastPath(CCallHelpers& jit)
-{
- ASSERT(m_scratchGPR != InvalidGPRReg);
- ASSERT(m_scratchGPR != m_left.payloadGPR());
- ASSERT(m_scratchGPR != m_right.payloadGPR());
-#if USE(JSVALUE32_64)
- ASSERT(m_scratchGPR != m_left.tagGPR());
- ASSERT(m_scratchGPR != m_right.tagGPR());
-#endif
-
- ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());
-
- m_didEmitFastPath = true;
-
- if (m_rightOperand.isConstInt32()) {
- // Try to do (intVar << intConstant).
- m_slowPathJumpList.append(jit.branchIfNotInt32(m_left));
-
- jit.moveValueRegs(m_left, m_result);
- jit.lshift32(CCallHelpers::Imm32(m_rightOperand.asConstInt32()), m_result.payloadGPR());
-
- } else {
- // Try to do (intConstant << intVar) or (intVar << intVar).
- m_slowPathJumpList.append(jit.branchIfNotInt32(m_right));
-
- GPRReg rightOperandGPR = m_right.payloadGPR();
- if (rightOperandGPR == m_result.payloadGPR()) {
- jit.move(rightOperandGPR, m_scratchGPR);
- rightOperandGPR = m_scratchGPR;
- }
-
- if (m_leftOperand.isConstInt32()) {
-#if USE(JSVALUE32_64)
- jit.move(m_right.tagGPR(), m_result.tagGPR());
-#endif
- jit.move(CCallHelpers::Imm32(m_leftOperand.asConstInt32()), m_result.payloadGPR());
- } else {
- m_slowPathJumpList.append(jit.branchIfNotInt32(m_left));
- jit.moveValueRegs(m_left, m_result);
- }
-
- jit.lshift32(rightOperandGPR, m_result.payloadGPR());
- }
-
-#if USE(JSVALUE64)
- jit.or64(GPRInfo::tagTypeNumberRegister, m_result.payloadGPR());
-#endif
-}
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITLeftShiftGenerator.h b/Source/JavaScriptCore/jit/JITLeftShiftGenerator.h
deleted file mode 100644
index 633bcb3b1..000000000
--- a/Source/JavaScriptCore/jit/JITLeftShiftGenerator.h
+++ /dev/null
@@ -1,49 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef JITLeftShiftGenerator_h
-#define JITLeftShiftGenerator_h
-
-#if ENABLE(JIT)
-
-#include "JITBitBinaryOpGenerator.h"
-
-namespace JSC {
-
-class JITLeftShiftGenerator : public JITBitBinaryOpGenerator {
-public:
- JITLeftShiftGenerator(const SnippetOperand& leftOperand, const SnippetOperand& rightOperand,
- JSValueRegs result, JSValueRegs left, JSValueRegs right, GPRReg scratchGPR)
- : JITBitBinaryOpGenerator(leftOperand, rightOperand, result, left, right, scratchGPR)
- { }
-
- void generateFastPath(CCallHelpers&);
-};
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
-
-#endif // JITLeftShiftGenerator_h
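Both deleted files follow the shared snippet-generator pattern; a usage sketch
(accessor names follow the sibling arithmetic generators such as
JITAddGenerator and are assumptions here):

    // JITLeftShiftGenerator gen(leftOperand, rightOperand, result, left, right, scratchGPR);
    // gen.generateFastPath(jit);
    // if (gen.didEmitFastPath()) {
    //     // fall through: result holds the boxed int32 shift result;
    //     // gen's slow-path jump list gets linked to an out-of-line call.
    // }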
diff --git a/Source/JavaScriptCore/jit/JITMulGenerator.cpp b/Source/JavaScriptCore/jit/JITMulGenerator.cpp
deleted file mode 100644
index b1fb0b0d0..000000000
--- a/Source/JavaScriptCore/jit/JITMulGenerator.cpp
+++ /dev/null
@@ -1,198 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "JITMulGenerator.h"
-
-#if ENABLE(JIT)
-
-namespace JSC {
-
-void JITMulGenerator::generateFastPath(CCallHelpers& jit)
-{
- ASSERT(m_scratchGPR != InvalidGPRReg);
- ASSERT(m_scratchGPR != m_left.payloadGPR());
- ASSERT(m_scratchGPR != m_right.payloadGPR());
-#if USE(JSVALUE64)
- ASSERT(m_scratchGPR != m_result.payloadGPR());
-#else
- ASSERT(m_scratchGPR != m_left.tagGPR());
- ASSERT(m_scratchGPR != m_right.tagGPR());
- ASSERT(m_scratchFPR != InvalidFPRReg);
-#endif
-
- ASSERT(!m_leftOperand.isPositiveConstInt32() || !m_rightOperand.isPositiveConstInt32());
-
- if (!m_leftOperand.mightBeNumber() || !m_rightOperand.mightBeNumber()) {
- ASSERT(!m_didEmitFastPath);
- return;
- }
-
- m_didEmitFastPath = true;
-
- if (m_leftOperand.isPositiveConstInt32() || m_rightOperand.isPositiveConstInt32()) {
- JSValueRegs var = m_leftOperand.isPositiveConstInt32() ? m_right : m_left;
- SnippetOperand& varOpr = m_leftOperand.isPositiveConstInt32() ? m_rightOperand : m_leftOperand;
- SnippetOperand& constOpr = m_leftOperand.isPositiveConstInt32() ? m_leftOperand : m_rightOperand;
-
- // Try to do intVar * intConstant.
- CCallHelpers::Jump notInt32 = jit.branchIfNotInt32(var);
-
- GPRReg multiplyResultGPR = m_result.payloadGPR();
- if (multiplyResultGPR == var.payloadGPR())
- multiplyResultGPR = m_scratchGPR;
-
- m_slowPathJumpList.append(jit.branchMul32(CCallHelpers::Overflow, var.payloadGPR(), CCallHelpers::Imm32(constOpr.asConstInt32()), multiplyResultGPR));
-
- jit.boxInt32(multiplyResultGPR, m_result);
- m_endJumpList.append(jit.jump());
-
- if (!jit.supportsFloatingPoint()) {
- m_slowPathJumpList.append(notInt32);
- return;
- }
-
- // Try to do doubleVar * double(intConstant).
- notInt32.link(&jit);
- if (!varOpr.definitelyIsNumber())
- m_slowPathJumpList.append(jit.branchIfNotNumber(var, m_scratchGPR));
-
- jit.unboxDoubleNonDestructive(var, m_leftFPR, m_scratchGPR, m_scratchFPR);
-
- jit.move(CCallHelpers::Imm32(constOpr.asConstInt32()), m_scratchGPR);
- jit.convertInt32ToDouble(m_scratchGPR, m_rightFPR);
-
- // Fall thru to doubleVar * doubleVar.
-
- } else {
- ASSERT(!m_leftOperand.isPositiveConstInt32() && !m_rightOperand.isPositiveConstInt32());
-
- CCallHelpers::Jump leftNotInt;
- CCallHelpers::Jump rightNotInt;
-
- // Try to do intVar * intVar.
- leftNotInt = jit.branchIfNotInt32(m_left);
- rightNotInt = jit.branchIfNotInt32(m_right);
-
- m_slowPathJumpList.append(jit.branchMul32(CCallHelpers::Overflow, m_right.payloadGPR(), m_left.payloadGPR(), m_scratchGPR));
- m_slowPathJumpList.append(jit.branchTest32(CCallHelpers::Zero, m_scratchGPR)); // Go slow if potential negative zero.
-
- jit.boxInt32(m_scratchGPR, m_result);
- m_endJumpList.append(jit.jump());
-
- if (!jit.supportsFloatingPoint()) {
- m_slowPathJumpList.append(leftNotInt);
- m_slowPathJumpList.append(rightNotInt);
- return;
- }
-
- leftNotInt.link(&jit);
- if (!m_leftOperand.definitelyIsNumber())
- m_slowPathJumpList.append(jit.branchIfNotNumber(m_left, m_scratchGPR));
- if (!m_rightOperand.definitelyIsNumber())
- m_slowPathJumpList.append(jit.branchIfNotNumber(m_right, m_scratchGPR));
-
- jit.unboxDoubleNonDestructive(m_left, m_leftFPR, m_scratchGPR, m_scratchFPR);
- CCallHelpers::Jump rightIsDouble = jit.branchIfNotInt32(m_right);
-
- jit.convertInt32ToDouble(m_right.payloadGPR(), m_rightFPR);
- CCallHelpers::Jump rightWasInteger = jit.jump();
-
- rightNotInt.link(&jit);
- if (!m_rightOperand.definitelyIsNumber())
- m_slowPathJumpList.append(jit.branchIfNotNumber(m_right, m_scratchGPR));
-
- jit.convertInt32ToDouble(m_left.payloadGPR(), m_leftFPR);
-
- rightIsDouble.link(&jit);
- jit.unboxDoubleNonDestructive(m_right, m_rightFPR, m_scratchGPR, m_scratchFPR);
-
- rightWasInteger.link(&jit);
-
-        // Fall through to doubleVar * doubleVar.
- }
-
- // Do doubleVar * doubleVar.
- jit.mulDouble(m_rightFPR, m_leftFPR);
-
- if (!m_resultProfile)
- jit.boxDouble(m_leftFPR, m_result);
- else {
-        // The Int52 overflow check below intentionally omits 1ll << 51 as a valid negative Int52 value.
-        // Hence, we get a false positive if the result is exactly that value; this is accepted
-        // deliberately to keep the check simple.
-
- const int64_t negativeZeroBits = 1ll << 63;
-#if USE(JSVALUE64)
- jit.moveDoubleTo64(m_leftFPR, m_result.payloadGPR());
- CCallHelpers::Jump notNegativeZero = jit.branch64(CCallHelpers::NotEqual, m_result.payloadGPR(), CCallHelpers::TrustedImm64(negativeZeroBits));
-
- jit.or32(CCallHelpers::TrustedImm32(ResultProfile::NegZeroDouble), CCallHelpers::AbsoluteAddress(m_resultProfile->addressOfFlags()));
- CCallHelpers::Jump done = jit.jump();
-
- notNegativeZero.link(&jit);
- jit.or32(CCallHelpers::TrustedImm32(ResultProfile::NonNegZeroDouble), CCallHelpers::AbsoluteAddress(m_resultProfile->addressOfFlags()));
-
- jit.move(m_result.payloadGPR(), m_scratchGPR);
- jit.urshiftPtr(CCallHelpers::Imm32(52), m_scratchGPR);
- jit.and32(CCallHelpers::Imm32(0x7ff), m_scratchGPR);
- CCallHelpers::Jump noInt52Overflow = jit.branch32(CCallHelpers::LessThanOrEqual, m_scratchGPR, CCallHelpers::TrustedImm32(0x431));
-
- jit.or32(CCallHelpers::TrustedImm32(ResultProfile::Int52Overflow), CCallHelpers::AbsoluteAddress(m_resultProfile->addressOfFlags()));
- noInt52Overflow.link(&jit);
-
- done.link(&jit);
- jit.sub64(GPRInfo::tagTypeNumberRegister, m_result.payloadGPR()); // Box the double.
-#else
- jit.boxDouble(m_leftFPR, m_result);
- CCallHelpers::JumpList notNegativeZero;
- notNegativeZero.append(jit.branch32(CCallHelpers::NotEqual, m_result.payloadGPR(), CCallHelpers::TrustedImm32(0)));
- notNegativeZero.append(jit.branch32(CCallHelpers::NotEqual, m_result.tagGPR(), CCallHelpers::TrustedImm32(negativeZeroBits >> 32)));
-
- jit.or32(CCallHelpers::TrustedImm32(ResultProfile::NegZeroDouble), CCallHelpers::AbsoluteAddress(m_resultProfile->addressOfFlags()));
- CCallHelpers::Jump done = jit.jump();
-
- notNegativeZero.link(&jit);
- jit.or32(CCallHelpers::TrustedImm32(ResultProfile::NonNegZeroDouble), CCallHelpers::AbsoluteAddress(m_resultProfile->addressOfFlags()));
-
- jit.move(m_result.tagGPR(), m_scratchGPR);
- jit.urshiftPtr(CCallHelpers::Imm32(52 - 32), m_scratchGPR);
- jit.and32(CCallHelpers::Imm32(0x7ff), m_scratchGPR);
- CCallHelpers::Jump noInt52Overflow = jit.branch32(CCallHelpers::LessThanOrEqual, m_scratchGPR, CCallHelpers::TrustedImm32(0x431));
-
- jit.or32(CCallHelpers::TrustedImm32(ResultProfile::Int52Overflow), CCallHelpers::AbsoluteAddress(m_resultProfile->addressOfFlags()));
-
- m_endJumpList.append(noInt52Overflow);
- if (m_scratchGPR == m_result.tagGPR() || m_scratchGPR == m_result.payloadGPR())
- jit.boxDouble(m_leftFPR, m_result);
-
- m_endJumpList.append(done);
-#endif
- }
-}
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
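The fast path deleted above bails to its slow path on int32 overflow and on any zero product, since a zero product may need to be JavaScript's -0, which only a double can represent. A standalone C++ sketch of those semantics (illustrative host code, not JSC's; __builtin_mul_overflow is a GCC/Clang builtin):

    #include <cstdint>
    #include <optional>

    // Bail out (nullopt) exactly where the emitted code jumps to its slow
    // path: on int32 overflow, or on any zero product ("Go slow if potential
    // negative zero.").
    std::optional<int32_t> mulFastPath(int32_t left, int32_t right)
    {
        int32_t product;
        if (__builtin_mul_overflow(left, right, &product))
            return std::nullopt; // the branchMul32(Overflow, ...) case
        if (!product)
            return std::nullopt; // could be -0; let the slow path decide
        return product;
    }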
diff --git a/Source/JavaScriptCore/jit/JITMulGenerator.h b/Source/JavaScriptCore/jit/JITMulGenerator.h
deleted file mode 100644
index faa033bc6..000000000
--- a/Source/JavaScriptCore/jit/JITMulGenerator.h
+++ /dev/null
@@ -1,83 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef JITMulGenerator_h
-#define JITMulGenerator_h
-
-#if ENABLE(JIT)
-
-#include "CCallHelpers.h"
-#include "SnippetOperand.h"
-
-namespace JSC {
-
-class JITMulGenerator {
-public:
- JITMulGenerator(SnippetOperand leftOperand, SnippetOperand rightOperand,
- JSValueRegs result, JSValueRegs left, JSValueRegs right,
- FPRReg leftFPR, FPRReg rightFPR, GPRReg scratchGPR, FPRReg scratchFPR,
- ResultProfile* resultProfile = nullptr)
- : m_leftOperand(leftOperand)
- , m_rightOperand(rightOperand)
- , m_result(result)
- , m_left(left)
- , m_right(right)
- , m_leftFPR(leftFPR)
- , m_rightFPR(rightFPR)
- , m_scratchGPR(scratchGPR)
- , m_scratchFPR(scratchFPR)
- , m_resultProfile(resultProfile)
- {
- ASSERT(!m_leftOperand.isPositiveConstInt32() || !m_rightOperand.isPositiveConstInt32());
- }
-
- void generateFastPath(CCallHelpers&);
-
- bool didEmitFastPath() const { return m_didEmitFastPath; }
- CCallHelpers::JumpList& endJumpList() { return m_endJumpList; }
- CCallHelpers::JumpList& slowPathJumpList() { return m_slowPathJumpList; }
-
-private:
- SnippetOperand m_leftOperand;
- SnippetOperand m_rightOperand;
- JSValueRegs m_result;
- JSValueRegs m_left;
- JSValueRegs m_right;
- FPRReg m_leftFPR;
- FPRReg m_rightFPR;
- GPRReg m_scratchGPR;
- FPRReg m_scratchFPR;
- ResultProfile* m_resultProfile;
- bool m_didEmitFastPath { false };
-
- CCallHelpers::JumpList m_endJumpList;
- CCallHelpers::JumpList m_slowPathJumpList;
-};
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
-
-#endif // JITMulGenerator_h
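The ResultProfile flags set in generateFastPath above come from raw IEEE-754 bit tests on the double product. A minimal sketch of the two classifications, assuming the standard binary64 layout (host code, not JSC's):

    #include <cstdint>
    #include <cstring>

    static uint64_t bitsOf(double d)
    {
        uint64_t bits;
        std::memcpy(&bits, &d, sizeof bits);
        return bits;
    }

    // -0.0 is the only double whose bit pattern is exactly the sign bit.
    bool isNegativeZero(double d)
    {
        return bitsOf(d) == 1ull << 63;
    }

    // An exponent field above 0x431 (unbiased exponent > 50) means |d| >= 2^51,
    // outside the Int52 range. -2^51 itself is misreported as overflow - the
    // deliberate false positive the deleted comment describes.
    bool mightOverflowInt52(double d)
    {
        return ((bitsOf(d) >> 52) & 0x7ff) > 0x431;
    }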
diff --git a/Source/JavaScriptCore/jit/JITNegGenerator.cpp b/Source/JavaScriptCore/jit/JITNegGenerator.cpp
deleted file mode 100644
index c6851676d..000000000
--- a/Source/JavaScriptCore/jit/JITNegGenerator.cpp
+++ /dev/null
@@ -1,72 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "JITNegGenerator.h"
-
-#if ENABLE(JIT)
-
-namespace JSC {
-
-void JITNegGenerator::generateFastPath(CCallHelpers& jit)
-{
- ASSERT(m_scratchGPR != m_src.payloadGPR());
- ASSERT(m_scratchGPR != m_result.payloadGPR());
- ASSERT(m_scratchGPR != InvalidGPRReg);
-#if USE(JSVALUE32_64)
- ASSERT(m_scratchGPR != m_src.tagGPR());
- ASSERT(m_scratchGPR != m_result.tagGPR());
-#endif
-
- m_didEmitFastPath = true;
-
- jit.moveValueRegs(m_src, m_result);
- CCallHelpers::Jump srcNotInt = jit.branchIfNotInt32(m_src);
-
-    // Negating 0 should produce -0, which is a double, so 0 cannot be negated as an int.
- // The negative int32 0x80000000 doesn't have a positive int32 representation, and hence cannot be negated as an int.
- m_slowPathJumpList.append(jit.branchTest32(CCallHelpers::Zero, m_src.payloadGPR(), CCallHelpers::TrustedImm32(0x7fffffff)));
-
- jit.neg32(m_result.payloadGPR());
-#if USE(JSVALUE64)
- jit.boxInt32(m_result.payloadGPR(), m_result);
-#endif
- m_endJumpList.append(jit.jump());
-
- srcNotInt.link(&jit);
- m_slowPathJumpList.append(jit.branchIfNotNumber(m_src, m_scratchGPR));
-
- // For a double, all we need to do is to invert the sign bit.
-#if USE(JSVALUE64)
- jit.move(CCallHelpers::TrustedImm64((int64_t)(1ull << 63)), m_scratchGPR);
- jit.xor64(m_scratchGPR, m_result.payloadGPR());
-#else
- jit.xor32(CCallHelpers::TrustedImm32(1 << 31), m_result.tagGPR());
-#endif
-}
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
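Both bailout conditions of the deleted negation fast path fall out of one mask: a payload with no bits set under 0x7fffffff is either 0 (whose negation, -0, is a double) or 0x80000000 (whose negation has no int32 representation). A standalone sketch of the int and double paths:

    #include <cstdint>
    #include <cstring>
    #include <optional>

    // Int fast path: value & 0x7fffffff == 0 catches both 0 and INT32_MIN.
    std::optional<int32_t> negFastPathInt(int32_t value)
    {
        if (!(value & 0x7fffffff))
            return std::nullopt; // slow path
        return -value;
    }

    // Double path: negation is just flipping the sign bit.
    double negDouble(double d)
    {
        uint64_t bits;
        std::memcpy(&bits, &d, sizeof bits);
        bits ^= 1ull << 63;
        std::memcpy(&d, &bits, sizeof d);
        return d;
    }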
diff --git a/Source/JavaScriptCore/jit/JITOpcodes.cpp b/Source/JavaScriptCore/jit/JITOpcodes.cpp
index 738cb63fe..2bdae1914 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009, 2012-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2012, 2013 Apple Inc. All rights reserved.
* Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com>
*
* Redistribution and use in source and binary forms, with or without
@@ -28,23 +28,18 @@
#if ENABLE(JIT)
#include "JIT.h"
-#include "BasicBlockLocation.h"
+#include "Arguments.h"
#include "CopiedSpaceInlines.h"
#include "Debugger.h"
-#include "Exception.h"
#include "Heap.h"
#include "JITInlines.h"
#include "JSArray.h"
#include "JSCell.h"
#include "JSFunction.h"
-#include "JSPropertyNameEnumerator.h"
+#include "JSPropertyNameIterator.h"
#include "LinkBuffer.h"
-#include "MaxFrameExtentForSlowPathCall.h"
#include "SlowPathCall.h"
-#include "TypeLocation.h"
-#include "TypeProfilerLog.h"
#include "VirtualRegister.h"
-#include "Watchdog.h"
namespace JSC {
@@ -64,13 +59,21 @@ void JIT::emit_op_mov(Instruction* currentInstruction)
emitPutVirtualRegister(dst);
}
+void JIT::emit_op_captured_mov(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int src = currentInstruction[2].u.operand;
+
+ emitGetVirtualRegister(src, regT0);
+ emitNotifyWrite(regT0, regT1, currentInstruction[3].u.watchpointSet);
+ emitPutVirtualRegister(dst);
+}
void JIT::emit_op_end(Instruction* currentInstruction)
{
RELEASE_ASSERT(returnValueGPR != callFrameRegister);
emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueGPR);
- emitRestoreCalleeSaves();
- emitFunctionEpilogue();
+ restoreReturnAddressBeforeReturn(Address(callFrameRegister, CallFrame::returnPCOffset()));
ret();
}
@@ -104,29 +107,18 @@ void JIT::emitSlow_op_new_object(Instruction* currentInstruction, Vector<SlowCas
emitStoreCell(dst, returnValueGPR);
}
-void JIT::emit_op_overrides_has_instance(Instruction* currentInstruction)
+void JIT::emit_op_check_has_instance(Instruction* currentInstruction)
{
- int dst = currentInstruction[1].u.operand;
- int constructor = currentInstruction[2].u.operand;
- int hasInstanceValue = currentInstruction[3].u.operand;
-
- emitGetVirtualRegister(hasInstanceValue, regT0);
+ int baseVal = currentInstruction[3].u.operand;
- // We don't jump if we know what Symbol.hasInstance would do.
- Jump customhasInstanceValue = branchPtr(NotEqual, regT0, TrustedImmPtr(m_codeBlock->globalObject()->functionProtoHasInstanceSymbolFunction()));
+ emitGetVirtualRegister(baseVal, regT0);
- emitGetVirtualRegister(constructor, regT0);
-
-    // Check that the constructor has the 'ImplementsDefaultHasInstance' type-info flag, i.e. that the object is neither a C-API user nor a bound function.
- test8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance), regT0);
- emitTagBool(regT0);
- Jump done = jump();
+ // Check that baseVal is a cell.
+ emitJumpSlowCaseIfNotJSCell(regT0, baseVal);
- customhasInstanceValue.link(this);
- move(TrustedImm32(ValueTrue), regT0);
-
- done.link(this);
- emitPutVirtualRegister(dst);
+ // Check that baseVal 'ImplementsHasInstance'.
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
+ addSlowCase(branchTest8(Zero, Address(regT0, Structure::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance)));
}
void JIT::emit_op_instanceof(Instruction* currentInstruction)
@@ -140,12 +132,13 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
emitGetVirtualRegister(value, regT2);
emitGetVirtualRegister(proto, regT1);
-    // Check that value and proto are cells. baseVal must be a cell - this is checked by the get_by_id for Symbol.hasInstance.
+    // Check that value and proto are cells. baseVal must be a cell - this is checked by op_check_has_instance.
emitJumpSlowCaseIfNotJSCell(regT2, value);
emitJumpSlowCaseIfNotJSCell(regT1, proto);
// Check that prototype is an object
- addSlowCase(emitJumpIfCellNotObject(regT1));
+ loadPtr(Address(regT1, JSCell::structureOffset()), regT3);
+ addSlowCase(emitJumpIfNotObject(regT3));
// Optimistically load the result true, and start looping.
// Initially, regT1 still contains proto and regT2 still contains value.
@@ -155,7 +148,7 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
// Load the prototype of the object in regT2. If this is equal to regT1 - WIN!
// Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
- emitLoadStructure(regT2, regT2, regT3);
+ loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
load64(Address(regT2, Structure::prototypeOffset()), regT2);
Jump isInstance = branchPtr(Equal, regT2, regT1);
emitJumpIfJSCell(regT2).linkTo(loop, this);
@@ -168,12 +161,6 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
emitPutVirtualRegister(dst);
}
-void JIT::emit_op_instanceof_custom(Instruction*)
-{
- // This always goes to slow path since we expect it to be rare.
- addSlowCase(jump());
-}
-
void JIT::emit_op_is_undefined(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
@@ -186,19 +173,19 @@ void JIT::emit_op_is_undefined(Instruction* currentInstruction)
Jump done = jump();
isCell.link(this);
- Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
+ Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT1, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
move(TrustedImm32(0), regT0);
Jump notMasqueradesAsUndefined = jump();
isMasqueradesAsUndefined.link(this);
- emitLoadStructure(regT0, regT1, regT2);
move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
loadPtr(Address(regT1, Structure::globalObjectOffset()), regT1);
comparePtr(Equal, regT0, regT1, regT0);
notMasqueradesAsUndefined.link(this);
done.link(this);
- emitTagBool(regT0);
+ emitTagAsBoolImmediate(regT0);
emitPutVirtualRegister(dst);
}
@@ -210,7 +197,7 @@ void JIT::emit_op_is_boolean(Instruction* currentInstruction)
emitGetVirtualRegister(value, regT0);
xor64(TrustedImm32(static_cast<int32_t>(ValueFalse)), regT0);
test64(Zero, regT0, TrustedImm32(static_cast<int32_t>(~1)), regT0);
- emitTagBool(regT0);
+ emitTagAsBoolImmediate(regT0);
emitPutVirtualRegister(dst);
}
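The xor/test pair above exploits the JSVALUE64 immediate encoding, in which false is 0x06 and true is 0x07: xoring with ValueFalse maps the two booleans onto 0 and 1, and masking with ~1 detects every other value. A sketch assuming that encoding:

    #include <cstdint>

    // JSVALUE64 boolean immediates: false is 0x06, true is 0x07.
    constexpr uint64_t ValueFalse = 0x06;

    bool isBoolean(uint64_t encoded)
    {
        // The xor maps false -> 0 and true -> 1; anything else leaves bits
        // outside bit 0 set, which the ~1 mask detects.
        return !((encoded ^ ValueFalse) & ~1ull);
    }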
@@ -221,7 +208,7 @@ void JIT::emit_op_is_number(Instruction* currentInstruction)
emitGetVirtualRegister(value, regT0);
test64(NonZero, regT0, tagTypeNumberRegister, regT0);
- emitTagBool(regT0);
+ emitTagAsBoolImmediate(regT0);
emitPutVirtualRegister(dst);
}
@@ -233,8 +220,9 @@ void JIT::emit_op_is_string(Instruction* currentInstruction)
emitGetVirtualRegister(value, regT0);
Jump isNotCell = emitJumpIfNotJSCell(regT0);
- compare8(Equal, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType), regT0);
- emitTagBool(regT0);
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
+ compare8(Equal, Address(regT1, Structure::typeInfoTypeOffset()), TrustedImm32(StringType), regT0);
+ emitTagAsBoolImmediate(regT0);
Jump done = jump();
isNotCell.link(this);
@@ -244,26 +232,48 @@ void JIT::emit_op_is_string(Instruction* currentInstruction)
emitPutVirtualRegister(dst);
}
-void JIT::emit_op_is_object(Instruction* currentInstruction)
+void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
{
- int dst = currentInstruction[1].u.operand;
- int value = currentInstruction[2].u.operand;
+ int activation = currentInstruction[1].u.operand;
+ Jump activationNotCreated = branchTest64(Zero, addressFor(activation));
+ emitGetVirtualRegister(activation, regT0);
+ callOperation(operationTearOffActivation, regT0);
+ activationNotCreated.link(this);
+}
- emitGetVirtualRegister(value, regT0);
- Jump isNotCell = emitJumpIfNotJSCell(regT0);
+void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction)
+{
+ int arguments = currentInstruction[1].u.operand;
+ int activation = currentInstruction[2].u.operand;
- compare8(AboveOrEqual, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType), regT0);
- emitTagBool(regT0);
- Jump done = jump();
+ Jump argsNotCreated = branchTest64(Zero, Address(callFrameRegister, sizeof(Register) * (unmodifiedArgumentsRegister(VirtualRegister(arguments)).offset())));
+ emitGetVirtualRegister(unmodifiedArgumentsRegister(VirtualRegister(arguments)).offset(), regT0);
+ emitGetVirtualRegister(activation, regT1);
+ callOperation(operationTearOffArguments, regT0, regT1);
+ argsNotCreated.link(this);
+}
- isNotCell.link(this);
- move(TrustedImm32(ValueFalse), regT0);
+void JIT::emit_op_ret(Instruction* currentInstruction)
+{
+ ASSERT(callFrameRegister != regT1);
+ ASSERT(regT1 != returnValueGPR);
+ ASSERT(returnValueGPR != callFrameRegister);
- done.link(this);
- emitPutVirtualRegister(dst);
+    // Return the result in the return value register.
+ emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueGPR);
+
+ // Grab the return address.
+ emitGetReturnPCFromCallFrameHeaderPtr(regT1);
+
+    // Restore our caller's call frame register.
+ emitGetCallerFrameFromCallFrameHeaderPtr(callFrameRegister);
+
+ // Return.
+ restoreReturnAddressBeforeReturn(regT1);
+ ret();
}
-void JIT::emit_op_ret(Instruction* currentInstruction)
+void JIT::emit_op_ret_object_or_this(Instruction* currentInstruction)
{
ASSERT(callFrameRegister != regT1);
ASSERT(regT1 != returnValueGPR);
@@ -271,10 +281,33 @@ void JIT::emit_op_ret(Instruction* currentInstruction)
    // Return the result in the return value register.
emitGetVirtualRegister(currentInstruction[1].u.operand, returnValueGPR);
+ Jump notJSCell = emitJumpIfNotJSCell(returnValueGPR);
+ loadPtr(Address(returnValueGPR, JSCell::structureOffset()), regT2);
+ Jump notObject = emitJumpIfNotObject(regT2);
+
+ // Grab the return address.
+ emitGetReturnPCFromCallFrameHeaderPtr(regT1);
- checkStackPointerAlignment();
- emitRestoreCalleeSaves();
- emitFunctionEpilogue();
+    // Restore our caller's call frame register.
+ emitGetCallerFrameFromCallFrameHeaderPtr(callFrameRegister);
+
+ // Return.
+ restoreReturnAddressBeforeReturn(regT1);
+ ret();
+
+    // Return 'this' in the return value register.
+ notJSCell.link(this);
+ notObject.link(this);
+ emitGetVirtualRegister(currentInstruction[2].u.operand, returnValueGPR);
+
+ // Grab the return address.
+ emitGetReturnPCFromCallFrameHeaderPtr(regT1);
+
+    // Restore our caller's call frame register.
+ emitGetCallerFrameFromCallFrameHeaderPtr(callFrameRegister);
+
+ // Return.
+ restoreReturnAddressBeforeReturn(regT1);
ret();
}
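op_ret_object_or_this implements the constructor-return rule: keep the returned value only when it is an object, otherwise return 'this'. The same decision as host code, with a hypothetical Value type standing in for JSValue:

    // Hypothetical stand-in for JSValue, carrying just the two predicates the
    // emitted code tests.
    struct Value {
        bool isCell;
        bool isObject;
    };

    Value retObjectOrThis(const Value& result, const Value& thisValue)
    {
        if (result.isCell && result.isObject)
            return result; // the returned object wins
        return thisValue;  // notJSCell / notObject both return 'this'
    }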
@@ -286,7 +319,7 @@ void JIT::emit_op_to_primitive(Instruction* currentInstruction)
emitGetVirtualRegister(src, regT0);
Jump isImm = emitJumpIfNotJSCell(regT0);
- addSlowCase(emitJumpIfCellObject(regT0));
+ addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
isImm.link(this);
if (dst != src)
@@ -320,7 +353,7 @@ void JIT::emit_op_jfalse(Instruction* currentInstruction)
emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
addJump(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsNumber(0)))), target);
- Jump isNonZero = emitJumpIfInt(regT0);
+ Jump isNonZero = emitJumpIfImmediateInteger(regT0);
addJump(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsBoolean(false)))), target);
addSlowCase(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(jsBoolean(true)))));
@@ -337,8 +370,8 @@ void JIT::emit_op_jeq_null(Instruction* currentInstruction)
Jump isImmediate = emitJumpIfNotJSCell(regT0);
// First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
- Jump isNotMasqueradesAsUndefined = branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
- emitLoadStructure(regT0, regT2, regT1);
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ Jump isNotMasqueradesAsUndefined = branchTest8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
addJump(branchPtr(Equal, Address(regT2, Structure::globalObjectOffset()), regT0), target);
Jump masqueradesGlobalObjectIsForeign = jump();
@@ -360,8 +393,8 @@ void JIT::emit_op_jneq_null(Instruction* currentInstruction)
Jump isImmediate = emitJumpIfNotJSCell(regT0);
// First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
- addJump(branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
- emitLoadStructure(regT0, regT2, regT1);
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ addJump(branchTest8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
addJump(branchPtr(NotEqual, Address(regT2, Structure::globalObjectOffset()), regT0), target);
Jump wasNotImmediate = jump();
@@ -387,9 +420,9 @@ void JIT::emit_op_jneq_ptr(Instruction* currentInstruction)
void JIT::emit_op_eq(Instruction* currentInstruction)
{
emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
- emitJumpSlowCaseIfNotInt(regT0, regT1, regT2);
+ emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
compare32(Equal, regT1, regT0, regT0);
- emitTagBool(regT0);
+ emitTagAsBoolImmediate(regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
@@ -399,7 +432,7 @@ void JIT::emit_op_jtrue(Instruction* currentInstruction)
emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
Jump isZero = branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsNumber(0))));
- addJump(emitJumpIfInt(regT0), target);
+ addJump(emitJumpIfImmediateInteger(regT0), target);
addJump(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsBoolean(true)))), target);
addSlowCase(branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(jsBoolean(false)))));
@@ -410,27 +443,150 @@ void JIT::emit_op_jtrue(Instruction* currentInstruction)
void JIT::emit_op_neq(Instruction* currentInstruction)
{
emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
- emitJumpSlowCaseIfNotInt(regT0, regT1, regT2);
+ emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
compare32(NotEqual, regT1, regT0, regT0);
- emitTagBool(regT0);
+ emitTagAsBoolImmediate(regT0);
+
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+void JIT::emit_op_bitxor(Instruction* currentInstruction)
+{
+ emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
+ emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
+ xor64(regT1, regT0);
+ emitFastArithReTagImmediate(regT0, regT0);
emitPutVirtualRegister(currentInstruction[1].u.operand);
+}
+void JIT::emit_op_bitor(Instruction* currentInstruction)
+{
+ emitGetVirtualRegisters(currentInstruction[2].u.operand, regT0, currentInstruction[3].u.operand, regT1);
+ emitJumpSlowCaseIfNotImmediateIntegers(regT0, regT1, regT2);
+ or64(regT1, regT0);
+ emitPutVirtualRegister(currentInstruction[1].u.operand);
}
void JIT::emit_op_throw(Instruction* currentInstruction)
{
ASSERT(regT0 == returnValueGPR);
- copyCalleeSavesToVMCalleeSavesBuffer();
emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
callOperationNoExceptionCheck(operationThrow, regT0);
jumpToExceptionHandler();
}
+void JIT::emit_op_get_pnames(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int i = currentInstruction[3].u.operand;
+ int size = currentInstruction[4].u.operand;
+ int breakTarget = currentInstruction[5].u.operand;
+
+ JumpList isNotObject;
+
+ emitGetVirtualRegister(base, regT0);
+ if (!m_codeBlock->isKnownNotImmediate(base))
+ isNotObject.append(emitJumpIfNotJSCell(regT0));
+ if (base != m_codeBlock->thisRegister().offset() || m_codeBlock->isStrictMode()) {
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ isNotObject.append(emitJumpIfNotObject(regT2));
+ }
+
+ // We could inline the case where you have a valid cache, but
+ // this call doesn't seem to be hot.
+ Label isObject(this);
+ callOperation(operationGetPNames, regT0);
+ emitStoreCell(dst, returnValueGPR);
+ load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3);
+ store64(tagTypeNumberRegister, addressFor(i));
+ store32(TrustedImm32(Int32Tag), intTagFor(size));
+ store32(regT3, intPayloadFor(size));
+ Jump end = jump();
+
+ isNotObject.link(this);
+ move(regT0, regT1);
+ and32(TrustedImm32(~TagBitUndefined), regT1);
+ addJump(branch32(Equal, regT1, TrustedImm32(ValueNull)), breakTarget);
+ callOperation(operationToObject, base, regT0);
+ jump().linkTo(isObject, this);
+
+ end.link(this);
+}
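The null/undefined test before the operationToObject call relies on the JSVALUE64 encoding, where null (0x02) and undefined (0x0a) differ only in TagBitUndefined (0x08); clearing that bit lets a single compare catch both. A sketch assuming those constants:

    #include <cstdint>

    constexpr uint64_t TagBitUndefined = 0x08;
    constexpr uint64_t ValueNull = 0x02;

    bool isUndefinedOrNull(uint64_t encoded)
    {
        // Clearing TagBitUndefined folds undefined (0x0a) onto null (0x02).
        return (encoded & ~TagBitUndefined) == ValueNull;
    }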
+
+void JIT::emit_op_next_pname(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int i = currentInstruction[3].u.operand;
+ int size = currentInstruction[4].u.operand;
+ int it = currentInstruction[5].u.operand;
+ int target = currentInstruction[6].u.operand;
+
+ JumpList callHasProperty;
+
+ Label begin(this);
+ load32(intPayloadFor(i), regT0);
+ Jump end = branch32(Equal, regT0, intPayloadFor(size));
+
+    // Grab the key at index i.
+ loadPtr(addressFor(it), regT1);
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2);
+
+ load64(BaseIndex(regT2, regT0, TimesEight), regT2);
+
+ emitPutVirtualRegister(dst, regT2);
+
+ // Increment i
+ add32(TrustedImm32(1), regT0);
+ store32(regT0, intPayloadFor(i));
+
+ // Verify that i is valid:
+ emitGetVirtualRegister(base, regT0);
+
+ // Test base's structure
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ callHasProperty.append(branchPtr(NotEqual, regT2, Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure)))));
+
+ // Test base's prototype chain
+ loadPtr(Address(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedPrototypeChain))), regT3);
+ loadPtr(Address(regT3, OBJECT_OFFSETOF(StructureChain, m_vector)), regT3);
+ addJump(branchTestPtr(Zero, Address(regT3)), target);
+
+ Label checkPrototype(this);
+ load64(Address(regT2, Structure::prototypeOffset()), regT2);
+ callHasProperty.append(emitJumpIfNotJSCell(regT2));
+ loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
+ callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3)));
+ addPtr(TrustedImm32(sizeof(Structure*)), regT3);
+ branchTestPtr(NonZero, Address(regT3)).linkTo(checkPrototype, this);
+
+ // Continue loop.
+ addJump(jump(), target);
+
+ // Slow case: Ask the object if i is valid.
+ callHasProperty.link(this);
+ emitGetVirtualRegister(dst, regT1);
+ callOperation(operationHasProperty, regT0, regT1);
+
+ // Test for valid key.
+ addJump(branchTest32(NonZero, regT0), target);
+ jump().linkTo(begin, this);
+
+ // End of loop.
+ end.link(this);
+}
+
void JIT::emit_op_push_with_scope(Instruction* currentInstruction)
{
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_push_with_scope);
- slowPathCall.call();
+ emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
+ callOperation(operationPushWithScope, regT0);
+}
+
+void JIT::emit_op_pop_scope(Instruction*)
+{
+ callOperation(operationPopScope);
}
void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqType type)
@@ -448,18 +604,18 @@ void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqTy
// Jump slow if either is a double. First test if it's an integer, which is fine, and then test
// if it's a double.
- Jump leftOK = emitJumpIfInt(regT0);
- addSlowCase(emitJumpIfNumber(regT0));
+ Jump leftOK = emitJumpIfImmediateInteger(regT0);
+ addSlowCase(emitJumpIfImmediateNumber(regT0));
leftOK.link(this);
- Jump rightOK = emitJumpIfInt(regT1);
- addSlowCase(emitJumpIfNumber(regT1));
+ Jump rightOK = emitJumpIfImmediateInteger(regT1);
+ addSlowCase(emitJumpIfImmediateNumber(regT1));
rightOK.link(this);
if (type == OpStrictEq)
compare64(Equal, regT1, regT0, regT0);
else
compare64(NotEqual, regT1, regT0, regT0);
- emitTagBool(regT0);
+ emitTagAsBoolImmediate(regT0);
emitPutVirtualRegister(dst);
}
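The immediate-integer and immediate-number branches above are NaN-boxing tag tests: under JSVALUE64, int32s are stored as TagTypeNumber | uint32 and doubles with a 2^48 offset added, so any set bit in the top 16 marks a number. A sketch of the three predicates, assuming that encoding:

    #include <cstdint>

    constexpr uint64_t TagTypeNumber = 0xffff000000000000ull;

    bool isInt32(uint64_t encoded)  { return (encoded & TagTypeNumber) == TagTypeNumber; }
    bool isNumber(uint64_t encoded) { return encoded & TagTypeNumber; }
    bool isDouble(uint64_t encoded) { return isNumber(encoded) && !isInt32(encoded); }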
@@ -479,64 +635,24 @@ void JIT::emit_op_to_number(Instruction* currentInstruction)
int srcVReg = currentInstruction[2].u.operand;
emitGetVirtualRegister(srcVReg, regT0);
- addSlowCase(emitJumpIfNotNumber(regT0));
+ addSlowCase(emitJumpIfNotImmediateNumber(regT0));
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
-void JIT::emit_op_to_string(Instruction* currentInstruction)
+void JIT::emit_op_push_name_scope(Instruction* currentInstruction)
{
- int srcVReg = currentInstruction[2].u.operand;
- emitGetVirtualRegister(srcVReg, regT0);
-
- addSlowCase(emitJumpIfNotJSCell(regT0));
- addSlowCase(branch8(NotEqual, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType)));
-
- emitPutVirtualRegister(currentInstruction[1].u.operand);
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
+ callOperation(operationPushNameScope, &m_codeBlock->identifier(currentInstruction[1].u.operand), regT0, currentInstruction[3].u.operand);
}
void JIT::emit_op_catch(Instruction* currentInstruction)
{
- restoreCalleeSavesFromVMCalleeSavesBuffer();
-
- move(TrustedImmPtr(m_vm), regT3);
- load64(Address(regT3, VM::callFrameForCatchOffset()), callFrameRegister);
- storePtr(TrustedImmPtr(nullptr), Address(regT3, VM::callFrameForCatchOffset()));
-
- addPtr(TrustedImm32(stackPointerOffsetFor(codeBlock()) * sizeof(Register)), callFrameRegister, stackPointerRegister);
-
- callOperationNoExceptionCheck(operationCheckIfExceptionIsUncatchableAndNotifyProfiler);
- Jump isCatchableException = branchTest32(Zero, returnValueGPR);
- jumpToExceptionHandler();
- isCatchableException.link(this);
-
move(TrustedImmPtr(m_vm), regT3);
+ load64(Address(regT3, VM::callFrameForThrowOffset()), callFrameRegister);
load64(Address(regT3, VM::exceptionOffset()), regT0);
store64(TrustedImm64(JSValue::encode(JSValue())), Address(regT3, VM::exceptionOffset()));
emitPutVirtualRegister(currentInstruction[1].u.operand);
-
- load64(Address(regT0, Exception::valueOffset()), regT0);
- emitPutVirtualRegister(currentInstruction[2].u.operand);
-}
-
-void JIT::emit_op_assert(Instruction* currentInstruction)
-{
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_assert);
- slowPathCall.call();
-}
-
-void JIT::emit_op_create_lexical_environment(Instruction* currentInstruction)
-{
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_lexical_environment);
- slowPathCall.call();
-}
-
-void JIT::emit_op_get_parent_scope(Instruction* currentInstruction)
-{
- int currentScope = currentInstruction[2].u.operand;
- emitGetVirtualRegister(currentScope, regT0);
- loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
- emitStoreCell(currentInstruction[1].u.operand, regT0);
}
void JIT::emit_op_switch_imm(Instruction* currentInstruction)
@@ -548,7 +664,7 @@ void JIT::emit_op_switch_imm(Instruction* currentInstruction)
// create jump table for switch destinations, track this switch statement.
SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex);
m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Immediate));
- jumpTable->ensureCTITable();
+ jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
emitGetVirtualRegister(scrutinee, regT0);
callOperation(operationSwitchImmWithUnknownKeyType, regT0, tableIndex);
@@ -564,7 +680,7 @@ void JIT::emit_op_switch_char(Instruction* currentInstruction)
// create jump table for switch destinations, track this switch statement.
SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex);
m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Character));
- jumpTable->ensureCTITable();
+ jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
emitGetVirtualRegister(scrutinee, regT0);
callOperation(operationSwitchCharWithUnknownKeyType, regT0, tableIndex);
@@ -608,12 +724,12 @@ void JIT::emit_op_eq_null(Instruction* currentInstruction)
emitGetVirtualRegister(src1, regT0);
Jump isImmediate = emitJumpIfNotJSCell(regT0);
- Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
move(TrustedImm32(0), regT0);
Jump wasNotMasqueradesAsUndefined = jump();
isMasqueradesAsUndefined.link(this);
- emitLoadStructure(regT0, regT2, regT1);
move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2);
comparePtr(Equal, regT0, regT2, regT0);
@@ -627,7 +743,7 @@ void JIT::emit_op_eq_null(Instruction* currentInstruction)
wasNotImmediate.link(this);
wasNotMasqueradesAsUndefined.link(this);
- emitTagBool(regT0);
+ emitTagAsBoolImmediate(regT0);
emitPutVirtualRegister(dst);
}
@@ -640,12 +756,12 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction)
emitGetVirtualRegister(src1, regT0);
Jump isImmediate = emitJumpIfNotJSCell(regT0);
- Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
move(TrustedImm32(1), regT0);
Jump wasNotMasqueradesAsUndefined = jump();
isMasqueradesAsUndefined.link(this);
- emitLoadStructure(regT0, regT2, regT1);
move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2);
comparePtr(NotEqual, regT0, regT2, regT0);
@@ -659,30 +775,53 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction)
wasNotImmediate.link(this);
wasNotMasqueradesAsUndefined.link(this);
- emitTagBool(regT0);
+ emitTagAsBoolImmediate(regT0);
emitPutVirtualRegister(dst);
}
-void JIT::emit_op_enter(Instruction*)
+void JIT::emit_op_enter(Instruction* currentInstruction)
{
+ emitEnterOptimizationCheck();
+
// Even though CTI doesn't use them, we initialize our constant
// registers to zap stale pointers, to avoid unnecessarily prolonging
// object lifetime and increasing GC pressure.
size_t count = m_codeBlock->m_numVars;
- for (size_t j = CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters(); j < count; ++j)
+ for (size_t j = 0; j < count; ++j)
emitInitRegister(virtualRegisterForLocal(j).offset());
- emitWriteBarrier(m_codeBlock);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_enter);
+ slowPathCall.call();
+}
- emitEnterOptimizationCheck();
+void JIT::emit_op_create_activation(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+
+ Jump activationCreated = branchTest64(NonZero, Address(callFrameRegister, sizeof(Register) * dst));
+ callOperation(operationCreateActivation, 0);
+ emitStoreCell(dst, returnValueGPR);
+ activationCreated.link(this);
}
-void JIT::emit_op_get_scope(Instruction* currentInstruction)
+void JIT::emit_op_create_arguments(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
- emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT0);
- loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT0);
- emitStoreCell(dst, regT0);
+
+ Jump argsCreated = branchTest64(NonZero, Address(callFrameRegister, sizeof(Register) * dst));
+
+ callOperation(operationCreateArguments);
+ emitStoreCell(dst, returnValueGPR);
+ emitStoreCell(unmodifiedArgumentsRegister(VirtualRegister(dst)), returnValueGPR);
+
+ argsCreated.link(this);
+}
+
+void JIT::emit_op_init_lazy_reg(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+
+ store64(TrustedImm64((int64_t)0), Address(callFrameRegister, sizeof(Register) * dst));
}
void JIT::emit_op_to_this(Instruction* currentInstruction)
@@ -691,66 +830,60 @@ void JIT::emit_op_to_this(Instruction* currentInstruction)
emitGetVirtualRegister(currentInstruction[1].u.operand, regT1);
emitJumpSlowCaseIfNotJSCell(regT1);
+ loadPtr(Address(regT1, JSCell::structureOffset()), regT0);
- addSlowCase(branch8(NotEqual, Address(regT1, JSCell::typeInfoTypeOffset()), TrustedImm32(FinalObjectType)));
+ addSlowCase(branch8(NotEqual, Address(regT0, Structure::typeInfoTypeOffset()), TrustedImm32(FinalObjectType)));
loadPtr(cachedStructure, regT2);
- addSlowCase(branchTestPtr(Zero, regT2));
- load32(Address(regT2, Structure::structureIDOffset()), regT2);
- addSlowCase(branch32(NotEqual, Address(regT1, JSCell::structureIDOffset()), regT2));
+ addSlowCase(branchPtr(NotEqual, regT0, regT2));
+}
+
+void JIT::emit_op_get_callee(Instruction* currentInstruction)
+{
+ int result = currentInstruction[1].u.operand;
+ WriteBarrierBase<JSCell>* cachedFunction = &currentInstruction[2].u.jsCell;
+ emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT0);
+
+ loadPtr(cachedFunction, regT2);
+ addSlowCase(branchPtr(NotEqual, regT0, regT2));
+
+ emitPutVirtualRegister(result);
+}
+
+void JIT::emitSlow_op_get_callee(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_callee);
+ slowPathCall.call();
}
void JIT::emit_op_create_this(Instruction* currentInstruction)
{
int callee = currentInstruction[2].u.operand;
- WriteBarrierBase<JSCell>* cachedFunction = &currentInstruction[4].u.jsCell;
RegisterID calleeReg = regT0;
- RegisterID rareDataReg = regT4;
RegisterID resultReg = regT0;
RegisterID allocatorReg = regT1;
RegisterID structureReg = regT2;
- RegisterID cachedFunctionReg = regT4;
RegisterID scratchReg = regT3;
emitGetVirtualRegister(callee, calleeReg);
- loadPtr(Address(calleeReg, JSFunction::offsetOfRareData()), rareDataReg);
- addSlowCase(branchTestPtr(Zero, rareDataReg));
- loadPtr(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg);
- loadPtr(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureReg);
+ loadPtr(Address(calleeReg, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg);
+ loadPtr(Address(calleeReg, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureReg);
addSlowCase(branchTestPtr(Zero, allocatorReg));
- loadPtr(cachedFunction, cachedFunctionReg);
- Jump hasSeenMultipleCallees = branchPtr(Equal, cachedFunctionReg, TrustedImmPtr(JSCell::seenMultipleCalleeObjects()));
- addSlowCase(branchPtr(NotEqual, calleeReg, cachedFunctionReg));
- hasSeenMultipleCallees.link(this);
-
emitAllocateJSObject(allocatorReg, structureReg, resultReg, scratchReg);
emitPutVirtualRegister(currentInstruction[1].u.operand);
}
void JIT::emitSlow_op_create_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- linkSlowCase(iter); // doesn't have rare data
linkSlowCase(iter); // doesn't have an allocation profile
linkSlowCase(iter); // allocation failed
- linkSlowCase(iter); // cached function didn't match
JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_this);
slowPathCall.call();
}
-void JIT::emit_op_check_tdz(Instruction* currentInstruction)
-{
- emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
- addSlowCase(branchTest64(Zero, regT0));
-}
-
-void JIT::emitSlow_op_check_tdz(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_throw_tdz_error);
- slowPathCall.call();
-}
-
void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
{
Jump profilerDone = branchTestPtr(Zero, AbsoluteAddress(m_vm->enabledProfilerAddress()));
@@ -775,7 +908,6 @@ void JIT::emitSlow_op_to_this(Instruction* currentInstruction, Vector<SlowCaseEn
linkSlowCase(iter);
linkSlowCase(iter);
linkSlowCase(iter);
- linkSlowCase(iter);
JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_this);
slowPathCall.call();
@@ -811,11 +943,25 @@ void JIT::emitSlow_op_jtrue(Instruction* currentInstruction, Vector<SlowCaseEntr
emitJumpSlowToHot(branchTest32(NonZero, returnValueGPR), currentInstruction[2].u.operand);
}
+void JIT::emitSlow_op_bitxor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitxor);
+ slowPathCall.call();
+}
+
+void JIT::emitSlow_op_bitor(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_bitor);
+ slowPathCall.call();
+}
+
void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
linkSlowCase(iter);
callOperation(operationCompareEq, regT0, regT1);
- emitTagBool(returnValueGPR);
+ emitTagAsBoolImmediate(returnValueGPR);
emitPutVirtualRegister(currentInstruction[1].u.operand, returnValueGPR);
}
@@ -824,7 +970,7 @@ void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>
linkSlowCase(iter);
callOperation(operationCompareEq, regT0, regT1);
xor32(TrustedImm32(0x1), regT0);
- emitTagBool(returnValueGPR);
+ emitTagAsBoolImmediate(returnValueGPR);
emitPutVirtualRegister(currentInstruction[1].u.operand, returnValueGPR);
}
@@ -846,34 +992,33 @@ void JIT::emitSlow_op_nstricteq(Instruction* currentInstruction, Vector<SlowCase
slowPathCall.call();
}
-void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_check_has_instance(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
int dst = currentInstruction[1].u.operand;
int value = currentInstruction[2].u.operand;
- int proto = currentInstruction[3].u.operand;
+ int baseVal = currentInstruction[3].u.operand;
- linkSlowCaseIfNotJSCell(iter, value);
- linkSlowCaseIfNotJSCell(iter, proto);
+ linkSlowCaseIfNotJSCell(iter, baseVal);
linkSlowCase(iter);
emitGetVirtualRegister(value, regT0);
- emitGetVirtualRegister(proto, regT1);
- callOperation(operationInstanceOf, dst, regT0, regT1);
+ emitGetVirtualRegister(baseVal, regT1);
+ callOperation(operationCheckHasInstance, dst, regT0, regT1);
+
+ emitJumpSlowToHot(jump(), currentInstruction[4].u.operand);
}
-void JIT::emitSlow_op_instanceof_custom(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
int dst = currentInstruction[1].u.operand;
int value = currentInstruction[2].u.operand;
- int constructor = currentInstruction[3].u.operand;
- int hasInstanceValue = currentInstruction[4].u.operand;
+ int proto = currentInstruction[3].u.operand;
+ linkSlowCaseIfNotJSCell(iter, value);
+ linkSlowCaseIfNotJSCell(iter, proto);
linkSlowCase(iter);
emitGetVirtualRegister(value, regT0);
- emitGetVirtualRegister(constructor, regT1);
- emitGetVirtualRegister(hasInstanceValue, regT2);
- callOperation(operationInstanceOfCustom, regT0, regT1, regT2);
- emitTagBool(returnValueGPR);
- emitPutVirtualRegister(dst, returnValueGPR);
+ emitGetVirtualRegister(proto, regT1);
+ callOperation(operationInstanceOf, dst, regT0, regT1);
}
void JIT::emitSlow_op_to_number(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
@@ -884,63 +1029,125 @@ void JIT::emitSlow_op_to_number(Instruction* currentInstruction, Vector<SlowCase
slowPathCall.call();
}
-void JIT::emitSlow_op_to_string(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emit_op_get_arguments_length(Instruction* currentInstruction)
{
- linkSlowCase(iter); // Not JSCell.
- linkSlowCase(iter); // Not JSString.
+ int dst = currentInstruction[1].u.operand;
+ int argumentsRegister = currentInstruction[2].u.operand;
+ addSlowCase(branchTest64(NonZero, addressFor(argumentsRegister)));
+ emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT0);
+ sub32(TrustedImm32(1), regT0);
+ emitFastArithReTagImmediate(regT0, regT0);
+ emitPutVirtualRegister(dst, regT0);
+}
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_string);
- slowPathCall.call();
+void JIT::emitSlow_op_get_arguments_length(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ callOperation(operationGetArgumentsLength, dst, base);
+}
+
+void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int argumentsRegister = currentInstruction[2].u.operand;
+ int property = currentInstruction[3].u.operand;
+ addSlowCase(branchTest64(NonZero, addressFor(argumentsRegister)));
+ emitGetVirtualRegister(property, regT1);
+ addSlowCase(emitJumpIfNotImmediateInteger(regT1));
+ add32(TrustedImm32(1), regT1);
+ // regT1 now contains the integer index of the argument we want, including this
+ emitGetFromCallFrameHeader32(JSStack::ArgumentCount, regT2);
+ addSlowCase(branch32(AboveOrEqual, regT1, regT2));
+
+ signExtend32ToPtr(regT1, regT1);
+ load64(BaseIndex(callFrameRegister, regT1, TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT0);
+ emitValueProfilingSite();
+ emitPutVirtualRegister(dst, regT0);
+}
+
+void JIT::emitSlow_op_get_argument_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int dst = currentInstruction[1].u.operand;
+ int arguments = currentInstruction[2].u.operand;
+ int property = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter);
+ Jump skipArgumentsCreation = jump();
+
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+ callOperation(operationCreateArguments);
+ emitStoreCell(arguments, returnValueGPR);
+ emitStoreCell(unmodifiedArgumentsRegister(VirtualRegister(arguments)), returnValueGPR);
+
+ skipArgumentsCreation.link(this);
+ emitGetVirtualRegister(arguments, regT0);
+ emitGetVirtualRegister(property, regT1);
+ callOperation(WithProfile, operationGetByValGeneric, dst, regT0, regT1);
}
#endif // USE(JSVALUE64)
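The get_argument_by_val fast path adds one to the requested index (to step over 'this'), bounds-checks it against the frame's argument count, and loads the slot directly; each addSlowCase above corresponds to a bail-out here. A host-code sketch of that logic, with a hypothetical slot-array view of the call frame:

    #include <cstdint>
    #include <optional>

    // 'slots' is a hypothetical view of the call frame starting at the 'this'
    // argument; 'argumentCountIncludingThis' mirrors JSStack::ArgumentCount.
    std::optional<uint64_t> getArgumentByVal(const uint64_t* slots, uint32_t argumentCountIncludingThis, int32_t property)
    {
        uint32_t index = static_cast<uint32_t>(property) + 1; // step over 'this'
        if (index >= argumentCountIncludingThis)
            return std::nullopt; // out of bounds: slow path
        return slots[index];
    }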
+void JIT::emit_op_touch_entry(Instruction* currentInstruction)
+{
+ if (m_codeBlock->symbolTable()->m_functionEnteredOnce.hasBeenInvalidated())
+ return;
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_touch_entry);
+ slowPathCall.call();
+}
+
void JIT::emit_op_loop_hint(Instruction*)
{
// Emit the JIT optimization check:
if (canBeOptimized()) {
- addSlowCase(branchAdd32(PositiveOrZero, TrustedImm32(Options::executionCounterIncrementForLoop()),
- AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
+ if (Options::enableOSREntryInLoops()) {
+ addSlowCase(branchAdd32(PositiveOrZero, TrustedImm32(Options::executionCounterIncrementForLoop()),
+ AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())));
+ } else {
+ // Add with saturation.
+ move(TrustedImmPtr(m_codeBlock->addressOfJITExecuteCounter()), regT3);
+ load32(regT3, regT2);
+ Jump dontAdd = branch32(
+ GreaterThan, regT2,
+ TrustedImm32(std::numeric_limits<int32_t>::max() - Options::executionCounterIncrementForLoop()));
+ add32(TrustedImm32(Options::executionCounterIncrementForLoop()), regT2);
+ store32(regT2, regT3);
+ dontAdd.link(this);
+ }
}
+
+ // Emit the watchdog timer check:
+ if (m_vm->watchdog.isEnabled())
+ addSlowCase(branchTest8(NonZero, AbsoluteAddress(m_vm->watchdog.timerDidFireAddress())));
}
void JIT::emitSlow_op_loop_hint(Instruction*, Vector<SlowCaseEntry>::iterator& iter)
{
#if ENABLE(DFG_JIT)
// Emit the slow path for the JIT optimization check:
- if (canBeOptimized()) {
+ if (canBeOptimized() && Options::enableOSREntryInLoops()) {
linkSlowCase(iter);
-
- copyCalleeSavesFromFrameOrRegisterToVMCalleeSavesBuffer();
-
+
callOperation(operationOptimize, m_bytecodeOffset);
Jump noOptimizedEntry = branchTestPtr(Zero, returnValueGPR);
- if (!ASSERT_DISABLED) {
- Jump ok = branchPtr(MacroAssembler::Above, returnValueGPR, TrustedImmPtr(bitwise_cast<void*>(static_cast<intptr_t>(1000))));
- abortWithReason(JITUnreasonableLoopHintJumpTarget);
- ok.link(this);
- }
jump(returnValueGPR);
noOptimizedEntry.link(this);
emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_loop_hint));
}
-#else
- UNUSED_PARAM(iter);
#endif
-}
-void JIT::emit_op_watchdog(Instruction*)
-{
- ASSERT(m_vm->watchdog());
- addSlowCase(branchTest8(NonZero, AbsoluteAddress(m_vm->watchdog()->timerDidFireAddress())));
-}
+ // Emit the slow path of the watchdog timer check:
+ if (m_vm->watchdog.isEnabled()) {
+ linkSlowCase(iter);
+ callOperation(operationHandleWatchdogTimer);
+
+ emitJumpSlowToHot(jump(), OPCODE_LENGTH(op_loop_hint));
+ }
-void JIT::emitSlow_op_watchdog(Instruction*, Vector<SlowCaseEntry>::iterator& iter)
-{
- ASSERT(m_vm->watchdog());
- linkSlowCase(iter);
- callOperation(operationHandleWatchdogTimer);
}
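The non-OSR branch of emit_op_loop_hint increments the execute counter with saturation so that a long-running loop cannot wrap the int32 counter. The same logic as scalar code:

    #include <cstdint>
    #include <limits>

    void addWithSaturation(int32_t& counter, int32_t increment)
    {
        // Mirrors the branch32(GreaterThan, ...) guard: skip the add once the
        // counter is within 'increment' of INT32_MAX.
        if (counter > std::numeric_limits<int32_t>::max() - increment)
            return;
        counter += increment;
    }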
void JIT::emit_op_new_regexp(Instruction* currentInstruction)
@@ -948,81 +1155,38 @@ void JIT::emit_op_new_regexp(Instruction* currentInstruction)
callOperation(operationNewRegexp, currentInstruction[1].u.operand, m_codeBlock->regexp(currentInstruction[2].u.operand));
}
-void JIT::emitNewFuncCommon(Instruction* currentInstruction)
+void JIT::emit_op_new_func(Instruction* currentInstruction)
{
Jump lazyJump;
int dst = currentInstruction[1].u.operand;
-
-#if USE(JSVALUE64)
- emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
+ if (currentInstruction[3].u.operand) {
+#if USE(JSVALUE32_64)
+ lazyJump = branch32(NotEqual, tagFor(dst), TrustedImm32(JSValue::EmptyValueTag));
#else
- emitLoadPayload(currentInstruction[2].u.operand, regT0);
+ lazyJump = branchTest64(NonZero, addressFor(dst));
#endif
- FunctionExecutable* funcExec = m_codeBlock->functionDecl(currentInstruction[3].u.operand);
-
- OpcodeID opcodeID = m_vm->interpreter->getOpcodeID(currentInstruction->u.opcode);
- if (opcodeID == op_new_func)
- callOperation(operationNewFunction, dst, regT0, funcExec);
- else {
- ASSERT(opcodeID == op_new_generator_func);
- callOperation(operationNewGeneratorFunction, dst, regT0, funcExec);
}
-}
-void JIT::emit_op_new_func(Instruction* currentInstruction)
-{
- emitNewFuncCommon(currentInstruction);
-}
+ FunctionExecutable* funcExec = m_codeBlock->functionDecl(currentInstruction[2].u.operand);
+ callOperation(operationNewFunction, dst, funcExec);
-void JIT::emit_op_new_generator_func(Instruction* currentInstruction)
-{
- emitNewFuncCommon(currentInstruction);
+ if (currentInstruction[3].u.operand)
+ lazyJump.link(this);
}
-void JIT::emitNewFuncExprCommon(Instruction* currentInstruction)
+void JIT::emit_op_new_captured_func(Instruction* currentInstruction)
{
- Jump notUndefinedScope;
- int dst = currentInstruction[1].u.operand;
-#if USE(JSVALUE64)
- emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
- notUndefinedScope = branch64(NotEqual, regT0, TrustedImm64(JSValue::encode(jsUndefined())));
- store64(TrustedImm64(JSValue::encode(jsUndefined())), Address(callFrameRegister, sizeof(Register) * dst));
-#else
- emitLoadPayload(currentInstruction[2].u.operand, regT0);
- notUndefinedScope = branch32(NotEqual, tagFor(currentInstruction[2].u.operand), TrustedImm32(JSValue::UndefinedTag));
- emitStore(dst, jsUndefined());
-#endif
- Jump done = jump();
- notUndefinedScope.link(this);
-
- FunctionExecutable* function = m_codeBlock->functionExpr(currentInstruction[3].u.operand);
- OpcodeID opcodeID = m_vm->interpreter->getOpcodeID(currentInstruction->u.opcode);
-
- if (opcodeID == op_new_func_exp || opcodeID == op_new_arrow_func_exp)
- callOperation(operationNewFunction, dst, regT0, function);
- else {
- ASSERT(opcodeID == op_new_generator_func_exp);
- callOperation(operationNewGeneratorFunction, dst, regT0, function);
- }
-
- done.link(this);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_new_captured_func);
+ slowPathCall.call();
}
void JIT::emit_op_new_func_exp(Instruction* currentInstruction)
{
- emitNewFuncExprCommon(currentInstruction);
-}
-
-void JIT::emit_op_new_generator_func_exp(Instruction* currentInstruction)
-{
- emitNewFuncExprCommon(currentInstruction);
+ int dst = currentInstruction[1].u.operand;
+ FunctionExecutable* funcExpr = m_codeBlock->functionExpr(currentInstruction[2].u.operand);
+ callOperation(operationNewFunction, dst, funcExpr);
}
-void JIT::emit_op_new_arrow_func_exp(Instruction* currentInstruction)
-{
- emitNewFuncExprCommon(currentInstruction);
-}
-
void JIT::emit_op_new_array(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
@@ -1057,387 +1221,13 @@ void JIT::emit_op_new_array_buffer(Instruction* currentInstruction)
callOperation(operationNewArrayBufferWithProfile, dst, currentInstruction[4].u.arrayAllocationProfile, values, size);
}
-#if USE(JSVALUE64)
-void JIT::emit_op_has_structure_property(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int enumerator = currentInstruction[4].u.operand;
-
- emitGetVirtualRegister(base, regT0);
- emitGetVirtualRegister(enumerator, regT1);
- emitJumpSlowCaseIfNotJSCell(regT0, base);
-
- load32(Address(regT0, JSCell::structureIDOffset()), regT0);
- addSlowCase(branch32(NotEqual, regT0, Address(regT1, JSPropertyNameEnumerator::cachedStructureIDOffset())));
-
- move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0);
- emitPutVirtualRegister(dst);
-}
-
-void JIT::privateCompileHasIndexedProperty(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
-{
- Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;
-
- PatchableJump badType;
-
- // FIXME: Add support for other types like TypedArrays and Arguments.
- // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034.
- JumpList slowCases = emitLoadForArrayMode(currentInstruction, arrayMode, badType);
- move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0);
- Jump done = jump();
-
- LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
-
- patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
- patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
-
- patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
-
- byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
- m_codeBlock, patchBuffer,
- ("Baseline has_indexed_property stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
-
- MacroAssembler::repatchJump(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
- MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(operationHasIndexedPropertyGeneric));
-}
-
-void JIT::emit_op_has_indexed_property(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int property = currentInstruction[3].u.operand;
- ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
- ByValInfo* byValInfo = m_codeBlock->addByValInfo();
-
- emitGetVirtualRegisters(base, regT0, property, regT1);
-
- // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
- // We check the value as if it were a uint32 against m_vectorLength - which will always fail if the
- // number was negative, since m_vectorLength is always less than INT_MAX (the total allocation
- // size is always less than 4GB). As such, zero-extending will have been correct (and extending the
- // value to 64 bits is necessary since it's used in the address calculation). We zero-extend rather
- // than sign-extend since it makes it easier to re-tag the value in the slow case.
- zeroExtend32ToPtr(regT1, regT1);
-
- emitJumpSlowCaseIfNotJSCell(regT0, base);
- emitArrayProfilingSiteWithCell(regT0, regT2, profile);
- and32(TrustedImm32(IndexingShapeMask), regT2);
-
- JITArrayMode mode = chooseArrayMode(profile);
- PatchableJump badType;
-
- // FIXME: Add support for other types like TypedArrays and Arguments.
- // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034.
- JumpList slowCases = emitLoadForArrayMode(currentInstruction, mode, badType);
-
- move(TrustedImm64(JSValue::encode(jsBoolean(true))), regT0);
-
- addSlowCase(badType);
- addSlowCase(slowCases);
-
- Label done = label();
-
- emitPutVirtualRegister(dst);
-
- Label nextHotPath = label();
-
- m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, PatchableJump(), badType, mode, profile, done, nextHotPath));
-}
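
The zero-extension comment above is the whole trick: comparing the index as an
unsigned 32-bit value against the vector length rejects negative indices for free.
A standalone restatement under that assumption (plain C++, no JSC types):

    #include <cassert>
    #include <cstdint>

    // vectorLength is always < INT_MAX, so any negative int32, viewed as a
    // uint32, is >= 0x80000000 and fails the unsigned bounds check.
    bool inBounds(int32_t index, uint32_t vectorLength)
    {
        return static_cast<uint32_t>(index) < vectorLength;
    }

    int main()
    {
        assert(inBounds(3, 10));
        assert(!inBounds(-1, 10)); // 0xFFFFFFFFu >= 10
        assert(!inBounds(10, 10));
    }
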
-
-void JIT::emitSlow_op_has_indexed_property(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int property = currentInstruction[3].u.operand;
- ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;
-
- linkSlowCaseIfNotJSCell(iter, base); // base cell check
- linkSlowCase(iter); // base array check
- linkSlowCase(iter); // read barrier
- linkSlowCase(iter); // vector length check
- linkSlowCase(iter); // empty value
-
- Label slowPath = label();
-
- emitGetVirtualRegister(base, regT0);
- emitGetVirtualRegister(property, regT1);
- Call call = callOperation(operationHasIndexedPropertyDefault, dst, regT0, regT1, byValInfo);
-
- m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
- m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
- m_byValInstructionIndex++;
-}
-
-void JIT::emit_op_get_direct_pname(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int index = currentInstruction[4].u.operand;
- int enumerator = currentInstruction[5].u.operand;
-
- // Check that base is a cell
- emitGetVirtualRegister(base, regT0);
- emitJumpSlowCaseIfNotJSCell(regT0, base);
-
- // Check the structure
- emitGetVirtualRegister(enumerator, regT2);
- load32(Address(regT0, JSCell::structureIDOffset()), regT1);
- addSlowCase(branch32(NotEqual, regT1, Address(regT2, JSPropertyNameEnumerator::cachedStructureIDOffset())));
-
- // Compute the offset
- emitGetVirtualRegister(index, regT1);
- // If index is less than the enumerator's cached inline storage, then it's an inline access
- Jump outOfLineAccess = branch32(AboveOrEqual, regT1, Address(regT2, JSPropertyNameEnumerator::cachedInlineCapacityOffset()));
- addPtr(TrustedImm32(JSObject::offsetOfInlineStorage()), regT0);
- signExtend32ToPtr(regT1, regT1);
- load64(BaseIndex(regT0, regT1, TimesEight), regT0);
-
- Jump done = jump();
-
- // Otherwise it's out of line
- outOfLineAccess.link(this);
- loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
- addSlowCase(branchIfNotToSpace(regT0));
- sub32(Address(regT2, JSPropertyNameEnumerator::cachedInlineCapacityOffset()), regT1);
- neg32(regT1);
- signExtend32ToPtr(regT1, regT1);
- int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue);
- load64(BaseIndex(regT0, regT1, TimesEight, offsetOfFirstProperty), regT0);
-
- done.link(this);
- emitValueProfilingSite();
- emitPutVirtualRegister(dst, regT0);
-}
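
The deleted get_direct_pname fast path splits property reads between storage that
lives inside the object (index below the cached inline capacity) and the
out-of-line "butterfly" reached through butterflyOffset(). A simplified model of
that split, with illustrative containers standing in for JSC's real layout:

    #include <cstdint>
    #include <vector>

    struct Object {
        std::vector<uint64_t> inlineStorage;    // fixed slots inside the object
        std::vector<uint64_t> outOfLineStorage; // the "butterfly", grown on demand
    };

    uint64_t getDirectProperty(const Object& o, uint32_t index, uint32_t inlineCapacity)
    {
        if (index < inlineCapacity)
            return o.inlineStorage[index];                 // inline access
        return o.outOfLineStorage[index - inlineCapacity]; // out-of-line access
    }
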
-
-void JIT::emitSlow_op_get_direct_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- int base = currentInstruction[2].u.operand;
- linkSlowCaseIfNotJSCell(iter, base);
- linkSlowCase(iter);
- linkSlowCase(iter);
-
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_direct_pname);
- slowPathCall.call();
-}
-
-void JIT::emit_op_enumerator_structure_pname(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int enumerator = currentInstruction[2].u.operand;
- int index = currentInstruction[3].u.operand;
-
- emitGetVirtualRegister(index, regT0);
- emitGetVirtualRegister(enumerator, regT1);
- Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endStructurePropertyIndexOffset()));
-
- move(TrustedImm64(JSValue::encode(jsNull())), regT0);
-
- Jump done = jump();
- inBounds.link(this);
-
- loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1);
- signExtend32ToPtr(regT0, regT0);
- load64(BaseIndex(regT1, regT0, TimesEight), regT0);
-
- done.link(this);
- emitPutVirtualRegister(dst);
-}
-
-void JIT::emit_op_enumerator_generic_pname(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int enumerator = currentInstruction[2].u.operand;
- int index = currentInstruction[3].u.operand;
-
- emitGetVirtualRegister(index, regT0);
- emitGetVirtualRegister(enumerator, regT1);
- Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endGenericPropertyIndexOffset()));
-
- move(TrustedImm64(JSValue::encode(jsNull())), regT0);
-
- Jump done = jump();
- inBounds.link(this);
-
- loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1);
- signExtend32ToPtr(regT0, regT0);
- load64(BaseIndex(regT1, regT0, TimesEight), regT0);
-
- done.link(this);
- emitPutVirtualRegister(dst);
-}
-
-void JIT::emit_op_profile_type(Instruction* currentInstruction)
-{
- TypeLocation* cachedTypeLocation = currentInstruction[2].u.location;
- int valueToProfile = currentInstruction[1].u.operand;
-
- emitGetVirtualRegister(valueToProfile, regT0);
-
- JumpList jumpToEnd;
-
- jumpToEnd.append(branchTest64(Zero, regT0));
-
- // Compile in a predictive type check, if possible, to see if we can skip writing to the log.
- // These typechecks are inlined to match those of the 64-bit JSValue type checks.
- if (cachedTypeLocation->m_lastSeenType == TypeUndefined)
- jumpToEnd.append(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsUndefined()))));
- else if (cachedTypeLocation->m_lastSeenType == TypeNull)
- jumpToEnd.append(branch64(Equal, regT0, TrustedImm64(JSValue::encode(jsNull()))));
- else if (cachedTypeLocation->m_lastSeenType == TypeBoolean) {
- move(regT0, regT1);
- and64(TrustedImm32(~1), regT1);
- jumpToEnd.append(branch64(Equal, regT1, TrustedImm64(ValueFalse)));
- } else if (cachedTypeLocation->m_lastSeenType == TypeMachineInt)
- jumpToEnd.append(emitJumpIfInt(regT0));
- else if (cachedTypeLocation->m_lastSeenType == TypeNumber)
- jumpToEnd.append(emitJumpIfNumber(regT0));
- else if (cachedTypeLocation->m_lastSeenType == TypeString) {
- Jump isNotCell = emitJumpIfNotJSCell(regT0);
- jumpToEnd.append(branch8(Equal, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType)));
- isNotCell.link(this);
- }
-
- // Load the type profiling log into T2.
- TypeProfilerLog* cachedTypeProfilerLog = m_vm->typeProfilerLog();
- move(TrustedImmPtr(cachedTypeProfilerLog), regT2);
- // Load the next log entry into T1.
- loadPtr(Address(regT2, TypeProfilerLog::currentLogEntryOffset()), regT1);
-
- // Store the JSValue onto the log entry.
- store64(regT0, Address(regT1, TypeProfilerLog::LogEntry::valueOffset()));
-
- // Store the structureID of the cell if T0 is a cell; otherwise, store 0 in the log entry.
- Jump notCell = emitJumpIfNotJSCell(regT0);
- load32(Address(regT0, JSCell::structureIDOffset()), regT0);
- store32(regT0, Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset()));
- Jump skipIsCell = jump();
- notCell.link(this);
- store32(TrustedImm32(0), Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset()));
- skipIsCell.link(this);
-
- // Store the typeLocation on the log entry.
- move(TrustedImmPtr(cachedTypeLocation), regT0);
- store64(regT0, Address(regT1, TypeProfilerLog::LogEntry::locationOffset()));
-
- // Increment the current log entry.
- addPtr(TrustedImm32(sizeof(TypeProfilerLog::LogEntry)), regT1);
- store64(regT1, Address(regT2, TypeProfilerLog::currentLogEntryOffset()));
- Jump skipClearLog = branchPtr(NotEqual, regT1, TrustedImmPtr(cachedTypeProfilerLog->logEndPtr()));
- // Clear the log if we're at the end of the log.
- callOperation(operationProcessTypeProfilerLog);
- skipClearLog.link(this);
-
- jumpToEnd.link(this);
-}
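
The deleted emit_op_profile_type body follows a simple log protocol: append a
(value, structureID, location) entry at the current cursor, bump the cursor, and
flush when the cursor reaches the end of the fixed-size buffer. A sketch of that
protocol with hypothetical types (the real log is filled by emitted machine code):

    #include <cstddef>
    #include <cstdint>

    struct LogEntry { uint64_t value; uint32_t structureID; const void* location; };

    struct TypeProfilerLog {
        static constexpr size_t capacity = 1024; // illustrative size
        LogEntry entries[capacity];
        LogEntry* current = entries;

        void processAndClear() { current = entries; } // stand-in for the runtime call

        void record(uint64_t value, uint32_t structureID, const void* location)
        {
            *current++ = { value, structureID, location };
            if (current == entries + capacity) // reached logEndPtr: flush
                processAndClear();
        }
    };
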
-
-#endif // USE(JSVALUE64)
-
-void JIT::emit_op_get_enumerable_length(Instruction* currentInstruction)
-{
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_enumerable_length);
- slowPathCall.call();
-}
-
-void JIT::emitSlow_op_has_structure_property(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_captured_mov(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
+ VariableWatchpointSet* set = currentInstruction[3].u.watchpointSet;
+ if (!set || set->state() == IsInvalidated)
+ return;
linkSlowCase(iter);
- linkSlowCase(iter);
-
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_has_structure_property);
- slowPathCall.call();
-}
-
-void JIT::emit_op_has_generic_property(Instruction* currentInstruction)
-{
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_has_generic_property);
- slowPathCall.call();
-}
-
-void JIT::emit_op_get_property_enumerator(Instruction* currentInstruction)
-{
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_property_enumerator);
- slowPathCall.call();
-}
-
-void JIT::emit_op_to_index_string(Instruction* currentInstruction)
-{
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_index_string);
- slowPathCall.call();
-}
-
-void JIT::emit_op_profile_control_flow(Instruction* currentInstruction)
-{
- BasicBlockLocation* basicBlockLocation = currentInstruction[1].u.basicBlockLocation;
-#if USE(JSVALUE64)
- basicBlockLocation->emitExecuteCode(*this);
-#else
- basicBlockLocation->emitExecuteCode(*this, regT0);
-#endif
-}
-
-void JIT::emit_op_create_direct_arguments(Instruction* currentInstruction)
-{
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_direct_arguments);
- slowPathCall.call();
-}
-
-void JIT::emit_op_create_scoped_arguments(Instruction* currentInstruction)
-{
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_scoped_arguments);
- slowPathCall.call();
-}
-
-void JIT::emit_op_create_out_of_band_arguments(Instruction* currentInstruction)
-{
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_out_of_band_arguments);
- slowPathCall.call();
-}
-
-void JIT::emit_op_copy_rest(Instruction* currentInstruction)
-{
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_copy_rest);
- slowPathCall.call();
-}
-
-void JIT::emit_op_get_rest_length(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- unsigned numParamsToSkip = currentInstruction[2].u.unsignedValue;
- load32(payloadFor(JSStack::ArgumentCount), regT0);
- sub32(TrustedImm32(1), regT0);
- Jump zeroLength = branch32(LessThanOrEqual, regT0, Imm32(numParamsToSkip));
- sub32(Imm32(numParamsToSkip), regT0);
-#if USE(JSVALUE64)
- boxInt32(regT0, JSValueRegs(regT0));
-#endif
- Jump done = jump();
-
- zeroLength.link(this);
-#if USE(JSVALUE64)
- move(TrustedImm64(JSValue::encode(jsNumber(0))), regT0);
-#else
- move(TrustedImm32(0), regT0);
-#endif
-
- done.link(this);
-#if USE(JSVALUE64)
- emitPutVirtualRegister(dst, regT0);
-#else
- move(TrustedImm32(JSValue::Int32Tag), regT1);
- emitPutVirtualRegister(dst, JSValueRegs(regT1, regT0));
-#endif
-}
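
The deleted emit_op_get_rest_length computes the rest-parameter count entirely
inline: the frame's argument count includes 'this', so subtract one, then subtract
the declared parameters being skipped, clamping at zero. The same arithmetic as
plain C++:

    #include <cassert>

    int restLength(int argumentCountIncludingThis, unsigned numParamsToSkip)
    {
        int args = argumentCountIncludingThis - 1; // drop 'this'
        if (args <= static_cast<int>(numParamsToSkip))
            return 0;
        return args - static_cast<int>(numParamsToSkip);
    }

    int main()
    {
        assert(restLength(4, 1) == 2); // f(a, ...rest) called with three arguments
        assert(restLength(1, 0) == 0); // called with no arguments at all
    }
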
-
-void JIT::emit_op_save(Instruction* currentInstruction)
-{
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_save);
- slowPathCall.call();
-}
-
-void JIT::emit_op_resume(Instruction* currentInstruction)
-{
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_resume);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_captured_mov);
slowPathCall.call();
}
diff --git a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
index c326ff3e0..29e8880aa 100644
--- a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2009, 2012-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2009, 2012, 2013 Apple Inc. All rights reserved.
* Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com>
*
* Redistribution and use in source and binary forms, with or without
@@ -32,17 +32,14 @@
#include "CCallHelpers.h"
#include "Debugger.h"
-#include "Exception.h"
#include "JITInlines.h"
#include "JSArray.h"
#include "JSCell.h"
-#include "JSEnvironmentRecord.h"
#include "JSFunction.h"
-#include "JSPropertyNameEnumerator.h"
+#include "JSPropertyNameIterator.h"
+#include "JSVariableObject.h"
#include "LinkBuffer.h"
-#include "MaxFrameExtentForSlowPathCall.h"
#include "SlowPathCall.h"
-#include "TypeProfilerLog.h"
#include "VirtualRegister.h"
namespace JSC {
@@ -51,24 +48,41 @@ JIT::CodeRef JIT::privateCompileCTINativeCall(VM* vm, NativeFunction func)
{
Call nativeCall;
- emitFunctionPrologue();
- emitPutToCallFrameHeader(0, JSStack::CodeBlock);
+ emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
storePtr(callFrameRegister, &m_vm->topCallFrame);
#if CPU(X86)
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ emitGetCallerFrameFromCallFrameHeaderPtr(regT0);
+ emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1, regT0);
+ emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
+
+ peek(regT1);
+ emitPutReturnPCToCallFrameHeader(regT1);
+
// Calling convention: f(ecx, edx, ...);
// Host function signature: f(ExecState*);
move(callFrameRegister, X86Registers::ecx);
- subPtr(TrustedImm32(8), stackPointerRegister); // Align stack for call.
- storePtr(X86Registers::ecx, Address(stackPointerRegister));
+ subPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister); // Align stack after call.
+
+ move(regT0, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
// call the function
nativeCall = call();
- addPtr(TrustedImm32(8), stackPointerRegister);
+ addPtr(TrustedImm32(16 - sizeof(void*)), stackPointerRegister);
#elif CPU(ARM) || CPU(SH4) || CPU(MIPS)
+ // Load caller frame's scope chain into this callframe so that whatever we call can get to its global data.
+ emitGetCallerFrameFromCallFrameHeaderPtr(regT2);
+ emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, regT1, regT2);
+ emitPutCellToCallFrameHeader(regT1, JSStack::ScopeChain);
+
+ preserveReturnAddressAfterCall(regT3); // Callee preserved
+ emitPutReturnPCToCallFrameHeader(regT3);
+
#if CPU(MIPS)
// Allocate (unused) stack space for 4 arguments: 16 bytes, 8-byte aligned.
subPtr(TrustedImm32(16), stackPointerRegister);
@@ -79,6 +93,7 @@ JIT::CodeRef JIT::privateCompileCTINativeCall(VM* vm, NativeFunction func)
move(callFrameRegister, argumentGPR0);
emitGetFromCallFrameHeaderPtr(JSStack::Callee, argumentGPR1);
+ move(regT2, callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
loadPtr(Address(argumentGPR1, OBJECT_OFFSETOF(JSFunction, m_executable)), regT2);
// call the function
@@ -92,25 +107,28 @@ JIT::CodeRef JIT::privateCompileCTINativeCall(VM* vm, NativeFunction func)
restoreReturnAddressBeforeReturn(regT3);
#else
#error "JIT not supported on this platform."
- abortWithReason(JITNotSupported);
+ breakpoint();
#endif // CPU(X86)
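
On the CPU(X86) path above, the subPtr(16 - sizeof(void*)) keeps the stack
16-byte aligned once the CALL instruction has pushed its return address. A
small arithmetic check of that invariant (the 0x1000 starting value is just an
assumed aligned stack top):

    #include <cassert>
    #include <cstdint>

    int main()
    {
        uint32_t sp = 0x1000;     // assume sp is 16-byte aligned here
        sp -= 16 - sizeof(void*); // the subPtr in the code above
        sp -= sizeof(void*);      // CALL pushes the return address
        assert(sp % 16 == 0);     // aligned again inside the callee
    }
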
// Check for an exception
- Jump sawException = branch32(NotEqual, AbsoluteAddress(vm->addressOfException()), TrustedImm32(0));
+ Jump sawException = branch32(NotEqual, AbsoluteAddress(reinterpret_cast<char*>(vm->addressOfException()) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::EmptyValueTag));
- emitFunctionEpilogue();
// Return.
ret();
// Handle an exception
sawException.link(this);
+ // Grab the return address.
+ preserveReturnAddressAfterCall(regT1);
+
+ move(TrustedImmPtr(&vm->exceptionLocation), regT2);
+ storePtr(regT1, regT2);
storePtr(callFrameRegister, &m_vm->topCallFrame);
#if CPU(X86)
- addPtr(TrustedImm32(-4), stackPointerRegister);
- move(callFrameRegister, X86Registers::ecx);
- push(X86Registers::ecx);
+ addPtr(TrustedImm32(-12), stackPointerRegister);
+ push(callFrameRegister);
#else
move(callFrameRegister, argumentGPR0);
#endif
@@ -118,13 +136,13 @@ JIT::CodeRef JIT::privateCompileCTINativeCall(VM* vm, NativeFunction func)
call(regT3);
#if CPU(X86)
- addPtr(TrustedImm32(8), stackPointerRegister);
+ addPtr(TrustedImm32(16), stackPointerRegister);
#endif
jumpToExceptionHandler();
// All trampolines constructed! Copy the code, link up calls, and set the pointers on the Machine object.
- LinkBuffer patchBuffer(*m_vm, *this, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*m_vm, this, GLOBAL_THUNK_ID);
patchBuffer.link(nativeCall, FunctionPtr(func));
return FINALIZE_CODE(patchBuffer, ("JIT CTI native call"));
@@ -143,12 +161,21 @@ void JIT::emit_op_mov(Instruction* currentInstruction)
}
}
+void JIT::emit_op_captured_mov(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int src = currentInstruction[2].u.operand;
+
+ emitLoad(src, regT1, regT0);
+ emitNotifyWrite(regT1, regT0, regT2, currentInstruction[3].u.watchpointSet);
+ emitStore(dst, regT1, regT0);
+}
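
emit_op_captured_mov above only pays for a check while the variable's watchpoint
set is still valid; the paired slow path earlier in this diff skips linking a slow
case entirely once the set is invalidated. A simplified sketch of that
notify-on-first-write protocol (hypothetical types, ignoring the inferred-value
states the real VariableWatchpointSet tracks):

    enum class SetState { IsWatched, IsInvalidated };

    struct WatchpointSet {
        SetState state = SetState::IsWatched;
        void fireAll() { state = SetState::IsInvalidated; } // notify observers once
    };

    void notifyWrite(WatchpointSet* set)
    {
        if (set && set->state == SetState::IsWatched)
            set->fireAll(); // only the first write takes the slow path
    }
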
+
void JIT::emit_op_end(Instruction* currentInstruction)
{
ASSERT(returnValueGPR != callFrameRegister);
- emitLoad(currentInstruction[1].u.operand, regT1, returnValueGPR);
- emitRestoreCalleeSaves();
- emitFunctionEpilogue();
+ emitLoad(currentInstruction[1].u.operand, regT1, regT0);
+ restoreReturnAddressBeforeReturn(Address(callFrameRegister, CallFrame::returnPCOffset()));
ret();
}
@@ -164,9 +191,9 @@ void JIT::emit_op_new_object(Instruction* currentInstruction)
size_t allocationSize = JSFinalObject::allocationSize(structure->inlineCapacity());
MarkedAllocator* allocator = &m_vm->heap.allocatorForObjectWithoutDestructor(allocationSize);
- RegisterID resultReg = returnValueGPR;
+ RegisterID resultReg = regT0;
RegisterID allocatorReg = regT1;
- RegisterID scratchReg = regT3;
+ RegisterID scratchReg = regT2;
move(TrustedImmPtr(allocator), allocatorReg);
emitAllocateJSObject(allocatorReg, TrustedImmPtr(structure), resultReg, scratchReg);
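
emitAllocateJSObject above compiles to a free-list pop with a branch to the slow
path when the list is empty. The shape of that fast path, with illustrative
stand-ins for MarkedAllocator's internals:

    struct FreeCell { FreeCell* next; };
    struct Allocator { FreeCell* head = nullptr; };

    void* tryAllocate(Allocator& a)
    {
        if (!a.head)
            return nullptr;  // slow case: call into the runtime allocator
        FreeCell* cell = a.head;
        a.head = cell->next; // pop the free-list head
        return cell;         // caller then stores the Structure* into the cell
    }
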
@@ -182,31 +209,18 @@ void JIT::emitSlow_op_new_object(Instruction* currentInstruction, Vector<SlowCas
emitStoreCell(dst, returnValueGPR);
}
-void JIT::emit_op_overrides_has_instance(Instruction* currentInstruction)
+void JIT::emit_op_check_has_instance(Instruction* currentInstruction)
{
- int dst = currentInstruction[1].u.operand;
- int constructor = currentInstruction[2].u.operand;
- int hasInstanceValue = currentInstruction[3].u.operand;
-
- emitLoadPayload(hasInstanceValue, regT0);
- // We don't jump if we know what Symbol.hasInstance would do.
- Jump hasInstanceValueNotCell = emitJumpIfNotJSCell(hasInstanceValue);
- Jump customhasInstanceValue = branchPtr(NotEqual, regT0, TrustedImmPtr(m_codeBlock->globalObject()->functionProtoHasInstanceSymbolFunction()));
-
- // We know that constructor is an object from the way bytecode is emitted for instanceof expressions.
- emitLoadPayload(constructor, regT0);
-
- // Check that constructor 'ImplementsDefaultHasInstance', i.e. the object is neither a C-API user nor a bound function.
- test8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance), regT0);
- Jump done = jump();
+ int baseVal = currentInstruction[3].u.operand;
- hasInstanceValueNotCell.link(this);
- customhasInstanceValue.link(this);
- move(TrustedImm32(1), regT0);
-
- done.link(this);
- emitStoreBool(dst, regT0);
+ emitLoadPayload(baseVal, regT0);
+ // Check that baseVal is a cell.
+ emitJumpSlowCaseIfNotJSCell(baseVal);
+
+ // Check that baseVal 'ImplementsDefaultHasInstance'.
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT0);
+ addSlowCase(branchTest8(Zero, Address(regT0, Structure::typeInfoFlagsOffset()), TrustedImm32(ImplementsDefaultHasInstance)));
}
void JIT::emit_op_instanceof(Instruction* currentInstruction)
@@ -220,12 +234,13 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
emitLoadPayload(value, regT2);
emitLoadPayload(proto, regT1);
- // Check that value and proto are cells. baseVal must be a cell - this is checked by the get_by_id for Symbol.hasInstance.
+ // Check that value and proto are cells. baseVal must be a cell - this is checked by op_check_has_instance.
emitJumpSlowCaseIfNotJSCell(value);
emitJumpSlowCaseIfNotJSCell(proto);
// Check that prototype is an object
- addSlowCase(emitJumpIfCellNotObject(regT1));
+ loadPtr(Address(regT1, JSCell::structureOffset()), regT3);
+ addSlowCase(emitJumpIfNotObject(regT3));
// Optimistically load the result true, and start looping.
// Initially, regT1 still contains proto and regT2 still contains value.
@@ -235,7 +250,7 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
// Load the prototype of the cell in regT2. If this is equal to regT1 - WIN!
// Otherwise, check if we've hit null - if we have then drop out of the loop, if not go again.
- loadPtr(Address(regT2, JSCell::structureIDOffset()), regT2);
+ loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
load32(Address(regT2, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
Jump isInstance = branchPtr(Equal, regT2, regT1);
branchTest32(NonZero, regT2).linkTo(loop, this);
@@ -248,41 +263,35 @@ void JIT::emit_op_instanceof(Instruction* currentInstruction)
emitStoreBool(dst, regT0);
}
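
The instanceof hot path above is a loop over structures: load the current cell's
structure, load that structure's prototype, compare against the candidate, and
stop when the prototype payload is null. The same walk written out directly
(illustrative types):

    struct Structure;
    struct Cell { Structure* structure; };
    struct Structure { Cell* prototype; }; // null terminates the chain

    bool instanceOf(const Cell* value, const Cell* prototype)
    {
        for (const Cell* c = value->structure->prototype; c; c = c->structure->prototype) {
            if (c == prototype)
                return true; // "WIN", as the comment above puts it
        }
        return false;
    }
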
-void JIT::emit_op_instanceof_custom(Instruction*)
-{
- // This always goes to slow path since we expect it to be rare.
- addSlowCase(jump());
-}
-
-void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_check_has_instance(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
int dst = currentInstruction[1].u.operand;
int value = currentInstruction[2].u.operand;
- int proto = currentInstruction[3].u.operand;
+ int baseVal = currentInstruction[3].u.operand;
- linkSlowCaseIfNotJSCell(iter, value);
- linkSlowCaseIfNotJSCell(iter, proto);
+ linkSlowCaseIfNotJSCell(iter, baseVal);
linkSlowCase(iter);
emitLoad(value, regT1, regT0);
- emitLoad(proto, regT3, regT2);
- callOperation(operationInstanceOf, dst, regT1, regT0, regT3, regT2);
+ emitLoad(baseVal, regT3, regT2);
+ callOperation(operationCheckHasInstance, dst, regT1, regT0, regT3, regT2);
+
+ emitJumpSlowToHot(jump(), currentInstruction[4].u.operand);
}
-void JIT::emitSlow_op_instanceof_custom(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_instanceof(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
int dst = currentInstruction[1].u.operand;
int value = currentInstruction[2].u.operand;
- int constructor = currentInstruction[3].u.operand;
- int hasInstanceValue = currentInstruction[4].u.operand;
+ int proto = currentInstruction[3].u.operand;
+ linkSlowCaseIfNotJSCell(iter, value);
+ linkSlowCaseIfNotJSCell(iter, proto);
linkSlowCase(iter);
emitLoad(value, regT1, regT0);
- emitLoadPayload(constructor, regT2);
- emitLoad(hasInstanceValue, regT4, regT3);
- callOperation(operationInstanceOfCustom, regT1, regT0, regT2, regT4, regT3);
- emitStoreBool(dst, returnValueGPR);
+ emitLoad(proto, regT3, regT2);
+ callOperation(operationInstanceOf, dst, regT1, regT0, regT3, regT2);
}
void JIT::emit_op_is_undefined(Instruction* currentInstruction)
@@ -297,12 +306,12 @@ void JIT::emit_op_is_undefined(Instruction* currentInstruction)
Jump done = jump();
isCell.link(this);
- Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
+ Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT1, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
move(TrustedImm32(0), regT0);
Jump notMasqueradesAsUndefined = jump();
isMasqueradesAsUndefined.link(this);
- loadPtr(Address(regT0, JSCell::structureIDOffset()), regT1);
move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
loadPtr(Address(regT1, Structure::globalObjectOffset()), regT1);
compare32(Equal, regT0, regT1, regT0);
@@ -341,7 +350,8 @@ void JIT::emit_op_is_string(Instruction* currentInstruction)
emitLoad(value, regT1, regT0);
Jump isNotCell = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
- compare8(Equal, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType), regT0);
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
+ compare8(Equal, Address(regT1, Structure::typeInfoTypeOffset()), TrustedImm32(StringType), regT0);
Jump done = jump();
isNotCell.link(this);
@@ -351,22 +361,25 @@ void JIT::emit_op_is_string(Instruction* currentInstruction)
emitStoreBool(dst, regT0);
}
-void JIT::emit_op_is_object(Instruction* currentInstruction)
+void JIT::emit_op_tear_off_activation(Instruction* currentInstruction)
{
- int dst = currentInstruction[1].u.operand;
- int value = currentInstruction[2].u.operand;
-
- emitLoad(value, regT1, regT0);
- Jump isNotCell = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
-
- compare8(AboveOrEqual, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(ObjectType), regT0);
- Jump done = jump();
+ int activation = currentInstruction[1].u.operand;
+ Jump activationNotCreated = branch32(Equal, tagFor(activation), TrustedImm32(JSValue::EmptyValueTag));
+ emitLoadPayload(activation, regT0);
+ callOperation(operationTearOffActivation, regT0);
+ activationNotCreated.link(this);
+}
- isNotCell.link(this);
- move(TrustedImm32(0), regT0);
+void JIT::emit_op_tear_off_arguments(Instruction* currentInstruction)
+{
+ VirtualRegister arguments = VirtualRegister(currentInstruction[1].u.operand);
+ int activation = currentInstruction[2].u.operand;
- done.link(this);
- emitStoreBool(dst, regT0);
+ Jump argsNotCreated = branch32(Equal, tagFor(unmodifiedArgumentsRegister(arguments).offset()), TrustedImm32(JSValue::EmptyValueTag));
+ emitLoadPayload(unmodifiedArgumentsRegister(VirtualRegister(arguments)).offset(), regT0);
+ emitLoadPayload(activation, regT1);
+ callOperation(operationTearOffArguments, regT0, regT1);
+ argsNotCreated.link(this);
}
void JIT::emit_op_to_primitive(Instruction* currentInstruction)
@@ -377,7 +390,7 @@ void JIT::emit_op_to_primitive(Instruction* currentInstruction)
emitLoad(src, regT1, regT0);
Jump isImm = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
- addSlowCase(emitJumpIfCellObject(regT0));
+ addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
isImm.link(this);
if (dst != src)
@@ -497,8 +510,9 @@ void JIT::emit_op_jeq_null(Instruction* currentInstruction)
Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
- Jump isNotMasqueradesAsUndefined = branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
- loadPtr(Address(regT0, JSCell::structureIDOffset()), regT2);
+ // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ Jump isNotMasqueradesAsUndefined = branchTest8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
addJump(branchPtr(Equal, Address(regT2, Structure::globalObjectOffset()), regT0), target);
Jump masqueradesGlobalObjectIsForeign = jump();
@@ -522,8 +536,9 @@ void JIT::emit_op_jneq_null(Instruction* currentInstruction)
Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
- addJump(branchTest8(Zero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
- loadPtr(Address(regT0, JSCell::structureIDOffset()), regT2);
+ // First, handle JSCell cases - check MasqueradesAsUndefined bit on the structure.
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ addJump(branchTest8(Zero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined)), target);
move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
addJump(branchPtr(NotEqual, Address(regT2, Structure::globalObjectOffset()), regT0), target);
Jump wasNotImmediate = jump();
@@ -577,8 +592,8 @@ void JIT::emitSlow_op_eq(Instruction* currentInstruction, Vector<SlowCaseEntry>:
genericCase.append(getSlowCase(iter)); // tags not equal
linkSlowCase(iter); // tags equal and JSCell
- genericCase.append(branchPtr(NotEqual, Address(regT0, JSCell::structureIDOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
- genericCase.append(branchPtr(NotEqual, Address(regT2, JSCell::structureIDOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
+ genericCase.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
+ genericCase.append(branchPtr(NotEqual, Address(regT2, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
// String case.
callOperation(operationCompareStringEq, regT0, regT2);
@@ -621,8 +636,8 @@ void JIT::emitSlow_op_neq(Instruction* currentInstruction, Vector<SlowCaseEntry>
genericCase.append(getSlowCase(iter)); // tags not equal
linkSlowCase(iter); // tags equal and JSCell
- genericCase.append(branchPtr(NotEqual, Address(regT0, JSCell::structureIDOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
- genericCase.append(branchPtr(NotEqual, Address(regT2, JSCell::structureIDOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
+ genericCase.append(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
+ genericCase.append(branchPtr(NotEqual, Address(regT2, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
// String case.
callOperation(operationCompareStringEq, regT0, regT2);
@@ -650,12 +665,12 @@ void JIT::compileOpStrictEq(Instruction* currentInstruction, CompileOpStrictEqTy
addSlowCase(branch32(NotEqual, regT1, regT3));
addSlowCase(branch32(Below, regT1, TrustedImm32(JSValue::LowestTag)));
- // Jump to a slow case if both are strings or symbols (non-object).
+ // Jump to a slow case if both are strings.
Jump notCell = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
- Jump firstIsObject = emitJumpIfCellObject(regT0);
- addSlowCase(emitJumpIfCellNotObject(regT2));
+ Jump firstNotString = branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get()));
+ addSlowCase(branchPtr(Equal, Address(regT2, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get())));
notCell.link(this);
- firstIsObject.link(this);
+ firstNotString.link(this);
// Simply compare the payloads.
if (type == OpStrictEq)
@@ -704,12 +719,12 @@ void JIT::emit_op_eq_null(Instruction* currentInstruction)
emitLoad(src, regT1, regT0);
Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
- Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
move(TrustedImm32(0), regT1);
Jump wasNotMasqueradesAsUndefined = jump();
isMasqueradesAsUndefined.link(this);
- loadPtr(Address(regT0, JSCell::structureIDOffset()), regT2);
move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2);
compare32(Equal, regT0, regT2, regT1);
@@ -735,12 +750,12 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction)
emitLoad(src, regT1, regT0);
Jump isImmediate = branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag));
- Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT0, JSCell::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ Jump isMasqueradesAsUndefined = branchTest8(NonZero, Address(regT2, Structure::typeInfoFlagsOffset()), TrustedImm32(MasqueradesAsUndefined));
move(TrustedImm32(1), regT1);
Jump wasNotMasqueradesAsUndefined = jump();
isMasqueradesAsUndefined.link(this);
- loadPtr(Address(regT0, JSCell::structureIDOffset()), regT2);
move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
loadPtr(Address(regT2, Structure::globalObjectOffset()), regT2);
compare32(NotEqual, regT0, regT2, regT1);
@@ -761,116 +776,165 @@ void JIT::emit_op_neq_null(Instruction* currentInstruction)
void JIT::emit_op_throw(Instruction* currentInstruction)
{
ASSERT(regT0 == returnValueGPR);
- copyCalleeSavesToVMCalleeSavesBuffer();
emitLoad(currentInstruction[1].u.operand, regT1, regT0);
callOperationNoExceptionCheck(operationThrow, regT1, regT0);
jumpToExceptionHandler();
}
-void JIT::emit_op_push_with_scope(Instruction* currentInstruction)
+void JIT::emit_op_get_pnames(Instruction* currentInstruction)
{
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_push_with_scope);
- slowPathCall.call();
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int i = currentInstruction[3].u.operand;
+ int size = currentInstruction[4].u.operand;
+ int breakTarget = currentInstruction[5].u.operand;
+
+ JumpList isNotObject;
+
+ emitLoad(base, regT1, regT0);
+ if (!m_codeBlock->isKnownNotImmediate(base))
+ isNotObject.append(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
+ if (VirtualRegister(base) != m_codeBlock->thisRegister() || m_codeBlock->isStrictMode()) {
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ isNotObject.append(emitJumpIfNotObject(regT2));
+ }
+
+ // We could inline the case where you have a valid cache, but
+ // this call doesn't seem to be hot.
+ Label isObject(this);
+ callOperation(operationGetPNames, regT0);
+ emitStoreCell(dst, returnValueGPR);
+ load32(Address(regT0, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStringsSize)), regT3);
+ store32(TrustedImm32(Int32Tag), intTagFor(i));
+ store32(TrustedImm32(0), intPayloadFor(i));
+ store32(TrustedImm32(Int32Tag), intTagFor(size));
+ store32(regT3, payloadFor(size));
+ Jump end = jump();
+
+ isNotObject.link(this);
+ addJump(branch32(Equal, regT1, TrustedImm32(JSValue::NullTag)), breakTarget);
+ addJump(branch32(Equal, regT1, TrustedImm32(JSValue::UndefinedTag)), breakTarget);
+ callOperation(operationToObject, base, regT1, regT0);
+ jump().linkTo(isObject, this);
+
+ end.link(this);
}
-void JIT::emit_op_to_number(Instruction* currentInstruction)
+void JIT::emit_op_next_pname(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
- int src = currentInstruction[2].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int i = currentInstruction[3].u.operand;
+ int size = currentInstruction[4].u.operand;
+ int it = currentInstruction[5].u.operand;
+ int target = currentInstruction[6].u.operand;
- emitLoad(src, regT1, regT0);
+ JumpList callHasProperty;
- Jump isInt32 = branch32(Equal, regT1, TrustedImm32(JSValue::Int32Tag));
- addSlowCase(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::LowestTag)));
- isInt32.link(this);
+ Label begin(this);
+ load32(intPayloadFor(i), regT0);
+ Jump end = branch32(Equal, regT0, intPayloadFor(size));
- if (src != dst)
- emitStore(dst, regT1, regT0);
+ // Grab key @ i
+ loadPtr(payloadFor(it), regT1);
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_jsStrings)), regT2);
+ load32(BaseIndex(regT2, regT0, TimesEight), regT2);
+ store32(TrustedImm32(JSValue::CellTag), tagFor(dst));
+ store32(regT2, payloadFor(dst));
+
+ // Increment i
+ add32(TrustedImm32(1), regT0);
+ store32(regT0, intPayloadFor(i));
+
+ // Verify that i is valid:
+ loadPtr(payloadFor(base), regT0);
+
+ // Test base's structure
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
+
+ // Test base's prototype chain
+ loadPtr(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedPrototypeChain)), regT3);
+ loadPtr(Address(regT3, OBJECT_OFFSETOF(StructureChain, m_vector)), regT3);
+ addJump(branchTestPtr(Zero, Address(regT3)), target);
+
+ Label checkPrototype(this);
+ callHasProperty.append(branch32(Equal, Address(regT2, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), TrustedImm32(JSValue::NullTag)));
+ loadPtr(Address(regT2, Structure::prototypeOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT2);
+ loadPtr(Address(regT2, JSCell::structureOffset()), regT2);
+ callHasProperty.append(branchPtr(NotEqual, regT2, Address(regT3)));
+ addPtr(TrustedImm32(sizeof(Structure*)), regT3);
+ branchTestPtr(NonZero, Address(regT3)).linkTo(checkPrototype, this);
+
+ // Continue loop.
+ addJump(jump(), target);
+
+ // Slow case: Ask the object if i is valid.
+ callHasProperty.link(this);
+ loadPtr(addressFor(dst), regT1);
+ callOperation(operationHasProperty, regT0, regT1);
+
+ // Test for valid key.
+ addJump(branchTest32(NonZero, regT0), target);
+ jump().linkTo(begin, this);
+
+ // End of loop.
+ end.link(this);
}
-void JIT::emitSlow_op_to_number(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emit_op_push_with_scope(Instruction* currentInstruction)
{
- linkSlowCase(iter);
+ emitLoad(currentInstruction[1].u.operand, regT1, regT0);
+ callOperation(operationPushWithScope, regT1, regT0);
+}
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_number);
- slowPathCall.call();
+void JIT::emit_op_pop_scope(Instruction*)
+{
+ callOperation(operationPopScope);
}
-void JIT::emit_op_to_string(Instruction* currentInstruction)
+void JIT::emit_op_to_number(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
int src = currentInstruction[2].u.operand;
emitLoad(src, regT1, regT0);
- addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::CellTag)));
- addSlowCase(branch8(NotEqual, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType)));
+ Jump isInt32 = branch32(Equal, regT1, TrustedImm32(JSValue::Int32Tag));
+ addSlowCase(branch32(AboveOrEqual, regT1, TrustedImm32(JSValue::LowestTag)));
+ isInt32.link(this);
if (src != dst)
emitStore(dst, regT1, regT0);
}
-void JIT::emitSlow_op_to_string(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emitSlow_op_to_number(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- linkSlowCase(iter); // Not JSCell.
- linkSlowCase(iter); // Not JSString.
+ linkSlowCase(iter);
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_string);
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_to_number);
slowPathCall.call();
}
-void JIT::emit_op_catch(Instruction* currentInstruction)
+void JIT::emit_op_push_name_scope(Instruction* currentInstruction)
{
- restoreCalleeSavesFromVMCalleeSavesBuffer();
+ emitLoad(currentInstruction[2].u.operand, regT1, regT0);
+ callOperation(operationPushNameScope, &m_codeBlock->identifier(currentInstruction[1].u.operand), regT1, regT0, currentInstruction[3].u.operand);
+}
+void JIT::emit_op_catch(Instruction* currentInstruction)
+{
move(TrustedImmPtr(m_vm), regT3);
// operationThrow returns the callFrame for the handler.
- load32(Address(regT3, VM::callFrameForCatchOffset()), callFrameRegister);
- storePtr(TrustedImmPtr(nullptr), Address(regT3, VM::callFrameForCatchOffset()));
-
- addPtr(TrustedImm32(stackPointerOffsetFor(codeBlock()) * sizeof(Register)), callFrameRegister, stackPointerRegister);
-
- callOperationNoExceptionCheck(operationCheckIfExceptionIsUncatchableAndNotifyProfiler);
- Jump isCatchableException = branchTest32(Zero, returnValueGPR);
- jumpToExceptionHandler();
- isCatchableException.link(this);
-
- move(TrustedImmPtr(m_vm), regT3);
-
+ load32(Address(regT3, VM::callFrameForThrowOffset()), callFrameRegister);
// Now store the exception returned by operationThrow.
- load32(Address(regT3, VM::exceptionOffset()), regT2);
- move(TrustedImm32(JSValue::CellTag), regT1);
-
- store32(TrustedImm32(0), Address(regT3, VM::exceptionOffset()));
+ load32(Address(regT3, VM::exceptionOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
+ load32(Address(regT3, VM::exceptionOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
+ store32(TrustedImm32(JSValue().payload()), Address(regT3, VM::exceptionOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
+ store32(TrustedImm32(JSValue().tag()), Address(regT3, VM::exceptionOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
unsigned exception = currentInstruction[1].u.operand;
- emitStore(exception, regT1, regT2);
-
- load32(Address(regT2, Exception::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
- load32(Address(regT2, Exception::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
-
- unsigned thrownValue = currentInstruction[2].u.operand;
- emitStore(thrownValue, regT1, regT0);
-}
-
-void JIT::emit_op_assert(Instruction* currentInstruction)
-{
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_assert);
- slowPathCall.call();
-}
-
-void JIT::emit_op_create_lexical_environment(Instruction* currentInstruction)
-{
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_lexical_environment);
- slowPathCall.call();
-}
-
-void JIT::emit_op_get_parent_scope(Instruction* currentInstruction)
-{
- int currentScope = currentInstruction[2].u.operand;
- emitLoadPayload(currentScope, regT0);
- loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
- emitStoreCell(currentInstruction[1].u.operand, regT0);
+ emitStore(exception, regT1, regT0);
}
void JIT::emit_op_switch_imm(Instruction* currentInstruction)
@@ -882,7 +946,7 @@ void JIT::emit_op_switch_imm(Instruction* currentInstruction)
// create jump table for switch destinations, track this switch statement.
SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex);
m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Immediate));
- jumpTable->ensureCTITable();
+ jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
emitLoad(scrutinee, regT1, regT0);
callOperation(operationSwitchImmWithUnknownKeyType, regT1, regT0, tableIndex);
@@ -898,7 +962,7 @@ void JIT::emit_op_switch_char(Instruction* currentInstruction)
// create jump table for switch destinations, track this switch statement.
SimpleJumpTable* jumpTable = &m_codeBlock->switchJumpTable(tableIndex);
m_switches.append(SwitchRecord(jumpTable, m_bytecodeOffset, defaultOffset, SwitchRecord::Character));
- jumpTable->ensureCTITable();
+ jumpTable->ctiOffsets.grow(jumpTable->branchOffsets.size());
emitLoad(scrutinee, regT1, regT0);
callOperation(operationSwitchCharWithUnknownKeyType, regT1, regT0, tableIndex);
@@ -949,48 +1013,77 @@ void JIT::emit_op_enter(Instruction* currentInstruction)
slowPathCall.call();
}
-void JIT::emit_op_get_scope(Instruction* currentInstruction)
+void JIT::emit_op_create_activation(Instruction* currentInstruction)
+{
+ int activation = currentInstruction[1].u.operand;
+
+ Jump activationCreated = branch32(NotEqual, tagFor(activation), TrustedImm32(JSValue::EmptyValueTag));
+ callOperation(operationCreateActivation, 0);
+ emitStoreCell(activation, returnValueGPR);
+ activationCreated.link(this);
+}
+
+void JIT::emit_op_create_arguments(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+
+ Jump argsCreated = branch32(NotEqual, tagFor(dst), TrustedImm32(JSValue::EmptyValueTag));
+ callOperation(operationCreateArguments);
+ emitStoreCell(dst, returnValueGPR);
+ emitStoreCell(unmodifiedArgumentsRegister(VirtualRegister(dst)).offset(), returnValueGPR);
+ argsCreated.link(this);
+}
+
+void JIT::emit_op_init_lazy_reg(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
+
+ emitStore(dst, JSValue());
+}
+
+void JIT::emit_op_get_callee(Instruction* currentInstruction)
+{
+ int result = currentInstruction[1].u.operand;
+ WriteBarrierBase<JSCell>* cachedFunction = &currentInstruction[2].u.jsCell;
emitGetFromCallFrameHeaderPtr(JSStack::Callee, regT0);
- loadPtr(Address(regT0, JSFunction::offsetOfScopeChain()), regT0);
- emitStoreCell(dst, regT0);
+
+ loadPtr(cachedFunction, regT2);
+ addSlowCase(branchPtr(NotEqual, regT0, regT2));
+
+ move(TrustedImm32(JSValue::CellTag), regT1);
+ emitStore(result, regT1, regT0);
+}
+
+void JIT::emitSlow_op_get_callee(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ linkSlowCase(iter);
+
+ JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_callee);
+ slowPathCall.call();
}
void JIT::emit_op_create_this(Instruction* currentInstruction)
{
int callee = currentInstruction[2].u.operand;
- WriteBarrierBase<JSCell>* cachedFunction = &currentInstruction[4].u.jsCell;
RegisterID calleeReg = regT0;
- RegisterID rareDataReg = regT4;
RegisterID resultReg = regT0;
RegisterID allocatorReg = regT1;
RegisterID structureReg = regT2;
- RegisterID cachedFunctionReg = regT4;
RegisterID scratchReg = regT3;
emitLoadPayload(callee, calleeReg);
- loadPtr(Address(calleeReg, JSFunction::offsetOfRareData()), rareDataReg);
- addSlowCase(branchTestPtr(Zero, rareDataReg));
- loadPtr(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg);
- loadPtr(Address(rareDataReg, FunctionRareData::offsetOfObjectAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureReg);
+ loadPtr(Address(calleeReg, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfAllocator()), allocatorReg);
+ loadPtr(Address(calleeReg, JSFunction::offsetOfAllocationProfile() + ObjectAllocationProfile::offsetOfStructure()), structureReg);
addSlowCase(branchTestPtr(Zero, allocatorReg));
- loadPtr(cachedFunction, cachedFunctionReg);
- Jump hasSeenMultipleCallees = branchPtr(Equal, cachedFunctionReg, TrustedImmPtr(JSCell::seenMultipleCalleeObjects()));
- addSlowCase(branchPtr(NotEqual, calleeReg, cachedFunctionReg));
- hasSeenMultipleCallees.link(this);
-
emitAllocateJSObject(allocatorReg, structureReg, resultReg, scratchReg);
emitStoreCell(currentInstruction[1].u.operand, resultReg);
}
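
After this change, emit_op_create_this reads the allocator and Structure straight
out of the callee's ObjectAllocationProfile and bails to slow_path_create_this
when the profile is still empty (null allocator). The decision in isolation, with
illustrative forward-declared types:

    struct Allocator;
    struct Structure;
    struct ObjectAllocationProfile { Allocator* allocator = nullptr; Structure* structure = nullptr; };

    bool tryFastCreateThis(const ObjectAllocationProfile& profile, Allocator*& outAllocator, Structure*& outStructure)
    {
        if (!profile.allocator)
            return false; // no profile yet: take the slow path
        outAllocator = profile.allocator;
        outStructure = profile.structure;
        return true;
    }
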
void JIT::emitSlow_op_create_this(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- linkSlowCase(iter); // doesn't have rare data
linkSlowCase(iter); // doesn't have an allocation profile
linkSlowCase(iter); // allocation failed
- linkSlowCase(iter); // cached function didn't match
JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_create_this);
slowPathCall.call();
@@ -1004,8 +1097,8 @@ void JIT::emit_op_to_this(Instruction* currentInstruction)
emitLoad(thisRegister, regT3, regT2);
addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::CellTag)));
- addSlowCase(branch8(NotEqual, Address(regT2, JSCell::typeInfoTypeOffset()), TrustedImm32(FinalObjectType)));
- loadPtr(Address(regT2, JSCell::structureIDOffset()), regT0);
+ loadPtr(Address(regT2, JSCell::structureOffset()), regT0);
+ addSlowCase(branch8(NotEqual, Address(regT0, Structure::typeInfoTypeOffset()), TrustedImm32(FinalObjectType)));
loadPtr(cachedStructure, regT2);
addSlowCase(branchPtr(NotEqual, regT0, regT2));
}
@@ -1019,19 +1112,6 @@ void JIT::emitSlow_op_to_this(Instruction* currentInstruction, Vector<SlowCaseEn
slowPathCall.call();
}
-void JIT::emit_op_check_tdz(Instruction* currentInstruction)
-{
- emitLoadTag(currentInstruction[1].u.operand, regT0);
- addSlowCase(branch32(Equal, regT0, TrustedImm32(JSValue::EmptyValueTag)));
-}
-
-void JIT::emitSlow_op_check_tdz(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- linkSlowCase(iter);
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_throw_tdz_error);
- slowPathCall.call();
-}
-
void JIT::emit_op_profile_will_call(Instruction* currentInstruction)
{
load32(m_vm->enabledProfilerAddress(), regT0);
@@ -1050,282 +1130,63 @@ void JIT::emit_op_profile_did_call(Instruction* currentInstruction)
profilerDone.link(this);
}
-void JIT::emit_op_has_structure_property(Instruction* currentInstruction)
+void JIT::emit_op_get_arguments_length(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int enumerator = currentInstruction[4].u.operand;
-
- emitLoadPayload(base, regT0);
- emitJumpSlowCaseIfNotJSCell(base);
-
- emitLoadPayload(enumerator, regT1);
-
- load32(Address(regT0, JSCell::structureIDOffset()), regT0);
- addSlowCase(branch32(NotEqual, regT0, Address(regT1, JSPropertyNameEnumerator::cachedStructureIDOffset())));
-
- move(TrustedImm32(1), regT0);
- emitStoreBool(dst, regT0);
+ int argumentsRegister = currentInstruction[2].u.operand;
+ addSlowCase(branch32(NotEqual, tagFor(argumentsRegister), TrustedImm32(JSValue::EmptyValueTag)));
+ load32(payloadFor(JSStack::ArgumentCount), regT0);
+ sub32(TrustedImm32(1), regT0);
+ emitStoreInt32(dst, regT0);
}
-void JIT::privateCompileHasIndexedProperty(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
-{
- Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;
-
- PatchableJump badType;
-
- // FIXME: Add support for other types like TypedArrays and Arguments.
- // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034.
- JumpList slowCases = emitLoadForArrayMode(currentInstruction, arrayMode, badType);
- move(TrustedImm32(1), regT0);
- Jump done = jump();
-
- LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
-
- patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
- patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
-
- patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
-
- byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
- m_codeBlock, patchBuffer,
- ("Baseline has_indexed_property stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
-
- MacroAssembler::repatchJump(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
- MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(operationHasIndexedPropertyGeneric));
-}
-
-void JIT::emit_op_has_indexed_property(Instruction* currentInstruction)
+void JIT::emitSlow_op_get_arguments_length(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
+ linkSlowCase(iter);
int dst = currentInstruction[1].u.operand;
int base = currentInstruction[2].u.operand;
- int property = currentInstruction[3].u.operand;
- ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
- ByValInfo* byValInfo = m_codeBlock->addByValInfo();
-
- emitLoadPayload(base, regT0);
- emitJumpSlowCaseIfNotJSCell(base);
-
- emitLoadPayload(property, regT1);
-
- // This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
- // We check the value as if it were a uint32 against m_vectorLength - which will always fail if the
- // number was negative, since m_vectorLength is always less than INT_MAX (the total allocation
- // size is always less than 4GB). As such, zero-extending will have been correct (and extending the
- // value to 64 bits is necessary since it's used in the address calculation). We zero-extend rather
- // than sign-extend since it makes it easier to re-tag the value in the slow case.
- zeroExtend32ToPtr(regT1, regT1);
-
- emitArrayProfilingSiteWithCell(regT0, regT2, profile);
- and32(TrustedImm32(IndexingShapeMask), regT2);
-
- JITArrayMode mode = chooseArrayMode(profile);
- PatchableJump badType;
-
- // FIXME: Add support for other types like TypedArrays and Arguments.
- // See https://bugs.webkit.org/show_bug.cgi?id=135033 and https://bugs.webkit.org/show_bug.cgi?id=135034.
- JumpList slowCases = emitLoadForArrayMode(currentInstruction, mode, badType);
- move(TrustedImm32(1), regT0);
-
- addSlowCase(badType);
- addSlowCase(slowCases);
-
- Label done = label();
-
- emitStoreBool(dst, regT0);
-
- Label nextHotPath = label();
-
- m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, PatchableJump(), badType, mode, profile, done, nextHotPath));
+ callOperation(operationGetArgumentsLength, dst, base);
}
-void JIT::emitSlow_op_has_indexed_property(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+void JIT::emit_op_get_argument_by_val(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
+ int argumentsRegister = currentInstruction[2].u.operand;
int property = currentInstruction[3].u.operand;
- ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;
-
- linkSlowCaseIfNotJSCell(iter, base); // base cell check
- linkSlowCase(iter); // base array check
- linkSlowCase(iter); // vector length check
- linkSlowCase(iter); // empty value
-
- Label slowPath = label();
-
- emitLoad(base, regT1, regT0);
- emitLoad(property, regT3, regT2);
- Call call = callOperation(operationHasIndexedPropertyDefault, dst, regT1, regT0, regT3, regT2, byValInfo);
-
- m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
- m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
- m_byValInstructionIndex++;
-}
-
-void JIT::emit_op_get_direct_pname(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int base = currentInstruction[2].u.operand;
- int index = currentInstruction[4].u.operand;
- int enumerator = currentInstruction[5].u.operand;
-
- // Check that base is a cell
- emitLoadPayload(base, regT0);
- emitJumpSlowCaseIfNotJSCell(base);
-
- // Check the structure
- emitLoadPayload(enumerator, regT1);
- load32(Address(regT0, JSCell::structureIDOffset()), regT2);
- addSlowCase(branch32(NotEqual, regT2, Address(regT1, JSPropertyNameEnumerator::cachedStructureIDOffset())));
-
- // Compute the offset
- emitLoadPayload(index, regT2);
- // If index is less than the enumerator's cached inline storage, then it's an inline access
- Jump outOfLineAccess = branch32(AboveOrEqual, regT2, Address(regT1, JSPropertyNameEnumerator::cachedInlineCapacityOffset()));
- addPtr(TrustedImm32(JSObject::offsetOfInlineStorage()), regT0);
- load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
- load32(BaseIndex(regT0, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
-
- Jump done = jump();
-
- // Otherwise it's out of line
- outOfLineAccess.link(this);
- loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
- addSlowCase(branchIfNotToSpace(regT0));
- sub32(Address(regT1, JSPropertyNameEnumerator::cachedInlineCapacityOffset()), regT2);
- neg32(regT2);
- int32_t offsetOfFirstProperty = static_cast<int32_t>(offsetInButterfly(firstOutOfLineOffset)) * sizeof(EncodedJSValue);
- load32(BaseIndex(regT0, regT2, TimesEight, offsetOfFirstProperty + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1);
- load32(BaseIndex(regT0, regT2, TimesEight, offsetOfFirstProperty + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0);
+ addSlowCase(branch32(NotEqual, tagFor(argumentsRegister), TrustedImm32(JSValue::EmptyValueTag)));
+ emitLoad(property, regT1, regT2);
+ addSlowCase(branch32(NotEqual, regT1, TrustedImm32(JSValue::Int32Tag)));
+ add32(TrustedImm32(1), regT2);
+ // regT2 now contains the integer index of the argument we want, offset by one to account for the 'this' argument
+ load32(payloadFor(JSStack::ArgumentCount), regT3);
+ addSlowCase(branch32(AboveOrEqual, regT2, regT3));
- done.link(this);
+ loadPtr(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT0);
+ loadPtr(BaseIndex(callFrameRegister, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))), regT1);
emitValueProfilingSite();
emitStore(dst, regT1, regT0);
}
-void JIT::emitSlow_op_get_direct_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
-{
- int base = currentInstruction[2].u.operand;
- linkSlowCaseIfNotJSCell(iter, base);
- linkSlowCase(iter);
- linkSlowCase(iter);
-
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_get_direct_pname);
- slowPathCall.call();
-}
-
-void JIT::emit_op_enumerator_structure_pname(Instruction* currentInstruction)
-{
- int dst = currentInstruction[1].u.operand;
- int enumerator = currentInstruction[2].u.operand;
- int index = currentInstruction[3].u.operand;
-
- emitLoadPayload(index, regT0);
- emitLoadPayload(enumerator, regT1);
- Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endStructurePropertyIndexOffset()));
-
- move(TrustedImm32(JSValue::NullTag), regT2);
- move(TrustedImm32(0), regT0);
-
- Jump done = jump();
- inBounds.link(this);
-
- loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1);
- loadPtr(BaseIndex(regT1, regT0, timesPtr()), regT0);
- move(TrustedImm32(JSValue::CellTag), regT2);
-
- done.link(this);
- emitStore(dst, regT2, regT0);
-}
-
-void JIT::emit_op_enumerator_generic_pname(Instruction* currentInstruction)
+void JIT::emitSlow_op_get_argument_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
int dst = currentInstruction[1].u.operand;
- int enumerator = currentInstruction[2].u.operand;
- int index = currentInstruction[3].u.operand;
-
- emitLoadPayload(index, regT0);
- emitLoadPayload(enumerator, regT1);
- Jump inBounds = branch32(Below, regT0, Address(regT1, JSPropertyNameEnumerator::endGenericPropertyIndexOffset()));
+ int arguments = currentInstruction[2].u.operand;
+ int property = currentInstruction[3].u.operand;
- move(TrustedImm32(JSValue::NullTag), regT2);
- move(TrustedImm32(0), regT0);
+ linkSlowCase(iter);
+ Jump skipArgumentsCreation = jump();
- Jump done = jump();
- inBounds.link(this);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
- loadPtr(Address(regT1, JSPropertyNameEnumerator::cachedPropertyNamesVectorOffset()), regT1);
- loadPtr(BaseIndex(regT1, regT0, timesPtr()), regT0);
- move(TrustedImm32(JSValue::CellTag), regT2);
+ callOperation(operationCreateArguments);
+ emitStoreCell(arguments, returnValueGPR);
+ emitStoreCell(unmodifiedArgumentsRegister(VirtualRegister(arguments)).offset(), returnValueGPR);
- done.link(this);
- emitStore(dst, regT2, regT0);
-}
-
-void JIT::emit_op_profile_type(Instruction* currentInstruction)
-{
- TypeLocation* cachedTypeLocation = currentInstruction[2].u.location;
- int valueToProfile = currentInstruction[1].u.operand;
-
- // Load payload in T0. Load tag in T3.
- emitLoadPayload(valueToProfile, regT0);
- emitLoadTag(valueToProfile, regT3);
-
- JumpList jumpToEnd;
-
- jumpToEnd.append(branch32(Equal, regT3, TrustedImm32(JSValue::EmptyValueTag)));
-
- // Compile in a predictive type check, if possible, to see if we can skip writing to the log.
- // These typechecks are inlined to match those of the 32-bit JSValue type checks.
- if (cachedTypeLocation->m_lastSeenType == TypeUndefined)
- jumpToEnd.append(branch32(Equal, regT3, TrustedImm32(JSValue::UndefinedTag)));
- else if (cachedTypeLocation->m_lastSeenType == TypeNull)
- jumpToEnd.append(branch32(Equal, regT3, TrustedImm32(JSValue::NullTag)));
- else if (cachedTypeLocation->m_lastSeenType == TypeBoolean)
- jumpToEnd.append(branch32(Equal, regT3, TrustedImm32(JSValue::BooleanTag)));
- else if (cachedTypeLocation->m_lastSeenType == TypeMachineInt)
- jumpToEnd.append(branch32(Equal, regT3, TrustedImm32(JSValue::Int32Tag)));
- else if (cachedTypeLocation->m_lastSeenType == TypeNumber) {
- jumpToEnd.append(branch32(Below, regT3, TrustedImm32(JSValue::LowestTag)));
- jumpToEnd.append(branch32(Equal, regT3, TrustedImm32(JSValue::Int32Tag)));
- } else if (cachedTypeLocation->m_lastSeenType == TypeString) {
- Jump isNotCell = branch32(NotEqual, regT3, TrustedImm32(JSValue::CellTag));
- jumpToEnd.append(branch8(Equal, Address(regT0, JSCell::typeInfoTypeOffset()), TrustedImm32(StringType)));
- isNotCell.link(this);
- }
-
- // Load the type profiling log into T2.
- TypeProfilerLog* cachedTypeProfilerLog = m_vm->typeProfilerLog();
- move(TrustedImmPtr(cachedTypeProfilerLog), regT2);
-
- // Load the next log entry into T1.
- loadPtr(Address(regT2, TypeProfilerLog::currentLogEntryOffset()), regT1);
-
- // Store the JSValue onto the log entry.
- store32(regT0, Address(regT1, TypeProfilerLog::LogEntry::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.payload)));
- store32(regT3, Address(regT1, TypeProfilerLog::LogEntry::valueOffset() + OBJECT_OFFSETOF(JSValue, u.asBits.tag)));
-
- // Store the structureID of the cell if the argument is a cell; otherwise store 0 in the log entry.
- Jump notCell = branch32(NotEqual, regT3, TrustedImm32(JSValue::CellTag));
- load32(Address(regT0, JSCell::structureIDOffset()), regT0);
- store32(regT0, Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset()));
- Jump skipNotCell = jump();
- notCell.link(this);
- store32(TrustedImm32(0), Address(regT1, TypeProfilerLog::LogEntry::structureIDOffset()));
- skipNotCell.link(this);
-
- // Store the typeLocation on the log entry.
- move(TrustedImmPtr(cachedTypeLocation), regT0);
- store32(regT0, Address(regT1, TypeProfilerLog::LogEntry::locationOffset()));
-
- // Increment the current log entry.
- addPtr(TrustedImm32(sizeof(TypeProfilerLog::LogEntry)), regT1);
- store32(regT1, Address(regT2, TypeProfilerLog::currentLogEntryOffset()));
- jumpToEnd.append(branchPtr(NotEqual, regT1, TrustedImmPtr(cachedTypeProfilerLog->logEndPtr())));
- // Clear the log if we're at the end of the log.
- callOperation(operationProcessTypeProfilerLog);
-
- jumpToEnd.link(this);
+ skipArgumentsCreation.link(this);
+ emitLoad(arguments, regT1, regT0);
+ emitLoad(property, regT3, regT2);
+ callOperation(WithProfile, operationGetByValGeneric, dst, regT1, regT0, regT3, regT2);
}
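The op_profile_type body removed above is a bump-pointer log append: write the value, its structure ID (or 0 for non-cells), and the source location into the current entry, advance the cursor, and call out to flush when the cursor reaches the end of the log. A hedged sketch of the same data flow (hypothetical types, not the JSC TypeProfilerLog API):

    #include <cstdint>
    #include <vector>

    struct LogEntry { uint64_t value; uint32_t structureID; const void* location; };

    // Hypothetical bump-pointer profiler log with flush-on-full, mirroring the
    // emitted code: store the entry, advance, process the log when it fills.
    struct TypeProfilerLog {
        std::vector<LogEntry> entries = std::vector<LogEntry>(1024);
        LogEntry* cursor = entries.data();
        LogEntry* end = entries.data() + entries.size();

        void record(uint64_t value, uint32_t structureID, const void* location)
        {
            *cursor++ = { value, structureID, location };
            if (cursor == end) {
                // Stands in for operationProcessTypeProfilerLog().
                cursor = entries.data();
            }
        }
    };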
} // namespace JSC
diff --git a/Source/JavaScriptCore/jit/JITOperationWrappers.h b/Source/JavaScriptCore/jit/JITOperationWrappers.h
new file mode 100644
index 000000000..f9624fdbc
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITOperationWrappers.h
@@ -0,0 +1,413 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITOperationWrappers_h
+#define JITOperationWrappers_h
+
+#include "JITOperations.h"
+#include <wtf/Compiler.h>
+#include <wtf/InlineASM.h>
+
+#if COMPILER(MSVC)
+#include <intrin.h>
+#endif
+
+namespace JSC {
+
+#if CPU(MIPS)
+#if WTF_MIPS_PIC
+#define LOAD_FUNCTION_TO_T9(function) \
+ ".set noreorder" "\n" \
+ ".cpload $25" "\n" \
+ ".set reorder" "\n" \
+ "la $t9, " LOCAL_REFERENCE(function) "\n"
+#else
+#define LOAD_FUNCTION_TO_T9(function) "" "\n"
+#endif
+#endif
+
+#if COMPILER(GCC) && CPU(X86_64)
+
+#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, register) \
+ asm( \
+ ".globl " SYMBOL_STRING(function) "\n" \
+ HIDE_SYMBOL(function) "\n" \
+ SYMBOL_STRING(function) ":" "\n" \
+ "mov (%rsp), %" STRINGIZE(register) "\n" \
+ "jmp " LOCAL_REFERENCE(function##WithReturnAddress) "\n" \
+ );
+#define _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, rsi)
+#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, rsi)
+#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, rcx)
+#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, rcx)
+#define _V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJJI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, r8)
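For readers new to this pattern: on entry, (%rsp) holds the return address pushed by the caller's call, so each thunk copies it into the register of an extra trailing ReturnAddressPtr argument and tail-jumps to the ...WithReturnAddress variant. A hedged C++ approximation using the GCC/Clang builtin (illustrative names; the real wrappers tail-jump, so no extra frame is created):

    // Sketch only -- hypothetical declarations, not the JSC ones.
    extern "C" void* operationExampleWithReturnAddress(void* execState, void* returnAddress);

    extern "C" void* operationExample(void* execState)
    {
        // __builtin_return_address(0) yields the address this function will
        // return to -- the same value the asm wrapper loads from (%rsp).
        return operationExampleWithReturnAddress(execState, __builtin_return_address(0));
    }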
+
+#elif COMPILER(GCC) && CPU(X86)
+
+#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, offset) \
+ asm( \
+ ".text" "\n" \
+ ".globl " SYMBOL_STRING(function) "\n" \
+ HIDE_SYMBOL(function) "\n" \
+ SYMBOL_STRING(function) ":" "\n" \
+ "mov (%esp), %eax\n" \
+ "mov %eax, " STRINGIZE(offset) "(%esp)\n" \
+ "jmp " LOCAL_REFERENCE(function##WithReturnAddress) "\n" \
+ );
+#define _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, 8)
+#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, 8)
+#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, 16)
+#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, 20)
+#define _V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJJI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, 28)
+
+#elif CPU(ARM64)
+
+#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, register) \
+ asm ( \
+ ".text" "\n" \
+ ".align 2" "\n" \
+ ".globl " SYMBOL_STRING(function) "\n" \
+ HIDE_SYMBOL(function) "\n" \
+ SYMBOL_STRING(function) ":" "\n" \
+ "mov " STRINGIZE(register) ", lr" "\n" \
+ "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \
+ );
+
+#define _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, x1)
+#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, x1)
+#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, x3)
+#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, x3)
+#define _V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJCI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, x4)
+
+#elif COMPILER(GCC) && CPU(ARM_THUMB2)
+
+#define _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \
+ asm ( \
+ ".text" "\n" \
+ ".align 2" "\n" \
+ ".globl " SYMBOL_STRING(function) "\n" \
+ HIDE_SYMBOL(function) "\n" \
+ ".thumb" "\n" \
+ ".thumb_func " THUMB_FUNC_PARAM(function) "\n" \
+ SYMBOL_STRING(function) ":" "\n" \
+ "mov a2, lr" "\n" \
+ "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \
+ );
+
+#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \
+ _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function)
+
+#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) \
+ asm ( \
+ ".text" "\n" \
+ ".align 2" "\n" \
+ ".globl " SYMBOL_STRING(function) "\n" \
+ HIDE_SYMBOL(function) "\n" \
+ ".thumb" "\n" \
+ ".thumb_func " THUMB_FUNC_PARAM(function) "\n" \
+ SYMBOL_STRING(function) ":" "\n" \
+ "mov a4, lr" "\n" \
+ "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \
+ );
+
+// EncodedJSValue in JSVALUE32_64 is a 64-bit integer. When compiled for the ARM EABI, it must be aligned to an even-numbered register (r0, r2) or to an 8-byte-aligned stack slot ([sp]).
+// As a result, the return address will be stored 4 bytes further along in the following cases.
+#if COMPILER_SUPPORTS(EABI) && CPU(ARM)
+#define INSTRUCTION_STORE_RETURN_ADDRESS_EJI "str lr, [sp, #4]"
+#define INSTRUCTION_STORE_RETURN_ADDRESS_EJJI "str lr, [sp, #12]"
+#else
+#define INSTRUCTION_STORE_RETURN_ADDRESS_EJI "str lr, [sp, #0]"
+#define INSTRUCTION_STORE_RETURN_ADDRESS_EJJI "str lr, [sp, #8]"
+#endif
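The 4-byte shift follows directly from that alignment rule: one register of padding pushes the trailing arguments, and with them the first free stack slot where the wrapper parks lr, further out. A hedged sketch of the EJI case (hypothetical signature, not the JSC declaration):

    #include <cstdint>

    // Hypothetical EJI-shaped operation: (ExecState*, EncodedJSValue, StringImpl*).
    // ARM EABI register assignment for a call to this function:
    //   exec  -> r0
    //   value -> r2:r3    (64-bit, must start in an even register; r1 is padding)
    //   uid   -> [sp, #0] (no argument registers left)
    // so the first free stack slot -- where the wrapper stores lr -- is [sp, #4].
    // Without the alignment rule, value takes r1:r2, uid takes r3, and lr can go
    // to [sp, #0]: exactly the #if/#else split above.
    extern "C" uint64_t exampleOperation(void* exec, uint64_t value, void* uid);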
+
+#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) \
+ asm ( \
+ ".text" "\n" \
+ ".align 2" "\n" \
+ ".globl " SYMBOL_STRING(function) "\n" \
+ HIDE_SYMBOL(function) "\n" \
+ ".thumb" "\n" \
+ ".thumb_func " THUMB_FUNC_PARAM(function) "\n" \
+ SYMBOL_STRING(function) ":" "\n" \
+ INSTRUCTION_STORE_RETURN_ADDRESS_EJI "\n" \
+ "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \
+ );
+
+#define _V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJJI(function) \
+ asm ( \
+ ".text" "\n" \
+ ".align 2" "\n" \
+ ".globl " SYMBOL_STRING(function) "\n" \
+ HIDE_SYMBOL(function) "\n" \
+ ".thumb" "\n" \
+ ".thumb_func " THUMB_FUNC_PARAM(function) "\n" \
+ SYMBOL_STRING(function) ":" "\n" \
+ INSTRUCTION_STORE_RETURN_ADDRESS_EJJI "\n" \
+ "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \
+ );
+
+#elif COMPILER(GCC) && CPU(ARM_TRADITIONAL)
+
+#define _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \
+ asm ( \
+ ".text" "\n" \
+ ".globl " SYMBOL_STRING(function) "\n" \
+ HIDE_SYMBOL(function) "\n" \
+ INLINE_ARM_FUNCTION(function) \
+ SYMBOL_STRING(function) ":" "\n" \
+ "mov a2, lr" "\n" \
+ "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \
+ );
+
+#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \
+ _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function)
+
+#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) \
+ asm ( \
+ ".text" "\n" \
+ ".globl " SYMBOL_STRING(function) "\n" \
+ HIDE_SYMBOL(function) "\n" \
+ INLINE_ARM_FUNCTION(function) \
+ SYMBOL_STRING(function) ":" "\n" \
+ "mov a4, lr" "\n" \
+ "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \
+ );
+
+// EncodedJSValue in JSVALUE32_64 is a 64-bit integer. When compiled for the ARM EABI, it must be aligned to an even-numbered register (r0, r2) or to an 8-byte-aligned stack slot ([sp]).
+// As a result, the return address will be stored 4 bytes further along in the following cases.
+#if COMPILER_SUPPORTS(EABI) && CPU(ARM)
+#define INSTRUCTION_STORE_RETURN_ADDRESS_EJI "str lr, [sp, #4]"
+#define INSTRUCTION_STORE_RETURN_ADDRESS_EJJI "str lr, [sp, #12]"
+#else
+#define INSTRUCTION_STORE_RETURN_ADDRESS_EJI "str lr, [sp, #0]"
+#define INSTRUCTION_STORE_RETURN_ADDRESS_EJJI "str lr, [sp, #8]"
+#endif
+
+#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) \
+ asm ( \
+ ".text" "\n" \
+ ".globl " SYMBOL_STRING(function) "\n" \
+ HIDE_SYMBOL(function) "\n" \
+ INLINE_ARM_FUNCTION(function) \
+ SYMBOL_STRING(function) ":" "\n" \
+ INSTRUCTION_STORE_RETURN_ADDRESS_EJI "\n" \
+ "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \
+ );
+
+#define _V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJJI(function) \
+ asm ( \
+ ".text" "\n" \
+ ".globl " SYMBOL_STRING(function) "\n" \
+ HIDE_SYMBOL(function) "\n" \
+ INLINE_ARM_FUNCTION(function) \
+ SYMBOL_STRING(function) ":" "\n" \
+ INSTRUCTION_STORE_RETURN_ADDRESS_EJJI "\n" \
+ "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \
+ );
+
+#elif COMPILER(GCC) && CPU(MIPS)
+
+#define _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \
+ asm( \
+ ".text" "\n" \
+ ".globl " SYMBOL_STRING(function) "\n" \
+ HIDE_SYMBOL(function) "\n" \
+ SYMBOL_STRING(function) ":" "\n" \
+ LOAD_FUNCTION_TO_T9(function##WithReturnAddress) \
+ "move $a1, $ra" "\n" \
+ "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \
+ );
+
+#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \
+ _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function)
+
+#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) \
+ asm( \
+ ".text" "\n" \
+ ".globl " SYMBOL_STRING(function) "\n" \
+ HIDE_SYMBOL(function) "\n" \
+ SYMBOL_STRING(function) ":" "\n" \
+ LOAD_FUNCTION_TO_T9(function##WithReturnAddress) \
+ "move $a3, $ra" "\n" \
+ "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \
+ );
+
+#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) \
+ asm( \
+ ".text" "\n" \
+ ".globl " SYMBOL_STRING(function) "\n" \
+ HIDE_SYMBOL(function) "\n" \
+ SYMBOL_STRING(function) ":" "\n" \
+ LOAD_FUNCTION_TO_T9(function##WithReturnAddress) \
+ "sw $ra, 20($sp)" "\n" \
+ "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \
+ );
+
+#define _V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJJI(function) \
+ asm( \
+ ".text" "\n" \
+ ".globl " SYMBOL_STRING(function) "\n" \
+ HIDE_SYMBOL(function) "\n" \
+ SYMBOL_STRING(function) ":" "\n" \
+ LOAD_FUNCTION_TO_T9(function##WithReturnAddress) \
+ "sw $ra, 28($sp)" "\n" \
+ "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \
+ );
+
+#elif COMPILER(GCC) && CPU(SH4)
+
+#define SH4_SCRATCH_REGISTER "r11"
+
+#define _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \
+ asm( \
+ ".text" "\n" \
+ ".globl " SYMBOL_STRING(function) "\n" \
+ HIDE_SYMBOL(function) "\n" \
+ SYMBOL_STRING(function) ":" "\n" \
+ "sts pr, r5" "\n" \
+ "bra " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \
+ "nop" "\n" \
+ );
+
+#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \
+ _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function)
+
+#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) \
+ asm( \
+ ".text" "\n" \
+ ".globl " SYMBOL_STRING(function) "\n" \
+ HIDE_SYMBOL(function) "\n" \
+ SYMBOL_STRING(function) ":" "\n" \
+ "sts pr, r7" "\n" \
+ "mov.l 2f, " SH4_SCRATCH_REGISTER "\n" \
+ "braf " SH4_SCRATCH_REGISTER "\n" \
+ "nop" "\n" \
+ "1: .balign 4" "\n" \
+ "2: .long " LOCAL_REFERENCE(function) "WithReturnAddress-1b" "\n" \
+ );
+
+#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, offset, scratch) \
+ asm( \
+ ".text" "\n" \
+ ".globl " SYMBOL_STRING(function) "\n" \
+ HIDE_SYMBOL(function) "\n" \
+ SYMBOL_STRING(function) ":" "\n" \
+ "sts pr, " scratch "\n" \
+ "mov.l " scratch ", @(" STRINGIZE(offset) ", r15)" "\n" \
+ "mov.l 2f, " scratch "\n" \
+ "braf " scratch "\n" \
+ "nop" "\n" \
+ "1: .balign 4" "\n" \
+ "2: .long " LOCAL_REFERENCE(function) "WithReturnAddress-1b" "\n" \
+ );
+
+#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, 0, SH4_SCRATCH_REGISTER)
+#define _V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJJI(function) FUNCTION_WRAPPER_WITH_RETURN_ADDRESS(function, 8, SH4_SCRATCH_REGISTER)
+
+#elif COMPILER(MSVC) && CPU(X86)
+
+#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) \
+__declspec(naked) EncodedJSValue JIT_OPERATION function(ExecState*, EncodedJSValue, StringImpl*) \
+{ \
+ __asm { \
+ __asm mov eax, [esp] \
+ __asm mov [esp + 20], eax \
+ __asm jmp function##WithReturnAddress \
+ } \
+}
+
+#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) \
+__declspec(naked) EncodedJSValue JIT_OPERATION function(ExecState*, JSCell*, StringImpl*) \
+{ \
+ __asm { \
+ __asm mov eax, [esp] \
+ __asm mov [esp + 16], eax \
+ __asm jmp function##WithReturnAddress \
+ } \
+}
+
+#define _V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJCI(function) \
+__declspec(naked) void JIT_OPERATION function(ExecState*, EncodedJSValue, JSCell*, StringImpl*) \
+{ \
+ __asm { \
+ __asm mov eax, [esp] \
+ __asm mov [esp + 24], eax \
+ __asm jmp function##WithReturnAddress \
+ } \
+}
+
+#define _V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJJI(function) \
+__declspec(naked) void JIT_OPERATION function(ExecState*, EncodedJSValue, EncodedJSValue, StringImpl*) \
+{ \
+ __asm { \
+ __asm mov eax, [esp] \
+ __asm mov [esp + 28], eax \
+ __asm jmp function##WithReturnAddress \
+ } \
+}
+
+#elif COMPILER(MSVC)
+
+#define _P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \
+ void* JIT_OPERATION function(ExecState* exec) { return function##WithReturnAddress(exec, ReturnAddressPtr(*(void**)_AddressOfReturnAddress())); }
+
+#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \
+ EncodedJSValue JIT_OPERATION function(ExecState* exec) { return function##WithReturnAddress(exec, ReturnAddressPtr(*(void**)_AddressOfReturnAddress())); }
+
+#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) \
+ EncodedJSValue JIT_OPERATION function(ExecState* exec, JSCell* cell, StringImpl* string) { return function##WithReturnAddress(exec, cell, string, ReturnAddressPtr(*(void**)_AddressOfReturnAddress())); }
+
+#define _J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) \
+ EncodedJSValue JIT_OPERATION function(ExecState* exec, EncodedJSValue value, StringImpl* string) { return function##WithReturnAddress(exec, value, string, ReturnAddressPtr(*(void**)_AddressOfReturnAddress())); }
+
+#define _V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJJI(function) \
+ void JIT_OPERATION function(ExecState* exec, EncodedJSValue value, EncodedJSValue baseValue, StringImpl* string) { return function##WithReturnAddress(exec, value, baseValue, string, ReturnAddressPtr(*(void**)_AddressOfReturnAddress())); }
+
+#endif
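The MSVC fallbacks above rely on the _AddressOfReturnAddress intrinsic from <intrin.h>, which returns a pointer to the stack slot holding the current function's return address; dereferencing it recovers the same value the asm thunks read. A minimal hedged example (MSVC only):

    #include <intrin.h>
    #include <cstdio>

    // Returns the address this function will return to. Marked noinline so the
    // stack slot the intrinsic points at actually belongs to this function.
    __declspec(noinline) void* currentReturnAddress()
    {
        return *reinterpret_cast<void**>(_AddressOfReturnAddress());
    }

    int main()
    {
        std::printf("returning to %p\n", currentReturnAddress());
    }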
+
+#define P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \
+void* JIT_OPERATION function##WithReturnAddress(ExecState*, ReturnAddressPtr) REFERENCED_FROM_ASM WTF_INTERNAL; \
+_P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function)
+
+#define J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \
+EncodedJSValue JIT_OPERATION function##WithReturnAddress(ExecState*, ReturnAddressPtr) REFERENCED_FROM_ASM WTF_INTERNAL; \
+_J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function)
+
+#define J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) \
+EncodedJSValue JIT_OPERATION function##WithReturnAddress(ExecState*, JSCell*, StringImpl*, ReturnAddressPtr) REFERENCED_FROM_ASM WTF_INTERNAL; \
+_J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function)
+
+#define J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) \
+EncodedJSValue JIT_OPERATION function##WithReturnAddress(ExecState*, EncodedJSValue, StringImpl*, ReturnAddressPtr) REFERENCED_FROM_ASM WTF_INTERNAL; \
+_J_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function)
+
+#define V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJJI(function) \
+void JIT_OPERATION function##WithReturnAddress(ExecState*, EncodedJSValue, EncodedJSValue, StringImpl*, ReturnAddressPtr) REFERENCED_FROM_ASM WTF_INTERNAL; \
+_V_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJJI(function)
+
+} // namespace JSC
+
+#endif // JITOperationWrappers_h
+
diff --git a/Source/JavaScriptCore/jit/JITOperations.cpp b/Source/JavaScriptCore/jit/JITOperations.cpp
index 868eed755..578d15dac 100644
--- a/Source/JavaScriptCore/jit/JITOperations.cpp
+++ b/Source/JavaScriptCore/jit/JITOperations.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -24,43 +24,32 @@
*/
#include "config.h"
-#include "JITOperations.h"
-
#if ENABLE(JIT)
+#include "JITOperations.h"
+#include "Arguments.h"
#include "ArrayConstructor.h"
+#include "CallFrameInlines.h"
#include "CommonSlowPaths.h"
#include "DFGCompilationMode.h"
#include "DFGDriver.h"
#include "DFGOSREntry.h"
-#include "DFGThunks.h"
#include "DFGWorklist.h"
-#include "Debugger.h"
-#include "DirectArguments.h"
#include "Error.h"
-#include "ErrorHandlingScope.h"
-#include "ExceptionFuzz.h"
#include "GetterSetter.h"
#include "HostCallReturnValue.h"
#include "JIT.h"
-#include "JITExceptions.h"
+#include "JITOperationWrappers.h"
#include "JITToDFGDeferredCompilationCallback.h"
-#include "JSCInlines.h"
-#include "JSGeneratorFunction.h"
#include "JSGlobalObjectFunctions.h"
-#include "JSLexicalEnvironment.h"
-#include "JSPropertyNameEnumerator.h"
+#include "JSNameScope.h"
+#include "JSPropertyNameIterator.h"
#include "JSStackInlines.h"
#include "JSWithScope.h"
-#include "LegacyProfiler.h"
#include "ObjectConstructor.h"
-#include "PropertyName.h"
+#include "Operations.h"
#include "Repatch.h"
-#include "ScopedArguments.h"
-#include "TestRunnerUtils.h"
-#include "TypeProfilerLog.h"
-#include "VMInlines.h"
-#include <wtf/InlineASM.h>
+#include "RepatchBuffer.h"
namespace JSC {
@@ -82,56 +71,33 @@ void * _ReturnAddress(void);
#endif
-void JIT_OPERATION operationThrowStackOverflowError(ExecState* exec, CodeBlock* codeBlock)
+void JIT_OPERATION operationStackCheck(ExecState* exec, CodeBlock* codeBlock)
{
// We pass in our own code block, because the callframe hasn't been populated.
VM* vm = codeBlock->vm();
-
- VMEntryFrame* vmEntryFrame = vm->topVMEntryFrame;
- CallFrame* callerFrame = exec->callerFrame(vmEntryFrame);
+ CallFrame* callerFrame = exec->callerFrameSkippingVMEntrySentinel();
if (!callerFrame)
callerFrame = exec;
- NativeCallFrameTracerWithRestore tracer(vm, vmEntryFrame, callerFrame);
- throwStackOverflowError(callerFrame);
-}
+ NativeCallFrameTracer tracer(vm, callerFrame);
-#if ENABLE(WEBASSEMBLY)
-void JIT_OPERATION operationThrowDivideError(ExecState* exec)
-{
- VM* vm = &exec->vm();
- VMEntryFrame* vmEntryFrame = vm->topVMEntryFrame;
- CallFrame* callerFrame = exec->callerFrame(vmEntryFrame);
-
- NativeCallFrameTracerWithRestore tracer(vm, vmEntryFrame, callerFrame);
- ErrorHandlingScope errorScope(*vm);
- vm->throwException(callerFrame, createError(callerFrame, ASCIILiteral("Division by zero or division overflow.")));
-}
-
-void JIT_OPERATION operationThrowOutOfBoundsAccessError(ExecState* exec)
-{
- VM* vm = &exec->vm();
- VMEntryFrame* vmEntryFrame = vm->topVMEntryFrame;
- CallFrame* callerFrame = exec->callerFrame(vmEntryFrame);
+ JSStack& stack = vm->interpreter->stack();
- NativeCallFrameTracerWithRestore tracer(vm, vmEntryFrame, callerFrame);
- ErrorHandlingScope errorScope(*vm);
- vm->throwException(callerFrame, createError(callerFrame, ASCIILiteral("Out-of-bounds access.")));
+ if (UNLIKELY(!stack.grow(&exec->registers()[virtualRegisterForLocal(codeBlock->frameRegisterCount()).offset()])))
+ vm->throwException(callerFrame, createStackOverflowError(callerFrame));
}
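The check above is a plain software stack probe: compute the lowest register slot the new frame needs, ask the stack to grow down to it, and throw on failure. A hedged sketch of that shape (hypothetical Stack type, not JSC's JSStack):

    #include <cstddef>
    #include <stdexcept>

    // Hypothetical downward-growing software stack.
    struct Stack {
        char* limit; // lowest address we may ever use
        bool grow(char* newEnd) { return newEnd >= limit; } // a real VM commits memory here
    };

    void stackCheck(Stack& stack, char* frameTop, size_t frameRegisterCount)
    {
        // Mirrors operationStackCheck: newEnd is the frame's lowest register
        // slot; failure becomes a stack-overflow exception.
        char* newEnd = frameTop - frameRegisterCount * sizeof(void*);
        if (!stack.grow(newEnd))
            throw std::runtime_error("stack overflow");
    }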
-#endif
int32_t JIT_OPERATION operationCallArityCheck(ExecState* exec)
{
VM* vm = &exec->vm();
+ CallFrame* callerFrame = exec->callerFrameSkippingVMEntrySentinel();
+ NativeCallFrameTracer tracer(vm, callerFrame);
+
JSStack& stack = vm->interpreter->stack();
int32_t missingArgCount = CommonSlowPaths::arityCheckFor(exec, &stack, CodeForCall);
- if (missingArgCount < 0) {
- VMEntryFrame* vmEntryFrame = vm->topVMEntryFrame;
- CallFrame* callerFrame = exec->callerFrame(vmEntryFrame);
- NativeCallFrameTracerWithRestore tracer(vm, vmEntryFrame, callerFrame);
- throwStackOverflowError(callerFrame);
- }
+ if (missingArgCount < 0)
+ vm->throwException(callerFrame, createStackOverflowError(callerFrame));
return missingArgCount;
}
@@ -139,96 +105,105 @@ int32_t JIT_OPERATION operationCallArityCheck(ExecState* exec)
int32_t JIT_OPERATION operationConstructArityCheck(ExecState* exec)
{
VM* vm = &exec->vm();
+ CallFrame* callerFrame = exec->callerFrameSkippingVMEntrySentinel();
+ NativeCallFrameTracer tracer(vm, callerFrame);
+
JSStack& stack = vm->interpreter->stack();
int32_t missingArgCount = CommonSlowPaths::arityCheckFor(exec, &stack, CodeForConstruct);
- if (missingArgCount < 0) {
- VMEntryFrame* vmEntryFrame = vm->topVMEntryFrame;
- CallFrame* callerFrame = exec->callerFrame(vmEntryFrame);
- NativeCallFrameTracerWithRestore tracer(vm, vmEntryFrame, callerFrame);
- throwStackOverflowError(callerFrame);
- }
+ if (missingArgCount < 0)
+ vm->throwException(callerFrame, createStackOverflowError(callerFrame));
return missingArgCount;
}
-EncodedJSValue JIT_OPERATION operationGetById(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue base, UniquedStringImpl* uid)
+EncodedJSValue JIT_OPERATION operationGetById(ExecState* exec, StructureStubInfo*, EncodedJSValue base, StringImpl* uid)
{
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
- stubInfo->tookSlowPath = true;
-
JSValue baseValue = JSValue::decode(base);
- PropertySlot slot(baseValue, PropertySlot::InternalMethodType::Get);
- Identifier ident = Identifier::fromUid(vm, uid);
+ PropertySlot slot(baseValue);
+ Identifier ident(vm, uid);
return JSValue::encode(baseValue.get(exec, ident, slot));
}
-EncodedJSValue JIT_OPERATION operationGetByIdGeneric(ExecState* exec, EncodedJSValue base, UniquedStringImpl* uid)
+EncodedJSValue JIT_OPERATION operationGetByIdBuildList(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue base, StringImpl* uid)
{
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
-
+
+ Identifier ident(vm, uid);
+ AccessType accessType = static_cast<AccessType>(stubInfo->accessType);
+
JSValue baseValue = JSValue::decode(base);
- PropertySlot slot(baseValue, PropertySlot::InternalMethodType::Get);
- Identifier ident = Identifier::fromUid(vm, uid);
- return JSValue::encode(baseValue.get(exec, ident, slot));
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(exec, ident, slot);
+
+ if (accessType == static_cast<AccessType>(stubInfo->accessType))
+ buildGetByIDList(exec, baseValue, ident, slot, *stubInfo);
+
+ return JSValue::encode(result);
}
-EncodedJSValue JIT_OPERATION operationGetByIdOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue base, UniquedStringImpl* uid)
+EncodedJSValue JIT_OPERATION operationGetByIdOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue base, StringImpl* uid)
{
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
- Identifier ident = Identifier::fromUid(vm, uid);
+ Identifier ident = uid->isEmptyUnique() ? Identifier::from(PrivateName(uid)) : Identifier(vm, uid);
+ AccessType accessType = static_cast<AccessType>(stubInfo->accessType);
JSValue baseValue = JSValue::decode(base);
- PropertySlot slot(baseValue, PropertySlot::InternalMethodType::Get);
-
- bool hasResult = baseValue.getPropertySlot(exec, ident, slot);
- if (stubInfo->considerCaching())
- repatchGetByID(exec, baseValue, ident, slot, *stubInfo);
+ PropertySlot slot(baseValue);
+ JSValue result = baseValue.get(exec, ident, slot);
- return JSValue::encode(hasResult? slot.getValue(exec, ident) : jsUndefined());
+ if (accessType == static_cast<AccessType>(stubInfo->accessType)) {
+ if (stubInfo->seen)
+ repatchGetByID(exec, baseValue, ident, slot, *stubInfo);
+ else
+ stubInfo->seen = true;
+ }
+
+ return JSValue::encode(result);
}
-EncodedJSValue JIT_OPERATION operationInOptimize(ExecState* exec, StructureStubInfo* stubInfo, JSCell* base, UniquedStringImpl* key)
+EncodedJSValue JIT_OPERATION operationInOptimize(ExecState* exec, StructureStubInfo* stubInfo, JSCell* base, StringImpl* key)
{
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
if (!base->isObject()) {
- vm->throwException(exec, createInvalidInParameterError(exec, base));
+ vm->throwException(exec, createInvalidParameterError(exec, "in", base));
return JSValue::encode(jsUndefined());
}
AccessType accessType = static_cast<AccessType>(stubInfo->accessType);
- Identifier ident = Identifier::fromUid(vm, key);
- PropertySlot slot(base, PropertySlot::InternalMethodType::HasProperty);
+ Identifier ident(vm, key);
+ PropertySlot slot(base);
bool result = asObject(base)->getPropertySlot(exec, ident, slot);
RELEASE_ASSERT(accessType == stubInfo->accessType);
- if (stubInfo->considerCaching())
+ if (stubInfo->seen)
repatchIn(exec, base, ident, result, slot, *stubInfo);
+ else
+ stubInfo->seen = true;
return JSValue::encode(jsBoolean(result));
}
-EncodedJSValue JIT_OPERATION operationIn(ExecState* exec, StructureStubInfo* stubInfo, JSCell* base, UniquedStringImpl* key)
+EncodedJSValue JIT_OPERATION operationIn(ExecState* exec, StructureStubInfo*, JSCell* base, StringImpl* key)
{
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
-
- stubInfo->tookSlowPath = true;
if (!base->isObject()) {
- vm->throwException(exec, createInvalidInParameterError(exec, base));
+ vm->throwException(exec, createInvalidParameterError(exec, "in", base));
return JSValue::encode(jsUndefined());
}
- Identifier ident = Identifier::fromUid(vm, key);
+ Identifier ident(vm, key);
return JSValue::encode(jsBoolean(asObject(base)->hasProperty(exec, ident)));
}
@@ -240,140 +215,234 @@ EncodedJSValue JIT_OPERATION operationGenericIn(ExecState* exec, JSCell* base, E
return JSValue::encode(jsBoolean(CommonSlowPaths::opIn(exec, JSValue::decode(key), base)));
}
-void JIT_OPERATION operationPutByIdStrict(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid)
+EncodedJSValue JIT_OPERATION operationCallCustomGetter(ExecState* exec, JSCell* base, PropertySlot::GetValueFunc function, StringImpl* uid)
{
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
- stubInfo->tookSlowPath = true;
+ Identifier ident(vm, uid);
- Identifier ident = Identifier::fromUid(vm, uid);
- PutPropertySlot slot(JSValue::decode(encodedBase), true, exec->codeBlock()->putByIdContext());
- JSValue::decode(encodedBase).putInline(exec, ident, JSValue::decode(encodedValue), slot);
+ return function(exec, JSValue::encode(base), JSValue::encode(base), ident);
+}
+
+EncodedJSValue JIT_OPERATION operationCallGetter(ExecState* exec, JSCell* base, JSCell* getterSetter)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ return JSValue::encode(callGetter(exec, base, getterSetter));
}
-void JIT_OPERATION operationPutByIdNonStrict(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid)
+void JIT_OPERATION operationPutByIdStrict(ExecState* exec, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid)
{
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
- stubInfo->tookSlowPath = true;
+ Identifier ident(vm, uid);
+ PutPropertySlot slot(JSValue::decode(encodedBase), true, exec->codeBlock()->putByIdContext());
+ JSValue::decode(encodedBase).put(exec, ident, JSValue::decode(encodedValue), slot);
+}
+
+void JIT_OPERATION operationPutByIdNonStrict(ExecState* exec, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
- Identifier ident = Identifier::fromUid(vm, uid);
+ Identifier ident(vm, uid);
PutPropertySlot slot(JSValue::decode(encodedBase), false, exec->codeBlock()->putByIdContext());
- JSValue::decode(encodedBase).putInline(exec, ident, JSValue::decode(encodedValue), slot);
+ JSValue::decode(encodedBase).put(exec, ident, JSValue::decode(encodedValue), slot);
}
-void JIT_OPERATION operationPutByIdDirectStrict(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid)
+void JIT_OPERATION operationPutByIdDirectStrict(ExecState* exec, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid)
{
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
- stubInfo->tookSlowPath = true;
-
- Identifier ident = Identifier::fromUid(vm, uid);
+ Identifier ident(vm, uid);
PutPropertySlot slot(JSValue::decode(encodedBase), true, exec->codeBlock()->putByIdContext());
asObject(JSValue::decode(encodedBase))->putDirect(exec->vm(), ident, JSValue::decode(encodedValue), slot);
}
-void JIT_OPERATION operationPutByIdDirectNonStrict(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid)
+void JIT_OPERATION operationPutByIdDirectNonStrict(ExecState* exec, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid)
{
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
- stubInfo->tookSlowPath = true;
-
- Identifier ident = Identifier::fromUid(vm, uid);
+ Identifier ident(vm, uid);
PutPropertySlot slot(JSValue::decode(encodedBase), false, exec->codeBlock()->putByIdContext());
asObject(JSValue::decode(encodedBase))->putDirect(exec->vm(), ident, JSValue::decode(encodedValue), slot);
}
-void JIT_OPERATION operationPutByIdStrictOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid)
+void JIT_OPERATION operationPutByIdStrictOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid)
{
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
- Identifier ident = Identifier::fromUid(vm, uid);
+ Identifier ident(vm, uid);
AccessType accessType = static_cast<AccessType>(stubInfo->accessType);
JSValue value = JSValue::decode(encodedValue);
JSValue baseValue = JSValue::decode(encodedBase);
PutPropertySlot slot(baseValue, true, exec->codeBlock()->putByIdContext());
-
- Structure* structure = baseValue.isCell() ? baseValue.asCell()->structure(*vm) : nullptr;
- baseValue.putInline(exec, ident, value, slot);
+
+ baseValue.put(exec, ident, value, slot);
if (accessType != static_cast<AccessType>(stubInfo->accessType))
return;
- if (stubInfo->considerCaching())
- repatchPutByID(exec, baseValue, structure, ident, slot, *stubInfo, NotDirect);
+ if (stubInfo->seen)
+ repatchPutByID(exec, baseValue, ident, slot, *stubInfo, NotDirect);
+ else
+ stubInfo->seen = true;
}
-void JIT_OPERATION operationPutByIdNonStrictOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid)
+void JIT_OPERATION operationPutByIdNonStrictOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid)
{
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
- Identifier ident = Identifier::fromUid(vm, uid);
+ Identifier ident(vm, uid);
AccessType accessType = static_cast<AccessType>(stubInfo->accessType);
JSValue value = JSValue::decode(encodedValue);
JSValue baseValue = JSValue::decode(encodedBase);
PutPropertySlot slot(baseValue, false, exec->codeBlock()->putByIdContext());
-
- Structure* structure = baseValue.isCell() ? baseValue.asCell()->structure(*vm) : nullptr;
- baseValue.putInline(exec, ident, value, slot);
+
+ baseValue.put(exec, ident, value, slot);
if (accessType != static_cast<AccessType>(stubInfo->accessType))
return;
- if (stubInfo->considerCaching())
- repatchPutByID(exec, baseValue, structure, ident, slot, *stubInfo, NotDirect);
+ if (stubInfo->seen)
+ repatchPutByID(exec, baseValue, ident, slot, *stubInfo, NotDirect);
+ else
+ stubInfo->seen = true;
}
-void JIT_OPERATION operationPutByIdDirectStrictOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid)
+void JIT_OPERATION operationPutByIdDirectStrictOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid)
{
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
- Identifier ident = Identifier::fromUid(vm, uid);
+ Identifier ident(vm, uid);
AccessType accessType = static_cast<AccessType>(stubInfo->accessType);
JSValue value = JSValue::decode(encodedValue);
JSObject* baseObject = asObject(JSValue::decode(encodedBase));
PutPropertySlot slot(baseObject, true, exec->codeBlock()->putByIdContext());
- Structure* structure = baseObject->structure(*vm);
baseObject->putDirect(exec->vm(), ident, value, slot);
if (accessType != static_cast<AccessType>(stubInfo->accessType))
return;
- if (stubInfo->considerCaching())
- repatchPutByID(exec, baseObject, structure, ident, slot, *stubInfo, Direct);
+ if (stubInfo->seen)
+ repatchPutByID(exec, baseObject, ident, slot, *stubInfo, Direct);
+ else
+ stubInfo->seen = true;
}
-void JIT_OPERATION operationPutByIdDirectNonStrictOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl* uid)
+void JIT_OPERATION operationPutByIdDirectNonStrictOptimize(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid)
{
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
- Identifier ident = Identifier::fromUid(vm, uid);
+ Identifier ident(vm, uid);
AccessType accessType = static_cast<AccessType>(stubInfo->accessType);
JSValue value = JSValue::decode(encodedValue);
JSObject* baseObject = asObject(JSValue::decode(encodedBase));
PutPropertySlot slot(baseObject, false, exec->codeBlock()->putByIdContext());
- Structure* structure = baseObject->structure(*vm);
baseObject->putDirect(exec->vm(), ident, value, slot);
if (accessType != static_cast<AccessType>(stubInfo->accessType))
return;
- if (stubInfo->considerCaching())
- repatchPutByID(exec, baseObject, structure, ident, slot, *stubInfo, Direct);
+ if (stubInfo->seen)
+ repatchPutByID(exec, baseObject, ident, slot, *stubInfo, Direct);
+ else
+ stubInfo->seen = true;
+}
+
+void JIT_OPERATION operationPutByIdStrictBuildList(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ Identifier ident(vm, uid);
+ AccessType accessType = static_cast<AccessType>(stubInfo->accessType);
+
+ JSValue value = JSValue::decode(encodedValue);
+ JSValue baseValue = JSValue::decode(encodedBase);
+ PutPropertySlot slot(baseValue, true, exec->codeBlock()->putByIdContext());
+
+ baseValue.put(exec, ident, value, slot);
+
+ if (accessType != static_cast<AccessType>(stubInfo->accessType))
+ return;
+
+ buildPutByIdList(exec, baseValue, ident, slot, *stubInfo, NotDirect);
+}
+
+void JIT_OPERATION operationPutByIdNonStrictBuildList(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ Identifier ident(vm, uid);
+ AccessType accessType = static_cast<AccessType>(stubInfo->accessType);
+
+ JSValue value = JSValue::decode(encodedValue);
+ JSValue baseValue = JSValue::decode(encodedBase);
+ PutPropertySlot slot(baseValue, false, exec->codeBlock()->putByIdContext());
+
+ baseValue.put(exec, ident, value, slot);
+
+ if (accessType != static_cast<AccessType>(stubInfo->accessType))
+ return;
+
+ buildPutByIdList(exec, baseValue, ident, slot, *stubInfo, NotDirect);
+}
+
+void JIT_OPERATION operationPutByIdDirectStrictBuildList(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ Identifier ident(vm, uid);
+ AccessType accessType = static_cast<AccessType>(stubInfo->accessType);
+
+ JSValue value = JSValue::decode(encodedValue);
+ JSObject* baseObject = asObject(JSValue::decode(encodedBase));
+ PutPropertySlot slot(baseObject, true, exec->codeBlock()->putByIdContext());
+
+ baseObject->putDirect(exec->vm(), ident, value, slot);
+
+ if (accessType != static_cast<AccessType>(stubInfo->accessType))
+ return;
+
+ buildPutByIdList(exec, baseObject, ident, slot, *stubInfo, Direct);
+}
+
+void JIT_OPERATION operationPutByIdDirectNonStrictBuildList(ExecState* exec, StructureStubInfo* stubInfo, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl* uid)
+{
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
+
+ Identifier ident(vm, uid);
+ AccessType accessType = static_cast<AccessType>(stubInfo->accessType);
+
+ JSValue value = JSValue::decode(encodedValue);
+ JSObject* baseObject = asObject(JSValue::decode(encodedBase));
+ PutPropertySlot slot(baseObject, false, exec->codeBlock()->putByIdContext());
+
+ baseObject->putDirect(exec->vm(), ident, value, slot);
+
+ if (accessType != static_cast<AccessType>(stubInfo->accessType))
+ return;
+
+ buildPutByIdList(exec, baseObject, ident, slot, *stubInfo, Direct);
}
void JIT_OPERATION operationReallocateStorageAndFinishPut(ExecState* exec, JSObject* base, Structure* structure, PropertyOffset offset, EncodedJSValue value)
@@ -381,273 +450,144 @@ void JIT_OPERATION operationReallocateStorageAndFinishPut(ExecState* exec, JSObj
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
- ASSERT(structure->outOfLineCapacity() > base->structure(vm)->outOfLineCapacity());
+ ASSERT(structure->outOfLineCapacity() > base->structure()->outOfLineCapacity());
ASSERT(!vm.heap.storageAllocator().fastPathShouldSucceed(structure->outOfLineCapacity() * sizeof(JSValue)));
base->setStructureAndReallocateStorageIfNecessary(vm, structure);
base->putDirect(vm, offset, JSValue::decode(value));
}
-ALWAYS_INLINE static bool isStringOrSymbol(JSValue value)
-{
- return value.isString() || value.isSymbol();
-}
-
-static void putByVal(CallFrame* callFrame, JSValue baseValue, JSValue subscript, JSValue value, ByValInfo* byValInfo)
+static void putByVal(CallFrame* callFrame, JSValue baseValue, JSValue subscript, JSValue value)
{
- VM& vm = callFrame->vm();
if (LIKELY(subscript.isUInt32())) {
- byValInfo->tookSlowPath = true;
uint32_t i = subscript.asUInt32();
if (baseValue.isObject()) {
JSObject* object = asObject(baseValue);
if (object->canSetIndexQuickly(i))
object->setIndexQuickly(callFrame->vm(), i, value);
- else {
- // FIXME: This will make us think that in-bounds typed array accesses are actually
- // out-of-bounds.
- // https://bugs.webkit.org/show_bug.cgi?id=149886
- byValInfo->arrayProfile->setOutOfBounds();
- object->methodTable(vm)->putByIndex(object, callFrame, i, value, callFrame->codeBlock()->isStrictMode());
- }
+ else
+ object->methodTable()->putByIndex(object, callFrame, i, value, callFrame->codeBlock()->isStrictMode());
} else
baseValue.putByIndex(callFrame, i, value, callFrame->codeBlock()->isStrictMode());
- return;
+ } else if (isName(subscript)) {
+ PutPropertySlot slot(baseValue, callFrame->codeBlock()->isStrictMode());
+ baseValue.put(callFrame, jsCast<NameInstance*>(subscript.asCell())->privateName(), value, slot);
+ } else {
+ Identifier property(callFrame, subscript.toString(callFrame)->value(callFrame));
+ if (!callFrame->vm().exception()) { // Don't put to an object if toString threw an exception.
+ PutPropertySlot slot(baseValue, callFrame->codeBlock()->isStrictMode());
+ baseValue.put(callFrame, property, value, slot);
+ }
}
-
- auto property = subscript.toPropertyKey(callFrame);
- // Don't put to an object if toString threw an exception.
- if (callFrame->vm().exception())
- return;
-
- if (byValInfo->stubInfo && (!isStringOrSymbol(subscript) || byValInfo->cachedId != property))
- byValInfo->tookSlowPath = true;
-
- PutPropertySlot slot(baseValue, callFrame->codeBlock()->isStrictMode());
- baseValue.putInline(callFrame, property, value, slot);
}
-static void directPutByVal(CallFrame* callFrame, JSObject* baseObject, JSValue subscript, JSValue value, ByValInfo* byValInfo)
+static void directPutByVal(CallFrame* callFrame, JSObject* baseObject, JSValue subscript, JSValue value)
{
- bool isStrictMode = callFrame->codeBlock()->isStrictMode();
if (LIKELY(subscript.isUInt32())) {
- // Despite its name, JSValue::isUInt32 will return true only for positive boxed int32_t; all those values are valid array indices.
- byValInfo->tookSlowPath = true;
- uint32_t index = subscript.asUInt32();
- ASSERT(isIndex(index));
- if (baseObject->canSetIndexQuicklyForPutDirect(index)) {
- baseObject->setIndexQuickly(callFrame->vm(), index, value);
- return;
- }
-
- // FIXME: This will make us think that in-bounds typed array accesses are actually
- // out-of-bounds.
- // https://bugs.webkit.org/show_bug.cgi?id=149886
- byValInfo->arrayProfile->setOutOfBounds();
- baseObject->putDirectIndex(callFrame, index, value, 0, isStrictMode ? PutDirectIndexShouldThrow : PutDirectIndexShouldNotThrow);
- return;
- }
-
- if (subscript.isDouble()) {
- double subscriptAsDouble = subscript.asDouble();
- uint32_t subscriptAsUInt32 = static_cast<uint32_t>(subscriptAsDouble);
- if (subscriptAsDouble == subscriptAsUInt32 && isIndex(subscriptAsUInt32)) {
- byValInfo->tookSlowPath = true;
- baseObject->putDirectIndex(callFrame, subscriptAsUInt32, value, 0, isStrictMode ? PutDirectIndexShouldThrow : PutDirectIndexShouldNotThrow);
- return;
+ uint32_t i = subscript.asUInt32();
+ baseObject->putDirectIndex(callFrame, i, value);
+ } else if (isName(subscript)) {
+ PutPropertySlot slot(baseObject, callFrame->codeBlock()->isStrictMode());
+ baseObject->putDirect(callFrame->vm(), jsCast<NameInstance*>(subscript.asCell())->privateName(), value, slot);
+ } else {
+ Identifier property(callFrame, subscript.toString(callFrame)->value(callFrame));
+ if (!callFrame->vm().exception()) { // Don't put to an object if toString threw an exception.
+ PutPropertySlot slot(baseObject, callFrame->codeBlock()->isStrictMode());
+ baseObject->putDirect(callFrame->vm(), property, value, slot);
}
}
-
- // Don't put to an object if toString threw an exception.
- auto property = subscript.toPropertyKey(callFrame);
- if (callFrame->vm().exception())
- return;
-
- if (Optional<uint32_t> index = parseIndex(property)) {
- byValInfo->tookSlowPath = true;
- baseObject->putDirectIndex(callFrame, index.value(), value, 0, isStrictMode ? PutDirectIndexShouldThrow : PutDirectIndexShouldNotThrow);
- return;
- }
-
- if (byValInfo->stubInfo && (!isStringOrSymbol(subscript) || byValInfo->cachedId != property))
- byValInfo->tookSlowPath = true;
-
- PutPropertySlot slot(baseObject, isStrictMode);
- baseObject->putDirect(callFrame->vm(), property, value, slot);
}
-
-enum class OptimizationResult {
- NotOptimized,
- SeenOnce,
- Optimized,
- GiveUp,
-};
-
-static OptimizationResult tryPutByValOptimize(ExecState* exec, JSValue baseValue, JSValue subscript, ByValInfo* byValInfo, ReturnAddressPtr returnAddress)
+void JIT_OPERATION operationPutByVal(ExecState* exec, EncodedJSValue encodedBaseValue, EncodedJSValue encodedSubscript, EncodedJSValue encodedValue)
{
- // See if it's worth optimizing at all.
- OptimizationResult optimizationResult = OptimizationResult::NotOptimized;
-
VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ JSValue baseValue = JSValue::decode(encodedBaseValue);
+ JSValue subscript = JSValue::decode(encodedSubscript);
+ JSValue value = JSValue::decode(encodedValue);
if (baseValue.isObject() && subscript.isInt32()) {
+ // See if it's worth optimizing at all.
JSObject* object = asObject(baseValue);
+ bool didOptimize = false;
- ASSERT(exec->bytecodeOffset());
- ASSERT(!byValInfo->stubRoutine);
+ unsigned bytecodeOffset = exec->locationAsBytecodeOffset();
+ ASSERT(bytecodeOffset);
+ ByValInfo& byValInfo = exec->codeBlock()->getByValInfo(bytecodeOffset - 1);
+ ASSERT(!byValInfo.stubRoutine);
- Structure* structure = object->structure(vm);
- if (hasOptimizableIndexing(structure)) {
+ if (hasOptimizableIndexing(object->structure())) {
// Attempt to optimize.
- JITArrayMode arrayMode = jitArrayModeForStructure(structure);
- if (jitArrayModePermitsPut(arrayMode) && arrayMode != byValInfo->arrayMode) {
- CodeBlock* codeBlock = exec->codeBlock();
- ConcurrentJITLocker locker(codeBlock->m_lock);
- byValInfo->arrayProfile->computeUpdatedPrediction(locker, codeBlock, structure);
-
- JIT::compilePutByVal(&vm, exec->codeBlock(), byValInfo, returnAddress, arrayMode);
- optimizationResult = OptimizationResult::Optimized;
+ JITArrayMode arrayMode = jitArrayModeForStructure(object->structure());
+ if (arrayMode != byValInfo.arrayMode) {
+ JIT::compilePutByVal(&vm, exec->codeBlock(), &byValInfo, ReturnAddressPtr(OUR_RETURN_ADDRESS), arrayMode);
+ didOptimize = true;
}
}
- // If we failed to patch and we have some object that intercepts indexed get, then don't even wait until 10 times.
- if (optimizationResult != OptimizationResult::Optimized && object->structure(vm)->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero())
- optimizationResult = OptimizationResult::GiveUp;
- }
-
- if (baseValue.isObject() && isStringOrSymbol(subscript)) {
- const Identifier propertyName = subscript.toPropertyKey(exec);
- if (!subscript.isString() || !parseIndex(propertyName)) {
- ASSERT(exec->bytecodeOffset());
- ASSERT(!byValInfo->stubRoutine);
- if (byValInfo->seen) {
- if (byValInfo->cachedId == propertyName) {
- JIT::compilePutByValWithCachedId(&vm, exec->codeBlock(), byValInfo, returnAddress, NotDirect, propertyName);
- optimizationResult = OptimizationResult::Optimized;
- } else {
- // Seems like a generic property access site.
- optimizationResult = OptimizationResult::GiveUp;
- }
- } else {
- byValInfo->seen = true;
- byValInfo->cachedId = propertyName;
- optimizationResult = OptimizationResult::SeenOnce;
+ if (!didOptimize) {
+ // If we take the slow path more than 10 times without patching then make sure we
+ // never make that mistake again. Or, if we failed to patch and we have some object
+ // that intercepts indexed get, then don't even wait until 10 times. For cases
+ // where we see non-index-intercepting objects, this gives 10 iterations worth of
+ // opportunity for us to observe that the put_by_val may be polymorphic.
+ if (++byValInfo.slowPathCount >= 10
+ || object->structure()->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero()) {
+ // Don't ever try to optimize.
+ RepatchBuffer repatchBuffer(exec->codeBlock());
+ repatchBuffer.relinkCallerToFunction(ReturnAddressPtr(OUR_RETURN_ADDRESS), FunctionPtr(operationPutByValGeneric));
}
}
}
- if (optimizationResult != OptimizationResult::Optimized && optimizationResult != OptimizationResult::SeenOnce) {
- // If we take slow path more than 10 times without patching then make sure we
- // never make that mistake again. For cases where we see non-index-intercepting
- // objects, this gives 10 iterations worth of opportunity for us to observe
- // that the put_by_val may be polymorphic. We count up slowPathCount even if
- // the result is GiveUp.
- if (++byValInfo->slowPathCount >= 10)
- optimizationResult = OptimizationResult::GiveUp;
- }
-
- return optimizationResult;
+ putByVal(exec, baseValue, subscript, value);
}
-void JIT_OPERATION operationPutByValOptimize(ExecState* exec, EncodedJSValue encodedBaseValue, EncodedJSValue encodedSubscript, EncodedJSValue encodedValue, ByValInfo* byValInfo)
+void JIT_OPERATION operationDirectPutByVal(ExecState* callFrame, EncodedJSValue encodedBaseValue, EncodedJSValue encodedSubscript, EncodedJSValue encodedValue)
{
- VM& vm = exec->vm();
- NativeCallFrameTracer tracer(&vm, exec);
-
+ VM& vm = callFrame->vm();
+ NativeCallFrameTracer tracer(&vm, callFrame);
+
JSValue baseValue = JSValue::decode(encodedBaseValue);
JSValue subscript = JSValue::decode(encodedSubscript);
JSValue value = JSValue::decode(encodedValue);
- if (tryPutByValOptimize(exec, baseValue, subscript, byValInfo, ReturnAddressPtr(OUR_RETURN_ADDRESS)) == OptimizationResult::GiveUp) {
- // Don't ever try to optimize.
- byValInfo->tookSlowPath = true;
- ctiPatchCallByReturnAddress(ReturnAddressPtr(OUR_RETURN_ADDRESS), FunctionPtr(operationPutByValGeneric));
- }
- putByVal(exec, baseValue, subscript, value, byValInfo);
-}
-
-static OptimizationResult tryDirectPutByValOptimize(ExecState* exec, JSObject* object, JSValue subscript, ByValInfo* byValInfo, ReturnAddressPtr returnAddress)
-{
- // See if it's worth optimizing at all.
- OptimizationResult optimizationResult = OptimizationResult::NotOptimized;
-
- VM& vm = exec->vm();
-
+ RELEASE_ASSERT(baseValue.isObject());
+ JSObject* object = asObject(baseValue);
if (subscript.isInt32()) {
- ASSERT(exec->bytecodeOffset());
- ASSERT(!byValInfo->stubRoutine);
-
- Structure* structure = object->structure(vm);
- if (hasOptimizableIndexing(structure)) {
+ // See if it's worth optimizing at all.
+ bool didOptimize = false;
+
+ unsigned bytecodeOffset = callFrame->locationAsBytecodeOffset();
+ ASSERT(bytecodeOffset);
+ ByValInfo& byValInfo = callFrame->codeBlock()->getByValInfo(bytecodeOffset - 1);
+ ASSERT(!byValInfo.stubRoutine);
+
+ if (hasOptimizableIndexing(object->structure())) {
// Attempt to optimize.
- JITArrayMode arrayMode = jitArrayModeForStructure(structure);
- if (jitArrayModePermitsPut(arrayMode) && arrayMode != byValInfo->arrayMode) {
- CodeBlock* codeBlock = exec->codeBlock();
- ConcurrentJITLocker locker(codeBlock->m_lock);
- byValInfo->arrayProfile->computeUpdatedPrediction(locker, codeBlock, structure);
-
- JIT::compileDirectPutByVal(&vm, exec->codeBlock(), byValInfo, returnAddress, arrayMode);
- optimizationResult = OptimizationResult::Optimized;
+ JITArrayMode arrayMode = jitArrayModeForStructure(object->structure());
+ if (arrayMode != byValInfo.arrayMode) {
+ JIT::compileDirectPutByVal(&vm, callFrame->codeBlock(), &byValInfo, ReturnAddressPtr(OUR_RETURN_ADDRESS), arrayMode);
+ didOptimize = true;
}
}
-
- // If we failed to patch and we have some object that intercepts indexed get, then don't even wait until 10 times.
- if (optimizationResult != OptimizationResult::Optimized && object->structure(vm)->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero())
- optimizationResult = OptimizationResult::GiveUp;
- } else if (isStringOrSymbol(subscript)) {
- const Identifier propertyName = subscript.toPropertyKey(exec);
- Optional<uint32_t> index = parseIndex(propertyName);
-
- if (!subscript.isString() || !index) {
- ASSERT(exec->bytecodeOffset());
- ASSERT(!byValInfo->stubRoutine);
- if (byValInfo->seen) {
- if (byValInfo->cachedId == propertyName) {
- JIT::compilePutByValWithCachedId(&vm, exec->codeBlock(), byValInfo, returnAddress, Direct, propertyName);
- optimizationResult = OptimizationResult::Optimized;
- } else {
- // Seems like a generic property access site.
- optimizationResult = OptimizationResult::GiveUp;
- }
- } else {
- byValInfo->seen = true;
- byValInfo->cachedId = propertyName;
- optimizationResult = OptimizationResult::SeenOnce;
+
+ if (!didOptimize) {
+ // If we take slow path more than 10 times without patching then make sure we
+ // never make that mistake again. Or, if we failed to patch and we have some object
+ // that intercepts indexed get, then don't even wait until 10 times. For cases
+ // where we see non-index-intercepting objects, this gives 10 iterations worth of
+ // opportunity for us to observe that the put_by_val may be polymorphic.
+ if (++byValInfo.slowPathCount >= 10
+ || object->structure()->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero()) {
+ // Don't ever try to optimize.
+ RepatchBuffer repatchBuffer(callFrame->codeBlock());
+ repatchBuffer.relinkCallerToFunction(ReturnAddressPtr(OUR_RETURN_ADDRESS), FunctionPtr(operationDirectPutByValGeneric));
}
}
}
-
- if (optimizationResult != OptimizationResult::Optimized && optimizationResult != OptimizationResult::SeenOnce) {
- // If we take slow path more than 10 times without patching then make sure we
- // never make that mistake again. For cases where we see non-index-intercepting
- // objects, this gives 10 iterations worth of opportunity for us to observe
- // that the get_by_val may be polymorphic. We count up slowPathCount even if
- // the result is GiveUp.
- if (++byValInfo->slowPathCount >= 10)
- optimizationResult = OptimizationResult::GiveUp;
- }
-
- return optimizationResult;
-}
-
-void JIT_OPERATION operationDirectPutByValOptimize(ExecState* exec, EncodedJSValue encodedBaseValue, EncodedJSValue encodedSubscript, EncodedJSValue encodedValue, ByValInfo* byValInfo)
-{
- VM& vm = exec->vm();
- NativeCallFrameTracer tracer(&vm, exec);
-
- JSValue baseValue = JSValue::decode(encodedBaseValue);
- JSValue subscript = JSValue::decode(encodedSubscript);
- JSValue value = JSValue::decode(encodedValue);
- RELEASE_ASSERT(baseValue.isObject());
- JSObject* object = asObject(baseValue);
- if (tryDirectPutByValOptimize(exec, object, subscript, byValInfo, ReturnAddressPtr(OUR_RETURN_ADDRESS)) == OptimizationResult::GiveUp) {
- // Don't ever try to optimize.
- byValInfo->tookSlowPath = true;
- ctiPatchCallByReturnAddress(ReturnAddressPtr(OUR_RETURN_ADDRESS), FunctionPtr(operationDirectPutByValGeneric));
- }
-
- directPutByVal(exec, object, subscript, value, byValInfo);
+ directPutByVal(callFrame, object, subscript, value);
}
-void JIT_OPERATION operationPutByValGeneric(ExecState* exec, EncodedJSValue encodedBaseValue, EncodedJSValue encodedSubscript, EncodedJSValue encodedValue, ByValInfo* byValInfo)
+void JIT_OPERATION operationPutByValGeneric(ExecState* exec, EncodedJSValue encodedBaseValue, EncodedJSValue encodedSubscript, EncodedJSValue encodedValue)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
@@ -656,11 +596,11 @@ void JIT_OPERATION operationPutByValGeneric(ExecState* exec, EncodedJSValue enco
JSValue subscript = JSValue::decode(encodedSubscript);
JSValue value = JSValue::decode(encodedValue);
- putByVal(exec, baseValue, subscript, value, byValInfo);
+ putByVal(exec, baseValue, subscript, value);
}
-void JIT_OPERATION operationDirectPutByValGeneric(ExecState* exec, EncodedJSValue encodedBaseValue, EncodedJSValue encodedSubscript, EncodedJSValue encodedValue, ByValInfo* byValInfo)
+void JIT_OPERATION operationDirectPutByValGeneric(ExecState* exec, EncodedJSValue encodedBaseValue, EncodedJSValue encodedSubscript, EncodedJSValue encodedValue)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
@@ -669,13 +609,18 @@ void JIT_OPERATION operationDirectPutByValGeneric(ExecState* exec, EncodedJSValu
JSValue subscript = JSValue::decode(encodedSubscript);
JSValue value = JSValue::decode(encodedValue);
RELEASE_ASSERT(baseValue.isObject());
- directPutByVal(exec, asObject(baseValue), subscript, value, byValInfo);
+ directPutByVal(exec, asObject(baseValue), subscript, value);
}
-EncodedJSValue JIT_OPERATION operationCallEval(ExecState* exec, ExecState* execCallee)
+EncodedJSValue JIT_OPERATION operationCallEval(ExecState* execCallee)
{
- UNUSED_PARAM(exec);
+ CallFrame* callerFrame = execCallee->callerFrame();
+ ASSERT(execCallee->callerFrame()->codeBlock()->codeType() != FunctionCode
+ || !execCallee->callerFrame()->codeBlock()->needsFullScopeChain()
+ || execCallee->callerFrame()->uncheckedR(execCallee->callerFrame()->codeBlock()->activationRegister().offset()).jsValue());
+ execCallee->setScope(callerFrame->scope());
+ execCallee->setReturnPC(static_cast<Instruction*>(OUR_RETURN_ADDRESS));
execCallee->setCodeBlock(0);
if (!isHostFunction(execCallee->calleeAsValue(), globalFuncEval))
@@ -689,14 +634,15 @@ EncodedJSValue JIT_OPERATION operationCallEval(ExecState* exec, ExecState* execC
return JSValue::encode(result);
}
-static SlowPathReturnType handleHostCall(ExecState* execCallee, JSValue callee, CallLinkInfo* callLinkInfo)
+static void* handleHostCall(ExecState* execCallee, JSValue callee, CodeSpecializationKind kind)
{
ExecState* exec = execCallee->callerFrame();
VM* vm = &exec->vm();
+ execCallee->setScope(exec->scope());
execCallee->setCodeBlock(0);
- if (callLinkInfo->specializationKind() == CodeForCall) {
+ if (kind == CodeForCall) {
CallData callData;
CallType callType = getCallData(callee, callData);
@@ -706,25 +652,18 @@ static SlowPathReturnType handleHostCall(ExecState* execCallee, JSValue callee,
NativeCallFrameTracer tracer(vm, execCallee);
execCallee->setCallee(asObject(callee));
vm->hostCallReturnValue = JSValue::decode(callData.native.function(execCallee));
- if (vm->exception()) {
- return encodeResult(
- vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress(),
- reinterpret_cast<void*>(KeepTheFrame));
- }
+ if (vm->exception())
+ return vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress();
- return encodeResult(
- bitwise_cast<void*>(getHostCallReturnValue),
- reinterpret_cast<void*>(callLinkInfo->callMode() == CallMode::Tail ? ReuseTheFrame : KeepTheFrame));
+ return reinterpret_cast<void*>(getHostCallReturnValue);
}
ASSERT(callType == CallTypeNone);
exec->vm().throwException(exec, createNotAFunctionError(exec, callee));
- return encodeResult(
- vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress(),
- reinterpret_cast<void*>(KeepTheFrame));
+ return vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress();
}
- ASSERT(callLinkInfo->specializationKind() == CodeForConstruct);
+ ASSERT(kind == CodeForConstruct);
ConstructData constructData;
ConstructType constructType = getConstructData(callee, constructData);
@@ -735,166 +674,153 @@ static SlowPathReturnType handleHostCall(ExecState* execCallee, JSValue callee,
NativeCallFrameTracer tracer(vm, execCallee);
execCallee->setCallee(asObject(callee));
vm->hostCallReturnValue = JSValue::decode(constructData.native.function(execCallee));
- if (vm->exception()) {
- return encodeResult(
- vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress(),
- reinterpret_cast<void*>(KeepTheFrame));
- }
+ if (vm->exception())
+ return vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress();
- return encodeResult(bitwise_cast<void*>(getHostCallReturnValue), reinterpret_cast<void*>(KeepTheFrame));
+ return reinterpret_cast<void*>(getHostCallReturnValue);
}
ASSERT(constructType == ConstructTypeNone);
exec->vm().throwException(exec, createNotAConstructorError(exec, callee));
- return encodeResult(
- vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress(),
- reinterpret_cast<void*>(KeepTheFrame));
+ return vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress();
}
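handleHostCall always returns a machine-code continuation rather than a JS value: on success, the getHostCallReturnValue trampoline (the native result is parked in vm->hostCallReturnValue), and on exception, the throw-exception CTI stub. A hedged sketch of that contract, with local dummies standing in for the real stub addresses:

    // Illustrative stand-ins for the two continuations a host call can produce.
    static char hostCallReturnValueTrampoline; // models getHostCallReturnValue
    static char exceptionThunk;                // models the throw-exception CTI stub

    // The JIT caller tail-jumps to whatever address this returns.
    void* continuationAfterHostCall(bool sawException)
    {
        return sawException ? static_cast<void*>(&exceptionThunk)
                            : static_cast<void*>(&hostCallReturnValueTrampoline);
    }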
-SlowPathReturnType JIT_OPERATION operationLinkCall(ExecState* execCallee, CallLinkInfo* callLinkInfo)
+inline char* linkFor(ExecState* execCallee, CodeSpecializationKind kind)
{
ExecState* exec = execCallee->callerFrame();
VM* vm = &exec->vm();
- CodeSpecializationKind kind = callLinkInfo->specializationKind();
NativeCallFrameTracer tracer(vm, exec);
JSValue calleeAsValue = execCallee->calleeAsValue();
JSCell* calleeAsFunctionCell = getJSFunction(calleeAsValue);
- if (!calleeAsFunctionCell) {
- // FIXME: We should cache these kinds of calls. They can be common and currently they are
- // expensive.
- // https://bugs.webkit.org/show_bug.cgi?id=144458
- return handleHostCall(execCallee, calleeAsValue, callLinkInfo);
- }
+ if (!calleeAsFunctionCell)
+ return reinterpret_cast<char*>(handleHostCall(execCallee, calleeAsValue, kind));
JSFunction* callee = jsCast<JSFunction*>(calleeAsFunctionCell);
- JSScope* scope = callee->scopeUnchecked();
+ execCallee->setScope(callee->scopeUnchecked());
ExecutableBase* executable = callee->executable();
MacroAssemblerCodePtr codePtr;
CodeBlock* codeBlock = 0;
- if (executable->isHostFunction()) {
- codePtr = executable->entrypointFor(kind, MustCheckArity);
-#if ENABLE(WEBASSEMBLY)
- } else if (executable->isWebAssemblyExecutable()) {
- WebAssemblyExecutable* webAssemblyExecutable = static_cast<WebAssemblyExecutable*>(executable);
- webAssemblyExecutable->prepareForExecution(execCallee);
- codeBlock = webAssemblyExecutable->codeBlockForCall();
- ASSERT(codeBlock);
- ArityCheckMode arity;
- if (execCallee->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters()))
- arity = MustCheckArity;
- else
- arity = ArityCheckNotRequired;
- codePtr = webAssemblyExecutable->entrypointFor(kind, arity);
-#endif
- } else {
+ CallLinkInfo& callLinkInfo = exec->codeBlock()->getCallLinkInfo(execCallee->returnPC());
+ if (executable->isHostFunction())
+ codePtr = executable->generatedJITCodeFor(kind)->addressForCall();
+ else {
FunctionExecutable* functionExecutable = static_cast<FunctionExecutable*>(executable);
-
- if (!isCall(kind) && functionExecutable->constructAbility() == ConstructAbility::CannotConstruct) {
- exec->vm().throwException(exec, createNotAConstructorError(exec, callee));
- return encodeResult(
- vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress(),
- reinterpret_cast<void*>(KeepTheFrame));
- }
-
- JSObject* error = functionExecutable->prepareForExecution(execCallee, callee, scope, kind);
+ JSObject* error = functionExecutable->prepareForExecution(execCallee, callee->scope(), kind);
if (error) {
- exec->vm().throwException(exec, error);
- return encodeResult(
- vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress(),
- reinterpret_cast<void*>(KeepTheFrame));
+ vm->throwException(exec, createStackOverflowError(exec));
+ return reinterpret_cast<char*>(vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress());
}
codeBlock = functionExecutable->codeBlockFor(kind);
- ArityCheckMode arity;
- if (execCallee->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters()) || callLinkInfo->isVarargs())
- arity = MustCheckArity;
+ if (execCallee->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters()) || callLinkInfo.callType == CallLinkInfo::CallVarargs)
+ codePtr = functionExecutable->generatedJITCodeWithArityCheckFor(kind);
else
- arity = ArityCheckNotRequired;
- codePtr = functionExecutable->entrypointFor(kind, arity);
+ codePtr = functionExecutable->generatedJITCodeFor(kind)->addressForCall();
}
- if (!callLinkInfo->seenOnce())
- callLinkInfo->setSeen();
+ if (!callLinkInfo.seenOnce())
+ callLinkInfo.setSeen();
else
- linkFor(execCallee, *callLinkInfo, codeBlock, callee, codePtr);
-
- return encodeResult(codePtr.executableAddress(), reinterpret_cast<void*>(callLinkInfo->callMode() == CallMode::Tail ? ReuseTheFrame : KeepTheFrame));
+ linkFor(execCallee, callLinkInfo, codeBlock, callee, codePtr, kind);
+ return reinterpret_cast<char*>(codePtr.executableAddress());
+}
+
+char* JIT_OPERATION operationLinkCall(ExecState* execCallee)
+{
+ return linkFor(execCallee, CodeForCall);
}
-inline SlowPathReturnType virtualForWithFunction(
- ExecState* execCallee, CallLinkInfo* callLinkInfo, JSCell*& calleeAsFunctionCell)
+char* JIT_OPERATION operationLinkConstruct(ExecState* execCallee)
+{
+ return linkFor(execCallee, CodeForConstruct);
+}
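linkFor deliberately skips patching on the first visit: a call site must be seen once before a later call links it to a concrete entry point, which keeps one-shot call sites from being monomorphized prematurely. A small sketch of the two-phase policy (names are illustrative, not JSC API):

    // Sketch of the "see it once, link it the second time" policy.
    struct CallSiteSketch {
        bool seen = false;
        void* linkedEntry = nullptr;
    };

    void* resolveCall(CallSiteSketch& site, void* computedEntry)
    {
        if (!site.seen)
            site.seen = true;                 // first call: just record the visit
        else
            site.linkedEntry = computedEntry; // repeat call: patch the site
        return computedEntry;                 // either way, run the resolved code
    }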
+
+inline char* virtualForWithFunction(ExecState* execCallee, CodeSpecializationKind kind, JSCell*& calleeAsFunctionCell)
{
ExecState* exec = execCallee->callerFrame();
VM* vm = &exec->vm();
- CodeSpecializationKind kind = callLinkInfo->specializationKind();
NativeCallFrameTracer tracer(vm, exec);
JSValue calleeAsValue = execCallee->calleeAsValue();
calleeAsFunctionCell = getJSFunction(calleeAsValue);
if (UNLIKELY(!calleeAsFunctionCell))
- return handleHostCall(execCallee, calleeAsValue, callLinkInfo);
+ return reinterpret_cast<char*>(handleHostCall(execCallee, calleeAsValue, kind));
JSFunction* function = jsCast<JSFunction*>(calleeAsFunctionCell);
- JSScope* scope = function->scopeUnchecked();
+ execCallee->setScope(function->scopeUnchecked());
ExecutableBase* executable = function->executable();
if (UNLIKELY(!executable->hasJITCodeFor(kind))) {
- bool isWebAssemblyExecutable = false;
-#if ENABLE(WEBASSEMBLY)
- isWebAssemblyExecutable = executable->isWebAssemblyExecutable();
-#endif
- if (!isWebAssemblyExecutable) {
- FunctionExecutable* functionExecutable = static_cast<FunctionExecutable*>(executable);
-
- if (!isCall(kind) && functionExecutable->constructAbility() == ConstructAbility::CannotConstruct) {
- exec->vm().throwException(exec, createNotAConstructorError(exec, function));
- return encodeResult(
- vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress(),
- reinterpret_cast<void*>(KeepTheFrame));
- }
+ FunctionExecutable* functionExecutable = static_cast<FunctionExecutable*>(executable);
+ JSObject* error = functionExecutable->prepareForExecution(execCallee, function->scope(), kind);
+ if (error) {
+ exec->vm().throwException(execCallee, error);
+ return reinterpret_cast<char*>(vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress());
+ }
+ }
+ return reinterpret_cast<char*>(executable->generatedJITCodeWithArityCheckFor(kind).executableAddress());
+}
- JSObject* error = functionExecutable->prepareForExecution(execCallee, function, scope, kind);
- if (error) {
- exec->vm().throwException(exec, error);
- return encodeResult(
- vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress(),
- reinterpret_cast<void*>(KeepTheFrame));
- }
- } else {
-#if ENABLE(WEBASSEMBLY)
- if (!isCall(kind)) {
- exec->vm().throwException(exec, createNotAConstructorError(exec, function));
- return encodeResult(
- vm->getCTIStub(throwExceptionFromCallSlowPathGenerator).code().executableAddress(),
- reinterpret_cast<void*>(KeepTheFrame));
- }
+inline char* virtualFor(ExecState* execCallee, CodeSpecializationKind kind)
+{
+ JSCell* calleeAsFunctionCellIgnored;
+ return virtualForWithFunction(execCallee, kind, calleeAsFunctionCellIgnored);
+}
- WebAssemblyExecutable* webAssemblyExecutable = static_cast<WebAssemblyExecutable*>(executable);
- webAssemblyExecutable->prepareForExecution(execCallee);
-#endif
- }
+static bool attemptToOptimizeClosureCall(ExecState* execCallee, JSCell* calleeAsFunctionCell, CallLinkInfo& callLinkInfo)
+{
+ if (!calleeAsFunctionCell)
+ return false;
+
+ JSFunction* callee = jsCast<JSFunction*>(calleeAsFunctionCell);
+ JSFunction* oldCallee = callLinkInfo.callee.get();
+
+ if (!oldCallee
+ || oldCallee->structure() != callee->structure()
+ || oldCallee->executable() != callee->executable())
+ return false;
+
+ ASSERT(callee->executable()->hasJITCodeForCall());
+ MacroAssemblerCodePtr codePtr = callee->executable()->generatedJITCodeForCall()->addressForCall();
+
+ CodeBlock* codeBlock;
+ if (callee->executable()->isHostFunction())
+ codeBlock = 0;
+ else {
+ codeBlock = jsCast<FunctionExecutable*>(callee->executable())->codeBlockForCall();
+ if (execCallee->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters()))
+ return false;
}
- return encodeResult(executable->entrypointFor(
- kind, MustCheckArity).executableAddress(),
- reinterpret_cast<void*>(callLinkInfo->callMode() == CallMode::Tail ? ReuseTheFrame : KeepTheFrame));
+
+ linkClosureCall(
+ execCallee, callLinkInfo, codeBlock,
+ callee->structure(), callee->executable(), codePtr);
+
+ return true;
}
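The closure-call cache above reuses a stub only when the incoming callee agrees with the cached one on both Structure and ExecutableBase, i.e. the site is monomorphic in code even though the closure identity varies. A sketch of just that guard, with placeholder types:

    // Illustrative guard: a cached stub may be reused only when the incoming
    // callee shares structure and executable with the one it was built for.
    struct CalleeSketch {
        const void* structure;
        const void* executable;
    };

    bool closureCompatible(const CalleeSketch* cached, const CalleeSketch& incoming)
    {
        return cached
            && cached->structure == incoming.structure
            && cached->executable == incoming.executable;
    }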
-SlowPathReturnType JIT_OPERATION operationLinkPolymorphicCall(ExecState* execCallee, CallLinkInfo* callLinkInfo)
+char* JIT_OPERATION operationLinkClosureCall(ExecState* execCallee)
{
- ASSERT(callLinkInfo->specializationKind() == CodeForCall);
JSCell* calleeAsFunctionCell;
- SlowPathReturnType result = virtualForWithFunction(execCallee, callLinkInfo, calleeAsFunctionCell);
+ char* result = virtualForWithFunction(execCallee, CodeForCall, calleeAsFunctionCell);
+ CallLinkInfo& callLinkInfo = execCallee->callerFrame()->codeBlock()->getCallLinkInfo(execCallee->returnPC());
- linkPolymorphicCall(execCallee, *callLinkInfo, CallVariant(calleeAsFunctionCell));
+ if (!attemptToOptimizeClosureCall(execCallee, calleeAsFunctionCell, callLinkInfo))
+ linkSlowFor(execCallee, callLinkInfo, CodeForCall);
return result;
}
-SlowPathReturnType JIT_OPERATION operationVirtualCall(ExecState* execCallee, CallLinkInfo* callLinkInfo)
+char* JIT_OPERATION operationVirtualCall(ExecState* execCallee)
+{
+ return virtualFor(execCallee, CodeForCall);
+}
+
+char* JIT_OPERATION operationVirtualConstruct(ExecState* execCallee)
{
- JSCell* calleeAsFunctionCellIgnored;
- return virtualForWithFunction(execCallee, callLinkInfo, calleeAsFunctionCellIgnored);
+ return virtualFor(execCallee, CodeForConstruct);
}
+
size_t JIT_OPERATION operationCompareLess(ExecState* exec, EncodedJSValue encodedOp1, EncodedJSValue encodedOp2)
{
VM* vm = &exec->vm();
@@ -952,7 +878,7 @@ size_t JIT_OPERATION operationCompareStringEq(ExecState* exec, JSCell* left, JSC
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
- bool result = WTF::equal(*asString(left)->value(exec).impl(), *asString(right)->value(exec).impl());
+ bool result = asString(left)->value(exec) == asString(right)->value(exec);
#if USE(JSVALUE64)
return JSValue::encode(jsBoolean(result));
#else
@@ -962,7 +888,7 @@ size_t JIT_OPERATION operationCompareStringEq(ExecState* exec, JSCell* left, JSC
size_t JIT_OPERATION operationHasProperty(ExecState* exec, JSObject* base, JSString* property)
{
- int result = base->hasProperty(exec, property->toIdentifier(exec));
+ int result = base->hasProperty(exec, Identifier(exec, property->value(exec)));
return result;
}
@@ -989,46 +915,19 @@ EncodedJSValue JIT_OPERATION operationNewArrayWithSizeAndProfile(ExecState* exec
return JSValue::encode(constructArrayWithSizeQuirk(exec, profile, exec->lexicalGlobalObject(), sizeValue));
}
-}
-
-template<typename FunctionType>
-static EncodedJSValue operationNewFunctionCommon(ExecState* exec, JSScope* scope, JSCell* functionExecutable, bool isInvalidated)
+EncodedJSValue JIT_OPERATION operationNewFunction(ExecState* exec, JSCell* functionExecutable)
{
ASSERT(functionExecutable->inherits(FunctionExecutable::info()));
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
- if (isInvalidated)
- return JSValue::encode(FunctionType::createWithInvalidatedReallocationWatchpoint(vm, static_cast<FunctionExecutable*>(functionExecutable), scope));
- return JSValue::encode(FunctionType::create(vm, static_cast<FunctionExecutable*>(functionExecutable), scope));
-}
-
-extern "C" {
-
-EncodedJSValue JIT_OPERATION operationNewFunction(ExecState* exec, JSScope* scope, JSCell* functionExecutable)
-{
- return operationNewFunctionCommon<JSFunction>(exec, scope, functionExecutable, false);
-}
-
-EncodedJSValue JIT_OPERATION operationNewFunctionWithInvalidatedReallocationWatchpoint(ExecState* exec, JSScope* scope, JSCell* functionExecutable)
-{
- return operationNewFunctionCommon<JSFunction>(exec, scope, functionExecutable, true);
-}
-
-EncodedJSValue JIT_OPERATION operationNewGeneratorFunction(ExecState* exec, JSScope* scope, JSCell* functionExecutable)
-{
- return operationNewFunctionCommon<JSGeneratorFunction>(exec, scope, functionExecutable, false);
-}
-
-EncodedJSValue JIT_OPERATION operationNewGeneratorFunctionWithInvalidatedReallocationWatchpoint(ExecState* exec, JSScope* scope, JSCell* functionExecutable)
-{
- return operationNewFunctionCommon<JSGeneratorFunction>(exec, scope, functionExecutable, true);
+ return JSValue::encode(JSFunction::create(vm, static_cast<FunctionExecutable*>(functionExecutable), exec->scope()));
}
JSCell* JIT_OPERATION operationNewObject(ExecState* exec, Structure* structure)
{
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
-
+
return constructEmptyObject(exec, structure);
}
@@ -1038,39 +937,32 @@ EncodedJSValue JIT_OPERATION operationNewRegexp(ExecState* exec, void* regexpPtr
NativeCallFrameTracer tracer(&vm, exec);
RegExp* regexp = static_cast<RegExp*>(regexpPtr);
if (!regexp->isValid()) {
- vm.throwException(exec, createSyntaxError(exec, ASCIILiteral("Invalid flags supplied to RegExp constructor.")));
+ vm.throwException(exec, createSyntaxError(exec, "Invalid flags supplied to RegExp constructor."));
return JSValue::encode(jsUndefined());
}
return JSValue::encode(RegExpObject::create(vm, exec->lexicalGlobalObject()->regExpStructure(), regexp));
}
-// The only reason for returning an UnusedPtr (instead of void) is so that we can reuse the
-// existing DFG slow path generator machinery when creating the slow path for CheckWatchdogTimer
-// in the DFG. If a DFG slow path generator that supports a void return type is added in the
-// future, we can switch to using that then.
-UnusedPtr JIT_OPERATION operationHandleWatchdogTimer(ExecState* exec)
+void JIT_OPERATION operationHandleWatchdogTimer(ExecState* exec)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
- if (UNLIKELY(vm.shouldTriggerTermination(exec)))
+ if (UNLIKELY(vm.watchdog.didFire(exec)))
vm.throwException(exec, createTerminatedExecutionException(&vm));
-
- return nullptr;
}
void JIT_OPERATION operationThrowStaticError(ExecState* exec, EncodedJSValue encodedValue, int32_t referenceErrorFlag)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
- JSValue errorMessageValue = JSValue::decode(encodedValue);
- RELEASE_ASSERT(errorMessageValue.isString());
- String errorMessage = asString(errorMessageValue)->value(exec);
+
+ String message = errorDescriptionForValue(exec, JSValue::decode(encodedValue))->value(exec);
if (referenceErrorFlag)
- vm.throwException(exec, createReferenceError(exec, errorMessage));
+ vm.throwException(exec, createReferenceError(exec, message));
else
- vm.throwException(exec, createTypeError(exec, errorMessage));
+ vm.throwException(exec, createTypeError(exec, message));
}
void JIT_OPERATION operationDebug(ExecState* exec, int32_t debugHookID)
@@ -1082,13 +974,7 @@ void JIT_OPERATION operationDebug(ExecState* exec, int32_t debugHookID)
}
#if ENABLE(DFG_JIT)
-static void updateAllPredictionsAndOptimizeAfterWarmUp(CodeBlock* codeBlock)
-{
- codeBlock->updateAllPredictions();
- codeBlock->optimizeAfterWarmUp();
-}
-
-SlowPathReturnType JIT_OPERATION operationOptimize(ExecState* exec, int32_t bytecodeIndex)
+char* JIT_OPERATION operationOptimize(ExecState* exec, int32_t bytecodeIndex)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
@@ -1110,11 +996,7 @@ SlowPathReturnType JIT_OPERATION operationOptimize(ExecState* exec, int32_t byte
DeferGCForAWhile deferGC(vm.heap);
CodeBlock* codeBlock = exec->codeBlock();
- if (codeBlock->jitType() != JITCode::BaselineJIT) {
- dataLog("Unexpected code block in Baseline->DFG tier-up: ", *codeBlock, "\n");
- RELEASE_ASSERT_NOT_REACHED();
- }
-
+
if (bytecodeIndex) {
// If we're attempting to OSR from a loop, assume that this should be
// separately optimized.
@@ -1138,37 +1020,26 @@ SlowPathReturnType JIT_OPERATION operationOptimize(ExecState* exec, int32_t byte
codeBlock->updateAllPredictions();
if (Options::verboseOSR())
dataLog("Choosing not to optimize ", *codeBlock, " yet, because the threshold hasn't been reached.\n");
- return encodeResult(0, 0);
+ return 0;
}
- if (vm.enabledProfiler()) {
- updateAllPredictionsAndOptimizeAfterWarmUp(codeBlock);
- return encodeResult(0, 0);
- }
-
- Debugger* debugger = codeBlock->globalObject()->debugger();
- if (debugger && (debugger->isStepping() || codeBlock->baselineAlternative()->hasDebuggerRequests())) {
- updateAllPredictionsAndOptimizeAfterWarmUp(codeBlock);
- return encodeResult(0, 0);
- }
-
if (codeBlock->m_shouldAlwaysBeInlined) {
- updateAllPredictionsAndOptimizeAfterWarmUp(codeBlock);
+ codeBlock->updateAllPredictions();
+ codeBlock->optimizeAfterWarmUp();
if (Options::verboseOSR())
dataLog("Choosing not to optimize ", *codeBlock, " yet, because m_shouldAlwaysBeInlined == true.\n");
- return encodeResult(0, 0);
+ return 0;
}
// We cannot be in the process of asynchronous compilation and also have an optimized
// replacement.
- DFG::Worklist* worklist = DFG::existingGlobalDFGWorklistOrNull();
ASSERT(
- !worklist
- || !(worklist->compilationState(DFG::CompilationKey(codeBlock, DFG::DFGMode)) != DFG::Worklist::NotKnown
+ !vm.worklist
+ || !(vm.worklist->compilationState(DFG::CompilationKey(codeBlock, DFG::DFGMode)) != DFG::Worklist::NotKnown
&& codeBlock->hasOptimizedReplacement()));
DFG::Worklist::State worklistState;
- if (worklist) {
+ if (vm.worklist) {
// The call to DFG::Worklist::completeAllReadyPlansForVM() will complete all ready
// (i.e. compiled) code blocks. But if it completes ours, we also need to know
// what the result was so that we don't plow ahead and attempt OSR or immediate
@@ -1187,7 +1058,7 @@ SlowPathReturnType JIT_OPERATION operationOptimize(ExecState* exec, int32_t byte
// probably a waste of memory. Our goal here is to complete code blocks as soon as
// possible in order to minimize the chances of us executing baseline code after
// optimized code is already available.
- worklistState = worklist->completeAllReadyPlansForVM(
+ worklistState = vm.worklist->completeAllReadyPlansForVM(
vm, DFG::CompilationKey(codeBlock, DFG::DFGMode));
} else
worklistState = DFG::Worklist::NotKnown;
@@ -1197,7 +1068,7 @@ SlowPathReturnType JIT_OPERATION operationOptimize(ExecState* exec, int32_t byte
// replacement.
RELEASE_ASSERT(!codeBlock->hasOptimizedReplacement());
codeBlock->setOptimizationThresholdBasedOnCompilationResult(CompilationDeferred);
- return encodeResult(0, 0);
+ return 0;
}
if (worklistState == DFG::Worklist::Compiled) {
@@ -1210,7 +1081,7 @@ SlowPathReturnType JIT_OPERATION operationOptimize(ExecState* exec, int32_t byte
codeBlock->updateAllPredictions();
if (Options::verboseOSR())
dataLog("Code block ", *codeBlock, " was compiled but it doesn't have an optimized replacement.\n");
- return encodeResult(0, 0);
+ return 0;
}
} else if (codeBlock->hasOptimizedReplacement()) {
if (Options::verboseOSR())
@@ -1234,8 +1105,8 @@ SlowPathReturnType JIT_OPERATION operationOptimize(ExecState* exec, int32_t byte
"Triggering reoptimization of ", *codeBlock,
"(", *codeBlock->replacement(), ") (in loop).\n");
}
- codeBlock->replacement()->jettison(Profiler::JettisonDueToBaselineLoopReoptimizationTrigger, CountReoptimization);
- return encodeResult(0, 0);
+ codeBlock->replacement()->jettison(CountReoptimization);
+ return 0;
}
} else {
if (!codeBlock->shouldOptimizeNow()) {
@@ -1244,7 +1115,7 @@ SlowPathReturnType JIT_OPERATION operationOptimize(ExecState* exec, int32_t byte
"Delaying optimization for ", *codeBlock,
" because of insufficient profiling.\n");
}
- return encodeResult(0, 0);
+ return 0;
}
if (Options::verboseOSR())
@@ -1256,34 +1127,42 @@ SlowPathReturnType JIT_OPERATION operationOptimize(ExecState* exec, int32_t byte
else
numVarsWithValues = 0;
Operands<JSValue> mustHandleValues(codeBlock->numParameters(), numVarsWithValues);
- int localsUsedForCalleeSaves = static_cast<int>(CodeBlock::llintBaselineCalleeSaveSpaceAsVirtualRegisters());
for (size_t i = 0; i < mustHandleValues.size(); ++i) {
int operand = mustHandleValues.operandForIndex(i);
- if (operandIsLocal(operand) && VirtualRegister(operand).toLocal() < localsUsedForCalleeSaves)
- continue;
- mustHandleValues[i] = exec->uncheckedR(operand).jsValue();
+ if (operandIsArgument(operand)
+ && !VirtualRegister(operand).toArgument()
+ && codeBlock->codeType() == FunctionCode
+ && codeBlock->specializationKind() == CodeForConstruct) {
+ // Ugh. If we're in a constructor, the 'this' argument may hold garbage. It will
+ // also never be used. It doesn't matter what we put into the value for this,
+ // but it has to be an actual value that can be grokked by subsequent DFG passes,
+ // so we sanitize it here by turning it into Undefined.
+ mustHandleValues[i] = jsUndefined();
+ } else
+ mustHandleValues[i] = exec->uncheckedR(operand).jsValue();
}
- CodeBlock* replacementCodeBlock = codeBlock->newReplacement();
CompilationResult result = DFG::compile(
- vm, replacementCodeBlock, nullptr, DFG::DFGMode, bytecodeIndex,
- mustHandleValues, JITToDFGDeferredCompilationCallback::create());
+ vm, codeBlock->newReplacement().get(), DFG::DFGMode, bytecodeIndex,
+ mustHandleValues, JITToDFGDeferredCompilationCallback::create(),
+ vm.ensureWorklist());
if (result != CompilationSuccessful)
- return encodeResult(0, 0);
+ return 0;
}
CodeBlock* optimizedCodeBlock = codeBlock->replacement();
ASSERT(JITCode::isOptimizingJIT(optimizedCodeBlock->jitType()));
- if (void* dataBuffer = DFG::prepareOSREntry(exec, optimizedCodeBlock, bytecodeIndex)) {
+ if (void* address = DFG::prepareOSREntry(exec, optimizedCodeBlock, bytecodeIndex)) {
if (Options::verboseOSR()) {
dataLog(
- "Performing OSR ", *codeBlock, " -> ", *optimizedCodeBlock, ".\n");
+ "Performing OSR ", *codeBlock, " -> ", *optimizedCodeBlock, ", address ",
+ RawPointer(OUR_RETURN_ADDRESS), " -> ", RawPointer(address), ".\n");
}
codeBlock->optimizeSoon();
- return encodeResult(vm.getCTIStub(DFG::osrEntryThunkGenerator).code().executableAddress(), dataBuffer);
+ return static_cast<char*>(address);
}
if (Options::verboseOSR()) {
@@ -1311,15 +1190,15 @@ SlowPathReturnType JIT_OPERATION operationOptimize(ExecState* exec, int32_t byte
"Triggering reoptimization of ", *codeBlock, " -> ",
*codeBlock->replacement(), " (after OSR fail).\n");
}
- optimizedCodeBlock->jettison(Profiler::JettisonDueToBaselineLoopReoptimizationTriggerOnOSREntryFail, CountReoptimization);
- return encodeResult(0, 0);
+ optimizedCodeBlock->jettison(CountReoptimization);
+ return 0;
}
// OSR failed this time, but it might succeed next time! Let the code run a bit
// longer and then try again.
codeBlock->optimizeAfterWarmUp();
- return encodeResult(0, 0);
+ return 0;
}
#endif
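With this change operationOptimize returns a raw address: null means "stay in baseline code for now" (threshold not reached, compile deferred or failed, OSR entry not ready), and a non-null pointer is the DFG OSR entry point the baseline JIT should jump to. A sketch of how a caller can be read under that assumption:

    // Sketch: baseline glue treats the result as "OSR entry address or null".
    using OSREntrySketch = char*;

    void continueAfterOptimizeCheck(OSREntrySketch entry)
    {
        if (!entry)
            return; // thresholds not met or compile deferred; stay in baseline
        // In the real JIT glue this is an indirect tail-jump, not a C++ call.
        reinterpret_cast<void (*)()>(entry)();
    }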
@@ -1333,73 +1212,16 @@ void JIT_OPERATION operationPutByIndex(ExecState* exec, EncodedJSValue encodedAr
asArray(arrayValue)->putDirectIndex(exec, index, JSValue::decode(encodedValue));
}
-enum class AccessorType {
- Getter,
- Setter
-};
-
-static void putAccessorByVal(ExecState* exec, JSObject* base, JSValue subscript, int32_t attribute, JSObject* accessor, AccessorType accessorType)
-{
- auto propertyKey = subscript.toPropertyKey(exec);
- if (exec->hadException())
- return;
-
- if (accessorType == AccessorType::Getter)
- base->putGetter(exec, propertyKey, accessor, attribute);
- else
- base->putSetter(exec, propertyKey, accessor, attribute);
-}
-
-void JIT_OPERATION operationPutGetterById(ExecState* exec, JSCell* object, UniquedStringImpl* uid, int32_t options, JSCell* getter)
-{
- VM& vm = exec->vm();
- NativeCallFrameTracer tracer(&vm, exec);
-
- ASSERT(object && object->isObject());
- JSObject* baseObj = object->getObject();
-
- ASSERT(getter->isObject());
- baseObj->putGetter(exec, uid, getter, options);
-}
-
-void JIT_OPERATION operationPutSetterById(ExecState* exec, JSCell* object, UniquedStringImpl* uid, int32_t options, JSCell* setter)
-{
- VM& vm = exec->vm();
- NativeCallFrameTracer tracer(&vm, exec);
-
- ASSERT(object && object->isObject());
- JSObject* baseObj = object->getObject();
-
- ASSERT(setter->isObject());
- baseObj->putSetter(exec, uid, setter, options);
-}
-
-void JIT_OPERATION operationPutGetterByVal(ExecState* exec, JSCell* base, EncodedJSValue encodedSubscript, int32_t attribute, JSCell* getter)
-{
- VM& vm = exec->vm();
- NativeCallFrameTracer tracer(&vm, exec);
-
- putAccessorByVal(exec, asObject(base), JSValue::decode(encodedSubscript), attribute, asObject(getter), AccessorType::Getter);
-}
-
-void JIT_OPERATION operationPutSetterByVal(ExecState* exec, JSCell* base, EncodedJSValue encodedSubscript, int32_t attribute, JSCell* setter)
-{
- VM& vm = exec->vm();
- NativeCallFrameTracer tracer(&vm, exec);
-
- putAccessorByVal(exec, asObject(base), JSValue::decode(encodedSubscript), attribute, asObject(setter), AccessorType::Setter);
-}
-
#if USE(JSVALUE64)
-void JIT_OPERATION operationPutGetterSetter(ExecState* exec, JSCell* object, UniquedStringImpl* uid, int32_t attribute, EncodedJSValue encodedGetterValue, EncodedJSValue encodedSetterValue)
+void JIT_OPERATION operationPutGetterSetter(ExecState* exec, EncodedJSValue encodedObjectValue, Identifier* identifier, EncodedJSValue encodedGetterValue, EncodedJSValue encodedSetterValue)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
- ASSERT(object && object->isObject());
- JSObject* baseObj = asObject(object);
+ ASSERT(JSValue::decode(encodedObjectValue).isObject());
+ JSObject* baseObj = asObject(JSValue::decode(encodedObjectValue));
- GetterSetter* accessor = GetterSetter::create(vm, exec->lexicalGlobalObject());
+ GetterSetter* accessor = GetterSetter::create(vm);
JSValue getter = JSValue::decode(encodedGetterValue);
JSValue setter = JSValue::decode(encodedSetterValue);
@@ -1408,42 +1230,62 @@ void JIT_OPERATION operationPutGetterSetter(ExecState* exec, JSCell* object, Uni
ASSERT(getter.isObject() || setter.isObject());
if (!getter.isUndefined())
- accessor->setGetter(vm, exec->lexicalGlobalObject(), asObject(getter));
+ accessor->setGetter(vm, asObject(getter));
if (!setter.isUndefined())
- accessor->setSetter(vm, exec->lexicalGlobalObject(), asObject(setter));
- baseObj->putDirectAccessor(exec, uid, accessor, attribute);
+ accessor->setSetter(vm, asObject(setter));
+ baseObj->putDirectAccessor(exec, *identifier, accessor, Accessor);
}
-
#else
-void JIT_OPERATION operationPutGetterSetter(ExecState* exec, JSCell* object, UniquedStringImpl* uid, int32_t attribute, JSCell* getter, JSCell* setter)
+void JIT_OPERATION operationPutGetterSetter(ExecState* exec, JSCell* object, Identifier* identifier, JSCell* getter, JSCell* setter)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
ASSERT(object && object->isObject());
- JSObject* baseObj = asObject(object);
+ JSObject* baseObj = object->getObject();
- GetterSetter* accessor = GetterSetter::create(vm, exec->lexicalGlobalObject());
+ GetterSetter* accessor = GetterSetter::create(vm);
ASSERT(!getter || getter->isObject());
ASSERT(!setter || setter->isObject());
ASSERT(getter || setter);
if (getter)
- accessor->setGetter(vm, exec->lexicalGlobalObject(), getter->getObject());
+ accessor->setGetter(vm, getter->getObject());
if (setter)
- accessor->setSetter(vm, exec->lexicalGlobalObject(), setter->getObject());
- baseObj->putDirectAccessor(exec, uid, accessor, attribute);
+ accessor->setSetter(vm, setter->getObject());
+ baseObj->putDirectAccessor(exec, *identifier, accessor, Accessor);
}
#endif
-void JIT_OPERATION operationPopScope(ExecState* exec, int32_t scopeReg)
+void JIT_OPERATION operationPushNameScope(ExecState* exec, Identifier* identifier, EncodedJSValue encodedValue, int32_t attributes)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ JSNameScope* scope = JSNameScope::create(exec, *identifier, JSValue::decode(encodedValue), attributes);
+
+ exec->setScope(scope);
+}
+
+void JIT_OPERATION operationPushWithScope(ExecState* exec, EncodedJSValue encodedValue)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ JSObject* o = JSValue::decode(encodedValue).toObject(exec);
+ if (vm.exception())
+ return;
+
+ exec->setScope(JSWithScope::create(exec, o));
+}
+
+void JIT_OPERATION operationPopScope(ExecState* exec)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
- JSScope* scope = exec->uncheckedR(scopeReg).Register::scope();
- exec->uncheckedR(scopeReg) = scope->next();
+ exec->setScope(exec->scope()->next());
}
void JIT_OPERATION operationProfileDidCall(ExecState* exec, EncodedJSValue encodedValue)
@@ -1464,277 +1306,143 @@ void JIT_OPERATION operationProfileWillCall(ExecState* exec, EncodedJSValue enco
profiler->willExecute(exec, JSValue::decode(encodedValue));
}
-int32_t JIT_OPERATION operationInstanceOfCustom(ExecState* exec, EncodedJSValue encodedValue, JSObject* constructor, EncodedJSValue encodedHasInstance)
+EncodedJSValue JIT_OPERATION operationCheckHasInstance(ExecState* exec, EncodedJSValue encodedValue, EncodedJSValue encodedBaseVal)
{
- VM& vm = exec->vm();
- NativeCallFrameTracer tracer(&vm, exec);
+ VM* vm = &exec->vm();
+ NativeCallFrameTracer tracer(vm, exec);
JSValue value = JSValue::decode(encodedValue);
- JSValue hasInstanceValue = JSValue::decode(encodedHasInstance);
+ JSValue baseVal = JSValue::decode(encodedBaseVal);
+
+ if (baseVal.isObject()) {
+ JSObject* baseObject = asObject(baseVal);
+ ASSERT(!baseObject->structure()->typeInfo().implementsDefaultHasInstance());
+ if (baseObject->structure()->typeInfo().implementsHasInstance()) {
+ bool result = baseObject->methodTable()->customHasInstance(baseObject, exec, value);
+ return JSValue::encode(jsBoolean(result));
+ }
+ }
- ASSERT(hasInstanceValue != exec->lexicalGlobalObject()->functionProtoHasInstanceSymbolFunction() || !constructor->structure()->typeInfo().implementsDefaultHasInstance());
+ vm->throwException(exec, createInvalidParameterError(exec, "instanceof", baseVal));
+ return JSValue::encode(JSValue());
+}
- if (constructor->hasInstance(exec, value, hasInstanceValue))
- return 1;
- return 0;
+JSCell* JIT_OPERATION operationCreateActivation(ExecState* exec, int32_t offset)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+ JSActivation* activation = JSActivation::create(vm, exec, exec->registers() + offset, exec->codeBlock());
+ exec->setScope(activation);
+ return activation;
}
+JSCell* JIT_OPERATION operationCreateArguments(ExecState* exec)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+ // NB: This needs to be exceedingly careful with top call frame tracking, since it
+ // may be called from OSR exit, while the state of the call stack is bizarre.
+ Arguments* result = Arguments::create(vm, exec);
+ ASSERT(!vm.exception());
+ return result;
}
-static bool canAccessArgumentIndexQuickly(JSObject& object, uint32_t index)
+EncodedJSValue JIT_OPERATION operationGetArgumentsLength(ExecState* exec, int32_t argumentsRegister)
{
- switch (object.structure()->typeInfo().type()) {
- case DirectArgumentsType: {
- DirectArguments* directArguments = jsCast<DirectArguments*>(&object);
- if (directArguments->canAccessArgumentIndexQuicklyInDFG(index))
- return true;
- break;
- }
- case ScopedArgumentsType: {
- ScopedArguments* scopedArguments = jsCast<ScopedArguments*>(&object);
- if (scopedArguments->canAccessArgumentIndexQuicklyInDFG(index))
- return true;
- break;
- }
- default:
- break;
- }
- return false;
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+ // Here we can assume that the arguments were created, because otherwise the JIT
+ // code would not have made this call.
+ Identifier ident(&vm, "length");
+ JSValue baseValue = exec->uncheckedR(argumentsRegister).jsValue();
+ PropertySlot slot(baseValue);
+ return JSValue::encode(baseValue.get(exec, ident, slot));
+}
+
}
-static JSValue getByVal(ExecState* exec, JSValue baseValue, JSValue subscript, ByValInfo* byValInfo, ReturnAddressPtr returnAddress)
+static JSValue getByVal(ExecState* exec, JSValue baseValue, JSValue subscript, ReturnAddressPtr returnAddress)
{
if (LIKELY(baseValue.isCell() && subscript.isString())) {
- VM& vm = exec->vm();
- Structure& structure = *baseValue.asCell()->structure(vm);
- if (JSCell::canUseFastGetOwnProperty(structure)) {
- if (RefPtr<AtomicStringImpl> existingAtomicString = asString(subscript)->toExistingAtomicString(exec)) {
- if (JSValue result = baseValue.asCell()->fastGetOwnProperty(vm, structure, existingAtomicString.get())) {
- ASSERT(exec->bytecodeOffset());
- if (byValInfo->stubInfo && byValInfo->cachedId.impl() != existingAtomicString)
- byValInfo->tookSlowPath = true;
- return result;
- }
- }
- }
+ if (JSValue result = baseValue.asCell()->fastGetOwnProperty(exec, asString(subscript)->value(exec)))
+ return result;
}
if (subscript.isUInt32()) {
- ASSERT(exec->bytecodeOffset());
- byValInfo->tookSlowPath = true;
-
uint32_t i = subscript.asUInt32();
- if (isJSString(baseValue)) {
- if (asString(baseValue)->canGetIndex(i)) {
- ctiPatchCallByReturnAddress(returnAddress, FunctionPtr(operationGetByValString));
- return asString(baseValue)->getIndex(exec, i);
- }
- byValInfo->arrayProfile->setOutOfBounds();
- } else if (baseValue.isObject()) {
- JSObject* object = asObject(baseValue);
- if (object->canGetIndexQuickly(i))
- return object->getIndexQuickly(i);
-
- if (!canAccessArgumentIndexQuickly(*object, i)) {
- // FIXME: This will make us think that in-bounds typed array accesses are actually
- // out-of-bounds.
- // https://bugs.webkit.org/show_bug.cgi?id=149886
- byValInfo->arrayProfile->setOutOfBounds();
- }
+ if (isJSString(baseValue) && asString(baseValue)->canGetIndex(i)) {
+ ctiPatchCallByReturnAddress(exec->codeBlock(), returnAddress, FunctionPtr(operationGetByValString));
+ return asString(baseValue)->getIndex(exec, i);
}
-
return baseValue.get(exec, i);
}
- baseValue.requireObjectCoercible(exec);
- if (exec->hadException())
- return jsUndefined();
- auto property = subscript.toPropertyKey(exec);
- if (exec->hadException())
- return jsUndefined();
-
- ASSERT(exec->bytecodeOffset());
- if (byValInfo->stubInfo && (!isStringOrSymbol(subscript) || byValInfo->cachedId != property))
- byValInfo->tookSlowPath = true;
+ if (isName(subscript))
+ return baseValue.get(exec, jsCast<NameInstance*>(subscript.asCell())->privateName());
+ Identifier property(exec, subscript.toString(exec)->value(exec));
return baseValue.get(exec, property);
}
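The getByVal helper above orders its cases from cheapest to most general: fast own-property lookup for string subscripts on cells, uint32 indexed access (repatching string-index sites to operationGetByValString), private names, and finally a full Identifier-based get. A condensed control-flow sketch, with boolean placeholders for the real predicates:

    // Control-flow sketch of the helper above; all flags are placeholders.
    const char* getByValPath(bool cellBaseWithStringSubscript, bool fastOwnPropertyHit,
        bool uint32Subscript, bool stringBaseInBounds, bool privateNameSubscript)
    {
        if (cellBaseWithStringSubscript && fastOwnPropertyHit)
            return "fastGetOwnProperty hit";
        if (uint32Subscript) {
            if (stringBaseInBounds)
                return "string getIndex (and repatch to operationGetByValString)";
            return "indexed get(exec, i)";
        }
        if (privateNameSubscript)
            return "private-name get";
        return "Identifier-based get"; // most general path
    }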
-static OptimizationResult tryGetByValOptimize(ExecState* exec, JSValue baseValue, JSValue subscript, ByValInfo* byValInfo, ReturnAddressPtr returnAddress)
-{
- // See if it's worth optimizing this at all.
- OptimizationResult optimizationResult = OptimizationResult::NotOptimized;
-
- VM& vm = exec->vm();
-
- if (baseValue.isObject() && subscript.isInt32()) {
- JSObject* object = asObject(baseValue);
-
- ASSERT(exec->bytecodeOffset());
- ASSERT(!byValInfo->stubRoutine);
-
- if (hasOptimizableIndexing(object->structure(vm))) {
- // Attempt to optimize.
- Structure* structure = object->structure(vm);
- JITArrayMode arrayMode = jitArrayModeForStructure(structure);
- if (arrayMode != byValInfo->arrayMode) {
- // If we reached this case, we got an interesting array mode we did not expect when we compiled.
- // Let's update the profile to do better next time.
- CodeBlock* codeBlock = exec->codeBlock();
- ConcurrentJITLocker locker(codeBlock->m_lock);
- byValInfo->arrayProfile->computeUpdatedPrediction(locker, codeBlock, structure);
-
- JIT::compileGetByVal(&vm, exec->codeBlock(), byValInfo, returnAddress, arrayMode);
- optimizationResult = OptimizationResult::Optimized;
- }
- }
-
- // If we failed to patch and we have some object that intercepts indexed get, then don't even wait until 10 times.
- if (optimizationResult != OptimizationResult::Optimized && object->structure(vm)->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero())
- optimizationResult = OptimizationResult::GiveUp;
- }
-
- if (baseValue.isObject() && isStringOrSymbol(subscript)) {
- const Identifier propertyName = subscript.toPropertyKey(exec);
- if (!subscript.isString() || !parseIndex(propertyName)) {
- ASSERT(exec->bytecodeOffset());
- ASSERT(!byValInfo->stubRoutine);
- if (byValInfo->seen) {
- if (byValInfo->cachedId == propertyName) {
- JIT::compileGetByValWithCachedId(&vm, exec->codeBlock(), byValInfo, returnAddress, propertyName);
- optimizationResult = OptimizationResult::Optimized;
- } else {
- // Seems like a generic property access site.
- optimizationResult = OptimizationResult::GiveUp;
- }
- } else {
- byValInfo->seen = true;
- byValInfo->cachedId = propertyName;
- optimizationResult = OptimizationResult::SeenOnce;
- }
-
- }
- }
-
- if (optimizationResult != OptimizationResult::Optimized && optimizationResult != OptimizationResult::SeenOnce) {
- // If we take slow path more than 10 times without patching then make sure we
- // never make that mistake again. For cases where we see non-index-intercepting
- // objects, this gives 10 iterations worth of opportunity for us to observe
- // that the get_by_val may be polymorphic. We count up slowPathCount even if
- // the result is GiveUp.
- if (++byValInfo->slowPathCount >= 10)
- optimizationResult = OptimizationResult::GiveUp;
- }
-
- return optimizationResult;
-}
-
extern "C" {
-
-EncodedJSValue JIT_OPERATION operationGetByValGeneric(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo* byValInfo)
+
+EncodedJSValue JIT_OPERATION operationGetByValGeneric(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
JSValue baseValue = JSValue::decode(encodedBase);
JSValue subscript = JSValue::decode(encodedSubscript);
- JSValue result = getByVal(exec, baseValue, subscript, byValInfo, ReturnAddressPtr(OUR_RETURN_ADDRESS));
+ JSValue result = getByVal(exec, baseValue, subscript, ReturnAddressPtr(OUR_RETURN_ADDRESS));
return JSValue::encode(result);
}
-EncodedJSValue JIT_OPERATION operationGetByValOptimize(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo* byValInfo)
-{
- VM& vm = exec->vm();
- NativeCallFrameTracer tracer(&vm, exec);
-
- JSValue baseValue = JSValue::decode(encodedBase);
- JSValue subscript = JSValue::decode(encodedSubscript);
- ReturnAddressPtr returnAddress = ReturnAddressPtr(OUR_RETURN_ADDRESS);
- if (tryGetByValOptimize(exec, baseValue, subscript, byValInfo, returnAddress) == OptimizationResult::GiveUp) {
- // Don't ever try to optimize.
- byValInfo->tookSlowPath = true;
- ctiPatchCallByReturnAddress(returnAddress, FunctionPtr(operationGetByValGeneric));
- }
-
- return JSValue::encode(getByVal(exec, baseValue, subscript, byValInfo, returnAddress));
-}
-
-EncodedJSValue JIT_OPERATION operationHasIndexedPropertyDefault(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo* byValInfo)
+EncodedJSValue JIT_OPERATION operationGetByValDefault(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
JSValue baseValue = JSValue::decode(encodedBase);
JSValue subscript = JSValue::decode(encodedSubscript);
- ASSERT(baseValue.isObject());
- ASSERT(subscript.isUInt32());
-
- JSObject* object = asObject(baseValue);
- bool didOptimize = false;
+ if (baseValue.isObject() && subscript.isInt32()) {
+ // See if it's worth optimizing this at all.
+ JSObject* object = asObject(baseValue);
+ bool didOptimize = false;
- ASSERT(exec->bytecodeOffset());
- ASSERT(!byValInfo->stubRoutine);
-
- if (hasOptimizableIndexing(object->structure(vm))) {
- // Attempt to optimize.
- JITArrayMode arrayMode = jitArrayModeForStructure(object->structure(vm));
- if (arrayMode != byValInfo->arrayMode) {
- JIT::compileHasIndexedProperty(&vm, exec->codeBlock(), byValInfo, ReturnAddressPtr(OUR_RETURN_ADDRESS), arrayMode);
- didOptimize = true;
+ unsigned bytecodeOffset = exec->locationAsBytecodeOffset();
+ ASSERT(bytecodeOffset);
+ ByValInfo& byValInfo = exec->codeBlock()->getByValInfo(bytecodeOffset - 1);
+ ASSERT(!byValInfo.stubRoutine);
+
+ if (hasOptimizableIndexing(object->structure())) {
+ // Attempt to optimize.
+ JITArrayMode arrayMode = jitArrayModeForStructure(object->structure());
+ if (arrayMode != byValInfo.arrayMode) {
+ JIT::compileGetByVal(&vm, exec->codeBlock(), &byValInfo, ReturnAddressPtr(OUR_RETURN_ADDRESS), arrayMode);
+ didOptimize = true;
+ }
}
- }
-
- if (!didOptimize) {
- // If we take slow path more than 10 times without patching then make sure we
- // never make that mistake again. Or, if we failed to patch and we have some object
- // that intercepts indexed get, then don't even wait until 10 times. For cases
- // where we see non-index-intercepting objects, this gives 10 iterations worth of
- // opportunity for us to observe that the get_by_val may be polymorphic.
- if (++byValInfo->slowPathCount >= 10
- || object->structure(vm)->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero()) {
- // Don't ever try to optimize.
- ctiPatchCallByReturnAddress(ReturnAddressPtr(OUR_RETURN_ADDRESS), FunctionPtr(operationHasIndexedPropertyGeneric));
+
+ if (!didOptimize) {
+ // If we take slow path more than 10 times without patching then make sure we
+ // never make that mistake again. Or, if we failed to patch and we have some object
+ // that intercepts indexed get, then don't even wait until 10 times. For cases
+ // where we see non-index-intercepting objects, this gives 10 iterations worth of
+ // opportunity for us to observe that the get_by_val may be polymorphic.
+ if (++byValInfo.slowPathCount >= 10
+ || object->structure()->typeInfo().interceptsGetOwnPropertySlotByIndexEvenWhenLengthIsNotZero()) {
+ // Don't ever try to optimize.
+ RepatchBuffer repatchBuffer(exec->codeBlock());
+ repatchBuffer.relinkCallerToFunction(ReturnAddressPtr(OUR_RETURN_ADDRESS), FunctionPtr(operationGetByValGeneric));
+ }
}
}
-
- uint32_t index = subscript.asUInt32();
- if (object->canGetIndexQuickly(index))
- return JSValue::encode(JSValue(JSValue::JSTrue));
-
- if (!canAccessArgumentIndexQuickly(*object, index)) {
- // FIXME: This will make us think that in-bounds typed array accesses are actually
- // out-of-bounds.
- // https://bugs.webkit.org/show_bug.cgi?id=149886
- byValInfo->arrayProfile->setOutOfBounds();
- }
- return JSValue::encode(jsBoolean(object->hasProperty(exec, index)));
-}
-
-EncodedJSValue JIT_OPERATION operationHasIndexedPropertyGeneric(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo* byValInfo)
-{
- VM& vm = exec->vm();
- NativeCallFrameTracer tracer(&vm, exec);
- JSValue baseValue = JSValue::decode(encodedBase);
- JSValue subscript = JSValue::decode(encodedSubscript);
- ASSERT(baseValue.isObject());
- ASSERT(subscript.isUInt32());
-
- JSObject* object = asObject(baseValue);
- uint32_t index = subscript.asUInt32();
- if (object->canGetIndexQuickly(index))
- return JSValue::encode(JSValue(JSValue::JSTrue));
-
- if (!canAccessArgumentIndexQuickly(*object, index)) {
- // FIXME: This will make us think that in-bounds typed array accesses are actually
- // out-of-bounds.
- // https://bugs.webkit.org/show_bug.cgi?id=149886
- byValInfo->arrayProfile->setOutOfBounds();
- }
- return JSValue::encode(jsBoolean(object->hasProperty(exec, subscript.asUInt32())));
+ JSValue result = getByVal(exec, baseValue, subscript, ReturnAddressPtr(OUR_RETURN_ADDRESS));
+ return JSValue::encode(result);
}
-EncodedJSValue JIT_OPERATION operationGetByValString(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo* byValInfo)
+EncodedJSValue JIT_OPERATION operationGetByValString(ExecState* exec, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
@@ -1748,23 +1456,37 @@ EncodedJSValue JIT_OPERATION operationGetByValString(ExecState* exec, EncodedJSV
result = asString(baseValue)->getIndex(exec, i);
else {
result = baseValue.get(exec, i);
- if (!isJSString(baseValue)) {
- ASSERT(exec->bytecodeOffset());
- ctiPatchCallByReturnAddress(ReturnAddressPtr(OUR_RETURN_ADDRESS), FunctionPtr(byValInfo->stubRoutine ? operationGetByValGeneric : operationGetByValOptimize));
- }
+ if (!isJSString(baseValue))
+ ctiPatchCallByReturnAddress(exec->codeBlock(), ReturnAddressPtr(OUR_RETURN_ADDRESS), FunctionPtr(operationGetByValDefault));
}
- } else {
- baseValue.requireObjectCoercible(exec);
- if (exec->hadException())
- return JSValue::encode(jsUndefined());
- auto property = subscript.toPropertyKey(exec);
- if (exec->hadException())
- return JSValue::encode(jsUndefined());
+ } else if (isName(subscript))
+ result = baseValue.get(exec, jsCast<NameInstance*>(subscript.asCell())->privateName());
+ else {
+ Identifier property(exec, subscript.toString(exec)->value(exec));
result = baseValue.get(exec, property);
}
return JSValue::encode(result);
}
+
+void JIT_OPERATION operationTearOffActivation(ExecState* exec, JSCell* activationCell)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ ASSERT(exec->codeBlock()->needsFullScopeChain());
+ jsCast<JSActivation*>(activationCell)->tearOff(vm);
+}
+
+void JIT_OPERATION operationTearOffArguments(ExecState* exec, JSCell* argumentsCell, JSCell* activationCell)
+{
+ ASSERT(exec->codeBlock()->usesArguments());
+ if (activationCell) {
+ jsCast<Arguments*>(argumentsCell)->didTearOffActivation(exec, jsCast<JSActivation*>(activationCell));
+ return;
+ }
+ jsCast<Arguments*>(argumentsCell)->tearOff(exec);
+}
EncodedJSValue JIT_OPERATION operationDeleteById(ExecState* exec, EncodedJSValue encodedBase, const Identifier* identifier)
{
@@ -1772,13 +1494,25 @@ EncodedJSValue JIT_OPERATION operationDeleteById(ExecState* exec, EncodedJSValue
NativeCallFrameTracer tracer(&vm, exec);
JSObject* baseObj = JSValue::decode(encodedBase).toObject(exec);
- bool couldDelete = baseObj->methodTable(vm)->deleteProperty(baseObj, exec, *identifier);
+ bool couldDelete = baseObj->methodTable()->deleteProperty(baseObj, exec, *identifier);
JSValue result = jsBoolean(couldDelete);
if (!couldDelete && exec->codeBlock()->isStrictMode())
- vm.throwException(exec, createTypeError(exec, ASCIILiteral("Unable to delete property.")));
+ vm.throwException(exec, createTypeError(exec, "Unable to delete property."));
return JSValue::encode(result);
}
+JSCell* JIT_OPERATION operationGetPNames(ExecState* exec, JSObject* obj)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+
+ Structure* structure = obj->structure();
+ JSPropertyNameIterator* jsPropertyNameIterator = structure->enumerationCache();
+ if (!jsPropertyNameIterator || jsPropertyNameIterator->cachedPrototypeChain() != structure->prototypeChain(exec))
+ jsPropertyNameIterator = JSPropertyNameIterator::create(exec, obj);
+ return jsPropertyNameIterator;
+}
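
operationGetPNames shows the enumeration-cache contract: a cached JSPropertyNameIterator is reusable only while the object's prototype chain is unchanged, because for-in must also see inherited properties. The validate-then-reuse shape, as a minimal sketch (names here are hypothetical):

    struct IteratorCacheSketch {
        const void* chainKey = nullptr;        // identity of the prototype chain when cached
        void* iterator = nullptr;
    };

    static void* getCachedIterator(IteratorCacheSketch& cache, const void* currentChain, void* (*rebuild)())
    {
        if (!cache.iterator || cache.chainKey != currentChain) {
            cache.iterator = rebuild();        // slow path: enumerate afresh
            cache.chainKey = currentChain;
        }
        return cache.iterator;                 // fast path: reuse the cached iterator
    }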
+
EncodedJSValue JIT_OPERATION operationInstanceOf(ExecState* exec, EncodedJSValue encodedValue, EncodedJSValue encodedProto)
{
VM& vm = exec->vm();
@@ -1792,21 +1526,23 @@ EncodedJSValue JIT_OPERATION operationInstanceOf(ExecState* exec, EncodedJSValue
return JSValue::encode(jsBoolean(result));
}
-int32_t JIT_OPERATION operationSizeFrameForVarargs(ExecState* exec, EncodedJSValue encodedArguments, int32_t numUsedStackSlots, int32_t firstVarArgOffset)
+CallFrame* JIT_OPERATION operationSizeAndAllocFrameForVarargs(ExecState* exec, EncodedJSValue encodedArguments, int32_t firstFreeRegister)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
JSStack* stack = &exec->interpreter()->stack();
JSValue arguments = JSValue::decode(encodedArguments);
- return sizeFrameForVarargs(exec, stack, arguments, numUsedStackSlots, firstVarArgOffset);
+ CallFrame* newCallFrame = sizeAndAllocFrameForVarargs(exec, stack, arguments, firstFreeRegister);
+ return newCallFrame;
}
-CallFrame* JIT_OPERATION operationSetupVarargsFrame(ExecState* exec, CallFrame* newCallFrame, EncodedJSValue encodedArguments, int32_t firstVarArgOffset, int32_t length)
+CallFrame* JIT_OPERATION operationLoadVarargs(ExecState* exec, CallFrame* newCallFrame, EncodedJSValue encodedThis, EncodedJSValue encodedArguments)
{
VM& vm = exec->vm();
NativeCallFrameTracer tracer(&vm, exec);
+ JSValue thisValue = JSValue::decode(encodedThis);
JSValue arguments = JSValue::decode(encodedArguments);
- setupVarargsFrame(exec, newCallFrame, arguments, firstVarArgOffset, length);
+ loadVarargs(exec, newCallFrame, thisValue, arguments);
return newCallFrame;
}
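
The two varargs helpers above form a two-phase protocol: the first sizes and allocates the callee frame, the JIT keeps the returned CallFrame* live, and the second copies |this| plus the spread arguments into it. Schematically, against the signatures above (encodedArgs, encodedThis, and firstFreeRegister are placeholders):

    CallFrame* newFrame = operationSizeAndAllocFrameForVarargs(exec, encodedArgs, firstFreeRegister);
    // ... the JIT holds newFrame across the gap ...
    newFrame = operationLoadVarargs(exec, newFrame, encodedThis, encodedArgs);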
@@ -1873,6 +1609,14 @@ char* JIT_OPERATION operationSwitchStringWithUnknownKeyType(ExecState* exec, Enc
return reinterpret_cast<char*>(result);
}
+EncodedJSValue JIT_OPERATION operationResolveScope(ExecState* exec, int32_t identifierIndex)
+{
+ VM& vm = exec->vm();
+ NativeCallFrameTracer tracer(&vm, exec);
+ const Identifier& ident = exec->codeBlock()->identifier(identifierIndex);
+ return JSValue::encode(JSScope::resolve(exec, exec->scope(), ident));
+}
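
JSScope::resolve walks the scope chain outward from the current scope. A hedged sketch of the walk (simplified; the real code special-cases with-scopes, symbol tables, and the global object):

    static JSObject* resolveScopeSketch(ExecState* exec, JSScope* scope, const Identifier& ident)
    {
        for (JSScope* current = scope; current; current = current->next()) {
            if (current->hasProperty(exec, ident)) // simplified membership test
                return current;                    // innermost scope owning the name
        }
        return exec->lexicalGlobalObject();        // unresolved names resolve to the global object
    }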
+
EncodedJSValue JIT_OPERATION operationGetFromScope(ExecState* exec, Instruction* bytecodePC)
{
VM& vm = exec->vm();
@@ -1882,33 +1626,25 @@ EncodedJSValue JIT_OPERATION operationGetFromScope(ExecState* exec, Instruction*
const Identifier& ident = codeBlock->identifier(pc[3].u.operand);
JSObject* scope = jsCast<JSObject*>(exec->uncheckedR(pc[2].u.operand).jsValue());
- GetPutInfo getPutInfo(pc[4].u.operand);
+ ResolveModeAndType modeAndType(pc[4].u.operand);
- // ModuleVar is always converted to ClosureVar for get_from_scope.
- ASSERT(getPutInfo.resolveType() != ModuleVar);
-
- PropertySlot slot(scope, PropertySlot::InternalMethodType::Get);
+ PropertySlot slot(scope);
if (!scope->getPropertySlot(exec, ident, slot)) {
- if (getPutInfo.resolveMode() == ThrowIfNotFound)
+ if (modeAndType.mode() == ThrowIfNotFound)
vm.throwException(exec, createUndefinedVariableError(exec, ident));
return JSValue::encode(jsUndefined());
}
- JSValue result = JSValue();
- if (jsDynamicCast<JSGlobalLexicalEnvironment*>(scope)) {
- // When we can't statically prove we need a TDZ check, we must perform the check on the slow path.
- result = slot.getValue(exec, ident);
- if (result == jsTDZValue()) {
- exec->vm().throwException(exec, createTDZError(exec));
- return JSValue::encode(jsUndefined());
+ // Covers implicit globals. Since they don't exist until they first execute, we didn't know how to cache them at compile time.
+ if (slot.isCacheableValue() && slot.slotBase() == scope && scope->structure()->propertyAccessesAreCacheable()) {
+ if (modeAndType.type() == GlobalProperty || modeAndType.type() == GlobalPropertyWithVarInjectionChecks) {
+ ConcurrentJITLocker locker(codeBlock->m_lock);
+ pc[5].u.structure.set(exec->vm(), codeBlock->ownerExecutable(), scope->structure());
+ pc[6].u.operand = slot.cachedOffset();
}
}
- CommonSlowPaths::tryCacheGetFromScopeGlobal(exec, vm, pc, scope, slot, ident);
-
- if (!result)
- result = slot.getValue(exec, ident);
- return JSValue::encode(result);
+ return JSValue::encode(slot.getValue(exec, ident));
}
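
What the pc[5]/pc[6] store above buys, sketched below (the actual fast path is emitted as machine code by the JIT, not written out in this patch): a later execution can service the get with one structure check and a direct offset load.

    static JSValue getFromScopeFastPathSketch(ExecState* exec, Instruction* pc)
    {
        JSObject* scope = jsCast<JSObject*>(exec->uncheckedR(pc[2].u.operand).jsValue());
        Structure* cachedStructure = pc[5].u.structure.get(); // written by the slow path above
        if (cachedStructure && scope->structure() == cachedStructure)
            return scope->getDirect(pc[6].u.operand);         // cached offset, no property lookup
        return JSValue();                                     // miss: take the slow path
    }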
void JIT_OPERATION operationPutToScope(ExecState* exec, Instruction* bytecodePC)
@@ -1921,44 +1657,27 @@ void JIT_OPERATION operationPutToScope(ExecState* exec, Instruction* bytecodePC)
const Identifier& ident = codeBlock->identifier(pc[2].u.operand);
JSObject* scope = jsCast<JSObject*>(exec->uncheckedR(pc[1].u.operand).jsValue());
JSValue value = exec->r(pc[3].u.operand).jsValue();
- GetPutInfo getPutInfo = GetPutInfo(pc[4].u.operand);
-
- // ModuleVar does not keep the scope register value alive in DFG.
- ASSERT(getPutInfo.resolveType() != ModuleVar);
-
- if (getPutInfo.resolveType() == LocalClosureVar) {
- JSLexicalEnvironment* environment = jsCast<JSLexicalEnvironment*>(scope);
- environment->variableAt(ScopeOffset(pc[6].u.operand)).set(vm, environment, value);
- if (WatchpointSet* set = pc[5].u.watchpointSet)
- set->touch("Executed op_put_scope<LocalClosureVar>");
- return;
- }
+ ResolveModeAndType modeAndType = ResolveModeAndType(pc[4].u.operand);
- bool hasProperty = scope->hasProperty(exec, ident);
- if (hasProperty
- && jsDynamicCast<JSGlobalLexicalEnvironment*>(scope)
- && getPutInfo.initializationMode() != Initialization) {
- // When we can't statically prove we need a TDZ check, we must perform the check on the slow path.
- PropertySlot slot(scope, PropertySlot::InternalMethodType::Get);
- JSGlobalLexicalEnvironment::getOwnPropertySlot(scope, exec, ident, slot);
- if (slot.getValue(exec, ident) == jsTDZValue()) {
- exec->vm().throwException(exec, createTDZError(exec));
- return;
- }
- }
-
- if (getPutInfo.resolveMode() == ThrowIfNotFound && !hasProperty) {
+ if (modeAndType.mode() == ThrowIfNotFound && !scope->hasProperty(exec, ident)) {
exec->vm().throwException(exec, createUndefinedVariableError(exec, ident));
return;
}
- PutPropertySlot slot(scope, codeBlock->isStrictMode(), PutPropertySlot::UnknownContext, getPutInfo.initializationMode() == Initialization);
+ PutPropertySlot slot(scope, codeBlock->isStrictMode());
scope->methodTable()->put(scope, exec, ident, value, slot);
if (exec->vm().exception())
return;
- CommonSlowPaths::tryCachePutToScopeGlobal(exec, codeBlock, pc, scope, getPutInfo, slot, ident);
+ // Covers implicit globals. Since they don't exist until they first execute, we didn't know how to cache them at compile time.
+ if (modeAndType.type() == GlobalProperty || modeAndType.type() == GlobalPropertyWithVarInjectionChecks) {
+ if (slot.isCacheable() && slot.base() == scope && scope->structure()->propertyAccessesAreCacheable()) {
+ ConcurrentJITLocker locker(codeBlock->m_lock);
+ pc[5].u.structure.set(exec->vm(), codeBlock->ownerExecutable(), scope->structure());
+ pc[6].u.operand = slot.cachedOffset();
+ }
+ }
}
void JIT_OPERATION operationThrow(ExecState* exec, EncodedJSValue encodedExceptionValue)
@@ -1969,8 +1688,8 @@ void JIT_OPERATION operationThrow(ExecState* exec, EncodedJSValue encodedExcepti
JSValue exceptionValue = JSValue::decode(encodedExceptionValue);
vm->throwException(exec, exceptionValue);
- // Results stored out-of-band in vm.targetMachinePCForThrow & vm.callFrameForCatch
- genericUnwind(vm, exec);
+ // Results stored out-of-band in vm.targetMachinePCForThrow & vm.callFrameForThrow
+ genericUnwind(vm, exec, exceptionValue);
}
void JIT_OPERATION operationFlushWriteBarrierBuffer(ExecState* exec, JSCell* cell)
@@ -1984,7 +1703,7 @@ void JIT_OPERATION operationOSRWriteBarrier(ExecState* exec, JSCell* cell)
{
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
- vm->heap.writeBarrier(cell);
+ exec->heap()->writeBarrier(cell);
}
// NB: We don't include the value as part of the barrier because the write barrier elision
@@ -1994,7 +1713,7 @@ void JIT_OPERATION operationUnconditionalWriteBarrier(ExecState* exec, JSCell* c
{
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
- vm->heap.writeBarrier(cell);
+ Heap::writeBarrier(cell);
}
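
Both barrier helpers receive only the mutated cell; per the comment above, the stored value can be omitted because the collector rescans the cell's outgoing pointers anyway. A rough sketch of the generational scheme this implies (hypothetical names; the real Heap::writeBarrier differs in detail):

    struct RememberedSetSketch { void append(JSCell*); };    // hypothetical
    static RememberedSetSketch rememberedSet;
    static bool cellIsInOldGeneration(JSCell*);               // hypothetical predicate

    static void writeBarrierSketch(JSCell* owner)
    {
        if (!cellIsInOldGeneration(owner))                    // young cells are rescanned by
            return;                                           // eden collections regardless
        rememberedSet.append(owner);                          // revisit owner's outgoing pointers
    }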
void JIT_OPERATION operationInitGlobalConst(ExecState* exec, Instruction* pc)
@@ -2003,20 +1722,18 @@ void JIT_OPERATION operationInitGlobalConst(ExecState* exec, Instruction* pc)
NativeCallFrameTracer tracer(vm, exec);
JSValue value = exec->r(pc[2].u.operand).jsValue();
- pc[1].u.variablePointer->set(*vm, exec->codeBlock()->globalObject(), value);
+ pc[1].u.registerPointer->set(*vm, exec->codeBlock()->globalObject(), value);
}
-void JIT_OPERATION lookupExceptionHandler(VM* vm, ExecState* exec)
+void JIT_OPERATION lookupExceptionHandler(ExecState* exec)
{
+ VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
- genericUnwind(vm, exec);
- ASSERT(vm->targetMachinePCForThrow);
-}
-void JIT_OPERATION lookupExceptionHandlerFromCallerFrame(VM* vm, ExecState* exec)
-{
- NativeCallFrameTracer tracer(vm, exec);
- genericUnwind(vm, exec, UnwindFromCallerFrame);
+ JSValue exceptionValue = exec->exception();
+ ASSERT(exceptionValue);
+
+ genericUnwind(vm, exec, exceptionValue);
ASSERT(vm->targetMachinePCForThrow);
}
@@ -2024,89 +1741,9 @@ void JIT_OPERATION operationVMHandleException(ExecState* exec)
{
VM* vm = &exec->vm();
NativeCallFrameTracer tracer(vm, exec);
- genericUnwind(vm, exec);
-}
-// This function "should" just take the ExecState*, but doing so would make it more difficult
-// to call from exception check sites. So, unlike all of our other functions, we allow
-// ourselves to play some gnarly ABI tricks just to simplify the calling convention. This is
-// particularly safe here since this is never called on the critical path - it's only for
-// testing.
-void JIT_OPERATION operationExceptionFuzz(ExecState* exec)
-{
- VM* vm = &exec->vm();
- NativeCallFrameTracer tracer(vm, exec);
-#if COMPILER(GCC_OR_CLANG)
- void* returnPC = __builtin_return_address(0);
- doExceptionFuzzing(exec, "JITOperations", returnPC);
-#endif // COMPILER(GCC_OR_CLANG)
-}
-
-EncodedJSValue JIT_OPERATION operationHasGenericProperty(ExecState* exec, EncodedJSValue encodedBaseValue, JSCell* propertyName)
-{
- VM& vm = exec->vm();
- NativeCallFrameTracer tracer(&vm, exec);
- JSValue baseValue = JSValue::decode(encodedBaseValue);
- if (baseValue.isUndefinedOrNull())
- return JSValue::encode(jsBoolean(false));
-
- JSObject* base = baseValue.toObject(exec);
- return JSValue::encode(jsBoolean(base->hasProperty(exec, asString(propertyName)->toIdentifier(exec))));
-}
-
-EncodedJSValue JIT_OPERATION operationHasIndexedProperty(ExecState* exec, JSCell* baseCell, int32_t subscript)
-{
- VM& vm = exec->vm();
- NativeCallFrameTracer tracer(&vm, exec);
- JSObject* object = baseCell->toObject(exec, exec->lexicalGlobalObject());
- return JSValue::encode(jsBoolean(object->hasProperty(exec, subscript)));
-}
-
-JSCell* JIT_OPERATION operationGetPropertyEnumerator(ExecState* exec, JSCell* cell)
-{
- VM& vm = exec->vm();
- NativeCallFrameTracer tracer(&vm, exec);
-
- JSObject* base = cell->toObject(exec, exec->lexicalGlobalObject());
-
- return propertyNameEnumerator(exec, base);
-}
-
-EncodedJSValue JIT_OPERATION operationNextEnumeratorPname(ExecState* exec, JSCell* enumeratorCell, int32_t index)
-{
- VM& vm = exec->vm();
- NativeCallFrameTracer tracer(&vm, exec);
- JSPropertyNameEnumerator* enumerator = jsCast<JSPropertyNameEnumerator*>(enumeratorCell);
- JSString* propertyName = enumerator->propertyNameAtIndex(index);
- return JSValue::encode(propertyName ? propertyName : jsNull());
-}
-
-JSCell* JIT_OPERATION operationToIndexString(ExecState* exec, int32_t index)
-{
- VM& vm = exec->vm();
- NativeCallFrameTracer tracer(&vm, exec);
- return jsString(exec, Identifier::from(exec, index).string());
-}
-
-void JIT_OPERATION operationProcessTypeProfilerLog(ExecState* exec)
-{
- exec->vm().typeProfilerLog()->processLogEntries(ASCIILiteral("Log Full, called from inside baseline JIT"));
-}
-
-int32_t JIT_OPERATION operationCheckIfExceptionIsUncatchableAndNotifyProfiler(ExecState* exec)
-{
- VM& vm = exec->vm();
- NativeCallFrameTracer tracer(&vm, exec);
- RELEASE_ASSERT(!!vm.exception());
-
- if (LegacyProfiler* profiler = vm.enabledProfiler())
- profiler->exceptionUnwind(exec);
-
- if (isTerminatedExecutionException(vm.exception())) {
- genericUnwind(&vm, exec);
- return 1;
- } else
- return 0;
+ ASSERT(!exec->isVMEntrySentinel());
+ genericUnwind(vm, exec, vm->exception());
}
} // extern "C"
@@ -2121,32 +1758,28 @@ extern "C" EncodedJSValue HOST_CALL_RETURN_VALUE_OPTION getHostCallReturnValueWi
return JSValue::encode(exec->vm().hostCallReturnValue);
}
-#if COMPILER(GCC_OR_CLANG) && CPU(X86_64)
+#if COMPILER(GCC) && CPU(X86_64)
asm (
".globl " SYMBOL_STRING(getHostCallReturnValue) "\n"
HIDE_SYMBOL(getHostCallReturnValue) "\n"
SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
- "lea -8(%rsp), %rdi\n"
+ "mov 0(%rbp), %rbp\n" // CallerFrameAndPC::callerFrame
+ "mov %rbp, %rdi\n"
"jmp " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n"
);
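
Every per-architecture variant of this trampoline performs the same two steps: load the caller's CallFrame* from offset 0 of the current frame pointer, then tail-call getHostCallReturnValueWithExecState with it as the first argument. The frame-header layout that makes offset 0 correct, shown schematically (as in JSC's CallFrame header):

    struct CallerFrameAndPC {
        CallFrame* callerFrame; // at offset 0 from the callee's frame pointer
        void* returnPC;         // the saved return address
    };
    // So "mov 0(%rbp), %rbp" swaps in the caller's CallFrame*, which is then
    // passed to getHostCallReturnValueWithExecState as its ExecState* argument.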
-#elif COMPILER(GCC_OR_CLANG) && CPU(X86)
+#elif COMPILER(GCC) && CPU(X86)
asm (
".text" "\n" \
".globl " SYMBOL_STRING(getHostCallReturnValue) "\n"
HIDE_SYMBOL(getHostCallReturnValue) "\n"
SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
- "push %ebp\n"
- "mov %esp, %eax\n"
- "leal -4(%esp), %esp\n"
- "push %eax\n"
- "call " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n"
- "leal 8(%esp), %esp\n"
- "pop %ebp\n"
- "ret\n"
+ "mov 0(%ebp), %ebp\n" // CallerFrameAndPC::callerFrame
+ "mov %ebp, 4(%esp)\n"
+ "jmp " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n"
);
-#elif COMPILER(GCC_OR_CLANG) && CPU(ARM_THUMB2)
+#elif COMPILER(GCC) && CPU(ARM_THUMB2)
asm (
".text" "\n"
".align 2" "\n"
@@ -2155,18 +1788,20 @@ HIDE_SYMBOL(getHostCallReturnValue) "\n"
".thumb" "\n"
".thumb_func " THUMB_FUNC_PARAM(getHostCallReturnValue) "\n"
SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
- "sub r0, sp, #8" "\n"
+ "ldr r7, [r7, #0]" "\n" // CallerFrameAndPC::callerFrame
+ "mov r0, r7" "\n"
"b " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n"
);
-#elif COMPILER(GCC_OR_CLANG) && CPU(ARM_TRADITIONAL)
+#elif COMPILER(GCC) && CPU(ARM_TRADITIONAL)
asm (
".text" "\n"
".globl " SYMBOL_STRING(getHostCallReturnValue) "\n"
HIDE_SYMBOL(getHostCallReturnValue) "\n"
INLINE_ARM_FUNCTION(getHostCallReturnValue)
SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
- "sub r0, sp, #8" "\n"
+ "ldr r11, [r11, #0]" "\n" // CallerFrameAndPC::callerFrame
+ "mov r0, r11" "\n"
"b " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n"
);
@@ -2177,43 +1812,31 @@ asm (
".globl " SYMBOL_STRING(getHostCallReturnValue) "\n"
HIDE_SYMBOL(getHostCallReturnValue) "\n"
SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
- "sub x0, sp, #16" "\n"
+ "ldur x29, [x29, #0]" "\n"
+ "mov x0, x29" "\n"
"b " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n"
);
-#elif COMPILER(GCC_OR_CLANG) && CPU(MIPS)
-
-#if WTF_MIPS_PIC
-#define LOAD_FUNCTION_TO_T9(function) \
- ".set noreorder" "\n" \
- ".cpload $25" "\n" \
- ".set reorder" "\n" \
- "la $t9, " LOCAL_REFERENCE(function) "\n"
-#else
-#define LOAD_FUNCTION_TO_T9(function) "" "\n"
-#endif
-
+#elif COMPILER(GCC) && CPU(MIPS)
asm (
".text" "\n"
".globl " SYMBOL_STRING(getHostCallReturnValue) "\n"
HIDE_SYMBOL(getHostCallReturnValue) "\n"
SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
LOAD_FUNCTION_TO_T9(getHostCallReturnValueWithExecState)
- "addi $a0, $sp, -8" "\n"
+ "lw $fp, 0($fp)" "\n" // CallerFrameAndPC::callerFrame
+ "move $a0, $fp" "\n"
"b " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n"
);
-#elif COMPILER(GCC_OR_CLANG) && CPU(SH4)
-
-#define SH4_SCRATCH_REGISTER "r11"
-
+#elif COMPILER(GCC) && CPU(SH4)
asm (
".text" "\n"
".globl " SYMBOL_STRING(getHostCallReturnValue) "\n"
HIDE_SYMBOL(getHostCallReturnValue) "\n"
SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
- "mov r15, r4" "\n"
- "add -8, r4" "\n"
+ "mov.l @r14, r14" "\n" // CallerFrameAndPC::callerFrame
+ "mov r14, r4" "\n"
"mov.l 2f, " SH4_SCRATCH_REGISTER "\n"
"braf " SH4_SCRATCH_REGISTER "\n"
"nop" "\n"
@@ -2225,8 +1848,8 @@ SYMBOL_STRING(getHostCallReturnValue) ":" "\n"
extern "C" {
__declspec(naked) EncodedJSValue HOST_CALL_RETURN_VALUE_OPTION getHostCallReturnValue()
{
- __asm lea eax, [esp - 4]
- __asm mov [esp + 4], eax;
+ __asm mov ebp, [ebp + 0]; // CallerFrameAndPC::callerFrame
+ __asm mov [esp + 4], ebp;
__asm jmp getHostCallReturnValueWithExecState
}
}
diff --git a/Source/JavaScriptCore/jit/JITOperations.h b/Source/JavaScriptCore/jit/JITOperations.h
index cacfbcb20..43ca6177b 100644
--- a/Source/JavaScriptCore/jit/JITOperations.h
+++ b/Source/JavaScriptCore/jit/JITOperations.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,32 +28,18 @@
#if ENABLE(JIT)
-#include "MacroAssemblerCodeRef.h"
-#include "PropertyOffset.h"
-#include "SlowPathReturnType.h"
-#include "TypedArrayType.h"
-#include <wtf/text/UniquedStringImpl.h>
+#include "CallFrame.h"
+#include "JITExceptions.h"
+#include "JSArray.h"
+#include "JSCJSValue.h"
+#include "MacroAssembler.h"
+#include "PutKind.h"
+#include "StructureStubInfo.h"
+#include "VariableWatchpointSet.h"
namespace JSC {
class ArrayAllocationProfile;
-class ArrayProfile;
-class CallLinkInfo;
-class CodeBlock;
-class ExecState;
-class JSArray;
-class JSFunction;
-class JSLexicalEnvironment;
-class JSScope;
-class Register;
-class StructureStubInfo;
-class SymbolTable;
-class WatchpointSet;
-
-struct ByValInfo;
-struct InlineCallFrame;
-
-typedef ExecState CallFrame;
#if CALLING_CONVENTION_IS_STDCALL
#define JIT_OPERATION CDECL
@@ -63,51 +49,39 @@ typedef ExecState CallFrame;
extern "C" {
-typedef char* UnusedPtr;
-
// These typedefs provide typechecking when generating calls out to helper routines;
// this helps prevent calling a helper routine with the wrong arguments!
/*
Key:
A: JSArray*
Aap: ArrayAllocationProfile*
- Ap: ArrayProfile*
- By: ByValInfo*
C: JSCell*
Cb: CodeBlock*
- Cli: CallLinkInfo*
D: double
E: ExecState*
F: CallFrame*
- G: JSGlobalObject*
- I: UniquedStringImpl*
- Icf: InlineCallFrame*
+ I: StringImpl*
+ Icf: InlineCallFrame*
Idc: const Identifier*
J: EncodedJSValue
Jcp: const JSValue*
- Jsc: JSScope*
- Jsf: JSFunction*
+ Jsa: JSActivation*
Jss: JSString*
- L: JSLexicalEnvironment*
O: JSObject*
P: pointer (char*)
Pc: Instruction* i.e. bytecode PC
- Q: int64_t
R: Register
S: size_t
- Sprt: SlowPathReturnType
Ssi: StructureStubInfo*
St: Structure*
- Symtab: SymbolTable*
- T: StringImpl*
V: void
Vm: VM*
- Ws: WatchpointSet*
+ Vws: VariableWatchpointSet*
Z: int32_t
- Ui: uint32_t
*/
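
The legend reads left to right: the letters before "_JITOperation_" give the return type, and the letters after it give the argument types in order. For example, decoding a typedef that appears verbatim below:

    // J_JITOperation_ECI: returns EncodedJSValue (J); takes ExecState* (E),
    // JSCell* (C), and StringImpl* (I).
    typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_ECI)(ExecState*, JSCell*, StringImpl*);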
-typedef CallFrame* JIT_OPERATION (*F_JITOperation_EFJZZ)(ExecState*, CallFrame*, EncodedJSValue, int32_t, int32_t);
+typedef CallFrame* JIT_OPERATION (*F_JITOperation_EFJJ)(ExecState*, CallFrame*, EncodedJSValue, EncodedJSValue);
+typedef CallFrame* JIT_OPERATION (*F_JITOperation_EJZ)(ExecState*, EncodedJSValue, int32_t);
typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_E)(ExecState*);
typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EA)(ExecState*, JSArray*);
typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EAZ)(ExecState*, JSArray*, int32_t);
@@ -115,110 +89,66 @@ typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EAapJ)(ExecState*, ArrayAl
typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EAapJcpZ)(ExecState*, ArrayAllocationProfile*, const JSValue*, int32_t);
typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EC)(ExecState*, JSCell*);
typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_ECC)(ExecState*, JSCell*, JSCell*);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_ECI)(ExecState*, JSCell*, UniquedStringImpl*);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_ECI)(ExecState*, JSCell*, StringImpl*);
typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_ECJ)(ExecState*, JSCell*, EncodedJSValue);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_ECZ)(ExecState*, JSCell*, int32_t);
typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EDA)(ExecState*, double, JSArray*);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EE)(ExecState*, ExecState*);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EI)(ExecState*, UniquedStringImpl*);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EI)(ExecState*, StringImpl*);
typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJ)(ExecState*, EncodedJSValue);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJZ)(ExecState*, EncodedJSValue, int32_t);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJC)(ExecState*, EncodedJSValue, JSCell*);
typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJA)(ExecState*, EncodedJSValue, JSArray*);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJI)(ExecState*, EncodedJSValue, UniquedStringImpl*);
typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJIdc)(ExecState*, EncodedJSValue, const Identifier*);
typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJJ)(ExecState*, EncodedJSValue, EncodedJSValue);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJJAp)(ExecState*, EncodedJSValue, EncodedJSValue, ArrayProfile*);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJJBy)(ExecState*, EncodedJSValue, EncodedJSValue, ByValInfo*);
typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJssZ)(ExecState*, JSString*, int32_t);
typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJP)(ExecState*, EncodedJSValue, void*);
typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EP)(ExecState*, void*);
typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EPP)(ExecState*, void*, void*);
typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EPS)(ExecState*, void*, size_t);
typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EPc)(ExecState*, Instruction*);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJscC)(ExecState*, JSScope*, JSCell*);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EJscCJ)(ExecState*, JSScope*, JSCell*, EncodedJSValue);
typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_ESS)(ExecState*, size_t, size_t);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_ESsiCI)(ExecState*, StructureStubInfo*, JSCell*, UniquedStringImpl*);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_ESsiJI)(ExecState*, StructureStubInfo*, EncodedJSValue, UniquedStringImpl*);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_ESsiCI)(ExecState*, StructureStubInfo*, JSCell*, StringImpl*);
+typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_ESsiJI)(ExecState*, StructureStubInfo*, EncodedJSValue, StringImpl*);
typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EZ)(ExecState*, int32_t);
typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EZIcfZ)(ExecState*, int32_t, InlineCallFrame*, int32_t);
typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EZZ)(ExecState*, int32_t, int32_t);
-typedef EncodedJSValue JIT_OPERATION (*J_JITOperation_EZSymtabJ)(ExecState*, int32_t, SymbolTable*, EncodedJSValue);
typedef JSCell* JIT_OPERATION (*C_JITOperation_E)(ExecState*);
typedef JSCell* JIT_OPERATION (*C_JITOperation_EZ)(ExecState*, int32_t);
typedef JSCell* JIT_OPERATION (*C_JITOperation_EC)(ExecState*, JSCell*);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_ECZ)(ExecState*, JSCell*, int32_t);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_ECZC)(ExecState*, JSCell*, int32_t, JSCell*);
typedef JSCell* JIT_OPERATION (*C_JITOperation_ECC)(ExecState*, JSCell*, JSCell*);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EGC)(ExecState*, JSGlobalObject*, JSCell*);
typedef JSCell* JIT_OPERATION (*C_JITOperation_EIcf)(ExecState*, InlineCallFrame*);
typedef JSCell* JIT_OPERATION (*C_JITOperation_EJ)(ExecState*, EncodedJSValue);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EJsc)(ExecState*, JSScope*);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EJscC)(ExecState*, JSScope*, JSCell*);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EJZ)(ExecState*, EncodedJSValue, int32_t);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EJZC)(ExecState*, EncodedJSValue, int32_t, JSCell*);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EJJ)(ExecState*, EncodedJSValue, EncodedJSValue);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EJJC)(ExecState*, EncodedJSValue, EncodedJSValue, JSCell*);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EJJJ)(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EJscZ)(ExecState*, JSScope*, int32_t);
typedef JSCell* JIT_OPERATION (*C_JITOperation_EJssSt)(ExecState*, JSString*, Structure*);
typedef JSCell* JIT_OPERATION (*C_JITOperation_EJssJss)(ExecState*, JSString*, JSString*);
typedef JSCell* JIT_OPERATION (*C_JITOperation_EJssJssJss)(ExecState*, JSString*, JSString*, JSString*);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EL)(ExecState*, JSLexicalEnvironment*);
typedef JSCell* JIT_OPERATION (*C_JITOperation_EO)(ExecState*, JSObject*);
typedef JSCell* JIT_OPERATION (*C_JITOperation_EOZ)(ExecState*, JSObject*, int32_t);
typedef JSCell* JIT_OPERATION (*C_JITOperation_ESt)(ExecState*, Structure*);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EStJscSymtabJ)(ExecState*, Structure*, JSScope*, SymbolTable*, EncodedJSValue);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EStRZJsfL)(ExecState*, Structure*, Register*, int32_t, JSFunction*, JSLexicalEnvironment*);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EStRZJsf)(ExecState*, Structure*, Register*, int32_t, JSFunction*);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EStZ)(ExecState*, Structure*, int32_t);
-typedef JSCell* JIT_OPERATION (*C_JITOperation_EStZZ)(ExecState*, Structure*, int32_t, int32_t);
typedef JSCell* JIT_OPERATION (*C_JITOperation_EZ)(ExecState*, int32_t);
typedef double JIT_OPERATION (*D_JITOperation_D)(double);
-typedef double JIT_OPERATION (*D_JITOperation_G)(JSGlobalObject*);
typedef double JIT_OPERATION (*D_JITOperation_DD)(double, double);
typedef double JIT_OPERATION (*D_JITOperation_ZZ)(int32_t, int32_t);
typedef double JIT_OPERATION (*D_JITOperation_EJ)(ExecState*, EncodedJSValue);
-typedef int64_t JIT_OPERATION(*Q_JITOperation_J)(EncodedJSValue);
-typedef int64_t JIT_OPERATION(*Q_JITOperation_D)(double);
typedef int32_t JIT_OPERATION (*Z_JITOperation_D)(double);
typedef int32_t JIT_OPERATION (*Z_JITOperation_E)(ExecState*);
-typedef int32_t JIT_OPERATION (*Z_JITOperation_EC)(ExecState*, JSCell*);
-typedef int32_t JIT_OPERATION (*Z_JITOperation_EGC)(ExecState*, JSGlobalObject*, JSCell*);
-typedef int32_t JIT_OPERATION (*Z_JITOperation_ESJss)(ExecState*, size_t, JSString*);
-typedef int32_t JIT_OPERATION (*Z_JITOperation_EJ)(ExecState*, EncodedJSValue);
-typedef int32_t JIT_OPERATION (*Z_JITOperation_EJOJ)(ExecState*, EncodedJSValue, JSObject*, EncodedJSValue);
-typedef int32_t JIT_OPERATION (*Z_JITOperation_EJZ)(ExecState*, EncodedJSValue, int32_t);
-typedef int32_t JIT_OPERATION (*Z_JITOperation_EJZZ)(ExecState*, EncodedJSValue, int32_t, int32_t);
typedef size_t JIT_OPERATION (*S_JITOperation_ECC)(ExecState*, JSCell*, JSCell*);
-typedef size_t JIT_OPERATION (*S_JITOperation_EGC)(ExecState*, JSGlobalObject*, JSCell*);
typedef size_t JIT_OPERATION (*S_JITOperation_EJ)(ExecState*, EncodedJSValue);
typedef size_t JIT_OPERATION (*S_JITOperation_EJJ)(ExecState*, EncodedJSValue, EncodedJSValue);
typedef size_t JIT_OPERATION (*S_JITOperation_EOJss)(ExecState*, JSObject*, JSString*);
typedef size_t JIT_OPERATION (*S_JITOperation_J)(EncodedJSValue);
-typedef SlowPathReturnType JIT_OPERATION (*Sprt_JITOperation_EZ)(ExecState*, int32_t);
-typedef void JIT_OPERATION (*V_JITOperation)();
typedef void JIT_OPERATION (*V_JITOperation_E)(ExecState*);
typedef void JIT_OPERATION (*V_JITOperation_EC)(ExecState*, JSCell*);
typedef void JIT_OPERATION (*V_JITOperation_ECb)(ExecState*, CodeBlock*);
typedef void JIT_OPERATION (*V_JITOperation_ECC)(ExecState*, JSCell*, JSCell*);
typedef void JIT_OPERATION (*V_JITOperation_ECIcf)(ExecState*, JSCell*, InlineCallFrame*);
-typedef void JIT_OPERATION (*V_JITOperation_ECIZC)(ExecState*, JSCell*, UniquedStringImpl*, int32_t, JSCell*);
-typedef void JIT_OPERATION (*V_JITOperation_ECIZCC)(ExecState*, JSCell*, UniquedStringImpl*, int32_t, JSCell*, JSCell*);
-typedef void JIT_OPERATION (*V_JITOperation_ECIZJJ)(ExecState*, JSCell*, UniquedStringImpl*, int32_t, EncodedJSValue, EncodedJSValue);
-typedef void JIT_OPERATION (*V_JITOperation_ECJZC)(ExecState*, JSCell*, EncodedJSValue, int32_t, JSCell*);
+typedef void JIT_OPERATION (*V_JITOperation_ECICC)(ExecState*, JSCell*, Identifier*, JSCell*, JSCell*);
typedef void JIT_OPERATION (*V_JITOperation_ECCIcf)(ExecState*, JSCell*, JSCell*, InlineCallFrame*);
typedef void JIT_OPERATION (*V_JITOperation_ECJJ)(ExecState*, JSCell*, EncodedJSValue, EncodedJSValue);
typedef void JIT_OPERATION (*V_JITOperation_ECPSPS)(ExecState*, JSCell*, void*, size_t, void*, size_t);
typedef void JIT_OPERATION (*V_JITOperation_ECZ)(ExecState*, JSCell*, int32_t);
typedef void JIT_OPERATION (*V_JITOperation_ECC)(ExecState*, JSCell*, JSCell*);
-typedef void JIT_OPERATION (*V_JITOperation_EZSymtabJ)(ExecState*, int32_t, SymbolTable*, EncodedJSValue);
+typedef void JIT_OPERATION (*V_JITOperation_EIdJZ)(ExecState*, Identifier*, EncodedJSValue, int32_t);
typedef void JIT_OPERATION (*V_JITOperation_EJ)(ExecState*, EncodedJSValue);
-typedef void JIT_OPERATION (*V_JITOperation_EJCI)(ExecState*, EncodedJSValue, JSCell*, UniquedStringImpl*);
+typedef void JIT_OPERATION (*V_JITOperation_EJCI)(ExecState*, EncodedJSValue, JSCell*, StringImpl*);
+typedef void JIT_OPERATION (*V_JITOperation_EJIdJJ)(ExecState*, EncodedJSValue, Identifier*, EncodedJSValue, EncodedJSValue);
typedef void JIT_OPERATION (*V_JITOperation_EJJJ)(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue);
-typedef void JIT_OPERATION (*V_JITOperation_EJJJAp)(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue, ArrayProfile*);
-typedef void JIT_OPERATION (*V_JITOperation_EJJJBy)(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue, ByValInfo*);
typedef void JIT_OPERATION (*V_JITOperation_EJPP)(ExecState*, EncodedJSValue, void*, void*);
typedef void JIT_OPERATION (*V_JITOperation_EJZJ)(ExecState*, EncodedJSValue, int32_t, EncodedJSValue);
typedef void JIT_OPERATION (*V_JITOperation_EJZ)(ExecState*, EncodedJSValue, int32_t);
@@ -226,18 +156,12 @@ typedef void JIT_OPERATION (*V_JITOperation_EOZD)(ExecState*, JSObject*, int32_t
typedef void JIT_OPERATION (*V_JITOperation_EOZJ)(ExecState*, JSObject*, int32_t, EncodedJSValue);
typedef void JIT_OPERATION (*V_JITOperation_EPc)(ExecState*, Instruction*);
typedef void JIT_OPERATION (*V_JITOperation_EPZJ)(ExecState*, void*, int32_t, EncodedJSValue);
-typedef void JIT_OPERATION (*V_JITOperation_ESsiJJI)(ExecState*, StructureStubInfo*, EncodedJSValue, EncodedJSValue, UniquedStringImpl*);
-typedef void JIT_OPERATION (*V_JITOperation_EWs)(ExecState*, WatchpointSet*);
+typedef void JIT_OPERATION (*V_JITOperation_ESsiJJI)(ExecState*, StructureStubInfo*, EncodedJSValue, EncodedJSValue, StringImpl*);
+typedef void JIT_OPERATION (*V_JITOperation_EVws)(ExecState*, VariableWatchpointSet*);
typedef void JIT_OPERATION (*V_JITOperation_EZ)(ExecState*, int32_t);
-typedef void JIT_OPERATION (*V_JITOperation_EZJ)(ExecState*, int32_t, EncodedJSValue);
-typedef void JIT_OPERATION (*V_JITOperation_EZJZZZ)(ExecState*, int32_t, EncodedJSValue, int32_t, int32_t, int32_t);
typedef void JIT_OPERATION (*V_JITOperation_EVm)(ExecState*, VM*);
-typedef void JIT_OPERATION (*V_JITOperation_J)(EncodedJSValue);
-typedef void JIT_OPERATION (*V_JITOperation_Z)(int32_t);
-typedef void JIT_OPERATION (*V_JITOperation_ECRUiUi)(ExecState*, JSCell*, Register*, uint32_t, uint32_t);
typedef char* JIT_OPERATION (*P_JITOperation_E)(ExecState*);
typedef char* JIT_OPERATION (*P_JITOperation_EC)(ExecState*, JSCell*);
-typedef char* JIT_OPERATION (*P_JITOperation_ECli)(ExecState*, CallLinkInfo*);
typedef char* JIT_OPERATION (*P_JITOperation_EJS)(ExecState*, EncodedJSValue, size_t);
typedef char* JIT_OPERATION (*P_JITOperation_EO)(ExecState*, JSObject*);
typedef char* JIT_OPERATION (*P_JITOperation_EOS)(ExecState*, JSObject*, size_t);
@@ -250,55 +174,51 @@ typedef char* JIT_OPERATION (*P_JITOperation_EStJ)(ExecState*, Structure*, Encod
typedef char* JIT_OPERATION (*P_JITOperation_EStPS)(ExecState*, Structure*, void*, size_t);
typedef char* JIT_OPERATION (*P_JITOperation_EStSS)(ExecState*, Structure*, size_t, size_t);
typedef char* JIT_OPERATION (*P_JITOperation_EStZ)(ExecState*, Structure*, int32_t);
+typedef char* JIT_OPERATION (*P_JITOperation_EZ)(ExecState*, int32_t);
typedef char* JIT_OPERATION (*P_JITOperation_EZZ)(ExecState*, int32_t, int32_t);
-typedef SlowPathReturnType JIT_OPERATION (*Sprt_JITOperation_ECli)(ExecState*, CallLinkInfo*);
-typedef StringImpl* JIT_OPERATION (*T_JITOperation_EJss)(ExecState*, JSString*);
+typedef StringImpl* JIT_OPERATION (*I_JITOperation_EJss)(ExecState*, JSString*);
typedef JSString* JIT_OPERATION (*Jss_JITOperation_EZ)(ExecState*, int32_t);
-typedef JSString* JIT_OPERATION (*Jss_JITOperation_EJJJ)(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue);
// This method is used to look up an exception handler, keyed by faultLocation, which is
// the return location from one of the calls out to one of the helper operations above.
-
-void JIT_OPERATION lookupExceptionHandler(VM*, ExecState*) WTF_INTERNAL;
-void JIT_OPERATION lookupExceptionHandlerFromCallerFrame(VM*, ExecState*) WTF_INTERNAL;
+
+void JIT_OPERATION lookupExceptionHandler(ExecState*) WTF_INTERNAL;
void JIT_OPERATION operationVMHandleException(ExecState*) WTF_INTERNAL;
-void JIT_OPERATION operationThrowStackOverflowError(ExecState*, CodeBlock*) WTF_INTERNAL;
-#if ENABLE(WEBASSEMBLY)
-void JIT_OPERATION operationThrowDivideError(ExecState*) WTF_INTERNAL;
-void JIT_OPERATION operationThrowOutOfBoundsAccessError(ExecState*) WTF_INTERNAL;
-#endif
+void JIT_OPERATION operationStackCheck(ExecState*, CodeBlock*) WTF_INTERNAL;
int32_t JIT_OPERATION operationCallArityCheck(ExecState*) WTF_INTERNAL;
int32_t JIT_OPERATION operationConstructArityCheck(ExecState*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationGetById(ExecState*, StructureStubInfo*, EncodedJSValue, UniquedStringImpl*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationGetByIdGeneric(ExecState*, EncodedJSValue, UniquedStringImpl*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationGetByIdBuildList(ExecState*, StructureStubInfo*, EncodedJSValue, UniquedStringImpl*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationGetByIdOptimize(ExecState*, StructureStubInfo*, EncodedJSValue, UniquedStringImpl*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationInOptimize(ExecState*, StructureStubInfo*, JSCell*, UniquedStringImpl*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationIn(ExecState*, StructureStubInfo*, JSCell*, UniquedStringImpl*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationGenericIn(ExecState*, JSCell*, EncodedJSValue) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdStrict(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdNonStrict(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdDirectStrict(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdDirectNonStrict(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdStrictOptimize(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdNonStrictOptimize(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdDirectStrictOptimize(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdDirectNonStrictOptimize(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdStrictBuildList(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdNonStrictBuildList(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdDirectStrictBuildList(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByIdDirectNonStrictBuildList(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, UniquedStringImpl*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationGetById(ExecState*, StructureStubInfo*, EncodedJSValue, StringImpl*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationGetByIdBuildList(ExecState*, StructureStubInfo*, EncodedJSValue, StringImpl*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationGetByIdOptimize(ExecState*, StructureStubInfo*, EncodedJSValue, StringImpl*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationInOptimize(ExecState*, StructureStubInfo*, JSCell*, StringImpl*);
+EncodedJSValue JIT_OPERATION operationIn(ExecState*, StructureStubInfo*, JSCell*, StringImpl*);
+EncodedJSValue JIT_OPERATION operationGenericIn(ExecState*, JSCell*, EncodedJSValue);
+EncodedJSValue JIT_OPERATION operationCallCustomGetter(ExecState*, JSCell*, PropertySlot::GetValueFunc, StringImpl*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationCallGetter(ExecState*, JSCell*, JSCell*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdStrict(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdNonStrict(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdDirectStrict(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdDirectNonStrict(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdStrictOptimize(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdNonStrictOptimize(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdDirectStrictOptimize(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdDirectNonStrictOptimize(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdStrictBuildList(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdNonStrictBuildList(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdDirectStrictBuildList(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
+void JIT_OPERATION operationPutByIdDirectNonStrictBuildList(ExecState*, StructureStubInfo*, EncodedJSValue encodedValue, EncodedJSValue encodedBase, StringImpl*) WTF_INTERNAL;
void JIT_OPERATION operationReallocateStorageAndFinishPut(ExecState*, JSObject*, Structure*, PropertyOffset, EncodedJSValue) WTF_INTERNAL;
-void JIT_OPERATION operationPutByValOptimize(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue, ByValInfo*) WTF_INTERNAL;
-void JIT_OPERATION operationDirectPutByValOptimize(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue, ByValInfo*) WTF_INTERNAL;
-void JIT_OPERATION operationPutByValGeneric(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue, ByValInfo*) WTF_INTERNAL;
-void JIT_OPERATION operationDirectPutByValGeneric(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue, ByValInfo*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationCallEval(ExecState*, ExecState*) WTF_INTERNAL;
-SlowPathReturnType JIT_OPERATION operationLinkCall(ExecState*, CallLinkInfo*) WTF_INTERNAL;
-SlowPathReturnType JIT_OPERATION operationLinkPolymorphicCall(ExecState*, CallLinkInfo*) WTF_INTERNAL;
-SlowPathReturnType JIT_OPERATION operationVirtualCall(ExecState*, CallLinkInfo*) WTF_INTERNAL;
-
+void JIT_OPERATION operationPutByVal(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
+void JIT_OPERATION operationDirectPutByVal(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
+void JIT_OPERATION operationPutByValGeneric(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
+void JIT_OPERATION operationDirectPutByValGeneric(ExecState*, EncodedJSValue, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationCallEval(ExecState*) WTF_INTERNAL;
+char* JIT_OPERATION operationVirtualCall(ExecState*) WTF_INTERNAL;
+char* JIT_OPERATION operationLinkCall(ExecState*) WTF_INTERNAL;
+char* JIT_OPERATION operationLinkClosureCall(ExecState*) WTF_INTERNAL;
+char* JIT_OPERATION operationVirtualConstruct(ExecState*) WTF_INTERNAL;
+char* JIT_OPERATION operationLinkConstruct(ExecState*) WTF_INTERNAL;
size_t JIT_OPERATION operationCompareLess(ExecState*, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
size_t JIT_OPERATION operationCompareLessEq(ExecState*, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
size_t JIT_OPERATION operationCompareGreater(ExecState*, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
@@ -314,48 +234,47 @@ size_t JIT_OPERATION operationHasProperty(ExecState*, JSObject*, JSString*) WTF_
EncodedJSValue JIT_OPERATION operationNewArrayWithProfile(ExecState*, ArrayAllocationProfile*, const JSValue* values, int32_t size) WTF_INTERNAL;
EncodedJSValue JIT_OPERATION operationNewArrayBufferWithProfile(ExecState*, ArrayAllocationProfile*, const JSValue* values, int32_t size) WTF_INTERNAL;
EncodedJSValue JIT_OPERATION operationNewArrayWithSizeAndProfile(ExecState*, ArrayAllocationProfile*, EncodedJSValue size) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationNewFunction(ExecState*, JSScope*, JSCell*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationNewFunctionWithInvalidatedReallocationWatchpoint(ExecState*, JSScope*, JSCell*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationNewGeneratorFunction(ExecState*, JSScope*, JSCell*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationNewGeneratorFunctionWithInvalidatedReallocationWatchpoint(ExecState*, JSScope*, JSCell*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationNewFunction(ExecState*, JSCell*) WTF_INTERNAL;
JSCell* JIT_OPERATION operationNewObject(ExecState*, Structure*) WTF_INTERNAL;
EncodedJSValue JIT_OPERATION operationNewRegexp(ExecState*, void*) WTF_INTERNAL;
-UnusedPtr JIT_OPERATION operationHandleWatchdogTimer(ExecState*) WTF_INTERNAL;
+void JIT_OPERATION operationHandleWatchdogTimer(ExecState*) WTF_INTERNAL;
void JIT_OPERATION operationThrowStaticError(ExecState*, EncodedJSValue, int32_t) WTF_INTERNAL;
void JIT_OPERATION operationThrow(ExecState*, EncodedJSValue) WTF_INTERNAL;
void JIT_OPERATION operationDebug(ExecState*, int32_t) WTF_INTERNAL;
#if ENABLE(DFG_JIT)
-SlowPathReturnType JIT_OPERATION operationOptimize(ExecState*, int32_t) WTF_INTERNAL;
+char* JIT_OPERATION operationOptimize(ExecState*, int32_t) WTF_INTERNAL;
#endif
void JIT_OPERATION operationPutByIndex(ExecState*, EncodedJSValue, int32_t, EncodedJSValue);
-void JIT_OPERATION operationPutGetterById(ExecState*, JSCell*, UniquedStringImpl*, int32_t options, JSCell*) WTF_INTERNAL;
-void JIT_OPERATION operationPutSetterById(ExecState*, JSCell*, UniquedStringImpl*, int32_t options, JSCell*) WTF_INTERNAL;
-void JIT_OPERATION operationPutGetterByVal(ExecState*, JSCell*, EncodedJSValue, int32_t attribute, JSCell*) WTF_INTERNAL;
-void JIT_OPERATION operationPutSetterByVal(ExecState*, JSCell*, EncodedJSValue, int32_t attribute, JSCell*) WTF_INTERNAL;
#if USE(JSVALUE64)
-void JIT_OPERATION operationPutGetterSetter(ExecState*, JSCell*, UniquedStringImpl*, int32_t attribute, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
+void JIT_OPERATION operationPutGetterSetter(ExecState*, EncodedJSValue, Identifier*, EncodedJSValue, EncodedJSValue) WTF_INTERNAL;
#else
-void JIT_OPERATION operationPutGetterSetter(ExecState*, JSCell*, UniquedStringImpl*, int32_t attribute, JSCell*, JSCell*) WTF_INTERNAL;
+void JIT_OPERATION operationPutGetterSetter(ExecState*, JSCell*, Identifier*, JSCell*, JSCell*) WTF_INTERNAL;
#endif
-void JIT_OPERATION operationPushFunctionNameScope(ExecState*, int32_t, SymbolTable*, EncodedJSValue) WTF_INTERNAL;
-void JIT_OPERATION operationPopScope(ExecState*, int32_t) WTF_INTERNAL;
+void JIT_OPERATION operationPushNameScope(ExecState*, Identifier*, EncodedJSValue, int32_t) WTF_INTERNAL;
+void JIT_OPERATION operationPushWithScope(ExecState*, EncodedJSValue) WTF_INTERNAL;
+void JIT_OPERATION operationPopScope(ExecState*) WTF_INTERNAL;
void JIT_OPERATION operationProfileDidCall(ExecState*, EncodedJSValue) WTF_INTERNAL;
void JIT_OPERATION operationProfileWillCall(ExecState*, EncodedJSValue) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationGetByValOptimize(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationGetByValGeneric(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationGetByValString(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationHasIndexedPropertyDefault(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo*) WTF_INTERNAL;
-EncodedJSValue JIT_OPERATION operationHasIndexedPropertyGeneric(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript, ByValInfo*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationCheckHasInstance(ExecState*, EncodedJSValue, EncodedJSValue baseVal) WTF_INTERNAL;
+JSCell* JIT_OPERATION operationCreateActivation(ExecState*, int32_t offset) WTF_INTERNAL;
+JSCell* JIT_OPERATION operationCreateArguments(ExecState*) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationGetArgumentsLength(ExecState*, int32_t) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationGetByValDefault(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationGetByValGeneric(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationGetByValString(ExecState*, EncodedJSValue encodedBase, EncodedJSValue encodedSubscript) WTF_INTERNAL;
+void JIT_OPERATION operationTearOffActivation(ExecState*, JSCell*) WTF_INTERNAL;
+void JIT_OPERATION operationTearOffArguments(ExecState*, JSCell*, JSCell*) WTF_INTERNAL;
EncodedJSValue JIT_OPERATION operationDeleteById(ExecState*, EncodedJSValue base, const Identifier*) WTF_INTERNAL;
JSCell* JIT_OPERATION operationGetPNames(ExecState*, JSObject*) WTF_INTERNAL;
EncodedJSValue JIT_OPERATION operationInstanceOf(ExecState*, EncodedJSValue, EncodedJSValue proto) WTF_INTERNAL;
-int32_t JIT_OPERATION operationSizeFrameForVarargs(ExecState*, EncodedJSValue arguments, int32_t numUsedStackSlots, int32_t firstVarArgOffset) WTF_INTERNAL;
-CallFrame* JIT_OPERATION operationSetupVarargsFrame(ExecState*, CallFrame*, EncodedJSValue arguments, int32_t firstVarArgOffset, int32_t length) WTF_INTERNAL;
+CallFrame* JIT_OPERATION operationSizeAndAllocFrameForVarargs(ExecState*, EncodedJSValue arguments, int32_t firstFreeRegister) WTF_INTERNAL;
+CallFrame* JIT_OPERATION operationLoadVarargs(ExecState*, CallFrame*, EncodedJSValue thisValue, EncodedJSValue arguments) WTF_INTERNAL;
EncodedJSValue JIT_OPERATION operationToObject(ExecState*, EncodedJSValue) WTF_INTERNAL;
char* JIT_OPERATION operationSwitchCharWithUnknownKeyType(ExecState*, EncodedJSValue key, size_t tableIndex) WTF_INTERNAL;
char* JIT_OPERATION operationSwitchImmWithUnknownKeyType(ExecState*, EncodedJSValue key, size_t tableIndex) WTF_INTERNAL;
char* JIT_OPERATION operationSwitchStringWithUnknownKeyType(ExecState*, EncodedJSValue key, size_t tableIndex) WTF_INTERNAL;
+EncodedJSValue JIT_OPERATION operationResolveScope(ExecState*, int32_t identifierIndex) WTF_INTERNAL;
EncodedJSValue JIT_OPERATION operationGetFromScope(ExecState*, Instruction* bytecodePC) WTF_INTERNAL;
void JIT_OPERATION operationPutToScope(ExecState*, Instruction* bytecodePC) WTF_INTERNAL;
@@ -366,27 +285,10 @@ void JIT_OPERATION operationOSRWriteBarrier(ExecState*, JSCell*);
void JIT_OPERATION operationInitGlobalConst(ExecState*, Instruction*);
-void JIT_OPERATION operationExceptionFuzz(ExecState*);
-
-int32_t JIT_OPERATION operationCheckIfExceptionIsUncatchableAndNotifyProfiler(ExecState*);
-int32_t JIT_OPERATION operationInstanceOfCustom(ExecState*, EncodedJSValue encodedValue, JSObject* constructor, EncodedJSValue encodedHasInstance) WTF_INTERNAL;
-
-EncodedJSValue JIT_OPERATION operationHasGenericProperty(ExecState*, EncodedJSValue, JSCell*);
-EncodedJSValue JIT_OPERATION operationHasIndexedProperty(ExecState*, JSCell*, int32_t);
-JSCell* JIT_OPERATION operationGetPropertyEnumerator(ExecState*, JSCell*);
-EncodedJSValue JIT_OPERATION operationNextEnumeratorPname(ExecState*, JSCell*, int32_t);
-JSCell* JIT_OPERATION operationToIndexString(ExecState*, int32_t);
-
-void JIT_OPERATION operationProcessTypeProfilerLog(ExecState*) WTF_INTERNAL;
-
} // extern "C"
} // namespace JSC
-#else // ENABLE(JIT)
-
-#define JIT_OPERATION
-
#endif // ENABLE(JIT)
#endif // JITOperations_h
diff --git a/Source/JavaScriptCore/jit/JITOperationsMSVC64.cpp b/Source/JavaScriptCore/jit/JITOperationsMSVC64.cpp
deleted file mode 100644
index 544bca394..000000000
--- a/Source/JavaScriptCore/jit/JITOperationsMSVC64.cpp
+++ /dev/null
@@ -1,46 +0,0 @@
-/*
- * Copyright (C) 2013 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-
-#if !ENABLE(JIT) && COMPILER(MSVC) && CPU(X86_64)
-
-#include "CallFrame.h"
-#include "JSCJSValue.h"
-#include "JSCInlines.h"
-
-namespace JSC {
-
-// FIXME: The following is a workaround that is only needed because JITStubsMSVC64.asm
-// is built unconditionally even when the JIT is disabled, and it references this function.
-// We only need to provide a stub to satisfy the linkage. It will never be called.
-extern "C" EncodedJSValue getHostCallReturnValueWithExecState(ExecState*)
-{
- return JSValue::encode(JSValue());
-}
-
-} // namespace JSC
-
-#endif // !ENABLE(JIT) && COMPILER(MSVC) && CPU(X86_64)
diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
index 3781c1df2..4241baf32 100644
--- a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
+++ b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2009, 2014, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,20 +29,18 @@
#include "JIT.h"
#include "CodeBlock.h"
-#include "DirectArguments.h"
#include "GCAwareJITStubRoutine.h"
#include "GetterSetter.h"
#include "Interpreter.h"
#include "JITInlines.h"
#include "JSArray.h"
-#include "JSEnvironmentRecord.h"
#include "JSFunction.h"
+#include "JSPropertyNameIterator.h"
+#include "JSVariableObject.h"
#include "LinkBuffer.h"
+#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
-#include "ScopedArguments.h"
-#include "ScopedArgumentsTable.h"
-#include "SlowPathCall.h"
#include <wtf/StringPrintStream.h>
@@ -53,10 +51,7 @@ JIT::CodeRef JIT::stringGetByValStubGenerator(VM* vm)
{
JSInterfaceJIT jit(vm);
JumpList failures;
- failures.append(jit.branchStructure(
- NotEqual,
- Address(regT0, JSCell::structureIDOffset()),
- vm->stringStructure.get()));
+ failures.append(jit.branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(vm->stringStructure.get())));
// Load string length to regT2, and start the process of loading the data pointer into regT0
jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT2);
@@ -88,7 +83,7 @@ JIT::CodeRef JIT::stringGetByValStubGenerator(VM* vm)
jit.move(TrustedImm32(0), regT0);
jit.ret();
- LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
return FINALIZE_CODE(patchBuffer, ("String get_by_val stub"));
}
@@ -98,24 +93,21 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
int base = currentInstruction[2].u.operand;
int property = currentInstruction[3].u.operand;
ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
- ByValInfo* byValInfo = m_codeBlock->addByValInfo();
-
+
emitGetVirtualRegisters(base, regT0, property, regT1);
-
- emitJumpSlowCaseIfNotJSCell(regT0, base);
-
- PatchableJump notIndex = emitPatchableJumpIfNotInt(regT1);
- addSlowCase(notIndex);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
// This is technically incorrect - we're zero-extending an int32. On the hot path this doesn't matter.
// We check the value as if it were a uint32 against the m_vectorLength - which will always fail if the
// number was signed since m_vectorLength is always less than intmax (since the total allocation
- // size is always less than 4Gb). As such zero extending will have been correct (and extending the value
- // to 64-bits is necessary since it's used in the address calculation). We zero extend rather than sign
+ // size is always less than 4Gb). As such zero extending will have been correct (and extending the value
+ // to 64-bits is necessary since it's used in the address calculation). We zero extend rather than sign
// extending since it makes it easier to re-tag the value in the slow case.
zeroExtend32ToPtr(regT1, regT1);
- emitArrayProfilingSiteWithCell(regT0, regT2, profile);
+ emitJumpSlowCaseIfNotJSCell(regT0, base);
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ emitArrayProfilingSite(regT2, regT3, profile);
and32(TrustedImm32(IndexingShapeMask), regT2);
PatchableJump badType;
@@ -145,41 +137,39 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
Label done = label();
- if (!ASSERT_DISABLED) {
- Jump resultOK = branchTest64(NonZero, regT0);
- abortWithReason(JITGetByValResultIsNotEmpty);
- resultOK.link(this);
- }
+#if !ASSERT_DISABLED
+ Jump resultOK = branchTest64(NonZero, regT0);
+ breakpoint();
+ resultOK.link(this);
+#endif
emitValueProfilingSite();
emitPutVirtualRegister(dst);
-
- Label nextHotPath = label();
-
- m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, notIndex, badType, mode, profile, done, nextHotPath));
+
+ m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
}
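
The zero-extension comment above is worth spelling out: because m_vectorLength can never reach 2^31, any negative int32 index, reinterpreted as a uint32, compares AboveOrEqual to the length and takes the slow path anyway. A self-contained illustration (plain C++, no JSC types):

    #include <cassert>
    #include <cstdint>

    int main()
    {
        uint32_t vectorLength = 1024;      // always < INT32_MAX in JSC
        int32_t index = -1;                // a "bad" signed index
        uint32_t zeroExtended = static_cast<uint32_t>(index); // 0xFFFFFFFF
        // The unsigned bounds check rejects it, so zero- vs sign-extension
        // cannot change the outcome on the hot path.
        assert(zeroExtended >= vectorLength);
        return 0;
    }
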
-JIT::JumpList JIT::emitDoubleLoad(Instruction*, PatchableJump& badType)
+JIT::JumpList JIT::emitDoubleGetByVal(Instruction*, PatchableJump& badType)
{
JumpList slowCases;
badType = patchableBranch32(NotEqual, regT2, TrustedImm32(DoubleShape));
loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
- slowCases.append(branchIfNotToSpace(regT2));
slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength())));
loadDouble(BaseIndex(regT2, regT1, TimesEight), fpRegT0);
slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0));
+ moveDoubleTo64(fpRegT0, regT0);
+ sub64(tagTypeNumberRegister, regT0);
return slowCases;
}
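
The two lines added at the bottom box the just-loaded double back into a JSValue. Under the 64-bit value encoding, a boxed double is its raw IEEE-754 bits plus 2^48, and subtracting the tag constant 0xFFFF000000000000 equals adding 2^48 modulo 2^64, so a single sub64 against tagTypeNumberRegister does the job. A standalone check of that identity (plain C++, no JSC headers):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main()
    {
        const uint64_t tagTypeNumber = 0xFFFF000000000000ull;
        const uint64_t doubleEncodeOffset = 1ull << 48;

        double d = 3.14;
        uint64_t bits;
        std::memcpy(&bits, &d, sizeof bits); // moveDoubleTo64
        // sub64(tagTypeNumberRegister, regT0) == adding 2^48 (mod 2^64)
        assert(bits - tagTypeNumber == bits + doubleEncodeOffset);
        return 0;
    }
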
-JIT::JumpList JIT::emitContiguousLoad(Instruction*, PatchableJump& badType, IndexingType expectedShape)
+JIT::JumpList JIT::emitContiguousGetByVal(Instruction*, PatchableJump& badType, IndexingType expectedShape)
{
JumpList slowCases;
badType = patchableBranch32(NotEqual, regT2, TrustedImm32(expectedShape));
loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
- slowCases.append(branchIfNotToSpace(regT2));
slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength())));
load64(BaseIndex(regT2, regT1, TimesEight), regT0);
slowCases.append(branchTest64(Zero, regT0));
@@ -187,7 +177,7 @@ JIT::JumpList JIT::emitContiguousLoad(Instruction*, PatchableJump& badType, Inde
return slowCases;
}
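
The branchTest64(Zero, regT0) after the contiguous load is the hole check: under the 64-bit encoding the empty JSValue is all-zero bits, so a hole in the butterfly reads back as 0 and diverts to the slow path. In sketch form:

    #include <cassert>
    #include <cstdint>
    #include <vector>

    int main()
    {
        // Model a contiguous butterfly: unset slots are zero-initialized,
        // which is exactly the encoding of the empty JSValue.
        std::vector<uint64_t> butterfly(8, 0);
        butterfly[3] = 0xFFFF000000000000ull | 42; // a boxed int32, for contrast

        uint64_t loaded = butterfly[5];
        bool takesSlowPath = (loaded == 0); // branchTest64(Zero, ...)
        assert(takesSlowPath);
        return 0;
    }
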
-JIT::JumpList JIT::emitArrayStorageLoad(Instruction*, PatchableJump& badType)
+JIT::JumpList JIT::emitArrayStorageGetByVal(Instruction*, PatchableJump& badType)
{
JumpList slowCases;
@@ -195,7 +185,6 @@ JIT::JumpList JIT::emitArrayStorageLoad(Instruction*, PatchableJump& badType)
badType = patchableBranch32(Above, regT3, TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape));
loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
- slowCases.append(branchIfNotToSpace(regT2));
slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, ArrayStorage::vectorLengthOffset())));
load64(BaseIndex(regT2, regT1, TimesEight, ArrayStorage::vectorOffset()), regT0);
@@ -204,48 +193,18 @@ JIT::JumpList JIT::emitArrayStorageLoad(Instruction*, PatchableJump& badType)
return slowCases;
}
-JITGetByIdGenerator JIT::emitGetByValWithCachedId(Instruction* currentInstruction, const Identifier& propertyName, Jump& fastDoneCase, Jump& slowDoneCase, JumpList& slowCases)
-{
- // base: regT0
- // property: regT1
- // scratch: regT3
-
- int dst = currentInstruction[1].u.operand;
-
- slowCases.append(emitJumpIfNotJSCell(regT1));
- emitIdentifierCheck(regT1, regT3, propertyName, slowCases);
-
- JITGetByIdGenerator gen(
- m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(),
- JSValueRegs(regT0), JSValueRegs(regT0));
- gen.generateFastPath(*this);
-
- fastDoneCase = jump();
-
- Label coldPathBegin = label();
- gen.slowPathJump().link(this);
-
- Call call = callOperation(WithProfile, operationGetByIdOptimize, dst, gen.stubInfo(), regT0, propertyName.impl());
- gen.reportSlowPathCall(coldPathBegin, call);
- slowDoneCase = jump();
-
- return gen;
-}
-
void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
int dst = currentInstruction[1].u.operand;
int base = currentInstruction[2].u.operand;
int property = currentInstruction[3].u.operand;
- ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;
+ ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
- linkSlowCaseIfNotJSCell(iter, base); // base cell check
linkSlowCase(iter); // property int32 check
+ linkSlowCaseIfNotJSCell(iter, base); // base cell check
Jump nonCell = jump();
linkSlowCase(iter); // base array check
- Jump notString = branchStructure(NotEqual,
- Address(regT0, JSCell::structureIDOffset()),
- m_vm->stringStructure.get());
+ Jump notString = branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get()));
emitNakedCall(CodeLocationLabel(m_vm->getCTIStub(stringGetByValStubGenerator).code()));
Jump failed = branchTest64(Zero, regT0);
emitPutVirtualRegister(dst, regT0);
@@ -254,15 +213,20 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas
notString.link(this);
nonCell.link(this);
- linkSlowCase(iter); // read barrier
+ Jump skipProfiling = jump();
+
linkSlowCase(iter); // vector length check
linkSlowCase(iter); // empty value
+ emitArrayProfileOutOfBoundsSpecialCase(profile);
+
+ skipProfiling.link(this);
+
Label slowPath = label();
emitGetVirtualRegister(base, regT0);
emitGetVirtualRegister(property, regT1);
- Call call = callOperation(operationGetByValOptimize, dst, regT0, regT1, byValInfo);
+ Call call = callOperation(operationGetByValDefault, dst, regT0, regT1);
m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
@@ -271,20 +235,89 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas
emitValueProfilingSite();
}
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch, FinalObjectMode finalObjectMode)
+{
+ ASSERT(sizeof(JSValue) == 8);
+
+ if (finalObjectMode == MayBeFinal) {
+ Jump isInline = branch32(LessThan, offset, TrustedImm32(firstOutOfLineOffset));
+ loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
+ neg32(offset);
+ Jump done = jump();
+ isInline.link(this);
+ addPtr(TrustedImm32(JSObject::offsetOfInlineStorage() - (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), base, scratch);
+ done.link(this);
+ } else {
+#if !ASSERT_DISABLED
+ Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(firstOutOfLineOffset));
+ breakpoint();
+ isOutOfLine.link(this);
+#endif
+ loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
+ neg32(offset);
+ }
+ signExtend32ToPtr(offset, offset);
+ load64(BaseIndex(scratch, offset, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), result);
+}
+
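
The helper above folds inline and out-of-line properties into one BaseIndex load. For an out-of-line offset the base is the butterfly and the offset is negated, so the final address is butterfly + (firstOutOfLineOffset - 2 - offset) * 8; for an inline offset the base is pre-biased by offsetOfInlineStorage - (firstOutOfLineOffset - 2) * 8, which cancels the constant and yields object + inlineStorage + offset * 8. Replaying the arithmetic (firstOutOfLineOffset is 100 in JSC; the inline-storage offset here is an illustrative value):

    #include <cassert>
    #include <cstdint>

    constexpr int32_t firstOutOfLineOffset = 100; // JSC's value
    constexpr int32_t inlineStorageBase = 16;     // illustrative offsetOfInlineStorage

    // Byte displacement the emitted load applies on top of its chosen base.
    int64_t loadDisplacement(int32_t offset, bool isInline)
    {
        int64_t idx = isInline ? offset : -int64_t(offset); // neg32 on the out-of-line path
        return idx * 8 + int64_t(firstOutOfLineOffset - 2) * 8;
    }

    int main()
    {
        // Out-of-line property 0 (offset == firstOutOfLineOffset) lands at butterfly - 16.
        assert(loadDisplacement(firstOutOfLineOffset, false) == -16);
        // Inline path: the biased base plus the displacement gives
        // base + inlineStorageBase + offset * 8.
        int32_t offset = 3;
        int64_t biasedBase = inlineStorageBase - int64_t(firstOutOfLineOffset - 2) * 8;
        assert(biasedBase + loadDisplacement(offset, true) == inlineStorageBase + offset * 8);
        return 0;
    }
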
+void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int property = currentInstruction[3].u.operand;
+ unsigned expected = currentInstruction[4].u.operand;
+ int iter = currentInstruction[5].u.operand;
+ int i = currentInstruction[6].u.operand;
+
+ emitGetVirtualRegister(property, regT0);
+ addSlowCase(branch64(NotEqual, regT0, addressFor(expected)));
+ emitGetVirtualRegisters(base, regT0, iter, regT1);
+ emitJumpSlowCaseIfNotJSCell(regT0, base);
+
+ // Test base's structure
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ addSlowCase(branchPtr(NotEqual, regT2, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
+ load32(addressFor(i), regT3);
+ sub32(TrustedImm32(1), regT3);
+ addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
+ Jump inlineProperty = branch32(Below, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity)));
+ add32(TrustedImm32(firstOutOfLineOffset), regT3);
+ sub32(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity)), regT3);
+ inlineProperty.link(this);
+ compileGetDirectOffset(regT0, regT0, regT3, regT1);
+
+ emitPutVirtualRegister(dst, regT0);
+}
+
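
The three-instruction dance around inlineProperty converts the iterator's slot index (i - 1, held in regT3) into a PropertyOffset: indices below the cached inline capacity are already valid inline offsets, while larger ones are rebased into the out-of-line range. Equivalent C++:

    #include <cassert>
    #include <cstdint>

    constexpr int32_t firstOutOfLineOffset = 100; // JSC's value

    int32_t propertyOffsetForIterationIndex(int32_t index, int32_t inlineCapacity)
    {
        if (index < inlineCapacity)          // Jump inlineProperty = branch32(Below, ...)
            return index;
        // add32(firstOutOfLineOffset), then sub32(inlineCapacity)
        return index + firstOutOfLineOffset - inlineCapacity;
    }

    int main()
    {
        assert(propertyOffsetForIterationIndex(2, 6) == 2);   // inline slot 2
        assert(propertyOffsetForIterationIndex(6, 6) == 100); // first out-of-line slot
        return 0;
    }
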
+void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int property = currentInstruction[3].u.operand;
+
+ linkSlowCase(iter);
+ linkSlowCaseIfNotJSCell(iter, base);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ emitGetVirtualRegister(base, regT0);
+ emitGetVirtualRegister(property, regT1);
+ callOperation(operationGetByValGeneric, dst, regT0, regT1);
+}
+
void JIT::emit_op_put_by_val(Instruction* currentInstruction)
{
int base = currentInstruction[1].u.operand;
int property = currentInstruction[2].u.operand;
ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
- ByValInfo* byValInfo = m_codeBlock->addByValInfo();
emitGetVirtualRegisters(base, regT0, property, regT1);
- emitJumpSlowCaseIfNotJSCell(regT0, base);
- PatchableJump notIndex = emitPatchableJumpIfNotInt(regT1);
- addSlowCase(notIndex);
+ emitJumpSlowCaseIfNotImmediateInteger(regT1);
// See comment in op_get_by_val.
zeroExtend32ToPtr(regT1, regT1);
- emitArrayProfilingSiteWithCell(regT0, regT2, profile);
+ emitJumpSlowCaseIfNotJSCell(regT0, base);
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ emitArrayProfilingSite(regT2, regT3, profile);
and32(TrustedImm32(IndexingShapeMask), regT2);
PatchableJump badType;
@@ -314,7 +347,8 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
Label done = label();
- m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, notIndex, badType, mode, profile, done, done));
+ m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
+
}
JIT::JumpList JIT::emitGenericContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType, IndexingType indexingShape)
@@ -327,18 +361,17 @@ JIT::JumpList JIT::emitGenericContiguousPutByVal(Instruction* currentInstruction
badType = patchableBranch32(NotEqual, regT2, TrustedImm32(indexingShape));
loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
- slowCases.append(branchIfNotToSpace(regT2));
Jump outOfBounds = branch32(AboveOrEqual, regT1, Address(regT2, Butterfly::offsetOfPublicLength()));
Label storeResult = label();
emitGetVirtualRegister(value, regT3);
switch (indexingShape) {
case Int32Shape:
- slowCases.append(emitJumpIfNotInt(regT3));
+ slowCases.append(emitJumpIfNotImmediateInteger(regT3));
store64(regT3, BaseIndex(regT2, regT1, TimesEight));
break;
case DoubleShape: {
- Jump notInt = emitJumpIfNotInt(regT3);
+ Jump notInt = emitJumpIfNotImmediateInteger(regT3);
convertInt32ToDouble(regT3, fpRegT0);
Jump ready = jump();
notInt.link(this);
@@ -383,7 +416,6 @@ JIT::JumpList JIT::emitArrayStoragePutByVal(Instruction* currentInstruction, Pat
badType = patchableBranch32(NotEqual, regT2, TrustedImm32(ArrayStorageShape));
loadPtr(Address(regT0, JSObject::butterflyOffset()), regT2);
- slowCases.append(branchIfNotToSpace(regT2));
slowCases.append(branch32(AboveOrEqual, regT1, Address(regT2, ArrayStorage::vectorLengthOffset())));
Jump empty = branchTest64(Zero, BaseIndex(regT2, regT1, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0])));
@@ -409,54 +441,17 @@ JIT::JumpList JIT::emitArrayStoragePutByVal(Instruction* currentInstruction, Pat
return slowCases;
}
-JITPutByIdGenerator JIT::emitPutByValWithCachedId(Instruction* currentInstruction, PutKind putKind, const Identifier& propertyName, JumpList& doneCases, JumpList& slowCases)
-{
- // base: regT0
- // property: regT1
- // scratch: regT2
-
- int base = currentInstruction[1].u.operand;
- int value = currentInstruction[3].u.operand;
-
- slowCases.append(emitJumpIfNotJSCell(regT1));
- emitIdentifierCheck(regT1, regT1, propertyName, slowCases);
-
- // Write barrier breaks the registers. So after issuing the write barrier,
- // reload the registers.
- emitWriteBarrier(base, value, ShouldFilterValue);
- emitGetVirtualRegisters(base, regT0, value, regT1);
-
- JITPutByIdGenerator gen(
- m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(),
- JSValueRegs(regT0), JSValueRegs(regT1), regT2, m_codeBlock->ecmaMode(), putKind);
- gen.generateFastPath(*this);
- doneCases.append(jump());
-
- Label coldPathBegin = label();
- gen.slowPathJump().link(this);
-
- Call call = callOperation(gen.slowPathFunction(), gen.stubInfo(), regT1, regT0, propertyName.impl());
- gen.reportSlowPathCall(coldPathBegin, call);
- doneCases.append(jump());
-
- return gen;
-}
-
void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
int base = currentInstruction[1].u.operand;
int property = currentInstruction[2].u.operand;
int value = currentInstruction[3].u.operand;
ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
- ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;
- linkSlowCaseIfNotJSCell(iter, base); // base cell check
linkSlowCase(iter); // property int32 check
+ linkSlowCaseIfNotJSCell(iter, base); // base cell check
linkSlowCase(iter); // base not array check
- linkSlowCase(iter); // read barrier
- linkSlowCase(iter); // out of bounds
-
JITArrayMode mode = chooseArrayMode(profile);
switch (mode) {
case JITInt32:
@@ -467,13 +462,17 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas
break;
}
+ Jump skipProfiling = jump();
+ linkSlowCase(iter); // out of bounds
+ emitArrayProfileOutOfBoundsSpecialCase(profile);
+ skipProfiling.link(this);
+
Label slowPath = label();
- emitGetVirtualRegister(base, regT0);
emitGetVirtualRegister(property, regT1);
emitGetVirtualRegister(value, regT2);
bool isDirect = m_interpreter->getOpcodeID(currentInstruction->u.opcode) == op_put_by_val_direct;
- Call call = callOperation(isDirect ? operationDirectPutByValOptimize : operationPutByValOptimize, regT0, regT1, regT2, byValInfo);
+ Call call = callOperation(isDirect ? operationDirectPutByVal : operationPutByVal, regT0, regT1, regT2);
m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
@@ -487,47 +486,12 @@ void JIT::emit_op_put_by_index(Instruction* currentInstruction)
callOperation(operationPutByIndex, regT0, currentInstruction[2].u.operand, regT1);
}
-void JIT::emit_op_put_getter_by_id(Instruction* currentInstruction)
-{
- emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
- int32_t options = currentInstruction[3].u.operand;
- emitGetVirtualRegister(currentInstruction[4].u.operand, regT1);
- callOperation(operationPutGetterById, regT0, m_codeBlock->identifier(currentInstruction[2].u.operand).impl(), options, regT1);
-}
-
-void JIT::emit_op_put_setter_by_id(Instruction* currentInstruction)
-{
- emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
- int32_t options = currentInstruction[3].u.operand;
- emitGetVirtualRegister(currentInstruction[4].u.operand, regT1);
- callOperation(operationPutSetterById, regT0, m_codeBlock->identifier(currentInstruction[2].u.operand).impl(), options, regT1);
-}
-
-void JIT::emit_op_put_getter_setter_by_id(Instruction* currentInstruction)
-{
- emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
- int32_t attribute = currentInstruction[3].u.operand;
- emitGetVirtualRegister(currentInstruction[4].u.operand, regT1);
- emitGetVirtualRegister(currentInstruction[5].u.operand, regT2);
- callOperation(operationPutGetterSetter, regT0, m_codeBlock->identifier(currentInstruction[2].u.operand).impl(), attribute, regT1, regT2);
-}
-
-void JIT::emit_op_put_getter_by_val(Instruction* currentInstruction)
-{
- emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
- emitGetVirtualRegister(currentInstruction[2].u.operand, regT1);
- int32_t attributes = currentInstruction[3].u.operand;
- emitGetVirtualRegister(currentInstruction[4].u.operand, regT2);
- callOperation(operationPutGetterByVal, regT0, regT1, attributes, regT2);
-}
-
-void JIT::emit_op_put_setter_by_val(Instruction* currentInstruction)
+void JIT::emit_op_put_getter_setter(Instruction* currentInstruction)
{
emitGetVirtualRegister(currentInstruction[1].u.operand, regT0);
- emitGetVirtualRegister(currentInstruction[2].u.operand, regT1);
- int32_t attributes = currentInstruction[3].u.operand;
+ emitGetVirtualRegister(currentInstruction[3].u.operand, regT1);
emitGetVirtualRegister(currentInstruction[4].u.operand, regT2);
- callOperation(operationPutSetterByVal, regT0, regT1, attributes, regT2);
+ callOperation(operationPutGetterSetter, regT0, &m_codeBlock->identifier(currentInstruction[2].u.operand), regT1, regT2);
}
void JIT::emit_op_del_by_id(Instruction* currentInstruction)
@@ -549,12 +513,14 @@ void JIT::emit_op_get_by_id(Instruction* currentInstruction)
emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
- if (*ident == m_vm->propertyNames->length && shouldEmitProfiling())
- emitArrayProfilingSiteForBytecodeIndexWithCell(regT0, regT1, m_bytecodeOffset);
+ if (*ident == m_vm->propertyNames->length && shouldEmitProfiling()) {
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
+ emitArrayProfilingSiteForBytecodeIndex(regT1, regT2, m_bytecodeOffset);
+ }
JITGetByIdGenerator gen(
- m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(),
- JSValueRegs(regT0), JSValueRegs(regT0));
+ m_codeBlock, CodeOrigin(m_bytecodeOffset), RegisterSet::specialRegisters(),
+ callFrameRegister, JSValueRegs(regT0), JSValueRegs(regT0), true);
gen.generateFastPath(*this);
addSlowCase(gen.slowPathJump());
m_getByIds.append(gen);
@@ -585,9 +551,9 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
{
int baseVReg = currentInstruction[1].u.operand;
int valueVReg = currentInstruction[3].u.operand;
- unsigned direct = currentInstruction[8].u.putByIdFlags & PutByIdIsDirect;
+ unsigned direct = currentInstruction[8].u.operand;
- emitWriteBarrier(baseVReg, valueVReg, ShouldFilterBase);
+ emitWriteBarrier(baseVReg, valueVReg, ShouldFilterBaseAndValue);
// In order to be able to patch both the Structure, and the object offset, we store one pointer,
// to just after the arguments have been loaded into registers 'hotPathBegin', and we generate code
@@ -595,12 +561,13 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
emitGetVirtualRegisters(baseVReg, regT0, valueVReg, regT1);
+ // Jump to a slow case if either the base object is an immediate, or if the Structure does not match.
emitJumpSlowCaseIfNotJSCell(regT0, baseVReg);
JITPutByIdGenerator gen(
- m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(m_bytecodeOffset), RegisterSet::stubUnavailableRegisters(),
- JSValueRegs(regT0), JSValueRegs(regT1), regT2, m_codeBlock->ecmaMode(),
- direct ? Direct : NotDirect);
+ m_codeBlock, CodeOrigin(m_bytecodeOffset), RegisterSet::specialRegisters(),
+ callFrameRegister, JSValueRegs(regT0), JSValueRegs(regT1), regT2, true,
+ m_codeBlock->ecmaMode(), direct ? Direct : NotDirect);
gen.generateFastPath(*this);
addSlowCase(gen.slowPathJump());
@@ -626,6 +593,42 @@ void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCase
gen.reportSlowPathCall(coldPathBegin, call);
}
+// Compile a store into an object's property storage. May overwrite the
+// value in objectReg.
+void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, PropertyOffset cachedOffset)
+{
+ if (isInlineOffset(cachedOffset)) {
+ store64(value, Address(base, JSObject::offsetOfInlineStorage() + sizeof(JSValue) * offsetInInlineStorage(cachedOffset)));
+ return;
+ }
+
+ loadPtr(Address(base, JSObject::butterflyOffset()), base);
+ store64(value, Address(base, sizeof(JSValue) * offsetInButterfly(cachedOffset)));
+}
+
+// Compile a load from an object's property storage. May overwrite base.
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, PropertyOffset cachedOffset)
+{
+ if (isInlineOffset(cachedOffset)) {
+ load64(Address(base, JSObject::offsetOfInlineStorage() + sizeof(JSValue) * offsetInInlineStorage(cachedOffset)), result);
+ return;
+ }
+
+ loadPtr(Address(base, JSObject::butterflyOffset()), result);
+ load64(Address(result, sizeof(JSValue) * offsetInButterfly(cachedOffset)), result);
+}
+
+void JIT::compileGetDirectOffset(JSObject* base, RegisterID result, PropertyOffset cachedOffset)
+{
+ if (isInlineOffset(cachedOffset)) {
+ load64(base->locationForOffset(cachedOffset), result);
+ return;
+ }
+
+ loadPtr(base->butterflyAddress(), result);
+ load64(Address(result, offsetInButterfly(cachedOffset) * sizeof(WriteBarrier<Unknown>)), result);
+}
+
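
All three helpers above specialize the same rule for a compile-time-known PropertyOffset: offsets below firstOutOfLineOffset index the object's inline storage directly, everything else indexes the butterfly at offset - firstOutOfLineOffset. The predicates they lean on reduce to (mirroring PropertyOffset.h, to the best of my reading):

    #include <cassert>

    constexpr int firstOutOfLineOffset = 100; // JSC's value

    constexpr bool isInlineOffset(int offset) { return offset < firstOutOfLineOffset; }
    constexpr int offsetInInlineStorage(int offset) { return offset; }
    constexpr int offsetInButterfly(int offset) { return offset - firstOutOfLineOffset; }

    int main()
    {
        static_assert(isInlineOffset(0) && !isInlineOffset(100), "boundary");
        assert(offsetInButterfly(102) == 2); // third out-of-line property
        return 0;
    }
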
void JIT::emitVarInjectionCheck(bool needsVarInjectionChecks)
{
if (!needsVarInjectionChecks)
@@ -633,10 +636,16 @@ void JIT::emitVarInjectionCheck(bool needsVarInjectionChecks)
addSlowCase(branch8(Equal, AbsoluteAddress(m_codeBlock->globalObject()->varInjectionWatchpoint()->addressOfState()), TrustedImm32(IsInvalidated)));
}
-void JIT::emitResolveClosure(int dst, int scope, bool needsVarInjectionChecks, unsigned depth)
+void JIT::emitResolveClosure(int dst, bool needsVarInjectionChecks, unsigned depth)
{
emitVarInjectionCheck(needsVarInjectionChecks);
- emitGetVirtualRegister(scope, regT0);
+ emitGetVirtualRegister(JSStack::ScopeChain, regT0);
+ if (m_codeBlock->needsActivation()) {
+ emitGetVirtualRegister(m_codeBlock->activationRegister(), regT1);
+ Jump noActivation = branchTestPtr(Zero, regT1);
+ loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
+ noActivation.link(this);
+ }
for (unsigned i = 0; i < depth; ++i)
loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
emitPutVirtualRegister(dst);
@@ -645,213 +654,92 @@ void JIT::emitResolveClosure(int dst, int scope, bool needsVarInjectionChecks, u
void JIT::emit_op_resolve_scope(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
- int scope = currentInstruction[2].u.operand;
- ResolveType resolveType = static_cast<ResolveType>(currentInstruction[4].u.operand);
- unsigned depth = currentInstruction[5].u.operand;
-
- auto emitCode = [&] (ResolveType resolveType) {
- switch (resolveType) {
- case GlobalProperty:
- case GlobalVar:
- case GlobalPropertyWithVarInjectionChecks:
- case GlobalVarWithVarInjectionChecks:
- case GlobalLexicalVar:
- case GlobalLexicalVarWithVarInjectionChecks: {
- JSScope* constantScope = JSScope::constantScopeForCodeBlock(resolveType, m_codeBlock);
- RELEASE_ASSERT(constantScope);
- emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
- move(TrustedImmPtr(constantScope), regT0);
- emitPutVirtualRegister(dst);
- break;
- }
- case ClosureVar:
- case ClosureVarWithVarInjectionChecks:
- emitResolveClosure(dst, scope, needsVarInjectionChecks(resolveType), depth);
- break;
- case ModuleVar:
- move(TrustedImmPtr(currentInstruction[6].u.jsCell.get()), regT0);
- emitPutVirtualRegister(dst);
- break;
- case Dynamic:
- addSlowCase(jump());
- break;
- case LocalClosureVar:
- case UnresolvedProperty:
- case UnresolvedPropertyWithVarInjectionChecks:
- RELEASE_ASSERT_NOT_REACHED();
- }
- };
+ ResolveType resolveType = static_cast<ResolveType>(currentInstruction[3].u.operand);
+ unsigned depth = currentInstruction[4].u.operand;
switch (resolveType) {
- case UnresolvedProperty:
- case UnresolvedPropertyWithVarInjectionChecks: {
- JumpList skipToEnd;
- load32(&currentInstruction[4], regT0);
-
- Jump notGlobalProperty = branch32(NotEqual, regT0, TrustedImm32(GlobalProperty));
- emitCode(GlobalProperty);
- skipToEnd.append(jump());
- notGlobalProperty.link(this);
-
- Jump notGlobalPropertyWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalPropertyWithVarInjectionChecks));
- emitCode(GlobalPropertyWithVarInjectionChecks);
- skipToEnd.append(jump());
- notGlobalPropertyWithVarInjections.link(this);
-
- Jump notGlobalLexicalVar = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVar));
- emitCode(GlobalLexicalVar);
- skipToEnd.append(jump());
- notGlobalLexicalVar.link(this);
-
- Jump notGlobalLexicalVarWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVarWithVarInjectionChecks));
- emitCode(GlobalLexicalVarWithVarInjectionChecks);
- skipToEnd.append(jump());
- notGlobalLexicalVarWithVarInjections.link(this);
-
- addSlowCase(jump());
- skipToEnd.link(this);
+ case GlobalProperty:
+ case GlobalVar:
+ case GlobalPropertyWithVarInjectionChecks:
+ case GlobalVarWithVarInjectionChecks:
+ emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
+ move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
+ emitPutVirtualRegister(dst);
break;
- }
-
- default:
- emitCode(resolveType);
+ case ClosureVar:
+ case ClosureVarWithVarInjectionChecks:
+ emitResolveClosure(dst, needsVarInjectionChecks(resolveType), depth);
+ break;
+ case Dynamic:
+ addSlowCase(jump());
break;
}
}
void JIT::emitSlow_op_resolve_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- ResolveType resolveType = static_cast<ResolveType>(currentInstruction[4].u.operand);
- if (resolveType == GlobalProperty || resolveType == GlobalVar || resolveType == ClosureVar || resolveType == GlobalLexicalVar || resolveType == ModuleVar)
- return;
+ int dst = currentInstruction[1].u.operand;
+ ResolveType resolveType = static_cast<ResolveType>(currentInstruction[3].u.operand);
- if (resolveType == UnresolvedProperty || resolveType == UnresolvedPropertyWithVarInjectionChecks) {
- linkSlowCase(iter); // var injections check for GlobalPropertyWithVarInjectionChecks.
- linkSlowCase(iter); // var injections check for GlobalLexicalVarWithVarInjectionChecks.
- }
+ if (resolveType == GlobalProperty || resolveType == GlobalVar || resolveType == ClosureVar)
+ return;
linkSlowCase(iter);
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_resolve_scope);
- slowPathCall.call();
+ int32_t identifierIndex = currentInstruction[2].u.operand;
+ callOperation(operationResolveScope, dst, identifierIndex);
}
void JIT::emitLoadWithStructureCheck(int scope, Structure** structureSlot)
{
emitGetVirtualRegister(scope, regT0);
loadPtr(structureSlot, regT1);
- addSlowCase(branchTestPtr(Zero, regT1));
- load32(Address(regT1, Structure::structureIDOffset()), regT1);
- addSlowCase(branch32(NotEqual, Address(regT0, JSCell::structureIDOffset()), regT1));
+ addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), regT1));
}
-void JIT::emitGetVarFromPointer(JSValue* operand, GPRReg reg)
+void JIT::emitGetGlobalProperty(uintptr_t* operandSlot)
{
- loadPtr(operand, reg);
+ load32(operandSlot, regT1);
+ compileGetDirectOffset(regT0, regT0, regT1, regT2, KnownNotFinal);
}
-void JIT::emitGetVarFromIndirectPointer(JSValue** operand, GPRReg reg)
+void JIT::emitGetGlobalVar(uintptr_t operand)
{
- loadPtr(operand, reg);
- loadPtr(reg, reg);
+ loadPtr(reinterpret_cast<void*>(operand), regT0);
}
void JIT::emitGetClosureVar(int scope, uintptr_t operand)
{
emitGetVirtualRegister(scope, regT0);
- loadPtr(Address(regT0, JSEnvironmentRecord::offsetOfVariables() + operand * sizeof(Register)), regT0);
+ loadPtr(Address(regT0, JSVariableObject::offsetOfRegisters()), regT0);
+ loadPtr(Address(regT0, operand * sizeof(Register)), regT0);
}
void JIT::emit_op_get_from_scope(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
int scope = currentInstruction[2].u.operand;
- ResolveType resolveType = GetPutInfo(currentInstruction[4].u.operand).resolveType();
+ ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
Structure** structureSlot = currentInstruction[5].u.structure.slot();
uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&currentInstruction[6].u.pointer);
- auto emitCode = [&] (ResolveType resolveType, bool indirectLoadForOperand) {
- switch (resolveType) {
- case GlobalProperty:
- case GlobalPropertyWithVarInjectionChecks: {
- emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection.
- GPRReg base = regT0;
- GPRReg result = regT0;
- GPRReg offset = regT1;
- GPRReg scratch = regT2;
-
- load32(operandSlot, offset);
- if (!ASSERT_DISABLED) {
- Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(firstOutOfLineOffset));
- abortWithReason(JITOffsetIsNotOutOfLine);
- isOutOfLine.link(this);
- }
- loadPtr(Address(base, JSObject::butterflyOffset()), scratch);
- addSlowCase(branchIfNotToSpace(scratch));
- neg32(offset);
- signExtend32ToPtr(offset, offset);
- load64(BaseIndex(scratch, offset, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), result);
- break;
- }
- case GlobalVar:
- case GlobalVarWithVarInjectionChecks:
- case GlobalLexicalVar:
- case GlobalLexicalVarWithVarInjectionChecks:
- emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
- if (indirectLoadForOperand)
- emitGetVarFromIndirectPointer(bitwise_cast<JSValue**>(operandSlot), regT0);
- else
- emitGetVarFromPointer(bitwise_cast<JSValue*>(*operandSlot), regT0);
- if (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks) // TDZ check.
- addSlowCase(branchTest64(Zero, regT0));
- break;
- case ClosureVar:
- case ClosureVarWithVarInjectionChecks:
- emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
- emitGetClosureVar(scope, *operandSlot);
- break;
- case Dynamic:
- addSlowCase(jump());
- break;
- case LocalClosureVar:
- case ModuleVar:
- case UnresolvedProperty:
- case UnresolvedPropertyWithVarInjectionChecks:
- RELEASE_ASSERT_NOT_REACHED();
- }
- };
-
switch (resolveType) {
- case UnresolvedProperty:
- case UnresolvedPropertyWithVarInjectionChecks: {
- JumpList skipToEnd;
- load32(&currentInstruction[4], regT0);
- and32(TrustedImm32(GetPutInfo::typeBits), regT0); // Load ResolveType into T0
-
- Jump isGlobalProperty = branch32(Equal, regT0, TrustedImm32(GlobalProperty));
- Jump notGlobalPropertyWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalPropertyWithVarInjectionChecks));
- isGlobalProperty.link(this);
- emitCode(GlobalProperty, false);
- skipToEnd.append(jump());
- notGlobalPropertyWithVarInjections.link(this);
-
- Jump notGlobalLexicalVar = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVar));
- emitCode(GlobalLexicalVar, true);
- skipToEnd.append(jump());
- notGlobalLexicalVar.link(this);
-
- Jump notGlobalLexicalVarWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVarWithVarInjectionChecks));
- emitCode(GlobalLexicalVarWithVarInjectionChecks, true);
- skipToEnd.append(jump());
- notGlobalLexicalVarWithVarInjections.link(this);
-
- addSlowCase(jump());
-
- skipToEnd.link(this);
+ case GlobalProperty:
+ case GlobalPropertyWithVarInjectionChecks:
+ emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection.
+ emitGetGlobalProperty(operandSlot);
break;
- }
-
- default:
- emitCode(resolveType, false);
+ case GlobalVar:
+ case GlobalVarWithVarInjectionChecks:
+ emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
+ emitGetGlobalVar(*operandSlot);
+ break;
+ case ClosureVar:
+ case ClosureVarWithVarInjectionChecks:
+ emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
+ emitGetClosureVar(scope, *operandSlot);
+ break;
+ case Dynamic:
+ addSlowCase(jump());
break;
}
emitPutVirtualRegister(dst);
@@ -861,332 +749,294 @@ void JIT::emit_op_get_from_scope(Instruction* currentInstruction)
void JIT::emitSlow_op_get_from_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
int dst = currentInstruction[1].u.operand;
- ResolveType resolveType = GetPutInfo(currentInstruction[4].u.operand).resolveType();
+ ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
if (resolveType == GlobalVar || resolveType == ClosureVar)
return;
- if (resolveType == GlobalProperty || resolveType == GlobalPropertyWithVarInjectionChecks) {
- linkSlowCase(iter); // bad structure
- linkSlowCase(iter); // read barrier
- }
-
- if (resolveType == GlobalLexicalVarWithVarInjectionChecks) // Var injections check.
- linkSlowCase(iter);
-
- if (resolveType == UnresolvedProperty || resolveType == UnresolvedPropertyWithVarInjectionChecks) {
- // GlobalProperty/GlobalPropertyWithVarInjectionChecks
- linkSlowCase(iter); // emitLoadWithStructureCheck
- linkSlowCase(iter); // emitLoadWithStructureCheck
- linkSlowCase(iter); // read barrier
- // GlobalLexicalVar
- linkSlowCase(iter); // TDZ check.
- // GlobalLexicalVarWithVarInjectionChecks.
- linkSlowCase(iter); // var injection check.
- linkSlowCase(iter); // TDZ check.
- }
-
linkSlowCase(iter);
-
callOperation(WithProfile, operationGetFromScope, dst, currentInstruction);
}
-void JIT::emitPutGlobalVariable(JSValue* operand, int value, WatchpointSet* set)
+void JIT::emitPutGlobalProperty(uintptr_t* operandSlot, int value)
{
- emitGetVirtualRegister(value, regT0);
- emitNotifyWrite(set);
- storePtr(regT0, operand);
+ emitGetVirtualRegister(value, regT2);
+
+ loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
+ loadPtr(operandSlot, regT1);
+ negPtr(regT1);
+ storePtr(regT2, BaseIndex(regT0, regT1, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)));
}
-void JIT::emitPutGlobalVariableIndirect(JSValue** addressOfOperand, int value, WatchpointSet** indirectWatchpointSet)
+
+void JIT::emitNotifyWrite(RegisterID value, RegisterID scratch, VariableWatchpointSet* set)
+{
+ if (!set || set->state() == IsInvalidated)
+ return;
+
+ load8(set->addressOfState(), scratch);
+
+ JumpList ready;
+
+ ready.append(branch32(Equal, scratch, TrustedImm32(IsInvalidated)));
+
+ if (set->state() == ClearWatchpoint) {
+ Jump isWatched = branch32(NotEqual, scratch, TrustedImm32(ClearWatchpoint));
+
+ store64(value, set->addressOfInferredValue());
+ store8(TrustedImm32(IsWatched), set->addressOfState());
+ ready.append(jump());
+
+ isWatched.link(this);
+ }
+
+ ready.append(branch64(Equal, AbsoluteAddress(set->addressOfInferredValue()), value));
+ addSlowCase(branchTest8(NonZero, AbsoluteAddress(set->addressOfSetIsNotEmpty())));
+ store8(TrustedImm32(IsInvalidated), set->addressOfState());
+ move(TrustedImm64(JSValue::encode(JSValue())), scratch);
+ store64(scratch, set->addressOfInferredValue());
+
+ ready.link(this);
+}
+
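
emitNotifyWrite above is an inlined state machine over the VariableWatchpointSet: a ClearWatchpoint set starts watching by recording the first value written, a watched set tolerates rewrites of the same value, and any differing write either bails to the slow case (when the set has watchers to fire) or invalidates inline and clears the inferred value. The same logic as plain C++ (a model, not JSC's class):

    #include <cassert>
    #include <cstdint>

    enum State { ClearWatchpoint, IsWatched, IsInvalidated };

    struct VariableWatchpointSetModel {
        State state = ClearWatchpoint;
        uint64_t inferredValue = 0; // 0 models the empty JSValue

        // Returns true when the slow path must run (watchpoints need firing).
        bool notifyWrite(uint64_t value, bool setIsNotEmpty)
        {
            if (state == IsInvalidated)
                return false;
            if (state == ClearWatchpoint) {
                inferredValue = value;
                state = IsWatched;
                return false;
            }
            if (inferredValue == value)
                return false;
            if (setIsNotEmpty)
                return true; // addSlowCase(branchTest8(NonZero, ...))
            state = IsInvalidated;
            inferredValue = 0; // store JSValue() back
            return false;
        }
    };

    int main()
    {
        VariableWatchpointSetModel set;
        assert(!set.notifyWrite(1, false)); // first write: start watching
        assert(!set.notifyWrite(1, false)); // same value: still watched
        assert(!set.notifyWrite(2, false)); // new value, no watchers: invalidate inline
        assert(set.state == IsInvalidated);
        return 0;
    }
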
+void JIT::emitPutGlobalVar(uintptr_t operand, int value, VariableWatchpointSet* set)
{
emitGetVirtualRegister(value, regT0);
- loadPtr(indirectWatchpointSet, regT1);
- emitNotifyWrite(regT1);
- loadPtr(addressOfOperand, regT1);
- storePtr(regT0, regT1);
+ emitNotifyWrite(regT0, regT1, set);
+ storePtr(regT0, reinterpret_cast<void*>(operand));
}
-void JIT::emitPutClosureVar(int scope, uintptr_t operand, int value, WatchpointSet* set)
+void JIT::emitPutClosureVar(int scope, uintptr_t operand, int value)
{
emitGetVirtualRegister(value, regT1);
emitGetVirtualRegister(scope, regT0);
- emitNotifyWrite(set);
- storePtr(regT1, Address(regT0, JSEnvironmentRecord::offsetOfVariables() + operand * sizeof(Register)));
+ loadPtr(Address(regT0, JSVariableObject::offsetOfRegisters()), regT0);
+ storePtr(regT1, Address(regT0, operand * sizeof(Register)));
}
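
emitGetClosureVar and emitPutClosureVar reach closure variables through one extra indirection: the JSVariableObject keeps its variables in a separately-pointed-to registers array, so the access is registers[operand] rather than a fixed displacement off the scope cell. A toy model of that shape (FakeVariableObject is a stand-in, not JSC's class):

    #include <cassert>
    #include <cstdint>

    // The scope cell holds a pointer to its registers array
    // (offsetOfRegisters() in the real code).
    struct FakeVariableObject { uint64_t* registers; };

    uint64_t getClosureVar(const FakeVariableObject& scope, uintptr_t operand)
    {
        return scope.registers[operand]; // second loadPtr in emitGetClosureVar
    }

    void putClosureVar(FakeVariableObject& scope, uintptr_t operand, uint64_t value)
    {
        scope.registers[operand] = value; // storePtr in emitPutClosureVar
    }

    int main()
    {
        uint64_t storage[4] = {};
        FakeVariableObject scope { storage };
        putClosureVar(scope, 2, 42);
        assert(getClosureVar(scope, 2) == 42);
        return 0;
    }
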
void JIT::emit_op_put_to_scope(Instruction* currentInstruction)
{
int scope = currentInstruction[1].u.operand;
int value = currentInstruction[3].u.operand;
- GetPutInfo getPutInfo = GetPutInfo(currentInstruction[4].u.operand);
- ResolveType resolveType = getPutInfo.resolveType();
+ ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
Structure** structureSlot = currentInstruction[5].u.structure.slot();
uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&currentInstruction[6].u.pointer);
- auto emitCode = [&] (ResolveType resolveType, bool indirectLoadForOperand) {
- switch (resolveType) {
- case GlobalProperty:
- case GlobalPropertyWithVarInjectionChecks: {
- emitWriteBarrier(m_codeBlock->globalObject(), value, ShouldFilterValue);
- emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection.
- emitGetVirtualRegister(value, regT2);
-
- loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
- addSlowCase(branchIfNotToSpace(regT0));
- loadPtr(operandSlot, regT1);
- negPtr(regT1);
- storePtr(regT2, BaseIndex(regT0, regT1, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)));
- break;
- }
- case GlobalVar:
- case GlobalVarWithVarInjectionChecks:
- case GlobalLexicalVar:
- case GlobalLexicalVarWithVarInjectionChecks: {
- JSScope* constantScope = JSScope::constantScopeForCodeBlock(resolveType, m_codeBlock);
- RELEASE_ASSERT(constantScope);
- emitWriteBarrier(constantScope, value, ShouldFilterValue);
- emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
- if (getPutInfo.initializationMode() != Initialization && (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)) {
- // We need to do a TDZ check here because we can't always prove we need to emit TDZ checks statically.
- if (indirectLoadForOperand)
- emitGetVarFromIndirectPointer(bitwise_cast<JSValue**>(operandSlot), regT0);
- else
- emitGetVarFromPointer(bitwise_cast<JSValue*>(*operandSlot), regT0);
- addSlowCase(branchTest64(Zero, regT0));
- }
- if (indirectLoadForOperand)
- emitPutGlobalVariableIndirect(bitwise_cast<JSValue**>(operandSlot), value, bitwise_cast<WatchpointSet**>(&currentInstruction[5]));
- else
- emitPutGlobalVariable(bitwise_cast<JSValue*>(*operandSlot), value, currentInstruction[5].u.watchpointSet);
- break;
- }
- case LocalClosureVar:
- case ClosureVar:
- case ClosureVarWithVarInjectionChecks:
- emitWriteBarrier(scope, value, ShouldFilterValue);
- emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
- emitPutClosureVar(scope, *operandSlot, value, currentInstruction[5].u.watchpointSet);
- break;
- case ModuleVar:
- case Dynamic:
- addSlowCase(jump());
- break;
- case UnresolvedProperty:
- case UnresolvedPropertyWithVarInjectionChecks:
- RELEASE_ASSERT_NOT_REACHED();
- break;
- }
- };
-
switch (resolveType) {
- case UnresolvedProperty:
- case UnresolvedPropertyWithVarInjectionChecks: {
- JumpList skipToEnd;
- load32(&currentInstruction[4], regT0);
- and32(TrustedImm32(GetPutInfo::typeBits), regT0); // Load ResolveType into T0
-
- Jump isGlobalProperty = branch32(Equal, regT0, TrustedImm32(GlobalProperty));
- Jump notGlobalPropertyWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalPropertyWithVarInjectionChecks));
- isGlobalProperty.link(this);
- emitCode(GlobalProperty, false);
- skipToEnd.append(jump());
- notGlobalPropertyWithVarInjections.link(this);
-
- Jump notGlobalLexicalVar = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVar));
- emitCode(GlobalLexicalVar, true);
- skipToEnd.append(jump());
- notGlobalLexicalVar.link(this);
-
- Jump notGlobalLexicalVarWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVarWithVarInjectionChecks));
- emitCode(GlobalLexicalVarWithVarInjectionChecks, true);
- skipToEnd.append(jump());
- notGlobalLexicalVarWithVarInjections.link(this);
-
- addSlowCase(jump());
-
- skipToEnd.link(this);
+ case GlobalProperty:
+ case GlobalPropertyWithVarInjectionChecks:
+ emitWriteBarrier(m_codeBlock->globalObject(), value, ShouldFilterValue);
+ emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection.
+ emitPutGlobalProperty(operandSlot, value);
break;
- }
-
- default:
- emitCode(resolveType, false);
+ case GlobalVar:
+ case GlobalVarWithVarInjectionChecks:
+ emitWriteBarrier(m_codeBlock->globalObject(), value, ShouldFilterValue);
+ emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
+ emitPutGlobalVar(*operandSlot, value, currentInstruction[5].u.watchpointSet);
+ break;
+ case ClosureVar:
+ case ClosureVarWithVarInjectionChecks:
+ emitWriteBarrier(scope, value, ShouldFilterValue);
+ emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
+ emitPutClosureVar(scope, *operandSlot, value);
+ break;
+ case Dynamic:
+ addSlowCase(jump());
break;
}
}
void JIT::emitSlow_op_put_to_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- GetPutInfo getPutInfo = GetPutInfo(currentInstruction[4].u.operand);
- ResolveType resolveType = getPutInfo.resolveType();
+ ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
unsigned linkCount = 0;
- if (resolveType != GlobalVar && resolveType != ClosureVar && resolveType != LocalClosureVar && resolveType != GlobalLexicalVar)
+ if (resolveType != GlobalVar && resolveType != ClosureVar)
linkCount++;
- if ((resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks
- || resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks
- || resolveType == LocalClosureVar)
+ if ((resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
&& currentInstruction[5].u.watchpointSet->state() != IsInvalidated)
linkCount++;
- if (resolveType == GlobalProperty || resolveType == GlobalPropertyWithVarInjectionChecks) {
- linkCount++; // bad structure
- linkCount++; // read barrier
- }
- if (getPutInfo.initializationMode() != Initialization && (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)) // TDZ check.
- linkCount++;
- if (resolveType == UnresolvedProperty || resolveType == UnresolvedPropertyWithVarInjectionChecks) {
- // GlobalProperty/GlobalPropertyWithVarInjectionsCheck
- linkCount++; // emitLoadWithStructureCheck
- linkCount++; // emitLoadWithStructureCheck
- linkCount++; // read barrier
-
- // GlobalLexicalVar
- bool needsTDZCheck = getPutInfo.initializationMode() != Initialization;
- if (needsTDZCheck)
- linkCount++;
- linkCount++; // Notify write check.
-
- // GlobalLexicalVarWithVarInjectionsCheck
- linkCount++; // var injection check.
- if (needsTDZCheck)
- linkCount++;
- linkCount++; // Notify write check.
- }
if (!linkCount)
return;
while (linkCount--)
linkSlowCase(iter);
-
- if (resolveType == ModuleVar) {
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_throw_strict_mode_readonly_property_write_error);
- slowPathCall.call();
- } else
- callOperation(operationPutToScope, currentInstruction);
+ callOperation(operationPutToScope, currentInstruction);
}
-void JIT::emit_op_get_from_arguments(Instruction* currentInstruction)
+void JIT::emit_op_init_global_const(Instruction* currentInstruction)
{
- int dst = currentInstruction[1].u.operand;
- int arguments = currentInstruction[2].u.operand;
- int index = currentInstruction[3].u.operand;
-
- emitGetVirtualRegister(arguments, regT0);
- load64(Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>)), regT0);
- emitValueProfilingSite();
- emitPutVirtualRegister(dst);
+ JSGlobalObject* globalObject = m_codeBlock->globalObject();
+ emitWriteBarrier(globalObject, currentInstruction[2].u.operand, ShouldFilterValue);
+ emitGetVirtualRegister(currentInstruction[2].u.operand, regT0);
+ store64(regT0, currentInstruction[1].u.registerPointer);
}
-void JIT::emit_op_put_to_arguments(Instruction* currentInstruction)
+#endif // USE(JSVALUE64)
+
+JIT::Jump JIT::checkMarkWord(RegisterID owner, RegisterID scratch1, RegisterID scratch2)
{
- int arguments = currentInstruction[1].u.operand;
- int index = currentInstruction[2].u.operand;
- int value = currentInstruction[3].u.operand;
-
- emitWriteBarrier(arguments, value, ShouldFilterValue);
-
- emitGetVirtualRegister(arguments, regT0);
- emitGetVirtualRegister(value, regT1);
- store64(regT1, Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>)));
+ move(owner, scratch1);
+ move(owner, scratch2);
+
+ andPtr(TrustedImmPtr(MarkedBlock::blockMask), scratch1);
+ andPtr(TrustedImmPtr(~MarkedBlock::blockMask), scratch2);
+
+ rshift32(TrustedImm32(3 + 4), scratch2);
+
+ return branchTest8(Zero, BaseIndex(scratch1, scratch2, TimesOne, MarkedBlock::offsetOfMarks()));
}
-#endif // USE(JSVALUE64)
+JIT::Jump JIT::checkMarkWord(JSCell* owner)
+{
+ MarkedBlock* block = MarkedBlock::blockFor(owner);
+ size_t index = (reinterpret_cast<size_t>(owner) & ~MarkedBlock::blockMask) >> (3 + 4);
+ void* address = (reinterpret_cast<char*>(block) + MarkedBlock::offsetOfMarks()) + index;
+
+ return branchTest8(Zero, AbsoluteAddress(address));
+}
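
Both checkMarkWord overloads compute the same address: mask the cell pointer down to its MarkedBlock, take the within-block byte offset, shift it right by 3 + 4 (so one mark byte covers 128 bytes of payload), and index the block's mark bitmap. The register version does this with masks at runtime; the JSCell* version constant-folds it. Mirroring the arithmetic with illustrative constants (the real blockMask and offsetOfMarks come from MarkedBlock; the values below are assumptions for the demo):

    #include <cassert>
    #include <cstdint>

    constexpr uintptr_t blockMask = ~uintptr_t(64 * 1024 - 1); // assumed 64KB blocks
    constexpr uintptr_t offsetOfMarks = 64;                    // assumed header offset

    // Address of the mark byte guarding `owner`, mirroring both overloads above.
    uintptr_t markByteAddress(uintptr_t owner)
    {
        uintptr_t block = owner & blockMask;               // andPtr(blockMask, scratch1)
        uintptr_t index = (owner & ~blockMask) >> (3 + 4); // one mark byte per 128 bytes
        return block + offsetOfMarks + index;
    }

    int main()
    {
        uintptr_t block = 0x10000; // a block-aligned address
        assert(markByteAddress(block + 0) == block + offsetOfMarks);
        assert(markByteAddress(block + 128) == block + offsetOfMarks + 1);
        return 0;
    }
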
#if USE(JSVALUE64)
void JIT::emitWriteBarrier(unsigned owner, unsigned value, WriteBarrierMode mode)
{
+#if ENABLE(GGC)
+ emitGetVirtualRegister(value, regT0);
Jump valueNotCell;
- if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue) {
- emitGetVirtualRegister(value, regT0);
+ if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue)
valueNotCell = branchTest64(NonZero, regT0, tagMaskRegister);
- }
emitGetVirtualRegister(owner, regT0);
Jump ownerNotCell;
- if (mode == ShouldFilterBaseAndValue || mode == ShouldFilterBase)
+ if (mode == ShouldFilterBaseAndValue)
ownerNotCell = branchTest64(NonZero, regT0, tagMaskRegister);
- Jump ownerIsRememberedOrInEden = jumpIfIsRememberedOrInEden(regT0);
+ Jump ownerNotMarked = checkMarkWord(regT0, regT1, regT2);
callOperation(operationUnconditionalWriteBarrier, regT0);
- ownerIsRememberedOrInEden.link(this);
+ ownerNotMarked.link(this);
- if (mode == ShouldFilterBaseAndValue || mode == ShouldFilterBase)
+ if (mode == ShouldFilterBaseAndValue)
ownerNotCell.link(this);
if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue)
valueNotCell.link(this);
+#else
+ UNUSED_PARAM(owner);
+ UNUSED_PARAM(value);
+ UNUSED_PARAM(mode);
+#endif
}
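
Restated as straight-line logic, the GGC barrier above only reaches the out-of-line operationUnconditionalWriteBarrier when every cheap filter fails: the stored value must be a cell (when filtering values), the owner must be a cell (when filtering bases), and the owner's mark byte must already be set, since an unmarked new-space owner needs no barrier yet. A sketch with the JSC machinery replaced by booleans:

    #include <cassert>

    enum WriteBarrierMode { UnconditionalWriteBarrier, ShouldFilterValue, ShouldFilterBaseAndValue };

    // Mirrors the branch order in emitWriteBarrier (JSVALUE64, GGC): each early
    // return corresponds to one of the emitted filter jumps.
    bool needsSlowWriteBarrier(WriteBarrierMode mode, bool valueIsCell, bool ownerIsCell, bool ownerIsMarked)
    {
        if ((mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue) && !valueIsCell)
            return false; // valueNotCell jump
        if (mode == ShouldFilterBaseAndValue && !ownerIsCell)
            return false; // ownerNotCell jump
        if (!ownerIsMarked)
            return false; // ownerNotMarked: checkMarkWord jump taken
        return true;      // callOperation(operationUnconditionalWriteBarrier, owner)
    }

    int main()
    {
        assert(!needsSlowWriteBarrier(ShouldFilterValue, false, true, true));
        assert(needsSlowWriteBarrier(ShouldFilterBaseAndValue, true, true, true));
        return 0;
    }
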
void JIT::emitWriteBarrier(JSCell* owner, unsigned value, WriteBarrierMode mode)
{
+#if ENABLE(GGC)
emitGetVirtualRegister(value, regT0);
Jump valueNotCell;
if (mode == ShouldFilterValue)
valueNotCell = branchTest64(NonZero, regT0, tagMaskRegister);
- emitWriteBarrier(owner);
+ if (!MarkedBlock::blockFor(owner)->isMarked(owner)) {
+ Jump ownerNotMarked = checkMarkWord(regT0, regT1, regT2);
+ callOperation(operationUnconditionalWriteBarrier, owner);
+ ownerNotMarked.link(this);
+ } else
+ callOperation(operationUnconditionalWriteBarrier, owner);
if (mode == ShouldFilterValue)
valueNotCell.link(this);
+#else
+ UNUSED_PARAM(owner);
+ UNUSED_PARAM(value);
+ UNUSED_PARAM(mode);
+#endif
}
#else // USE(JSVALUE64)
void JIT::emitWriteBarrier(unsigned owner, unsigned value, WriteBarrierMode mode)
{
+#if ENABLE(GGC)
+ emitLoadTag(value, regT0);
Jump valueNotCell;
- if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue) {
- emitLoadTag(value, regT0);
+ if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue)
valueNotCell = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
- }
emitLoad(owner, regT0, regT1);
Jump ownerNotCell;
- if (mode == ShouldFilterBase || mode == ShouldFilterBaseAndValue)
+ if (mode == ShouldFilterBaseAndValue)
ownerNotCell = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
- Jump ownerIsRememberedOrInEden = jumpIfIsRememberedOrInEden(regT1);
+ Jump ownerNotMarked = checkMarkWord(regT1, regT0, regT2);
callOperation(operationUnconditionalWriteBarrier, regT1);
- ownerIsRememberedOrInEden.link(this);
+ ownerNotMarked.link(this);
- if (mode == ShouldFilterBase || mode == ShouldFilterBaseAndValue)
+ if (mode == ShouldFilterBaseAndValue)
ownerNotCell.link(this);
if (mode == ShouldFilterValue || mode == ShouldFilterBaseAndValue)
valueNotCell.link(this);
+#else
+ UNUSED_PARAM(owner);
+ UNUSED_PARAM(value);
+ UNUSED_PARAM(mode);
+#endif
}
void JIT::emitWriteBarrier(JSCell* owner, unsigned value, WriteBarrierMode mode)
{
+#if ENABLE(GGC)
+ emitLoadTag(value, regT0);
Jump valueNotCell;
- if (mode == ShouldFilterValue) {
- emitLoadTag(value, regT0);
+ if (mode == ShouldFilterValue)
valueNotCell = branch32(NotEqual, regT0, TrustedImm32(JSValue::CellTag));
- }
- emitWriteBarrier(owner);
+ if (!MarkedBlock::blockFor(owner)->isMarked(owner)) {
+ Jump ownerNotMarked = checkMarkWord(regT0, regT1, regT2);
+ callOperation(operationUnconditionalWriteBarrier, owner);
+ ownerNotMarked.link(this);
+ } else
+ callOperation(operationUnconditionalWriteBarrier, owner);
if (mode == ShouldFilterValue)
valueNotCell.link(this);
+#else
+ UNUSED_PARAM(owner);
+ UNUSED_PARAM(value);
+ UNUSED_PARAM(mode);
+#endif
}
#endif // USE(JSVALUE64)
-void JIT::emitWriteBarrier(JSCell* owner)
+JIT::Jump JIT::addStructureTransitionCheck(JSCell* object, Structure* structure, StructureStubInfo* stubInfo, RegisterID scratch)
{
- if (!MarkedBlock::blockFor(owner)->isMarked(owner)) {
- Jump ownerIsRememberedOrInEden = jumpIfIsRememberedOrInEden(owner);
- callOperation(operationUnconditionalWriteBarrier, owner);
- ownerIsRememberedOrInEden.link(this);
- } else
- callOperation(operationUnconditionalWriteBarrier, owner);
+ if (object->structure() == structure && structure->transitionWatchpointSetIsStillValid()) {
+ structure->addTransitionWatchpoint(stubInfo->addWatchpoint(m_codeBlock));
+#if !ASSERT_DISABLED
+ move(TrustedImmPtr(object), scratch);
+ Jump ok = branchPtr(Equal, Address(scratch, JSCell::structureOffset()), TrustedImmPtr(structure));
+ breakpoint();
+ ok.link(this);
+#endif
+ Jump result; // Returning an unset jump this way because otherwise VC++ would complain.
+ return result;
+ }
+
+ move(TrustedImmPtr(object), scratch);
+ return branchPtr(NotEqual, Address(scratch, JSCell::structureOffset()), TrustedImmPtr(structure));
}
-void JIT::emitIdentifierCheck(RegisterID cell, RegisterID scratch, const Identifier& propertyName, JumpList& slowCases)
+void JIT::addStructureTransitionCheck(JSCell* object, Structure* structure, StructureStubInfo* stubInfo, JumpList& failureCases, RegisterID scratch)
{
- if (propertyName.isSymbol()) {
- slowCases.append(branchStructure(NotEqual, Address(cell, JSCell::structureIDOffset()), m_vm->symbolStructure.get()));
- loadPtr(Address(cell, Symbol::offsetOfPrivateName()), scratch);
- } else {
- slowCases.append(branchStructure(NotEqual, Address(cell, JSCell::structureIDOffset()), m_vm->stringStructure.get()));
- loadPtr(Address(cell, JSString::offsetOfValue()), scratch);
- }
- slowCases.append(branchPtr(NotEqual, scratch, TrustedImmPtr(propertyName.impl())));
+ Jump failureCase = addStructureTransitionCheck(object, structure, stubInfo, scratch);
+ if (!failureCase.isSet())
+ return;
+
+ failureCases.append(failureCase);
+}
+
+void JIT::testPrototype(JSValue prototype, JumpList& failureCases, StructureStubInfo* stubInfo)
+{
+ if (prototype.isNull())
+ return;
+
+ ASSERT(prototype.isCell());
+ addStructureTransitionCheck(prototype.asCell(), prototype.asCell()->structure(), stubInfo, failureCases, regT3);
}
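
testPrototype is the building block for prototype-chain caching: for each non-null prototype, addStructureTransitionCheck either registers a transition watchpoint (when the Structure still matches and is watchable, emitting no runtime check at all, hence the unset Jump) or falls back to an explicit structure compare. A condensed decision model of that choice:

    #include <cassert>

    // 'true' means a runtime structure compare must be emitted;
    // 'false' means a transition watchpoint suffices.
    bool needsRuntimeCheck(bool structureMatches, bool transitionWatchpointSetIsStillValid)
    {
        if (structureMatches && transitionWatchpointSetIsStillValid)
            return false; // addTransitionWatchpoint(...); unset Jump returned
        return true;      // branchPtr(NotEqual, structureOffset, structure)
    }

    int main()
    {
        assert(!needsRuntimeCheck(true, true));
        assert(needsRuntimeCheck(true, false));
        return 0;
    }
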
void JIT::privateCompileGetByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
@@ -1209,12 +1059,6 @@ void JIT::privateCompileGetByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAd
case JITArrayStorage:
slowCases = emitArrayStorageGetByVal(currentInstruction, badType);
break;
- case JITDirectArguments:
- slowCases = emitDirectArgumentsGetByVal(currentInstruction, badType);
- break;
- case JITScopedArguments:
- slowCases = emitScopedArgumentsGetByVal(currentInstruction, badType);
- break;
default:
TypedArrayType type = typedArrayTypeForJITArrayMode(arrayMode);
if (isInt(type))
@@ -1226,7 +1070,7 @@ void JIT::privateCompileGetByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAd
Jump done = jump();
- LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
+ LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
@@ -1234,42 +1078,12 @@ void JIT::privateCompileGetByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAd
patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
- m_codeBlock, patchBuffer,
+ patchBuffer,
("Baseline get_by_val stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
- MacroAssembler::repatchJump(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
- MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(operationGetByValGeneric));
-}
-
-void JIT::privateCompileGetByValWithCachedId(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, const Identifier& propertyName)
-{
- Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;
-
- Jump fastDoneCase;
- Jump slowDoneCase;
- JumpList slowCases;
-
- JITGetByIdGenerator gen = emitGetByValWithCachedId(currentInstruction, propertyName, fastDoneCase, slowDoneCase, slowCases);
-
- ConcurrentJITLocker locker(m_codeBlock->m_lock);
- LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
- patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
- patchBuffer.link(fastDoneCase, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
- patchBuffer.link(slowDoneCase, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToNextHotPath));
-
- for (const auto& callSite : m_calls) {
- if (callSite.to)
- patchBuffer.link(callSite.from, FunctionPtr(callSite.to));
- }
- gen.finalize(patchBuffer);
-
- byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
- m_codeBlock, patchBuffer,
- ("Baseline get_by_val with cached property name '%s' stub for %s, return point %p", propertyName.impl()->utf8().data(), toCString(*m_codeBlock).data(), returnAddress.value()));
- byValInfo->stubInfo = gen.stubInfo();
-
- MacroAssembler::repatchJump(byValInfo->notIndexJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
- MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(operationGetByValGeneric));
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(operationGetByValGeneric));
}
void JIT::privateCompilePutByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
@@ -1279,7 +1093,9 @@ void JIT::privateCompilePutByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAd
PatchableJump badType;
JumpList slowCases;
+#if ENABLE(GGC)
bool needsLinkForWriteBarrier = false;
+#endif
switch (arrayMode) {
case JITInt32:
@@ -1290,11 +1106,15 @@ void JIT::privateCompilePutByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAd
break;
case JITContiguous:
slowCases = emitContiguousPutByVal(currentInstruction, badType);
+#if ENABLE(GGC)
needsLinkForWriteBarrier = true;
+#endif
break;
case JITArrayStorage:
slowCases = emitArrayStoragePutByVal(currentInstruction, badType);
+#if ENABLE(GGC)
needsLinkForWriteBarrier = true;
+#endif
break;
default:
TypedArrayType type = typedArrayTypeForJITArrayMode(arrayMode);
@@ -1307,126 +1127,31 @@ void JIT::privateCompilePutByVal(ByValInfo* byValInfo, ReturnAddressPtr returnAd
Jump done = jump();
- LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
+ LinkBuffer patchBuffer(*m_vm, this, m_codeBlock);
patchBuffer.link(badType, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
patchBuffer.link(done, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
+#if ENABLE(GGC)
if (needsLinkForWriteBarrier) {
ASSERT(m_calls.last().to == operationUnconditionalWriteBarrier);
patchBuffer.link(m_calls.last().from, operationUnconditionalWriteBarrier);
}
+#endif
bool isDirect = m_interpreter->getOpcodeID(currentInstruction->u.opcode) == op_put_by_val_direct;
if (!isDirect) {
byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
- m_codeBlock, patchBuffer,
+ patchBuffer,
("Baseline put_by_val stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
} else {
byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
- m_codeBlock, patchBuffer,
+ patchBuffer,
("Baseline put_by_val_direct stub for %s, return point %p", toCString(*m_codeBlock).data(), returnAddress.value()));
}
- MacroAssembler::repatchJump(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
- MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(isDirect ? operationDirectPutByValGeneric : operationPutByValGeneric));
-}
-
-void JIT::privateCompilePutByValWithCachedId(ByValInfo* byValInfo, ReturnAddressPtr returnAddress, PutKind putKind, const Identifier& propertyName)
-{
- Instruction* currentInstruction = m_codeBlock->instructions().begin() + byValInfo->bytecodeIndex;
-
- JumpList doneCases;
- JumpList slowCases;
-
- JITPutByIdGenerator gen = emitPutByValWithCachedId(currentInstruction, putKind, propertyName, doneCases, slowCases);
-
- ConcurrentJITLocker locker(m_codeBlock->m_lock);
- LinkBuffer patchBuffer(*m_vm, *this, m_codeBlock);
- patchBuffer.link(slowCases, CodeLocationLabel(MacroAssemblerCodePtr::createFromExecutableAddress(returnAddress.value())).labelAtOffset(byValInfo->returnAddressToSlowPath));
- patchBuffer.link(doneCases, byValInfo->badTypeJump.labelAtOffset(byValInfo->badTypeJumpToDone));
- for (const auto& callSite : m_calls) {
- if (callSite.to)
- patchBuffer.link(callSite.from, FunctionPtr(callSite.to));
- }
- gen.finalize(patchBuffer);
-
- byValInfo->stubRoutine = FINALIZE_CODE_FOR_STUB(
- m_codeBlock, patchBuffer,
- ("Baseline put_by_val%s with cached property name '%s' stub for %s, return point %p", (putKind == Direct) ? "_direct" : "", propertyName.impl()->utf8().data(), toCString(*m_codeBlock).data(), returnAddress.value()));
- byValInfo->stubInfo = gen.stubInfo();
-
- MacroAssembler::repatchJump(byValInfo->notIndexJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
- MacroAssembler::repatchCall(CodeLocationCall(MacroAssemblerCodePtr(returnAddress)), FunctionPtr(putKind == Direct ? operationDirectPutByValGeneric : operationPutByValGeneric));
-}
-
-
-JIT::JumpList JIT::emitDirectArgumentsGetByVal(Instruction*, PatchableJump& badType)
-{
- JumpList slowCases;
-
-#if USE(JSVALUE64)
- RegisterID base = regT0;
- RegisterID property = regT1;
- JSValueRegs result = JSValueRegs(regT0);
- RegisterID scratch = regT3;
-#else
- RegisterID base = regT0;
- RegisterID property = regT2;
- JSValueRegs result = JSValueRegs(regT1, regT0);
- RegisterID scratch = regT3;
-#endif
-
- load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
- badType = patchableBranch32(NotEqual, scratch, TrustedImm32(DirectArgumentsType));
-
- slowCases.append(branch32(AboveOrEqual, property, Address(base, DirectArguments::offsetOfLength())));
- slowCases.append(branchTestPtr(NonZero, Address(base, DirectArguments::offsetOfOverrides())));
-
- zeroExtend32ToPtr(property, scratch);
- loadValue(BaseIndex(base, scratch, TimesEight, DirectArguments::storageOffset()), result);
-
- return slowCases;
-}
-
-JIT::JumpList JIT::emitScopedArgumentsGetByVal(Instruction*, PatchableJump& badType)
-{
- JumpList slowCases;
-
-#if USE(JSVALUE64)
- RegisterID base = regT0;
- RegisterID property = regT1;
- JSValueRegs result = JSValueRegs(regT0);
- RegisterID scratch = regT3;
- RegisterID scratch2 = regT4;
-#else
- RegisterID base = regT0;
- RegisterID property = regT2;
- JSValueRegs result = JSValueRegs(regT1, regT0);
- RegisterID scratch = regT3;
- RegisterID scratch2 = regT4;
-#endif
-
- load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
- badType = patchableBranch32(NotEqual, scratch, TrustedImm32(ScopedArgumentsType));
- slowCases.append(branch32(AboveOrEqual, property, Address(base, ScopedArguments::offsetOfTotalLength())));
-
- loadPtr(Address(base, ScopedArguments::offsetOfTable()), scratch);
- load32(Address(scratch, ScopedArgumentsTable::offsetOfLength()), scratch2);
- Jump overflowCase = branch32(AboveOrEqual, property, scratch2);
- loadPtr(Address(base, ScopedArguments::offsetOfScope()), scratch2);
- loadPtr(Address(scratch, ScopedArgumentsTable::offsetOfArguments()), scratch);
- load32(BaseIndex(scratch, property, TimesFour), scratch);
- slowCases.append(branch32(Equal, scratch, TrustedImm32(ScopeOffset::invalidOffset)));
- loadValue(BaseIndex(scratch2, scratch, TimesEight, JSEnvironmentRecord::offsetOfVariables()), result);
- Jump done = jump();
- overflowCase.link(this);
- sub32(property, scratch2);
- neg32(scratch2);
- loadValue(BaseIndex(base, scratch2, TimesEight, ScopedArguments::overflowStorageOffset()), result);
- slowCases.append(branchIfEmpty(result));
- done.link(this);
-
- return slowCases;
+ RepatchBuffer repatchBuffer(m_codeBlock);
+ repatchBuffer.relink(byValInfo->badTypeJump, CodeLocationLabel(byValInfo->stubRoutine->code().code()));
+ repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(isDirect ? operationDirectPutByValGeneric : operationPutByValGeneric));
}
JIT::JumpList JIT::emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badType, TypedArrayType type)
@@ -1451,26 +1176,26 @@ JIT::JumpList JIT::emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badTyp
JumpList slowCases;
- load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
- badType = patchableBranch32(NotEqual, scratch, TrustedImm32(typeForTypedArrayType(type)));
+ loadPtr(Address(base, JSCell::structureOffset()), scratch);
+ badType = patchableBranchPtr(NotEqual, Address(scratch, Structure::classInfoOffset()), TrustedImmPtr(classInfoForType(type)));
slowCases.append(branch32(AboveOrEqual, property, Address(base, JSArrayBufferView::offsetOfLength())));
- slowCases.append(loadTypedArrayVector(base, scratch));
+ loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), base);
switch (elementSize(type)) {
case 1:
- if (JSC::isSigned(type))
- load8SignedExtendTo32(BaseIndex(scratch, property, TimesOne), resultPayload);
+ if (isSigned(type))
+ load8Signed(BaseIndex(base, property, TimesOne), resultPayload);
else
- load8(BaseIndex(scratch, property, TimesOne), resultPayload);
+ load8(BaseIndex(base, property, TimesOne), resultPayload);
break;
case 2:
- if (JSC::isSigned(type))
- load16SignedExtendTo32(BaseIndex(scratch, property, TimesTwo), resultPayload);
+ if (isSigned(type))
+ load16Signed(BaseIndex(base, property, TimesTwo), resultPayload);
else
- load16(BaseIndex(scratch, property, TimesTwo), resultPayload);
+ load16(BaseIndex(base, property, TimesTwo), resultPayload);
break;
case 4:
- load32(BaseIndex(scratch, property, TimesFour), resultPayload);
+ load32(BaseIndex(base, property, TimesFour), resultPayload);
break;
default:
CRASH();
@@ -1521,19 +1246,19 @@ JIT::JumpList JIT::emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badT
#endif
JumpList slowCases;
-
- load8(Address(base, JSCell::typeInfoTypeOffset()), scratch);
- badType = patchableBranch32(NotEqual, scratch, TrustedImm32(typeForTypedArrayType(type)));
+
+ loadPtr(Address(base, JSCell::structureOffset()), scratch);
+ badType = patchableBranchPtr(NotEqual, Address(scratch, Structure::classInfoOffset()), TrustedImmPtr(classInfoForType(type)));
slowCases.append(branch32(AboveOrEqual, property, Address(base, JSArrayBufferView::offsetOfLength())));
- slowCases.append(loadTypedArrayVector(base, scratch));
+ loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), base);
switch (elementSize(type)) {
case 4:
- loadFloat(BaseIndex(scratch, property, TimesFour), fpRegT0);
+ loadFloat(BaseIndex(base, property, TimesFour), fpRegT0);
convertFloatToDouble(fpRegT0, fpRegT0);
break;
case 8: {
- loadDouble(BaseIndex(scratch, property, TimesEight), fpRegT0);
+ loadDouble(BaseIndex(base, property, TimesEight), fpRegT0);
break;
}
default:
@@ -1541,8 +1266,8 @@ JIT::JumpList JIT::emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badT
}
Jump notNaN = branchDouble(DoubleEqual, fpRegT0, fpRegT0);
- static const double NaN = PNaN;
- loadDouble(TrustedImmPtr(&NaN), fpRegT0);
+ static const double NaN = QNaN;
+ loadDouble(&NaN, fpRegT0);
notNaN.link(this);
#if USE(JSVALUE64)
@@ -1556,7 +1281,6 @@ JIT::JumpList JIT::emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badT
JIT::JumpList JIT::emitIntTypedArrayPutByVal(Instruction* currentInstruction, PatchableJump& badType, TypedArrayType type)
{
- ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
ASSERT(isInt(type));
int value = currentInstruction[3].u.operand;
@@ -1575,16 +1299,13 @@ JIT::JumpList JIT::emitIntTypedArrayPutByVal(Instruction* currentInstruction, Pa
JumpList slowCases;
- load8(Address(base, JSCell::typeInfoTypeOffset()), earlyScratch);
- badType = patchableBranch32(NotEqual, earlyScratch, TrustedImm32(typeForTypedArrayType(type)));
- Jump inBounds = branch32(Below, property, Address(base, JSArrayBufferView::offsetOfLength()));
- emitArrayProfileOutOfBoundsSpecialCase(profile);
- Jump done = jump();
- inBounds.link(this);
+ loadPtr(Address(base, JSCell::structureOffset()), earlyScratch);
+ badType = patchableBranchPtr(NotEqual, Address(earlyScratch, Structure::classInfoOffset()), TrustedImmPtr(classInfoForType(type)));
+ slowCases.append(branch32(AboveOrEqual, property, Address(base, JSArrayBufferView::offsetOfLength())));
#if USE(JSVALUE64)
emitGetVirtualRegister(value, earlyScratch);
- slowCases.append(emitJumpIfNotInt(earlyScratch));
+ slowCases.append(emitJumpIfNotImmediateInteger(earlyScratch));
#else
emitLoad(value, lateScratch, earlyScratch);
slowCases.append(branch32(NotEqual, lateScratch, TrustedImm32(JSValue::Int32Tag)));
@@ -1592,11 +1313,11 @@ JIT::JumpList JIT::emitIntTypedArrayPutByVal(Instruction* currentInstruction, Pa
// We would be loading this into base as in get_by_val, except that the slow
// path expects the base to be unclobbered.
- slowCases.append(loadTypedArrayVector(base, lateScratch));
+ loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), lateScratch);
if (isClamped(type)) {
ASSERT(elementSize(type) == 1);
- ASSERT(!JSC::isSigned(type));
+ ASSERT(!isSigned(type));
Jump inBounds = branch32(BelowOrEqual, earlyScratch, TrustedImm32(0xff));
Jump tooBig = branch32(GreaterThan, earlyScratch, TrustedImm32(0xff));
xor32(earlyScratch, earlyScratch);
@@ -1621,14 +1342,11 @@ JIT::JumpList JIT::emitIntTypedArrayPutByVal(Instruction* currentInstruction, Pa
CRASH();
}
- done.link(this);
-
return slowCases;
}
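
The isClamped branch above is the Uint8ClampedArray store: values saturate to [0, 255] instead of wrapping. Only int32 inputs reach it on this path (non-integers were already routed to the slow cases), so the emitted three-way branch is equivalent to this scalar helper:

    #include <cstdint>

    // Scalar equivalent of the clamped store's three-way branch.
    static inline uint8_t clampToUint8(int32_t value)
    {
        if (value < 0)
            return 0;    // the xor32 zeroing arm
        if (value > 0xff)
            return 0xff; // the TrustedImm32(0xff) arm
        return static_cast<uint8_t>(value);
    }
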
JIT::JumpList JIT::emitFloatTypedArrayPutByVal(Instruction* currentInstruction, PatchableJump& badType, TypedArrayType type)
{
- ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
ASSERT(isFloat(type));
int value = currentInstruction[3].u.operand;
@@ -1647,20 +1365,17 @@ JIT::JumpList JIT::emitFloatTypedArrayPutByVal(Instruction* currentInstruction,
JumpList slowCases;
- load8(Address(base, JSCell::typeInfoTypeOffset()), earlyScratch);
- badType = patchableBranch32(NotEqual, earlyScratch, TrustedImm32(typeForTypedArrayType(type)));
- Jump inBounds = branch32(Below, property, Address(base, JSArrayBufferView::offsetOfLength()));
- emitArrayProfileOutOfBoundsSpecialCase(profile);
- Jump done = jump();
- inBounds.link(this);
+ loadPtr(Address(base, JSCell::structureOffset()), earlyScratch);
+ badType = patchableBranchPtr(NotEqual, Address(earlyScratch, Structure::classInfoOffset()), TrustedImmPtr(classInfoForType(type)));
+ slowCases.append(branch32(AboveOrEqual, property, Address(base, JSArrayBufferView::offsetOfLength())));
#if USE(JSVALUE64)
emitGetVirtualRegister(value, earlyScratch);
- Jump doubleCase = emitJumpIfNotInt(earlyScratch);
+ Jump doubleCase = emitJumpIfNotImmediateInteger(earlyScratch);
convertInt32ToDouble(earlyScratch, fpRegT0);
Jump ready = jump();
doubleCase.link(this);
- slowCases.append(emitJumpIfNotNumber(earlyScratch));
+ slowCases.append(emitJumpIfNotImmediateNumber(earlyScratch));
add64(tagTypeNumberRegister, earlyScratch);
move64ToDouble(earlyScratch, fpRegT0);
ready.link(this);
@@ -1677,7 +1392,7 @@ JIT::JumpList JIT::emitFloatTypedArrayPutByVal(Instruction* currentInstruction,
// We would be loading this into base as in get_by_val, except that the slow
// path expects the base to be unclobbered.
- slowCases.append(loadTypedArrayVector(base, lateScratch));
+ loadPtr(Address(base, JSArrayBufferView::offsetOfVector()), lateScratch);
switch (elementSize(type)) {
case 4:
@@ -1691,8 +1406,6 @@ JIT::JumpList JIT::emitFloatTypedArrayPutByVal(Instruction* currentInstruction,
CRASH();
}
- done.link(this);
-
return slowCases;
}
diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
index ce93d4140..5bc8d1abb 100644
--- a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
+++ b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2008, 2009, 2014, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2008, 2009 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,17 +30,17 @@
#include "JIT.h"
#include "CodeBlock.h"
-#include "DirectArguments.h"
#include "GCAwareJITStubRoutine.h"
#include "Interpreter.h"
#include "JITInlines.h"
#include "JSArray.h"
-#include "JSEnvironmentRecord.h"
#include "JSFunction.h"
+#include "JSPropertyNameIterator.h"
+#include "JSVariableObject.h"
#include "LinkBuffer.h"
+#include "RepatchBuffer.h"
#include "ResultType.h"
#include "SamplingTool.h"
-#include "SlowPathCall.h"
#include <wtf/StringPrintStream.h>
@@ -57,68 +57,17 @@ void JIT::emit_op_put_by_index(Instruction* currentInstruction)
callOperation(operationPutByIndex, regT1, regT0, property, regT3, regT2);
}
-void JIT::emit_op_put_getter_by_id(Instruction* currentInstruction)
+void JIT::emit_op_put_getter_setter(Instruction* currentInstruction)
{
int base = currentInstruction[1].u.operand;
int property = currentInstruction[2].u.operand;
- int options = currentInstruction[3].u.operand;
- int getter = currentInstruction[4].u.operand;
-
- emitLoadPayload(base, regT1);
- emitLoadPayload(getter, regT3);
- callOperation(operationPutGetterById, regT1, m_codeBlock->identifier(property).impl(), options, regT3);
-}
-
-void JIT::emit_op_put_setter_by_id(Instruction* currentInstruction)
-{
- int base = currentInstruction[1].u.operand;
- int property = currentInstruction[2].u.operand;
- unsigned options = currentInstruction[3].u.operand;
+ int getter = currentInstruction[3].u.operand;
int setter = currentInstruction[4].u.operand;
emitLoadPayload(base, regT1);
- emitLoadPayload(setter, regT3);
- callOperation(operationPutSetterById, regT1, m_codeBlock->identifier(property).impl(), options, regT3);
-}
-
-void JIT::emit_op_put_getter_setter_by_id(Instruction* currentInstruction)
-{
- int base = currentInstruction[1].u.operand;
- int property = currentInstruction[2].u.operand;
- unsigned attribute = currentInstruction[3].u.operand;
- int getter = currentInstruction[4].u.operand;
- int setter = currentInstruction[5].u.operand;
-
- emitLoadPayload(base, regT1);
emitLoadPayload(getter, regT3);
emitLoadPayload(setter, regT4);
- callOperation(operationPutGetterSetter, regT1, m_codeBlock->identifier(property).impl(), attribute, regT3, regT4);
-}
-
-void JIT::emit_op_put_getter_by_val(Instruction* currentInstruction)
-{
- int base = currentInstruction[1].u.operand;
- int property = currentInstruction[2].u.operand;
- int32_t attributes = currentInstruction[3].u.operand;
- int getter = currentInstruction[4].u.operand;
-
- emitLoadPayload(base, regT2);
- emitLoad(property, regT1, regT0);
- emitLoadPayload(getter, regT3);
- callOperation(operationPutGetterByVal, regT2, regT1, regT0, attributes, regT3);
-}
-
-void JIT::emit_op_put_setter_by_val(Instruction* currentInstruction)
-{
- int base = currentInstruction[1].u.operand;
- int property = currentInstruction[2].u.operand;
- int32_t attributes = currentInstruction[3].u.operand;
- int getter = currentInstruction[4].u.operand;
-
- emitLoadPayload(base, regT2);
- emitLoad(property, regT1, regT0);
- emitLoadPayload(getter, regT3);
- callOperation(operationPutSetterByVal, regT2, regT1, regT0, attributes, regT3);
+ callOperation(operationPutGetterSetter, regT1, &m_codeBlock->identifier(property), regT3, regT4);
}
void JIT::emit_op_del_by_id(Instruction* currentInstruction)
@@ -134,7 +83,7 @@ JIT::CodeRef JIT::stringGetByValStubGenerator(VM* vm)
{
JSInterfaceJIT jit(vm);
JumpList failures;
- failures.append(jit.branchStructure(NotEqual, Address(regT0, JSCell::structureIDOffset()), vm->stringStructure.get()));
+ failures.append(jit.branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(vm->stringStructure.get())));
// Load string length to regT1, and start the process of loading the data pointer into regT0
jit.load32(Address(regT0, ThunkHelpers::jsStringLengthOffset()), regT1);
@@ -168,7 +117,7 @@ JIT::CodeRef JIT::stringGetByValStubGenerator(VM* vm)
jit.move(TrustedImm32(0), regT0);
jit.ret();
- LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
return FINALIZE_CODE(patchBuffer, ("String get_by_val stub"));
}
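
Note the stub's failure protocol: it does not raise a slow case itself. On a miss it returns a zero payload in regT0, and the by-val slow path (emitSlow_op_get_by_val, later in this diff) tests that register and falls through to the C++ operation. The caller-side contract, in comment form:

    // emitNakedCall(stringGetByValStub);
    // if (regT0 == 0): fall through to the generic C++ get_by_val
    // else:            (regT1, regT0) already hold the result's tag/payload
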
@@ -178,14 +127,13 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
int base = currentInstruction[2].u.operand;
int property = currentInstruction[3].u.operand;
ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
- ByValInfo* byValInfo = m_codeBlock->addByValInfo();
emitLoad2(base, regT1, regT0, property, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
emitJumpSlowCaseIfNotJSCell(base, regT1);
- PatchableJump notIndex = patchableBranch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag));
- addSlowCase(notIndex);
- emitArrayProfilingSiteWithCell(regT0, regT1, profile);
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
+ emitArrayProfilingSite(regT1, regT3, profile);
and32(TrustedImm32(IndexingShapeMask), regT1);
PatchableJump badType;
@@ -214,27 +162,27 @@ void JIT::emit_op_get_by_val(Instruction* currentInstruction)
Label done = label();
- if (!ASSERT_DISABLED) {
- Jump resultOK = branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag));
- abortWithReason(JITGetByValResultIsNotEmpty);
- resultOK.link(this);
- }
+#if !ASSERT_DISABLED
+ Jump resultOK = branch32(NotEqual, regT1, TrustedImm32(JSValue::EmptyValueTag));
+ breakpoint();
+ resultOK.link(this);
+#endif
emitValueProfilingSite();
emitStore(dst, regT1, regT0);
-
- Label nextHotPath = label();
- m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, notIndex, badType, mode, profile, done, nextHotPath));
+ m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
}
-JIT::JumpList JIT::emitContiguousLoad(Instruction*, PatchableJump& badType, IndexingType expectedShape)
+JIT::JumpList JIT::emitContiguousGetByVal(Instruction*, PatchableJump& badType, IndexingType expectedShape)
{
JumpList slowCases;
badType = patchableBranch32(NotEqual, regT1, TrustedImm32(expectedShape));
+
loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, Butterfly::offsetOfPublicLength())));
+
load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
slowCases.append(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));
@@ -242,75 +190,52 @@ JIT::JumpList JIT::emitContiguousLoad(Instruction*, PatchableJump& badType, Inde
return slowCases;
}
-JIT::JumpList JIT::emitDoubleLoad(Instruction*, PatchableJump& badType)
+JIT::JumpList JIT::emitDoubleGetByVal(Instruction*, PatchableJump& badType)
{
JumpList slowCases;
badType = patchableBranch32(NotEqual, regT1, TrustedImm32(DoubleShape));
+
loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, Butterfly::offsetOfPublicLength())));
+
loadDouble(BaseIndex(regT3, regT2, TimesEight), fpRegT0);
slowCases.append(branchDouble(DoubleNotEqualOrUnordered, fpRegT0, fpRegT0));
+ moveDoubleToInts(fpRegT0, regT0, regT1);
return slowCases;
}
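
One 32-bit-specific detail above: after the NaN check (holes in a double array are represented by a NaN pattern, so NaN loads take the slow path), the double must be re-boxed into a JSValue, which moveDoubleToInts does by splitting the raw 64 bits into payload and tag words. A sketch of that split on a little-endian target:

    #include <cstdint>
    #include <cstring>

    // Illustrative only: how a raw double becomes (tag, payload) words.
    void moveDoubleToInts(double d, uint32_t& payload, uint32_t& tag)
    {
        uint64_t bits;
        std::memcpy(&bits, &d, sizeof(bits));
        payload = static_cast<uint32_t>(bits);   // low word
        tag = static_cast<uint32_t>(bits >> 32); // high word
    }
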
-JIT::JumpList JIT::emitArrayStorageLoad(Instruction*, PatchableJump& badType)
+JIT::JumpList JIT::emitArrayStorageGetByVal(Instruction*, PatchableJump& badType)
{
JumpList slowCases;
add32(TrustedImm32(-ArrayStorageShape), regT1, regT3);
badType = patchableBranch32(Above, regT3, TrustedImm32(SlowPutArrayStorageShape - ArrayStorageShape));
+
loadPtr(Address(regT0, JSObject::butterflyOffset()), regT3);
slowCases.append(branch32(AboveOrEqual, regT2, Address(regT3, ArrayStorage::vectorLengthOffset())));
+
load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag
load32(BaseIndex(regT3, regT2, TimesEight, OBJECT_OFFSETOF(ArrayStorage, m_vector[0]) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload
slowCases.append(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));
return slowCases;
}
-
-JITGetByIdGenerator JIT::emitGetByValWithCachedId(Instruction* currentInstruction, const Identifier& propertyName, Jump& fastDoneCase, Jump& slowDoneCase, JumpList& slowCases)
-{
- int dst = currentInstruction[1].u.operand;
-
- // base: tag(regT1), payload(regT0)
- // property: tag(regT3), payload(regT2)
- // scratch: regT4
-
- slowCases.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::CellTag)));
- emitIdentifierCheck(regT2, regT4, propertyName, slowCases);
-
- JITGetByIdGenerator gen(
- m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(),
- JSValueRegs::payloadOnly(regT0), JSValueRegs(regT1, regT0));
- gen.generateFastPath(*this);
-
- fastDoneCase = jump();
-
- Label coldPathBegin = label();
- gen.slowPathJump().link(this);
-
- Call call = callOperation(WithProfile, operationGetByIdOptimize, dst, gen.stubInfo(), regT1, regT0, propertyName.impl());
- gen.reportSlowPathCall(coldPathBegin, call);
- slowDoneCase = jump();
-
- return gen;
-}
-
+
void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
int dst = currentInstruction[1].u.operand;
int base = currentInstruction[2].u.operand;
int property = currentInstruction[3].u.operand;
- ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;
-
- linkSlowCaseIfNotJSCell(iter, base); // base cell check
+ ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
+
linkSlowCase(iter); // property int32 check
+ linkSlowCaseIfNotJSCell(iter, base); // base cell check
Jump nonCell = jump();
linkSlowCase(iter); // base array check
- Jump notString = branchStructure(NotEqual, Address(regT0, JSCell::structureIDOffset()), m_vm->stringStructure.get());
+ Jump notString = branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), TrustedImmPtr(m_vm->stringStructure.get()));
emitNakedCall(m_vm->getCTIStub(stringGetByValStubGenerator).code());
Jump failed = branchTestPtr(Zero, regT0);
emitStore(dst, regT1, regT0);
@@ -318,15 +243,21 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas
failed.link(this);
notString.link(this);
nonCell.link(this);
+
+ Jump skipProfiling = jump();
linkSlowCase(iter); // vector length check
linkSlowCase(iter); // empty value
+ emitArrayProfileOutOfBoundsSpecialCase(profile);
+
+ skipProfiling.link(this);
+
Label slowPath = label();
emitLoad(base, regT1, regT0);
emitLoad(property, regT3, regT2);
- Call call = callOperation(operationGetByValOptimize, dst, regT1, regT0, regT3, regT2, byValInfo);
+ Call call = callOperation(operationGetByValDefault, dst, regT1, regT0, regT3, regT2);
m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
m_byValCompilationInfo[m_byValInstructionIndex].returnAddress = call;
@@ -340,14 +271,13 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
int base = currentInstruction[1].u.operand;
int property = currentInstruction[2].u.operand;
ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
- ByValInfo* byValInfo = m_codeBlock->addByValInfo();
emitLoad2(base, regT1, regT0, property, regT3, regT2);
+ addSlowCase(branch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag)));
emitJumpSlowCaseIfNotJSCell(base, regT1);
- PatchableJump notIndex = patchableBranch32(NotEqual, regT3, TrustedImm32(JSValue::Int32Tag));
- addSlowCase(notIndex);
- emitArrayProfilingSiteWithCell(regT0, regT1, profile);
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT1);
+ emitArrayProfilingSite(regT1, regT3, profile);
and32(TrustedImm32(IndexingShapeMask), regT1);
PatchableJump badType;
@@ -377,7 +307,7 @@ void JIT::emit_op_put_by_val(Instruction* currentInstruction)
Label done = label();
- m_byValCompilationInfo.append(ByValCompilationInfo(byValInfo, m_bytecodeOffset, notIndex, badType, mode, profile, done, done));
+ m_byValCompilationInfo.append(ByValCompilationInfo(m_bytecodeOffset, badType, mode, done));
}
JIT::JumpList JIT::emitGenericContiguousPutByVal(Instruction* currentInstruction, PatchableJump& badType, IndexingType indexingShape)
@@ -476,52 +406,15 @@ JIT::JumpList JIT::emitArrayStoragePutByVal(Instruction* currentInstruction, Pat
return slowCases;
}
-JITPutByIdGenerator JIT::emitPutByValWithCachedId(Instruction* currentInstruction, PutKind putKind, const Identifier& propertyName, JumpList& doneCases, JumpList& slowCases)
-{
- // base: tag(regT1), payload(regT0)
- // property: tag(regT3), payload(regT2)
-
- int base = currentInstruction[1].u.operand;
- int value = currentInstruction[3].u.operand;
-
- slowCases.append(branch32(NotEqual, regT3, TrustedImm32(JSValue::CellTag)));
- emitIdentifierCheck(regT2, regT2, propertyName, slowCases);
-
- // Write barrier breaks the registers. So after issuing the write barrier,
- // reload the registers.
- emitWriteBarrier(base, value, ShouldFilterBase);
- emitLoadPayload(base, regT0);
- emitLoad(value, regT3, regT2);
-
- JITPutByIdGenerator gen(
- m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(),
- JSValueRegs::payloadOnly(regT0), JSValueRegs(regT3, regT2), regT1, m_codeBlock->ecmaMode(), putKind);
- gen.generateFastPath(*this);
- doneCases.append(jump());
-
- Label coldPathBegin = label();
- gen.slowPathJump().link(this);
-
- // JITPutByIdGenerator only preserves the value and the base's payload, so we have to reload the tag.
- emitLoadTag(base, regT1);
-
- Call call = callOperation(gen.slowPathFunction(), gen.stubInfo(), regT3, regT2, regT1, regT0, propertyName.impl());
- gen.reportSlowPathCall(coldPathBegin, call);
- doneCases.append(jump());
-
- return gen;
-}
-
void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
int base = currentInstruction[1].u.operand;
int property = currentInstruction[2].u.operand;
int value = currentInstruction[3].u.operand;
ArrayProfile* profile = currentInstruction[4].u.arrayProfile;
- ByValInfo* byValInfo = m_byValCompilationInfo[m_byValInstructionIndex].byValInfo;
- linkSlowCaseIfNotJSCell(iter, base); // base cell check
linkSlowCase(iter); // property int32 check
+ linkSlowCaseIfNotJSCell(iter, base); // base cell check
linkSlowCase(iter); // base not array check
JITArrayMode mode = chooseArrayMode(profile);
@@ -558,15 +451,14 @@ void JIT::emitSlow_op_put_by_val(Instruction* currentInstruction, Vector<SlowCas
emitLoad(value, regT0, regT1);
addCallArgument(regT1);
addCallArgument(regT0);
- addCallArgument(TrustedImmPtr(byValInfo));
- Call call = appendCallWithExceptionCheck(isDirect ? operationDirectPutByValOptimize : operationPutByValOptimize);
+ Call call = appendCallWithExceptionCheck(isDirect ? operationDirectPutByVal : operationPutByVal);
#else
// The register selection below is chosen to reduce register swapping on ARM.
// Swapping shouldn't happen on other platforms.
emitLoad(base, regT2, regT1);
emitLoad(property, regT3, regT0);
emitLoad(value, regT5, regT4);
- Call call = callOperation(isDirect ? operationDirectPutByValOptimize : operationPutByValOptimize, regT2, regT1, regT3, regT0, regT5, regT4, byValInfo);
+ Call call = callOperation(isDirect ? operationDirectPutByVal : operationPutByVal, regT2, regT1, regT3, regT0, regT5, regT4);
#endif
m_byValCompilationInfo[m_byValInstructionIndex].slowPathTarget = slowPath;
@@ -583,12 +475,14 @@ void JIT::emit_op_get_by_id(Instruction* currentInstruction)
emitLoad(base, regT1, regT0);
emitJumpSlowCaseIfNotJSCell(base, regT1);
- if (*ident == m_vm->propertyNames->length && shouldEmitProfiling())
- emitArrayProfilingSiteForBytecodeIndexWithCell(regT0, regT2, m_bytecodeOffset);
+ if (*ident == m_vm->propertyNames->length && shouldEmitProfiling()) {
+ loadPtr(Address(regT0, JSCell::structureOffset()), regT2);
+ emitArrayProfilingSiteForBytecodeIndex(regT2, regT3, m_bytecodeOffset);
+ }
JITGetByIdGenerator gen(
- m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(),
- JSValueRegs::payloadOnly(regT0), JSValueRegs(regT1, regT0));
+ m_codeBlock, CodeOrigin(m_bytecodeOffset), RegisterSet::specialRegisters(),
+ callFrameRegister, JSValueRegs::payloadOnly(regT0), JSValueRegs(regT1, regT0), true);
gen.generateFastPath(*this);
addSlowCase(gen.slowPathJump());
m_getByIds.append(gen);
@@ -623,18 +517,21 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction)
int base = currentInstruction[1].u.operand;
int value = currentInstruction[3].u.operand;
- int direct = currentInstruction[8].u.putByIdFlags & PutByIdIsDirect;
+ int direct = currentInstruction[8].u.operand;
- emitWriteBarrier(base, value, ShouldFilterBase);
+ emitWriteBarrier(base, value, ShouldFilterBaseAndValue);
emitLoad2(base, regT1, regT0, value, regT3, regT2);
emitJumpSlowCaseIfNotJSCell(base, regT1);
+
+ emitLoad(base, regT1, regT0);
+ emitLoad(value, regT3, regT2);
JITPutByIdGenerator gen(
- m_codeBlock, CodeOrigin(m_bytecodeOffset), CallSiteIndex(currentInstruction), RegisterSet::stubUnavailableRegisters(),
- JSValueRegs::payloadOnly(regT0), JSValueRegs(regT3, regT2),
- regT1, m_codeBlock->ecmaMode(), direct ? Direct : NotDirect);
+ m_codeBlock, CodeOrigin(m_bytecodeOffset), RegisterSet::specialRegisters(),
+ callFrameRegister, JSValueRegs::payloadOnly(regT0), JSValueRegs(regT3, regT2),
+ regT1, true, m_codeBlock->ecmaMode(), direct ? Direct : NotDirect);
gen.generateFastPath(*this);
addSlowCase(gen.slowPathJump());
@@ -651,10 +548,7 @@ void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCase
linkSlowCase(iter);
Label coldPathBegin(this);
-
- // JITPutByIdGenerator only preserves the value and the base's payload, so we have to reload the tag.
- emitLoadTag(base, regT1);
-
+
JITPutByIdGenerator& gen = m_putByIds[m_putByIdIndex++];
Call call = callOperation(
@@ -663,6 +557,114 @@ void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCase
gen.reportSlowPathCall(coldPathBegin, call);
}
+// Compile a store into an object's property storage. May overwrite base.
+void JIT::compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, PropertyOffset cachedOffset)
+{
+ if (isOutOfLineOffset(cachedOffset))
+ loadPtr(Address(base, JSObject::butterflyOffset()), base);
+ emitStore(indexRelativeToBase(cachedOffset), valueTag, valuePayload, base);
+}
+
+// Compile a load from an object's property storage. May overwrite base.
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, PropertyOffset cachedOffset)
+{
+ if (isInlineOffset(cachedOffset)) {
+ emitLoad(indexRelativeToBase(cachedOffset), resultTag, resultPayload, base);
+ return;
+ }
+
+ RegisterID temp = resultPayload;
+ loadPtr(Address(base, JSObject::butterflyOffset()), temp);
+ emitLoad(indexRelativeToBase(cachedOffset), resultTag, resultPayload, temp);
+}
+
+void JIT::compileGetDirectOffset(JSObject* base, RegisterID resultTag, RegisterID resultPayload, PropertyOffset cachedOffset)
+{
+ if (isInlineOffset(cachedOffset)) {
+ move(TrustedImmPtr(base->locationForOffset(cachedOffset)), resultTag);
+ load32(Address(resultTag, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
+ load32(Address(resultTag, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
+ return;
+ }
+
+ loadPtr(base->butterflyAddress(), resultTag);
+ load32(Address(resultTag, offsetInButterfly(cachedOffset) * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload);
+ load32(Address(resultTag, offsetInButterfly(cachedOffset) * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag);
+}
+
+void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID offset, FinalObjectMode finalObjectMode)
+{
+ ASSERT(sizeof(JSValue) == 8);
+
+ if (finalObjectMode == MayBeFinal) {
+ Jump isInline = branch32(LessThan, offset, TrustedImm32(firstOutOfLineOffset));
+ loadPtr(Address(base, JSObject::butterflyOffset()), base);
+ neg32(offset);
+ Jump done = jump();
+ isInline.link(this);
+ addPtr(TrustedImmPtr(JSObject::offsetOfInlineStorage() - (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), base);
+ done.link(this);
+ } else {
+#if !ASSERT_DISABLED
+ Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(firstOutOfLineOffset));
+ breakpoint();
+ isOutOfLine.link(this);
+#endif
+ loadPtr(Address(base, JSObject::butterflyOffset()), base);
+ neg32(offset);
+ }
+ load32(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), resultPayload);
+ load32(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), resultTag);
+}
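+
+// compileGetDirectOffset hides a layout fact behind one addressing mode: inline
+// properties live directly in the object cell, while out-of-line properties grow
+// downward from the butterfly pointer, and the constant (firstOutOfLineOffset - 2)
+// bias lets both cases share the same BaseIndex load. A pointer-arithmetic sketch
+// with stand-in types (not JSC's real declarations):
+//
+//     using EncodedJSValue = uint64_t;                 // stand-in for JSC's type
+//     static const int32_t firstOutOfLineOffset = 100; // PropertyOffset.h's value
+//
+//     EncodedJSValue* locationForOffset(EncodedJSValue* inlineStorage,
+//                                       EncodedJSValue* butterfly, int32_t offset)
+//     {
+//         if (offset < firstOutOfLineOffset)
+//             return inlineStorage + offset;           // stored inside the cell
+//         // Out-of-line slots sit below the butterfly pointer; the +2 steps
+//         // back over the indexing header between the slots and the pointer.
+//         return butterfly - (offset - firstOutOfLineOffset + 2);
+//     }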
+
+void JIT::emit_op_get_by_pname(Instruction* currentInstruction)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int property = currentInstruction[3].u.operand;
+ unsigned expected = currentInstruction[4].u.operand;
+ int iter = currentInstruction[5].u.operand;
+ int i = currentInstruction[6].u.operand;
+
+ emitLoad2(property, regT1, regT0, base, regT3, regT2);
+ emitJumpSlowCaseIfNotJSCell(property, regT1);
+ addSlowCase(branchPtr(NotEqual, regT0, payloadFor(expected)));
+ // Property registers are now available as the property is known
+ emitJumpSlowCaseIfNotJSCell(base, regT3);
+ emitLoadPayload(iter, regT1);
+
+ // Test base's structure
+ loadPtr(Address(regT2, JSCell::structureOffset()), regT0);
+ addSlowCase(branchPtr(NotEqual, regT0, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructure))));
+ load32(addressFor(i), regT3);
+ sub32(TrustedImm32(1), regT3);
+ addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots))));
+ Jump inlineProperty = branch32(Below, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity)));
+ add32(TrustedImm32(firstOutOfLineOffset), regT3);
+ sub32(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_cachedStructureInlineCapacity)), regT3);
+ inlineProperty.link(this);
+ compileGetDirectOffset(regT2, regT1, regT0, regT3);
+
+ emitStore(dst, regT1, regT0);
+}
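+
+// op_get_by_pname is the fast path for reading o[key] inside a for-in loop; it
+// is only sound while the runtime state still matches what the iterator cached,
+// so the fast path is mostly guards. In emission order:
+//   1. property is a cell and equals the iterator-produced name (expected)
+//   2. base is a cell and base->structure() == iterator->m_cachedStructure
+//   3. i - 1 is below m_numCacheableSlots, i.e. the slot is still cacheable
+// Any failure falls back to operationGetByValGeneric in the slow path, which
+// recomputes the access from scratch. The branch before compileGetDirectOffset
+// rebases the iteration index past the cached inline capacity when the property
+// lives in out-of-line storage.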
+
+void JIT::emitSlow_op_get_by_pname(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
+{
+ int dst = currentInstruction[1].u.operand;
+ int base = currentInstruction[2].u.operand;
+ int property = currentInstruction[3].u.operand;
+
+ linkSlowCaseIfNotJSCell(iter, property);
+ linkSlowCase(iter);
+ linkSlowCaseIfNotJSCell(iter, base);
+ linkSlowCase(iter);
+ linkSlowCase(iter);
+
+ emitLoad(base, regT1, regT0);
+ emitLoad(property, regT3, regT2);
+ callOperation(operationGetByValGeneric, dst, regT1, regT0, regT3, regT2);
+}
+
void JIT::emitVarInjectionCheck(bool needsVarInjectionChecks)
{
if (!needsVarInjectionChecks)
@@ -670,11 +672,17 @@ void JIT::emitVarInjectionCheck(bool needsVarInjectionChecks)
addSlowCase(branch8(Equal, AbsoluteAddress(m_codeBlock->globalObject()->varInjectionWatchpoint()->addressOfState()), TrustedImm32(IsInvalidated)));
}
-void JIT::emitResolveClosure(int dst, int scope, bool needsVarInjectionChecks, unsigned depth)
+void JIT::emitResolveClosure(int dst, bool needsVarInjectionChecks, unsigned depth)
{
emitVarInjectionCheck(needsVarInjectionChecks);
move(TrustedImm32(JSValue::CellTag), regT1);
- emitLoadPayload(scope, regT0);
+ emitLoadPayload(JSStack::ScopeChain, regT0);
+ if (m_codeBlock->needsActivation()) {
+ emitLoadPayload(m_codeBlock->activationRegister().offset(), regT2);
+ Jump noActivation = branchTestPtr(Zero, regT2);
+ loadPtr(Address(regT2, JSScope::offsetOfNext()), regT0);
+ noActivation.link(this);
+ }
for (unsigned i = 0; i < depth; ++i)
loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0);
emitStore(dst, regT1, regT0);
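
emitResolveClosure is a compile-time-unrolled walk up the scope chain, starting from the frame's ScopeChain slot; when the code block needs an activation, the walk first hops past it if it has actually been created at runtime (the branchTestPtr null check). The unrolled loop is equivalent to:

    JSScope* resolveClosure(JSScope* start, unsigned depth)
    {
        JSScope* scope = start;
        for (unsigned i = 0; i < depth; ++i)
            scope = scope->next(); // loadPtr(Address(regT0, JSScope::offsetOfNext()), regT0)
        return scope;
    }
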
@@ -683,214 +691,96 @@ void JIT::emitResolveClosure(int dst, int scope, bool needsVarInjectionChecks, u
void JIT::emit_op_resolve_scope(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
- int scope = currentInstruction[2].u.operand;
- ResolveType resolveType = static_cast<ResolveType>(currentInstruction[4].u.operand);
- unsigned depth = currentInstruction[5].u.operand;
- auto emitCode = [&] (ResolveType resolveType) {
- switch (resolveType) {
- case GlobalProperty:
- case GlobalVar:
- case GlobalLexicalVar:
- case GlobalPropertyWithVarInjectionChecks:
- case GlobalVarWithVarInjectionChecks:
- case GlobalLexicalVarWithVarInjectionChecks: {
- JSScope* constantScope = JSScope::constantScopeForCodeBlock(resolveType, m_codeBlock);
- RELEASE_ASSERT(constantScope);
- emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
- move(TrustedImm32(JSValue::CellTag), regT1);
- move(TrustedImmPtr(constantScope), regT0);
- emitStore(dst, regT1, regT0);
- break;
- }
- case ClosureVar:
- case ClosureVarWithVarInjectionChecks:
- emitResolveClosure(dst, scope, needsVarInjectionChecks(resolveType), depth);
- break;
- case ModuleVar:
- move(TrustedImm32(JSValue::CellTag), regT1);
- move(TrustedImmPtr(currentInstruction[6].u.jsCell.get()), regT0);
- emitStore(dst, regT1, regT0);
- break;
- case Dynamic:
- addSlowCase(jump());
- break;
- case LocalClosureVar:
- case UnresolvedProperty:
- case UnresolvedPropertyWithVarInjectionChecks:
- RELEASE_ASSERT_NOT_REACHED();
- }
- };
- switch (resolveType) {
- case UnresolvedProperty:
- case UnresolvedPropertyWithVarInjectionChecks: {
- JumpList skipToEnd;
- load32(&currentInstruction[4], regT0);
-
- Jump notGlobalProperty = branch32(NotEqual, regT0, TrustedImm32(GlobalProperty));
- emitCode(GlobalProperty);
- skipToEnd.append(jump());
- notGlobalProperty.link(this);
-
- Jump notGlobalPropertyWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalPropertyWithVarInjectionChecks));
- emitCode(GlobalPropertyWithVarInjectionChecks);
- skipToEnd.append(jump());
- notGlobalPropertyWithVarInjections.link(this);
-
- Jump notGlobalLexicalVar = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVar));
- emitCode(GlobalLexicalVar);
- skipToEnd.append(jump());
- notGlobalLexicalVar.link(this);
-
- Jump notGlobalLexicalVarWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVarWithVarInjectionChecks));
- emitCode(GlobalLexicalVarWithVarInjectionChecks);
- skipToEnd.append(jump());
- notGlobalLexicalVarWithVarInjections.link(this);
+ ResolveType resolveType = static_cast<ResolveType>(currentInstruction[3].u.operand);
+ unsigned depth = currentInstruction[4].u.operand;
- addSlowCase(jump());
- skipToEnd.link(this);
+ switch (resolveType) {
+ case GlobalProperty:
+ case GlobalVar:
+ case GlobalPropertyWithVarInjectionChecks:
+ case GlobalVarWithVarInjectionChecks:
+ emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
+ move(TrustedImm32(JSValue::CellTag), regT1);
+ move(TrustedImmPtr(m_codeBlock->globalObject()), regT0);
+ emitStore(dst, regT1, regT0);
break;
- }
-
- default:
- emitCode(resolveType);
+ case ClosureVar:
+ case ClosureVarWithVarInjectionChecks:
+ emitResolveClosure(dst, needsVarInjectionChecks(resolveType), depth);
+ break;
+ case Dynamic:
+ addSlowCase(jump());
break;
}
}
void JIT::emitSlow_op_resolve_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- ResolveType resolveType = static_cast<ResolveType>(currentInstruction[4].u.operand);
+ int dst = currentInstruction[1].u.operand;
+ ResolveType resolveType = static_cast<ResolveType>(currentInstruction[3].u.operand);
- if (resolveType == GlobalProperty || resolveType == GlobalVar || resolveType == ClosureVar || resolveType == GlobalLexicalVar || resolveType == ModuleVar)
+ if (resolveType == GlobalProperty || resolveType == GlobalVar || resolveType == ClosureVar)
return;
- if (resolveType == UnresolvedProperty || resolveType == UnresolvedPropertyWithVarInjectionChecks) {
- linkSlowCase(iter); // Var injections check for GlobalPropertyWithVarInjectionChecks.
- linkSlowCase(iter); // Var injections check for GlobalLexicalVarWithVarInjectionChecks.
- }
linkSlowCase(iter);
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_resolve_scope);
- slowPathCall.call();
+ int32_t identifierIndex = currentInstruction[2].u.operand;
+ callOperation(operationResolveScope, dst, identifierIndex);
}
void JIT::emitLoadWithStructureCheck(int scope, Structure** structureSlot)
{
emitLoad(scope, regT1, regT0);
loadPtr(structureSlot, regT2);
- addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::structureIDOffset()), regT2));
+ addSlowCase(branchPtr(NotEqual, Address(regT0, JSCell::structureOffset()), regT2));
}
-void JIT::emitGetVarFromPointer(JSValue* operand, GPRReg tag, GPRReg payload)
+void JIT::emitGetGlobalProperty(uintptr_t* operandSlot)
{
- uintptr_t rawAddress = bitwise_cast<uintptr_t>(operand);
- load32(bitwise_cast<void*>(rawAddress + TagOffset), tag);
- load32(bitwise_cast<void*>(rawAddress + PayloadOffset), payload);
+ move(regT0, regT2);
+ load32(operandSlot, regT3);
+ compileGetDirectOffset(regT2, regT1, regT0, regT3, KnownNotFinal);
}
-void JIT::emitGetVarFromIndirectPointer(JSValue** operand, GPRReg tag, GPRReg payload)
+
+void JIT::emitGetGlobalVar(uintptr_t operand)
{
- loadPtr(operand, payload);
- load32(Address(payload, TagOffset), tag);
- load32(Address(payload, PayloadOffset), payload);
+ load32(reinterpret_cast<char*>(operand) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag), regT1);
+ load32(reinterpret_cast<char*>(operand) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), regT0);
}
void JIT::emitGetClosureVar(int scope, uintptr_t operand)
{
emitLoad(scope, regT1, regT0);
- load32(Address(regT0, JSEnvironmentRecord::offsetOfVariables() + operand * sizeof(Register) + TagOffset), regT1);
- load32(Address(regT0, JSEnvironmentRecord::offsetOfVariables() + operand * sizeof(Register) + PayloadOffset), regT0);
+ loadPtr(Address(regT0, JSVariableObject::offsetOfRegisters()), regT0);
+ load32(Address(regT0, operand * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), regT1);
+ load32(Address(regT0, operand * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), regT0);
}
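
On this older branch, closure variables do not live inline in the scope cell; emitGetClosureVar chases JSVariableObject::offsetOfRegisters() to a separate Register array and does two 32-bit loads. A scalar sketch, assuming this era's accessor names:

    JSValue getClosureVar(JSVariableObject* scope, uintptr_t operand)
    {
        Register* registers = scope->registers(); // JSVariableObject::offsetOfRegisters()
        return registers[operand].jsValue();      // tag and payload, two 32-bit loads
    }
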
void JIT::emit_op_get_from_scope(Instruction* currentInstruction)
{
int dst = currentInstruction[1].u.operand;
int scope = currentInstruction[2].u.operand;
- ResolveType resolveType = GetPutInfo(currentInstruction[4].u.operand).resolveType();
+ ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
Structure** structureSlot = currentInstruction[5].u.structure.slot();
uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&currentInstruction[6].u.pointer);
-
- auto emitCode = [&] (ResolveType resolveType, bool indirectLoadForOperand) {
- switch (resolveType) {
- case GlobalProperty:
- case GlobalPropertyWithVarInjectionChecks: {
- emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection.
- GPRReg base = regT2;
- GPRReg resultTag = regT1;
- GPRReg resultPayload = regT0;
- GPRReg offset = regT3;
-
- move(regT0, base);
- load32(operandSlot, offset);
- if (!ASSERT_DISABLED) {
- Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(firstOutOfLineOffset));
- abortWithReason(JITOffsetIsNotOutOfLine);
- isOutOfLine.link(this);
- }
- loadPtr(Address(base, JSObject::butterflyOffset()), base);
- neg32(offset);
- load32(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) + (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), resultPayload);
- load32(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) + (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue)), resultTag);
- break;
- }
- case GlobalVar:
- case GlobalVarWithVarInjectionChecks:
- case GlobalLexicalVar:
- case GlobalLexicalVarWithVarInjectionChecks:
- emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
- if (indirectLoadForOperand)
- emitGetVarFromIndirectPointer(bitwise_cast<JSValue**>(operandSlot), regT1, regT0);
- else
- emitGetVarFromPointer(bitwise_cast<JSValue*>(*operandSlot), regT1, regT0);
- if (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks) // TDZ check.
- addSlowCase(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));
- break;
- case ClosureVar:
- case ClosureVarWithVarInjectionChecks:
- emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
- emitGetClosureVar(scope, *operandSlot);
- break;
- case Dynamic:
- addSlowCase(jump());
- break;
- case ModuleVar:
- case LocalClosureVar:
- case UnresolvedProperty:
- case UnresolvedPropertyWithVarInjectionChecks:
- RELEASE_ASSERT_NOT_REACHED();
- }
- };
switch (resolveType) {
- case UnresolvedProperty:
- case UnresolvedPropertyWithVarInjectionChecks: {
- JumpList skipToEnd;
- load32(&currentInstruction[4], regT0);
- and32(TrustedImm32(GetPutInfo::typeBits), regT0); // Load ResolveType into T0
-
- Jump isGlobalProperty = branch32(Equal, regT0, TrustedImm32(GlobalProperty));
- Jump notGlobalPropertyWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalPropertyWithVarInjectionChecks));
- isGlobalProperty.link(this);
- emitCode(GlobalProperty, false);
- skipToEnd.append(jump());
- notGlobalPropertyWithVarInjections.link(this);
-
- Jump notGlobalLexicalVar = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVar));
- emitCode(GlobalLexicalVar, true);
- skipToEnd.append(jump());
- notGlobalLexicalVar.link(this);
-
- Jump notGlobalLexicalVarWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVarWithVarInjectionChecks));
- emitCode(GlobalLexicalVarWithVarInjectionChecks, true);
- skipToEnd.append(jump());
- notGlobalLexicalVarWithVarInjections.link(this);
-
- addSlowCase(jump());
-
- skipToEnd.link(this);
+ case GlobalProperty:
+ case GlobalPropertyWithVarInjectionChecks:
+ emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection.
+ emitGetGlobalProperty(operandSlot);
break;
- }
-
- default:
- emitCode(resolveType, false);
+ case GlobalVar:
+ case GlobalVarWithVarInjectionChecks:
+ emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
+ emitGetGlobalVar(*operandSlot);
+ break;
+ case ClosureVar:
+ case ClosureVarWithVarInjectionChecks:
+ emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
+ emitGetClosureVar(scope, *operandSlot);
+ break;
+ case Dynamic:
+ addSlowCase(jump());
break;
}
emitValueProfilingSite();
@@ -900,221 +790,141 @@ void JIT::emit_op_get_from_scope(Instruction* currentInstruction)
void JIT::emitSlow_op_get_from_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
int dst = currentInstruction[1].u.operand;
- ResolveType resolveType = GetPutInfo(currentInstruction[4].u.operand).resolveType();
+ ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
if (resolveType == GlobalVar || resolveType == ClosureVar)
return;
- if (resolveType == GlobalLexicalVarWithVarInjectionChecks) // Var Injections check.
- linkSlowCase(iter);
-
- if (resolveType == UnresolvedProperty || resolveType == UnresolvedPropertyWithVarInjectionChecks) {
- // GlobalProperty/GlobalPropertyWithVarInjectionChecks
- linkSlowCase(iter); // emitLoadWithStructureCheck
- // GlobalLexicalVar
- linkSlowCase(iter); // TDZ check.
- // GlobalLexicalVarWithVarInjectionChecks.
- linkSlowCase(iter); // var injection check.
- linkSlowCase(iter); // TDZ check.
- }
-
linkSlowCase(iter);
callOperation(WithProfile, operationGetFromScope, dst, currentInstruction);
}
-void JIT::emitPutGlobalVariable(JSValue* operand, int value, WatchpointSet* set)
+void JIT::emitPutGlobalProperty(uintptr_t* operandSlot, int value)
{
- emitLoad(value, regT1, regT0);
- emitNotifyWrite(set);
- uintptr_t rawAddress = bitwise_cast<uintptr_t>(operand);
- store32(regT1, bitwise_cast<void*>(rawAddress + TagOffset));
- store32(regT0, bitwise_cast<void*>(rawAddress + PayloadOffset));
+ emitLoad(value, regT3, regT2);
+
+ loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
+ loadPtr(operandSlot, regT1);
+ negPtr(regT1);
+ store32(regT3, BaseIndex(regT0, regT1, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ store32(regT2, BaseIndex(regT0, regT1, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+}
+
+void JIT::emitNotifyWrite(RegisterID tag, RegisterID payload, RegisterID scratch, VariableWatchpointSet* set)
+{
+ if (!set || set->state() == IsInvalidated)
+ return;
+
+ load8(set->addressOfState(), scratch);
+
+ JumpList ready;
+
+ ready.append(branch32(Equal, scratch, TrustedImm32(IsInvalidated)));
+
+ if (set->state() == ClearWatchpoint) {
+ Jump isWatched = branch32(NotEqual, scratch, TrustedImm32(ClearWatchpoint));
+
+ store32(tag, &set->addressOfInferredValue()->u.asBits.tag);
+ store32(payload, &set->addressOfInferredValue()->u.asBits.payload);
+ store8(TrustedImm32(IsWatched), set->addressOfState());
+ ready.append(jump());
+
+ isWatched.link(this);
+ }
+
+ Jump definitelyNotEqual = branch32(
+ NotEqual, AbsoluteAddress(&set->addressOfInferredValue()->u.asBits.payload), payload);
+ ready.append(branch32(
+ Equal, AbsoluteAddress(&set->addressOfInferredValue()->u.asBits.tag), tag));
+ definitelyNotEqual.link(this);
+ addSlowCase(branchTest8(NonZero, AbsoluteAddress(set->addressOfSetIsNotEmpty())));
+ store8(TrustedImm32(IsInvalidated), set->addressOfState());
+ store32(
+ TrustedImm32(JSValue::EmptyValueTag), &set->addressOfInferredValue()->u.asBits.tag);
+ store32(TrustedImm32(0), &set->addressOfInferredValue()->u.asBits.payload);
+
+ ready.link(this);
}
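
emitNotifyWrite inlines the VariableWatchpointSet write protocol, specialized on the set's compile-time state (the ClearWatchpoint arm is emitted only when it is still reachable). A scalar pseudocode version, with hypothetical helper names standing in for the addressOf* accessors:

    void notifyWrite(VariableWatchpointSet* set, JSValue value)
    {
        if (!set || set->state() == IsInvalidated)
            return;                       // nothing is being inferred any more
        if (set->state() == ClearWatchpoint) {
            set->setInferredValue(value); // first write: adopt this value
            set->setState(IsWatched);
            return;
        }
        if (value == set->inferredValue())
            return;                       // same value: the inference still holds
        if (set->setIsNotEmpty())         // someone registered a watchpoint:
            return callSlowPath();        // it must fire in C++, not here
        set->setState(IsInvalidated);     // no watchers: invalidate in place
        set->setInferredValue(JSValue()); // EmptyValueTag / zero payload
    }
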
-void JIT::emitPutGlobalVariableIndirect(JSValue** addressOfOperand, int value, WatchpointSet** indirectWatchpointSet)
+void JIT::emitPutGlobalVar(uintptr_t operand, int value, VariableWatchpointSet* set)
{
emitLoad(value, regT1, regT0);
- loadPtr(indirectWatchpointSet, regT2);
- emitNotifyWrite(regT2);
- loadPtr(addressOfOperand, regT2);
- store32(regT1, Address(regT2, TagOffset));
- store32(regT0, Address(regT2, PayloadOffset));
+ emitNotifyWrite(regT1, regT0, regT2, set);
+ store32(regT1, reinterpret_cast<char*>(operand) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ store32(regT0, reinterpret_cast<char*>(operand) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
}
-void JIT::emitPutClosureVar(int scope, uintptr_t operand, int value, WatchpointSet* set)
+void JIT::emitPutClosureVar(int scope, uintptr_t operand, int value)
{
emitLoad(value, regT3, regT2);
emitLoad(scope, regT1, regT0);
- emitNotifyWrite(set);
- store32(regT3, Address(regT0, JSEnvironmentRecord::offsetOfVariables() + operand * sizeof(Register) + TagOffset));
- store32(regT2, Address(regT0, JSEnvironmentRecord::offsetOfVariables() + operand * sizeof(Register) + PayloadOffset));
+ loadPtr(Address(regT0, JSVariableObject::offsetOfRegisters()), regT0);
+ store32(regT3, Address(regT0, operand * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ store32(regT2, Address(regT0, operand * sizeof(Register) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
}
void JIT::emit_op_put_to_scope(Instruction* currentInstruction)
{
int scope = currentInstruction[1].u.operand;
int value = currentInstruction[3].u.operand;
- GetPutInfo getPutInfo = GetPutInfo(currentInstruction[4].u.operand);
- ResolveType resolveType = getPutInfo.resolveType();
+ ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
Structure** structureSlot = currentInstruction[5].u.structure.slot();
uintptr_t* operandSlot = reinterpret_cast<uintptr_t*>(&currentInstruction[6].u.pointer);
-
- auto emitCode = [&] (ResolveType resolveType, bool indirectLoadForOperand) {
- switch (resolveType) {
- case GlobalProperty:
- case GlobalPropertyWithVarInjectionChecks: {
- emitWriteBarrier(m_codeBlock->globalObject(), value, ShouldFilterValue);
- emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection.
- emitLoad(value, regT3, regT2);
-
- loadPtr(Address(regT0, JSObject::butterflyOffset()), regT0);
- loadPtr(operandSlot, regT1);
- negPtr(regT1);
- store32(regT3, BaseIndex(regT0, regT1, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
- store32(regT2, BaseIndex(regT0, regT1, TimesEight, (firstOutOfLineOffset - 2) * sizeof(EncodedJSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
- break;
- }
- case GlobalVar:
- case GlobalVarWithVarInjectionChecks:
- case GlobalLexicalVar:
- case GlobalLexicalVarWithVarInjectionChecks: {
- JSScope* constantScope = JSScope::constantScopeForCodeBlock(resolveType, m_codeBlock);
- RELEASE_ASSERT(constantScope);
- emitWriteBarrier(constantScope, value, ShouldFilterValue);
- emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
- if (getPutInfo.initializationMode() != Initialization && (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)) {
- // We need to do a TDZ check here because we can't always prove we need to emit TDZ checks statically.
- if (indirectLoadForOperand)
- emitGetVarFromIndirectPointer(bitwise_cast<JSValue**>(operandSlot), regT1, regT0);
- else
- emitGetVarFromPointer(bitwise_cast<JSValue*>(*operandSlot), regT1, regT0);
- addSlowCase(branch32(Equal, regT1, TrustedImm32(JSValue::EmptyValueTag)));
- }
- if (indirectLoadForOperand)
- emitPutGlobalVariableIndirect(bitwise_cast<JSValue**>(operandSlot), value, bitwise_cast<WatchpointSet**>(&currentInstruction[5]));
- else
- emitPutGlobalVariable(bitwise_cast<JSValue*>(*operandSlot), value, currentInstruction[5].u.watchpointSet);
- break;
- }
- case LocalClosureVar:
- case ClosureVar:
- case ClosureVarWithVarInjectionChecks:
- emitWriteBarrier(scope, value, ShouldFilterValue);
- emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
- emitPutClosureVar(scope, *operandSlot, value, currentInstruction[5].u.watchpointSet);
- break;
- case ModuleVar:
- case Dynamic:
- addSlowCase(jump());
- break;
- case UnresolvedProperty:
- case UnresolvedPropertyWithVarInjectionChecks:
- RELEASE_ASSERT_NOT_REACHED();
- }
- };
switch (resolveType) {
- case UnresolvedProperty:
- case UnresolvedPropertyWithVarInjectionChecks: {
- JumpList skipToEnd;
- load32(&currentInstruction[4], regT0);
- and32(TrustedImm32(GetPutInfo::typeBits), regT0); // Load ResolveType into T0
-
- Jump isGlobalProperty = branch32(Equal, regT0, TrustedImm32(GlobalProperty));
- Jump notGlobalPropertyWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalPropertyWithVarInjectionChecks));
- isGlobalProperty.link(this);
- emitCode(GlobalProperty, false);
- skipToEnd.append(jump());
- notGlobalPropertyWithVarInjections.link(this);
-
- Jump notGlobalLexicalVar = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVar));
- emitCode(GlobalLexicalVar, true);
- skipToEnd.append(jump());
- notGlobalLexicalVar.link(this);
-
- Jump notGlobalLexicalVarWithVarInjections = branch32(NotEqual, regT0, TrustedImm32(GlobalLexicalVarWithVarInjectionChecks));
- emitCode(GlobalLexicalVarWithVarInjectionChecks, true);
- skipToEnd.append(jump());
- notGlobalLexicalVarWithVarInjections.link(this);
-
- addSlowCase(jump());
-
- skipToEnd.link(this);
+ case GlobalProperty:
+ case GlobalPropertyWithVarInjectionChecks:
+ emitWriteBarrier(m_codeBlock->globalObject(), value, ShouldFilterValue);
+ emitLoadWithStructureCheck(scope, structureSlot); // Structure check covers var injection.
+ emitPutGlobalProperty(operandSlot, value);
break;
- }
-
- default:
- emitCode(resolveType, false);
+ case GlobalVar:
+ case GlobalVarWithVarInjectionChecks:
+ emitWriteBarrier(m_codeBlock->globalObject(), value, ShouldFilterValue);
+ emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
+ emitPutGlobalVar(*operandSlot, value, currentInstruction[5].u.watchpointSet);
+ break;
+ case ClosureVar:
+ case ClosureVarWithVarInjectionChecks:
+ emitWriteBarrier(scope, value, ShouldFilterValue);
+ emitVarInjectionCheck(needsVarInjectionChecks(resolveType));
+ emitPutClosureVar(scope, *operandSlot, value);
+ break;
+ case Dynamic:
+ addSlowCase(jump());
break;
}
}
void JIT::emitSlow_op_put_to_scope(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter)
{
- GetPutInfo getPutInfo = GetPutInfo(currentInstruction[4].u.operand);
- ResolveType resolveType = getPutInfo.resolveType();
+ ResolveType resolveType = ResolveModeAndType(currentInstruction[4].u.operand).type();
unsigned linkCount = 0;
- if (resolveType != GlobalVar && resolveType != ClosureVar && resolveType != LocalClosureVar && resolveType != GlobalLexicalVar)
+ if (resolveType != GlobalVar && resolveType != ClosureVar)
linkCount++;
- if ((resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks || resolveType == LocalClosureVar
- || resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)
+ if ((resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
&& currentInstruction[5].u.watchpointSet->state() != IsInvalidated)
linkCount++;
- if (getPutInfo.initializationMode() != Initialization && (resolveType == GlobalLexicalVar || resolveType == GlobalLexicalVarWithVarInjectionChecks)) // TDZ check.
- linkCount++;
- if (resolveType == UnresolvedProperty || resolveType == UnresolvedPropertyWithVarInjectionChecks) {
- // GlobalProperty/GlobalPropertyWithVarInjectionChecks
- linkCount++; // emitLoadWithStructureCheck
-
- // GlobalLexicalVar
- bool needsTDZCheck = getPutInfo.initializationMode() != Initialization;
- if (needsTDZCheck)
- linkCount++;
- linkCount++; // Notify write check.
-
- // GlobalLexicalVarWithVarInjectionChecks
- linkCount++; // var injection check.
- if (needsTDZCheck)
- linkCount++;
- linkCount++; // Notify write check.
- }
if (!linkCount)
return;
while (linkCount--)
linkSlowCase(iter);
-
- if (resolveType == ModuleVar) {
- JITSlowPathCall slowPathCall(this, currentInstruction, slow_path_throw_strict_mode_readonly_property_write_error);
- slowPathCall.call();
- } else
- callOperation(operationPutToScope, currentInstruction);
+ callOperation(operationPutToScope, currentInstruction);
}
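
The bookkeeping above is easy to get wrong: emitSlow_op_put_to_scope must call linkSlowCase exactly once per addSlowCase jump the fast path emitted, or the iterator drifts and later slow cases link against the wrong jumps. A self-contained restatement of the retained counting logic, with stand-in enums in place of JSC's real ones:

    #include <cstdio>

    enum ResolveType { GlobalProperty, GlobalVar, GlobalVarWithVarInjectionChecks, ClosureVar, ClosureVarWithVarInjectionChecks, Dynamic };
    enum WatchpointState { IsWatched, IsInvalidated };

    unsigned slowCaseCount(ResolveType resolveType, WatchpointState state)
    {
        unsigned linkCount = 0;
        if (resolveType != GlobalVar && resolveType != ClosureVar)
            linkCount++; // structure check, var injection check, or the unconditional Dynamic jump
        if ((resolveType == GlobalVar || resolveType == GlobalVarWithVarInjectionChecks)
            && state != IsInvalidated)
            linkCount++; // the watchpoint notification slow case
        return linkCount;
    }

    int main()
    {
        std::printf("%u\n", slowCaseCount(GlobalVarWithVarInjectionChecks, IsWatched)); // prints 2
    }
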
-void JIT::emit_op_get_from_arguments(Instruction* currentInstruction)
+void JIT::emit_op_init_global_const(Instruction* currentInstruction)
{
- int dst = currentInstruction[1].u.operand;
- int arguments = currentInstruction[2].u.operand;
- int index = currentInstruction[3].u.operand;
-
- emitLoadPayload(arguments, regT0);
- load32(Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>) + TagOffset), regT1);
- load32(Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>) + PayloadOffset), regT0);
- emitValueProfilingSite();
- emitStore(dst, regT1, regT0);
-}
-void JIT::emit_op_put_to_arguments(Instruction* currentInstruction)
-{
- int arguments = currentInstruction[1].u.operand;
- int index = currentInstruction[2].u.operand;
- int value = currentInstruction[3].u.operand;
-
- emitWriteBarrier(arguments, value, ShouldFilterValue);
- emitLoadPayload(arguments, regT0);
- emitLoad(value, regT1, regT2);
- store32(regT1, Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>) + TagOffset));
- store32(regT2, Address(regT0, DirectArguments::storageOffset() + index * sizeof(WriteBarrier<Unknown>) + PayloadOffset));
+ WriteBarrier<Unknown>* registerPointer = currentInstruction[1].u.registerPointer;
+ int value = currentInstruction[2].u.operand;
+
+ JSGlobalObject* globalObject = m_codeBlock->globalObject();
+
+ emitWriteBarrier(globalObject, value, ShouldFilterValue);
+
+ emitLoad(value, regT1, regT0);
+ store32(regT1, registerPointer->tagPointer());
+ store32(regT0, registerPointer->payloadPointer());
}
} // namespace JSC
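
emit_op_init_global_const is a JSVALUE32_64 code path: a WriteBarrier<Unknown> slot holds a 64-bit encoded value that 32-bit builds address as separate tag and payload halves, which is why the store above is two plain store32 calls. A minimal model of that split (the little-endian, payload-first layout shown is an assumption for illustration):

    #include <cassert>
    #include <cstdint>

    struct ValueSlot {
        uint32_t payload; // little-endian layout: payload in the low word
        uint32_t tag;     // type tag in the high word
        uint32_t* payloadPointer() { return &payload; }
        uint32_t* tagPointer() { return &tag; }
    };

    int main()
    {
        ValueSlot slot{};
        *slot.tagPointer() = 0xfffffffdu; // an illustrative Int32-style tag, not JSC's exact encoding
        *slot.payloadPointer() = 42;
        assert(slot.tag == 0xfffffffdu && slot.payload == 42);
    }
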
diff --git a/Source/JavaScriptCore/jit/JITRightShiftGenerator.cpp b/Source/JavaScriptCore/jit/JITRightShiftGenerator.cpp
deleted file mode 100644
index 4e75fafc0..000000000
--- a/Source/JavaScriptCore/jit/JITRightShiftGenerator.cpp
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "JITRightShiftGenerator.h"
-
-#if ENABLE(JIT)
-
-namespace JSC {
-
-void JITRightShiftGenerator::generateFastPath(CCallHelpers& jit)
-{
- ASSERT(m_scratchGPR != InvalidGPRReg);
- ASSERT(m_scratchGPR != m_left.payloadGPR());
- ASSERT(m_scratchGPR != m_right.payloadGPR());
-#if USE(JSVALUE32_64)
- ASSERT(m_scratchGPR != m_left.tagGPR());
- ASSERT(m_scratchGPR != m_right.tagGPR());
- ASSERT(m_scratchFPR != InvalidFPRReg);
-#endif
-
- ASSERT(!m_leftOperand.isConstInt32() || !m_rightOperand.isConstInt32());
-
- m_didEmitFastPath = true;
-
- if (m_rightOperand.isConstInt32()) {
- // Try to do (intVar >> intConstant).
- CCallHelpers::Jump notInt = jit.branchIfNotInt32(m_left);
-
- jit.moveValueRegs(m_left, m_result);
- int32_t shiftAmount = m_rightOperand.asConstInt32() & 0x1f;
- if (shiftAmount) {
- if (m_shiftType == SignedShift)
- jit.rshift32(CCallHelpers::Imm32(shiftAmount), m_result.payloadGPR());
- else
- jit.urshift32(CCallHelpers::Imm32(shiftAmount), m_result.payloadGPR());
-#if USE(JSVALUE64)
- jit.or64(GPRInfo::tagTypeNumberRegister, m_result.payloadGPR());
-#endif
- }
-
- if (jit.supportsFloatingPointTruncate()) {
- m_endJumpList.append(jit.jump()); // Terminate the above case before emitting more code.
-
- // Try to do (doubleVar >> intConstant).
- notInt.link(&jit);
-
- m_slowPathJumpList.append(jit.branchIfNotNumber(m_left, m_scratchGPR));
-
- jit.unboxDoubleNonDestructive(m_left, m_leftFPR, m_scratchGPR, m_scratchFPR);
- m_slowPathJumpList.append(jit.branchTruncateDoubleToInt32(m_leftFPR, m_scratchGPR));
-
- if (shiftAmount) {
- if (m_shiftType == SignedShift)
- jit.rshift32(CCallHelpers::Imm32(shiftAmount), m_scratchGPR);
- else
- jit.urshift32(CCallHelpers::Imm32(shiftAmount), m_scratchGPR);
- }
- jit.boxInt32(m_scratchGPR, m_result);
-
- } else
- m_slowPathJumpList.append(notInt);
-
- } else {
- // Try to do (intConstant >> intVar) or (intVar >> intVar).
- m_slowPathJumpList.append(jit.branchIfNotInt32(m_right));
-
- GPRReg rightOperandGPR = m_right.payloadGPR();
- if (rightOperandGPR == m_result.payloadGPR())
- rightOperandGPR = m_scratchGPR;
-
- CCallHelpers::Jump leftNotInt;
- if (m_leftOperand.isConstInt32()) {
- jit.move(m_right.payloadGPR(), rightOperandGPR);
-#if USE(JSVALUE32_64)
- jit.move(m_right.tagGPR(), m_result.tagGPR());
-#endif
- jit.move(CCallHelpers::Imm32(m_leftOperand.asConstInt32()), m_result.payloadGPR());
- } else {
- leftNotInt = jit.branchIfNotInt32(m_left);
- jit.move(m_right.payloadGPR(), rightOperandGPR);
- jit.moveValueRegs(m_left, m_result);
- }
-
- if (m_shiftType == SignedShift)
- jit.rshift32(rightOperandGPR, m_result.payloadGPR());
- else
- jit.urshift32(rightOperandGPR, m_result.payloadGPR());
-#if USE(JSVALUE64)
- jit.or64(GPRInfo::tagTypeNumberRegister, m_result.payloadGPR());
-#endif
- if (m_leftOperand.isConstInt32())
- return;
-
- if (jit.supportsFloatingPointTruncate()) {
- m_endJumpList.append(jit.jump()); // Terminate the above case before emitting more code.
-
- // Try to do (doubleVar >> intVar).
- leftNotInt.link(&jit);
-
- m_slowPathJumpList.append(jit.branchIfNotNumber(m_left, m_scratchGPR));
- jit.unboxDoubleNonDestructive(m_left, m_leftFPR, m_scratchGPR, m_scratchFPR);
- m_slowPathJumpList.append(jit.branchTruncateDoubleToInt32(m_leftFPR, m_scratchGPR));
-
- if (m_shiftType == SignedShift)
- jit.rshift32(m_right.payloadGPR(), m_scratchGPR);
- else
- jit.urshift32(m_right.payloadGPR(), m_scratchGPR);
- jit.boxInt32(m_scratchGPR, m_result);
-
- } else
- m_slowPathJumpList.append(leftNotInt);
- }
-}
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
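
The deleted fast path masks every shift count with `& 0x1f` because ECMAScript's shift operators (like the underlying 32-bit hardware shifts) honor only the low five bits of the right operand. A self-contained check of that semantics; the helper names are mine:

    #include <cassert>
    #include <cstdint>

    int32_t jsSignedRightShift(int32_t left, int32_t right)
    {
        return left >> (static_cast<uint32_t>(right) & 0x1f); // arithmetic shift on JSC's targets
    }

    uint32_t jsUnsignedRightShift(uint32_t left, int32_t right)
    {
        return left >> (static_cast<uint32_t>(right) & 0x1f); // logical shift
    }

    int main()
    {
        assert(jsSignedRightShift(-8, 33) == -4); // 33 & 0x1f == 1, so this is -8 >> 1
        assert(jsUnsignedRightShift(0x80000000u, 31) == 1);
    }
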
diff --git a/Source/JavaScriptCore/jit/JITRightShiftGenerator.h b/Source/JavaScriptCore/jit/JITRightShiftGenerator.h
deleted file mode 100644
index a3676ec3c..000000000
--- a/Source/JavaScriptCore/jit/JITRightShiftGenerator.h
+++ /dev/null
@@ -1,63 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef JITRightShiftGenerator_h
-#define JITRightShiftGenerator_h
-
-#if ENABLE(JIT)
-
-#include "JITBitBinaryOpGenerator.h"
-
-namespace JSC {
-
-class JITRightShiftGenerator : public JITBitBinaryOpGenerator {
-public:
- enum ShiftType {
- SignedShift,
- UnsignedShift
- };
-
- JITRightShiftGenerator(const SnippetOperand& leftOperand, const SnippetOperand& rightOperand,
- JSValueRegs result, JSValueRegs left, JSValueRegs right,
- FPRReg leftFPR, GPRReg scratchGPR, FPRReg scratchFPR, ShiftType type = SignedShift)
- : JITBitBinaryOpGenerator(leftOperand, rightOperand, result, left, right, scratchGPR)
- , m_shiftType(type)
- , m_leftFPR(leftFPR)
- , m_scratchFPR(scratchFPR)
- { }
-
- void generateFastPath(CCallHelpers&);
-
-private:
- ShiftType m_shiftType;
- FPRReg m_leftFPR;
- FPRReg m_scratchFPR;
-};
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
-
-#endif // JITRightShiftGenerator_h
diff --git a/Source/JavaScriptCore/jit/JITStubRoutine.cpp b/Source/JavaScriptCore/jit/JITStubRoutine.cpp
index 74e537747..28543a8b8 100644
--- a/Source/JavaScriptCore/jit/JITStubRoutine.cpp
+++ b/Source/JavaScriptCore/jit/JITStubRoutine.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -29,18 +29,13 @@
#if ENABLE(JIT)
#include "JSObject.h"
-#include "JSCInlines.h"
+
#include "SlotVisitor.h"
namespace JSC {
JITStubRoutine::~JITStubRoutine() { }
-bool JITStubRoutine::visitWeak(VM&)
-{
- return true;
-}
-
void JITStubRoutine::observeZeroRefCount()
{
RELEASE_ASSERT(!m_refCount);
diff --git a/Source/JavaScriptCore/jit/JITStubRoutine.h b/Source/JavaScriptCore/jit/JITStubRoutine.h
index db9aaa770..020ef6907 100644
--- a/Source/JavaScriptCore/jit/JITStubRoutine.h
+++ b/Source/JavaScriptCore/jit/JITStubRoutine.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,6 +26,8 @@
#ifndef JITStubRoutine_h
#define JITStubRoutine_h
+#include <wtf/Platform.h>
+
#if ENABLE(JIT)
#include "ExecutableAllocator.h"
@@ -59,14 +61,13 @@ public:
// Use this if you want to pass a CodePtr to someone who insists on taking
// a RefPtr<JITStubRoutine>.
- static Ref<JITStubRoutine> createSelfManagedRoutine(
+ static PassRefPtr<JITStubRoutine> createSelfManagedRoutine(
MacroAssemblerCodePtr rawCodePointer)
{
- return adoptRef(*new JITStubRoutine(MacroAssemblerCodeRef::createSelfManagedCodeRef(rawCodePointer)));
+ return adoptRef(new JITStubRoutine(MacroAssemblerCodeRef::createSelfManagedCodeRef(rawCodePointer)));
}
virtual ~JITStubRoutine();
- virtual void aboutToDie() { }
// MacroAssemblerCodeRef is copyable, but at the cost of reference
// counting churn. Returning a reference is a good way of reducing
@@ -140,11 +141,6 @@ public:
return true;
}
-
- // Return true if you are still valid after. Return false if you are now invalid. If you return
- // false, you will usually not do any clearing because the idea is that you will simply be
- // destroyed.
- virtual bool visitWeak(VM&);
protected:
virtual void observeZeroRefCount();
@@ -154,8 +150,11 @@ protected:
};
// Helper for the creation of simple stub routines that need no help from the GC.
-#define FINALIZE_CODE_FOR_STUB(codeBlock, patchBuffer, dataLogFArguments) \
- (adoptRef(new JITStubRoutine(FINALIZE_CODE_FOR((codeBlock), (patchBuffer), dataLogFArguments))))
+#define FINALIZE_CODE_FOR_STUB(patchBuffer, dataLogFArguments) \
+ (adoptRef(new JITStubRoutine(FINALIZE_CODE((patchBuffer), dataLogFArguments))))
+
+#define FINALIZE_CODE_FOR_DFG_STUB(patchBuffer, dataLogFArguments) \
+ (adoptRef(new JITStubRoutine(FINALIZE_DFG_CODE((patchBuffer), dataLogFArguments))))
} // namespace JSC
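
Both FINALIZE_CODE_FOR_*_STUB variants lean on the adoptRef idiom: a JITStubRoutine is born with a reference count of one, and adoptRef takes ownership of that initial reference instead of adding a second one. A simplified sketch using minimal types of my own (WTF's RefPtr/PassRefPtr carry considerably more machinery):

    #include <cassert>

    struct Counted {
        int refCount = 1;                  // born owned, as JITStubRoutine is
        void ref() { ++refCount; }
        void deref() { if (!--refCount) delete this; }
    };

    template<typename T>
    struct RefPtr {
        T* ptr;
        explicit RefPtr(T* p) : ptr(p) {}  // adopting constructor: no extra ref()
        ~RefPtr() { if (ptr) ptr->deref(); }
        RefPtr(const RefPtr&) = delete;
        RefPtr& operator=(const RefPtr&) = delete;
    };

    template<typename T>
    RefPtr<T> adoptRef(T* p) { return RefPtr<T>(p); }

    int main()
    {
        auto routine = adoptRef(new Counted);
        assert(routine.ptr->refCount == 1); // exactly one owner; no leak, no over-release
    }
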
diff --git a/Source/JavaScriptCore/jit/JITStubs.cpp b/Source/JavaScriptCore/jit/JITStubs.cpp
new file mode 100644
index 000000000..47c509e3d
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITStubs.cpp
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2008, 2009, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
+ * Copyright (C) Research In Motion Limited 2010, 2011. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "config.h"
+
+#if ENABLE(JIT)
+#include "JITStubs.h"
+
+#if CPU(ARM_TRADITIONAL)
+#include "JITStubsARM.h"
+#elif CPU(ARM_THUMB2)
+#include "JITStubsARMv7.h"
+#elif CPU(X86)
+#include "JITStubsX86.h"
+#elif CPU(X86_64)
+#include "JITStubsX86_64.h"
+#elif CPU(ARM64)
+// There isn't an ARM64-specific .h file
+#elif CPU(MIPS)
+// There isn't a MIPS-specific .h file
+#elif CPU(SH4)
+// There isn't an SH4-specific .h file
+#else
+#error "JIT not supported on this platform."
+#endif
+
+#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITStubs.h b/Source/JavaScriptCore/jit/JITStubs.h
new file mode 100644
index 000000000..24d95dfd4
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITStubs.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2008, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) Research In Motion Limited 2010. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITStubs_h
+#define JITStubs_h
+
+#include "JSCJSValue.h"
+
+namespace JSC {
+
+#if ENABLE(JIT)
+
+#if OS(WINDOWS)
+class ExecState;
+class Register;
+struct ProtoCallFrame;
+
+extern "C" {
+ EncodedJSValue callToJavaScript(void*, ExecState**, ProtoCallFrame*, Register*);
+ void returnFromJavaScript();
+ EncodedJSValue callToNativeFunction(void*, ExecState**, ProtoCallFrame*, Register*);
+}
+#endif
+
+#if USE(MASM_PROBE)
+extern "C" void ctiMasmProbeTrampoline();
+#endif
+
+#endif // ENABLE(JIT)
+
+} // namespace JSC
+
+#endif // JITStubs_h
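
The extern "C" block above exists because callToJavaScript and friends are defined in assembly; C linkage keeps the symbol names unmangled so the assembly labels and the C++ declarations refer to the same symbol. A minimal analogue, with an ordinary C++ definition standing in for the assembly stub so the sketch links:

    #include <cstdio>

    extern "C" int trampoline(int); // in JSC the definition would live in a .asm/.S file

    extern "C" int trampoline(int x) // stand-in definition for this sketch
    {
        return x + 1;
    }

    int main() { std::printf("%d\n", trampoline(41)); } // prints 42
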
diff --git a/Source/JavaScriptCore/jit/JITStubsARM.h b/Source/JavaScriptCore/jit/JITStubsARM.h
new file mode 100644
index 000000000..fd59188f4
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITStubsARM.h
@@ -0,0 +1,302 @@
+/*
+ * Copyright (C) 2008, 2009, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
+ * Copyright (C) Research In Motion Limited 2010, 2011. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITStubsARM_h
+#define JITStubsARM_h
+
+#if !CPU(ARM_TRADITIONAL)
+#error "JITStubsARM.h should only be #included if CPU(ARM_TRADITIONAL)"
+#endif
+
+#if !USE(JSVALUE32_64)
+#error "JITStubsARM.h only implements USE(JSVALUE32_64)"
+#endif
+
+namespace JSC {
+
+#if COMPILER(GCC)
+
+#if USE(MASM_PROBE)
+// The following are offsets for MacroAssembler::ProbeContext fields accessed
+// by the ctiMasmProbeTrampoline stub.
+
+#define PTR_SIZE 4
+#define PROBE_PROBE_FUNCTION_OFFSET (0 * PTR_SIZE)
+#define PROBE_ARG1_OFFSET (1 * PTR_SIZE)
+#define PROBE_ARG2_OFFSET (2 * PTR_SIZE)
+
+#define PROBE_FIRST_GPREG_OFFSET (4 * PTR_SIZE)
+
+#define GPREG_SIZE 4
+#define PROBE_CPU_R0_OFFSET (PROBE_FIRST_GPREG_OFFSET + (0 * GPREG_SIZE))
+#define PROBE_CPU_R1_OFFSET (PROBE_FIRST_GPREG_OFFSET + (1 * GPREG_SIZE))
+#define PROBE_CPU_R2_OFFSET (PROBE_FIRST_GPREG_OFFSET + (2 * GPREG_SIZE))
+#define PROBE_CPU_R3_OFFSET (PROBE_FIRST_GPREG_OFFSET + (3 * GPREG_SIZE))
+#define PROBE_CPU_R4_OFFSET (PROBE_FIRST_GPREG_OFFSET + (4 * GPREG_SIZE))
+#define PROBE_CPU_R5_OFFSET (PROBE_FIRST_GPREG_OFFSET + (5 * GPREG_SIZE))
+#define PROBE_CPU_R6_OFFSET (PROBE_FIRST_GPREG_OFFSET + (6 * GPREG_SIZE))
+#define PROBE_CPU_R7_OFFSET (PROBE_FIRST_GPREG_OFFSET + (7 * GPREG_SIZE))
+#define PROBE_CPU_R8_OFFSET (PROBE_FIRST_GPREG_OFFSET + (8 * GPREG_SIZE))
+#define PROBE_CPU_R9_OFFSET (PROBE_FIRST_GPREG_OFFSET + (9 * GPREG_SIZE))
+#define PROBE_CPU_R10_OFFSET (PROBE_FIRST_GPREG_OFFSET + (10 * GPREG_SIZE))
+#define PROBE_CPU_R11_OFFSET (PROBE_FIRST_GPREG_OFFSET + (11 * GPREG_SIZE))
+#define PROBE_CPU_IP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (12 * GPREG_SIZE))
+#define PROBE_CPU_SP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (13 * GPREG_SIZE))
+#define PROBE_CPU_LR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (14 * GPREG_SIZE))
+#define PROBE_CPU_PC_OFFSET (PROBE_FIRST_GPREG_OFFSET + (15 * GPREG_SIZE))
+
+#define PROBE_CPU_APSR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (16 * GPREG_SIZE))
+#define PROBE_CPU_FPSCR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (17 * GPREG_SIZE))
+
+#define PROBE_FIRST_FPREG_OFFSET (PROBE_FIRST_GPREG_OFFSET + (18 * GPREG_SIZE))
+
+#define FPREG_SIZE 8
+#define PROBE_CPU_D0_OFFSET (PROBE_FIRST_FPREG_OFFSET + (0 * FPREG_SIZE))
+#define PROBE_CPU_D1_OFFSET (PROBE_FIRST_FPREG_OFFSET + (1 * FPREG_SIZE))
+#define PROBE_CPU_D2_OFFSET (PROBE_FIRST_FPREG_OFFSET + (2 * FPREG_SIZE))
+#define PROBE_CPU_D3_OFFSET (PROBE_FIRST_FPREG_OFFSET + (3 * FPREG_SIZE))
+#define PROBE_CPU_D4_OFFSET (PROBE_FIRST_FPREG_OFFSET + (4 * FPREG_SIZE))
+#define PROBE_CPU_D5_OFFSET (PROBE_FIRST_FPREG_OFFSET + (5 * FPREG_SIZE))
+#define PROBE_CPU_D6_OFFSET (PROBE_FIRST_FPREG_OFFSET + (6 * FPREG_SIZE))
+#define PROBE_CPU_D7_OFFSET (PROBE_FIRST_FPREG_OFFSET + (7 * FPREG_SIZE))
+#define PROBE_CPU_D8_OFFSET (PROBE_FIRST_FPREG_OFFSET + (8 * FPREG_SIZE))
+#define PROBE_CPU_D9_OFFSET (PROBE_FIRST_FPREG_OFFSET + (9 * FPREG_SIZE))
+#define PROBE_CPU_D10_OFFSET (PROBE_FIRST_FPREG_OFFSET + (10 * FPREG_SIZE))
+#define PROBE_CPU_D11_OFFSET (PROBE_FIRST_FPREG_OFFSET + (11 * FPREG_SIZE))
+#define PROBE_CPU_D12_OFFSET (PROBE_FIRST_FPREG_OFFSET + (12 * FPREG_SIZE))
+#define PROBE_CPU_D13_OFFSET (PROBE_FIRST_FPREG_OFFSET + (13 * FPREG_SIZE))
+#define PROBE_CPU_D14_OFFSET (PROBE_FIRST_FPREG_OFFSET + (14 * FPREG_SIZE))
+#define PROBE_CPU_D15_OFFSET (PROBE_FIRST_FPREG_OFFSET + (15 * FPREG_SIZE))
+
+#define PROBE_SIZE (PROBE_FIRST_FPREG_OFFSET + (16 * FPREG_SIZE))
+
+// These ASSERTs remind you that if you change the layout of ProbeContext,
+// you need to change ctiMasmProbeTrampoline offsets above to match.
+#define PROBE_OFFSETOF(x) offsetof(struct MacroAssembler::ProbeContext, x)
+COMPILE_ASSERT(PROBE_OFFSETOF(probeFunction) == PROBE_PROBE_FUNCTION_OFFSET, ProbeContext_probeFunction_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(arg1) == PROBE_ARG1_OFFSET, ProbeContext_arg1_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(arg2) == PROBE_ARG2_OFFSET, ProbeContext_arg2_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r0) == PROBE_CPU_R0_OFFSET, ProbeContext_cpu_r0_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r1) == PROBE_CPU_R1_OFFSET, ProbeContext_cpu_r1_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r2) == PROBE_CPU_R2_OFFSET, ProbeContext_cpu_r2_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r3) == PROBE_CPU_R3_OFFSET, ProbeContext_cpu_r3_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r4) == PROBE_CPU_R4_OFFSET, ProbeContext_cpu_r4_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r5) == PROBE_CPU_R5_OFFSET, ProbeContext_cpu_r5_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r6) == PROBE_CPU_R6_OFFSET, ProbeContext_cpu_r6_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r7) == PROBE_CPU_R7_OFFSET, ProbeContext_cpu_r7_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r8) == PROBE_CPU_R8_OFFSET, ProbeContext_cpu_r8_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r9) == PROBE_CPU_R9_OFFSET, ProbeContext_cpu_r9_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r10) == PROBE_CPU_R10_OFFSET, ProbeContext_cpu_r10_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r11) == PROBE_CPU_R11_OFFSET, ProbeContext_cpu_r11_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.ip) == PROBE_CPU_IP_OFFSET, ProbeContext_cpu_ip_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.sp) == PROBE_CPU_SP_OFFSET, ProbeContext_cpu_sp_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.lr) == PROBE_CPU_LR_OFFSET, ProbeContext_cpu_lr_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.pc) == PROBE_CPU_PC_OFFSET, ProbeContext_cpu_pc_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.apsr) == PROBE_CPU_APSR_OFFSET, ProbeContext_cpu_apsr_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.fpscr) == PROBE_CPU_FPSCR_OFFSET, ProbeContext_cpu_fpscr_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d0) == PROBE_CPU_D0_OFFSET, ProbeContext_cpu_d0_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d1) == PROBE_CPU_D1_OFFSET, ProbeContext_cpu_d1_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d2) == PROBE_CPU_D2_OFFSET, ProbeContext_cpu_d2_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d3) == PROBE_CPU_D3_OFFSET, ProbeContext_cpu_d3_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d4) == PROBE_CPU_D4_OFFSET, ProbeContext_cpu_d4_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d5) == PROBE_CPU_D5_OFFSET, ProbeContext_cpu_d5_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d6) == PROBE_CPU_D6_OFFSET, ProbeContext_cpu_d6_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d7) == PROBE_CPU_D7_OFFSET, ProbeContext_cpu_d7_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d8) == PROBE_CPU_D8_OFFSET, ProbeContext_cpu_d8_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d9) == PROBE_CPU_D9_OFFSET, ProbeContext_cpu_d9_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d10) == PROBE_CPU_D10_OFFSET, ProbeContext_cpu_d10_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d11) == PROBE_CPU_D11_OFFSET, ProbeContext_cpu_d11_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d12) == PROBE_CPU_D12_OFFSET, ProbeContext_cpu_d12_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d13) == PROBE_CPU_D13_OFFSET, ProbeContext_cpu_d13_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d14) == PROBE_CPU_D14_OFFSET, ProbeContext_cpu_d14_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d15) == PROBE_CPU_D15_OFFSET, ProbeContext_cpu_d15_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(sizeof(MacroAssembler::ProbeContext) == PROBE_SIZE, ProbeContext_size_matches_ctiMasmProbeTrampoline);
+#undef PROBE_OFFSETOF
+
+#endif // USE(MASM_PROBE)
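
The wall of COMPILE_ASSERTs above is a build-time contract between the C++ ProbeContext layout and the hand-written offsets the assembly trampoline uses. The same pattern with static_assert, on a made-up three-field struct:

    #include <cstddef>

    struct ProbeContextModel { // hypothetical stand-in for MacroAssembler::ProbeContext
        void* probeFunction;
        void* arg1;
        void* arg2;
    };

    constexpr size_t MODEL_PTR_SIZE = sizeof(void*);
    constexpr size_t MODEL_ARG1_OFFSET = 1 * MODEL_PTR_SIZE;

    static_assert(offsetof(ProbeContextModel, arg1) == MODEL_ARG1_OFFSET,
        "hand-written assembly offsets no longer match the struct layout");

    int main() { return 0; }
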
+
+
+#if USE(MASM_PROBE)
+asm (
+".text" "\n"
+".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n"
+HIDE_SYMBOL(ctiMasmProbeTrampoline) "\n"
+INLINE_ARM_FUNCTION(ctiMasmProbeTrampoline) "\n"
+SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n"
+
+ // MacroAssembler::probe() has already generated code to store some values.
+ // The top of stack now looks like this:
+ // sp[0 * ptrSize]: probeFunction
+ // sp[1 * ptrSize]: arg1
+ // sp[2 * ptrSize]: arg2
+ // sp[3 * ptrSize]: saved r3 / S0
+ // sp[4 * ptrSize]: saved ip
+ // sp[5 * ptrSize]: saved lr
+ // sp[6 * ptrSize]: saved sp
+
+ "mov ip, sp" "\n"
+ "mov r3, sp" "\n"
+ "sub r3, r3, #" STRINGIZE_VALUE_OF(PROBE_SIZE) "\n"
+
+ // The ARM EABI specifies that the stack needs to be 16-byte aligned.
+ "bic r3, r3, #0xf" "\n"
+ "mov sp, r3" "\n"
+
+ "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
+ "add lr, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R0_OFFSET) "\n"
+ "stmia lr, { r0-r11 }" "\n"
+ "mrs lr, APSR" "\n"
+ "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n"
+ "vmrs lr, FPSCR" "\n"
+ "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSCR_OFFSET) "]" "\n"
+
+ "ldr lr, [ip, #0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+ "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "]" "\n"
+ "ldr lr, [ip, #1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+ "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG1_OFFSET) "]" "\n"
+ "ldr lr, [ip, #2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+ "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG2_OFFSET) "]" "\n"
+ "ldr lr, [ip, #3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+ "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R3_OFFSET) "]" "\n"
+ "ldr lr, [ip, #4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+ "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n"
+ "ldr lr, [ip, #5 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+ "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
+ "ldr lr, [ip, #6 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+ "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+
+ "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
+
+ "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_D0_OFFSET) "\n"
+ "vstmia.64 ip, { d0-d15 }" "\n"
+
+ "mov fp, sp" "\n" // Save the ProbeContext*.
+
+ "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "]" "\n"
+ "mov r0, sp" "\n" // the ProbeContext* arg.
+ "blx ip" "\n"
+
+ "mov sp, fp" "\n"
+
+ // To enable probes to modify register state, we copy all registers
+ // out of the ProbeContext before returning.
+
+ "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_D15_OFFSET + FPREG_SIZE) "\n"
+ "vldmdb.64 ip!, { d0-d15 }" "\n"
+ "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R11_OFFSET + GPREG_SIZE) "\n"
+ "ldmdb ip, { r0-r11 }" "\n"
+ "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSCR_OFFSET) "]" "\n"
+ "vmsr FPSCR, ip" "\n"
+
+ // There are 5 more registers left to restore: ip, sp, lr, pc, and apsr.
+ // There are 2 issues that complicate the restoration of these last few
+ // registers:
+ //
+ // 1. Normal ARM calling convention relies on moving lr to pc to return to
+ // the caller. In our case, the address to return to is specified by
+ // ProbeContext.cpu.pc. And at that moment, we won't have any available
+ // scratch registers to hold the return address (lr needs to hold
+ // ProbeContext.cpu.lr, not the return address).
+ //
+ // The solution is to store the return address on the stack and load the
+ // pc from there.
+ //
+ // 2. Issue 1 means we will need to write to the stack location at
+ // ProbeContext.cpu.sp - 4. But if the user probe function had modified
+ // the value of ProbeContext.cpu.sp to point in the range between
+ // &ProbeContext.cpu.ip through &ProbeContext.cpu.apsr, then the action for
+ // Issue 1 may trash the values to be restored before we can restore
+ // them.
+ //
+ // The solution is to check if ProbeContext.cpu.sp contains a value in
+ // the undesirable range. If so, we copy the remaining ProbeContext
+ // register data to a safe range (at memory lower than where
+ // ProbeContext.cpu.sp points) first, and restore the remaining register
+ // from this new range.
+
+ "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "\n"
+ "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+ "cmp lr, ip" "\n"
+ "bgt " SYMBOL_STRING(ctiMasmProbeTrampolineEnd) "\n"
+
+ // We get here because the new expected stack pointer location is lower
+ // than where it's supposed to be. This means the safe range of stack
+ // memory where we'll be copying the remaining register restore values to
+ // might be in a region of memory below the sp, i.e. unallocated stack
+ // memory. This, in turn, makes it vulnerable to interrupts potentially
+ // trashing the copied values. To prevent that, we must first allocate the
+ // needed stack memory by adjusting the sp before the copying.
+
+ "sub lr, lr, #(6 * " STRINGIZE_VALUE_OF(PTR_SIZE)
+ " + " STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) ")" "\n"
+
+ "mov ip, sp" "\n"
+ "mov sp, lr" "\n"
+ "mov lr, ip" "\n"
+
+ "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n"
+ "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n"
+ "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+ "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+ "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
+ "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
+ "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
+ "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
+ "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n"
+ "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n"
+
+SYMBOL_STRING(ctiMasmProbeTrampolineEnd) ":" "\n"
+ "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
+ "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+ "sub lr, lr, #" STRINGIZE_VALUE_OF(PTR_SIZE) "\n"
+ "str ip, [lr]" "\n"
+ "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+
+ "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n"
+ "msr APSR, ip" "\n"
+ "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
+ "mov lr, ip" "\n"
+ "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n"
+ "ldr sp, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+
+ "pop { pc }" "\n"
+);
+#endif // USE(MASM_PROBE)
+
+
+
+#endif // COMPILER(GCC)
+
+} // namespace JSC
+
+#endif // JITStubsARM_h
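
One detail worth calling out in the trampoline above: `bic r3, r3, #0xf` rounds the new stack pointer down to a 16-byte boundary by clearing its low four bits. The same computation in portable C++:

    #include <cassert>
    #include <cstdint>

    uintptr_t alignDown16(uintptr_t p)
    {
        return p & ~uintptr_t{0xf}; // clear the low four bits, as BIC #0xf does
    }

    int main()
    {
        assert(alignDown16(0x1007) == 0x1000);
        assert(alignDown16(0x1010) == 0x1010); // already-aligned values are unchanged
    }
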
diff --git a/Source/JavaScriptCore/jit/JITStubsARMv7.h b/Source/JavaScriptCore/jit/JITStubsARMv7.h
new file mode 100644
index 000000000..28bbf8a92
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITStubsARMv7.h
@@ -0,0 +1,351 @@
+/*
+ * Copyright (C) 2008, 2009, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
+ * Copyright (C) Research In Motion Limited 2010, 2011. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITStubsARMv7_h
+#define JITStubsARMv7_h
+
+#if !CPU(ARM_THUMB2)
+#error "JITStubsARMv7.h should only be #included if CPU(ARM_THUMB2)"
+#endif
+
+#if !USE(JSVALUE32_64)
+#error "JITStubsARMv7.h only implements USE(JSVALUE32_64)"
+#endif
+
+namespace JSC {
+
+#if COMPILER(GCC)
+
+#if USE(MASM_PROBE)
+// The following are offsets for MacroAssembler::ProbeContext fields accessed
+// by the ctiMasmProbeTrampoline stub.
+
+#define PTR_SIZE 4
+#define PROBE_PROBE_FUNCTION_OFFSET (0 * PTR_SIZE)
+#define PROBE_ARG1_OFFSET (1 * PTR_SIZE)
+#define PROBE_ARG2_OFFSET (2 * PTR_SIZE)
+
+#define PROBE_FIRST_GPREG_OFFSET (4 * PTR_SIZE)
+
+#define GPREG_SIZE 4
+#define PROBE_CPU_R0_OFFSET (PROBE_FIRST_GPREG_OFFSET + (0 * GPREG_SIZE))
+#define PROBE_CPU_R1_OFFSET (PROBE_FIRST_GPREG_OFFSET + (1 * GPREG_SIZE))
+#define PROBE_CPU_R2_OFFSET (PROBE_FIRST_GPREG_OFFSET + (2 * GPREG_SIZE))
+#define PROBE_CPU_R3_OFFSET (PROBE_FIRST_GPREG_OFFSET + (3 * GPREG_SIZE))
+#define PROBE_CPU_R4_OFFSET (PROBE_FIRST_GPREG_OFFSET + (4 * GPREG_SIZE))
+#define PROBE_CPU_R5_OFFSET (PROBE_FIRST_GPREG_OFFSET + (5 * GPREG_SIZE))
+#define PROBE_CPU_R6_OFFSET (PROBE_FIRST_GPREG_OFFSET + (6 * GPREG_SIZE))
+#define PROBE_CPU_R7_OFFSET (PROBE_FIRST_GPREG_OFFSET + (7 * GPREG_SIZE))
+#define PROBE_CPU_R8_OFFSET (PROBE_FIRST_GPREG_OFFSET + (8 * GPREG_SIZE))
+#define PROBE_CPU_R9_OFFSET (PROBE_FIRST_GPREG_OFFSET + (9 * GPREG_SIZE))
+#define PROBE_CPU_R10_OFFSET (PROBE_FIRST_GPREG_OFFSET + (10 * GPREG_SIZE))
+#define PROBE_CPU_R11_OFFSET (PROBE_FIRST_GPREG_OFFSET + (11 * GPREG_SIZE))
+#define PROBE_CPU_IP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (12 * GPREG_SIZE))
+#define PROBE_CPU_SP_OFFSET (PROBE_FIRST_GPREG_OFFSET + (13 * GPREG_SIZE))
+#define PROBE_CPU_LR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (14 * GPREG_SIZE))
+#define PROBE_CPU_PC_OFFSET (PROBE_FIRST_GPREG_OFFSET + (15 * GPREG_SIZE))
+
+#define PROBE_CPU_APSR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (16 * GPREG_SIZE))
+#define PROBE_CPU_FPSCR_OFFSET (PROBE_FIRST_GPREG_OFFSET + (17 * GPREG_SIZE))
+
+#define PROBE_FIRST_FPREG_OFFSET (PROBE_FIRST_GPREG_OFFSET + (18 * GPREG_SIZE))
+
+#define FPREG_SIZE 8
+#define PROBE_CPU_D0_OFFSET (PROBE_FIRST_FPREG_OFFSET + (0 * FPREG_SIZE))
+#define PROBE_CPU_D1_OFFSET (PROBE_FIRST_FPREG_OFFSET + (1 * FPREG_SIZE))
+#define PROBE_CPU_D2_OFFSET (PROBE_FIRST_FPREG_OFFSET + (2 * FPREG_SIZE))
+#define PROBE_CPU_D3_OFFSET (PROBE_FIRST_FPREG_OFFSET + (3 * FPREG_SIZE))
+#define PROBE_CPU_D4_OFFSET (PROBE_FIRST_FPREG_OFFSET + (4 * FPREG_SIZE))
+#define PROBE_CPU_D5_OFFSET (PROBE_FIRST_FPREG_OFFSET + (5 * FPREG_SIZE))
+#define PROBE_CPU_D6_OFFSET (PROBE_FIRST_FPREG_OFFSET + (6 * FPREG_SIZE))
+#define PROBE_CPU_D7_OFFSET (PROBE_FIRST_FPREG_OFFSET + (7 * FPREG_SIZE))
+#define PROBE_CPU_D8_OFFSET (PROBE_FIRST_FPREG_OFFSET + (8 * FPREG_SIZE))
+#define PROBE_CPU_D9_OFFSET (PROBE_FIRST_FPREG_OFFSET + (9 * FPREG_SIZE))
+#define PROBE_CPU_D10_OFFSET (PROBE_FIRST_FPREG_OFFSET + (10 * FPREG_SIZE))
+#define PROBE_CPU_D11_OFFSET (PROBE_FIRST_FPREG_OFFSET + (11 * FPREG_SIZE))
+#define PROBE_CPU_D12_OFFSET (PROBE_FIRST_FPREG_OFFSET + (12 * FPREG_SIZE))
+#define PROBE_CPU_D13_OFFSET (PROBE_FIRST_FPREG_OFFSET + (13 * FPREG_SIZE))
+#define PROBE_CPU_D14_OFFSET (PROBE_FIRST_FPREG_OFFSET + (14 * FPREG_SIZE))
+#define PROBE_CPU_D15_OFFSET (PROBE_FIRST_FPREG_OFFSET + (15 * FPREG_SIZE))
+
+#if CPU(APPLE_ARMV7S)
+#define PROBE_CPU_D16_OFFSET (PROBE_FIRST_FPREG_OFFSET + (16 * FPREG_SIZE))
+#define PROBE_CPU_D17_OFFSET (PROBE_FIRST_FPREG_OFFSET + (17 * FPREG_SIZE))
+#define PROBE_CPU_D18_OFFSET (PROBE_FIRST_FPREG_OFFSET + (18 * FPREG_SIZE))
+#define PROBE_CPU_D19_OFFSET (PROBE_FIRST_FPREG_OFFSET + (19 * FPREG_SIZE))
+#define PROBE_CPU_D20_OFFSET (PROBE_FIRST_FPREG_OFFSET + (20 * FPREG_SIZE))
+#define PROBE_CPU_D21_OFFSET (PROBE_FIRST_FPREG_OFFSET + (21 * FPREG_SIZE))
+#define PROBE_CPU_D22_OFFSET (PROBE_FIRST_FPREG_OFFSET + (22 * FPREG_SIZE))
+#define PROBE_CPU_D23_OFFSET (PROBE_FIRST_FPREG_OFFSET + (23 * FPREG_SIZE))
+#define PROBE_CPU_D24_OFFSET (PROBE_FIRST_FPREG_OFFSET + (24 * FPREG_SIZE))
+#define PROBE_CPU_D25_OFFSET (PROBE_FIRST_FPREG_OFFSET + (25 * FPREG_SIZE))
+#define PROBE_CPU_D26_OFFSET (PROBE_FIRST_FPREG_OFFSET + (26 * FPREG_SIZE))
+#define PROBE_CPU_D27_OFFSET (PROBE_FIRST_FPREG_OFFSET + (27 * FPREG_SIZE))
+#define PROBE_CPU_D28_OFFSET (PROBE_FIRST_FPREG_OFFSET + (28 * FPREG_SIZE))
+#define PROBE_CPU_D29_OFFSET (PROBE_FIRST_FPREG_OFFSET + (29 * FPREG_SIZE))
+#define PROBE_CPU_D30_OFFSET (PROBE_FIRST_FPREG_OFFSET + (30 * FPREG_SIZE))
+#define PROBE_CPU_D31_OFFSET (PROBE_FIRST_FPREG_OFFSET + (31 * FPREG_SIZE))
+#define PROBE_SIZE (PROBE_FIRST_FPREG_OFFSET + (32 * FPREG_SIZE))
+#else
+#define PROBE_SIZE (PROBE_FIRST_FPREG_OFFSET + (16 * FPREG_SIZE))
+#endif // CPU(APPLE_ARMV7S)
+
+
+// These ASSERTs remind you that if you change the layout of ProbeContext,
+// you need to change ctiMasmProbeTrampoline offsets above to match.
+#define PROBE_OFFSETOF(x) offsetof(struct MacroAssembler::ProbeContext, x)
+COMPILE_ASSERT(PROBE_OFFSETOF(probeFunction) == PROBE_PROBE_FUNCTION_OFFSET, ProbeContext_probeFunction_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(arg1) == PROBE_ARG1_OFFSET, ProbeContext_arg1_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(arg2) == PROBE_ARG2_OFFSET, ProbeContext_arg2_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r0) == PROBE_CPU_R0_OFFSET, ProbeContext_cpu_r0_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r1) == PROBE_CPU_R1_OFFSET, ProbeContext_cpu_r1_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r2) == PROBE_CPU_R2_OFFSET, ProbeContext_cpu_r2_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r3) == PROBE_CPU_R3_OFFSET, ProbeContext_cpu_r3_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r4) == PROBE_CPU_R4_OFFSET, ProbeContext_cpu_r4_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r5) == PROBE_CPU_R5_OFFSET, ProbeContext_cpu_r5_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r6) == PROBE_CPU_R6_OFFSET, ProbeContext_cpu_r6_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r7) == PROBE_CPU_R7_OFFSET, ProbeContext_cpu_r7_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r8) == PROBE_CPU_R8_OFFSET, ProbeContext_cpu_r8_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r9) == PROBE_CPU_R9_OFFSET, ProbeContext_cpu_r9_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r10) == PROBE_CPU_R10_OFFSET, ProbeContext_cpu_r10_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r11) == PROBE_CPU_R11_OFFSET, ProbeContext_cpu_r11_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.ip) == PROBE_CPU_IP_OFFSET, ProbeContext_cpu_ip_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.sp) == PROBE_CPU_SP_OFFSET, ProbeContext_cpu_sp_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.lr) == PROBE_CPU_LR_OFFSET, ProbeContext_cpu_lr_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.pc) == PROBE_CPU_PC_OFFSET, ProbeContext_cpu_pc_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.apsr) == PROBE_CPU_APSR_OFFSET, ProbeContext_cpu_apsr_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.fpscr) == PROBE_CPU_FPSCR_OFFSET, ProbeContext_cpu_fpscr_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d0) == PROBE_CPU_D0_OFFSET, ProbeContext_cpu_d0_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d1) == PROBE_CPU_D1_OFFSET, ProbeContext_cpu_d1_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d2) == PROBE_CPU_D2_OFFSET, ProbeContext_cpu_d2_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d3) == PROBE_CPU_D3_OFFSET, ProbeContext_cpu_d3_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d4) == PROBE_CPU_D4_OFFSET, ProbeContext_cpu_d4_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d5) == PROBE_CPU_D5_OFFSET, ProbeContext_cpu_d5_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d6) == PROBE_CPU_D6_OFFSET, ProbeContext_cpu_d6_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d7) == PROBE_CPU_D7_OFFSET, ProbeContext_cpu_d7_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d8) == PROBE_CPU_D8_OFFSET, ProbeContext_cpu_d8_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d9) == PROBE_CPU_D9_OFFSET, ProbeContext_cpu_d9_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d10) == PROBE_CPU_D10_OFFSET, ProbeContext_cpu_d10_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d11) == PROBE_CPU_D11_OFFSET, ProbeContext_cpu_d11_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d12) == PROBE_CPU_D12_OFFSET, ProbeContext_cpu_d12_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d13) == PROBE_CPU_D13_OFFSET, ProbeContext_cpu_d13_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d14) == PROBE_CPU_D14_OFFSET, ProbeContext_cpu_d14_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d15) == PROBE_CPU_D15_OFFSET, ProbeContext_cpu_d15_offset_matches_ctiMasmProbeTrampoline);
+
+#if CPU(APPLE_ARMV7S)
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d16) == PROBE_CPU_D16_OFFSET, ProbeContext_cpu_d16_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d17) == PROBE_CPU_D17_OFFSET, ProbeContext_cpu_d17_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d18) == PROBE_CPU_D18_OFFSET, ProbeContext_cpu_d18_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d19) == PROBE_CPU_D19_OFFSET, ProbeContext_cpu_d19_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d20) == PROBE_CPU_D20_OFFSET, ProbeContext_cpu_d20_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d21) == PROBE_CPU_D21_OFFSET, ProbeContext_cpu_d21_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d22) == PROBE_CPU_D22_OFFSET, ProbeContext_cpu_d22_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d23) == PROBE_CPU_D23_OFFSET, ProbeContext_cpu_d23_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d24) == PROBE_CPU_D24_OFFSET, ProbeContext_cpu_d24_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d25) == PROBE_CPU_D25_OFFSET, ProbeContext_cpu_d25_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d26) == PROBE_CPU_D26_OFFSET, ProbeContext_cpu_d26_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d27) == PROBE_CPU_D27_OFFSET, ProbeContext_cpu_d27_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d28) == PROBE_CPU_D28_OFFSET, ProbeContext_cpu_d28_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d29) == PROBE_CPU_D29_OFFSET, ProbeContext_cpu_d29_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d30) == PROBE_CPU_D30_OFFSET, ProbeContext_cpu_d30_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.d31) == PROBE_CPU_D31_OFFSET, ProbeContext_cpu_d31_offset_matches_ctiMasmProbeTrampoline);
+#endif // CPU(APPLE_ARMV7S)
+
+COMPILE_ASSERT(sizeof(MacroAssembler::ProbeContext) == PROBE_SIZE, ProbeContext_size_matches_ctiMasmProbeTrampoline);
+
+#undef PROBE_OFFSETOF
+
+asm (
+".text" "\n"
+".align 2" "\n"
+".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n"
+HIDE_SYMBOL(ctiMasmProbeTrampoline) "\n"
+".thumb" "\n"
+".thumb_func " THUMB_FUNC_PARAM(ctiMasmProbeTrampoline) "\n"
+SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n"
+
+ // MacroAssembler::probe() has already generated code to store some values.
+ // The top of stack now looks like this:
+ // sp[0 * ptrSize]: probeFunction
+ // sp[1 * ptrSize]: arg1
+ // sp[2 * ptrSize]: arg2
+ // sp[3 * ptrSize]: saved r0
+ // sp[4 * ptrSize]: saved ip
+ // sp[5 * ptrSize]: saved lr
+ // sp[6 * ptrSize]: saved sp
+
+ "mov ip, sp" "\n"
+ "mov r0, sp" "\n"
+ "sub r0, r0, #" STRINGIZE_VALUE_OF(PROBE_SIZE) "\n"
+
+ // The ARM EABI specifies that the stack needs to be 16-byte aligned.
+ "bic r0, r0, #0xf" "\n"
+ "mov sp, r0" "\n"
+
+ "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
+ "add lr, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R1_OFFSET) "\n"
+ "stmia lr, { r1-r11 }" "\n"
+ "mrs lr, APSR" "\n"
+ "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n"
+ "vmrs lr, FPSCR" "\n"
+ "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSCR_OFFSET) "]" "\n"
+
+ "ldr lr, [ip, #0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+ "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "]" "\n"
+ "ldr lr, [ip, #1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+ "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG1_OFFSET) "]" "\n"
+ "ldr lr, [ip, #2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+ "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_ARG2_OFFSET) "]" "\n"
+ "ldr lr, [ip, #3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+ "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R0_OFFSET) "]" "\n"
+ "ldr lr, [ip, #4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+ "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n"
+ "ldr lr, [ip, #5 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+ "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
+ "ldr lr, [ip, #6 * " STRINGIZE_VALUE_OF(PTR_SIZE) "]" "\n"
+ "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+
+ "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
+
+ "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_D0_OFFSET) "\n"
+#if CPU(APPLE_ARMV7S)
+ "vstmia.64 ip, { d0-d31 }" "\n"
+#else
+ "vstmia.64 ip, { d0-d15 }" "\n"
+#endif
+
+ "mov fp, sp" "\n" // Save the ProbeContext*.
+
+ "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "]" "\n"
+ "mov r0, sp" "\n" // the ProbeContext* arg.
+ "blx ip" "\n"
+
+ "mov sp, fp" "\n"
+
+ // To enable probes to modify register state, we copy all registers
+ // out of the ProbeContext before returning.
+
+#if CPU(APPLE_ARMV7S)
+ "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_D31_OFFSET + FPREG_SIZE) "\n"
+ "vldmdb.64 ip!, { d0-d31 }" "\n"
+#else
+ "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_D15_OFFSET + FPREG_SIZE) "\n"
+ "vldmdb.64 ip!, { d0-d15 }" "\n"
+#endif
+ "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_R11_OFFSET + GPREG_SIZE) "\n"
+ "ldmdb ip, { r0-r11 }" "\n"
+ "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_FPSCR_OFFSET) "]" "\n"
+ "vmsr FPSCR, ip" "\n"
+
+ // There are 5 more registers left to restore: ip, sp, lr, pc, and apsr.
+ // There are 2 issues that complicate the restoration of these last few
+ // registers:
+ //
+ // 1. Normal ARM calling convention relies on moving lr to pc to return to
+ // the caller. In our case, the address to return to is specified by
+ // ProbeContext.cpu.pc. And at that moment, we won't have any available
+ // scratch registers to hold the return address (lr needs to hold
+ // ProbeContext.cpu.lr, not the return address).
+ //
+ // The solution is to store the return address on the stack and load the
+ // pc from there.
+ //
+ // 2. Issue 1 means we will need to write to the stack location at
+ // ProbeContext.cpu.sp - 4. But if the user probe function had modified
+ // the value of ProbeContext.cpu.sp to point in the range between
+ // &ProbeContext.cpu.ip through &ProbeContext.cpu.apsr, then the action for
+ // Issue 1 may trash the values to be restored before we can restore
+ // them.
+ //
+ // The solution is to check if ProbeContext.cpu.sp contains a value in
+ // the undesirable range. If so, we copy the remaining ProbeContext
+ // register data to a safe range (at memory lower than where
+ // ProbeContext.cpu.sp points) first, and restore the remaining register
+ // from this new range.
+
+ "add ip, sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "\n"
+ "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+ "cmp lr, ip" "\n"
+ "it gt" "\n"
+ "bgt " SYMBOL_STRING(ctiMasmProbeTrampolineEnd) "\n"
+
+ // We get here because the new expected stack pointer location is lower
+ // than where it's supposed to be. This means the safe range of stack
+ // memory where we'll be copying the remaining register restore values to
+ // might be in a region of memory below the sp, i.e. unallocated stack
+ // memory. This, in turn, makes it vulnerable to interrupts potentially
+ // trashing the copied values. To prevent that, we must first allocate the
+ // needed stack memory by adjusting the sp before the copying.
+
+ "sub lr, lr, #(6 * " STRINGIZE_VALUE_OF(PTR_SIZE)
+ " + " STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) ")" "\n"
+
+ "mov ip, sp" "\n"
+ "mov sp, lr" "\n"
+ "mov lr, ip" "\n"
+
+ "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n"
+ "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n"
+ "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+ "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+ "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
+ "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
+ "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
+ "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
+ "ldr ip, [lr, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n"
+ "str ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n"
+
+".thumb_func " THUMB_FUNC_PARAM(ctiMasmProbeTrampolineEnd) "\n"
+SYMBOL_STRING(ctiMasmProbeTrampolineEnd) ":" "\n"
+ "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_PC_OFFSET) "]" "\n"
+ "ldr lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+ "sub lr, lr, #" STRINGIZE_VALUE_OF(PTR_SIZE) "\n"
+ "str ip, [lr]" "\n"
+ "str lr, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+
+ "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_APSR_OFFSET) "]" "\n"
+ "msr APSR, ip" "\n"
+ "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_LR_OFFSET) "]" "\n"
+ "mov lr, ip" "\n"
+ "ldr ip, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_IP_OFFSET) "]" "\n"
+ "ldr sp, [sp, #" STRINGIZE_VALUE_OF(PROBE_CPU_SP_OFFSET) "]" "\n"
+
+ "pop { pc }" "\n"
+);
+#endif // USE(MASM_PROBE)
+
+#endif // COMPILER(GCC)
+
+} // namespace JSC
+
+#endif // JITStubsARMv7_h
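
The CPU(APPLE_ARMV7S) branches above double the saved floating-point file from d0-d15 to d0-d31, which also grows PROBE_SIZE. Recomputing both context sizes from the header's own constants, as a sanity check:

    #include <cstdio>

    constexpr int PTR_SIZE = 4;
    constexpr int GPREG_SIZE = 4;
    constexpr int FPREG_SIZE = 8;
    constexpr int FIRST_GPREG_OFFSET = 4 * PTR_SIZE;
    constexpr int FIRST_FPREG_OFFSET = FIRST_GPREG_OFFSET + 18 * GPREG_SIZE;

    int main()
    {
        std::printf("d0-d15 context: %d bytes\n", FIRST_FPREG_OFFSET + 16 * FPREG_SIZE); // 216
        std::printf("d0-d31 context: %d bytes\n", FIRST_FPREG_OFFSET + 32 * FPREG_SIZE); // 344
    }
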
diff --git a/Source/JavaScriptCore/jit/JITStubsMSVC64.asm b/Source/JavaScriptCore/jit/JITStubsMSVC64.asm
deleted file mode 100644
index ef9cd4e0e..000000000
--- a/Source/JavaScriptCore/jit/JITStubsMSVC64.asm
+++ /dev/null
@@ -1,44 +0,0 @@
-;/*
-; Copyright (C) 2014 Apple Inc. All rights reserved.
-;
-; Redistribution and use in source and binary forms, with or without
-; modification, are permitted provided that the following conditions
-; are met:
-; 1. Redistributions of source code must retain the above copyright
-; notice, this list of conditions and the following disclaimer.
-; 2. Redistributions in binary form must reproduce the above copyright
-; notice, this list of conditions and the following disclaimer in the
-; documentation and/or other materials provided with the distribution.
-;
-; THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
-; EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-; IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
-; PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
-; CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
-; EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
-; PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
-; PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
-; OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-; (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-; OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
-;*/
-
-EXTERN getHostCallReturnValueWithExecState : near
-
-PUBLIC getHostCallReturnValue
-
-_TEXT SEGMENT
-
-getHostCallReturnValue PROC
- lea rcx, [rsp - 8]
- ; Allocate space for all 4 parameter registers, and align stack pointer to 16 bytes boundary by allocating another 8 bytes.
- ; The stack alignment is needed to fix a crash in the CRT library on a floating point instruction.
- sub rsp, 40
- call getHostCallReturnValueWithExecState
- add rsp, 40
- ret
-getHostCallReturnValue ENDP
-
-_TEXT ENDS
-
-END
diff --git a/Source/JavaScriptCore/jit/JITStubsX86.h b/Source/JavaScriptCore/jit/JITStubsX86.h
new file mode 100644
index 000000000..7a26a5afa
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITStubsX86.h
@@ -0,0 +1,649 @@
+/*
+ * Copyright (C) 2008, 2009, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
+ * Copyright (C) Research In Motion Limited 2010, 2011. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITStubsX86_h
+#define JITStubsX86_h
+
+#include "JITStubsX86Common.h"
+#include <wtf/InlineASM.h>
+
+#if !CPU(X86)
+#error "JITStubsX86.h should only be #included if CPU(X86)"
+#endif
+
+#if !USE(JSVALUE32_64)
+#error "JITStubsX86.h only implements USE(JSVALUE32_64)"
+#endif
+
+namespace JSC {
+
+#if COMPILER(GCC)
+
+#if USE(MASM_PROBE)
+asm (
+".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n"
+HIDE_SYMBOL(ctiMasmProbeTrampoline) "\n"
+SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n"
+
+ "pushfd" "\n"
+
+ // MacroAssembler::probe() has already generated code to store some values.
+ // Together with the eflags pushed above, the top of stack now looks like
+ // this:
+ // esp[0 * ptrSize]: eflags
+ // esp[1 * ptrSize]: return address / saved eip
+ // esp[2 * ptrSize]: probeFunction
+ // esp[3 * ptrSize]: arg1
+ // esp[4 * ptrSize]: arg2
+ // esp[5 * ptrSize]: saved eax
+ // esp[6 * ptrSize]: saved esp
+
+ "movl %esp, %eax" "\n"
+ "subl $" STRINGIZE_VALUE_OF(PROBE_SIZE) ", %esp" "\n"
+
+ // The X86_64 ABI specifies that the worst-case stack alignment requirement
+ // is 32 bytes.
+ "andl $~0x1f, %esp" "\n"
+
+ "movl %ebp, " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%esp)" "\n"
+ "movl %esp, %ebp" "\n" // Save the ProbeContext*.
+
+ "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%ebp)" "\n"
+ "movl %edx, " STRINGIZE_VALUE_OF(PROBE_CPU_EDX_OFFSET) "(%ebp)" "\n"
+ "movl %ebx, " STRINGIZE_VALUE_OF(PROBE_CPU_EBX_OFFSET) "(%ebp)" "\n"
+ "movl %esi, " STRINGIZE_VALUE_OF(PROBE_CPU_ESI_OFFSET) "(%ebp)" "\n"
+ "movl %edi, " STRINGIZE_VALUE_OF(PROBE_CPU_EDI_OFFSET) "(%ebp)" "\n"
+
+ "movl 0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n"
+ "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%ebp)" "\n"
+ "movl 1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n"
+ "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%ebp)" "\n"
+ "movl 2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n"
+ "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "(%ebp)" "\n"
+ "movl 3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n"
+ "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_ARG1_OFFSET) "(%ebp)" "\n"
+ "movl 4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n"
+ "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_ARG2_OFFSET) "(%ebp)" "\n"
+ "movl 5 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n"
+ "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%ebp)" "\n"
+ "movl 6 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax), %ecx" "\n"
+ "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%ebp)" "\n"
+
+ "movdqa %xmm0, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM0_OFFSET) "(%ebp)" "\n"
+ "movdqa %xmm1, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM1_OFFSET) "(%ebp)" "\n"
+ "movdqa %xmm2, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM2_OFFSET) "(%ebp)" "\n"
+ "movdqa %xmm3, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM3_OFFSET) "(%ebp)" "\n"
+ "movdqa %xmm4, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM4_OFFSET) "(%ebp)" "\n"
+ "movdqa %xmm5, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM5_OFFSET) "(%ebp)" "\n"
+ "movdqa %xmm6, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM6_OFFSET) "(%ebp)" "\n"
+ "movdqa %xmm7, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM7_OFFSET) "(%ebp)" "\n"
+
+ // Reserve stack space for the arg while maintaining the required 32-byte
+ // stack pointer alignment:
+ "subl $0x20, %esp" "\n"
+ "movl %ebp, 0(%esp)" "\n" // the ProbeContext* arg.
+
+ "call *" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "(%ebp)" "\n"
+
+ // To enable probes to modify register state, we copy all registers
+ // out of the ProbeContext before returning.
+
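+ // For illustration (dumpRegisters is a hypothetical probe function, not
+ // something defined in this file): a probe installed via
+ //     jit.probe(dumpRegisters, arg1, arg2);
+ // is invoked as
+ //     void dumpRegisters(MacroAssembler::ProbeContext* context);
+ // and may write to any context->cpu field. The restore sequence below is
+ // what makes such writes take effect when the trampoline returns.
+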
+ "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EDX_OFFSET) "(%ebp), %edx" "\n"
+ "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EBX_OFFSET) "(%ebp), %ebx" "\n"
+ "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ESI_OFFSET) "(%ebp), %esi" "\n"
+ "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EDI_OFFSET) "(%ebp), %edi" "\n"
+
+ "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM0_OFFSET) "(%ebp), %xmm0" "\n"
+ "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM1_OFFSET) "(%ebp), %xmm1" "\n"
+ "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM2_OFFSET) "(%ebp), %xmm2" "\n"
+ "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM3_OFFSET) "(%ebp), %xmm3" "\n"
+ "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM4_OFFSET) "(%ebp), %xmm4" "\n"
+ "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM5_OFFSET) "(%ebp), %xmm5" "\n"
+ "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM6_OFFSET) "(%ebp), %xmm6" "\n"
+ "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM7_OFFSET) "(%ebp), %xmm7" "\n"
+
+ // There are 6 more registers left to restore:
+ // eax, ecx, ebp, esp, eip, and eflags.
+ // We need to handle these last few restores carefully because:
+ //
+ // 1. We need to push the return address on the stack for ret to use.
+ // That means we need to write to the stack.
+ // 2. The user probe function may have altered the restore value of esp to
+ // point to the vicinity of one of the restore values for the remaining
+ // registers left to be restored.
+ // That means, for requirement 1, we may end up writing over some of the
+ // restore values. We can check for this, and first copy the restore
+ // values to a "safe area" on the stack before commencing with the action
+ // for requirement 1.
+ // 3. For requirement 2, we need to ensure that the "safe area" is
+ // protected from interrupt handlers overwriting it. Hence, the esp needs
+ // to be adjusted to include the "safe area" before we start copying
+ // the restore values.
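+ //
+ // In C-like pseudocode (an illustrative sketch only):
+ //
+ //     if (context->cpu.esp > &context->cpu.eflags)
+ //         goto trampolineEnd; // no copy needed
+ //     safeArea = context->cpu.esp - 2 * sizeof(ProbeContext);
+ //     esp = safeArea; // claim the memory before copying into it
+ //     copy the eax/ecx/ebp/esp/eip/eflags restore slots into safeArea;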
+
+ "movl %ebp, %eax" "\n"
+ "addl $" STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) ", %eax" "\n"
+ "cmpl %eax, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%ebp)" "\n"
+ "jg " SYMBOL_STRING(ctiMasmProbeTrampolineEnd) "\n"
+
+ // Locate the "safe area" at 2x sizeof(ProbeContext) below where the new
+ // esp will be. This time we don't have to 32-byte align it because we're
+ // not using it to store any xmm regs.
+ "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%ebp), %eax" "\n"
+ "subl $2 * " STRINGIZE_VALUE_OF(PROBE_SIZE) ", %eax" "\n"
+ "movl %eax, %esp" "\n"
+
+ "subl $" STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) ", %eax" "\n"
+ "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%ebp), %ecx" "\n"
+ "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%eax)" "\n"
+ "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%ebp), %ecx" "\n"
+ "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%eax)" "\n"
+ "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%ebp), %ecx" "\n"
+ "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%eax)" "\n"
+ "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%ebp), %ecx" "\n"
+ "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%eax)" "\n"
+ "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%ebp), %ecx" "\n"
+ "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%eax)" "\n"
+ "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%ebp), %ecx" "\n"
+ "movl %ecx, " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%eax)" "\n"
+ "movl %eax, %ebp" "\n"
+
+SYMBOL_STRING(ctiMasmProbeTrampolineEnd) ":" "\n"
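+ // Build a minimal frame (eflags, eax, ecx, ebp, return address) just below
+ // the esp that is about to be restored, then point esp at it so that the
+ // pops and the final ret below consume it.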
+ "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%ebp), %eax" "\n"
+ "subl $5 * " STRINGIZE_VALUE_OF(PTR_SIZE) ", %eax" "\n"
+ // At this point, %esp should be < %eax.
+
+ "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%ebp), %ecx" "\n"
+ "movl %ecx, 0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax)" "\n"
+ "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%ebp), %ecx" "\n"
+ "movl %ecx, 1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax)" "\n"
+ "movl " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%ebp), %ecx" "\n"
+ "movl %ecx, 2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax)" "\n"
+ "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%ebp), %ecx" "\n"
+ "movl %ecx, 3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax)" "\n"
+ "movl " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%ebp), %ecx" "\n"
+ "movl %ecx, 4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%eax)" "\n"
+ "movl %eax, %esp" "\n"
+
+ "popfd" "\n"
+ "popl %eax" "\n"
+ "popl %ecx" "\n"
+ "popl %ebp" "\n"
+ "ret" "\n"
+);
+#endif // USE(MASM_PROBE)
+
+#if OS(WINDOWS)
+extern "C" {
+
+ // FIXME: Since Windows doesn't use the LLInt, we have inline stubs here.
+ // Until the LLInt is changed to support Windows, these stubs need to be updated.
+ asm (
+ ".globl " SYMBOL_STRING(callToJavaScript) "\n"
+ HIDE_SYMBOL(callToJavaScript) "\n"
+ SYMBOL_STRING(callToJavaScript) ":" "\n"
+ "mov (%esp),%edx" "\n"
+ "push %ebp" "\n"
+ "mov %ebp,%eax" "\n"
+ "mov %esp,%ebp" "\n"
+ "push %esi" "\n"
+ "push %edi" "\n"
+ "push %ebx" "\n"
+ "sub $0x1c,%esp" "\n"
+ "mov 0x34(%esp),%ecx" "\n"
+ "mov 0x38(%esp),%esi" "\n"
+ "mov 0x3c(%esp),%ebp" "\n"
+ "sub $0x20,%ebp" "\n"
+ "movl $0x0,0x24(%ebp)" "\n"
+ "movl $0x0,0x20(%ebp)" "\n"
+ "movl $0x0,0x1c(%ebp)" "\n"
+ "mov %ecx,0x18(%ebp)" "\n"
+ "mov (%ecx),%ebx" "\n"
+ "movl $0x0,0x14(%ebp)" "\n"
+ "mov %ebx,0x10(%ebp)" "\n"
+ "movl $0x0,0xc(%ebp)" "\n"
+ "movl $0x1,0x8(%ebp)" "\n"
+ "mov %edx,0x4(%ebp)" "\n"
+ "mov %eax,0x0(%ebp)" "\n"
+ "mov %ebp,%eax" "\n"
+
+ "mov 0x28(%esi),%edx" "\n"
+ "add $0x5,%edx" "\n"
+ "shl $0x3,%edx" "\n"
+ "sub %edx,%ebp" "\n"
+ "mov %eax,0x0(%ebp)" "\n"
+
+ "mov $0x5,%eax" "\n"
+
+ ".copyHeaderLoop:" "\n"
+ "sub $0x1,%eax" "\n"
+ "mov (%esi,%eax,8),%ecx" "\n"
+ "mov %ecx,0x8(%ebp,%eax,8)" "\n"
+ "mov 0x4(%esi,%eax,8),%ecx" "\n"
+ "mov %ecx,0xc(%ebp,%eax,8)" "\n"
+ "test %eax,%eax" "\n"
+ "jne .copyHeaderLoop" "\n"
+
+ "mov 0x18(%esi),%edx" "\n"
+ "sub $0x1,%edx" "\n"
+ "mov 0x28(%esi),%ecx" "\n"
+ "sub $0x1,%ecx" "\n"
+
+ "cmp %ecx,%edx" "\n"
+ "je .copyArgs" "\n"
+
+ "xor %eax,%eax" "\n"
+ "mov $0xfffffffc,%ebx" "\n"
+
+ ".fillExtraArgsLoop:" "\n"
+ "sub $0x1,%ecx" "\n"
+ "mov %eax,0x30(%ebp,%ecx,8)" "\n"
+ "mov %ebx,0x34(%ebp,%ecx,8)" "\n"
+ "cmp %ecx,%edx" "\n"
+ "jne .fillExtraArgsLoop" "\n"
+
+ ".copyArgs:" "\n"
+ "mov 0x2c(%esi),%eax" "\n"
+
+ ".copyArgsLoop:" "\n"
+ "test %edx,%edx" "\n"
+ "je .copyArgsDone" "\n"
+ "sub $0x1,%edx" "\n"
+ "mov (%eax,%edx,8),%ecx" "\n"
+ "mov 0x4(%eax,%edx,8),%ebx" "\n"
+ "mov %ecx,0x30(%ebp,%edx,8)" "\n"
+ "mov %ebx,0x34(%ebp,%edx,8)" "\n"
+ "jmp .copyArgsLoop" "\n"
+
+ ".copyArgsDone:" "\n"
+ "mov 0x34(%esp),%ecx" "\n"
+ "mov %ebp,(%ecx)" "\n"
+
+ "call *0x30(%esp)" "\n"
+
+ "cmpl $0x1,0x8(%ebp)" "\n"
+ "je .calleeFramePopped" "\n"
+ "mov 0x0(%ebp),%ebp" "\n"
+
+ ".calleeFramePopped:" "\n"
+ "mov 0x18(%ebp),%ecx" "\n"
+ "mov 0x10(%ebp),%ebx" "\n"
+ "mov %ebx,(%ecx)" "\n"
+
+ "add $0x1c,%esp" "\n"
+ "pop %ebx" "\n"
+ "pop %edi" "\n"
+ "pop %esi" "\n"
+ "pop %ebp" "\n"
+ "ret" "\n"
+
+ ".globl " SYMBOL_STRING(returnFromJavaScript) "\n"
+ HIDE_SYMBOL(returnFromJavaScript) "\n"
+ SYMBOL_STRING(returnFromJavaScript) ":" "\n"
+ "add $0x1c,%esp" "\n"
+ "pop %ebx" "\n"
+ "pop %edi" "\n"
+ "pop %esi" "\n"
+ "pop %ebp" "\n"
+ "ret" "\n"
+
+ ".globl " SYMBOL_STRING(callToNativeFunction) "\n"
+ HIDE_SYMBOL(callToNativeFunction) "\n"
+ SYMBOL_STRING(callToNativeFunction) ":" "\n"
+ "mov (%esp),%edx" "\n"
+ "push %ebp" "\n"
+ "mov %ebp,%eax" "\n"
+ "mov %esp,%ebp" "\n"
+ "push %esi" "\n"
+ "push %edi" "\n"
+ "push %ebx" "\n"
+ "sub $0x1c,%esp" "\n"
+ "mov 0x34(%esp),%ecx" "\n"
+ "mov 0x38(%esp),%esi" "\n"
+ "mov 0x3c(%esp),%ebp" "\n"
+ "sub $0x20,%ebp" "\n"
+ "movl $0x0,0x24(%ebp)" "\n"
+ "movl $0x0,0x20(%ebp)" "\n"
+ "movl $0x0,0x1c(%ebp)" "\n"
+ "mov %ecx,0x18(%ebp)" "\n"
+ "mov (%ecx),%ebx" "\n"
+ "movl $0x0,0x14(%ebp)" "\n"
+ "mov %ebx,0x10(%ebp)" "\n"
+ "movl $0x0,0xc(%ebp)" "\n"
+ "movl $0x1,0x8(%ebp)" "\n"
+ "mov %edx,0x4(%ebp)" "\n"
+ "mov %eax,0x0(%ebp)" "\n"
+ "mov %ebp,%eax" "\n"
+
+ "mov 0x28(%esi),%edx" "\n"
+ "add $0x5,%edx" "\n"
+ "shl $0x3,%edx" "\n"
+ "sub %edx,%ebp" "\n"
+ "mov %eax,0x0(%ebp)" "\n"
+
+ "mov $0x5,%eax" "\n"
+
+ "copyHeaderLoop:" "\n"
+ "sub $0x1,%eax" "\n"
+ "mov (%esi,%eax,8),%ecx" "\n"
+ "mov %ecx,0x8(%ebp,%eax,8)" "\n"
+ "mov 0x4(%esi,%eax,8),%ecx" "\n"
+ "mov %ecx,0xc(%ebp,%eax,8)" "\n"
+ "test %eax,%eax" "\n"
+ "jne copyHeaderLoop" "\n"
+
+ "mov 0x18(%esi),%edx" "\n"
+ "sub $0x1,%edx" "\n"
+ "mov 0x28(%esi),%ecx" "\n"
+ "sub $0x1,%ecx" "\n"
+
+ "cmp %ecx,%edx" "\n"
+ "je copyArgs" "\n"
+
+ "xor %eax,%eax" "\n"
+ "mov $0xfffffffc,%ebx" "\n"
+
+ "fillExtraArgsLoop:" "\n"
+ "sub $0x1,%ecx" "\n"
+ "mov %eax,0x30(%ebp,%ecx,8)" "\n"
+ "mov %ebx,0x34(%ebp,%ecx,8)" "\n"
+ "cmp %ecx,%edx" "\n"
+ "jne fillExtraArgsLoop" "\n"
+
+ "copyArgs:" "\n"
+ "mov 0x2c(%esi),%eax" "\n"
+
+ "copyArgsLoop:" "\n"
+ "test %edx,%edx" "\n"
+ "je copyArgsDone" "\n"
+ "sub $0x1,%edx" "\n"
+ "mov (%eax,%edx,8),%ecx" "\n"
+ "mov 0x4(%eax,%edx,8),%ebx" "\n"
+ "mov %ecx,0x30(%ebp,%edx,8)" "\n"
+ "mov %ebx,0x34(%ebp,%edx,8)" "\n"
+ "jmp copyArgsLoop" "\n"
+
+ "copyArgsDone:" "\n"
+ "mov 0x34(%esp),%ecx" "\n"
+ "mov %ebp,(%ecx)" "\n"
+
+ "mov 0x30(%esp),%edi" "\n"
+ "mov %ebp,0x30(%esp)" "\n"
+ "mov %ebp,%ecx" "\n"
+ "call *%edi" "\n"
+
+ "cmpl $0x1,0x8(%ebp)" "\n"
+ "je calleeFramePopped" "\n"
+ "mov 0x0(%ebp),%ebp" "\n"
+
+ "calleeFramePopped:" "\n"
+ "mov 0x18(%ebp),%ecx" "\n"
+ "mov 0x10(%ebp),%ebx" "\n"
+ "mov %ebx,(%ecx)" "\n"
+
+ "add $0x1c,%esp" "\n"
+ "pop %ebx" "\n"
+ "pop %edi" "\n"
+ "pop %esi" "\n"
+ "pop %ebp" "\n"
+ "ret" "\n"
+ );
+}
+
+#endif // OS(WINDOWS)
+
+#endif // COMPILER(GCC)
+
+#if COMPILER(MSVC)
+
+extern "C" {
+
+ // FIXME: Since Windows doesn't use the LLInt, we have inline stubs here.
+ // Until the LLInt is changed to support Windows, these stubs need to be updated.
+ __declspec(naked) EncodedJSValue callToJavaScript(void* code, ExecState**, ProtoCallFrame*, Register*)
+ {
+ __asm {
+ mov edx, [esp]
+ push ebp;
+ mov eax, ebp;
+ mov ebp, esp;
+ push esi;
+ push edi;
+ push ebx;
+ sub esp, 0x1c;
+ mov ecx, dword ptr[esp + 0x34];
+ mov esi, dword ptr[esp + 0x38];
+ mov ebp, dword ptr[esp + 0x3c];
+ sub ebp, 0x20;
+ mov dword ptr[ebp + 0x24], 0;
+ mov dword ptr[ebp + 0x20], 0;
+ mov dword ptr[ebp + 0x1c], 0;
+ mov dword ptr[ebp + 0x18], ecx;
+ mov ebx, [ecx];
+ mov dword ptr[ebp + 0x14], 0;
+ mov dword ptr[ebp + 0x10], ebx;
+ mov dword ptr[ebp + 0xc], 0;
+ mov dword ptr[ebp + 0x8], 1;
+ mov dword ptr[ebp + 0x4], edx;
+ mov dword ptr[ebp], eax;
+ mov eax, ebp;
+
+ mov edx, dword ptr[esi + 0x28];
+ add edx, 5;
+ sal edx, 3;
+ sub ebp, edx;
+ mov dword ptr[ebp], eax;
+
+ mov eax, 5;
+
+ copyHeaderLoop:
+ sub eax, 1;
+ mov ecx, dword ptr[esi + eax * 8];
+ mov dword ptr 8[ebp + eax * 8], ecx;
+ mov ecx, dword ptr 4[esi + eax * 8];
+ mov dword ptr 12[ebp + eax * 8], ecx;
+ test eax, eax;
+ jnz copyHeaderLoop;
+
+ mov edx, dword ptr[esi + 0x18];
+ sub edx, 1;
+ mov ecx, dword ptr[esi + 0x28];
+ sub ecx, 1;
+
+ cmp edx, ecx;
+ je copyArgs;
+
+ xor eax, eax;
+ mov ebx, -4;
+
+ fillExtraArgsLoop:
+ sub ecx, 1;
+ mov dword ptr 0x30[ebp + ecx * 8], eax;
+ mov dword ptr 0x34[ebp + ecx * 8], ebx;
+ cmp edx, ecx;
+ jne fillExtraArgsLoop;
+
+ copyArgs:
+ mov eax, dword ptr[esi + 0x2c];
+
+ copyArgsLoop:
+ test edx, edx;
+ jz copyArgsDone;
+ sub edx, 1;
+ mov ecx, dword ptr 0[eax + edx * 8];
+ mov ebx, dword ptr 4[eax + edx * 8];
+ mov dword ptr 0x30[ebp + edx * 8], ecx;
+ mov dword ptr 0x34[ebp + edx * 8], ebx;
+ jmp copyArgsLoop;
+
+ copyArgsDone:
+ mov ecx, dword ptr[esp + 0x34];
+ mov dword ptr[ecx], ebp;
+
+ call dword ptr[esp + 0x30];
+
+ cmp dword ptr[ebp + 8], 1;
+ je calleeFramePopped;
+ mov ebp, dword ptr[ebp];
+
+ calleeFramePopped:
+ mov ecx, dword ptr[ebp + 0x18];
+ mov ebx, dword ptr[ebp + 0x10];
+ mov dword ptr[ecx], ebx;
+
+ add esp, 0x1c;
+ pop ebx;
+ pop edi;
+ pop esi;
+ pop ebp;
+ ret;
+ }
+ }
+
+ __declspec(naked) void returnFromJavaScript()
+ {
+ __asm {
+ add esp, 0x1c;
+ pop ebx;
+ pop edi;
+ pop esi;
+ pop ebp;
+ ret;
+ }
+ }
+
+ __declspec(naked) EncodedJSValue callToNativeFunction(void* code, ExecState**, ProtoCallFrame*, Register*)
+ {
+ __asm {
+ mov edx, [esp]
+ push ebp;
+ mov eax, ebp;
+ mov ebp, esp;
+ push esi;
+ push edi;
+ push ebx;
+ sub esp, 0x1c;
+ mov ecx, [esp + 0x34];
+ mov esi, [esp + 0x38];
+ mov ebp, [esp + 0x3c];
+ sub ebp, 0x20;
+ mov dword ptr[ebp + 0x24], 0;
+ mov dword ptr[ebp + 0x20], 0;
+ mov dword ptr[ebp + 0x1c], 0;
+ mov dword ptr[ebp + 0x18], ecx;
+ mov ebx, [ecx];
+ mov dword ptr[ebp + 0x14], 0;
+ mov dword ptr[ebp + 0x10], ebx;
+ mov dword ptr[ebp + 0xc], 0;
+ mov dword ptr[ebp + 0x8], 1;
+ mov dword ptr[ebp + 0x4], edx;
+ mov dword ptr[ebp], eax;
+ mov eax, ebp;
+
+ mov edx, dword ptr[esi + 0x28];
+ add edx, 5;
+ sal edx, 3;
+ sub ebp, edx;
+ mov dword ptr[ebp], eax;
+
+ mov eax, 5;
+
+ copyHeaderLoop:
+ sub eax, 1;
+ mov ecx, dword ptr[esi + eax * 8];
+ mov dword ptr 8[ebp + eax * 8], ecx;
+ mov ecx, dword ptr 4[esi + eax * 8];
+ mov dword ptr 12[ebp + eax * 8], ecx;
+ test eax, eax;
+ jnz copyHeaderLoop;
+
+ mov edx, dword ptr[esi + 0x18];
+ sub edx, 1;
+ mov ecx, dword ptr[esi + 0x28];
+ sub ecx, 1;
+
+ cmp edx, ecx;
+ je copyArgs;
+
+ xor eax, eax;
+ mov ebx, -4;
+
+ fillExtraArgsLoop:
+ sub ecx, 1;
+ mov dword ptr 0x30[ebp + ecx * 8], eax;
+ mov dword ptr 0x34[ebp + ecx * 8], ebx;
+ cmp edx, ecx;
+ jne fillExtraArgsLoop;
+
+ copyArgs:
+ mov eax, dword ptr[esi + 0x2c];
+
+ copyArgsLoop:
+ test edx, edx;
+ jz copyArgsDone;
+ sub edx, 1;
+ mov ecx, dword ptr 0[eax + edx * 8];
+ mov ebx, dword ptr 4[eax + edx * 8];
+ mov dword ptr 0x30[ebp + edx * 8], ecx;
+ mov dword ptr 0x34[ebp + edx * 8], ebx;
+ jmp copyArgsLoop;
+
+ copyArgsDone:
+ mov ecx, dword ptr[esp + 0x34];
+ mov dword ptr[ecx], ebp;
+
+ mov edi, dword ptr[esp + 0x30];
+ mov dword ptr[esp + 0x30], ebp;
+ mov ecx, ebp;
+ call edi;
+
+ cmp dword ptr[ebp + 8], 1;
+ je calleeFramePopped;
+ mov ebp, dword ptr[ebp];
+
+ calleeFramePopped:
+ mov ecx, dword ptr[ebp + 0x18];
+ mov ebx, dword ptr[ebp + 0x10];
+ mov dword ptr[ecx], ebx;
+
+ add esp, 0x1c;
+ pop ebx;
+ pop edi;
+ pop esi;
+ pop ebp;
+ ret;
+ }
+ }
+}
+
+#endif // COMPILER(MSVC)
+
+} // namespace JSC
+
+#endif // JITStubsX86_h
diff --git a/Source/JavaScriptCore/jit/JITStubsX86Common.h b/Source/JavaScriptCore/jit/JITStubsX86Common.h
new file mode 100644
index 000000000..f102f3b25
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITStubsX86Common.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITStubsX86Common_h
+#define JITStubsX86Common_h
+
+#include "MacroAssembler.h"
+
+#if !CPU(X86) && !CPU(X86_64)
+#error "JITStubsX86Common.h should only be #included if CPU(X86) || CPU(X86_64)"
+#endif
+
+namespace JSC {
+
+#if COMPILER(GCC)
+
+#if USE(MASM_PROBE)
+// The following are offsets for MacroAssembler::ProbeContext fields accessed
+// by the ctiMasmProbeTrampoline stub.
+
+#if CPU(X86)
+#define PTR_SIZE 4
+#else // CPU(X86_64)
+#define PTR_SIZE 8
+#endif
+
+#define PROBE_PROBE_FUNCTION_OFFSET (0 * PTR_SIZE)
+#define PROBE_ARG1_OFFSET (1 * PTR_SIZE)
+#define PROBE_ARG2_OFFSET (2 * PTR_SIZE)
+
+#define PROBE_CPU_EAX_OFFSET (4 * PTR_SIZE)
+#define PROBE_CPU_EBX_OFFSET (5 * PTR_SIZE)
+#define PROBE_CPU_ECX_OFFSET (6 * PTR_SIZE)
+#define PROBE_CPU_EDX_OFFSET (7 * PTR_SIZE)
+#define PROBE_CPU_ESI_OFFSET (8 * PTR_SIZE)
+#define PROBE_CPU_EDI_OFFSET (9 * PTR_SIZE)
+#define PROBE_CPU_EBP_OFFSET (10 * PTR_SIZE)
+#define PROBE_CPU_ESP_OFFSET (11 * PTR_SIZE)
+
+#if CPU(X86)
+#define PROBE_FIRST_SPECIAL_OFFSET (12 * PTR_SIZE)
+#else // CPU(X86_64)
+#define PROBE_CPU_R8_OFFSET (12 * PTR_SIZE)
+#define PROBE_CPU_R9_OFFSET (13 * PTR_SIZE)
+#define PROBE_CPU_R10_OFFSET (14 * PTR_SIZE)
+#define PROBE_CPU_R11_OFFSET (15 * PTR_SIZE)
+#define PROBE_CPU_R12_OFFSET (16 * PTR_SIZE)
+#define PROBE_CPU_R13_OFFSET (17 * PTR_SIZE)
+#define PROBE_CPU_R14_OFFSET (18 * PTR_SIZE)
+#define PROBE_CPU_R15_OFFSET (19 * PTR_SIZE)
+#define PROBE_FIRST_SPECIAL_OFFSET (20 * PTR_SIZE)
+#endif // CPU(X86_64)
+
+#define PROBE_CPU_EIP_OFFSET (PROBE_FIRST_SPECIAL_OFFSET + (0 * PTR_SIZE))
+#define PROBE_CPU_EFLAGS_OFFSET (PROBE_FIRST_SPECIAL_OFFSET + (1 * PTR_SIZE))
+
+#if CPU(X86)
+#define PROBE_FIRST_XMM_OFFSET (PROBE_FIRST_SPECIAL_OFFSET + (4 * PTR_SIZE)) // After padding.
+#else // CPU(X86_64)
+#define PROBE_FIRST_XMM_OFFSET (PROBE_FIRST_SPECIAL_OFFSET + (2 * PTR_SIZE)) // After padding.
+#endif // CPU(X86_64)
+
+#define XMM_SIZE 16
+#define PROBE_CPU_XMM0_OFFSET (PROBE_FIRST_XMM_OFFSET + (0 * XMM_SIZE))
+#define PROBE_CPU_XMM1_OFFSET (PROBE_FIRST_XMM_OFFSET + (1 * XMM_SIZE))
+#define PROBE_CPU_XMM2_OFFSET (PROBE_FIRST_XMM_OFFSET + (2 * XMM_SIZE))
+#define PROBE_CPU_XMM3_OFFSET (PROBE_FIRST_XMM_OFFSET + (3 * XMM_SIZE))
+#define PROBE_CPU_XMM4_OFFSET (PROBE_FIRST_XMM_OFFSET + (4 * XMM_SIZE))
+#define PROBE_CPU_XMM5_OFFSET (PROBE_FIRST_XMM_OFFSET + (5 * XMM_SIZE))
+#define PROBE_CPU_XMM6_OFFSET (PROBE_FIRST_XMM_OFFSET + (6 * XMM_SIZE))
+#define PROBE_CPU_XMM7_OFFSET (PROBE_FIRST_XMM_OFFSET + (7 * XMM_SIZE))
+
+#define PROBE_SIZE (PROBE_CPU_XMM7_OFFSET + XMM_SIZE)
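+
+// As a worked example, on CPU(X86) (PTR_SIZE == 4) the offsets above come to:
+// PROBE_FIRST_SPECIAL_OFFSET == 48, PROBE_CPU_EFLAGS_OFFSET == 52, and
+// PROBE_FIRST_XMM_OFFSET == 64 (a multiple of 16, as movdqa requires), giving
+// PROBE_SIZE == 64 + 8 * 16 == 192. On CPU(X86_64) (PTR_SIZE == 8):
+// PROBE_FIRST_SPECIAL_OFFSET == 160 and PROBE_FIRST_XMM_OFFSET == 176 (also a
+// multiple of 16), giving PROBE_SIZE == 176 + 8 * 16 == 304.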
+
+// These ASSERTs remind you that if you change the layout of ProbeContext,
+// you need to change ctiMasmProbeTrampoline offsets above to match.
+#define PROBE_OFFSETOF(x) offsetof(struct MacroAssembler::ProbeContext, x)
+COMPILE_ASSERT(PROBE_OFFSETOF(probeFunction) == PROBE_PROBE_FUNCTION_OFFSET, ProbeContext_probeFunction_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(arg1) == PROBE_ARG1_OFFSET, ProbeContext_arg1_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(arg2) == PROBE_ARG2_OFFSET, ProbeContext_arg2_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.eax) == PROBE_CPU_EAX_OFFSET, ProbeContext_cpu_eax_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.ecx) == PROBE_CPU_ECX_OFFSET, ProbeContext_cpu_ecx_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.edx) == PROBE_CPU_EDX_OFFSET, ProbeContext_cpu_edx_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.ebx) == PROBE_CPU_EBX_OFFSET, ProbeContext_cpu_ebx_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.esp) == PROBE_CPU_ESP_OFFSET, ProbeContext_cpu_esp_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.ebp) == PROBE_CPU_EBP_OFFSET, ProbeContext_cpu_ebp_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.esi) == PROBE_CPU_ESI_OFFSET, ProbeContext_cpu_esi_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.edi) == PROBE_CPU_EDI_OFFSET, ProbeContext_cpu_edi_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.eip) == PROBE_CPU_EIP_OFFSET, ProbeContext_cpu_eip_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.eflags) == PROBE_CPU_EFLAGS_OFFSET, ProbeContext_cpu_eflags_offset_matches_ctiMasmProbeTrampoline);
+
+#if CPU(X86_64)
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r8) == PROBE_CPU_R8_OFFSET, ProbeContext_cpu_r8_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r9) == PROBE_CPU_R9_OFFSET, ProbeContext_cpu_r9_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r10) == PROBE_CPU_R10_OFFSET, ProbeContext_cpu_r10_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r11) == PROBE_CPU_R11_OFFSET, ProbeContext_cpu_r11_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r12) == PROBE_CPU_R12_OFFSET, ProbeContext_cpu_r12_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r13) == PROBE_CPU_R13_OFFSET, ProbeContext_cpu_r13_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r14) == PROBE_CPU_R14_OFFSET, ProbeContext_cpu_r14_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.r15) == PROBE_CPU_R15_OFFSET, ProbeContext_cpu_r15_offset_matches_ctiMasmProbeTrampoline);
+#endif // CPU(X86_64)
+
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm0) == PROBE_CPU_XMM0_OFFSET, ProbeContext_cpu_xmm0_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm1) == PROBE_CPU_XMM1_OFFSET, ProbeContext_cpu_xmm1_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm2) == PROBE_CPU_XMM2_OFFSET, ProbeContext_cpu_xmm2_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm3) == PROBE_CPU_XMM3_OFFSET, ProbeContext_cpu_xmm3_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm4) == PROBE_CPU_XMM4_OFFSET, ProbeContext_cpu_xmm4_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm5) == PROBE_CPU_XMM5_OFFSET, ProbeContext_cpu_xmm5_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm6) == PROBE_CPU_XMM6_OFFSET, ProbeContext_cpu_xmm6_offset_matches_ctiMasmProbeTrampoline);
+COMPILE_ASSERT(PROBE_OFFSETOF(cpu.xmm7) == PROBE_CPU_XMM7_OFFSET, ProbeContext_cpu_xmm7_offset_matches_ctiMasmProbeTrampoline);
+
+COMPILE_ASSERT(sizeof(MacroAssembler::ProbeContext) == PROBE_SIZE, ProbeContext_size_matches_ctiMasmProbeTrampoline);
+
+// Also double check that the xmm registers are 16-byte (128-bit) aligned as
+// required by the movdqa instruction used in the trampoline.
+COMPILE_ASSERT(!(PROBE_OFFSETOF(cpu.xmm0) % 16), ProbeContext_xmm0_offset_not_aligned_properly);
+#undef PROBE_OFFSETOF
+
+#endif // USE(MASM_PROBE)
+
+#endif // COMPILER(GCC)
+
+} // namespace JSC
+
+#endif // JITStubsX86Common_h
diff --git a/Source/JavaScriptCore/jit/JITStubsX86_64.h b/Source/JavaScriptCore/jit/JITStubsX86_64.h
new file mode 100644
index 000000000..f2ed206ab
--- /dev/null
+++ b/Source/JavaScriptCore/jit/JITStubsX86_64.h
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) 2008, 2009, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca>
+ * Copyright (C) Research In Motion Limited 2010, 2011. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of Apple Computer, Inc. ("Apple") nor the names of
+ * its contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE AND ITS CONTRIBUTORS "AS IS" AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+ * DISCLAIMED. IN NO EVENT SHALL APPLE OR ITS CONTRIBUTORS BE LIABLE FOR ANY
+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
+ * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef JITStubsX86_64_h
+#define JITStubsX86_64_h
+
+#include "JITStubsX86Common.h"
+
+#if !CPU(X86_64)
+#error "JITStubsX86_64.h should only be #included if CPU(X86_64)"
+#endif
+
+#if !USE(JSVALUE64)
+#error "JITStubsX86_64.h only implements USE(JSVALUE64)"
+#endif
+
+namespace JSC {
+
+#if COMPILER(GCC)
+
+#if USE(MASM_PROBE)
+asm (
+".globl " SYMBOL_STRING(ctiMasmProbeTrampoline) "\n"
+HIDE_SYMBOL(ctiMasmProbeTrampoline) "\n"
+SYMBOL_STRING(ctiMasmProbeTrampoline) ":" "\n"
+
+ "pushfq" "\n"
+
+ // MacroAssembler::probe() has already generated code to store some values.
+ // Together with the rflags pushed above, the top of stack now looks like
+ // this:
+ // esp[0 * ptrSize]: rflags
+ // esp[1 * ptrSize]: return address / saved rip
+ // esp[2 * ptrSize]: probeFunction
+ // esp[3 * ptrSize]: arg1
+ // esp[4 * ptrSize]: arg2
+ // esp[5 * ptrSize]: saved rax
+ // esp[6 * ptrSize]: saved rsp
+
+ "movq %rsp, %rax" "\n"
+ "subq $" STRINGIZE_VALUE_OF(PROBE_SIZE) ", %rsp" "\n"
+
+ // The X86_64 ABI specifies that the worst-case stack alignment requirement
+ // is 32 bytes.
+ "andq $~0x1f, %rsp" "\n"
+
+ "movq %rbp, " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%rsp)" "\n"
+ "movq %rsp, %rbp" "\n" // Save the ProbeContext*.
+
+ "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%rbp)" "\n"
+ "movq %rdx, " STRINGIZE_VALUE_OF(PROBE_CPU_EDX_OFFSET) "(%rbp)" "\n"
+ "movq %rbx, " STRINGIZE_VALUE_OF(PROBE_CPU_EBX_OFFSET) "(%rbp)" "\n"
+ "movq %rsi, " STRINGIZE_VALUE_OF(PROBE_CPU_ESI_OFFSET) "(%rbp)" "\n"
+ "movq %rdi, " STRINGIZE_VALUE_OF(PROBE_CPU_EDI_OFFSET) "(%rbp)" "\n"
+
+ "movq 0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n"
+ "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%rbp)" "\n"
+ "movq 1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n"
+ "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%rbp)" "\n"
+ "movq 2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n"
+ "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "(%rbp)" "\n"
+ "movq 3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n"
+ "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_ARG1_OFFSET) "(%rbp)" "\n"
+ "movq 4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n"
+ "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_ARG2_OFFSET) "(%rbp)" "\n"
+ "movq 5 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n"
+ "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%rbp)" "\n"
+ "movq 6 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax), %rcx" "\n"
+ "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rbp)" "\n"
+
+ "movq %r8, " STRINGIZE_VALUE_OF(PROBE_CPU_R8_OFFSET) "(%rbp)" "\n"
+ "movq %r9, " STRINGIZE_VALUE_OF(PROBE_CPU_R9_OFFSET) "(%rbp)" "\n"
+ "movq %r10, " STRINGIZE_VALUE_OF(PROBE_CPU_R10_OFFSET) "(%rbp)" "\n"
+ "movq %r11, " STRINGIZE_VALUE_OF(PROBE_CPU_R11_OFFSET) "(%rbp)" "\n"
+ "movq %r12, " STRINGIZE_VALUE_OF(PROBE_CPU_R12_OFFSET) "(%rbp)" "\n"
+ "movq %r13, " STRINGIZE_VALUE_OF(PROBE_CPU_R13_OFFSET) "(%rbp)" "\n"
+ "movq %r14, " STRINGIZE_VALUE_OF(PROBE_CPU_R14_OFFSET) "(%rbp)" "\n"
+ "movq %r15, " STRINGIZE_VALUE_OF(PROBE_CPU_R15_OFFSET) "(%rbp)" "\n"
+
+ "movdqa %xmm0, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM0_OFFSET) "(%rbp)" "\n"
+ "movdqa %xmm1, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM1_OFFSET) "(%rbp)" "\n"
+ "movdqa %xmm2, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM2_OFFSET) "(%rbp)" "\n"
+ "movdqa %xmm3, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM3_OFFSET) "(%rbp)" "\n"
+ "movdqa %xmm4, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM4_OFFSET) "(%rbp)" "\n"
+ "movdqa %xmm5, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM5_OFFSET) "(%rbp)" "\n"
+ "movdqa %xmm6, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM6_OFFSET) "(%rbp)" "\n"
+ "movdqa %xmm7, " STRINGIZE_VALUE_OF(PROBE_CPU_XMM7_OFFSET) "(%rbp)" "\n"
+
+ "movq %rbp, %rdi" "\n" // the ProbeContext* arg.
+ "call *" STRINGIZE_VALUE_OF(PROBE_PROBE_FUNCTION_OFFSET) "(%rbp)" "\n"
+
+ // To enable probes to modify register state, we copy all registers
+ // out of the ProbeContext before returning.
+
+ "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EDX_OFFSET) "(%rbp), %rdx" "\n"
+ "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EBX_OFFSET) "(%rbp), %rbx" "\n"
+ "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ESI_OFFSET) "(%rbp), %rsi" "\n"
+ "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EDI_OFFSET) "(%rbp), %rdi" "\n"
+
+ "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R8_OFFSET) "(%rbp), %r8" "\n"
+ "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R9_OFFSET) "(%rbp), %r9" "\n"
+ "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R10_OFFSET) "(%rbp), %r10" "\n"
+ "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R11_OFFSET) "(%rbp), %r11" "\n"
+ "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R12_OFFSET) "(%rbp), %r12" "\n"
+ "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R13_OFFSET) "(%rbp), %r13" "\n"
+ "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R14_OFFSET) "(%rbp), %r14" "\n"
+ "movq " STRINGIZE_VALUE_OF(PROBE_CPU_R15_OFFSET) "(%rbp), %r15" "\n"
+
+ "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM0_OFFSET) "(%rbp), %xmm0" "\n"
+ "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM1_OFFSET) "(%rbp), %xmm1" "\n"
+ "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM2_OFFSET) "(%rbp), %xmm2" "\n"
+ "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM3_OFFSET) "(%rbp), %xmm3" "\n"
+ "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM4_OFFSET) "(%rbp), %xmm4" "\n"
+ "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM5_OFFSET) "(%rbp), %xmm5" "\n"
+ "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM6_OFFSET) "(%rbp), %xmm6" "\n"
+ "movdqa " STRINGIZE_VALUE_OF(PROBE_CPU_XMM7_OFFSET) "(%rbp), %xmm7" "\n"
+
+ // There are 6 more registers left to restore:
+ // rax, rcx, rbp, rsp, rip, and rflags.
+ // We need to handle these last few restores carefully because:
+ //
+ // 1. We need to push the return address on the stack for ret to use.
+ // That means we need to write to the stack.
+ // 2. The user probe function may have altered the restore value of rsp to
+ // point to the vicinity of one of the restore values for the remaining
+ // registers left to be restored.
+ // That means, for requirement 1, we may end up writing over some of the
+ // restore values. We can check for this, and first copy the restore
+ // values to a "safe area" on the stack before commencing with the action
+ // for requirement 1.
+ // 3. For requirement 2, we need to ensure that the "safe area" is
+ // protected from interrupt handlers overwriting it. Hence, the rsp needs
+ // to be adjusted to include the "safe area" before we start copying
+ // the restore values.
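+ //
+ // Concretely (an illustrative sketch): if the probe had pointed cpu.rsp
+ // into the middle of this ProbeContext, the five stores performed just
+ // below the restored rsp at ctiMasmProbeTrampolineEnd could overwrite a
+ // slot (e.g. the saved rcx) before it has been read back; copying the six
+ // remaining slots into a safe area first avoids that.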
+
+ "movq %rbp, %rax" "\n"
+ "addq $" STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) ", %rax" "\n"
+ "cmpq %rax, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rbp)" "\n"
+ "jg " SYMBOL_STRING(ctiMasmProbeTrampolineEnd) "\n"
+
+ // Locate the "safe area" at 2x sizeof(ProbeContext) below where the new
+ // rsp will be. This time we don't have to 32-byte align it because we're
+ // not using it to store any xmm regs.
+ "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rbp), %rax" "\n"
+ "subq $2 * " STRINGIZE_VALUE_OF(PROBE_SIZE) ", %rax" "\n"
+ "movq %rax, %rsp" "\n"
+
+ "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%rbp), %rcx" "\n"
+ "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%rax)" "\n"
+ "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%rbp), %rcx" "\n"
+ "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%rax)" "\n"
+ "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%rbp), %rcx" "\n"
+ "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%rax)" "\n"
+ "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rbp), %rcx" "\n"
+ "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rax)" "\n"
+ "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%rbp), %rcx" "\n"
+ "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%rax)" "\n"
+ "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%rbp), %rcx" "\n"
+ "movq %rcx, " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%rax)" "\n"
+ "movq %rax, %rbp" "\n"
+
+SYMBOL_STRING(ctiMasmProbeTrampolineEnd) ":" "\n"
+ "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ESP_OFFSET) "(%rbp), %rax" "\n"
+ "subq $5 * " STRINGIZE_VALUE_OF(PTR_SIZE) ", %rax" "\n"
+ // At this point, %rsp should be < %rax.
+
+ "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EFLAGS_OFFSET) "(%rbp), %rcx" "\n"
+ "movq %rcx, 0 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax)" "\n"
+ "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EAX_OFFSET) "(%rbp), %rcx" "\n"
+ "movq %rcx, 1 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax)" "\n"
+ "movq " STRINGIZE_VALUE_OF(PROBE_CPU_ECX_OFFSET) "(%rbp), %rcx" "\n"
+ "movq %rcx, 2 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax)" "\n"
+ "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EBP_OFFSET) "(%rbp), %rcx" "\n"
+ "movq %rcx, 3 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax)" "\n"
+ "movq " STRINGIZE_VALUE_OF(PROBE_CPU_EIP_OFFSET) "(%rbp), %rcx" "\n"
+ "movq %rcx, 4 * " STRINGIZE_VALUE_OF(PTR_SIZE) "(%rax)" "\n"
+ "movq %rax, %rsp" "\n"
+
+ "popfq" "\n"
+ "popq %rax" "\n"
+ "popq %rcx" "\n"
+ "popq %rbp" "\n"
+ "ret" "\n"
+);
+#endif // USE(MASM_PROBE)
+
+#endif // COMPILER(GCC)
+
+} // namespace JSC
+
+#endif // JITStubsX86_64_h
diff --git a/Source/JavaScriptCore/jit/JITSubGenerator.cpp b/Source/JavaScriptCore/jit/JITSubGenerator.cpp
deleted file mode 100644
index 3ebaaa372..000000000
--- a/Source/JavaScriptCore/jit/JITSubGenerator.cpp
+++ /dev/null
@@ -1,91 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "JITSubGenerator.h"
-
-#if ENABLE(JIT)
-
-namespace JSC {
-
-void JITSubGenerator::generateFastPath(CCallHelpers& jit)
-{
- ASSERT(m_scratchGPR != InvalidGPRReg);
- ASSERT(m_scratchGPR != m_left.payloadGPR());
- ASSERT(m_scratchGPR != m_right.payloadGPR());
-#if USE(JSVALUE32_64)
- ASSERT(m_scratchGPR != m_left.tagGPR());
- ASSERT(m_scratchGPR != m_right.tagGPR());
- ASSERT(m_scratchFPR != InvalidFPRReg);
-#endif
-
- m_didEmitFastPath = true;
-
- CCallHelpers::Jump leftNotInt = jit.branchIfNotInt32(m_left);
- CCallHelpers::Jump rightNotInt = jit.branchIfNotInt32(m_right);
-
- jit.move(m_left.payloadGPR(), m_scratchGPR);
- m_slowPathJumpList.append(jit.branchSub32(CCallHelpers::Overflow, m_right.payloadGPR(), m_scratchGPR));
-
- jit.boxInt32(m_scratchGPR, m_result);
-
- m_endJumpList.append(jit.jump());
-
- if (!jit.supportsFloatingPoint()) {
- m_slowPathJumpList.append(leftNotInt);
- m_slowPathJumpList.append(rightNotInt);
- return;
- }
-
- leftNotInt.link(&jit);
- if (!m_leftOperand.definitelyIsNumber())
- m_slowPathJumpList.append(jit.branchIfNotNumber(m_left, m_scratchGPR));
- if (!m_rightOperand.definitelyIsNumber())
- m_slowPathJumpList.append(jit.branchIfNotNumber(m_right, m_scratchGPR));
-
- jit.unboxDoubleNonDestructive(m_left, m_leftFPR, m_scratchGPR, m_scratchFPR);
- CCallHelpers::Jump rightIsDouble = jit.branchIfNotInt32(m_right);
-
- jit.convertInt32ToDouble(m_right.payloadGPR(), m_rightFPR);
- CCallHelpers::Jump rightWasInteger = jit.jump();
-
- rightNotInt.link(&jit);
- if (!m_rightOperand.definitelyIsNumber())
- m_slowPathJumpList.append(jit.branchIfNotNumber(m_right, m_scratchGPR));
-
- jit.convertInt32ToDouble(m_left.payloadGPR(), m_leftFPR);
-
- rightIsDouble.link(&jit);
- jit.unboxDoubleNonDestructive(m_right, m_rightFPR, m_scratchGPR, m_scratchFPR);
-
- rightWasInteger.link(&jit);
-
- jit.subDouble(m_rightFPR, m_leftFPR);
- jit.boxDouble(m_leftFPR, m_result);
-}
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/JITSubGenerator.h b/Source/JavaScriptCore/jit/JITSubGenerator.h
deleted file mode 100644
index f677e0878..000000000
--- a/Source/JavaScriptCore/jit/JITSubGenerator.h
+++ /dev/null
@@ -1,78 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef JITSubGenerator_h
-#define JITSubGenerator_h
-
-#if ENABLE(JIT)
-
-#include "CCallHelpers.h"
-#include "SnippetOperand.h"
-
-namespace JSC {
-
-class JITSubGenerator {
-public:
- JITSubGenerator(SnippetOperand leftOperand, SnippetOperand rightOperand,
- JSValueRegs result, JSValueRegs left, JSValueRegs right,
- FPRReg leftFPR, FPRReg rightFPR, GPRReg scratchGPR, FPRReg scratchFPR)
- : m_leftOperand(leftOperand)
- , m_rightOperand(rightOperand)
- , m_result(result)
- , m_left(left)
- , m_right(right)
- , m_leftFPR(leftFPR)
- , m_rightFPR(rightFPR)
- , m_scratchGPR(scratchGPR)
- , m_scratchFPR(scratchFPR)
- { }
-
- void generateFastPath(CCallHelpers&);
-
- bool didEmitFastPath() const { return m_didEmitFastPath; }
- CCallHelpers::JumpList& endJumpList() { return m_endJumpList; }
- CCallHelpers::JumpList& slowPathJumpList() { return m_slowPathJumpList; }
-
-private:
- SnippetOperand m_leftOperand;
- SnippetOperand m_rightOperand;
- JSValueRegs m_result;
- JSValueRegs m_left;
- JSValueRegs m_right;
- FPRReg m_leftFPR;
- FPRReg m_rightFPR;
- GPRReg m_scratchGPR;
- FPRReg m_scratchFPR;
- bool m_didEmitFastPath { false };
-
- CCallHelpers::JumpList m_endJumpList;
- CCallHelpers::JumpList m_slowPathJumpList;
-};
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
-
-#endif // JITSubGenerator_h
diff --git a/Source/JavaScriptCore/jit/JITThunks.cpp b/Source/JavaScriptCore/jit/JITThunks.cpp
index 5d4269d61..4c48163e9 100644
--- a/Source/JavaScriptCore/jit/JITThunks.cpp
+++ b/Source/JavaScriptCore/jit/JITThunks.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2013, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -31,12 +31,12 @@
#include "Executable.h"
#include "JIT.h"
#include "VM.h"
-#include "JSCInlines.h"
+#include "Operations.h"
namespace JSC {
JITThunks::JITThunks()
- : m_hostFunctionStubMap(std::make_unique<HostFunctionStubMap>())
+ : m_hostFunctionStubMap(adoptPtr(new HostFunctionStubMap))
{
}
@@ -46,27 +46,24 @@ JITThunks::~JITThunks()
MacroAssemblerCodePtr JITThunks::ctiNativeCall(VM* vm)
{
+#if ENABLE(LLINT)
if (!vm->canUseJIT())
return MacroAssemblerCodePtr::createLLIntCodePtr(llint_native_call_trampoline);
+#endif
return ctiStub(vm, nativeCallGenerator).code();
}
-
MacroAssemblerCodePtr JITThunks::ctiNativeConstruct(VM* vm)
{
+#if ENABLE(LLINT)
if (!vm->canUseJIT())
return MacroAssemblerCodePtr::createLLIntCodePtr(llint_native_construct_trampoline);
+#endif
return ctiStub(vm, nativeConstructGenerator).code();
}
-MacroAssemblerCodePtr JITThunks::ctiNativeTailCall(VM* vm)
-{
- ASSERT(vm->canUseJIT());
- return ctiStub(vm, nativeTailCallGenerator).code();
-}
-
MacroAssemblerCodeRef JITThunks::ctiStub(VM* vm, ThunkGenerator generator)
{
- LockHolder locker(m_lock);
+ Locker locker(m_lock);
CTIStubMap::AddResult entry = m_ctiStubMap.add(generator, MacroAssemblerCodeRef());
if (entry.isNewEntry) {
// Compilation thread can only retrieve existing entries.
@@ -76,54 +73,42 @@ MacroAssemblerCodeRef JITThunks::ctiStub(VM* vm, ThunkGenerator generator)
return entry.iterator->value;
}
-void JITThunks::finalize(Handle<Unknown> handle, void*)
-{
- auto* nativeExecutable = jsCast<NativeExecutable*>(handle.get().asCell());
- weakRemove(*m_hostFunctionStubMap, std::make_pair(nativeExecutable->function(), nativeExecutable->constructor()), nativeExecutable);
-}
-
-NativeExecutable* JITThunks::hostFunctionStub(VM* vm, NativeFunction function, NativeFunction constructor, const String& name)
+NativeExecutable* JITThunks::hostFunctionStub(VM* vm, NativeFunction function, NativeFunction constructor)
{
ASSERT(!isCompilationThread());
if (NativeExecutable* nativeExecutable = m_hostFunctionStubMap->get(std::make_pair(function, constructor)))
return nativeExecutable;
- NativeExecutable* nativeExecutable = NativeExecutable::create(
- *vm,
- adoptRef(new NativeJITCode(JIT::compileCTINativeCall(vm, function), JITCode::HostCallThunk)),
- function,
- adoptRef(new NativeJITCode(MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct(vm)), JITCode::HostCallThunk)),
- constructor, NoIntrinsic, name);
- weakAdd(*m_hostFunctionStubMap, std::make_pair(function, constructor), Weak<NativeExecutable>(nativeExecutable, this));
+ NativeExecutable* nativeExecutable = NativeExecutable::create(*vm, JIT::compileCTINativeCall(vm, function), function, MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct(vm)), constructor, NoIntrinsic);
+ weakAdd(*m_hostFunctionStubMap, std::make_pair(function, constructor), Weak<NativeExecutable>(nativeExecutable));
return nativeExecutable;
}
-NativeExecutable* JITThunks::hostFunctionStub(VM* vm, NativeFunction function, ThunkGenerator generator, Intrinsic intrinsic, const String& name)
+NativeExecutable* JITThunks::hostFunctionStub(VM* vm, NativeFunction function, ThunkGenerator generator, Intrinsic intrinsic)
{
ASSERT(!isCompilationThread());
- ASSERT(vm->canUseJIT());
if (NativeExecutable* nativeExecutable = m_hostFunctionStubMap->get(std::make_pair(function, &callHostFunctionAsConstructor)))
return nativeExecutable;
- RefPtr<JITCode> forCall;
+ MacroAssemblerCodeRef code;
if (generator) {
- MacroAssemblerCodeRef entry = generator(vm);
- forCall = adoptRef(new DirectJITCode(entry, entry.code(), JITCode::HostCallThunk));
+ if (vm->canUseJIT())
+ code = generator(vm);
+ else
+ code = MacroAssemblerCodeRef();
} else
- forCall = adoptRef(new NativeJITCode(JIT::compileCTINativeCall(vm, function), JITCode::HostCallThunk));
-
- RefPtr<JITCode> forConstruct = adoptRef(new NativeJITCode(MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct(vm)), JITCode::HostCallThunk));
-
- NativeExecutable* nativeExecutable = NativeExecutable::create(*vm, forCall, function, forConstruct, callHostFunctionAsConstructor, intrinsic, name);
- weakAdd(*m_hostFunctionStubMap, std::make_pair(function, &callHostFunctionAsConstructor), Weak<NativeExecutable>(nativeExecutable, this));
+ code = JIT::compileCTINativeCall(vm, function);
+
+ NativeExecutable* nativeExecutable = NativeExecutable::create(*vm, code, function, MacroAssemblerCodeRef::createSelfManagedCodeRef(ctiNativeConstruct(vm)), callHostFunctionAsConstructor, intrinsic);
+ weakAdd(*m_hostFunctionStubMap, std::make_pair(function, &callHostFunctionAsConstructor), Weak<NativeExecutable>(nativeExecutable));
return nativeExecutable;
}
void JITThunks::clearHostFunctionStubs()
{
- m_hostFunctionStubMap = nullptr;
+ m_hostFunctionStubMap.clear();
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/jit/JITThunks.h b/Source/JavaScriptCore/jit/JITThunks.h
index f17f56eb6..97e7ecd6b 100644
--- a/Source/JavaScriptCore/jit/JITThunks.h
+++ b/Source/JavaScriptCore/jit/JITThunks.h
@@ -26,6 +26,8 @@
#ifndef JITThunks_h
#define JITThunks_h
+#include <wtf/Platform.h>
+
#if ENABLE(JIT)
#include "CallData.h"
@@ -34,9 +36,9 @@
#include "MacroAssemblerCodeRef.h"
#include "ThunkGenerator.h"
#include "Weak.h"
-#include "WeakHandleOwner.h"
#include "WeakInlines.h"
#include <wtf/HashMap.h>
+#include <wtf/OwnPtr.h>
#include <wtf/RefPtr.h>
#include <wtf/ThreadingPrimitives.h>
@@ -45,30 +47,30 @@ namespace JSC {
class VM;
class NativeExecutable;
-class JITThunks final : private WeakHandleOwner {
- WTF_MAKE_FAST_ALLOCATED;
+class JITThunks {
public:
JITThunks();
- virtual ~JITThunks();
+ ~JITThunks();
MacroAssemblerCodePtr ctiNativeCall(VM*);
MacroAssemblerCodePtr ctiNativeConstruct(VM*);
- MacroAssemblerCodePtr ctiNativeTailCall(VM*);
MacroAssemblerCodeRef ctiStub(VM*, ThunkGenerator);
- NativeExecutable* hostFunctionStub(VM*, NativeFunction, NativeFunction constructor, const String& name);
- NativeExecutable* hostFunctionStub(VM*, NativeFunction, ThunkGenerator, Intrinsic, const String& name);
+ NativeExecutable* hostFunctionStub(VM*, NativeFunction, NativeFunction constructor);
+ NativeExecutable* hostFunctionStub(VM*, NativeFunction, ThunkGenerator, Intrinsic);
void clearHostFunctionStubs();
private:
- void finalize(Handle<Unknown>, void* context) override;
+ // Main thread can hold this lock for a while, so use an adaptive mutex.
+ typedef Mutex Lock;
+ typedef MutexLocker Locker;
typedef HashMap<ThunkGenerator, MacroAssemblerCodeRef> CTIStubMap;
CTIStubMap m_ctiStubMap;
typedef HashMap<std::pair<NativeFunction, NativeFunction>, Weak<NativeExecutable>> HostFunctionStubMap;
- std::unique_ptr<HostFunctionStubMap> m_hostFunctionStubMap;
+ OwnPtr<HostFunctionStubMap> m_hostFunctionStubMap;
Lock m_lock;
};
diff --git a/Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.cpp b/Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.cpp
index 876b0401b..c83125da4 100644
--- a/Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.cpp
+++ b/Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.cpp
@@ -30,22 +30,20 @@
#include "CodeBlock.h"
#include "Executable.h"
-#include "JSCInlines.h"
namespace JSC {
JITToDFGDeferredCompilationCallback::JITToDFGDeferredCompilationCallback() { }
JITToDFGDeferredCompilationCallback::~JITToDFGDeferredCompilationCallback() { }
-Ref<JITToDFGDeferredCompilationCallback> JITToDFGDeferredCompilationCallback::create()
+PassRefPtr<JITToDFGDeferredCompilationCallback> JITToDFGDeferredCompilationCallback::create()
{
- return adoptRef(*new JITToDFGDeferredCompilationCallback());
+ return adoptRef(new JITToDFGDeferredCompilationCallback());
}
void JITToDFGDeferredCompilationCallback::compilationDidBecomeReadyAsynchronously(
- CodeBlock* codeBlock, CodeBlock* profiledDFGCodeBlock)
+ CodeBlock* codeBlock)
{
- ASSERT_UNUSED(profiledDFGCodeBlock, !profiledDFGCodeBlock);
ASSERT(codeBlock->alternative()->jitType() == JITCode::BaselineJIT);
if (Options::verboseOSR())
@@ -55,20 +53,17 @@ void JITToDFGDeferredCompilationCallback::compilationDidBecomeReadyAsynchronousl
}
void JITToDFGDeferredCompilationCallback::compilationDidComplete(
- CodeBlock* codeBlock, CodeBlock* profiledDFGCodeBlock, CompilationResult result)
+ CodeBlock* codeBlock, CompilationResult result)
{
- ASSERT(!profiledDFGCodeBlock);
ASSERT(codeBlock->alternative()->jitType() == JITCode::BaselineJIT);
if (Options::verboseOSR())
dataLog("Optimizing compilation of ", *codeBlock, " result: ", result, "\n");
if (result == CompilationSuccessful)
- codeBlock->ownerScriptExecutable()->installCode(codeBlock);
+ codeBlock->install();
codeBlock->alternative()->setOptimizationThresholdBasedOnCompilationResult(result);
-
- DeferredCompilationCallback::compilationDidComplete(codeBlock, profiledDFGCodeBlock, result);
}
} // JSC
diff --git a/Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.h b/Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.h
index af2532f92..cf1c0770c 100644
--- a/Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.h
+++ b/Source/JavaScriptCore/jit/JITToDFGDeferredCompilationCallback.h
@@ -26,6 +26,8 @@
#ifndef JITToDFGDeferredCompilationCallback_h
#define JITToDFGDeferredCompilationCallback_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
#include "DeferredCompilationCallback.h"
@@ -42,10 +44,10 @@ protected:
public:
virtual ~JITToDFGDeferredCompilationCallback();
- static Ref<JITToDFGDeferredCompilationCallback> create();
+ static PassRefPtr<JITToDFGDeferredCompilationCallback> create();
- virtual void compilationDidBecomeReadyAsynchronously(CodeBlock*, CodeBlock* profiledDFGCodeBlock) override;
- virtual void compilationDidComplete(CodeBlock*, CodeBlock* profiledDFGCodeBlock, CompilationResult) override;
+ virtual void compilationDidBecomeReadyAsynchronously(CodeBlock*) override;
+ virtual void compilationDidComplete(CodeBlock*, CompilationResult) override;
};
} // namespace JSC
diff --git a/Source/JavaScriptCore/jit/JITWriteBarrier.h b/Source/JavaScriptCore/jit/JITWriteBarrier.h
index b410ecadb..ca2ca6eb2 100644
--- a/Source/JavaScriptCore/jit/JITWriteBarrier.h
+++ b/Source/JavaScriptCore/jit/JITWriteBarrier.h
@@ -31,7 +31,6 @@
#include "MacroAssembler.h"
#include "SlotVisitor.h"
#include "UnusedPointer.h"
-#include "VM.h"
#include "WriteBarrier.h"
namespace JSC {
@@ -43,7 +42,8 @@ class VM;
#define JITWriteBarrierFlag ((void*)2)
class JITWriteBarrierBase {
public:
- explicit operator bool() const { return get(); }
+ typedef void* (JITWriteBarrierBase::*UnspecifiedBoolType);
+ operator UnspecifiedBoolType*() const { return get() ? reinterpret_cast<UnspecifiedBoolType*>(1) : 0; }
bool operator!() const { return !get(); }
void setFlagOnBarrier()
@@ -77,9 +77,9 @@ protected:
{
}
- void set(VM& vm, CodeLocationDataLabelPtr location, JSCell* owner, JSCell* value)
+ void set(VM&, CodeLocationDataLabelPtr location, JSCell* owner, JSCell* value)
{
- vm.heap.writeBarrier(owner, value);
+ Heap::writeBarrier(owner, value);
m_location = location;
ASSERT(((!!m_location) && m_location.executableAddress() != JITWriteBarrierFlag) || (location.executableAddress() == m_location.executableAddress()));
MacroAssembler::repatchPointer(m_location, value);
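The JITWriteBarrierBase hunk above swaps C++11's explicit operator bool() for the pre-C++11 "safe bool" idiom, which emulates an explicit conversion on older toolchains: converting to an obscure pointer-to-member type allows boolean tests while blocking implicit conversion to integers. A standalone sketch of the idiom (Handle is a hypothetical name):

    class Handle {
    public:
        Handle() : m_value(0) { }
        explicit Handle(void* value) : m_value(value) { }

        typedef void* (Handle::*UnspecifiedBoolType);
        operator UnspecifiedBoolType*() const
        {
            return m_value ? reinterpret_cast<UnspecifiedBoolType*>(1) : 0;
        }

    private:
        void* m_value;
    };

    // Handle h;
    // if (h) { ... }   // boolean contexts still work
    // int n = h;       // ill-formed here, unlike with a plain (non-explicit) operator bool()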
diff --git a/Source/JavaScriptCore/jit/JSInterfaceJIT.h b/Source/JavaScriptCore/jit/JSInterfaceJIT.h
index 201e3ab2b..ac1ab7965 100644
--- a/Source/JavaScriptCore/jit/JSInterfaceJIT.h
+++ b/Source/JavaScriptCore/jit/JSInterfaceJIT.h
@@ -32,6 +32,7 @@
#include "GPRInfo.h"
#include "JITCode.h"
#include "JITOperations.h"
+#include "JITStubs.h"
#include "JSCJSValue.h"
#include "JSStack.h"
#include "JSString.h"
@@ -49,7 +50,9 @@ namespace JSC {
}
#if USE(JSVALUE32_64)
- static const unsigned Int32Tag = static_cast<unsigned>(JSValue::Int32Tag);
+ // Can't just propagate JSValue::Int32Tag, as Visual Studio doesn't like it.
+ static const unsigned Int32Tag = 0xffffffff;
+ COMPILE_ASSERT(Int32Tag == JSValue::Int32Tag, Int32Tag_out_of_sync);
#else
static const unsigned Int32Tag = static_cast<unsigned>(TagTypeNumber >> 32);
#endif
@@ -64,16 +67,17 @@ namespace JSC {
#if USE(JSVALUE64)
Jump emitJumpIfNotJSCell(RegisterID);
- Jump emitJumpIfNumber(RegisterID);
- Jump emitJumpIfNotNumber(RegisterID);
- void emitTagInt(RegisterID src, RegisterID dest);
+ Jump emitJumpIfImmediateNumber(RegisterID reg);
+ Jump emitJumpIfNotImmediateNumber(RegisterID reg);
+ void emitFastArithImmToInt(RegisterID reg);
+ void emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest);
#endif
- Jump emitJumpIfNotType(RegisterID baseReg, JSType);
+ Jump emitJumpIfNotType(RegisterID baseReg, RegisterID scratchReg, JSType);
void emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister);
void emitPutToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry);
- void emitPutToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry);
+ void emitPutImmediateToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry);
void emitPutCellToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry);
inline Address payloadFor(int index, RegisterID base = callFrameRegister);
@@ -152,11 +156,11 @@ namespace JSC {
return branchTest64(NonZero, reg, tagMaskRegister);
}
- ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfNumber(RegisterID reg)
+ ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfImmediateNumber(RegisterID reg)
{
return branchTest64(NonZero, reg, tagTypeNumberRegister);
}
- ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfNotNumber(RegisterID reg)
+ ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfNotImmediateNumber(RegisterID reg)
{
return branchTest64(Zero, reg, tagTypeNumberRegister);
}
@@ -177,7 +181,7 @@ namespace JSC {
inline JSInterfaceJIT::Jump JSInterfaceJIT::emitLoadDouble(unsigned virtualRegisterIndex, FPRegisterID dst, RegisterID scratch)
{
load64(addressFor(virtualRegisterIndex), scratch);
- Jump notNumber = emitJumpIfNotNumber(scratch);
+ Jump notNumber = emitJumpIfNotImmediateNumber(scratch);
Jump notInt = branch64(Below, scratch, tagTypeNumberRegister);
convertInt32ToDouble(scratch, dst);
Jump done = jump();
@@ -188,8 +192,12 @@ namespace JSC {
return notNumber;
}
+ ALWAYS_INLINE void JSInterfaceJIT::emitFastArithImmToInt(RegisterID)
+ {
+ }
+
// operand is int32_t, must have been zero-extended if register is 64-bit.
- ALWAYS_INLINE void JSInterfaceJIT::emitTagInt(RegisterID src, RegisterID dest)
+ ALWAYS_INLINE void JSInterfaceJIT::emitFastArithIntToImmNoCheck(RegisterID src, RegisterID dest)
{
if (src != dest)
move(src, dest);
@@ -216,9 +224,10 @@ namespace JSC {
}
#endif
- ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfNotType(RegisterID baseReg, JSType type)
+ ALWAYS_INLINE JSInterfaceJIT::Jump JSInterfaceJIT::emitJumpIfNotType(RegisterID baseReg, RegisterID scratchReg, JSType type)
{
- return branch8(NotEqual, Address(baseReg, JSCell::typeInfoTypeOffset()), TrustedImm32(type));
+ loadPtr(Address(baseReg, JSCell::structureOffset()), scratchReg);
+ return branch8(NotEqual, Address(scratchReg, Structure::typeInfoTypeOffset()), TrustedImm32(type));
}
ALWAYS_INLINE void JSInterfaceJIT::emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry entry, RegisterID to, RegisterID from)
@@ -235,7 +244,7 @@ namespace JSC {
#endif
}
- ALWAYS_INLINE void JSInterfaceJIT::emitPutToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry entry)
+ ALWAYS_INLINE void JSInterfaceJIT::emitPutImmediateToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry entry)
{
storePtr(TrustedImmPtr(value), Address(callFrameRegister, entry * sizeof(Register)));
}
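For orientation, the renamed emitFastArithIntToImmNoCheck()/emitJumpIfImmediateNumber() helpers above implement JSC's JSVALUE64 encoding: an int32 becomes a JSValue by or-ing in TagTypeNumber, all tag bits set means int32, and any tag bit set means number (int32 or double). A C++ model of the same bit tests, assuming the constant from this era's JSCJSValue.h (tagInt32/isNumber/isInt32 are illustrative names):

    #include <cstdint>

    static const uint64_t TagTypeNumber = 0xffff000000000000ull;  // per JSCJSValue.h (JSVALUE64)

    uint64_t tagInt32(int32_t value)
    {
        // The source must be zero-extended, as the comment above requires; then tag it.
        return static_cast<uint32_t>(value) | TagTypeNumber;
    }

    bool isNumber(uint64_t encoded)
    {
        // Mirrors emitJumpIfImmediateNumber(): any tag bit set means int32 or double.
        return (encoded & TagTypeNumber) != 0;
    }

    bool isInt32(uint64_t encoded)
    {
        // Mirrors the branch64(Below, ..., tagTypeNumberRegister) test: all tag bits set.
        return (encoded & TagTypeNumber) == TagTypeNumber;
    }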
diff --git a/Source/JavaScriptCore/jit/PCToCodeOriginMap.cpp b/Source/JavaScriptCore/jit/PCToCodeOriginMap.cpp
deleted file mode 100644
index 8676d2de0..000000000
--- a/Source/JavaScriptCore/jit/PCToCodeOriginMap.cpp
+++ /dev/null
@@ -1,301 +0,0 @@
-/*
- * Copyright (C) 2016 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "PCToCodeOriginMap.h"
-
-#if ENABLE(JIT)
-
-#include "B3PCToOriginMap.h"
-#include "DFGNode.h"
-#include "LinkBuffer.h"
-
-namespace JSC {
-
-namespace {
-
-class DeltaCompressionBuilder {
-public:
- DeltaCompressionBuilder(size_t maxSize)
- : m_offset(0)
- , m_maxSize(maxSize)
- {
- m_buffer = static_cast<uint8_t*>(fastMalloc(m_maxSize));
- }
-
- template <typename T>
- void write(T item)
- {
- RELEASE_ASSERT(m_offset + sizeof(T) <= m_maxSize);
- static const uint8_t mask = std::numeric_limits<uint8_t>::max();
- for (unsigned i = 0; i < sizeof(T); i++) {
- *(m_buffer + m_offset) = static_cast<uint8_t>(item & mask);
- item = item >> (sizeof(uint8_t) * 8);
- m_offset += 1;
- }
- }
-
- uint8_t* m_buffer;
- size_t m_offset;
- size_t m_maxSize;
-};
-
-class DeltaCompressionReader {
-public:
- DeltaCompressionReader(uint8_t* buffer, size_t size)
- : m_buffer(buffer)
- , m_size(size)
- , m_offset(0)
- { }
-
- template <typename T>
- T read()
- {
- RELEASE_ASSERT(m_offset + sizeof(T) <= m_size);
- T result = 0;
- for (unsigned i = 0; i < sizeof(T); i++) {
- uint8_t bitsAsInt8 = *(m_buffer + m_offset);
- T bits = static_cast<T>(bitsAsInt8);
- bits = bits << (sizeof(uint8_t) * 8 * i);
- result |= bits;
- m_offset += 1;
- }
- return result;
- }
-
-private:
- uint8_t* m_buffer;
- size_t m_size;
- size_t m_offset;
-};
-
-} // anonymous namespace
-
-PCToCodeOriginMapBuilder::PCToCodeOriginMapBuilder(VM& vm)
- : m_vm(vm)
- , m_shouldBuildMapping(vm.shouldBuilderPCToCodeOriginMapping())
-{ }
-
-PCToCodeOriginMapBuilder::PCToCodeOriginMapBuilder(PCToCodeOriginMapBuilder&& other)
- : m_vm(other.m_vm)
- , m_codeRanges(WTFMove(other.m_codeRanges))
- , m_shouldBuildMapping(other.m_shouldBuildMapping)
-{ }
-
-#if ENABLE(FTL_JIT)
-PCToCodeOriginMapBuilder::PCToCodeOriginMapBuilder(VM& vm, B3::PCToOriginMap&& b3PCToOriginMap)
- : m_vm(vm)
- , m_shouldBuildMapping(vm.shouldBuilderPCToCodeOriginMapping())
-{
- if (!m_shouldBuildMapping)
- return;
-
- for (const B3::PCToOriginMap::OriginRange& originRange : b3PCToOriginMap.ranges()) {
- DFG::Node* node = bitwise_cast<DFG::Node*>(originRange.origin.data());
- if (node)
- appendItem(originRange.label, node->origin.semantic);
- else
- appendItem(originRange.label, PCToCodeOriginMapBuilder::defaultCodeOrigin());
- }
-}
-#endif
-
-void PCToCodeOriginMapBuilder::appendItem(MacroAssembler::Label label, const CodeOrigin& codeOrigin)
-{
- if (!m_shouldBuildMapping)
- return;
-
- if (m_codeRanges.size()) {
- if (m_codeRanges.last().end == label)
- return;
- m_codeRanges.last().end = label;
- if (m_codeRanges.last().codeOrigin == codeOrigin || !codeOrigin)
- return;
- }
-
- m_codeRanges.append(CodeRange{label, label, codeOrigin});
-}
-
-
-static const uint8_t sentinelPCDelta = 0;
-static const int8_t sentinelBytecodeDelta = 0;
-
-PCToCodeOriginMap::PCToCodeOriginMap(PCToCodeOriginMapBuilder&& builder, LinkBuffer& linkBuffer)
-{
- RELEASE_ASSERT(builder.didBuildMapping());
-
- if (!builder.m_codeRanges.size()) {
- m_pcRangeStart = std::numeric_limits<uintptr_t>::max();
- m_pcRangeEnd = std::numeric_limits<uintptr_t>::max();
-
- m_compressedPCBufferSize = 0;
- m_compressedPCs = nullptr;
-
- m_compressedCodeOriginsSize = 0;
- m_compressedCodeOrigins = nullptr;
-
- return;
- }
-
- // We do a final touch-up on the last range here because of how we generate the table.
- // The final range (if non-empty) would be ignored if we didn't append any (arbitrary)
- // range as the last item of the vector.
- PCToCodeOriginMapBuilder::CodeRange& last = builder.m_codeRanges.last();
- if (!(last.start == last.end))
- builder.m_codeRanges.append(PCToCodeOriginMapBuilder::CodeRange{ last.end, last.end, last.codeOrigin }); // This range will never actually be found, but it ensures the real last range is found.
-
- DeltaCompressionBuilder pcCompressor((sizeof(uintptr_t) + sizeof(uint8_t)) * builder.m_codeRanges.size());
- void* lastPCValue = nullptr;
- auto buildPCTable = [&] (void* pcValue) {
- RELEASE_ASSERT(pcValue > lastPCValue);
- uintptr_t delta = bitwise_cast<uintptr_t>(pcValue) - bitwise_cast<uintptr_t>(lastPCValue);
- RELEASE_ASSERT(delta != sentinelPCDelta);
- lastPCValue = pcValue;
- if (delta > std::numeric_limits<uint8_t>::max()) {
- pcCompressor.write<uint8_t>(sentinelPCDelta);
- pcCompressor.write<uintptr_t>(delta);
- return;
- }
-
- pcCompressor.write<uint8_t>(static_cast<uint8_t>(delta));
- };
-
- DeltaCompressionBuilder codeOriginCompressor((sizeof(intptr_t) + sizeof(int8_t) + sizeof(int8_t) + sizeof(InlineCallFrame*)) * builder.m_codeRanges.size());
- CodeOrigin lastCodeOrigin(0, nullptr);
- auto buildCodeOriginTable = [&] (const CodeOrigin& codeOrigin) {
- intptr_t delta = static_cast<intptr_t>(codeOrigin.bytecodeIndex) - static_cast<intptr_t>(lastCodeOrigin.bytecodeIndex);
- lastCodeOrigin = codeOrigin;
- if (delta > std::numeric_limits<int8_t>::max() || delta < std::numeric_limits<int8_t>::min() || delta == sentinelBytecodeDelta) {
- codeOriginCompressor.write<int8_t>(sentinelBytecodeDelta);
- codeOriginCompressor.write<intptr_t>(delta);
- } else
- codeOriginCompressor.write<int8_t>(static_cast<int8_t>(delta));
-
- int8_t hasInlineCallFrameByte = codeOrigin.inlineCallFrame ? 1 : 0;
- codeOriginCompressor.write<int8_t>(hasInlineCallFrameByte);
- if (hasInlineCallFrameByte)
- codeOriginCompressor.write<uintptr_t>(bitwise_cast<uintptr_t>(codeOrigin.inlineCallFrame));
- };
-
- m_pcRangeStart = bitwise_cast<uintptr_t>(linkBuffer.locationOf(builder.m_codeRanges.first().start).dataLocation());
- m_pcRangeEnd = bitwise_cast<uintptr_t>(linkBuffer.locationOf(builder.m_codeRanges.last().end).dataLocation());
- m_pcRangeEnd -= 1;
-
- for (unsigned i = 0; i < builder.m_codeRanges.size(); i++) {
- PCToCodeOriginMapBuilder::CodeRange& codeRange = builder.m_codeRanges[i];
- void* start = linkBuffer.locationOf(codeRange.start).dataLocation();
- void* end = linkBuffer.locationOf(codeRange.end).dataLocation();
- ASSERT(m_pcRangeStart <= bitwise_cast<uintptr_t>(start));
- ASSERT(m_pcRangeEnd >= bitwise_cast<uintptr_t>(end) - 1);
- if (start == end)
- ASSERT(i == builder.m_codeRanges.size() - 1);
- if (i > 0)
- ASSERT(linkBuffer.locationOf(builder.m_codeRanges[i - 1].end).dataLocation() == start);
-
- buildPCTable(start);
- buildCodeOriginTable(codeRange.codeOrigin);
- }
-
- m_compressedPCBufferSize = pcCompressor.m_offset;
- m_compressedPCs = static_cast<uint8_t*>(fastRealloc(pcCompressor.m_buffer, m_compressedPCBufferSize));
-
- m_compressedCodeOriginsSize = codeOriginCompressor.m_offset;
- m_compressedCodeOrigins = static_cast<uint8_t*>(fastRealloc(codeOriginCompressor.m_buffer, m_compressedCodeOriginsSize));
-}
-
-PCToCodeOriginMap::~PCToCodeOriginMap()
-{
- if (m_compressedPCs)
- fastFree(m_compressedPCs);
- if (m_compressedCodeOrigins)
- fastFree(m_compressedCodeOrigins);
-}
-
-double PCToCodeOriginMap::memorySize()
-{
- double size = 0;
- size += m_compressedPCBufferSize;
- size += m_compressedCodeOriginsSize;
- return size;
-}
-
-Optional<CodeOrigin> PCToCodeOriginMap::findPC(void* pc) const
-{
- uintptr_t pcAsInt = bitwise_cast<uintptr_t>(pc);
- if (!(m_pcRangeStart <= pcAsInt && pcAsInt <= m_pcRangeEnd))
- return Nullopt;
-
- uintptr_t currentPC = 0;
- CodeOrigin currentCodeOrigin(0, nullptr);
-
- DeltaCompressionReader pcReader(m_compressedPCs, m_compressedPCBufferSize);
- DeltaCompressionReader codeOriginReader(m_compressedCodeOrigins, m_compressedCodeOriginsSize);
- while (true) {
- uintptr_t previousPC = currentPC;
- {
- uint8_t value = pcReader.read<uint8_t>();
- uintptr_t delta;
- if (value == sentinelPCDelta)
- delta = pcReader.read<uintptr_t>();
- else
- delta = value;
- currentPC += delta;
- }
-
- CodeOrigin previousOrigin = currentCodeOrigin;
- {
- int8_t value = codeOriginReader.read<int8_t>();
- intptr_t delta;
- if (value == sentinelBytecodeDelta)
- delta = codeOriginReader.read<intptr_t>();
- else
- delta = static_cast<intptr_t>(value);
-
- currentCodeOrigin.bytecodeIndex = static_cast<unsigned>(static_cast<intptr_t>(currentCodeOrigin.bytecodeIndex) + delta);
-
- int8_t hasInlineFrame = codeOriginReader.read<int8_t>();
- ASSERT(hasInlineFrame == 0 || hasInlineFrame == 1);
- if (hasInlineFrame)
- currentCodeOrigin.inlineCallFrame = bitwise_cast<InlineCallFrame*>(codeOriginReader.read<uintptr_t>());
- else
- currentCodeOrigin.inlineCallFrame = nullptr;
- }
-
- if (previousPC) {
- uintptr_t startOfRange = previousPC;
- // We subtract 1 because we generate end points inclusively in this table, even though we are interested in ranges of the form: [previousPC, currentPC)
- uintptr_t endOfRange = currentPC - 1;
- if (startOfRange <= pcAsInt && pcAsInt <= endOfRange)
- return Optional<CodeOrigin>(previousOrigin); // We return previousOrigin here because CodeOrigins are mapped to the start value of the range.
- }
- }
-
- RELEASE_ASSERT_NOT_REACHED();
- return Nullopt;
-}
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
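The deleted PCToCodeOriginMap above compresses both of its tables with a byte-oriented delta scheme: a nonzero delta that fits in one byte is written directly, and the sentinel 0 escapes to a full-width little-endian value. A standalone sketch of the encode/decode round trip (writeDelta/readDelta are hypothetical names):

    #include <cassert>
    #include <cstdint>
    #include <limits>
    #include <vector>

    static const uint8_t sentinelDelta = 0;              // same role as sentinelPCDelta above

    void writeDelta(std::vector<uint8_t>& out, uintptr_t delta)
    {
        assert(delta != sentinelDelta);                  // 0 is reserved as the escape marker
        if (delta > std::numeric_limits<uint8_t>::max()) {
            out.push_back(sentinelDelta);                // escape byte...
            for (size_t i = 0; i < sizeof(uintptr_t); i++)
                out.push_back(static_cast<uint8_t>(delta >> (8 * i)));  // ...then the full value
            return;
        }
        out.push_back(static_cast<uint8_t>(delta));      // common case: one byte
    }

    uintptr_t readDelta(const uint8_t*& cursor)
    {
        uint8_t byte = *cursor++;
        if (byte != sentinelDelta)
            return byte;
        uintptr_t delta = 0;
        for (size_t i = 0; i < sizeof(uintptr_t); i++)   // reassemble the escaped full-width delta
            delta |= static_cast<uintptr_t>(*cursor++) << (8 * i);
        return delta;
    }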
diff --git a/Source/JavaScriptCore/jit/PCToCodeOriginMap.h b/Source/JavaScriptCore/jit/PCToCodeOriginMap.h
deleted file mode 100644
index 75b54448c..000000000
--- a/Source/JavaScriptCore/jit/PCToCodeOriginMap.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * Copyright (C) 2016 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef PCToCodeOriginMap_h
-#define PCToCodeOriginMap_h
-
-#if ENABLE(JIT)
-
-#include "CodeOrigin.h"
-#include "DFGCommon.h"
-#include "MacroAssembler.h"
-#include "VM.h"
-#include <wtf/Bag.h>
-#include <wtf/Optional.h>
-#include <wtf/RedBlackTree.h>
-#include <wtf/Vector.h>
-
-namespace JSC {
-
-#if ENABLE(FTL_JIT)
-namespace B3 {
-class PCToOriginMap;
-}
-#endif
-
-class LinkBuffer;
-class PCToCodeOriginMapBuilder;
-
-class PCToCodeOriginMapBuilder {
- WTF_MAKE_NONCOPYABLE(PCToCodeOriginMapBuilder);
- friend class PCToCodeOriginMap;
-
-public:
- PCToCodeOriginMapBuilder(VM&);
- PCToCodeOriginMapBuilder(PCToCodeOriginMapBuilder&& other);
-
-#if ENABLE(FTL_JIT)
- PCToCodeOriginMapBuilder(VM&, B3::PCToOriginMap&&);
-#endif
-
- void appendItem(MacroAssembler::Label, const CodeOrigin&);
- static CodeOrigin defaultCodeOrigin() { return CodeOrigin(0, nullptr); }
-
- bool didBuildMapping() const { return m_shouldBuildMapping; }
-
-private:
-
- struct CodeRange {
- MacroAssembler::Label start;
- MacroAssembler::Label end;
- CodeOrigin codeOrigin;
- };
-
- VM& m_vm;
- Vector<CodeRange> m_codeRanges;
- bool m_shouldBuildMapping;
-};
-
-class PCToCodeOriginMap {
- WTF_MAKE_NONCOPYABLE(PCToCodeOriginMap);
-public:
- PCToCodeOriginMap(PCToCodeOriginMapBuilder&&, LinkBuffer&);
- ~PCToCodeOriginMap();
-
- Optional<CodeOrigin> findPC(void* pc) const;
-
- double memorySize();
-
-private:
- size_t m_compressedPCBufferSize;
- size_t m_compressedCodeOriginsSize;
- uint8_t* m_compressedPCs;
- uint8_t* m_compressedCodeOrigins;
- uintptr_t m_pcRangeStart;
- uintptr_t m_pcRangeEnd;
-};
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
-
-#endif // PCToCodeOriginMap_h
diff --git a/Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.cpp b/Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.cpp
deleted file mode 100644
index b12b55299..000000000
--- a/Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.cpp
+++ /dev/null
@@ -1,137 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "PolymorphicCallStubRoutine.h"
-
-#if ENABLE(JIT)
-
-#include "CallLinkInfo.h"
-#include "CodeBlock.h"
-#include "JSCInlines.h"
-#include "LinkBuffer.h"
-
-namespace JSC {
-
-PolymorphicCallNode::~PolymorphicCallNode()
-{
- if (isOnList())
- remove();
-}
-
-void PolymorphicCallNode::unlink(VM& vm)
-{
- if (m_callLinkInfo) {
- if (Options::dumpDisassembly())
- dataLog("Unlinking polymorphic call at ", m_callLinkInfo->callReturnLocation(), ", ", m_callLinkInfo->codeOrigin(), "\n");
-
- m_callLinkInfo->unlink(vm);
- }
-
- if (isOnList())
- remove();
-}
-
-void PolymorphicCallNode::clearCallLinkInfo()
-{
- if (Options::dumpDisassembly())
- dataLog("Clearing call link info for polymorphic call at ", m_callLinkInfo->callReturnLocation(), ", ", m_callLinkInfo->codeOrigin(), "\n");
-
- m_callLinkInfo = nullptr;
-}
-
-void PolymorphicCallCase::dump(PrintStream& out) const
-{
- out.print("<variant = ", m_variant, ", codeBlock = ", pointerDump(m_codeBlock), ">");
-}
-
-PolymorphicCallStubRoutine::PolymorphicCallStubRoutine(
- const MacroAssemblerCodeRef& codeRef, VM& vm, const JSCell* owner, ExecState* callerFrame,
- CallLinkInfo& info, const Vector<PolymorphicCallCase>& cases,
- std::unique_ptr<uint32_t[]> fastCounts)
- : GCAwareJITStubRoutine(codeRef, vm)
- , m_fastCounts(WTFMove(fastCounts))
-{
- for (PolymorphicCallCase callCase : cases) {
- m_variants.append(WriteBarrier<JSCell>(vm, owner, callCase.variant().rawCalleeCell()));
- if (shouldDumpDisassemblyFor(callerFrame->codeBlock()))
- dataLog("Linking polymorphic call in ", *callerFrame->codeBlock(), " at ", callerFrame->codeOrigin(), " to ", callCase.variant(), ", codeBlock = ", pointerDump(callCase.codeBlock()), "\n");
- if (CodeBlock* codeBlock = callCase.codeBlock())
- codeBlock->linkIncomingPolymorphicCall(callerFrame, m_callNodes.add(&info));
- }
- m_variants.shrinkToFit();
- WTF::storeStoreFence();
-}
-
-PolymorphicCallStubRoutine::~PolymorphicCallStubRoutine() { }
-
-CallVariantList PolymorphicCallStubRoutine::variants() const
-{
- CallVariantList result;
- for (size_t i = 0; i < m_variants.size(); ++i)
- result.append(CallVariant(m_variants[i].get()));
- return result;
-}
-
-CallEdgeList PolymorphicCallStubRoutine::edges() const
-{
- // We wouldn't have these if this was an FTL stub routine. We shouldn't be asking for profiling
- // from the FTL.
- RELEASE_ASSERT(m_fastCounts);
-
- CallEdgeList result;
- for (size_t i = 0; i < m_variants.size(); ++i)
- result.append(CallEdge(CallVariant(m_variants[i].get()), m_fastCounts[i]));
- return result;
-}
-
-void PolymorphicCallStubRoutine::clearCallNodesFor(CallLinkInfo* info)
-{
- for (Bag<PolymorphicCallNode>::iterator iter = m_callNodes.begin(); !!iter; ++iter) {
- PolymorphicCallNode& node = **iter;
- // All nodes should point to info, but it's okay to be a little paranoid.
- if (node.hasCallLinkInfo(info))
- node.clearCallLinkInfo();
- }
-}
-
-bool PolymorphicCallStubRoutine::visitWeak(VM&)
-{
- for (auto& variant : m_variants) {
- if (!Heap::isMarked(variant.get()))
- return false;
- }
- return true;
-}
-
-void PolymorphicCallStubRoutine::markRequiredObjectsInternal(SlotVisitor& visitor)
-{
- for (auto& variant : m_variants)
- visitor.append(&variant);
-}
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.h b/Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.h
deleted file mode 100644
index 9d1491ca0..000000000
--- a/Source/JavaScriptCore/jit/PolymorphicCallStubRoutine.h
+++ /dev/null
@@ -1,115 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef PolymorphicCallStubRoutine_h
-#define PolymorphicCallStubRoutine_h
-
-#if ENABLE(JIT)
-
-#include "CallEdge.h"
-#include "CallVariant.h"
-#include "CodeOrigin.h"
-#include "GCAwareJITStubRoutine.h"
-#include <wtf/FastMalloc.h>
-#include <wtf/Noncopyable.h>
-#include <wtf/Vector.h>
-
-namespace JSC {
-
-class CallLinkInfo;
-
-class PolymorphicCallNode : public BasicRawSentinelNode<PolymorphicCallNode> {
- WTF_MAKE_NONCOPYABLE(PolymorphicCallNode);
-public:
- PolymorphicCallNode(CallLinkInfo* info)
- : m_callLinkInfo(info)
- {
- }
-
- ~PolymorphicCallNode();
-
- void unlink(VM&);
-
- bool hasCallLinkInfo(CallLinkInfo* info) { return m_callLinkInfo == info; }
- void clearCallLinkInfo();
-
-private:
- CallLinkInfo* m_callLinkInfo;
-};
-
-class PolymorphicCallCase {
-public:
- PolymorphicCallCase()
- : m_codeBlock(nullptr)
- {
- }
-
- PolymorphicCallCase(CallVariant variant, CodeBlock* codeBlock)
- : m_variant(variant)
- , m_codeBlock(codeBlock)
- {
- }
-
- CallVariant variant() const { return m_variant; }
- CodeBlock* codeBlock() const { return m_codeBlock; }
-
- void dump(PrintStream&) const;
-
-private:
- CallVariant m_variant;
- CodeBlock* m_codeBlock;
-};
-
-class PolymorphicCallStubRoutine : public GCAwareJITStubRoutine {
-public:
- PolymorphicCallStubRoutine(
- const MacroAssemblerCodeRef&, VM&, const JSCell* owner,
- ExecState* callerFrame, CallLinkInfo&, const Vector<PolymorphicCallCase>&,
- std::unique_ptr<uint32_t[]> fastCounts);
-
- virtual ~PolymorphicCallStubRoutine();
-
- CallVariantList variants() const;
- CallEdgeList edges() const;
-
- void clearCallNodesFor(CallLinkInfo*);
-
- bool visitWeak(VM&) override;
-
-protected:
- virtual void markRequiredObjectsInternal(SlotVisitor&) override;
-
-private:
- Vector<WriteBarrier<JSCell>, 2> m_variants;
- std::unique_ptr<uint32_t[]> m_fastCounts;
- Bag<PolymorphicCallNode> m_callNodes;
-};
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
-
-#endif // PolymorphicCallStubRoutine_h
-
diff --git a/Source/JavaScriptCore/jit/Reg.h b/Source/JavaScriptCore/jit/Reg.h
deleted file mode 100644
index 4db916321..000000000
--- a/Source/JavaScriptCore/jit/Reg.h
+++ /dev/null
@@ -1,250 +0,0 @@
-/*
- * Copyright (C) 2014, 2016 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef Reg_h
-#define Reg_h
-
-#if ENABLE(JIT)
-
-#include "MacroAssembler.h"
-
-namespace JSC {
-
-// Reg is a polymorphic register class. It can refer to either integer or float registers.
-// Here are some use cases:
-//
-// GPRReg gpr;
-// Reg reg = gpr;
-// reg.isSet() == true
-// reg.isGPR() == true
-// reg.isFPR() == false
-//
-// for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
-// if (reg.isGPR()) {
-// } else /* reg.isFPR() */ {
-// }
-// }
-//
-// The above loop could have also used !!reg or reg.isSet() as a condition.
-
-class Reg {
-public:
- Reg()
- : m_index(invalid())
- {
- }
-
- Reg(WTF::HashTableDeletedValueType)
- : m_index(deleted())
- {
- }
-
- Reg(MacroAssembler::RegisterID reg)
- : m_index(MacroAssembler::registerIndex(reg))
- {
- }
-
- Reg(MacroAssembler::FPRegisterID reg)
- : m_index(MacroAssembler::registerIndex(reg))
- {
- }
-
- static Reg fromIndex(unsigned index)
- {
- Reg result;
- result.m_index = index;
- return result;
- }
-
- static Reg first()
- {
- Reg result;
- result.m_index = 0;
- return result;
- }
-
- static Reg last()
- {
- Reg result;
- result.m_index = MacroAssembler::numberOfRegisters() + MacroAssembler::numberOfFPRegisters() - 1;
- return result;
- }
-
- Reg next() const
- {
- ASSERT(!!*this);
- if (*this == last())
- return Reg();
- Reg result;
- result.m_index = m_index + 1;
- return result;
- }
-
- unsigned index() const { return m_index; }
-
- static unsigned maxIndex()
- {
- return last().index();
- }
-
- bool isSet() const { return m_index != invalid(); }
- explicit operator bool() const { return isSet(); }
-
- bool isHashTableDeletedValue() const { return m_index == deleted(); }
-
- bool isGPR() const
- {
- return m_index < MacroAssembler::numberOfRegisters();
- }
-
- bool isFPR() const
- {
- return (m_index - MacroAssembler::numberOfRegisters()) < MacroAssembler::numberOfFPRegisters();
- }
-
- MacroAssembler::RegisterID gpr() const
- {
- ASSERT(isGPR());
- return static_cast<MacroAssembler::RegisterID>(MacroAssembler::firstRegister() + m_index);
- }
-
- MacroAssembler::FPRegisterID fpr() const
- {
- ASSERT(isFPR());
- return static_cast<MacroAssembler::FPRegisterID>(
- MacroAssembler::firstFPRegister() + (m_index - MacroAssembler::numberOfRegisters()));
- }
-
- bool operator==(const Reg& other) const
- {
- return m_index == other.m_index;
- }
-
- bool operator!=(const Reg& other) const
- {
- return m_index != other.m_index;
- }
-
- bool operator<(const Reg& other) const
- {
- return m_index < other.m_index;
- }
-
- bool operator>(const Reg& other) const
- {
- return m_index > other.m_index;
- }
-
- bool operator<=(const Reg& other) const
- {
- return m_index <= other.m_index;
- }
-
- bool operator>=(const Reg& other) const
- {
- return m_index >= other.m_index;
- }
-
- unsigned hash() const
- {
- return m_index;
- }
-
- void dump(PrintStream&) const;
-
- class AllRegsIterable {
- public:
-
- class iterator {
- public:
- iterator() { }
-
- explicit iterator(Reg reg)
- : m_regIndex(reg.index())
- {
- }
-
- Reg operator*() const { return Reg::fromIndex(m_regIndex); }
-
- iterator& operator++()
- {
- m_regIndex = Reg::fromIndex(m_regIndex).next().index();
- return *this;
- }
-
- bool operator==(const iterator& other) const
- {
- return m_regIndex == other.m_regIndex;
- }
-
- bool operator!=(const iterator& other) const
- {
- return !(*this == other);
- }
-
- private:
- unsigned m_regIndex;
- };
-
- iterator begin() const { return iterator(Reg::first()); }
- iterator end() const { return iterator(Reg()); }
- };
-
- static AllRegsIterable all() { return AllRegsIterable(); }
-
-private:
- static uint8_t invalid() { return 0xff; }
-
- static uint8_t deleted() { return 0xfe; }
-
- uint8_t m_index;
-};
-
-struct RegHash {
- static unsigned hash(const Reg& key) { return key.hash(); }
- static bool equal(const Reg& a, const Reg& b) { return a == b; }
- static const bool safeToCompareToEmptyOrDeleted = true;
-};
-
-} // namespace JSC
-
-namespace WTF {
-
-template<typename T> struct DefaultHash;
-template<> struct DefaultHash<JSC::Reg> {
- typedef JSC::RegHash Hash;
-};
-
-template<typename T> struct HashTraits;
-template<> struct HashTraits<JSC::Reg> : SimpleClassHashTraits<JSC::Reg> {
- static const bool emptyValueIsZero = false;
-};
-
-} // namespace WTF
-
-#endif // ENABLE(JIT)
-
-#endif // Reg_h
-
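The deleted Reg class above folds GPRs and FPRs into one byte-sized index space: GPRs occupy [0, numberOfRegisters) and FPRs the range immediately after, which is what makes isGPR()/isFPR() single comparisons. A sketch of the same mapping with illustrative constants (the real bounds come from MacroAssembler):

    #include <cstdint>

    static const unsigned numGPRs = 16;  // illustrative; really MacroAssembler::numberOfRegisters()
    static const unsigned numFPRs = 16;  // illustrative; really MacroAssembler::numberOfFPRegisters()

    bool isGPR(uint8_t index) { return index < numGPRs; }

    bool isFPR(uint8_t index)
    {
        // Unsigned wraparound makes GPR indices (and the 0xff/0xfe sentinels) fail this test.
        return index - numGPRs < numFPRs;
    }

    unsigned gprIndex(uint8_t index) { return index; }            // firstRegister() + index
    unsigned fprIndex(uint8_t index) { return index - numGPRs; }  // firstFPRegister() + remainder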
diff --git a/Source/JavaScriptCore/jit/RegisterAtOffset.cpp b/Source/JavaScriptCore/jit/RegisterAtOffset.cpp
deleted file mode 100644
index 16a639ca8..000000000
--- a/Source/JavaScriptCore/jit/RegisterAtOffset.cpp
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "RegisterAtOffset.h"
-
-#if ENABLE(JIT)
-
-namespace JSC {
-
-#if !COMPILER(MSVC)
-static_assert(sizeof(RegisterAtOffset) == sizeof(ptrdiff_t), "RegisterAtOffset should be small.");
-#endif
-
-void RegisterAtOffset::dump(PrintStream& out) const
-{
- out.print(reg(), " at ", offset());
-}
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
-
diff --git a/Source/JavaScriptCore/jit/RegisterAtOffset.h b/Source/JavaScriptCore/jit/RegisterAtOffset.h
deleted file mode 100644
index 3fc177070..000000000
--- a/Source/JavaScriptCore/jit/RegisterAtOffset.h
+++ /dev/null
@@ -1,81 +0,0 @@
-/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef RegisterAtOffset_h
-#define RegisterAtOffset_h
-
-#if ENABLE(JIT)
-
-#include "Reg.h"
-#include <wtf/PrintStream.h>
-
-namespace JSC {
-
-class RegisterAtOffset {
-public:
- RegisterAtOffset()
- : m_offset(0)
- {
- }
-
- RegisterAtOffset(Reg reg, ptrdiff_t offset)
- : m_reg(reg)
- , m_offset(offset)
- {
- }
-
- bool operator!() const { return !m_reg; }
-
- Reg reg() const { return m_reg; }
- ptrdiff_t offset() const { return m_offset; }
- int offsetAsIndex() const { return offset() / sizeof(void*); }
-
- bool operator==(const RegisterAtOffset& other) const
- {
- return reg() == other.reg() && offset() == other.offset();
- }
-
- bool operator<(const RegisterAtOffset& other) const
- {
- if (reg() != other.reg())
- return reg() < other.reg();
- return offset() < other.offset();
- }
-
- static Reg getReg(RegisterAtOffset* value) { return value->reg(); }
-
- void dump(PrintStream& out) const;
-
-private:
- Reg m_reg;
- ptrdiff_t m_offset : sizeof(ptrdiff_t) * 8 - sizeof(Reg) * 8;
-};
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
-
-#endif // RegisterAtOffset_h
-
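The deleted RegisterAtOffset above packs a one-byte Reg next to an offset bitfield of (word size - 8) bits, so the pair fits in a single ptrdiff_t-sized struct, as the !COMPILER(MSVC) static_assert in RegisterAtOffset.cpp checked. A sketch of that layout (PackedRegisterAtOffset is a hypothetical name; the size guarantee holds on Itanium-ABI compilers, not necessarily MSVC):

    #include <cstddef>
    #include <cstdint>

    struct PackedRegisterAtOffset {
        uint8_t reg;                                                      // Reg is one byte wide
        ptrdiff_t offset : sizeof(ptrdiff_t) * 8 - sizeof(uint8_t) * 8;  // remaining bits hold the offset
    };

    // Holds on Itanium-ABI compilers; the original guarded this with !COMPILER(MSVC).
    static_assert(sizeof(PackedRegisterAtOffset) == sizeof(ptrdiff_t),
        "one-byte register index plus offset bitfield pack into a single word");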
diff --git a/Source/JavaScriptCore/jit/RegisterAtOffsetList.cpp b/Source/JavaScriptCore/jit/RegisterAtOffsetList.cpp
deleted file mode 100644
index 9df5d40fc..000000000
--- a/Source/JavaScriptCore/jit/RegisterAtOffsetList.cpp
+++ /dev/null
@@ -1,80 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "RegisterAtOffsetList.h"
-
-#if ENABLE(JIT)
-
-#include <wtf/ListDump.h>
-
-namespace JSC {
-
-RegisterAtOffsetList::RegisterAtOffsetList() { }
-
-RegisterAtOffsetList::RegisterAtOffsetList(RegisterSet registerSet, OffsetBaseType offsetBaseType)
-{
- size_t numberOfRegisters = registerSet.numberOfSetRegisters();
- ptrdiff_t offset = 0;
-
- if (offsetBaseType == FramePointerBased)
- offset = -(static_cast<ptrdiff_t>(numberOfRegisters) * sizeof(void*));
-
- for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
- if (registerSet.get(reg)) {
- append(RegisterAtOffset(reg, offset));
- offset += sizeof(void*);
- }
- }
-
- sort();
-}
-
-void RegisterAtOffsetList::sort()
-{
- std::sort(m_registers.begin(), m_registers.end());
-}
-
-void RegisterAtOffsetList::dump(PrintStream& out) const
-{
- out.print(listDump(m_registers));
-}
-
-RegisterAtOffset* RegisterAtOffsetList::find(Reg reg) const
-{
- return tryBinarySearch<RegisterAtOffset, Reg>(m_registers, m_registers.size(), reg, RegisterAtOffset::getReg);
-}
-
-unsigned RegisterAtOffsetList::indexOf(Reg reg) const
-{
- if (RegisterAtOffset* pointer = find(reg))
- return pointer - m_registers.begin();
- return UINT_MAX;
-}
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
-
diff --git a/Source/JavaScriptCore/jit/RegisterAtOffsetList.h b/Source/JavaScriptCore/jit/RegisterAtOffsetList.h
deleted file mode 100644
index 3a771beff..000000000
--- a/Source/JavaScriptCore/jit/RegisterAtOffsetList.h
+++ /dev/null
@@ -1,82 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef RegisterAtOffsetList_h
-#define RegisterAtOffsetList_h
-
-#if ENABLE(JIT)
-
-#include "RegisterAtOffset.h"
-#include "RegisterSet.h"
-
-namespace JSC {
-
-class RegisterAtOffsetList {
- WTF_MAKE_FAST_ALLOCATED;
-public:
- enum OffsetBaseType { FramePointerBased, ZeroBased };
-
- RegisterAtOffsetList();
- RegisterAtOffsetList(RegisterSet, OffsetBaseType = FramePointerBased);
-
- void dump(PrintStream&) const;
-
- void clear()
- {
- m_registers.clear();
- }
-
- size_t size() const
- {
- return m_registers.size();
- }
-
- RegisterAtOffset& at(size_t index)
- {
- return m_registers.at(index);
- }
-
- void append(RegisterAtOffset registerAtOffset)
- {
- m_registers.append(registerAtOffset);
- }
-
- void sort();
- RegisterAtOffset* find(Reg) const;
- unsigned indexOf(Reg) const; // Returns UINT_MAX if not found.
-
- Vector<RegisterAtOffset>::const_iterator begin() const { return m_registers.begin(); }
- Vector<RegisterAtOffset>::const_iterator end() const { return m_registers.end(); }
-
-private:
- Vector<RegisterAtOffset> m_registers;
-};
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
-
-#endif // RegisterAtOffsetList_h
-
diff --git a/Source/JavaScriptCore/jit/RegisterMap.h b/Source/JavaScriptCore/jit/RegisterMap.h
deleted file mode 100644
index 2ebf09af3..000000000
--- a/Source/JavaScriptCore/jit/RegisterMap.h
+++ /dev/null
@@ -1,113 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef RegisterMap_h
-#define RegisterMap_h
-
-#if ENABLE(JIT)
-
-#include "FPRInfo.h"
-#include "GPRInfo.h"
-#include "MacroAssembler.h"
-#include "Reg.h"
-
-namespace JSC {
-
-template<typename T>
-class RegisterMap {
-public:
- T& operator[](Reg reg)
- {
- return m_map[reg.index()];
- }
-
- T& operator[](GPRReg gpr)
- {
- return m_map[MacroAssembler::registerIndex(gpr)];
- }
-
- T& operator[](FPRReg fpr)
- {
- return m_map[MacroAssembler::registerIndex(fpr)];
- }
-
- const T& operator[](Reg reg) const
- {
- return m_map[reg.index()];
- }
-
- const T& operator[](GPRReg gpr) const
- {
- return m_map[MacroAssembler::registerIndex(gpr)];
- }
-
- const T& operator[](FPRReg fpr) const
- {
- return m_map[MacroAssembler::registerIndex(fpr)];
- }
-
-private:
- std::array<T, MacroAssembler::totalNumberOfRegisters()> m_map { { } };
-};
-
-template<typename T>
-class GPRMap {
-public:
- T& operator[](GPRReg gpr)
- {
- return m_map[MacroAssembler::registerIndex(gpr)];
- }
-
- const T& operator[](GPRReg gpr) const
- {
- return m_map[MacroAssembler::registerIndex(gpr)];
- }
-
-private:
- std::array<T, MacroAssembler::numberOfRegisters()> m_map { { } };
-};
-
-template<typename T>
-class FPRMap {
-public:
- T& operator[](FPRReg fpr)
- {
- return m_map[MacroAssembler::fpRegisterIndex(fpr)];
- }
-
- const T& operator[](FPRReg fpr) const
- {
- return m_map[MacroAssembler::fpRegisterIndex(fpr)];
- }
-
-private:
- std::array<T, MacroAssembler::numberOfFPRegisters()> m_map { { } };
-};
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
-
-#endif // RegisterMap_h
diff --git a/Source/JavaScriptCore/jit/RegisterSet.cpp b/Source/JavaScriptCore/jit/RegisterSet.cpp
index 5418400b0..362ada0de 100644
--- a/Source/JavaScriptCore/jit/RegisterSet.cpp
+++ b/Source/JavaScriptCore/jit/RegisterSet.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -30,325 +30,45 @@
#include "GPRInfo.h"
#include "MacroAssembler.h"
-#include "JSCInlines.h"
-#include <wtf/CommaPrinter.h>
namespace JSC {
RegisterSet RegisterSet::stackRegisters()
{
- return RegisterSet(
- MacroAssembler::stackPointerRegister,
- MacroAssembler::framePointerRegister);
-}
-
-RegisterSet RegisterSet::reservedHardwareRegisters()
-{
-#if CPU(ARM64)
-#if PLATFORM(IOS)
- return RegisterSet(ARM64Registers::x18, ARM64Registers::lr);
-#else
- return RegisterSet(ARM64Registers::lr);
-#endif // PLATFORM(IOS)
-#else
- return RegisterSet();
-#endif
-}
-
-RegisterSet RegisterSet::runtimeRegisters()
-{
-#if USE(JSVALUE64)
- return RegisterSet(GPRInfo::tagTypeNumberRegister, GPRInfo::tagMaskRegister);
-#else
- return RegisterSet();
-#endif
+ RegisterSet result;
+ result.set(MacroAssembler::stackPointerRegister);
+ result.set(MacroAssembler::framePointerRegister);
+ return result;
}
RegisterSet RegisterSet::specialRegisters()
{
- return RegisterSet(
- stackRegisters(), reservedHardwareRegisters(), runtimeRegisters());
-}
-
-RegisterSet RegisterSet::volatileRegistersForJSCall()
-{
- RegisterSet volatileRegisters = allRegisters();
- volatileRegisters.exclude(RegisterSet::stackRegisters());
- volatileRegisters.exclude(RegisterSet::reservedHardwareRegisters());
- volatileRegisters.exclude(RegisterSet::vmCalleeSaveRegisters());
- return volatileRegisters;
-}
-
-RegisterSet RegisterSet::stubUnavailableRegisters()
-{
- return RegisterSet(specialRegisters(), vmCalleeSaveRegisters());
-}
-
-RegisterSet RegisterSet::macroScratchRegisters()
-{
-#if CPU(X86_64)
- return RegisterSet(MacroAssembler::s_scratchRegister);
-#elif CPU(ARM64)
- return RegisterSet(MacroAssembler::dataTempRegister, MacroAssembler::memoryTempRegister);
-#elif CPU(MIPS)
RegisterSet result;
- result.set(MacroAssembler::immTempRegister);
- result.set(MacroAssembler::dataTempRegister);
- result.set(MacroAssembler::addrTempRegister);
- result.set(MacroAssembler::cmpTempRegister);
- return result;
-#else
- return RegisterSet();
+ result.merge(stackRegisters());
+ result.set(GPRInfo::callFrameRegister);
+#if USE(JSVALUE64)
+ result.set(GPRInfo::tagTypeNumberRegister);
+ result.set(GPRInfo::tagMaskRegister);
#endif
+ return result;
}
RegisterSet RegisterSet::calleeSaveRegisters()
{
RegisterSet result;
-#if CPU(X86)
- result.set(X86Registers::ebx);
- result.set(X86Registers::ebp);
- result.set(X86Registers::edi);
- result.set(X86Registers::esi);
-#elif CPU(X86_64)
+#if CPU(X86_64)
result.set(X86Registers::ebx);
result.set(X86Registers::ebp);
result.set(X86Registers::r12);
result.set(X86Registers::r13);
result.set(X86Registers::r14);
result.set(X86Registers::r15);
-#elif CPU(ARM_THUMB2)
- result.set(ARMRegisters::r4);
- result.set(ARMRegisters::r5);
- result.set(ARMRegisters::r6);
- result.set(ARMRegisters::r8);
-#if !PLATFORM(IOS)
- result.set(ARMRegisters::r9);
-#endif
- result.set(ARMRegisters::r10);
- result.set(ARMRegisters::r11);
-#elif CPU(ARM_TRADITIONAL)
- result.set(ARMRegisters::r4);
- result.set(ARMRegisters::r5);
- result.set(ARMRegisters::r6);
- result.set(ARMRegisters::r7);
- result.set(ARMRegisters::r8);
- result.set(ARMRegisters::r9);
- result.set(ARMRegisters::r10);
- result.set(ARMRegisters::r11);
-#elif CPU(ARM64)
- // We don't include LR in the set of callee-save registers even though it technically belongs
- // there. This is because we use this set to describe the set of registers that need to be saved
- // beyond what you would save by the platform-agnostic "preserve return address" and "restore
- // return address" operations in CCallHelpers.
- for (
- ARM64Registers::RegisterID reg = ARM64Registers::x19;
- reg <= ARM64Registers::x28;
- reg = static_cast<ARM64Registers::RegisterID>(reg + 1))
- result.set(reg);
- result.set(ARM64Registers::fp);
- for (
- ARM64Registers::FPRegisterID reg = ARM64Registers::q8;
- reg <= ARM64Registers::q15;
- reg = static_cast<ARM64Registers::FPRegisterID>(reg + 1))
- result.set(reg);
-#elif CPU(MIPS)
-#else
- UNREACHABLE_FOR_PLATFORM();
-#endif
- return result;
-}
-
-RegisterSet RegisterSet::vmCalleeSaveRegisters()
-{
- RegisterSet result;
-#if CPU(X86_64)
- result.set(GPRInfo::regCS0);
- result.set(GPRInfo::regCS1);
- result.set(GPRInfo::regCS2);
- result.set(GPRInfo::regCS3);
- result.set(GPRInfo::regCS4);
-#if OS(WINDOWS)
- result.set(GPRInfo::regCS5);
- result.set(GPRInfo::regCS6);
-#endif
-#elif CPU(ARM64)
- result.set(GPRInfo::regCS0);
- result.set(GPRInfo::regCS1);
- result.set(GPRInfo::regCS2);
- result.set(GPRInfo::regCS3);
- result.set(GPRInfo::regCS4);
- result.set(GPRInfo::regCS5);
- result.set(GPRInfo::regCS6);
- result.set(GPRInfo::regCS7);
- result.set(GPRInfo::regCS8);
- result.set(GPRInfo::regCS9);
- result.set(FPRInfo::fpRegCS0);
- result.set(FPRInfo::fpRegCS1);
- result.set(FPRInfo::fpRegCS2);
- result.set(FPRInfo::fpRegCS3);
- result.set(FPRInfo::fpRegCS4);
- result.set(FPRInfo::fpRegCS5);
- result.set(FPRInfo::fpRegCS6);
- result.set(FPRInfo::fpRegCS7);
-#endif
- return result;
-}
-
-RegisterSet RegisterSet::llintBaselineCalleeSaveRegisters()
-{
- RegisterSet result;
-#if CPU(X86)
-#elif CPU(X86_64)
-#if !OS(WINDOWS)
- result.set(GPRInfo::regCS2);
- ASSERT(GPRInfo::regCS3 == GPRInfo::tagTypeNumberRegister);
- ASSERT(GPRInfo::regCS4 == GPRInfo::tagMaskRegister);
- result.set(GPRInfo::regCS3);
- result.set(GPRInfo::regCS4);
-#else
- result.set(GPRInfo::regCS4);
- ASSERT(GPRInfo::regCS5 == GPRInfo::tagTypeNumberRegister);
- ASSERT(GPRInfo::regCS6 == GPRInfo::tagMaskRegister);
- result.set(GPRInfo::regCS5);
- result.set(GPRInfo::regCS6);
-#endif
-#elif CPU(ARM_THUMB2)
-#elif CPU(ARM_TRADITIONAL)
-#elif CPU(ARM64)
- result.set(GPRInfo::regCS7);
- ASSERT(GPRInfo::regCS8 == GPRInfo::tagTypeNumberRegister);
- ASSERT(GPRInfo::regCS9 == GPRInfo::tagMaskRegister);
- result.set(GPRInfo::regCS8);
- result.set(GPRInfo::regCS9);
-#elif CPU(MIPS)
-#elif CPU(SH4)
-#else
- UNREACHABLE_FOR_PLATFORM();
-#endif
- return result;
-}
-
-RegisterSet RegisterSet::dfgCalleeSaveRegisters()
-{
- RegisterSet result;
-#if CPU(X86)
-#elif CPU(X86_64)
- result.set(GPRInfo::regCS0);
- result.set(GPRInfo::regCS1);
- result.set(GPRInfo::regCS2);
-#if !OS(WINDOWS)
- ASSERT(GPRInfo::regCS3 == GPRInfo::tagTypeNumberRegister);
- ASSERT(GPRInfo::regCS4 == GPRInfo::tagMaskRegister);
- result.set(GPRInfo::regCS3);
- result.set(GPRInfo::regCS4);
-#else
- result.set(GPRInfo::regCS3);
- result.set(GPRInfo::regCS4);
- ASSERT(GPRInfo::regCS5 == GPRInfo::tagTypeNumberRegister);
- ASSERT(GPRInfo::regCS6 == GPRInfo::tagMaskRegister);
- result.set(GPRInfo::regCS5);
- result.set(GPRInfo::regCS6);
-#endif
-#elif CPU(ARM_THUMB2)
-#elif CPU(ARM_TRADITIONAL)
-#elif CPU(ARM64)
- ASSERT(GPRInfo::regCS8 == GPRInfo::tagTypeNumberRegister);
- ASSERT(GPRInfo::regCS9 == GPRInfo::tagMaskRegister);
- result.set(GPRInfo::regCS8);
- result.set(GPRInfo::regCS9);
-#elif CPU(MIPS)
-#elif CPU(SH4)
-#else
- UNREACHABLE_FOR_PLATFORM();
-#endif
- return result;
-}
-
-RegisterSet RegisterSet::ftlCalleeSaveRegisters()
-{
- RegisterSet result;
-#if ENABLE(FTL_JIT)
-#if CPU(X86_64) && !OS(WINDOWS)
- result.set(GPRInfo::regCS0);
- result.set(GPRInfo::regCS1);
- result.set(GPRInfo::regCS2);
- ASSERT(GPRInfo::regCS3 == GPRInfo::tagTypeNumberRegister);
- ASSERT(GPRInfo::regCS4 == GPRInfo::tagMaskRegister);
- result.set(GPRInfo::regCS3);
- result.set(GPRInfo::regCS4);
-#elif CPU(ARM64)
- // B3 might save and use all ARM64 callee saves specified in the ABI.
- result.set(GPRInfo::regCS0);
- result.set(GPRInfo::regCS1);
- result.set(GPRInfo::regCS2);
- result.set(GPRInfo::regCS3);
- result.set(GPRInfo::regCS4);
- result.set(GPRInfo::regCS5);
- result.set(GPRInfo::regCS6);
- result.set(GPRInfo::regCS7);
- ASSERT(GPRInfo::regCS8 == GPRInfo::tagTypeNumberRegister);
- ASSERT(GPRInfo::regCS9 == GPRInfo::tagMaskRegister);
- result.set(GPRInfo::regCS8);
- result.set(GPRInfo::regCS9);
- result.set(FPRInfo::fpRegCS0);
- result.set(FPRInfo::fpRegCS1);
- result.set(FPRInfo::fpRegCS2);
- result.set(FPRInfo::fpRegCS3);
- result.set(FPRInfo::fpRegCS4);
- result.set(FPRInfo::fpRegCS5);
- result.set(FPRInfo::fpRegCS6);
- result.set(FPRInfo::fpRegCS7);
#else
UNREACHABLE_FOR_PLATFORM();
#endif
-#endif
return result;
}
-#if ENABLE(WEBASSEMBLY)
-RegisterSet RegisterSet::webAssemblyCalleeSaveRegisters()
-{
- RegisterSet result;
-#if CPU(X86)
-#elif CPU(X86_64)
-#if !OS(WINDOWS)
- ASSERT(GPRInfo::regCS3 == GPRInfo::tagTypeNumberRegister);
- ASSERT(GPRInfo::regCS4 == GPRInfo::tagMaskRegister);
- result.set(GPRInfo::regCS3);
- result.set(GPRInfo::regCS4);
-#else
- ASSERT(GPRInfo::regCS5 == GPRInfo::tagTypeNumberRegister);
- ASSERT(GPRInfo::regCS6 == GPRInfo::tagMaskRegister);
- result.set(GPRInfo::regCS5);
- result.set(GPRInfo::regCS6);
-#endif
-#elif CPU(ARM_THUMB2)
-#elif CPU(ARM_TRADITIONAL)
-#elif CPU(ARM64)
- ASSERT(GPRInfo::regCS8 == GPRInfo::tagTypeNumberRegister);
- ASSERT(GPRInfo::regCS9 == GPRInfo::tagMaskRegister);
- result.set(GPRInfo::regCS8);
- result.set(GPRInfo::regCS9);
-#elif CPU(MIPS)
-#elif CPU(SH4)
-#else
- UNREACHABLE_FOR_PLATFORM();
-#endif
- return result;
-}
-#endif
-
-RegisterSet RegisterSet::registersToNotSaveForJSCall()
-{
- return RegisterSet(RegisterSet::vmCalleeSaveRegisters(), RegisterSet::stackRegisters(), RegisterSet::reservedHardwareRegisters());
-}
-
-RegisterSet RegisterSet::registersToNotSaveForCCall()
-{
- return RegisterSet(RegisterSet::calleeSaveRegisters(), RegisterSet::stackRegisters(), RegisterSet::reservedHardwareRegisters());
-}
-
RegisterSet RegisterSet::allGPRs()
{
RegisterSet result;
@@ -373,29 +93,9 @@ RegisterSet RegisterSet::allRegisters()
return result;
}
-size_t RegisterSet::numberOfSetGPRs() const
-{
- RegisterSet temp = *this;
- temp.filter(allGPRs());
- return temp.numberOfSetRegisters();
-}
-
-size_t RegisterSet::numberOfSetFPRs() const
-{
- RegisterSet temp = *this;
- temp.filter(allFPRs());
- return temp.numberOfSetRegisters();
-}
-
void RegisterSet::dump(PrintStream& out) const
{
- CommaPrinter comma;
- out.print("[");
- for (Reg reg = Reg::first(); reg <= Reg::last(); reg = reg.next()) {
- if (get(reg))
- out.print(comma, reg);
- }
- out.print("]");
+ m_vector.dump(out);
}
} // namespace JSC
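
The numberOfSetGPRs()/numberOfSetFPRs() helpers removed above are a mask-and-popcount over the backing bit vector: copy the set, filter() it against allGPRs() or allFPRs(), then count the surviving bits. A minimal standalone sketch of that idiom (hypothetical register indices and mask, not the JSC types):

    #include <bitset>
    #include <cstddef>

    int main()
    {
        constexpr std::size_t registerCount = 64;
        std::bitset<registerCount> registers; // models RegisterSet::m_vector
        std::bitset<registerCount> gprMask;   // models what allGPRs() returns

        for (std::size_t i = 0; i < 16; ++i)  // pretend indices 0..15 are GPRs
            gprMask.set(i);

        registers.set(3);  // a GPR
        registers.set(40); // an FPR

        // filter() then numberOfSetRegisters(): intersect, then popcount.
        std::size_t numberOfSetGPRs = (registers & gprMask).count(); // == 1
        return static_cast<int>(numberOfSetGPRs);
    }
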
diff --git a/Source/JavaScriptCore/jit/RegisterSet.h b/Source/JavaScriptCore/jit/RegisterSet.h
index a95583ea8..84ad226ad 100644
--- a/Source/JavaScriptCore/jit/RegisterSet.h
+++ b/Source/JavaScriptCore/jit/RegisterSet.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2013-2016 Apple Inc. All rights reserved.
+ * Copyright (C) 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,12 +26,13 @@
#ifndef RegisterSet_h
#define RegisterSet_h
+#include <wtf/Platform.h>
+
#if ENABLE(JIT)
#include "FPRInfo.h"
#include "GPRInfo.h"
#include "MacroAssembler.h"
-#include "Reg.h"
#include "TempRegisterSet.h"
#include <wtf/BitVector.h>
@@ -39,72 +40,50 @@ namespace JSC {
class RegisterSet {
public:
- template<typename... Regs>
- explicit RegisterSet(Regs... regs)
- {
- setMany(regs...);
- }
+ RegisterSet() { }
- JS_EXPORT_PRIVATE static RegisterSet stackRegisters();
- JS_EXPORT_PRIVATE static RegisterSet reservedHardwareRegisters();
- static RegisterSet runtimeRegisters();
- static RegisterSet specialRegisters(); // The union of stack, reserved hardware, and runtime registers.
+ static RegisterSet stackRegisters();
+ static RegisterSet specialRegisters();
static RegisterSet calleeSaveRegisters();
- static RegisterSet vmCalleeSaveRegisters(); // Callee save registers that might be saved and used by any tier.
- static RegisterSet llintBaselineCalleeSaveRegisters(); // Registers saved and used by the LLInt.
- static RegisterSet dfgCalleeSaveRegisters(); // Registers saved and used by the DFG JIT.
- static RegisterSet ftlCalleeSaveRegisters(); // Registers that might be saved and used by the FTL JIT.
-#if ENABLE(WEBASSEMBLY)
- static RegisterSet webAssemblyCalleeSaveRegisters(); // Registers saved and used by the WebAssembly JIT.
-#endif
- static RegisterSet volatileRegistersForJSCall();
- static RegisterSet stubUnavailableRegisters(); // The union of callee saves and special registers.
- JS_EXPORT_PRIVATE static RegisterSet macroScratchRegisters();
- JS_EXPORT_PRIVATE static RegisterSet allGPRs();
- JS_EXPORT_PRIVATE static RegisterSet allFPRs();
+ static RegisterSet allGPRs();
+ static RegisterSet allFPRs();
static RegisterSet allRegisters();
- static RegisterSet registersToNotSaveForJSCall();
- static RegisterSet registersToNotSaveForCCall();
-
- void set(Reg reg, bool value = true)
+ void set(GPRReg reg, bool value = true)
{
- ASSERT(!!reg);
- m_vector.set(reg.index(), value);
+ m_vector.set(MacroAssembler::registerIndex(reg), value);
}
- void set(JSValueRegs regs, bool value = true)
+ void set(JSValueRegs regs)
{
if (regs.tagGPR() != InvalidGPRReg)
- set(regs.tagGPR(), value);
- set(regs.payloadGPR(), value);
+ set(regs.tagGPR());
+ set(regs.payloadGPR());
}
- void clear(Reg reg)
+ void clear(GPRReg reg)
{
- ASSERT(!!reg);
set(reg, false);
}
- bool get(Reg reg) const
+ bool get(GPRReg reg) const { return m_vector.get(MacroAssembler::registerIndex(reg)); }
+
+ void set(FPRReg reg, bool value = true)
{
- ASSERT(!!reg);
- return m_vector.get(reg.index());
+ m_vector.set(MacroAssembler::registerIndex(reg), value);
}
-
- template<typename Iterable>
- void setAll(const Iterable& iterable)
+
+ void clear(FPRReg reg)
{
- for (Reg reg : iterable)
- set(reg);
+ set(reg, false);
}
+ bool get(FPRReg reg) const { return m_vector.get(MacroAssembler::registerIndex(reg)); }
+
void merge(const RegisterSet& other) { m_vector.merge(other.m_vector); }
void filter(const RegisterSet& other) { m_vector.filter(other.m_vector); }
void exclude(const RegisterSet& other) { m_vector.exclude(other.m_vector); }
- size_t numberOfSetGPRs() const;
- size_t numberOfSetFPRs() const;
size_t numberOfSetRegisters() const { return m_vector.bitCount(); }
void dump(PrintStream&) const;
@@ -127,25 +106,8 @@ public:
bool operator==(const RegisterSet& other) const { return m_vector == other.m_vector; }
unsigned hash() const { return m_vector.hash(); }
-
- template<typename Functor>
- void forEach(const Functor& functor) const
- {
- for (size_t index : m_vector)
- functor(Reg::fromIndex(index));
- }
private:
- void setAny(Reg reg) { set(reg); }
- void setAny(const RegisterSet& set) { merge(set); }
- void setMany() { }
- template<typename RegType, typename... Regs>
- void setMany(RegType reg, Regs... regs)
- {
- setAny(reg);
- setMany(regs...);
- }
-
BitVector m_vector;
};
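
The variadic constructor dropped from this header is what made unions like RegisterSet(vmCalleeSaveRegisters(), stackRegisters(), reservedHardwareRegisters()) work: setMany() peels one argument per recursion step and dispatches through setAny(), which is overloaded for a single register and for a whole set. A sketch of that recursion with stand-in types (not the JSC classes):

    #include <set>

    struct MiniRegisterSet {
        std::set<int> bits;

        MiniRegisterSet() = default;

        // Accepts any mix of single registers and whole sets, unioning them all.
        template<typename... Regs>
        explicit MiniRegisterSet(Regs... regs) { setMany(regs...); }

        void set(int reg) { bits.insert(reg); }
        void merge(const MiniRegisterSet& other) { bits.insert(other.bits.begin(), other.bits.end()); }

    private:
        void setAny(int reg) { set(reg); }
        void setAny(const MiniRegisterSet& other) { merge(other); }
        void setMany() { } // base case: no arguments left
        template<typename RegType, typename... Regs>
        void setMany(RegType reg, Regs... regs)
        {
            setAny(reg);      // handle the head...
            setMany(regs...); // ...then recurse on the tail
        }
    };
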
diff --git a/Source/JavaScriptCore/jit/Repatch.cpp b/Source/JavaScriptCore/jit/Repatch.cpp
index bd95f665a..9c31722e8 100644
--- a/Source/JavaScriptCore/jit/Repatch.cpp
+++ b/Source/JavaScriptCore/jit/Repatch.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -28,26 +28,21 @@
#if ENABLE(JIT)
-#include "BinarySwitch.h"
#include "CCallHelpers.h"
-#include "CallFrameShuffler.h"
+#include "CallFrameInlines.h"
#include "DFGOperations.h"
#include "DFGSpeculativeJIT.h"
#include "FTLThunks.h"
#include "GCAwareJITStubRoutine.h"
-#include "GetterSetter.h"
-#include "JIT.h"
-#include "JITInlines.h"
#include "LinkBuffer.h"
-#include "JSCInlines.h"
-#include "PolymorphicAccess.h"
+#include "Operations.h"
+#include "PolymorphicPutByIdList.h"
+#include "RepatchBuffer.h"
#include "ScratchRegisterAllocator.h"
#include "StackAlignment.h"
#include "StructureRareDataInlines.h"
#include "StructureStubClearingWatchpoint.h"
#include "ThunkGenerators.h"
-#include <wtf/CommaPrinter.h>
-#include <wtf/ListDump.h>
#include <wtf/StringPrintStream.h>
namespace JSC {
@@ -56,25 +51,32 @@ namespace JSC {
// that would ordinarily have well-known values:
// - tagTypeNumberRegister
// - tagMaskRegister
+// - callFrameRegister **
+//
+// We currently only use the callFrameRegister for closure call patching, and we're not going to
+// add closure call patching support to the FTL until we switch to the C stack; when we do that,
+// callFrameRegister will disappear.
-static FunctionPtr readCallTarget(CodeBlock* codeBlock, CodeLocationCall call)
+static FunctionPtr readCallTarget(RepatchBuffer& repatchBuffer, CodeLocationCall call)
{
FunctionPtr result = MacroAssembler::readCallTarget(call);
#if ENABLE(FTL_JIT)
+ CodeBlock* codeBlock = repatchBuffer.codeBlock();
if (codeBlock->jitType() == JITCode::FTLJIT) {
return FunctionPtr(codeBlock->vm()->ftlThunks->keyForSlowPathCallThunk(
MacroAssemblerCodePtr::createFromExecutableAddress(
result.executableAddress())).callTarget());
}
#else
- UNUSED_PARAM(codeBlock);
+ UNUSED_PARAM(repatchBuffer);
#endif // ENABLE(FTL_JIT)
return result;
}
-static void repatchCall(CodeBlock* codeBlock, CodeLocationCall call, FunctionPtr newCalleeFunction)
+static void repatchCall(RepatchBuffer& repatchBuffer, CodeLocationCall call, FunctionPtr newCalleeFunction)
{
#if ENABLE(FTL_JIT)
+ CodeBlock* codeBlock = repatchBuffer.codeBlock();
if (codeBlock->jitType() == JITCode::FTLJIT) {
VM& vm = *codeBlock->vm();
FTL::Thunks& thunks = *vm.ftlThunks;
@@ -85,252 +87,654 @@ static void repatchCall(CodeBlock* codeBlock, CodeLocationCall call, FunctionPtr
newCalleeFunction = FunctionPtr(
thunks.getSlowPathCallThunk(vm, key).code().executableAddress());
}
-#else // ENABLE(FTL_JIT)
- UNUSED_PARAM(codeBlock);
#endif // ENABLE(FTL_JIT)
- MacroAssembler::repatchCall(call, newCalleeFunction);
+ repatchBuffer.relink(call, newCalleeFunction);
}
-static void repatchByIdSelfAccess(
- CodeBlock* codeBlock, StructureStubInfo& stubInfo, Structure* structure,
- PropertyOffset offset, const FunctionPtr &slowPathFunction,
- bool compact)
+static void repatchCall(CodeBlock* codeBlock, CodeLocationCall call, FunctionPtr newCalleeFunction)
{
+ RepatchBuffer repatchBuffer(codeBlock);
+ repatchCall(repatchBuffer, call, newCalleeFunction);
+}
+
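
The two ENABLE(FTL_JIT) branches above maintain one indirection: FTL code reaches slow paths through per-target thunks, so reading a call target means mapping the thunk's address back to the real function, and repatching means routing the new function through its thunk (generated on demand in the real code). A sketch of that bookkeeping with stand-in types (FTL::Thunks is more involved than this):

    #include <unordered_map>

    using CodePtr = const void*;

    struct SlowPathThunks {
        std::unordered_map<CodePtr, CodePtr> thunkToTarget;
        std::unordered_map<CodePtr, CodePtr> targetToThunk;

        // readCallTarget() direction: the patched call points at a thunk;
        // recover the function that the thunk tail-calls.
        CodePtr keyForSlowPathCallThunk(CodePtr thunk) const
        {
            return thunkToTarget.at(thunk);
        }

        // repatchCall() direction: route the new callee through its thunk,
        // recording the mapping if this target has not been seen yet.
        CodePtr getSlowPathCallThunk(CodePtr target, CodePtr freshThunk)
        {
            auto result = targetToThunk.emplace(target, freshThunk);
            if (result.second)
                thunkToTarget.emplace(freshThunk, target);
            return result.first->second;
        }
    };
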
+static void repatchByIdSelfAccess(VM& vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo, Structure* structure, const Identifier& propertyName, PropertyOffset offset,
+ const FunctionPtr &slowPathFunction, bool compact)
+{
+ if (structure->typeInfo().newImpurePropertyFiresWatchpoints())
+ vm.registerWatchpointForImpureProperty(propertyName, stubInfo.addWatchpoint(codeBlock));
+
+ RepatchBuffer repatchBuffer(codeBlock);
+
// Only optimize once!
- repatchCall(codeBlock, stubInfo.callReturnLocation, slowPathFunction);
+ repatchCall(repatchBuffer, stubInfo.callReturnLocation, slowPathFunction);
// Patch the structure check & the offset of the load.
- MacroAssembler::repatchInt32(
- stubInfo.callReturnLocation.dataLabel32AtOffset(-(intptr_t)stubInfo.patch.deltaCheckImmToCall),
- bitwise_cast<int32_t>(structure->id()));
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelPtrAtOffset(-(intptr_t)stubInfo.patch.deltaCheckImmToCall), structure);
+ repatchBuffer.setLoadInstructionIsActive(stubInfo.callReturnLocation.convertibleLoadAtOffset(stubInfo.patch.deltaCallToStorageLoad), isOutOfLineOffset(offset));
#if USE(JSVALUE64)
if (compact)
- MacroAssembler::repatchCompact(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToLoadOrStore), offsetRelativeToBase(offset));
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToLoadOrStore), offsetRelativeToPatchedStorage(offset));
else
- MacroAssembler::repatchInt32(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToLoadOrStore), offsetRelativeToBase(offset));
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToLoadOrStore), offsetRelativeToPatchedStorage(offset));
#elif USE(JSVALUE32_64)
if (compact) {
- MacroAssembler::repatchCompact(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToTagLoadOrStore), offsetRelativeToBase(offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
- MacroAssembler::repatchCompact(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToPayloadLoadOrStore), offsetRelativeToBase(offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToTagLoadOrStore), offsetRelativeToPatchedStorage(offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToPayloadLoadOrStore), offsetRelativeToPatchedStorage(offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
} else {
- MacroAssembler::repatchInt32(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToTagLoadOrStore), offsetRelativeToBase(offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
- MacroAssembler::repatchInt32(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToPayloadLoadOrStore), offsetRelativeToBase(offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToTagLoadOrStore), offsetRelativeToPatchedStorage(offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag));
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToPayloadLoadOrStore), offsetRelativeToPatchedStorage(offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload));
}
#endif
}
-static void resetGetByIDCheckAndLoad(StructureStubInfo& stubInfo)
+static void addStructureTransitionCheck(
+ JSCell* object, Structure* structure, CodeBlock* codeBlock, StructureStubInfo& stubInfo,
+ MacroAssembler& jit, MacroAssembler::JumpList& failureCases, GPRReg scratchGPR)
{
- CodeLocationDataLabel32 structureLabel = stubInfo.callReturnLocation.dataLabel32AtOffset(-(intptr_t)stubInfo.patch.deltaCheckImmToCall);
- if (MacroAssembler::canJumpReplacePatchableBranch32WithPatch()) {
- MacroAssembler::revertJumpReplacementToPatchableBranch32WithPatch(
- MacroAssembler::startOfPatchableBranch32WithPatchOnAddress(structureLabel),
- MacroAssembler::Address(
- static_cast<MacroAssembler::RegisterID>(stubInfo.patch.baseGPR),
- JSCell::structureIDOffset()),
- static_cast<int32_t>(unusedPointer));
- }
- MacroAssembler::repatchInt32(structureLabel, static_cast<int32_t>(unusedPointer));
-#if USE(JSVALUE64)
- MacroAssembler::repatchCompact(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToLoadOrStore), 0);
-#else
- MacroAssembler::repatchCompact(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToTagLoadOrStore), 0);
- MacroAssembler::repatchCompact(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToPayloadLoadOrStore), 0);
+ if (object->structure() == structure && structure->transitionWatchpointSetIsStillValid()) {
+ structure->addTransitionWatchpoint(stubInfo.addWatchpoint(codeBlock));
+#if !ASSERT_DISABLED
+ // If we execute this code, the object must have the structure we expect. Assert
+ // this in debug modes.
+ jit.move(MacroAssembler::TrustedImmPtr(object), scratchGPR);
+ MacroAssembler::Jump ok = jit.branchPtr(
+ MacroAssembler::Equal,
+ MacroAssembler::Address(scratchGPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(structure));
+ jit.breakpoint();
+ ok.link(&jit);
#endif
+ return;
+ }
+
+ jit.move(MacroAssembler::TrustedImmPtr(object), scratchGPR);
+ failureCases.append(
+ jit.branchPtr(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(scratchGPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(structure)));
}
-static void resetPutByIDCheckAndLoad(StructureStubInfo& stubInfo)
+static void addStructureTransitionCheck(
+ JSValue prototype, CodeBlock* codeBlock, StructureStubInfo& stubInfo,
+ MacroAssembler& jit, MacroAssembler::JumpList& failureCases, GPRReg scratchGPR)
{
- CodeLocationDataLabel32 structureLabel = stubInfo.callReturnLocation.dataLabel32AtOffset(-(intptr_t)stubInfo.patch.deltaCheckImmToCall);
- if (MacroAssembler::canJumpReplacePatchableBranch32WithPatch()) {
- MacroAssembler::revertJumpReplacementToPatchableBranch32WithPatch(
- MacroAssembler::startOfPatchableBranch32WithPatchOnAddress(structureLabel),
- MacroAssembler::Address(
- static_cast<MacroAssembler::RegisterID>(stubInfo.patch.baseGPR),
- JSCell::structureIDOffset()),
- static_cast<int32_t>(unusedPointer));
- }
- MacroAssembler::repatchInt32(structureLabel, static_cast<int32_t>(unusedPointer));
-#if USE(JSVALUE64)
- MacroAssembler::repatchInt32(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToLoadOrStore), 0);
-#else
- MacroAssembler::repatchInt32(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToTagLoadOrStore), 0);
- MacroAssembler::repatchInt32(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToPayloadLoadOrStore), 0);
-#endif
+ if (prototype.isNull())
+ return;
+
+ ASSERT(prototype.isCell());
+
+ addStructureTransitionCheck(
+ prototype.asCell(), prototype.asCell()->structure(), codeBlock, stubInfo, jit,
+ failureCases, scratchGPR);
}
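
The first addStructureTransitionCheck() overload above chooses between two costs: if the object still has the expected structure and that structure's transition watchpoint set is valid, it emits no check at all and instead registers a watchpoint that will invalidate the stub on any future transition; otherwise it emits a per-execution structure comparison. A sketch of that decision with stand-in types:

    #include <functional>
    #include <vector>

    struct Structure {
        bool transitionWatchpointSetIsStillValid = true;
        std::vector<std::function<void()>> watchpoints; // fired on transition
    };

    struct Cell { Structure* structure; };

    // Returns true if a runtime check had to be emitted (modelled as a lambda).
    bool addTransitionCheck(Cell* object, Structure* expected,
        std::vector<std::function<bool()>>& emittedChecks,
        std::function<void()> jettisonStub)
    {
        if (object->structure == expected && expected->transitionWatchpointSetIsStillValid) {
            // Free at run time: invalidation rides on the watchpoint instead.
            expected->watchpoints.push_back(std::move(jettisonStub));
            return false;
        }
        // Otherwise every execution of the stub pays for a branch.
        emittedChecks.push_back([object, expected] { return object->structure == expected; });
        return true;
    }
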
-static void replaceWithJump(StructureStubInfo& stubInfo, const MacroAssemblerCodePtr target)
+static void replaceWithJump(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo, const MacroAssemblerCodePtr target)
{
- if (MacroAssembler::canJumpReplacePatchableBranch32WithPatch()) {
- MacroAssembler::replaceWithJump(
- MacroAssembler::startOfPatchableBranch32WithPatchOnAddress(
- stubInfo.callReturnLocation.dataLabel32AtOffset(
+ if (MacroAssembler::canJumpReplacePatchableBranchPtrWithPatch()) {
+ repatchBuffer.replaceWithJump(
+ RepatchBuffer::startOfPatchableBranchPtrWithPatchOnAddress(
+ stubInfo.callReturnLocation.dataLabelPtrAtOffset(
-(intptr_t)stubInfo.patch.deltaCheckImmToCall)),
CodeLocationLabel(target));
return;
}
-
- resetGetByIDCheckAndLoad(stubInfo);
- MacroAssembler::repatchJump(
+ repatchBuffer.relink(
stubInfo.callReturnLocation.jumpAtOffset(
stubInfo.patch.deltaCallToJump),
CodeLocationLabel(target));
}
-enum InlineCacheAction {
- GiveUpOnCache,
- RetryCacheLater,
- AttemptToCache
-};
-
-static InlineCacheAction actionForCell(VM& vm, JSCell* cell)
+static void emitRestoreScratch(MacroAssembler& stubJit, bool needToRestoreScratch, GPRReg scratchGPR, MacroAssembler::Jump& success, MacroAssembler::Jump& fail, MacroAssembler::JumpList failureCases)
{
- Structure* structure = cell->structure(vm);
-
- TypeInfo typeInfo = structure->typeInfo();
- if (typeInfo.prohibitsPropertyCaching())
- return GiveUpOnCache;
-
- if (structure->isUncacheableDictionary()) {
- if (structure->hasBeenFlattenedBefore())
- return GiveUpOnCache;
- // Flattening could have changed the offset, so return early for another try.
- asObject(cell)->flattenDictionaryObject(vm);
- return RetryCacheLater;
+ if (needToRestoreScratch) {
+ stubJit.popToRestore(scratchGPR);
+
+ success = stubJit.jump();
+
+ // Link the failure cases here so that we can pop scratchGPR and then jump back.
+ failureCases.link(&stubJit);
+
+ stubJit.popToRestore(scratchGPR);
+
+ fail = stubJit.jump();
+ return;
}
- if (!structure->propertyAccessesAreCacheable())
- return GiveUpOnCache;
+ success = stubJit.jump();
+}
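
emitRestoreScratch() enforces a pairing invariant: when the stub had no free GPR and had to pushToSave() a scratch register on entry, every exit edge, success and failure alike, must popToRestore() it before jumping back into patched code. A plain C++ model of that invariant (the vector stands in for the machine stack):

    #include <cassert>
    #include <vector>

    bool runStub(bool needToRestoreScratch, bool checksPass)
    {
        std::vector<int> machineStack;
        const int savedScratch = 42;

        if (needToRestoreScratch)
            machineStack.push_back(savedScratch); // pushToSave(scratchGPR) on entry

        bool success = checksPass; // failure cases funnel to the same epilogue

        if (needToRestoreScratch)
            machineStack.pop_back(); // popToRestore(scratchGPR) on BOTH exit paths

        assert(machineStack.empty()); // the invariant the helper maintains
        return success;
    }
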
- return AttemptToCache;
+static void linkRestoreScratch(LinkBuffer& patchBuffer, bool needToRestoreScratch, MacroAssembler::Jump success, MacroAssembler::Jump fail, MacroAssembler::JumpList failureCases, CodeLocationLabel successLabel, CodeLocationLabel slowCaseBegin)
+{
+ patchBuffer.link(success, successLabel);
+
+ if (needToRestoreScratch) {
+ patchBuffer.link(fail, slowCaseBegin);
+ return;
+ }
+
+ // Link failure cases directly back to the normal path.
+ patchBuffer.link(failureCases, slowCaseBegin);
}
-static bool forceICFailure(ExecState*)
+static void linkRestoreScratch(LinkBuffer& patchBuffer, bool needToRestoreScratch, StructureStubInfo& stubInfo, MacroAssembler::Jump success, MacroAssembler::Jump fail, MacroAssembler::JumpList failureCases)
{
- return Options::forceICFailure();
+ linkRestoreScratch(patchBuffer, needToRestoreScratch, success, fail, failureCases, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone), stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
}
-static InlineCacheAction tryCacheGetByID(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo)
+enum ProtoChainGenerationResult {
+ ProtoChainGenerationFailed,
+ ProtoChainGenerationSucceeded
+};
+
+static ProtoChainGenerationResult generateProtoChainAccessStub(ExecState*, const PropertySlot&, const Identifier&, StructureStubInfo&, StructureChain*, size_t, PropertyOffset, Structure*, CodeLocationLabel, CodeLocationLabel, RefPtr<JITStubRoutine>&) WARN_UNUSED_RETURN;
+static ProtoChainGenerationResult generateProtoChainAccessStub(ExecState* exec, const PropertySlot& slot, const Identifier& propertyName, StructureStubInfo& stubInfo, StructureChain* chain, size_t count, PropertyOffset offset, Structure* structure, CodeLocationLabel successLabel, CodeLocationLabel slowCaseLabel, RefPtr<JITStubRoutine>& stubRoutine)
{
- if (forceICFailure(exec))
- return GiveUpOnCache;
+ VM* vm = &exec->vm();
+ GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
+#if USE(JSVALUE32_64)
+ GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.patch.valueTagGPR);
+#endif
+ GPRReg resultGPR = static_cast<GPRReg>(stubInfo.patch.valueGPR);
+ GPRReg scratchGPR = TempRegisterSet(stubInfo.patch.usedRegisters).getFreeGPR();
+ bool needToRestoreScratch = scratchGPR == InvalidGPRReg;
+ if (needToRestoreScratch && !slot.isCacheableValue())
+ return ProtoChainGenerationFailed;
- // FIXME: Cache property access for immediates.
- if (!baseValue.isCell())
- return GiveUpOnCache;
+ CCallHelpers stubJit(&exec->vm(), exec->codeBlock());
+ if (needToRestoreScratch) {
+#if USE(JSVALUE64)
+ scratchGPR = AssemblyHelpers::selectScratchGPR(baseGPR, resultGPR);
+#else
+ scratchGPR = AssemblyHelpers::selectScratchGPR(baseGPR, resultGPR, resultTagGPR);
+#endif
+ stubJit.pushToSave(scratchGPR);
+ needToRestoreScratch = true;
+ }
+
+ MacroAssembler::JumpList failureCases;
+
+ failureCases.append(stubJit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(structure)));
CodeBlock* codeBlock = exec->codeBlock();
- VM& vm = exec->vm();
+ if (structure->typeInfo().newImpurePropertyFiresWatchpoints())
+ vm->registerWatchpointForImpureProperty(propertyName, stubInfo.addWatchpoint(codeBlock));
+
+ Structure* currStructure = structure;
+ WriteBarrier<Structure>* it = chain->head();
+ JSObject* protoObject = 0;
+ for (unsigned i = 0; i < count; ++i, ++it) {
+ protoObject = asObject(currStructure->prototypeForLookup(exec));
+ Structure* protoStructure = protoObject->structure();
+ if (protoStructure->typeInfo().newImpurePropertyFiresWatchpoints())
+ vm->registerWatchpointForImpureProperty(propertyName, stubInfo.addWatchpoint(codeBlock));
+ addStructureTransitionCheck(
+ protoObject, protoStructure, codeBlock, stubInfo, stubJit,
+ failureCases, scratchGPR);
+ currStructure = it->get();
+ }
+
+ bool isAccessor = slot.isCacheableGetter() || slot.isCacheableCustom();
+ if (isAccessor)
+ stubJit.move(baseGPR, scratchGPR);
+
+ if (!slot.isCacheableCustom()) {
+ if (isInlineOffset(offset)) {
+#if USE(JSVALUE64)
+ stubJit.load64(protoObject->locationForOffset(offset), resultGPR);
+#elif USE(JSVALUE32_64)
+ stubJit.move(MacroAssembler::TrustedImmPtr(protoObject->locationForOffset(offset)), resultGPR);
+ stubJit.load32(MacroAssembler::Address(resultGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
+ stubJit.load32(MacroAssembler::Address(resultGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
+#endif
+ } else {
+ stubJit.loadPtr(protoObject->butterflyAddress(), resultGPR);
+#if USE(JSVALUE64)
+ stubJit.load64(MacroAssembler::Address(resultGPR, offsetInButterfly(offset) * sizeof(WriteBarrier<Unknown>)), resultGPR);
+#elif USE(JSVALUE32_64)
+ stubJit.load32(MacroAssembler::Address(resultGPR, offsetInButterfly(offset) * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
+ stubJit.load32(MacroAssembler::Address(resultGPR, offsetInButterfly(offset) * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
+#endif
+ }
+ }
+ MacroAssembler::Call operationCall;
+ MacroAssembler::Call handlerCall;
+ FunctionPtr operationFunction;
+ MacroAssembler::Jump success, fail;
+ if (isAccessor) {
+ GPRReg callFrameRegister = static_cast<GPRReg>(stubInfo.patch.callFrameRegister);
+ if (slot.isCacheableGetter()) {
+ stubJit.setupArguments(callFrameRegister, scratchGPR, resultGPR);
+ operationFunction = operationCallGetter;
+ } else {
+ stubJit.move(MacroAssembler::TrustedImmPtr(protoObject), scratchGPR);
+ stubJit.setupArguments(callFrameRegister, scratchGPR,
+ MacroAssembler::TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()),
+ MacroAssembler::TrustedImmPtr(propertyName.impl()));
+ operationFunction = operationCallCustomGetter;
+ }
- std::unique_ptr<AccessCase> newCase;
+ // We need to make sure that whenever this call is made in the future, we remember
+ // the place that we made it from. It just so happens to be the place that we are
+ // at right now!
+ stubJit.store32(MacroAssembler::TrustedImm32(exec->locationAsRawBits()),
+ CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));
- if (isJSArray(baseValue) && propertyName == exec->propertyNames().length)
- newCase = AccessCase::getLength(vm, codeBlock, AccessCase::ArrayLength);
- else if (isJSString(baseValue) && propertyName == exec->propertyNames().length)
- newCase = AccessCase::getLength(vm, codeBlock, AccessCase::StringLength);
- else {
- if (!slot.isCacheable() && !slot.isUnset())
- return GiveUpOnCache;
-
- ObjectPropertyConditionSet conditionSet;
- JSCell* baseCell = baseValue.asCell();
- Structure* structure = baseCell->structure(vm);
-
- bool loadTargetFromProxy = false;
- if (baseCell->type() == PureForwardingProxyType) {
- baseValue = jsCast<JSProxy*>(baseCell)->target();
- baseCell = baseValue.asCell();
- structure = baseCell->structure(vm);
- loadTargetFromProxy = true;
+ operationCall = stubJit.call();
+#if USE(JSVALUE64)
+ stubJit.move(GPRInfo::returnValueGPR, resultGPR);
+#else
+ stubJit.setupResults(resultGPR, resultTagGPR);
+#endif
+ MacroAssembler::Jump noException = stubJit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
+
+ stubJit.setupArgumentsExecState();
+ handlerCall = stubJit.call();
+ stubJit.jumpToExceptionHandler();
+
+ noException.link(&stubJit);
+ }
+ emitRestoreScratch(stubJit, needToRestoreScratch, scratchGPR, success, fail, failureCases);
+
+ LinkBuffer patchBuffer(*vm, &stubJit, exec->codeBlock());
+
+ linkRestoreScratch(patchBuffer, needToRestoreScratch, success, fail, failureCases, successLabel, slowCaseLabel);
+ if (isAccessor) {
+ patchBuffer.link(operationCall, operationFunction);
+ patchBuffer.link(handlerCall, lookupExceptionHandler);
+ }
+ stubRoutine = FINALIZE_CODE_FOR_DFG_STUB(
+ patchBuffer,
+ ("DFG prototype chain access stub for %s, return point %p",
+ toCString(*exec->codeBlock()).data(), successLabel.executableAddress()));
+ return ProtoChainGenerationSucceeded;
+}
+
+static bool tryCacheGetByID(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo)
+{
+ // FIXME: Write a test that proves we need to check for recursion here just
+ // like the interpreter does, then add a check for recursion.
+
+ CodeBlock* codeBlock = exec->codeBlock();
+ VM* vm = &exec->vm();
+
+ if (isJSArray(baseValue) && propertyName == exec->propertyNames().length) {
+ GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
+#if USE(JSVALUE32_64)
+ GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.patch.valueTagGPR);
+#endif
+ GPRReg resultGPR = static_cast<GPRReg>(stubInfo.patch.valueGPR);
+ GPRReg scratchGPR = TempRegisterSet(stubInfo.patch.usedRegisters).getFreeGPR();
+ bool needToRestoreScratch = false;
+
+ MacroAssembler stubJit;
+
+ if (scratchGPR == InvalidGPRReg) {
+#if USE(JSVALUE64)
+ scratchGPR = AssemblyHelpers::selectScratchGPR(baseGPR, resultGPR);
+#else
+ scratchGPR = AssemblyHelpers::selectScratchGPR(baseGPR, resultGPR, resultTagGPR);
+#endif
+ stubJit.pushToSave(scratchGPR);
+ needToRestoreScratch = true;
}
+
+ MacroAssembler::JumpList failureCases;
+
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSCell::structureOffset()), scratchGPR);
+ stubJit.load8(MacroAssembler::Address(scratchGPR, Structure::indexingTypeOffset()), scratchGPR);
+ failureCases.append(stubJit.branchTest32(MacroAssembler::Zero, scratchGPR, MacroAssembler::TrustedImm32(IsArray)));
+ failureCases.append(stubJit.branchTest32(MacroAssembler::Zero, scratchGPR, MacroAssembler::TrustedImm32(IndexingShapeMask)));
+
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
+ stubJit.load32(MacroAssembler::Address(scratchGPR, ArrayStorage::lengthOffset()), scratchGPR);
+ failureCases.append(stubJit.branch32(MacroAssembler::LessThan, scratchGPR, MacroAssembler::TrustedImm32(0)));
+
+ stubJit.move(scratchGPR, resultGPR);
+#if USE(JSVALUE64)
+ stubJit.or64(AssemblyHelpers::TrustedImm64(TagTypeNumber), resultGPR);
+#elif USE(JSVALUE32_64)
+ stubJit.move(AssemblyHelpers::TrustedImm32(0xffffffff), resultTagGPR); // JSValue::Int32Tag
+#endif
+
+ MacroAssembler::Jump success, fail;
+
+ emitRestoreScratch(stubJit, needToRestoreScratch, scratchGPR, success, fail, failureCases);
+
+ LinkBuffer patchBuffer(*vm, &stubJit, codeBlock);
+
+ linkRestoreScratch(patchBuffer, needToRestoreScratch, stubInfo, success, fail, failureCases);
+
+ stubInfo.stubRoutine = FINALIZE_CODE_FOR_DFG_STUB(
+ patchBuffer,
+ ("DFG GetById array length stub for %s, return point %p",
+ toCString(*exec->codeBlock()).data(), stubInfo.callReturnLocation.labelAtOffset(
+ stubInfo.patch.deltaCallToDone).executableAddress()));
+
+ RepatchBuffer repatchBuffer(codeBlock);
+ replaceWithJump(repatchBuffer, stubInfo, stubInfo.stubRoutine->code().code());
+ repatchCall(repatchBuffer, stubInfo.callReturnLocation, operationGetById);
+
+ return true;
+ }
+
+ // FIXME: Should support length access for String.
- InlineCacheAction action = actionForCell(vm, baseCell);
- if (action != AttemptToCache)
- return action;
-
- // Optimize self access.
- if (stubInfo.cacheType == CacheType::Unset
- && slot.isCacheableValue()
- && slot.slotBase() == baseValue
- && !slot.watchpointSet()
- && isInlineOffset(slot.cachedOffset())
- && MacroAssembler::isCompactPtrAlignedAddressOffset(maxOffsetRelativeToBase(slot.cachedOffset()))
- && action == AttemptToCache
- && !structure->needImpurePropertyWatchpoint()
- && !loadTargetFromProxy) {
- structure->startWatchingPropertyForReplacements(vm, slot.cachedOffset());
- repatchByIdSelfAccess(codeBlock, stubInfo, structure, slot.cachedOffset(), operationGetByIdOptimize, true);
- stubInfo.initGetByIdSelf(codeBlock, structure, slot.cachedOffset());
- return RetryCacheLater;
+ // FIXME: Cache property access for immediates.
+ if (!baseValue.isCell())
+ return false;
+ JSCell* baseCell = baseValue.asCell();
+ Structure* structure = baseCell->structure();
+ if (!slot.isCacheable())
+ return false;
+ if (!structure->propertyAccessesAreCacheable())
+ return false;
+
+ // Optimize self access.
+ if (slot.slotBase() == baseValue) {
+ if (!slot.isCacheableValue()
+ || !MacroAssembler::isCompactPtrAlignedAddressOffset(maxOffsetRelativeToPatchedStorage(slot.cachedOffset()))) {
+ repatchCall(codeBlock, stubInfo.callReturnLocation, operationGetByIdBuildList);
+ return true;
}
- PropertyOffset offset = slot.isUnset() ? invalidOffset : slot.cachedOffset();
+ repatchByIdSelfAccess(*vm, codeBlock, stubInfo, structure, propertyName, slot.cachedOffset(), operationGetByIdBuildList, true);
+ stubInfo.initGetByIdSelf(*vm, codeBlock->ownerExecutable(), structure);
+ return true;
+ }
+
+ if (structure->isDictionary())
+ return false;
+
+ if (!stubInfo.patch.registersFlushed) {
+ // We cannot do as much inline caching if the registers were not flushed prior to this GetById. In particular,
+ // non-Value cached properties require planting calls, which requires registers to have been flushed. Thus,
+ // if registers were not flushed, don't do non-Value caching.
+ if (!slot.isCacheableValue())
+ return false;
+ }
+
+ PropertyOffset offset = slot.cachedOffset();
+ size_t count = normalizePrototypeChainForChainAccess(exec, baseValue, slot.slotBase(), propertyName, offset);
+ if (count == InvalidPrototypeChain)
+ return false;
+
+ StructureChain* prototypeChain = structure->prototypeChain(exec);
+ if (generateProtoChainAccessStub(exec, slot, propertyName, stubInfo, prototypeChain, count, offset,
+ structure, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone),
+ stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase), stubInfo.stubRoutine) == ProtoChainGenerationFailed)
+ return false;
+
+ RepatchBuffer repatchBuffer(codeBlock);
+ replaceWithJump(repatchBuffer, stubInfo, stubInfo.stubRoutine->code().code());
+ repatchCall(repatchBuffer, stubInfo.callReturnLocation, operationGetByIdBuildList);
+
+ stubInfo.initGetByIdChain(*vm, codeBlock->ownerExecutable(), structure, prototypeChain, count, slot.isCacheableValue());
+ return true;
+}
+
+void repatchGetByID(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo)
+{
+ GCSafeConcurrentJITLocker locker(exec->codeBlock()->m_lock, exec->vm().heap);
+
+ bool cached = tryCacheGetByID(exec, baseValue, propertyName, slot, stubInfo);
+ if (!cached)
+ repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, operationGetById);
+}
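
The array-length fast path above ends by boxing the raw length: under the JSVALUE64 encoding an int32 JSValue is the 32-bit payload with the TagTypeNumber bits (0xffff000000000000) set in the high word, which is exactly what or64(TrustedImm64(TagTypeNumber), resultGPR) produces. The branch32(LessThan, ..., 0) failure case guarantees the length fits a non-negative int32 first. A standalone model of the boxing:

    #include <cassert>
    #include <cstdint>

    constexpr uint64_t TagTypeNumber = 0xffff000000000000ull;

    uint64_t boxInt32(int32_t value)
    {
        return TagTypeNumber | static_cast<uint32_t>(value); // zero-extends the payload
    }

    int32_t unboxInt32(uint64_t encoded)
    {
        assert((encoded & TagTypeNumber) == TagTypeNumber); // int32-tagged number
        return static_cast<int32_t>(encoded);               // low 32 bits are the value
    }

    // boxInt32(3) == 0xffff000000000003; on 32-bit ports the stub instead writes
    // JSValue::Int32Tag (0xffffffff) into the tag register, as the #elif shows.
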
- if (slot.isUnset() || slot.slotBase() != baseValue) {
- if (structure->typeInfo().prohibitsPropertyCaching() || structure->isDictionary())
- return GiveUpOnCache;
-
- if (slot.isUnset() && structure->typeInfo().getOwnPropertySlotIsImpureForPropertyAbsence())
- return GiveUpOnCache;
+static bool getPolymorphicStructureList(
+ VM* vm, CodeBlock* codeBlock, StructureStubInfo& stubInfo,
+ PolymorphicAccessStructureList*& polymorphicStructureList, int& listIndex,
+ CodeLocationLabel& slowCase)
+{
+ slowCase = stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase);
+
+ if (stubInfo.accessType == access_unset) {
+ RELEASE_ASSERT(!stubInfo.stubRoutine);
+ polymorphicStructureList = new PolymorphicAccessStructureList();
+ stubInfo.initGetByIdSelfList(polymorphicStructureList, 0, false);
+ listIndex = 0;
+ } else if (stubInfo.accessType == access_get_by_id_self) {
+ RELEASE_ASSERT(!stubInfo.stubRoutine);
+ polymorphicStructureList = new PolymorphicAccessStructureList(*vm, codeBlock->ownerExecutable(), JITStubRoutine::createSelfManagedRoutine(slowCase), stubInfo.u.getByIdSelf.baseObjectStructure.get(), true);
+ stubInfo.initGetByIdSelfList(polymorphicStructureList, 1, true);
+ listIndex = 1;
+ } else if (stubInfo.accessType == access_get_by_id_chain) {
+ RELEASE_ASSERT(!!stubInfo.stubRoutine);
+ slowCase = CodeLocationLabel(stubInfo.stubRoutine->code().code());
+ polymorphicStructureList = new PolymorphicAccessStructureList(*vm, codeBlock->ownerExecutable(), stubInfo.stubRoutine, stubInfo.u.getByIdChain.baseObjectStructure.get(), stubInfo.u.getByIdChain.chain.get(), true);
+ stubInfo.stubRoutine.clear();
+ stubInfo.initGetByIdSelfList(polymorphicStructureList, 1, false);
+ listIndex = 1;
+ } else {
+ RELEASE_ASSERT(stubInfo.accessType == access_get_by_id_self_list);
+ polymorphicStructureList = stubInfo.u.getByIdSelfList.structureList;
+ listIndex = stubInfo.u.getByIdSelfList.listSize;
+ slowCase = CodeLocationLabel(polymorphicStructureList->list[listIndex - 1].stubRoutine->code().code());
+ }
+
+ if (listIndex == POLYMORPHIC_LIST_CACHE_SIZE)
+ return false;
+
+ RELEASE_ASSERT(listIndex < POLYMORPHIC_LIST_CACHE_SIZE);
+ return true;
+}
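
getPolymorphicStructureList() normalizes three prior cache states into one growable list: an unset cache starts an empty list, a monomorphic self or chain cache is demoted into entry 0 of a fresh list, and an existing list is reused; the helper then refuses to grow past the cache limit. A sketch of that state machine (stand-in types; cacheSize models POLYMORPHIC_LIST_CACHE_SIZE):

    #include <cstddef>
    #include <vector>

    enum class AccessType { Unset, Self, Chain, SelfList };

    struct Stub { /* handle to compiled stub code */ };

    struct MiniStubInfo {
        AccessType accessType = AccessType::Unset;
        std::vector<Stub> list;
    };

    constexpr std::size_t cacheSize = 8;

    // Returns false once the list is full, mirroring the early-out above.
    bool prepareList(MiniStubInfo& info)
    {
        switch (info.accessType) {
        case AccessType::Unset:
            info.list.clear(); // the next stub becomes entry 0
            break;
        case AccessType::Self:
        case AccessType::Chain:
            info.list.assign(1, Stub()); // demote the monomorphic case to entry 0
            break;
        case AccessType::SelfList:
            break; // already a list; append at listSize
        }
        info.accessType = AccessType::SelfList;
        return info.list.size() < cacheSize;
    }
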
+
+static void patchJumpToGetByIdStub(CodeBlock* codeBlock, StructureStubInfo& stubInfo, JITStubRoutine* stubRoutine)
+{
+ RELEASE_ASSERT(stubInfo.accessType == access_get_by_id_self_list);
+ RepatchBuffer repatchBuffer(codeBlock);
+ if (stubInfo.u.getByIdSelfList.didSelfPatching) {
+ repatchBuffer.relink(
+ stubInfo.callReturnLocation.jumpAtOffset(
+ stubInfo.patch.deltaCallToJump),
+ CodeLocationLabel(stubRoutine->code().code()));
+ return;
+ }
+
+ replaceWithJump(repatchBuffer, stubInfo, stubRoutine->code().code());
+}
+
+static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identifier& ident, const PropertySlot& slot, StructureStubInfo& stubInfo)
+{
+ if (!baseValue.isCell()
+ || !slot.isCacheable()
+ || !baseValue.asCell()->structure()->propertyAccessesAreCacheable())
+ return false;
+
+ CodeBlock* codeBlock = exec->codeBlock();
+ VM* vm = &exec->vm();
+ JSCell* baseCell = baseValue.asCell();
+ Structure* structure = baseCell->structure();
+
+ if (slot.slotBase() == baseValue) {
+ if (!stubInfo.patch.registersFlushed) {
+ // We cannot do as much inline caching if the registers were not flushed prior to this GetById. In particular,
+ // non-Value cached properties require planting calls, which requires registers to have been flushed. Thus,
+ // if registers were not flushed, don't do non-Value caching.
+ if (!slot.isCacheableValue())
+ return false;
+ }
+
+ PolymorphicAccessStructureList* polymorphicStructureList;
+ int listIndex;
+ CodeLocationLabel slowCase;
- if (slot.isUnset()) {
- conditionSet = generateConditionsForPropertyMiss(
- vm, codeBlock, exec, structure, propertyName.impl());
+ if (!getPolymorphicStructureList(vm, codeBlock, stubInfo, polymorphicStructureList, listIndex, slowCase))
+ return false;
+
+ stubInfo.u.getByIdSelfList.listSize++;
+
+ GPRReg callFrameRegister = static_cast<GPRReg>(stubInfo.patch.callFrameRegister);
+ GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
+#if USE(JSVALUE32_64)
+ GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.patch.valueTagGPR);
+#endif
+ GPRReg resultGPR = static_cast<GPRReg>(stubInfo.patch.valueGPR);
+ GPRReg scratchGPR = TempRegisterSet(stubInfo.patch.usedRegisters).getFreeGPR();
+
+ CCallHelpers stubJit(vm, codeBlock);
+
+ MacroAssembler::Jump wrongStruct = stubJit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(structure));
+
+ // The strategy we use for stubs is as follows:
+ // 1) Call DFG helper that calls the getter.
+ // 2) Check if there was an exception, and if there was, call yet another
+ // helper.
+
+ bool isDirect = false;
+ MacroAssembler::Call operationCall;
+ MacroAssembler::Call handlerCall;
+ FunctionPtr operationFunction;
+ MacroAssembler::Jump success;
+
+ if (slot.isCacheableGetter() || slot.isCacheableCustom()) {
+ if (slot.isCacheableGetter()) {
+ ASSERT(scratchGPR != InvalidGPRReg);
+ ASSERT(baseGPR != scratchGPR);
+ if (isInlineOffset(slot.cachedOffset())) {
+#if USE(JSVALUE64)
+ stubJit.load64(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR);
+#else
+ stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR);
+#endif
+ } else {
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR);
+#if USE(JSVALUE64)
+ stubJit.load64(MacroAssembler::Address(scratchGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR);
+#else
+ stubJit.load32(MacroAssembler::Address(scratchGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR);
+#endif
+ }
+ stubJit.setupArguments(callFrameRegister, baseGPR, scratchGPR);
+ operationFunction = operationCallGetter;
} else {
- conditionSet = generateConditionsForPrototypePropertyHit(
- vm, codeBlock, exec, structure, slot.slotBase(),
- propertyName.impl());
+ stubJit.setupArguments(
+ callFrameRegister, baseGPR,
+ MacroAssembler::TrustedImmPtr(FunctionPtr(slot.customGetter()).executableAddress()),
+ MacroAssembler::TrustedImmPtr(ident.impl()));
+ operationFunction = operationCallCustomGetter;
}
- if (!conditionSet.isValid())
- return GiveUpOnCache;
-
- offset = slot.isUnset() ? invalidOffset : conditionSet.slotBaseCondition().offset();
+ // We need to make sure that whenever this call is made in the future, we remember
+ // the place that we made it from. It just so happens to be the place that we are
+ // at right now!
+ stubJit.store32(
+ MacroAssembler::TrustedImm32(exec->locationAsRawBits()),
+ CCallHelpers::tagFor(static_cast<VirtualRegister>(JSStack::ArgumentCount)));
+
+ operationCall = stubJit.call();
+#if USE(JSVALUE64)
+ stubJit.move(GPRInfo::returnValueGPR, resultGPR);
+#else
+ stubJit.setupResults(resultGPR, resultTagGPR);
+#endif
+ success = stubJit.emitExceptionCheck(CCallHelpers::InvertedExceptionCheck);
+
+ stubJit.setupArgumentsExecState();
+ handlerCall = stubJit.call();
+ stubJit.jumpToExceptionHandler();
+ } else {
+ if (isInlineOffset(slot.cachedOffset())) {
+#if USE(JSVALUE64)
+ stubJit.load64(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset())), resultGPR);
+#else
+ if (baseGPR == resultTagGPR) {
+ stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
+ stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
+ } else {
+ stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
+ stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
+ }
+#endif
+ } else {
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), resultGPR);
+#if USE(JSVALUE64)
+ stubJit.load64(MacroAssembler::Address(resultGPR, offsetRelativeToBase(slot.cachedOffset())), resultGPR);
+#else
+ stubJit.load32(MacroAssembler::Address(resultGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR);
+ stubJit.load32(MacroAssembler::Address(resultGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR);
+#endif
+ }
+ success = stubJit.jump();
+ isDirect = true;
}
- JSFunction* getter = nullptr;
- if (slot.isCacheableGetter())
- getter = jsDynamicCast<JSFunction*>(slot.getterSetter()->getter());
-
- if (!loadTargetFromProxy && getter && AccessCase::canEmitIntrinsicGetter(getter, structure))
- newCase = AccessCase::getIntrinsic(vm, codeBlock, getter, slot.cachedOffset(), structure, conditionSet);
- else {
- AccessCase::AccessType type;
- if (slot.isCacheableValue())
- type = AccessCase::Load;
- else if (slot.isUnset())
- type = AccessCase::Miss;
- else if (slot.isCacheableGetter())
- type = AccessCase::Getter;
- else if (slot.attributes() & CustomAccessor)
- type = AccessCase::CustomAccessorGetter;
- else
- type = AccessCase::CustomValueGetter;
-
- newCase = AccessCase::get(
- vm, codeBlock, type, offset, structure, conditionSet, loadTargetFromProxy,
- slot.watchpointSet(), slot.isCacheableCustom() ? slot.customGetter() : nullptr,
- slot.isCacheableCustom() ? slot.slotBase() : nullptr);
+ LinkBuffer patchBuffer(*vm, &stubJit, codeBlock);
+
+ patchBuffer.link(wrongStruct, slowCase);
+ patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone));
+ if (!isDirect) {
+ patchBuffer.link(operationCall, operationFunction);
+ patchBuffer.link(handlerCall, lookupExceptionHandler);
}
+
+ RefPtr<JITStubRoutine> stubRoutine =
+ createJITStubRoutine(
+ FINALIZE_DFG_CODE(
+ patchBuffer,
+ ("DFG GetById polymorphic list access for %s, return point %p",
+ toCString(*exec->codeBlock()).data(), stubInfo.callReturnLocation.labelAtOffset(
+ stubInfo.patch.deltaCallToDone).executableAddress())),
+ *vm,
+ codeBlock->ownerExecutable(),
+ slot.isCacheableGetter() || slot.isCacheableCustom());
+
+ polymorphicStructureList->list[listIndex].set(*vm, codeBlock->ownerExecutable(), stubRoutine, structure, isDirect);
+
+ patchJumpToGetByIdStub(codeBlock, stubInfo, stubRoutine.get());
+ return listIndex < (POLYMORPHIC_LIST_CACHE_SIZE - 1);
}
+
+ if (baseValue.asCell()->structure()->typeInfo().prohibitsPropertyCaching()
+ || baseValue.asCell()->structure()->isDictionary())
+ return false;
+
+ if (!stubInfo.patch.registersFlushed) {
+ // We cannot do as much inline caching if the registers were not flushed prior to this GetById. In particular,
+ // non-Value cached properties require planting calls, which requires registers to have been flushed. Thus,
+ // if registers were not flushed, don't do non-Value caching.
+ if (!slot.isCacheableValue())
+ return false;
+ }
+
- MacroAssemblerCodePtr codePtr =
- stubInfo.addAccessCase(codeBlock, propertyName, WTFMove(newCase));
-
- if (!codePtr)
- return GiveUpOnCache;
+ PropertyOffset offset = slot.cachedOffset();
+ size_t count = normalizePrototypeChainForChainAccess(exec, baseValue, slot.slotBase(), ident, offset);
+ if (count == InvalidPrototypeChain)
+ return false;
- replaceWithJump(stubInfo, codePtr);
+ StructureChain* prototypeChain = structure->prototypeChain(exec);
+
+ PolymorphicAccessStructureList* polymorphicStructureList;
+ int listIndex;
+ CodeLocationLabel slowCase;
+ if (!getPolymorphicStructureList(vm, codeBlock, stubInfo, polymorphicStructureList, listIndex, slowCase))
+ return false;
+
+ stubInfo.u.getByIdProtoList.listSize++;
- return RetryCacheLater;
+ RefPtr<JITStubRoutine> stubRoutine;
+
+ if (generateProtoChainAccessStub(exec, slot, ident, stubInfo, prototypeChain, count, offset, structure,
+ stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone),
+ slowCase, stubRoutine) == ProtoChainGenerationFailed)
+ return false;
+
+ polymorphicStructureList->list[listIndex].set(*vm, codeBlock->ownerExecutable(), stubRoutine, structure, slot.isCacheableValue());
+
+ patchJumpToGetByIdStub(codeBlock, stubInfo, stubRoutine.get());
+
+ return listIndex < (POLYMORPHIC_LIST_CACHE_SIZE - 1);
}
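
The "strategy we use for stubs" comment inside tryBuildGetByIDList() compresses to two steps: call a helper that invokes the getter (or custom getter), then test the VM's exception state and divert to the exception handler if it is set. The same control flow in plain C++, with stand-in types for what the stub does in machine code:

    #include <cstdint>

    struct VM { const void* exception = nullptr; };

    using GetterHelper = int64_t (*)(VM&);  // operationCallGetter stand-in
    using ExceptionHandler = void (*)(VM&); // lookupExceptionHandler stand-in

    int64_t runGetterStub(VM& vm, GetterHelper callGetter, ExceptionHandler handleException)
    {
        int64_t result = callGetter(vm); // 1) helper calls the getter
        if (vm.exception)                // 2) emitExceptionCheck()
            handleException(vm);         //    jumpToExceptionHandler(): unwinds, never returns
        return result;                   // success path rejoins the fast path
    }
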
-void repatchGetByID(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo)
+void buildGetByIDList(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PropertySlot& slot, StructureStubInfo& stubInfo)
{
GCSafeConcurrentJITLocker locker(exec->codeBlock()->m_lock, exec->vm().heap);
- if (tryCacheGetByID(exec, baseValue, propertyName, slot, stubInfo) == GiveUpOnCache)
+ bool dontChangeCall = tryBuildGetByIDList(exec, baseValue, propertyName, slot, stubInfo);
+ if (!dontChangeCall)
repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, operationGetById);
}
@@ -346,592 +750,862 @@ static V_JITOperation_ESsiJJI appropriateGenericPutByIdFunction(const PutPropert
return operationPutByIdNonStrict;
}
-static V_JITOperation_ESsiJJI appropriateOptimizingPutByIdFunction(const PutPropertySlot &slot, PutKind putKind)
+static V_JITOperation_ESsiJJI appropriateListBuildingPutByIdFunction(const PutPropertySlot &slot, PutKind putKind)
{
if (slot.isStrictMode()) {
if (putKind == Direct)
- return operationPutByIdDirectStrictOptimize;
- return operationPutByIdStrictOptimize;
+ return operationPutByIdDirectStrictBuildList;
+ return operationPutByIdStrictBuildList;
}
if (putKind == Direct)
- return operationPutByIdDirectNonStrictOptimize;
- return operationPutByIdNonStrictOptimize;
+ return operationPutByIdDirectNonStrictBuildList;
+ return operationPutByIdNonStrictBuildList;
}
-static InlineCacheAction tryCachePutByID(ExecState* exec, JSValue baseValue, Structure* structure, const Identifier& ident, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
+#if ENABLE(GGC)
+static MacroAssembler::Call storeToWriteBarrierBuffer(CCallHelpers& jit, GPRReg cell, GPRReg scratch1, GPRReg scratch2, GPRReg callFrameRegister, ScratchRegisterAllocator& allocator)
{
- if (forceICFailure(exec))
- return GiveUpOnCache;
-
- CodeBlock* codeBlock = exec->codeBlock();
- VM& vm = exec->vm();
+ ASSERT(scratch1 != scratch2);
+ WriteBarrierBuffer* writeBarrierBuffer = &jit.vm()->heap.writeBarrierBuffer();
+ jit.move(MacroAssembler::TrustedImmPtr(writeBarrierBuffer), scratch1);
+ jit.load32(MacroAssembler::Address(scratch1, WriteBarrierBuffer::currentIndexOffset()), scratch2);
+ MacroAssembler::Jump needToFlush = jit.branch32(MacroAssembler::AboveOrEqual, scratch2, MacroAssembler::Address(scratch1, WriteBarrierBuffer::capacityOffset()));
+
+ jit.add32(MacroAssembler::TrustedImm32(1), scratch2);
+ jit.store32(scratch2, MacroAssembler::Address(scratch1, WriteBarrierBuffer::currentIndexOffset()));
+
+ jit.loadPtr(MacroAssembler::Address(scratch1, WriteBarrierBuffer::bufferOffset()), scratch1);
+ // We use an offset of -sizeof(void*) because we already added 1 to scratch2.
+ jit.storePtr(cell, MacroAssembler::BaseIndex(scratch1, scratch2, MacroAssembler::ScalePtr, static_cast<int32_t>(-sizeof(void*))));
+
+ MacroAssembler::Jump done = jit.jump();
+ needToFlush.link(&jit);
+
+ ScratchBuffer* scratchBuffer = jit.vm()->scratchBufferForSize(allocator.desiredScratchBufferSize());
+ allocator.preserveUsedRegistersToScratchBuffer(jit, scratchBuffer, scratch1);
+
+ unsigned bytesFromBase = allocator.numberOfReusedRegisters() * sizeof(void*);
+ unsigned bytesToSubtract = 0;
+#if CPU(X86)
+ bytesToSubtract += 2 * sizeof(void*);
+ bytesFromBase += bytesToSubtract;
+#endif
+ unsigned currentAlignment = bytesFromBase % stackAlignmentBytes();
+ bytesToSubtract += currentAlignment;
- if (!baseValue.isCell())
- return GiveUpOnCache;
+ if (bytesToSubtract)
+ jit.subPtr(MacroAssembler::TrustedImm32(bytesToSubtract), MacroAssembler::stackPointerRegister);
+
+ jit.setupArguments(callFrameRegister, cell);
+ MacroAssembler::Call call = jit.call();
+
+ if (bytesToSubtract)
+ jit.addPtr(MacroAssembler::TrustedImm32(bytesToSubtract), MacroAssembler::stackPointerRegister);
+ allocator.restoreUsedRegistersFromScratchBuffer(jit, scratchBuffer, scratch1);
+
+ done.link(&jit);
+
+ return call;
+}
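
Two pieces of arithmetic from storeToWriteBarrierBuffer() above, checked in plain C++. First, the store addresses slot i through the already-incremented index scaled by pointer size with a -sizeof(void*) displacement. Second, the slow path subtracts bytesFromBase % stackAlignmentBytes(), which restores 16-byte alignment only because every push is 8 bytes, so the remainder is 0 or 8; this models the 64-bit case.

    #include <cassert>
    #include <cstddef>

    int main()
    {
        const std::size_t pointerSize = 8; // sizeof(void*) on the 64-bit ports

        // Ring-buffer slot addressing: slot i reached via index (i + 1).
        const std::size_t i = 5;           // currentIndex loaded into scratch2
        const std::size_t bumped = i + 1;  // add32(TrustedImm32(1), scratch2)
        assert(bumped * pointerSize - pointerSize == i * pointerSize);

        // Stack realignment before the C call.
        const std::size_t stackAlignmentBytes = 16;
        for (std::size_t pushes = 0; pushes < 8; ++pushes) {
            std::size_t bytesFromBase = pushes * pointerSize;   // remainder is 0 or 8
            std::size_t bytesToSubtract = bytesFromBase % stackAlignmentBytes;
            assert((bytesFromBase + bytesToSubtract) % stackAlignmentBytes == 0);
        }
        return 0;
    }
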
+
+static MacroAssembler::Call writeBarrier(CCallHelpers& jit, GPRReg owner, GPRReg scratch1, GPRReg scratch2, GPRReg callFrameRegister, ScratchRegisterAllocator& allocator)
+{
+ ASSERT(owner != scratch1);
+ ASSERT(owner != scratch2);
+
+ MacroAssembler::Jump definitelyNotMarked = DFG::SpeculativeJIT::genericWriteBarrier(jit, owner, scratch1, scratch2);
+ MacroAssembler::Call call = storeToWriteBarrierBuffer(jit, owner, scratch1, scratch2, callFrameRegister, allocator);
+ definitelyNotMarked.link(&jit);
+ return call;
+}
+#endif // ENABLE(GGC)
+
+static void emitPutReplaceStub(
+ ExecState* exec,
+ JSValue,
+ const Identifier&,
+ const PutPropertySlot& slot,
+ StructureStubInfo& stubInfo,
+ PutKind,
+ Structure* structure,
+ CodeLocationLabel failureLabel,
+ RefPtr<JITStubRoutine>& stubRoutine)
+{
+ VM* vm = &exec->vm();
+#if ENABLE(GGC)
+ GPRReg callFrameRegister = static_cast<GPRReg>(stubInfo.patch.callFrameRegister);
+#endif
+ GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
+#if USE(JSVALUE32_64)
+ GPRReg valueTagGPR = static_cast<GPRReg>(stubInfo.patch.valueTagGPR);
+#endif
+ GPRReg valueGPR = static_cast<GPRReg>(stubInfo.patch.valueGPR);
+
+ ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
+ allocator.lock(baseGPR);
+#if USE(JSVALUE32_64)
+ allocator.lock(valueTagGPR);
+#endif
+ allocator.lock(valueGPR);
- if (!slot.isCacheablePut() && !slot.isCacheableCustom() && !slot.isCacheableSetter())
- return GiveUpOnCache;
+ GPRReg scratchGPR1 = allocator.allocateScratchGPR();
+#if ENABLE(GGC)
+ GPRReg scratchGPR2 = allocator.allocateScratchGPR();
+#endif
- if (!structure->propertyAccessesAreCacheable())
- return GiveUpOnCache;
+ CCallHelpers stubJit(vm, exec->codeBlock());
+
+ allocator.preserveReusedRegistersByPushing(stubJit);
- std::unique_ptr<AccessCase> newCase;
+ MacroAssembler::Jump badStructure = stubJit.branchPtr(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(baseGPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(structure));
- if (slot.base() == baseValue && slot.isCacheablePut()) {
- if (slot.type() == PutPropertySlot::ExistingProperty) {
- structure->didCachePropertyReplacement(vm, slot.cachedOffset());
+#if USE(JSVALUE64)
+ if (isInlineOffset(slot.cachedOffset()))
+ stubJit.store64(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue)));
+ else {
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR1);
+ stubJit.store64(valueGPR, MacroAssembler::Address(scratchGPR1, offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue)));
+ }
+#elif USE(JSVALUE32_64)
+ if (isInlineOffset(slot.cachedOffset())) {
+ stubJit.store32(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ stubJit.store32(valueTagGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ } else {
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR1);
+ stubJit.store32(valueGPR, MacroAssembler::Address(scratchGPR1, offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ stubJit.store32(valueTagGPR, MacroAssembler::Address(scratchGPR1, offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ }
+#endif
+
+#if ENABLE(GGC)
+ MacroAssembler::Call writeBarrierOperation = writeBarrier(stubJit, baseGPR, scratchGPR1, scratchGPR2, callFrameRegister, allocator);
+#endif
+
+ MacroAssembler::Jump success;
+ MacroAssembler::Jump failure;
+
+ if (allocator.didReuseRegisters()) {
+ allocator.restoreReusedRegistersByPopping(stubJit);
+ success = stubJit.jump();
- if (stubInfo.cacheType == CacheType::Unset
- && isInlineOffset(slot.cachedOffset())
- && MacroAssembler::isPtrAlignedAddressOffset(maxOffsetRelativeToBase(slot.cachedOffset()))
- && !structure->needImpurePropertyWatchpoint()
- && !structure->inferredTypeFor(ident.impl())) {
+ badStructure.link(&stubJit);
+ allocator.restoreReusedRegistersByPopping(stubJit);
+ failure = stubJit.jump();
+ } else {
+ success = stubJit.jump();
+ failure = badStructure;
+ }
+
+ LinkBuffer patchBuffer(*vm, &stubJit, exec->codeBlock());
+#if ENABLE(GGC)
+ patchBuffer.link(writeBarrierOperation, operationFlushWriteBarrierBuffer);
+#endif
+ patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone));
+ patchBuffer.link(failure, failureLabel);
+
+ stubRoutine = FINALIZE_CODE_FOR_DFG_STUB(
+ patchBuffer,
+ ("DFG PutById replace stub for %s, return point %p",
+ toCString(*exec->codeBlock()).data(), stubInfo.callReturnLocation.labelAtOffset(
+ stubInfo.patch.deltaCallToDone).executableAddress()));
+}
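
The store emitPutReplaceStub() emits has exactly two shapes: an inline property is one store into the cell's inline storage, while an out-of-line property first loads the butterfly pointer and then stores through it. A flattened C++ model (the real butterfly addresses out-of-line properties at negative indices relative to the butterfly pointer; that detail is elided here, and the types are stand-ins):

    #include <cstdint>

    using EncodedJSValue = uint64_t;

    struct MiniObject {
        EncodedJSValue inlineStorage[6]; // the JSObject::offsetOfInlineStorage() region
        EncodedJSValue* butterfly;       // loaded via JSObject::butterflyOffset()
    };

    constexpr int inlineCapacity = 6;

    void putReplace(MiniObject& object, int offset, EncodedJSValue value)
    {
        if (offset < inlineCapacity) {
            // isInlineOffset(offset): a single store, no extra load.
            object.inlineStorage[offset] = value;
            return;
        }
        // Out-of-line: load the butterfly, then store into it.
        object.butterfly[offset - inlineCapacity] = value;
    }
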
- repatchByIdSelfAccess(
- codeBlock, stubInfo, structure, slot.cachedOffset(),
- appropriateOptimizingPutByIdFunction(slot, putKind), false);
- stubInfo.initPutByIdReplace(codeBlock, structure, slot.cachedOffset());
- return RetryCacheLater;
- }
+static void emitPutTransitionStub(
+ ExecState* exec,
+ JSValue,
+ const Identifier&,
+ const PutPropertySlot& slot,
+ StructureStubInfo& stubInfo,
+ PutKind putKind,
+ Structure* structure,
+ Structure* oldStructure,
+ StructureChain* prototypeChain,
+ CodeLocationLabel failureLabel,
+ RefPtr<JITStubRoutine>& stubRoutine)
+{
+ VM* vm = &exec->vm();
- newCase = AccessCase::replace(vm, codeBlock, structure, slot.cachedOffset());
- } else {
- ASSERT(slot.type() == PutPropertySlot::NewProperty);
+ GPRReg callFrameRegister = static_cast<GPRReg>(stubInfo.patch.callFrameRegister);
+ GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
+#if USE(JSVALUE32_64)
+ GPRReg valueTagGPR = static_cast<GPRReg>(stubInfo.patch.valueTagGPR);
+#endif
+ GPRReg valueGPR = static_cast<GPRReg>(stubInfo.patch.valueGPR);
+
+ ScratchRegisterAllocator allocator(stubInfo.patch.usedRegisters);
+ allocator.lock(baseGPR);
+#if USE(JSVALUE32_64)
+ allocator.lock(valueTagGPR);
+#endif
+ allocator.lock(valueGPR);
+
+ CCallHelpers stubJit(vm);
+
+ bool needThirdScratch = false;
+ if (structure->outOfLineCapacity() != oldStructure->outOfLineCapacity()
+ && oldStructure->outOfLineCapacity()) {
+ needThirdScratch = true;
+ }
- if (!structure->isObject() || structure->isDictionary())
- return GiveUpOnCache;
+ GPRReg scratchGPR1 = allocator.allocateScratchGPR();
+ ASSERT(scratchGPR1 != baseGPR);
+ ASSERT(scratchGPR1 != valueGPR);
+
+ GPRReg scratchGPR2 = allocator.allocateScratchGPR();
+ ASSERT(scratchGPR2 != baseGPR);
+ ASSERT(scratchGPR2 != valueGPR);
+ ASSERT(scratchGPR2 != scratchGPR1);
+
+ GPRReg scratchGPR3;
+ if (needThirdScratch) {
+ scratchGPR3 = allocator.allocateScratchGPR();
+ ASSERT(scratchGPR3 != baseGPR);
+ ASSERT(scratchGPR3 != valueGPR);
+ ASSERT(scratchGPR3 != scratchGPR1);
+ ASSERT(scratchGPR3 != scratchGPR2);
+ } else
+ scratchGPR3 = InvalidGPRReg;
+
+ allocator.preserveReusedRegistersByPushing(stubJit);
- PropertyOffset offset;
- Structure* newStructure =
- Structure::addPropertyTransitionToExistingStructureConcurrently(
- structure, ident.impl(), 0, offset);
- if (!newStructure || !newStructure->propertyAccessesAreCacheable())
- return GiveUpOnCache;
+ MacroAssembler::JumpList failureCases;
+
+ ASSERT(oldStructure->transitionWatchpointSetHasBeenInvalidated());
+
+ failureCases.append(stubJit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(oldStructure)));
+
+ addStructureTransitionCheck(
+ oldStructure->storedPrototype(), exec->codeBlock(), stubInfo, stubJit, failureCases,
+ scratchGPR1);
+
+ if (putKind == NotDirect) {
+ for (WriteBarrier<Structure>* it = prototypeChain->head(); *it; ++it) {
+ addStructureTransitionCheck(
+ (*it)->storedPrototype(), exec->codeBlock(), stubInfo, stubJit, failureCases,
+ scratchGPR1);
+ }
+ }
- ASSERT(newStructure->previousID() == structure);
- ASSERT(!newStructure->isDictionary());
- ASSERT(newStructure->isObject());
+ MacroAssembler::JumpList slowPath;
+
+ bool scratchGPR1HasStorage = false;
+
+ if (structure->outOfLineCapacity() != oldStructure->outOfLineCapacity()) {
+ size_t newSize = structure->outOfLineCapacity() * sizeof(JSValue);
+ CopiedAllocator* copiedAllocator = &vm->heap.storageAllocator();
+
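+        // Bump-allocate the new backing store straight out of the copied space:
+        // take newSize bytes off m_currentRemaining (bailing to the slow path on
+        // underflow) and compute the payload pointer from m_currentPayloadEnd.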
+ if (!oldStructure->outOfLineCapacity()) {
+ stubJit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR1);
+ slowPath.append(stubJit.branchSubPtr(MacroAssembler::Signed, MacroAssembler::TrustedImm32(newSize), scratchGPR1));
+ stubJit.storePtr(scratchGPR1, &copiedAllocator->m_currentRemaining);
+ stubJit.negPtr(scratchGPR1);
+ stubJit.addPtr(MacroAssembler::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR1);
+ stubJit.addPtr(MacroAssembler::TrustedImm32(sizeof(JSValue)), scratchGPR1);
+ } else {
+ size_t oldSize = oldStructure->outOfLineCapacity() * sizeof(JSValue);
+ ASSERT(newSize > oldSize);
- ObjectPropertyConditionSet conditionSet;
- if (putKind == NotDirect) {
- conditionSet =
- generateConditionsForPropertySetterMiss(
- vm, codeBlock, exec, newStructure, ident.impl());
- if (!conditionSet.isValid())
- return GiveUpOnCache;
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR3);
+ stubJit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR1);
+ slowPath.append(stubJit.branchSubPtr(MacroAssembler::Signed, MacroAssembler::TrustedImm32(newSize), scratchGPR1));
+ stubJit.storePtr(scratchGPR1, &copiedAllocator->m_currentRemaining);
+ stubJit.negPtr(scratchGPR1);
+ stubJit.addPtr(MacroAssembler::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR1);
+ stubJit.addPtr(MacroAssembler::TrustedImm32(sizeof(JSValue)), scratchGPR1);
+ // We have scratchGPR1 = new storage, scratchGPR3 = old storage, scratchGPR2 = available
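+            // Out-of-line properties live at negative offsets from the butterfly
+            // pointer, hence the negated indices in the copy loop below.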
+ for (size_t offset = 0; offset < oldSize; offset += sizeof(void*)) {
+ stubJit.loadPtr(MacroAssembler::Address(scratchGPR3, -static_cast<ptrdiff_t>(offset + sizeof(JSValue) + sizeof(void*))), scratchGPR2);
+ stubJit.storePtr(scratchGPR2, MacroAssembler::Address(scratchGPR1, -static_cast<ptrdiff_t>(offset + sizeof(JSValue) + sizeof(void*))));
}
-
- newCase = AccessCase::transition(vm, codeBlock, structure, newStructure, offset, conditionSet);
}
- } else if (slot.isCacheableCustom() || slot.isCacheableSetter()) {
- if (slot.isCacheableCustom()) {
- ObjectPropertyConditionSet conditionSet;
-
- if (slot.base() != baseValue) {
- conditionSet =
- generateConditionsForPrototypePropertyHitCustom(
- vm, codeBlock, exec, structure, slot.base(), ident.impl());
- if (!conditionSet.isValid())
- return GiveUpOnCache;
- }
+
+ stubJit.storePtr(scratchGPR1, MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()));
+ scratchGPR1HasStorage = true;
+ }
- newCase = AccessCase::setter(
- vm, codeBlock, slot.isCustomAccessor() ? AccessCase::CustomAccessorSetter : AccessCase::CustomValueSetter, structure, invalidOffset, conditionSet,
- slot.customSetter(), slot.base());
- } else {
- ObjectPropertyConditionSet conditionSet;
- PropertyOffset offset;
-
- if (slot.base() != baseValue) {
- conditionSet =
- generateConditionsForPrototypePropertyHit(
- vm, codeBlock, exec, structure, slot.base(), ident.impl());
- if (!conditionSet.isValid())
- return GiveUpOnCache;
- offset = conditionSet.slotBaseCondition().offset();
- } else
- offset = slot.cachedOffset();
-
- newCase = AccessCase::setter(
- vm, codeBlock, AccessCase::Setter, structure, offset, conditionSet);
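+    // Commit the transition: install the new Structure*, then store the value at
+    // its inline or out-of-line offset.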
+ stubJit.storePtr(MacroAssembler::TrustedImmPtr(structure), MacroAssembler::Address(baseGPR, JSCell::structureOffset()));
+#if USE(JSVALUE64)
+ if (isInlineOffset(slot.cachedOffset()))
+ stubJit.store64(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue)));
+ else {
+ if (!scratchGPR1HasStorage)
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR1);
+ stubJit.store64(valueGPR, MacroAssembler::Address(scratchGPR1, offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue)));
+ }
+#elif USE(JSVALUE32_64)
+ if (isInlineOffset(slot.cachedOffset())) {
+ stubJit.store32(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ stubJit.store32(valueTagGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ } else {
+ if (!scratchGPR1HasStorage)
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::butterflyOffset()), scratchGPR1);
+ stubJit.store32(valueGPR, MacroAssembler::Address(scratchGPR1, offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ stubJit.store32(valueTagGPR, MacroAssembler::Address(scratchGPR1, offsetInButterfly(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ }
+#endif
+
+#if ENABLE(GGC)
+ MacroAssembler::Call writeBarrierOperation = writeBarrier(stubJit, baseGPR, scratchGPR1, scratchGPR2, callFrameRegister, allocator);
+#endif
+
+ MacroAssembler::Jump success;
+ MacroAssembler::Jump failure;
+
+ if (allocator.didReuseRegisters()) {
+ allocator.restoreReusedRegistersByPopping(stubJit);
+ success = stubJit.jump();
+
+ failureCases.link(&stubJit);
+ allocator.restoreReusedRegistersByPopping(stubJit);
+ failure = stubJit.jump();
+ } else
+ success = stubJit.jump();
+
+ MacroAssembler::Call operationCall;
+ MacroAssembler::Jump successInSlowPath;
+
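+    // If the stub had to grow the storage, the inline bump allocation above can
+    // fail; the slow path spills the live registers and calls into C++ to
+    // reallocate the storage and finish the put.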
+ if (structure->outOfLineCapacity() != oldStructure->outOfLineCapacity()) {
+ slowPath.link(&stubJit);
+
+ allocator.restoreReusedRegistersByPopping(stubJit);
+ ScratchBuffer* scratchBuffer = vm->scratchBufferForSize(allocator.desiredScratchBufferSize());
+ allocator.preserveUsedRegistersToScratchBuffer(stubJit, scratchBuffer, scratchGPR1);
+#if USE(JSVALUE64)
+ stubJit.setupArguments(callFrameRegister, baseGPR, MacroAssembler::TrustedImmPtr(structure), MacroAssembler::TrustedImm32(slot.cachedOffset()), valueGPR);
+#else
+ stubJit.setupArguments(callFrameRegister, baseGPR, MacroAssembler::TrustedImmPtr(structure), MacroAssembler::TrustedImm32(slot.cachedOffset()), valueGPR, valueTagGPR);
+#endif
+ operationCall = stubJit.call();
+ allocator.restoreUsedRegistersFromScratchBuffer(stubJit, scratchBuffer, scratchGPR1);
+ successInSlowPath = stubJit.jump();
+ }
+
+ LinkBuffer patchBuffer(*vm, &stubJit, exec->codeBlock());
+#if ENABLE(GGC)
+ patchBuffer.link(writeBarrierOperation, operationFlushWriteBarrierBuffer);
+#endif
+ patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone));
+ if (allocator.didReuseRegisters())
+ patchBuffer.link(failure, failureLabel);
+ else
+ patchBuffer.link(failureCases, failureLabel);
+ if (structure->outOfLineCapacity() != oldStructure->outOfLineCapacity()) {
+ patchBuffer.link(operationCall, operationReallocateStorageAndFinishPut);
+ patchBuffer.link(successInSlowPath, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone));
+ }
+
+ stubRoutine =
+ createJITStubRoutine(
+ FINALIZE_DFG_CODE(
+ patchBuffer,
+ ("DFG PutById %stransition stub (%p -> %p) for %s, return point %p",
+ structure->outOfLineCapacity() != oldStructure->outOfLineCapacity() ? "reallocating " : "",
+ oldStructure, structure,
+ toCString(*exec->codeBlock()).data(), stubInfo.callReturnLocation.labelAtOffset(
+ stubInfo.patch.deltaCallToDone).executableAddress())),
+ *vm,
+ exec->codeBlock()->ownerExecutable(),
+ structure->outOfLineCapacity() != oldStructure->outOfLineCapacity(),
+ structure);
+}
+
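+// Returns true if an optimized stub or self-access patch was installed; on false
+// the caller keeps the generic put-by-id path.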
+static bool tryCachePutByID(ExecState* exec, JSValue baseValue, const Identifier& ident, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
+{
+ CodeBlock* codeBlock = exec->codeBlock();
+ VM* vm = &exec->vm();
+
+ if (!baseValue.isCell())
+ return false;
+ JSCell* baseCell = baseValue.asCell();
+ Structure* structure = baseCell->structure();
+ Structure* oldStructure = structure->previousID();
+
+ if (!slot.isCacheable())
+ return false;
+ if (!structure->propertyAccessesAreCacheable())
+ return false;
+
+ // Optimize self access.
+ if (slot.base() == baseValue) {
+ if (slot.type() == PutPropertySlot::NewProperty) {
+ if (structure->isDictionary())
+ return false;
+
+ // Skip optimizing the case where we need a realloc, if we don't have
+ // enough registers to make it happen.
+ if (GPRInfo::numberOfRegisters < 6
+ && oldStructure->outOfLineCapacity() != structure->outOfLineCapacity()
+ && oldStructure->outOfLineCapacity())
+ return false;
+
+ // Skip optimizing the case where we need realloc, and the structure has
+ // indexing storage.
+ if (oldStructure->couldHaveIndexingHeader())
+ return false;
+
+ if (normalizePrototypeChain(exec, baseCell) == InvalidPrototypeChain)
+ return false;
+
+ StructureChain* prototypeChain = structure->prototypeChain(exec);
+
+ emitPutTransitionStub(
+ exec, baseValue, ident, slot, stubInfo, putKind,
+ structure, oldStructure, prototypeChain,
+ stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase),
+ stubInfo.stubRoutine);
+
+ RepatchBuffer repatchBuffer(codeBlock);
+ repatchBuffer.relink(
+ stubInfo.callReturnLocation.jumpAtOffset(
+ stubInfo.patch.deltaCallToJump),
+ CodeLocationLabel(stubInfo.stubRoutine->code().code()));
+ repatchCall(repatchBuffer, stubInfo.callReturnLocation, appropriateListBuildingPutByIdFunction(slot, putKind));
+
+ stubInfo.initPutByIdTransition(*vm, codeBlock->ownerExecutable(), oldStructure, structure, prototypeChain, putKind == Direct);
+
+ return true;
}
+
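+        // Existing-property replace: patch the inline fast path in place instead
+        // of emitting a stub.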
+ if (!MacroAssembler::isPtrAlignedAddressOffset(offsetRelativeToPatchedStorage(slot.cachedOffset())))
+ return false;
+
+ repatchByIdSelfAccess(*vm, codeBlock, stubInfo, structure, ident, slot.cachedOffset(), appropriateListBuildingPutByIdFunction(slot, putKind), false);
+ stubInfo.initPutByIdReplace(*vm, codeBlock->ownerExecutable(), structure);
+ return true;
}
- MacroAssemblerCodePtr codePtr = stubInfo.addAccessCase(codeBlock, ident, WTFMove(newCase));
+ return false;
+}
+
+void repatchPutByID(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
+{
+ GCSafeConcurrentJITLocker locker(exec->codeBlock()->m_lock, exec->vm().heap);
- if (!codePtr)
- return GiveUpOnCache;
+ bool cached = tryCachePutByID(exec, baseValue, propertyName, slot, stubInfo, putKind);
+ if (!cached)
+ repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, appropriateGenericPutByIdFunction(slot, putKind));
+}
- resetPutByIDCheckAndLoad(stubInfo);
- MacroAssembler::repatchJump(
- stubInfo.callReturnLocation.jumpAtOffset(
- stubInfo.patch.deltaCallToJump),
- CodeLocationLabel(codePtr));
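+// Like tryCachePutByID, but appends a new case to the polymorphic put-by-id list
+// rather than patching the self-access fast path.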
+static bool tryBuildPutByIdList(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
+{
+ CodeBlock* codeBlock = exec->codeBlock();
+ VM* vm = &exec->vm();
+
+ if (!baseValue.isCell())
+ return false;
+ JSCell* baseCell = baseValue.asCell();
+ Structure* structure = baseCell->structure();
+ Structure* oldStructure = structure->previousID();
+
+ if (!slot.isCacheable())
+ return false;
+ if (!structure->propertyAccessesAreCacheable())
+ return false;
+
+ // Optimize self access.
+ if (slot.base() == baseValue) {
+ PolymorphicPutByIdList* list;
+ RefPtr<JITStubRoutine> stubRoutine;
+
+ if (slot.type() == PutPropertySlot::NewProperty) {
+ if (structure->isDictionary())
+ return false;
+
+ // Skip optimizing the case where we need a realloc, if we don't have
+ // enough registers to make it happen.
+ if (GPRInfo::numberOfRegisters < 6
+ && oldStructure->outOfLineCapacity() != structure->outOfLineCapacity()
+ && oldStructure->outOfLineCapacity())
+ return false;
+
+ // Skip optimizing the case where we need realloc, and the structure has
+ // indexing storage.
+ if (oldStructure->couldHaveIndexingHeader())
+ return false;
+
+ if (normalizePrototypeChain(exec, baseCell) == InvalidPrototypeChain)
+ return false;
+
+ StructureChain* prototypeChain = structure->prototypeChain(exec);
+
+ // We're now committed to creating the stub. Mogrify the meta-data accordingly.
+ list = PolymorphicPutByIdList::from(
+ putKind, stubInfo,
+ stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
+
+ emitPutTransitionStub(
+ exec, baseValue, propertyName, slot, stubInfo, putKind,
+ structure, oldStructure, prototypeChain,
+ CodeLocationLabel(list->currentSlowPathTarget()),
+ stubRoutine);
+
+ list->addAccess(
+ PutByIdAccess::transition(
+ *vm, codeBlock->ownerExecutable(),
+ oldStructure, structure, prototypeChain,
+ stubRoutine));
+ } else {
+ // We're now committed to creating the stub. Mogrify the meta-data accordingly.
+ list = PolymorphicPutByIdList::from(
+ putKind, stubInfo,
+ stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
+
+ emitPutReplaceStub(
+ exec, baseValue, propertyName, slot, stubInfo, putKind,
+ structure, CodeLocationLabel(list->currentSlowPathTarget()), stubRoutine);
+
+ list->addAccess(
+ PutByIdAccess::replace(
+ *vm, codeBlock->ownerExecutable(),
+ structure, stubRoutine));
+ }
+
+ RepatchBuffer repatchBuffer(codeBlock);
+ repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), CodeLocationLabel(stubRoutine->code().code()));
+
+ if (list->isFull())
+ repatchCall(repatchBuffer, stubInfo.callReturnLocation, appropriateGenericPutByIdFunction(slot, putKind));
+
+ return true;
+ }
- return RetryCacheLater;
+ return false;
}
-void repatchPutByID(ExecState* exec, JSValue baseValue, Structure* structure, const Identifier& propertyName, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
+void buildPutByIdList(ExecState* exec, JSValue baseValue, const Identifier& propertyName, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
{
GCSafeConcurrentJITLocker locker(exec->codeBlock()->m_lock, exec->vm().heap);
- if (tryCachePutByID(exec, baseValue, structure, propertyName, slot, stubInfo, putKind) == GiveUpOnCache)
+ bool cached = tryBuildPutByIdList(exec, baseValue, propertyName, slot, stubInfo, putKind);
+ if (!cached)
repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, appropriateGenericPutByIdFunction(slot, putKind));
}
-static InlineCacheAction tryRepatchIn(
+static bool tryRepatchIn(
ExecState* exec, JSCell* base, const Identifier& ident, bool wasFound,
const PropertySlot& slot, StructureStubInfo& stubInfo)
{
- if (forceICFailure(exec))
- return GiveUpOnCache;
-
- if (!base->structure()->propertyAccessesAreCacheable() || (!wasFound && !base->structure()->propertyAccessesAreCacheableForAbsence()))
- return GiveUpOnCache;
+ if (!base->structure()->propertyAccessesAreCacheable())
+ return false;
if (wasFound) {
if (!slot.isCacheable())
- return GiveUpOnCache;
+ return false;
}
CodeBlock* codeBlock = exec->codeBlock();
- VM& vm = exec->vm();
- Structure* structure = base->structure(vm);
+ VM* vm = &exec->vm();
+ Structure* structure = base->structure();
- ObjectPropertyConditionSet conditionSet;
- if (wasFound) {
- if (slot.slotBase() != base) {
- conditionSet = generateConditionsForPrototypePropertyHit(
- vm, codeBlock, exec, structure, slot.slotBase(), ident.impl());
- }
+ PropertyOffset offsetIgnored;
+ size_t count = normalizePrototypeChainForChainAccess(exec, base, wasFound ? slot.slotBase() : JSValue(), ident, offsetIgnored);
+ if (count == InvalidPrototypeChain)
+ return false;
+
+ PolymorphicAccessStructureList* polymorphicStructureList;
+ int listIndex;
+
+ CodeLocationLabel successLabel = stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToDone);
+ CodeLocationLabel slowCaseLabel;
+
+ if (stubInfo.accessType == access_unset) {
+ polymorphicStructureList = new PolymorphicAccessStructureList();
+ stubInfo.initInList(polymorphicStructureList, 0);
+ slowCaseLabel = stubInfo.callReturnLocation.labelAtOffset(
+ stubInfo.patch.deltaCallToSlowCase);
+ listIndex = 0;
} else {
- conditionSet = generateConditionsForPropertyMiss(
- vm, codeBlock, exec, structure, ident.impl());
+ RELEASE_ASSERT(stubInfo.accessType == access_in_list);
+ polymorphicStructureList = stubInfo.u.inList.structureList;
+ listIndex = stubInfo.u.inList.listSize;
+ slowCaseLabel = CodeLocationLabel(polymorphicStructureList->list[listIndex - 1].stubRoutine->code().code());
+
+ if (listIndex == POLYMORPHIC_LIST_CACHE_SIZE)
+ return false;
}
- if (!conditionSet.isValid())
- return GiveUpOnCache;
-
- std::unique_ptr<AccessCase> newCase = AccessCase::in(
- vm, codeBlock, wasFound ? AccessCase::InHit : AccessCase::InMiss, structure, conditionSet);
-
- MacroAssemblerCodePtr codePtr = stubInfo.addAccessCase(codeBlock, ident, WTFMove(newCase));
- if (!codePtr)
- return GiveUpOnCache;
+
+ StructureChain* chain = structure->prototypeChain(exec);
+ RefPtr<JITStubRoutine> stubRoutine;
+
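+    // Emit a stub that checks the base's structure and every structure along the
+    // prototype chain, then materializes the already-known boolean answer.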
+ {
+ GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.baseGPR);
+ GPRReg resultGPR = static_cast<GPRReg>(stubInfo.patch.valueGPR);
+ GPRReg scratchGPR = TempRegisterSet(stubInfo.patch.usedRegisters).getFreeGPR();
+
+ CCallHelpers stubJit(vm);
+
+ bool needToRestoreScratch;
+ if (scratchGPR == InvalidGPRReg) {
+ scratchGPR = AssemblyHelpers::selectScratchGPR(baseGPR, resultGPR);
+ stubJit.pushToSave(scratchGPR);
+ needToRestoreScratch = true;
+ } else
+ needToRestoreScratch = false;
+
+ MacroAssembler::JumpList failureCases;
+ failureCases.append(stubJit.branchPtr(
+ MacroAssembler::NotEqual,
+ MacroAssembler::Address(baseGPR, JSCell::structureOffset()),
+ MacroAssembler::TrustedImmPtr(structure)));
+
+ CodeBlock* codeBlock = exec->codeBlock();
+ if (structure->typeInfo().newImpurePropertyFiresWatchpoints())
+ vm->registerWatchpointForImpureProperty(ident, stubInfo.addWatchpoint(codeBlock));
+
+ Structure* currStructure = structure;
+ WriteBarrier<Structure>* it = chain->head();
+ for (unsigned i = 0; i < count; ++i, ++it) {
+ JSObject* prototype = asObject(currStructure->prototypeForLookup(exec));
+ Structure* protoStructure = prototype->structure();
+ addStructureTransitionCheck(
+ prototype, protoStructure, exec->codeBlock(), stubInfo, stubJit,
+ failureCases, scratchGPR);
+ if (protoStructure->typeInfo().newImpurePropertyFiresWatchpoints())
+ vm->registerWatchpointForImpureProperty(ident, stubInfo.addWatchpoint(codeBlock));
+ currStructure = it->get();
+ }
+
+#if USE(JSVALUE64)
+ stubJit.move(MacroAssembler::TrustedImm64(JSValue::encode(jsBoolean(wasFound))), resultGPR);
+#else
+ stubJit.move(MacroAssembler::TrustedImm32(wasFound), resultGPR);
+#endif
+
+ MacroAssembler::Jump success, fail;
+
+ emitRestoreScratch(stubJit, needToRestoreScratch, scratchGPR, success, fail, failureCases);
+
+ LinkBuffer patchBuffer(*vm, &stubJit, exec->codeBlock());
- MacroAssembler::repatchJump(
- stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump),
- CodeLocationLabel(codePtr));
+ linkRestoreScratch(patchBuffer, needToRestoreScratch, success, fail, failureCases, successLabel, slowCaseLabel);
+
+ stubRoutine = FINALIZE_CODE_FOR_DFG_STUB(
+ patchBuffer,
+ ("DFG In (found = %s) stub for %s, return point %p",
+ wasFound ? "yes" : "no", toCString(*exec->codeBlock()).data(),
+ successLabel.executableAddress()));
+ }
+
+ polymorphicStructureList->list[listIndex].set(*vm, codeBlock->ownerExecutable(), stubRoutine, structure, true);
+ stubInfo.u.inList.listSize++;
- return RetryCacheLater;
+ RepatchBuffer repatchBuffer(codeBlock);
+ repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), CodeLocationLabel(stubRoutine->code().code()));
+
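+    // Once this case occupies the last slot in the polymorphic list, report
+    // failure so the caller reverts to the generic operationIn.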
+ return listIndex < (POLYMORPHIC_LIST_CACHE_SIZE - 1);
}
void repatchIn(
ExecState* exec, JSCell* base, const Identifier& ident, bool wasFound,
const PropertySlot& slot, StructureStubInfo& stubInfo)
{
- if (tryRepatchIn(exec, base, ident, wasFound, slot, stubInfo) == GiveUpOnCache)
- repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, operationIn);
-}
-
-static void linkSlowFor(VM*, CallLinkInfo& callLinkInfo, MacroAssemblerCodeRef codeRef)
-{
- MacroAssembler::repatchNearCall(callLinkInfo.callReturnLocation(), CodeLocationLabel(codeRef.code()));
-}
-
-static void linkSlowFor(VM* vm, CallLinkInfo& callLinkInfo, ThunkGenerator generator)
-{
- linkSlowFor(vm, callLinkInfo, vm->getCTIStub(generator));
+ if (tryRepatchIn(exec, base, ident, wasFound, slot, stubInfo))
+ return;
+ repatchCall(exec->codeBlock(), stubInfo.callReturnLocation, operationIn);
}
-static void linkSlowFor(VM* vm, CallLinkInfo& callLinkInfo)
+static void linkSlowFor(RepatchBuffer& repatchBuffer, VM* vm, CallLinkInfo& callLinkInfo, CodeSpecializationKind kind)
{
- MacroAssemblerCodeRef virtualThunk = virtualThunkFor(vm, callLinkInfo);
- linkSlowFor(vm, callLinkInfo, virtualThunk);
- callLinkInfo.setSlowStub(createJITStubRoutine(virtualThunk, *vm, nullptr, true));
+ if (kind == CodeForCall) {
+ repatchBuffer.relink(callLinkInfo.callReturnLocation, vm->getCTIStub(virtualCallThunkGenerator).code());
+ return;
+ }
+ ASSERT(kind == CodeForConstruct);
+ repatchBuffer.relink(callLinkInfo.callReturnLocation, vm->getCTIStub(virtualConstructThunkGenerator).code());
}
-void linkFor(
- ExecState* exec, CallLinkInfo& callLinkInfo, CodeBlock* calleeCodeBlock,
- JSFunction* callee, MacroAssemblerCodePtr codePtr)
+void linkFor(ExecState* exec, CallLinkInfo& callLinkInfo, CodeBlock* calleeCodeBlock, JSFunction* callee, MacroAssemblerCodePtr codePtr, CodeSpecializationKind kind)
{
- ASSERT(!callLinkInfo.stub());
+ ASSERT(!callLinkInfo.stub);
+
+ // If you're being call-linked from a DFG caller then you obviously didn't get inlined.
+ if (calleeCodeBlock)
+ calleeCodeBlock->m_shouldAlwaysBeInlined = false;
CodeBlock* callerCodeBlock = exec->callerFrame()->codeBlock();
-
VM* vm = callerCodeBlock->vm();
+ RepatchBuffer repatchBuffer(callerCodeBlock);
+
ASSERT(!callLinkInfo.isLinked());
- callLinkInfo.setCallee(exec->callerFrame()->vm(), callLinkInfo.hotPathBegin(), callerCodeBlock, callee);
- callLinkInfo.setLastSeenCallee(exec->callerFrame()->vm(), callerCodeBlock, callee);
- if (shouldDumpDisassemblyFor(callerCodeBlock))
- dataLog("Linking call in ", *callerCodeBlock, " at ", callLinkInfo.codeOrigin(), " to ", pointerDump(calleeCodeBlock), ", entrypoint at ", codePtr, "\n");
- MacroAssembler::repatchNearCall(callLinkInfo.hotPathOther(), CodeLocationLabel(codePtr));
+ callLinkInfo.callee.set(exec->callerFrame()->vm(), callLinkInfo.hotPathBegin, callerCodeBlock->ownerExecutable(), callee);
+ callLinkInfo.lastSeenCallee.set(exec->callerFrame()->vm(), callerCodeBlock->ownerExecutable(), callee);
+ repatchBuffer.relink(callLinkInfo.hotPathOther, codePtr);
if (calleeCodeBlock)
calleeCodeBlock->linkIncomingCall(exec->callerFrame(), &callLinkInfo);
- if (callLinkInfo.specializationKind() == CodeForCall && callLinkInfo.allowStubs()) {
- linkSlowFor(vm, callLinkInfo, linkPolymorphicCallThunkGenerator);
+ if (kind == CodeForCall) {
+ repatchBuffer.relink(callLinkInfo.callReturnLocation, vm->getCTIStub(linkClosureCallThunkGenerator).code());
return;
}
- linkSlowFor(vm, callLinkInfo);
+ ASSERT(kind == CodeForConstruct);
+ linkSlowFor(repatchBuffer, vm, callLinkInfo, CodeForConstruct);
}
-void linkSlowFor(
- ExecState* exec, CallLinkInfo& callLinkInfo)
+void linkSlowFor(ExecState* exec, CallLinkInfo& callLinkInfo, CodeSpecializationKind kind)
{
CodeBlock* callerCodeBlock = exec->callerFrame()->codeBlock();
VM* vm = callerCodeBlock->vm();
- linkSlowFor(vm, callLinkInfo);
-}
-
-static void revertCall(VM* vm, CallLinkInfo& callLinkInfo, MacroAssemblerCodeRef codeRef)
-{
- MacroAssembler::revertJumpReplacementToBranchPtrWithPatch(
- MacroAssembler::startOfBranchPtrWithPatchOnRegister(callLinkInfo.hotPathBegin()),
- static_cast<MacroAssembler::RegisterID>(callLinkInfo.calleeGPR()), 0);
- linkSlowFor(vm, callLinkInfo, codeRef);
- callLinkInfo.clearSeen();
- callLinkInfo.clearCallee();
- callLinkInfo.clearStub();
- callLinkInfo.clearSlowStub();
- if (callLinkInfo.isOnList())
- callLinkInfo.remove();
-}
-
-void unlinkFor(VM& vm, CallLinkInfo& callLinkInfo)
-{
- if (Options::dumpDisassembly())
- dataLog("Unlinking call from ", callLinkInfo.callReturnLocation(), "\n");
-
- revertCall(&vm, callLinkInfo, vm.getCTIStub(linkCallThunkGenerator));
-}
-
-void linkVirtualFor(
- ExecState* exec, CallLinkInfo& callLinkInfo)
-{
- CodeBlock* callerCodeBlock = exec->callerFrame()->codeBlock();
- VM* vm = callerCodeBlock->vm();
-
- if (shouldDumpDisassemblyFor(callerCodeBlock))
- dataLog("Linking virtual call at ", *callerCodeBlock, " ", exec->callerFrame()->codeOrigin(), "\n");
+ RepatchBuffer repatchBuffer(callerCodeBlock);
- MacroAssemblerCodeRef virtualThunk = virtualThunkFor(vm, callLinkInfo);
- revertCall(vm, callLinkInfo, virtualThunk);
- callLinkInfo.setSlowStub(createJITStubRoutine(virtualThunk, *vm, nullptr, true));
+ linkSlowFor(repatchBuffer, vm, callLinkInfo, kind);
}
-namespace {
-struct CallToCodePtr {
- CCallHelpers::Call call;
- MacroAssemblerCodePtr codePtr;
-};
-} // anonymous namespace
-
-void linkPolymorphicCall(
- ExecState* exec, CallLinkInfo& callLinkInfo, CallVariant newVariant)
+void linkClosureCall(ExecState* exec, CallLinkInfo& callLinkInfo, CodeBlock* calleeCodeBlock, Structure* structure, ExecutableBase* executable, MacroAssemblerCodePtr codePtr)
{
- RELEASE_ASSERT(callLinkInfo.allowStubs());
-
- // Currently we can't do anything for non-function callees.
- // https://bugs.webkit.org/show_bug.cgi?id=140685
- if (!newVariant || !newVariant.executable()) {
- linkVirtualFor(exec, callLinkInfo);
- return;
- }
+ ASSERT(!callLinkInfo.stub);
CodeBlock* callerCodeBlock = exec->callerFrame()->codeBlock();
VM* vm = callerCodeBlock->vm();
- CallVariantList list;
- if (PolymorphicCallStubRoutine* stub = callLinkInfo.stub())
- list = stub->variants();
- else if (JSFunction* oldCallee = callLinkInfo.callee())
- list = CallVariantList{ CallVariant(oldCallee) };
-
- list = variantListWithVariant(list, newVariant);
-
- // If there are any closure calls then it makes sense to treat all of them as closure calls.
- // This makes switching on callee cheaper. It also produces profiling that's easier on the DFG;
- // the DFG doesn't really want to deal with a combination of closure and non-closure callees.
- bool isClosureCall = false;
- for (CallVariant variant : list) {
- if (variant.isClosureCall()) {
- list = despecifiedVariantList(list);
- isClosureCall = true;
- break;
- }
- }
-
- if (isClosureCall)
- callLinkInfo.setHasSeenClosure();
-
- Vector<PolymorphicCallCase> callCases;
-
- // Figure out what our cases are.
- for (CallVariant variant : list) {
- CodeBlock* codeBlock;
- if (variant.executable()->isHostFunction())
- codeBlock = nullptr;
- else {
- ExecutableBase* executable = variant.executable();
-#if ENABLE(WEBASSEMBLY)
- if (executable->isWebAssemblyExecutable())
- codeBlock = jsCast<WebAssemblyExecutable*>(executable)->codeBlockForCall();
- else
-#endif
- codeBlock = jsCast<FunctionExecutable*>(executable)->codeBlockForCall();
- // If we cannot handle a callee, either because we don't have a CodeBlock or because arity mismatch,
- // assume that it's better for this whole thing to be a virtual call.
- if (!codeBlock || exec->argumentCountIncludingThis() < static_cast<size_t>(codeBlock->numParameters()) || callLinkInfo.isVarargs()) {
- linkVirtualFor(exec, callLinkInfo);
- return;
- }
- }
-
- callCases.append(PolymorphicCallCase(variant, codeBlock));
- }
-
- // If we are over the limit, just use a normal virtual call.
- unsigned maxPolymorphicCallVariantListSize;
- if (callerCodeBlock->jitType() == JITCode::topTierJIT())
- maxPolymorphicCallVariantListSize = Options::maxPolymorphicCallVariantListSizeForTopTier();
- else
- maxPolymorphicCallVariantListSize = Options::maxPolymorphicCallVariantListSize();
- if (list.size() > maxPolymorphicCallVariantListSize) {
- linkVirtualFor(exec, callLinkInfo);
- return;
- }
-
- GPRReg calleeGPR = static_cast<GPRReg>(callLinkInfo.calleeGPR());
+ GPRReg calleeGPR = static_cast<GPRReg>(callLinkInfo.calleeGPR);
CCallHelpers stubJit(vm, callerCodeBlock);
CCallHelpers::JumpList slowPath;
- std::unique_ptr<CallFrameShuffler> frameShuffler;
- if (callLinkInfo.frameShuffleData()) {
- ASSERT(callLinkInfo.isTailCall());
- frameShuffler = std::make_unique<CallFrameShuffler>(stubJit, *callLinkInfo.frameShuffleData());
-#if USE(JSVALUE32_64)
- // We would have already checked that the callee is a cell, and we can
- // use the additional register this buys us.
- frameShuffler->assumeCalleeIsCell();
-#endif
- frameShuffler->lockGPR(calleeGPR);
- }
- GPRReg comparisonValueGPR;
-
- if (isClosureCall) {
- GPRReg scratchGPR;
- if (frameShuffler)
- scratchGPR = frameShuffler->acquireGPR();
- else
- scratchGPR = AssemblyHelpers::selectScratchGPR(calleeGPR);
- // Verify that we have a function and stash the executable in scratchGPR.
-
#if USE(JSVALUE64)
- // We can't rely on tagMaskRegister being set, so we do this the hard
- // way.
- stubJit.move(MacroAssembler::TrustedImm64(TagMask), scratchGPR);
- slowPath.append(stubJit.branchTest64(CCallHelpers::NonZero, calleeGPR, scratchGPR));
+ // We can safely clobber everything except the calleeGPR. We can't rely on tagMaskRegister
+ // being set. So we do this the hard way.
+ GPRReg scratch = AssemblyHelpers::selectScratchGPR(calleeGPR);
+ stubJit.move(MacroAssembler::TrustedImm64(TagMask), scratch);
+ slowPath.append(stubJit.branchTest64(CCallHelpers::NonZero, calleeGPR, scratch));
#else
- // We would have already checked that the callee is a cell.
+ // We would have already checked that the callee is a cell.
#endif
- slowPath.append(
- stubJit.branch8(
- CCallHelpers::NotEqual,
- CCallHelpers::Address(calleeGPR, JSCell::typeInfoTypeOffset()),
- CCallHelpers::TrustedImm32(JSFunctionType)));
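+    // Guard the fast path on both the callee's structure and its executable; any
+    // mismatch falls through to the virtual call.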
+ slowPath.append(
+ stubJit.branchPtr(
+ CCallHelpers::NotEqual,
+ CCallHelpers::Address(calleeGPR, JSCell::structureOffset()),
+ CCallHelpers::TrustedImmPtr(structure)));
- stubJit.loadPtr(
+ slowPath.append(
+ stubJit.branchPtr(
+ CCallHelpers::NotEqual,
CCallHelpers::Address(calleeGPR, JSFunction::offsetOfExecutable()),
- scratchGPR);
-
- comparisonValueGPR = scratchGPR;
- } else
- comparisonValueGPR = calleeGPR;
-
- Vector<int64_t> caseValues(callCases.size());
- Vector<CallToCodePtr> calls(callCases.size());
- std::unique_ptr<uint32_t[]> fastCounts;
-
- if (callerCodeBlock->jitType() != JITCode::topTierJIT())
- fastCounts = std::make_unique<uint32_t[]>(callCases.size());
-
- for (size_t i = 0; i < callCases.size(); ++i) {
- if (fastCounts)
- fastCounts[i] = 0;
-
- CallVariant variant = callCases[i].variant();
- int64_t newCaseValue;
- if (isClosureCall)
- newCaseValue = bitwise_cast<intptr_t>(variant.executable());
- else
- newCaseValue = bitwise_cast<intptr_t>(variant.function());
-
- if (!ASSERT_DISABLED) {
- for (size_t j = 0; j < i; ++j) {
- if (caseValues[j] != newCaseValue)
- continue;
-
- dataLog("ERROR: Attempt to add duplicate case value.\n");
- dataLog("Existing case values: ");
- CommaPrinter comma;
- for (size_t k = 0; k < i; ++k)
- dataLog(comma, caseValues[k]);
- dataLog("\n");
- dataLog("Attempting to add: ", newCaseValue, "\n");
- dataLog("Variant list: ", listDump(callCases), "\n");
- RELEASE_ASSERT_NOT_REACHED();
- }
- }
-
- caseValues[i] = newCaseValue;
- }
-
- GPRReg fastCountsBaseGPR;
- if (frameShuffler)
- fastCountsBaseGPR = frameShuffler->acquireGPR();
- else {
- fastCountsBaseGPR =
- AssemblyHelpers::selectScratchGPR(calleeGPR, comparisonValueGPR, GPRInfo::regT3);
- }
- stubJit.move(CCallHelpers::TrustedImmPtr(fastCounts.get()), fastCountsBaseGPR);
- if (!frameShuffler && callLinkInfo.isTailCall())
- stubJit.emitRestoreCalleeSaves();
- BinarySwitch binarySwitch(comparisonValueGPR, caseValues, BinarySwitch::IntPtr);
- CCallHelpers::JumpList done;
- while (binarySwitch.advance(stubJit)) {
- size_t caseIndex = binarySwitch.caseIndex();
-
- CallVariant variant = callCases[caseIndex].variant();
-
- ASSERT(variant.executable()->hasJITCodeForCall());
- MacroAssemblerCodePtr codePtr =
- variant.executable()->generatedJITCodeForCall()->addressForCall(ArityCheckNotRequired);
-
- if (fastCounts) {
- stubJit.add32(
- CCallHelpers::TrustedImm32(1),
- CCallHelpers::Address(fastCountsBaseGPR, caseIndex * sizeof(uint32_t)));
- }
- if (frameShuffler) {
- CallFrameShuffler(stubJit, frameShuffler->snapshot()).prepareForTailCall();
- calls[caseIndex].call = stubJit.nearTailCall();
- } else if (callLinkInfo.isTailCall()) {
- stubJit.prepareForTailCallSlow();
- calls[caseIndex].call = stubJit.nearTailCall();
- } else
- calls[caseIndex].call = stubJit.nearCall();
- calls[caseIndex].codePtr = codePtr;
- done.append(stubJit.jump());
- }
+ CCallHelpers::TrustedImmPtr(executable)));
- slowPath.link(&stubJit);
- binarySwitch.fallThrough().link(&stubJit);
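+    // Install the callee's scope chain into the frame's ScopeChain slot so the
+    // callee sees the right scope.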
+ stubJit.loadPtr(
+ CCallHelpers::Address(calleeGPR, JSFunction::offsetOfScopeChain()),
+ GPRInfo::returnValueGPR);
- if (frameShuffler) {
- frameShuffler->releaseGPR(calleeGPR);
- frameShuffler->releaseGPR(comparisonValueGPR);
- frameShuffler->releaseGPR(fastCountsBaseGPR);
-#if USE(JSVALUE32_64)
- frameShuffler->setCalleeJSValueRegs(JSValueRegs(GPRInfo::regT1, GPRInfo::regT0));
+#if USE(JSVALUE64)
+ stubJit.store64(
+ GPRInfo::returnValueGPR,
+ CCallHelpers::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register) * JSStack::ScopeChain)));
#else
- frameShuffler->setCalleeJSValueRegs(JSValueRegs(GPRInfo::regT0));
+ stubJit.storePtr(
+ GPRInfo::returnValueGPR,
+ CCallHelpers::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register) * JSStack::ScopeChain) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ stubJit.store32(
+ CCallHelpers::TrustedImm32(JSValue::CellTag),
+ CCallHelpers::Address(GPRInfo::callFrameRegister, static_cast<ptrdiff_t>(sizeof(Register) * JSStack::ScopeChain) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
#endif
- frameShuffler->prepareForSlowPath();
- } else {
- stubJit.move(calleeGPR, GPRInfo::regT0);
+
+ AssemblyHelpers::Call call = stubJit.nearCall();
+ AssemblyHelpers::Jump done = stubJit.jump();
+
+ slowPath.link(&stubJit);
+ stubJit.move(calleeGPR, GPRInfo::regT0);
#if USE(JSVALUE32_64)
- stubJit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::regT1);
+ stubJit.move(CCallHelpers::TrustedImm32(JSValue::CellTag), GPRInfo::regT1);
#endif
- }
- stubJit.move(CCallHelpers::TrustedImmPtr(&callLinkInfo), GPRInfo::regT2);
- stubJit.move(CCallHelpers::TrustedImmPtr(callLinkInfo.callReturnLocation().executableAddress()), GPRInfo::regT4);
-
- stubJit.restoreReturnAddressBeforeReturn(GPRInfo::regT4);
+ stubJit.move(CCallHelpers::TrustedImmPtr(callLinkInfo.callReturnLocation.executableAddress()), GPRInfo::nonArgGPR2);
+ stubJit.restoreReturnAddressBeforeReturn(GPRInfo::nonArgGPR2);
AssemblyHelpers::Jump slow = stubJit.jump();
-
- LinkBuffer patchBuffer(*vm, stubJit, callerCodeBlock, JITCompilationCanFail);
- if (patchBuffer.didFailToAllocate()) {
- linkVirtualFor(exec, callLinkInfo);
- return;
- }
- RELEASE_ASSERT(callCases.size() == calls.size());
- for (CallToCodePtr callToCodePtr : calls) {
- // Tail call special-casing ensures proper linking on ARM Thumb2, where a tail call jumps to an address
- // with a non-decorated bottom bit but a normal call calls an address with a decorated bottom bit.
- bool isTailCall = callToCodePtr.call.isFlagSet(CCallHelpers::Call::Tail);
- patchBuffer.link(
- callToCodePtr.call, FunctionPtr(isTailCall ? callToCodePtr.codePtr.dataLocation() : callToCodePtr.codePtr.executableAddress()));
- }
- if (JITCode::isOptimizingJIT(callerCodeBlock->jitType()))
- patchBuffer.link(done, callLinkInfo.callReturnLocation().labelAtOffset(0));
- else
- patchBuffer.link(done, callLinkInfo.hotPathOther().labelAtOffset(0));
- patchBuffer.link(slow, CodeLocationLabel(vm->getCTIStub(linkPolymorphicCallThunkGenerator).code()));
-
- RefPtr<PolymorphicCallStubRoutine> stubRoutine = adoptRef(new PolymorphicCallStubRoutine(
- FINALIZE_CODE_FOR(
- callerCodeBlock, patchBuffer,
- ("Polymorphic call stub for %s, return point %p, targets %s",
- toCString(*callerCodeBlock).data(), callLinkInfo.callReturnLocation().labelAtOffset(0).executableAddress(),
- toCString(listDump(callCases)).data())),
- *vm, callerCodeBlock, exec->callerFrame(), callLinkInfo, callCases,
- WTFMove(fastCounts)));
-
- MacroAssembler::replaceWithJump(
- MacroAssembler::startOfBranchPtrWithPatchOnRegister(callLinkInfo.hotPathBegin()),
+ LinkBuffer patchBuffer(*vm, &stubJit, callerCodeBlock);
+
+ patchBuffer.link(call, FunctionPtr(codePtr.executableAddress()));
+ patchBuffer.link(done, callLinkInfo.callReturnLocation.labelAtOffset(0));
+ patchBuffer.link(slow, CodeLocationLabel(vm->getCTIStub(virtualCallThunkGenerator).code()));
+
+ RefPtr<ClosureCallStubRoutine> stubRoutine = adoptRef(new ClosureCallStubRoutine(
+ FINALIZE_DFG_CODE(
+ patchBuffer,
+ ("DFG closure call stub for %s, return point %p, target %p (%s)",
+ toCString(*callerCodeBlock).data(), callLinkInfo.callReturnLocation.labelAtOffset(0).executableAddress(),
+ codePtr.executableAddress(), toCString(pointerDump(calleeCodeBlock)).data())),
+ *vm, callerCodeBlock->ownerExecutable(), structure, executable, callLinkInfo.codeOrigin));
+
+ RepatchBuffer repatchBuffer(callerCodeBlock);
+
+ repatchBuffer.replaceWithJump(
+ RepatchBuffer::startOfBranchPtrWithPatchOnRegister(callLinkInfo.hotPathBegin),
CodeLocationLabel(stubRoutine->code().code()));
- // The original slow path is unreachable on 64-bits, but still
- // reachable on 32-bits since a non-cell callee will always
- // trigger the slow path
- linkSlowFor(vm, callLinkInfo);
+ linkSlowFor(repatchBuffer, vm, callLinkInfo, CodeForCall);
- // If there had been a previous stub routine, that one will die as soon as the GC runs and sees
- // that it's no longer on stack.
- callLinkInfo.setStub(stubRoutine.release());
+ callLinkInfo.stub = stubRoutine.release();
- // The call link info no longer has a call cache apart from the jump to the polymorphic call
- // stub.
- if (callLinkInfo.isOnList())
- callLinkInfo.remove();
+ ASSERT(!calleeCodeBlock || calleeCodeBlock->isIncomingCallAlreadyLinked(&callLinkInfo));
}
-void resetGetByID(CodeBlock* codeBlock, StructureStubInfo& stubInfo)
+void resetGetByID(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
{
- repatchCall(codeBlock, stubInfo.callReturnLocation, operationGetByIdOptimize);
- resetGetByIDCheckAndLoad(stubInfo);
- MacroAssembler::repatchJump(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
+ repatchCall(repatchBuffer, stubInfo.callReturnLocation, operationGetByIdOptimize);
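+    // Revert the patched structure check and load offsets to their unpatched
+    // state, and point the patchable jump back at the slow case.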
+ CodeLocationDataLabelPtr structureLabel = stubInfo.callReturnLocation.dataLabelPtrAtOffset(-(intptr_t)stubInfo.patch.deltaCheckImmToCall);
+ if (MacroAssembler::canJumpReplacePatchableBranchPtrWithPatch()) {
+ repatchBuffer.revertJumpReplacementToPatchableBranchPtrWithPatch(
+ RepatchBuffer::startOfPatchableBranchPtrWithPatchOnAddress(structureLabel),
+ MacroAssembler::Address(
+ static_cast<MacroAssembler::RegisterID>(stubInfo.patch.baseGPR),
+ JSCell::structureOffset()),
+ reinterpret_cast<void*>(unusedPointer));
+ }
+ repatchBuffer.repatch(structureLabel, reinterpret_cast<void*>(unusedPointer));
+#if USE(JSVALUE64)
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToLoadOrStore), 0);
+#else
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToTagLoadOrStore), 0);
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.deltaCallToPayloadLoadOrStore), 0);
+#endif
+ repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
}
-void resetPutByID(CodeBlock* codeBlock, StructureStubInfo& stubInfo)
+void resetPutByID(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
{
- V_JITOperation_ESsiJJI unoptimizedFunction = bitwise_cast<V_JITOperation_ESsiJJI>(readCallTarget(codeBlock, stubInfo.callReturnLocation).executableAddress());
+ V_JITOperation_ESsiJJI unoptimizedFunction = bitwise_cast<V_JITOperation_ESsiJJI>(readCallTarget(repatchBuffer, stubInfo.callReturnLocation).executableAddress());
V_JITOperation_ESsiJJI optimizedFunction;
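+    // Map whichever put-by-id slow-path call is currently installed back to the
+    // corresponding Optimize variant.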
- if (unoptimizedFunction == operationPutByIdStrict || unoptimizedFunction == operationPutByIdStrictOptimize)
+ if (unoptimizedFunction == operationPutByIdStrict || unoptimizedFunction == operationPutByIdStrictBuildList)
optimizedFunction = operationPutByIdStrictOptimize;
- else if (unoptimizedFunction == operationPutByIdNonStrict || unoptimizedFunction == operationPutByIdNonStrictOptimize)
+ else if (unoptimizedFunction == operationPutByIdNonStrict || unoptimizedFunction == operationPutByIdNonStrictBuildList)
optimizedFunction = operationPutByIdNonStrictOptimize;
- else if (unoptimizedFunction == operationPutByIdDirectStrict || unoptimizedFunction == operationPutByIdDirectStrictOptimize)
+ else if (unoptimizedFunction == operationPutByIdDirectStrict || unoptimizedFunction == operationPutByIdDirectStrictBuildList)
optimizedFunction = operationPutByIdDirectStrictOptimize;
else {
- ASSERT(unoptimizedFunction == operationPutByIdDirectNonStrict || unoptimizedFunction == operationPutByIdDirectNonStrictOptimize);
+ ASSERT(unoptimizedFunction == operationPutByIdDirectNonStrict || unoptimizedFunction == operationPutByIdDirectNonStrictBuildList);
optimizedFunction = operationPutByIdDirectNonStrictOptimize;
}
- repatchCall(codeBlock, stubInfo.callReturnLocation, optimizedFunction);
- resetPutByIDCheckAndLoad(stubInfo);
- MacroAssembler::repatchJump(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
+ repatchCall(repatchBuffer, stubInfo.callReturnLocation, optimizedFunction);
+ CodeLocationDataLabelPtr structureLabel = stubInfo.callReturnLocation.dataLabelPtrAtOffset(-(intptr_t)stubInfo.patch.deltaCheckImmToCall);
+ if (MacroAssembler::canJumpReplacePatchableBranchPtrWithPatch()) {
+ repatchBuffer.revertJumpReplacementToPatchableBranchPtrWithPatch(
+ RepatchBuffer::startOfPatchableBranchPtrWithPatchOnAddress(structureLabel),
+ MacroAssembler::Address(
+ static_cast<MacroAssembler::RegisterID>(stubInfo.patch.baseGPR),
+ JSCell::structureOffset()),
+ reinterpret_cast<void*>(unusedPointer));
+ }
+ repatchBuffer.repatch(structureLabel, reinterpret_cast<void*>(unusedPointer));
+#if USE(JSVALUE64)
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToLoadOrStore), 0);
+#else
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToTagLoadOrStore), 0);
+ repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.deltaCallToPayloadLoadOrStore), 0);
+#endif
+ repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
}
-void resetIn(CodeBlock*, StructureStubInfo& stubInfo)
+void resetIn(RepatchBuffer& repatchBuffer, StructureStubInfo& stubInfo)
{
- MacroAssembler::repatchJump(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
+ repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.deltaCallToJump), stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.deltaCallToSlowCase));
}
} // namespace JSC
diff --git a/Source/JavaScriptCore/jit/Repatch.h b/Source/JavaScriptCore/jit/Repatch.h
index 443e944a3..faa787613 100644
--- a/Source/JavaScriptCore/jit/Repatch.h
+++ b/Source/JavaScriptCore/jit/Repatch.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,29 +26,42 @@
#ifndef Repatch_h
#define Repatch_h
+#include <wtf/Platform.h>
+
#if ENABLE(JIT)
#include "CCallHelpers.h"
-#include "CallVariant.h"
#include "JITOperations.h"
-#include "PutKind.h"
namespace JSC {
void repatchGetByID(ExecState*, JSValue, const Identifier&, const PropertySlot&, StructureStubInfo&);
void buildGetByIDList(ExecState*, JSValue, const Identifier&, const PropertySlot&, StructureStubInfo&);
void buildGetByIDProtoList(ExecState*, JSValue, const Identifier&, const PropertySlot&, StructureStubInfo&);
-void repatchPutByID(ExecState*, JSValue, Structure*, const Identifier&, const PutPropertySlot&, StructureStubInfo&, PutKind);
-void buildPutByIdList(ExecState*, JSValue, Structure*, const Identifier&, const PutPropertySlot&, StructureStubInfo&, PutKind);
+void repatchPutByID(ExecState*, JSValue, const Identifier&, const PutPropertySlot&, StructureStubInfo&, PutKind);
+void buildPutByIdList(ExecState*, JSValue, const Identifier&, const PutPropertySlot&, StructureStubInfo&, PutKind);
void repatchIn(ExecState*, JSCell*, const Identifier&, bool wasFound, const PropertySlot&, StructureStubInfo&);
-void linkFor(ExecState*, CallLinkInfo&, CodeBlock*, JSFunction* callee, MacroAssemblerCodePtr);
-void linkSlowFor(ExecState*, CallLinkInfo&);
-void unlinkFor(VM&, CallLinkInfo&);
-void linkVirtualFor(ExecState*, CallLinkInfo&);
-void linkPolymorphicCall(ExecState*, CallLinkInfo&, CallVariant);
-void resetGetByID(CodeBlock*, StructureStubInfo&);
-void resetPutByID(CodeBlock*, StructureStubInfo&);
-void resetIn(CodeBlock*, StructureStubInfo&);
+void linkFor(ExecState*, CallLinkInfo&, CodeBlock*, JSFunction* callee, MacroAssemblerCodePtr, CodeSpecializationKind);
+void linkSlowFor(ExecState*, CallLinkInfo&, CodeSpecializationKind);
+void linkClosureCall(ExecState*, CallLinkInfo&, CodeBlock*, Structure*, ExecutableBase*, MacroAssemblerCodePtr);
+void resetGetByID(RepatchBuffer&, StructureStubInfo&);
+void resetPutByID(RepatchBuffer&, StructureStubInfo&);
+void resetIn(RepatchBuffer&, StructureStubInfo&);
+
+} // namespace JSC
+
+#else // ENABLE(JIT)
+
+#include <wtf/Assertions.h>
+
+namespace JSC {
+
+class RepatchBuffer;
+struct StructureStubInfo;
+
+inline NO_RETURN_DUE_TO_CRASH void resetGetByID(RepatchBuffer&, StructureStubInfo&) { RELEASE_ASSERT_NOT_REACHED(); }
+inline NO_RETURN_DUE_TO_CRASH void resetPutByID(RepatchBuffer&, StructureStubInfo&) { RELEASE_ASSERT_NOT_REACHED(); }
+inline NO_RETURN_DUE_TO_CRASH void resetIn(RepatchBuffer&, StructureStubInfo&) { RELEASE_ASSERT_NOT_REACHED(); }
} // namespace JSC
diff --git a/Source/JavaScriptCore/jit/ScratchRegisterAllocator.cpp b/Source/JavaScriptCore/jit/ScratchRegisterAllocator.cpp
deleted file mode 100644
index 93d670d6c..000000000
--- a/Source/JavaScriptCore/jit/ScratchRegisterAllocator.cpp
+++ /dev/null
@@ -1,302 +0,0 @@
-/*
- * Copyright (C) 2014, 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "ScratchRegisterAllocator.h"
-
-#if ENABLE(JIT)
-
-#include "JSCInlines.h"
-#include "MaxFrameExtentForSlowPathCall.h"
-#include "VM.h"
-
-namespace JSC {
-
-ScratchRegisterAllocator::ScratchRegisterAllocator(const RegisterSet& usedRegisters)
- : m_usedRegisters(usedRegisters)
- , m_numberOfReusedRegisters(0)
-{
-}
-
-ScratchRegisterAllocator::~ScratchRegisterAllocator() { }
-
-void ScratchRegisterAllocator::lock(GPRReg reg)
-{
- if (reg == InvalidGPRReg)
- return;
- unsigned index = GPRInfo::toIndex(reg);
- if (index == GPRInfo::InvalidIndex)
- return;
- m_lockedRegisters.setGPRByIndex(index);
-}
-
-void ScratchRegisterAllocator::lock(FPRReg reg)
-{
- if (reg == InvalidFPRReg)
- return;
- unsigned index = FPRInfo::toIndex(reg);
- if (index == FPRInfo::InvalidIndex)
- return;
- m_lockedRegisters.setFPRByIndex(index);
-}
-
-void ScratchRegisterAllocator::lock(JSValueRegs regs)
-{
- lock(regs.tagGPR());
- lock(regs.payloadGPR());
-}
-
-template<typename BankInfo>
-typename BankInfo::RegisterType ScratchRegisterAllocator::allocateScratch()
-{
- // First try to allocate a register that is totally free.
- for (unsigned i = 0; i < BankInfo::numberOfRegisters; ++i) {
- typename BankInfo::RegisterType reg = BankInfo::toRegister(i);
- if (!m_lockedRegisters.get(reg)
- && !m_usedRegisters.get(reg)
- && !m_scratchRegisters.get(reg)) {
- m_scratchRegisters.set(reg);
- return reg;
- }
- }
-
- // Since that failed, try to allocate a register that is not yet
- // locked or used for scratch.
- for (unsigned i = 0; i < BankInfo::numberOfRegisters; ++i) {
- typename BankInfo::RegisterType reg = BankInfo::toRegister(i);
- if (!m_lockedRegisters.get(reg) && !m_scratchRegisters.get(reg)) {
- m_scratchRegisters.set(reg);
- m_numberOfReusedRegisters++;
- return reg;
- }
- }
-
- // We failed.
- CRASH();
- // Make some silly compilers happy.
- return static_cast<typename BankInfo::RegisterType>(-1);
-}
-
-GPRReg ScratchRegisterAllocator::allocateScratchGPR() { return allocateScratch<GPRInfo>(); }
-FPRReg ScratchRegisterAllocator::allocateScratchFPR() { return allocateScratch<FPRInfo>(); }
-
-ScratchRegisterAllocator::PreservedState ScratchRegisterAllocator::preserveReusedRegistersByPushing(MacroAssembler& jit, ExtraStackSpace extraStackSpace)
-{
- if (!didReuseRegisters())
- return PreservedState(0, extraStackSpace);
-
- RegisterSet registersToSpill;
- for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
- FPRReg reg = FPRInfo::toRegister(i);
- if (m_scratchRegisters.getFPRByIndex(i) && m_usedRegisters.get(reg))
- registersToSpill.set(reg);
- }
- for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
- GPRReg reg = GPRInfo::toRegister(i);
- if (m_scratchRegisters.getGPRByIndex(i) && m_usedRegisters.get(reg))
- registersToSpill.set(reg);
- }
-
- unsigned extraStackBytesAtTopOfStack = extraStackSpace == ExtraStackSpace::SpaceForCCall ? maxFrameExtentForSlowPathCall : 0;
- unsigned stackAdjustmentSize = ScratchRegisterAllocator::preserveRegistersToStackForCall(jit, registersToSpill, extraStackBytesAtTopOfStack);
-
- return PreservedState(stackAdjustmentSize, extraStackSpace);
-}
-
-void ScratchRegisterAllocator::restoreReusedRegistersByPopping(MacroAssembler& jit, const ScratchRegisterAllocator::PreservedState& preservedState)
-{
- RELEASE_ASSERT(preservedState);
- if (!didReuseRegisters())
- return;
-
- RegisterSet registersToFill;
- for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
- GPRReg reg = GPRInfo::toRegister(i);
- if (m_scratchRegisters.getGPRByIndex(i) && m_usedRegisters.get(reg))
- registersToFill.set(reg);
- }
- for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
- FPRReg reg = FPRInfo::toRegister(i);
- if (m_scratchRegisters.getFPRByIndex(i) && m_usedRegisters.get(reg))
- registersToFill.set(reg);
- }
-
- unsigned extraStackBytesAtTopOfStack =
- preservedState.extraStackSpaceRequirement == ExtraStackSpace::SpaceForCCall ? maxFrameExtentForSlowPathCall : 0;
- RegisterSet dontRestore; // Empty set. We want to restore everything.
- ScratchRegisterAllocator::restoreRegistersFromStackForCall(jit, registersToFill, dontRestore,
- preservedState.numberOfBytesPreserved, extraStackBytesAtTopOfStack);
-}
-
-RegisterSet ScratchRegisterAllocator::usedRegistersForCall() const
-{
- RegisterSet result = m_usedRegisters;
- result.exclude(RegisterSet::registersToNotSaveForJSCall());
- return result;
-}
-
-unsigned ScratchRegisterAllocator::desiredScratchBufferSizeForCall() const
-{
- return usedRegistersForCall().numberOfSetRegisters() * sizeof(JSValue);
-}
-
-void ScratchRegisterAllocator::preserveUsedRegistersToScratchBufferForCall(MacroAssembler& jit, ScratchBuffer* scratchBuffer, GPRReg scratchGPR)
-{
- RegisterSet usedRegisters = usedRegistersForCall();
- if (!usedRegisters.numberOfSetRegisters())
- return;
-
- unsigned count = 0;
- for (GPRReg reg = MacroAssembler::firstRegister(); reg <= MacroAssembler::lastRegister(); reg = MacroAssembler::nextRegister(reg)) {
- if (usedRegisters.get(reg)) {
- jit.storePtr(reg, static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + count);
- count++;
- }
- if (GPRInfo::toIndex(reg) != GPRInfo::InvalidIndex
- && scratchGPR == InvalidGPRReg
- && !m_lockedRegisters.get(reg) && !m_scratchRegisters.get(reg))
- scratchGPR = reg;
- }
- RELEASE_ASSERT(scratchGPR != InvalidGPRReg);
- for (FPRReg reg = MacroAssembler::firstFPRegister(); reg <= MacroAssembler::lastFPRegister(); reg = MacroAssembler::nextFPRegister(reg)) {
- if (usedRegisters.get(reg)) {
- jit.move(MacroAssembler::TrustedImmPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + count), scratchGPR);
- count++;
- jit.storeDouble(reg, scratchGPR);
- }
- }
- RELEASE_ASSERT(count * sizeof(JSValue) == desiredScratchBufferSizeForCall());
-
- jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->activeLengthPtr()), scratchGPR);
- jit.storePtr(MacroAssembler::TrustedImmPtr(static_cast<size_t>(count * sizeof(JSValue))), scratchGPR);
-}
-
-void ScratchRegisterAllocator::restoreUsedRegistersFromScratchBufferForCall(MacroAssembler& jit, ScratchBuffer* scratchBuffer, GPRReg scratchGPR)
-{
- RegisterSet usedRegisters = usedRegistersForCall();
- if (!usedRegisters.numberOfSetRegisters())
- return;
-
- if (scratchGPR == InvalidGPRReg) {
- // Find a scratch register.
- for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
- if (m_lockedRegisters.getGPRByIndex(i) || m_scratchRegisters.getGPRByIndex(i))
- continue;
- scratchGPR = GPRInfo::toRegister(i);
- break;
- }
- }
- RELEASE_ASSERT(scratchGPR != InvalidGPRReg);
-
- jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->activeLengthPtr()), scratchGPR);
- jit.storePtr(MacroAssembler::TrustedImmPtr(0), scratchGPR);
-
- // Restore double registers first.
- unsigned count = usedRegisters.numberOfSetGPRs();
- for (FPRReg reg = MacroAssembler::firstFPRegister(); reg <= MacroAssembler::lastFPRegister(); reg = MacroAssembler::nextFPRegister(reg)) {
- if (usedRegisters.get(reg)) {
- jit.move(MacroAssembler::TrustedImmPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++)), scratchGPR);
- jit.loadDouble(scratchGPR, reg);
- }
- }
-
- count = 0;
- for (GPRReg reg = MacroAssembler::firstRegister(); reg <= MacroAssembler::lastRegister(); reg = MacroAssembler::nextRegister(reg)) {
- if (usedRegisters.get(reg))
- jit.loadPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++), reg);
- }
-}
-
-unsigned ScratchRegisterAllocator::preserveRegistersToStackForCall(MacroAssembler& jit, const RegisterSet& usedRegisters, unsigned extraBytesAtTopOfStack)
-{
- RELEASE_ASSERT(extraBytesAtTopOfStack % sizeof(void*) == 0);
- if (!usedRegisters.numberOfSetRegisters())
- return 0;
-
- unsigned stackOffset = (usedRegisters.numberOfSetRegisters()) * sizeof(EncodedJSValue);
- stackOffset += extraBytesAtTopOfStack;
- stackOffset = WTF::roundUpToMultipleOf(stackAlignmentBytes(), stackOffset);
- jit.subPtr(
- MacroAssembler::TrustedImm32(stackOffset),
- MacroAssembler::stackPointerRegister);
-
- unsigned count = 0;
- for (GPRReg reg = MacroAssembler::firstRegister(); reg <= MacroAssembler::lastRegister(); reg = MacroAssembler::nextRegister(reg)) {
- if (usedRegisters.get(reg)) {
- jit.storePtr(reg, MacroAssembler::Address(MacroAssembler::stackPointerRegister, extraBytesAtTopOfStack + (count * sizeof(EncodedJSValue))));
- count++;
- }
- }
- for (FPRReg reg = MacroAssembler::firstFPRegister(); reg <= MacroAssembler::lastFPRegister(); reg = MacroAssembler::nextFPRegister(reg)) {
- if (usedRegisters.get(reg)) {
- jit.storeDouble(reg, MacroAssembler::Address(MacroAssembler::stackPointerRegister, extraBytesAtTopOfStack + (count * sizeof(EncodedJSValue))));
- count++;
- }
- }
-
- RELEASE_ASSERT(count == usedRegisters.numberOfSetRegisters());
-
- return stackOffset;
-}
-
-void ScratchRegisterAllocator::restoreRegistersFromStackForCall(MacroAssembler& jit, const RegisterSet& usedRegisters, const RegisterSet& ignore, unsigned numberOfStackBytesUsedForRegisterPreservation, unsigned extraBytesAtTopOfStack)
-{
- RELEASE_ASSERT(extraBytesAtTopOfStack % sizeof(void*) == 0);
- if (!usedRegisters.numberOfSetRegisters()) {
- RELEASE_ASSERT(numberOfStackBytesUsedForRegisterPreservation == 0);
- return;
- }
-
- unsigned count = 0;
- for (GPRReg reg = MacroAssembler::firstRegister(); reg <= MacroAssembler::lastRegister(); reg = MacroAssembler::nextRegister(reg)) {
- if (usedRegisters.get(reg)) {
- if (!ignore.get(reg))
- jit.loadPtr(MacroAssembler::Address(MacroAssembler::stackPointerRegister, extraBytesAtTopOfStack + (sizeof(EncodedJSValue) * count)), reg);
- count++;
- }
- }
- for (FPRReg reg = MacroAssembler::firstFPRegister(); reg <= MacroAssembler::lastFPRegister(); reg = MacroAssembler::nextFPRegister(reg)) {
- if (usedRegisters.get(reg)) {
- if (!ignore.get(reg))
- jit.loadDouble(MacroAssembler::Address(MacroAssembler::stackPointerRegister, extraBytesAtTopOfStack + (sizeof(EncodedJSValue) * count)), reg);
- count++;
- }
- }
-
- unsigned stackOffset = (usedRegisters.numberOfSetRegisters()) * sizeof(EncodedJSValue);
- stackOffset += extraBytesAtTopOfStack;
- stackOffset = WTF::roundUpToMultipleOf(stackAlignmentBytes(), stackOffset);
-
- RELEASE_ASSERT(count == usedRegisters.numberOfSetRegisters());
- RELEASE_ASSERT(stackOffset == numberOfStackBytesUsedForRegisterPreservation);
-
- jit.addPtr(
- MacroAssembler::TrustedImm32(stackOffset),
- MacroAssembler::stackPointerRegister);
-}
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/ScratchRegisterAllocator.h b/Source/JavaScriptCore/jit/ScratchRegisterAllocator.h
index 014997eca..1967226c5 100644
--- a/Source/JavaScriptCore/jit/ScratchRegisterAllocator.h
+++ b/Source/JavaScriptCore/jit/ScratchRegisterAllocator.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2012, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -26,33 +26,73 @@
#ifndef ScratchRegisterAllocator_h
#define ScratchRegisterAllocator_h
+#include <wtf/Platform.h>
+
#if ENABLE(JIT)
#include "MacroAssembler.h"
-#include "RegisterSet.h"
#include "TempRegisterSet.h"
namespace JSC {
-struct ScratchBuffer;
-
// This class provides a low-level register allocator for use in stubs.
class ScratchRegisterAllocator {
public:
- ScratchRegisterAllocator() { }
- ScratchRegisterAllocator(const RegisterSet& usedRegisters);
- ~ScratchRegisterAllocator();
+ ScratchRegisterAllocator(const TempRegisterSet& usedRegisters)
+ : m_usedRegisters(usedRegisters)
+ , m_numberOfReusedRegisters(0)
+ {
+ }
- void lock(GPRReg);
- void lock(FPRReg);
- void lock(JSValueRegs);
+ void lock(GPRReg reg)
+ {
+ unsigned index = GPRInfo::toIndex(reg);
+ if (index == GPRInfo::InvalidIndex)
+ return;
+ m_lockedRegisters.setGPRByIndex(index);
+ }
+ void lock(FPRReg reg)
+ {
+ unsigned index = FPRInfo::toIndex(reg);
+ if (index == FPRInfo::InvalidIndex)
+ return;
+ m_lockedRegisters.setFPRByIndex(index);
+ }
template<typename BankInfo>
- typename BankInfo::RegisterType allocateScratch();
+ typename BankInfo::RegisterType allocateScratch()
+ {
+ // First try to allocate a register that is totally free.
+ for (unsigned i = 0; i < BankInfo::numberOfRegisters; ++i) {
+ typename BankInfo::RegisterType reg = BankInfo::toRegister(i);
+ if (!m_lockedRegisters.get(reg)
+ && !m_usedRegisters.get(reg)
+ && !m_scratchRegisters.get(reg)) {
+ m_scratchRegisters.set(reg);
+ return reg;
+ }
+ }
+
+ // Since that failed, try to allocate a register that is not yet
+ // locked or used for scratch.
+ for (unsigned i = 0; i < BankInfo::numberOfRegisters; ++i) {
+ typename BankInfo::RegisterType reg = BankInfo::toRegister(i);
+ if (!m_lockedRegisters.get(reg) && !m_scratchRegisters.get(reg)) {
+ m_scratchRegisters.set(reg);
+ m_numberOfReusedRegisters++;
+ return reg;
+ }
+ }
+
+ // We failed.
+ CRASH();
+ // Make some silly compilers happy.
+ return static_cast<typename BankInfo::RegisterType>(-1);
+ }
- GPRReg allocateScratchGPR();
- FPRReg allocateScratchFPR();
+ GPRReg allocateScratchGPR() { return allocateScratch<GPRInfo>(); }
+ FPRReg allocateScratchFPR() { return allocateScratch<FPRInfo>(); }
bool didReuseRegisters() const
{
@@ -63,43 +103,105 @@ public:
{
return m_numberOfReusedRegisters;
}
-
- RegisterSet usedRegisters() const { return m_usedRegisters; }
- enum class ExtraStackSpace { SpaceForCCall, NoExtraSpace };
-
- struct PreservedState {
- PreservedState()
- : numberOfBytesPreserved(std::numeric_limits<unsigned>::max())
- , extraStackSpaceRequirement(ExtraStackSpace::SpaceForCCall)
- { }
-
- PreservedState(unsigned numberOfBytes, ExtraStackSpace extraStackSpace)
- : numberOfBytesPreserved(numberOfBytes)
- , extraStackSpaceRequirement(extraStackSpace)
- { }
-
- explicit operator bool() const { return numberOfBytesPreserved != std::numeric_limits<unsigned>::max(); }
-
- unsigned numberOfBytesPreserved;
- ExtraStackSpace extraStackSpaceRequirement;
- };
-
- PreservedState preserveReusedRegistersByPushing(MacroAssembler& jit, ExtraStackSpace);
- void restoreReusedRegistersByPopping(MacroAssembler& jit, const PreservedState&);
+ void preserveReusedRegistersByPushing(MacroAssembler& jit)
+ {
+ if (!didReuseRegisters())
+ return;
+
+ for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
+ if (m_scratchRegisters.getFPRByIndex(i) && m_usedRegisters.getFPRByIndex(i))
+ jit.pushToSave(FPRInfo::toRegister(i));
+ }
+ for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
+ if (m_scratchRegisters.getGPRByIndex(i) && m_usedRegisters.getGPRByIndex(i))
+ jit.pushToSave(GPRInfo::toRegister(i));
+ }
+ }
- RegisterSet usedRegistersForCall() const;
+ void restoreReusedRegistersByPopping(MacroAssembler& jit)
+ {
+ if (!didReuseRegisters())
+ return;
+
+ for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
+ if (m_scratchRegisters.getGPRByIndex(i) && m_usedRegisters.getGPRByIndex(i))
+ jit.popToRestore(GPRInfo::toRegister(i));
+ }
+ for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
+ if (m_scratchRegisters.getFPRByIndex(i) && m_usedRegisters.getFPRByIndex(i))
+ jit.popToRestore(FPRInfo::toRegister(i));
+ }
+ }
- unsigned desiredScratchBufferSizeForCall() const;
+ unsigned desiredScratchBufferSize() const { return m_usedRegisters.numberOfSetRegisters() * sizeof(JSValue); }
+
+ void preserveUsedRegistersToScratchBuffer(MacroAssembler& jit, ScratchBuffer* scratchBuffer, GPRReg scratchGPR = InvalidGPRReg)
+ {
+ unsigned count = 0;
+ for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
+ if (m_usedRegisters.getGPRByIndex(i)) {
+#if USE(JSVALUE64)
+ jit.store64(GPRInfo::toRegister(i), static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++));
+#else
+ jit.store32(GPRInfo::toRegister(i), static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++));
+#endif
+ }
+ if (scratchGPR == InvalidGPRReg && !m_lockedRegisters.getGPRByIndex(i) && !m_scratchRegisters.getGPRByIndex(i))
+ scratchGPR = GPRInfo::toRegister(i);
+ }
+ RELEASE_ASSERT(scratchGPR != InvalidGPRReg);
+ for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
+ if (m_usedRegisters.getFPRByIndex(i)) {
+ jit.move(MacroAssembler::TrustedImmPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++)), scratchGPR);
+ jit.storeDouble(FPRInfo::toRegister(i), scratchGPR);
+ }
+ }
+ RELEASE_ASSERT(count * sizeof(JSValue) == desiredScratchBufferSize());
+
+ jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->activeLengthPtr()), scratchGPR);
+ jit.storePtr(MacroAssembler::TrustedImmPtr(static_cast<size_t>(count * sizeof(JSValue))), scratchGPR);
+ }
+
+ void restoreUsedRegistersFromScratchBuffer(MacroAssembler& jit, ScratchBuffer* scratchBuffer, GPRReg scratchGPR = InvalidGPRReg)
+ {
+ if (scratchGPR == InvalidGPRReg) {
+ // Find a scratch register.
+ for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
+ if (m_lockedRegisters.getGPRByIndex(i) || m_scratchRegisters.getGPRByIndex(i))
+ continue;
+ scratchGPR = GPRInfo::toRegister(i);
+ break;
+ }
+ }
+ RELEASE_ASSERT(scratchGPR != InvalidGPRReg);
+
+ jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->activeLengthPtr()), scratchGPR);
+ jit.storePtr(MacroAssembler::TrustedImmPtr(0), scratchGPR);
+
+ // Restore double registers first.
+ unsigned count = m_usedRegisters.numberOfSetGPRs();
+ for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
+ if (m_usedRegisters.getFPRByIndex(i)) {
+ jit.move(MacroAssembler::TrustedImmPtr(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++)), scratchGPR);
+ jit.loadDouble(scratchGPR, FPRInfo::toRegister(i));
+ }
+ }
+
+ count = 0;
+ for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
+ if (m_usedRegisters.getGPRByIndex(i)) {
+#if USE(JSVALUE64)
+ jit.load64(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++), GPRInfo::toRegister(i));
+#else
+ jit.load32(static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) + (count++), GPRInfo::toRegister(i));
+#endif
+ }
+ }
+ }
- void preserveUsedRegistersToScratchBufferForCall(MacroAssembler& jit, ScratchBuffer* scratchBuffer, GPRReg scratchGPR = InvalidGPRReg);
- void restoreUsedRegistersFromScratchBufferForCall(MacroAssembler& jit, ScratchBuffer* scratchBuffer, GPRReg scratchGPR = InvalidGPRReg);
-
- static unsigned preserveRegistersToStackForCall(MacroAssembler& jit, const RegisterSet& usedRegisters, unsigned extraPaddingInBytes);
- static void restoreRegistersFromStackForCall(MacroAssembler& jit, const RegisterSet& usedRegisters, const RegisterSet& ignore, unsigned numberOfStackBytesUsedForRegisterPreservation, unsigned extraPaddingInBytes);
-
private:
- RegisterSet m_usedRegisters;
+ TempRegisterSet m_usedRegisters;
TempRegisterSet m_lockedRegisters;
TempRegisterSet m_scratchRegisters;
unsigned m_numberOfReusedRegisters;
@@ -110,3 +212,4 @@ private:
#endif // ENABLE(JIT)
#endif // ScratchRegisterAllocator_h
+
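
The inlined allocateScratch above encodes a two-pass policy: first prefer a
register that is neither locked, in use, nor already handed out; only if that
fails, reuse a live register and count it in m_numberOfReusedRegisters so the
caller knows it must spill. A simplified sketch of the same policy using plain
bitmasks (the register count is illustrative, and the throw stands in for
CRASH()):

    #include <cassert>
    #include <cstdint>
    #include <stdexcept>

    struct ScratchPicker {
        uint32_t locked = 0, used = 0, scratch = 0;
        unsigned reused = 0;
        static constexpr unsigned numberOfRegisters = 16;

        unsigned allocate()
        {
            // Pass 1: a register that is totally free.
            for (unsigned i = 0; i < numberOfRegisters; ++i) {
                uint32_t bit = 1u << i;
                if (!(locked & bit) && !(used & bit) && !(scratch & bit)) {
                    scratch |= bit;
                    return i;
                }
            }
            // Pass 2: reuse an in-use register; the caller must save it around the stub.
            for (unsigned i = 0; i < numberOfRegisters; ++i) {
                uint32_t bit = 1u << i;
                if (!(locked & bit) && !(scratch & bit)) {
                    scratch |= bit;
                    reused++;
                    return i;
                }
            }
            throw std::runtime_error("no scratch register available");
        }
    };

    int main()
    {
        ScratchPicker picker;
        picker.used = 0b1111;           // registers 0-3 hold live values
        assert(picker.allocate() == 4); // totally free, so no spill is needed
        assert(picker.reused == 0);
    }

Whenever the reuse count is non-zero, the generated stub has to be bracketed by
preserveReusedRegistersByPushing and restoreReusedRegistersByPopping, which is
exactly what didReuseRegisters() gates in the header above.
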
diff --git a/Source/JavaScriptCore/jit/SetupVarargsFrame.cpp b/Source/JavaScriptCore/jit/SetupVarargsFrame.cpp
deleted file mode 100644
index f43551e00..000000000
--- a/Source/JavaScriptCore/jit/SetupVarargsFrame.cpp
+++ /dev/null
@@ -1,143 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#include "config.h"
-#include "SetupVarargsFrame.h"
-
-#if ENABLE(JIT)
-
-#include "Interpreter.h"
-#include "JSCInlines.h"
-#include "StackAlignment.h"
-
-namespace JSC {
-
-void emitSetVarargsFrame(CCallHelpers& jit, GPRReg lengthGPR, bool lengthIncludesThis, GPRReg numUsedSlotsGPR, GPRReg resultGPR)
-{
- jit.move(numUsedSlotsGPR, resultGPR);
- // We really want to make sure the size of the new call frame is a multiple of
- // stackAlignmentRegisters(); however, it is easier to accomplish this by
- // rounding numUsedSlotsGPR to the next multiple of stackAlignmentRegisters().
- // Together with the rounding below, this ensures that the new call frame is
- // located on a stackAlignmentRegisters() boundary and a multiple of
- // stackAlignmentRegisters() in size.
- jit.addPtr(CCallHelpers::TrustedImm32(stackAlignmentRegisters() - 1), resultGPR);
- jit.andPtr(CCallHelpers::TrustedImm32(~(stackAlignmentRegisters() - 1)), resultGPR);
-
- jit.addPtr(lengthGPR, resultGPR);
- jit.addPtr(CCallHelpers::TrustedImm32(JSStack::CallFrameHeaderSize + (lengthIncludesThis? 0 : 1)), resultGPR);
-
- // resultGPR now has the required frame size in Register units
- // Round resultGPR to next multiple of stackAlignmentRegisters()
- jit.addPtr(CCallHelpers::TrustedImm32(stackAlignmentRegisters() - 1), resultGPR);
- jit.andPtr(CCallHelpers::TrustedImm32(~(stackAlignmentRegisters() - 1)), resultGPR);
-
- // Now resultGPR has the right stack frame offset in Register units.
- jit.negPtr(resultGPR);
- jit.lshiftPtr(CCallHelpers::Imm32(3), resultGPR);
- jit.addPtr(GPRInfo::callFrameRegister, resultGPR);
-}
-
-void emitSetupVarargsFrameFastCase(CCallHelpers& jit, GPRReg numUsedSlotsGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, GPRReg scratchGPR3, ValueRecovery argCountRecovery, VirtualRegister firstArgumentReg, unsigned firstVarArgOffset, CCallHelpers::JumpList& slowCase)
-{
- CCallHelpers::JumpList end;
-
- if (argCountRecovery.isConstant()) {
- // FIXME: We could constant-fold a lot of the computation below in this case.
- // https://bugs.webkit.org/show_bug.cgi?id=141486
- jit.move(CCallHelpers::TrustedImm32(argCountRecovery.constant().asInt32()), scratchGPR1);
- } else
- jit.load32(CCallHelpers::payloadFor(argCountRecovery.virtualRegister()), scratchGPR1);
- if (firstVarArgOffset) {
- CCallHelpers::Jump sufficientArguments = jit.branch32(CCallHelpers::GreaterThan, scratchGPR1, CCallHelpers::TrustedImm32(firstVarArgOffset + 1));
- jit.move(CCallHelpers::TrustedImm32(1), scratchGPR1);
- CCallHelpers::Jump endVarArgs = jit.jump();
- sufficientArguments.link(&jit);
- jit.sub32(CCallHelpers::TrustedImm32(firstVarArgOffset), scratchGPR1);
- endVarArgs.link(&jit);
- }
- slowCase.append(jit.branch32(CCallHelpers::Above, scratchGPR1, CCallHelpers::TrustedImm32(maxArguments + 1)));
-
- emitSetVarargsFrame(jit, scratchGPR1, true, numUsedSlotsGPR, scratchGPR2);
-
- slowCase.append(jit.branchPtr(CCallHelpers::Above, CCallHelpers::AbsoluteAddress(jit.vm()->addressOfStackLimit()), scratchGPR2));
-
- // Initialize ArgumentCount.
- jit.store32(scratchGPR1, CCallHelpers::Address(scratchGPR2, JSStack::ArgumentCount * static_cast<int>(sizeof(Register)) + PayloadOffset));
-
- // Copy arguments.
- jit.signExtend32ToPtr(scratchGPR1, scratchGPR1);
- CCallHelpers::Jump done = jit.branchSubPtr(CCallHelpers::Zero, CCallHelpers::TrustedImm32(1), scratchGPR1);
- // scratchGPR1: argumentCount
-
- CCallHelpers::Label copyLoop = jit.label();
- int argOffset = (firstArgumentReg.offset() - 1 + firstVarArgOffset) * static_cast<int>(sizeof(Register));
-#if USE(JSVALUE64)
- jit.load64(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight, argOffset), scratchGPR3);
- jit.store64(scratchGPR3, CCallHelpers::BaseIndex(scratchGPR2, scratchGPR1, CCallHelpers::TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register))));
-#else // USE(JSVALUE64), so this begins the 32-bit case
- jit.load32(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight, argOffset + TagOffset), scratchGPR3);
- jit.store32(scratchGPR3, CCallHelpers::BaseIndex(scratchGPR2, scratchGPR1, CCallHelpers::TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)) + TagOffset));
- jit.load32(CCallHelpers::BaseIndex(GPRInfo::callFrameRegister, scratchGPR1, CCallHelpers::TimesEight, argOffset + PayloadOffset), scratchGPR3);
- jit.store32(scratchGPR3, CCallHelpers::BaseIndex(scratchGPR2, scratchGPR1, CCallHelpers::TimesEight, CallFrame::thisArgumentOffset() * static_cast<int>(sizeof(Register)) + PayloadOffset));
-#endif // USE(JSVALUE64), end of 32-bit case
- jit.branchSubPtr(CCallHelpers::NonZero, CCallHelpers::TrustedImm32(1), scratchGPR1).linkTo(copyLoop, &jit);
-
- done.link(&jit);
-}
-
-void emitSetupVarargsFrameFastCase(CCallHelpers& jit, GPRReg numUsedSlotsGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, GPRReg scratchGPR3, unsigned firstVarArgOffset, CCallHelpers::JumpList& slowCase)
-{
- emitSetupVarargsFrameFastCase(jit, numUsedSlotsGPR, scratchGPR1, scratchGPR2, scratchGPR3, nullptr, firstVarArgOffset, slowCase);
-}
-
-void emitSetupVarargsFrameFastCase(CCallHelpers& jit, GPRReg numUsedSlotsGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, GPRReg scratchGPR3, InlineCallFrame* inlineCallFrame, unsigned firstVarArgOffset, CCallHelpers::JumpList& slowCase)
-{
- ValueRecovery argumentCountRecovery;
- VirtualRegister firstArgumentReg;
- if (inlineCallFrame) {
- if (inlineCallFrame->isVarargs()) {
- argumentCountRecovery = ValueRecovery::displacedInJSStack(
- inlineCallFrame->argumentCountRegister, DataFormatInt32);
- } else {
- argumentCountRecovery = ValueRecovery::constant(
- jsNumber(inlineCallFrame->arguments.size()));
- }
- if (inlineCallFrame->arguments.size() > 1)
- firstArgumentReg = inlineCallFrame->arguments[1].virtualRegister();
- else
- firstArgumentReg = VirtualRegister(0);
- } else {
- argumentCountRecovery = ValueRecovery::displacedInJSStack(
- VirtualRegister(JSStack::ArgumentCount), DataFormatInt32);
- firstArgumentReg = VirtualRegister(CallFrame::argumentOffset(0));
- }
- emitSetupVarargsFrameFastCase(jit, numUsedSlotsGPR, scratchGPR1, scratchGPR2, scratchGPR3, argumentCountRecovery, firstArgumentReg, firstVarArgOffset, slowCase);
-}
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
-
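
The deleted emitSetVarargsFrame rounds twice: numUsedSlotsGPR is rounded up so
the new frame starts on a stackAlignmentRegisters() boundary, then the running
total is rounded again so the frame is also a multiple of that alignment in
size. A worked version of the arithmetic, with an assumed alignment of 2
registers and an assumed 5-slot call frame header (illustrative values, not the
real JSStack constants):

    #include <cassert>

    // Round x up to a power-of-two multiple, mirroring the addPtr/andPtr pair.
    static unsigned roundUp(unsigned x, unsigned multiple)
    {
        return (x + multiple - 1) & ~(multiple - 1);
    }

    int main()
    {
        const unsigned stackAlignmentRegisters = 2; // assumed
        const unsigned callFrameHeaderSize = 5;     // assumed JSStack::CallFrameHeaderSize
        unsigned numUsedSlots = 7; // slots already in use in the caller's frame
        unsigned length = 3;       // argument count, here not including 'this'

        unsigned offset = roundUp(numUsedSlots, stackAlignmentRegisters); // 8: frame starts aligned
        offset += length + callFrameHeaderSize + 1;                       // 17: '+ 1' adds the 'this' slot
        offset = roundUp(offset, stackAlignmentRegisters);                // 18: size is aligned too
        assert(offset == 18);

        // The JIT then negates this, shifts left by 3 (sizeof(Register) == 8) and
        // adds the call frame register to produce the new frame pointer.
    }
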
diff --git a/Source/JavaScriptCore/jit/SetupVarargsFrame.h b/Source/JavaScriptCore/jit/SetupVarargsFrame.h
deleted file mode 100644
index 0e8933a29..000000000
--- a/Source/JavaScriptCore/jit/SetupVarargsFrame.h
+++ /dev/null
@@ -1,53 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef SetupVarargsFrame_h
-#define SetupVarargsFrame_h
-
-#if ENABLE(JIT)
-
-#include "CCallHelpers.h"
-#include "VirtualRegister.h"
-
-namespace JSC {
-
-void emitSetVarargsFrame(CCallHelpers&, GPRReg lengthGPR, bool lengthIncludesThis, GPRReg numUsedSlotsGPR, GPRReg resultGPR);
-
-// Assumes that SP refers to the last in-use stack location, and after this returns SP will point to
-// the newly created frame plus the native header. scratchGPR2 may be the same as numUsedSlotsGPR.
-void emitSetupVarargsFrameFastCase(CCallHelpers&, GPRReg numUsedSlotsGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, GPRReg scratchGPR3, ValueRecovery argCountRecovery, VirtualRegister firstArgumentReg, unsigned firstVarArgOffset, CCallHelpers::JumpList& slowCase);
-
-// Variant that assumes normal stack frame.
-void emitSetupVarargsFrameFastCase(CCallHelpers&, GPRReg numUsedSlotsGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, GPRReg scratchGPR3, unsigned firstVarArgOffset, CCallHelpers::JumpList& slowCase);
-
-// Variant for potentially inlined stack frames.
-void emitSetupVarargsFrameFastCase(CCallHelpers&, GPRReg numUsedSlotsGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, GPRReg scratchGPR3, InlineCallFrame*, unsigned firstVarArgOffset, CCallHelpers::JumpList& slowCase);
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
-
-#endif // SetupVarargsFrame_h
-
diff --git a/Source/JavaScriptCore/jit/SlowPathCall.h b/Source/JavaScriptCore/jit/SlowPathCall.h
index 55da60cd0..f0aa28e83 100644
--- a/Source/JavaScriptCore/jit/SlowPathCall.h
+++ b/Source/JavaScriptCore/jit/SlowPathCall.h
@@ -45,7 +45,7 @@ public:
JIT::Call call()
{
#if ENABLE(OPCODE_SAMPLING)
- if (m_jit->m_bytecodeOffset != std::numeric_limits<unsigned>::max())
+ if (m_jit->m_bytecodeOffset != (unsigned)-1)
m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeOffset, true);
#endif
m_jit->updateTopCallFrame();
@@ -73,7 +73,7 @@ public:
#endif
#if ENABLE(OPCODE_SAMPLING)
- if (m_jit->m_bytecodeOffset != std::numeric_limits<unsigned>::max())
+ if (m_jit->m_bytecodeOffset != (unsigned)-1)
m_jit->sampleInstruction(m_jit->m_codeBlock->instructions().begin() + m_jit->m_bytecodeOffset, false);
#endif
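
Both hunks above replace std::numeric_limits<unsigned>::max() with the older
(unsigned)-1 spelling of the "no bytecode offset" sentinel. The two are
guaranteed equal, since conversion to an unsigned type is defined modulo 2^N:

    #include <cassert>
    #include <limits>

    int main()
    {
        // -1 wraps to the largest value the unsigned type can hold.
        assert(static_cast<unsigned>(-1) == std::numeric_limits<unsigned>::max());
    }
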
diff --git a/Source/JavaScriptCore/jit/SnippetOperand.h b/Source/JavaScriptCore/jit/SnippetOperand.h
deleted file mode 100644
index 67884b3c0..000000000
--- a/Source/JavaScriptCore/jit/SnippetOperand.h
+++ /dev/null
@@ -1,101 +0,0 @@
-/*
- * Copyright (C) 2015 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef SnippetOperand_h
-#define SnippetOperand_h
-
-#if ENABLE(JIT)
-
-#include "ResultType.h"
-
-namespace JSC {
-
-class SnippetOperand {
- enum ConstOrVarType {
- Variable,
- ConstInt32,
- ConstDouble
- };
-
-public:
- SnippetOperand()
- : m_resultType(ResultType::unknownType())
- { }
-
- SnippetOperand(ResultType resultType)
- : m_resultType(resultType)
- { }
-
- bool mightBeNumber() const { return m_resultType.mightBeNumber(); }
- bool definitelyIsNumber() const { return m_resultType.definitelyIsNumber(); }
-
- bool isConst() const { return m_type != Variable; }
- bool isConstInt32() const { return m_type == ConstInt32; }
- bool isConstDouble() const { return m_type == ConstDouble; }
- bool isPositiveConstInt32() const { return isConstInt32() && asConstInt32() > 0; }
-
- int64_t asRawBits() const { return m_val.rawBits; }
-
- int32_t asConstInt32() const
- {
- ASSERT(m_type == ConstInt32);
- return m_val.int32Val;
- }
-
- double asConstDouble() const
- {
- ASSERT(m_type == ConstDouble);
- return m_val.doubleVal;
- }
-
- void setConstInt32(int32_t value)
- {
- m_type = ConstInt32;
- m_val.int32Val = value;
- }
-
- void setConstDouble(double value)
- {
- m_type = ConstDouble;
- m_val.doubleVal = value;
- }
-
-private:
- ResultType m_resultType;
- ConstOrVarType m_type { Variable };
- union {
- int32_t int32Val;
- double doubleVal;
- int64_t rawBits;
- } m_val;
-};
-
-} // namespace JSC
-
-#endif // ENABLE(JIT)
-
-#endif // SnippetOperand_h
-
-
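
The deleted SnippetOperand is at heart a tagged union: m_type records which
member of the int32/double/raw-bits union is live, and the typed accessors
assert on it. A stripped-down model without ResultType (plain assert stands in
for JSC's ASSERT):

    #include <cassert>
    #include <cstdint>

    class ConstOrVar {
        enum Type { Variable, ConstInt32, ConstDouble };
    public:
        bool isConst() const { return m_type != Variable; }
        int32_t asConstInt32() const { assert(m_type == ConstInt32); return m_val.int32Val; }
        double asConstDouble() const { assert(m_type == ConstDouble); return m_val.doubleVal; }
        void setConstInt32(int32_t v) { m_type = ConstInt32; m_val.int32Val = v; }
        void setConstDouble(double v) { m_type = ConstDouble; m_val.doubleVal = v; }
    private:
        Type m_type { Variable };
        union { int32_t int32Val; double doubleVal; int64_t rawBits; } m_val;
    };

    int main()
    {
        ConstOrVar op;
        assert(!op.isConst());
        op.setConstInt32(42);
        assert(op.isConst() && op.asConstInt32() == 42);
    }

The rawBits member is what lets a snippet materialize a constant double as a
single 64-bit immediate instead of going through a memory load.
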
diff --git a/Source/JavaScriptCore/jit/SpecializedThunkJIT.h b/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
index 6a2da6ded..6ec1e71a7 100644
--- a/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
+++ b/Source/JavaScriptCore/jit/SpecializedThunkJIT.h
@@ -29,8 +29,6 @@
#if ENABLE(JIT)
#include "Executable.h"
-#include "JIT.h"
-#include "JITInlines.h"
#include "JSInterfaceJIT.h"
#include "JSStack.h"
#include "LinkBuffer.h"
@@ -43,8 +41,6 @@ namespace JSC {
SpecializedThunkJIT(VM* vm, int expectedArgCount)
: JSInterfaceJIT(vm)
{
- emitFunctionPrologue();
- emitSaveThenMaterializeTagRegisters();
// Check that we have the expected number of arguments
m_failures.append(branch32(NotEqual, payloadFor(JSStack::ArgumentCount), TrustedImm32(expectedArgCount + 1)));
}
@@ -52,8 +48,6 @@ namespace JSC {
explicit SpecializedThunkJIT(VM* vm)
: JSInterfaceJIT(vm)
{
- emitFunctionPrologue();
- emitSaveThenMaterializeTagRegisters();
}
void loadDoubleArgument(int argument, FPRegisterID dst, RegisterID scratch)
@@ -71,18 +65,14 @@ namespace JSC {
void loadJSStringArgument(VM& vm, int argument, RegisterID dst)
{
loadCellArgument(argument, dst);
- m_failures.append(branchStructure(NotEqual,
- Address(dst, JSCell::structureIDOffset()),
- vm.stringStructure.get()));
+ m_failures.append(branchPtr(NotEqual, Address(dst, JSCell::structureOffset()), TrustedImmPtr(vm.stringStructure.get())));
}
void loadArgumentWithSpecificClass(const ClassInfo* classInfo, int argument, RegisterID dst, RegisterID scratch)
{
loadCellArgument(argument, dst);
- emitLoadStructure(dst, scratch, dst);
+ loadPtr(Address(dst, JSCell::structureOffset()), scratch);
appendFailure(branchPtr(NotEqual, Address(scratch, Structure::classInfoOffset()), TrustedImmPtr(classInfo)));
- // We have to reload the argument since emitLoadStructure clobbered it.
- loadCellArgument(argument, dst);
}
void loadInt32Argument(int argument, RegisterID dst, Jump& failTarget)
@@ -107,9 +97,7 @@ namespace JSC {
{
if (src != regT0)
move(src, regT0);
-
- emitRestoreSavedTagRegisters();
- emitFunctionEpilogue();
+ loadPtr(Address(callFrameRegister, CallFrame::callerFrameOffset()), callFrameRegister);
ret();
}
#else
@@ -117,8 +105,7 @@ namespace JSC {
{
ASSERT_UNUSED(payload, payload == regT0);
ASSERT_UNUSED(tag, tag == regT1);
- emitRestoreSavedTagRegisters();
- emitFunctionEpilogue();
+ loadPtr(Address(callFrameRegister, CallFrame::callerFrameOffset()), callFrameRegister);
ret();
}
#endif
@@ -134,7 +121,14 @@ namespace JSC {
move(tagTypeNumberRegister, regT0);
done.link(this);
#else
+#if !CPU(X86)
+ // The src register is not clobbered by moveDoubleToInts with ARM, MIPS and SH4 macro assemblers, so let's use it.
moveDoubleToInts(src, regT0, regT1);
+#else
+ storeDouble(src, Address(stackPointerRegister, -(int)sizeof(double)));
+ loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(JSValue, u.asBits.tag) - sizeof(double)), regT1);
+ loadPtr(Address(stackPointerRegister, OBJECT_OFFSETOF(JSValue, u.asBits.payload) - sizeof(double)), regT0);
+#endif
Jump lowNonZero = branchTestPtr(NonZero, regT1);
Jump highNonZero = branchTestPtr(NonZero, regT0);
move(TrustedImm32(0), regT0);
@@ -142,8 +136,7 @@ namespace JSC {
lowNonZero.link(this);
highNonZero.link(this);
#endif
- emitRestoreSavedTagRegisters();
- emitFunctionEpilogue();
+ loadPtr(Address(callFrameRegister, CallFrame::callerFrameOffset()), callFrameRegister);
ret();
}
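
On the 32-bit X86 path above, the double is spilled just below the stack
pointer and reloaded as two 32-bit words, because moveDoubleToInts would
clobber the source register on that assembler. The equivalent host-side split,
assuming the JSVALUE32_64 layout where the payload is the low word (regT0) and
the tag the high word (regT1):

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    int main()
    {
        double d = 2.5;
        uint64_t bits;
        std::memcpy(&bits, &d, sizeof bits); // the storeDouble/loadPtr round trip does this via memory

        uint32_t payload = static_cast<uint32_t>(bits);   // low word -> regT0
        uint32_t tag = static_cast<uint32_t>(bits >> 32); // high word -> regT1
        std::printf("tag=0x%08x payload=0x%08x\n", tag, payload); // tag=0x40040000 payload=0x00000000
    }
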
@@ -152,8 +145,7 @@ namespace JSC {
if (src != regT0)
move(src, regT0);
tagReturnAsInt32();
- emitRestoreSavedTagRegisters();
- emitFunctionEpilogue();
+ loadPtr(Address(callFrameRegister, CallFrame::callerFrameOffset()), callFrameRegister);
ret();
}
@@ -162,14 +154,13 @@ namespace JSC {
if (src != regT0)
move(src, regT0);
tagReturnAsJSCell();
- emitRestoreSavedTagRegisters();
- emitFunctionEpilogue();
+ loadPtr(Address(callFrameRegister, CallFrame::callerFrameOffset()), callFrameRegister);
ret();
}
MacroAssemblerCodeRef finalize(MacroAssemblerCodePtr fallback, const char* thunkKind)
{
- LinkBuffer patchBuffer(*m_vm, *this, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*m_vm, this, GLOBAL_THUNK_ID);
patchBuffer.link(m_failures, CodeLocationLabel(fallback));
for (unsigned i = 0; i < m_calls.size(); i++)
patchBuffer.link(m_calls[i].first, m_calls[i].second);
@@ -193,31 +184,7 @@ namespace JSC {
}
private:
- void emitSaveThenMaterializeTagRegisters()
- {
-#if USE(JSVALUE64)
-#if CPU(ARM64)
- pushPair(tagTypeNumberRegister, tagMaskRegister);
-#else
- push(tagTypeNumberRegister);
- push(tagMaskRegister);
-#endif
- emitMaterializeTagCheckRegisters();
-#endif
- }
- void emitRestoreSavedTagRegisters()
- {
-#if USE(JSVALUE64)
-#if CPU(ARM64)
- popPair(tagTypeNumberRegister, tagMaskRegister);
-#else
- pop(tagMaskRegister);
- pop(tagTypeNumberRegister);
-#endif
-#endif
- }
-
void tagReturnAsInt32()
{
#if USE(JSVALUE64)
diff --git a/Source/JavaScriptCore/jit/SpillRegistersMode.h b/Source/JavaScriptCore/jit/SpillRegistersMode.h
deleted file mode 100644
index 160df2c2e..000000000
--- a/Source/JavaScriptCore/jit/SpillRegistersMode.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/*
- * Copyright (C) 2014 Apple Inc. All rights reserved.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the above copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- *
- * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
- * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
- * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
- * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
- * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
- * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
- * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
- * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
- * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- */
-
-#ifndef SpillRegistersMode_h
-#define SpillRegistersMode_h
-
-namespace JSC {
-
-enum SpillRegistersMode { NeedToSpill, DontSpill };
-
-}
-
-#endif
diff --git a/Source/JavaScriptCore/jit/TempRegisterSet.cpp b/Source/JavaScriptCore/jit/TempRegisterSet.cpp
index 9c2e73d43..9d80bbc57 100644
--- a/Source/JavaScriptCore/jit/TempRegisterSet.cpp
+++ b/Source/JavaScriptCore/jit/TempRegisterSet.cpp
@@ -28,15 +28,12 @@
#if ENABLE(JIT)
-#include "JSCInlines.h"
#include "RegisterSet.h"
namespace JSC {
TempRegisterSet::TempRegisterSet(const RegisterSet& other)
{
- clearAll();
-
for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
GPRReg reg = GPRInfo::toRegister(i);
if (other.get(reg))
diff --git a/Source/JavaScriptCore/jit/TempRegisterSet.h b/Source/JavaScriptCore/jit/TempRegisterSet.h
index 4c2102400..0915e796a 100644
--- a/Source/JavaScriptCore/jit/TempRegisterSet.h
+++ b/Source/JavaScriptCore/jit/TempRegisterSet.h
@@ -26,6 +26,8 @@
#ifndef TempRegisterSet_h
#define TempRegisterSet_h
+#include <wtf/Platform.h>
+
#if ENABLE(JIT)
#include "FPRInfo.h"
@@ -39,7 +41,8 @@ class TempRegisterSet {
public:
TempRegisterSet()
{
- clearAll();
+ for (unsigned i = numberOfBytesInTempRegisterSet; i--;)
+ m_set[i] = 0;
}
TempRegisterSet(const RegisterSet&);
@@ -115,16 +118,6 @@ public:
return getBit(GPRInfo::numberOfRegisters + index);
}
- // Return the index'th free FPR.
- FPRReg getFreeFPR(unsigned index = 0) const
- {
- for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
- if (!getFPRByIndex(i) && !index--)
- return FPRInfo::toRegister(i);
- }
- return InvalidFPRReg;
- }
-
template<typename BankInfo>
void setByIndex(unsigned index)
{
@@ -171,12 +164,6 @@ public:
}
private:
- void clearAll()
- {
- for (unsigned i = numberOfBytesInTempRegisterSet; i--;)
- m_set[i] = 0;
- }
-
void setBit(unsigned i)
{
ASSERT(i < totalNumberOfRegisters);
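
TempRegisterSet, adjusted above, is a byte-array bitset with one bit per
register, GPR indices first and FPR indices after them; the constructor change
simply inlines what clearAll() used to do. A self-contained sketch with
illustrative register counts (the real ones come from GPRInfo/FPRInfo):

    #include <cassert>
    #include <cstdint>

    struct TinyRegisterSet {
        static constexpr unsigned numGPRs = 11, numFPRs = 6;
        static constexpr unsigned total = numGPRs + numFPRs;
        uint8_t bytes[(total + 7) / 8] = {}; // zeroed up front, like the old clearAll()

        void setBit(unsigned i) { assert(i < total); bytes[i >> 3] |= 1u << (i & 7); }
        bool getBit(unsigned i) const { assert(i < total); return bytes[i >> 3] & (1u << (i & 7)); }

        void setFPRByIndex(unsigned i) { setBit(numGPRs + i); } // FPR bits live after the GPR bits
        bool getFPRByIndex(unsigned i) const { return getBit(numGPRs + i); }
    };

    int main()
    {
        TinyRegisterSet set;
        set.setFPRByIndex(2);
        assert(set.getFPRByIndex(2) && !set.getBit(2)); // FPR 2 is bit 13, not bit 2
    }
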
diff --git a/Source/JavaScriptCore/jit/ThunkGenerator.h b/Source/JavaScriptCore/jit/ThunkGenerator.h
index 031748cbe..a9d7e04ee 100644
--- a/Source/JavaScriptCore/jit/ThunkGenerator.h
+++ b/Source/JavaScriptCore/jit/ThunkGenerator.h
@@ -26,6 +26,8 @@
#ifndef ThunkGenerator_h
#define ThunkGenerator_h
+#include <wtf/Platform.h>
+
#if ENABLE(JIT)
namespace JSC {
diff --git a/Source/JavaScriptCore/jit/ThunkGenerators.cpp b/Source/JavaScriptCore/jit/ThunkGenerators.cpp
index 4a71dfeb2..f8f5cbaf5 100644
--- a/Source/JavaScriptCore/jit/ThunkGenerators.cpp
+++ b/Source/JavaScriptCore/jit/ThunkGenerators.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2010, 2012, 2013, 2014 Apple Inc. All rights reserved.
+ * Copyright (C) 2010, 2012, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -27,14 +27,11 @@
#include "ThunkGenerators.h"
#include "CodeBlock.h"
-#include "DFGSpeculativeJIT.h"
#include "JITOperations.h"
#include "JSArray.h"
#include "JSArrayIterator.h"
#include "JSStack.h"
-#include "MathCommon.h"
-#include "MaxFrameExtentForSlowPathCall.h"
-#include "JSCInlines.h"
+#include "Operations.h"
#include "SpecializedThunkJIT.h"
#include <wtf/InlineASM.h>
#include <wtf/StringPrintStream.h>
@@ -46,14 +43,17 @@ namespace JSC {
inline void emitPointerValidation(CCallHelpers& jit, GPRReg pointerGPR)
{
- if (ASSERT_DISABLED)
- return;
+#if !ASSERT_DISABLED
CCallHelpers::Jump isNonZero = jit.branchTestPtr(CCallHelpers::NonZero, pointerGPR);
- jit.abortWithReason(TGInvalidPointer);
+ jit.breakpoint();
isNonZero.link(&jit);
jit.pushToSave(pointerGPR);
jit.load8(pointerGPR, pointerGPR);
jit.popToRestore(pointerGPR);
+#else
+ UNUSED_PARAM(jit);
+ UNUSED_PARAM(pointerGPR);
+#endif
}
// We will jump here if the JIT code tries to make a call, but the
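
emitPointerValidation above traps on a null code pointer and then forces a
one-byte load through it, so a bogus pointer faults right at the validation
site rather than at some later indirect jump; the right-hand side merely swaps
the abortWithReason(TGInvalidPointer) call for a plain breakpoint(). A
host-code analogue of the same debug-only probe (a sketch, not JSC API):

    #include <cassert>
    #include <cstdint>

    static void validatePointer(const void* p)
    {
    #ifndef NDEBUG
        assert(p); // the branchTestPtr/breakpoint pair: crash loudly on null
        volatile uint8_t probe = *static_cast<const uint8_t*>(p); // the load8 probe
        (void)probe;
    #endif
    }

    int main()
    {
        int x = 0;
        validatePointer(&x); // fine; validatePointer(nullptr) would assert in a debug build
    }
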
@@ -66,100 +66,86 @@ MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(VM* vm)
// even though we won't use it.
jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);
- jit.copyCalleeSavesToVMCalleeSavesBuffer();
+ // The CallFrame register points to the (failed) callee frame, so we need to pop back one frame.
+ jit.emitGetCallerFrameFromCallFrameHeaderPtr(GPRInfo::callFrameRegister);
- jit.setupArguments(CCallHelpers::TrustedImmPtr(vm), GPRInfo::callFrameRegister);
+ jit.setupArgumentsExecState();
jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(lookupExceptionHandler)), GPRInfo::nonArgGPR0);
emitPointerValidation(jit, GPRInfo::nonArgGPR0);
jit.call(GPRInfo::nonArgGPR0);
jit.jumpToExceptionHandler();
- LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
return FINALIZE_CODE(patchBuffer, ("Throw exception from call slow path thunk"));
}
static void slowPathFor(
- CCallHelpers& jit, VM* vm, Sprt_JITOperation_ECli slowPathFunction)
+ CCallHelpers& jit, VM* vm, P_JITOperation_E slowPathFunction)
{
- jit.emitFunctionPrologue();
+ jit.preserveReturnAddressAfterCall(GPRInfo::nonArgGPR2);
+ emitPointerValidation(jit, GPRInfo::nonArgGPR2);
+ jit.emitPutReturnPCToCallFrameHeader(GPRInfo::nonArgGPR2);
jit.storePtr(GPRInfo::callFrameRegister, &vm->topCallFrame);
-#if OS(WINDOWS) && CPU(X86_64)
- // Windows X86_64 needs some space pointed to by arg0 for return types larger than 64 bits.
- // Other argument values are shifted by 1. Use space on the stack for our two return values.
- // Moving the stack down maxFrameExtentForSlowPathCall bytes gives us room for our 3 arguments
- // and space for the 16 byte return area.
- jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
- jit.move(GPRInfo::regT2, GPRInfo::argumentGPR2);
- jit.addPtr(CCallHelpers::TrustedImm32(32), CCallHelpers::stackPointerRegister, GPRInfo::argumentGPR0);
- jit.move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
- jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
- emitPointerValidation(jit, GPRInfo::nonArgGPR0);
- jit.call(GPRInfo::nonArgGPR0);
- jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR, 8), GPRInfo::returnValueGPR2);
- jit.loadPtr(CCallHelpers::Address(GPRInfo::returnValueGPR), GPRInfo::returnValueGPR);
- jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
-#else
- if (maxFrameExtentForSlowPathCall)
- jit.addPtr(CCallHelpers::TrustedImm32(-maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
- jit.setupArgumentsWithExecState(GPRInfo::regT2);
+ jit.setupArgumentsExecState();
jit.move(CCallHelpers::TrustedImmPtr(bitwise_cast<void*>(slowPathFunction)), GPRInfo::nonArgGPR0);
emitPointerValidation(jit, GPRInfo::nonArgGPR0);
jit.call(GPRInfo::nonArgGPR0);
- if (maxFrameExtentForSlowPathCall)
- jit.addPtr(CCallHelpers::TrustedImm32(maxFrameExtentForSlowPathCall), CCallHelpers::stackPointerRegister);
-#endif
-
+
// This slow call will return the address of one of the following:
// 1) Exception throwing thunk.
// 2) Host call return value returner thingy.
// 3) The function to call.
- // The second return value GPR will hold a non-zero value for tail calls.
-
+ jit.emitGetReturnPCFromCallFrameHeaderPtr(GPRInfo::nonPreservedNonReturnGPR);
+ jit.emitPutReturnPCToCallFrameHeader(CCallHelpers::TrustedImmPtr(0));
+ emitPointerValidation(jit, GPRInfo::nonPreservedNonReturnGPR);
+ jit.restoreReturnAddressBeforeReturn(GPRInfo::nonPreservedNonReturnGPR);
emitPointerValidation(jit, GPRInfo::returnValueGPR);
- jit.emitFunctionEpilogue();
-
- RELEASE_ASSERT(reinterpret_cast<void*>(KeepTheFrame) == reinterpret_cast<void*>(0));
- CCallHelpers::Jump doNotTrash = jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::returnValueGPR2);
-
- jit.preserveReturnAddressAfterCall(GPRInfo::nonPreservedNonReturnGPR);
- jit.prepareForTailCallSlow(GPRInfo::returnValueGPR);
-
- doNotTrash.link(&jit);
jit.jump(GPRInfo::returnValueGPR);
}
-MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm)
+static MacroAssemblerCodeRef linkForThunkGenerator(
+ VM* vm, CodeSpecializationKind kind)
{
// The return address is on the stack or in the link register. We will hence
// save the return address to the call frame while we make a C++ function call
// to perform linking and lazy compilation if necessary. We expect the callee
// to be in regT0/regT1 (payload/tag), the CallFrame to have already
// been adjusted, and all other registers to be available for use.
+
CCallHelpers jit(vm);
- slowPathFor(jit, vm, operationLinkCall);
+ slowPathFor(jit, vm, kind == CodeForCall ? operationLinkCall : operationLinkConstruct);
- LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
- return FINALIZE_CODE(patchBuffer, ("Link call slow path thunk"));
+ LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
+ return FINALIZE_CODE(
+ patchBuffer,
+ ("Link %s slow path thunk", kind == CodeForCall ? "call" : "construct"));
+}
+
+MacroAssemblerCodeRef linkCallThunkGenerator(VM* vm)
+{
+ return linkForThunkGenerator(vm, CodeForCall);
+}
+
+MacroAssemblerCodeRef linkConstructThunkGenerator(VM* vm)
+{
+ return linkForThunkGenerator(vm, CodeForConstruct);
}
// For closure optimizations, we only include calls, since if you're using closures for
// object construction then you're going to lose big time anyway.
-MacroAssemblerCodeRef linkPolymorphicCallThunkGenerator(VM* vm)
+MacroAssemblerCodeRef linkClosureCallThunkGenerator(VM* vm)
{
CCallHelpers jit(vm);
- slowPathFor(jit, vm, operationLinkPolymorphicCall);
+ slowPathFor(jit, vm, operationLinkClosureCall);
- LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
- return FINALIZE_CODE(patchBuffer, ("Link polymorphic call slow path thunk"));
+ LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
+ return FINALIZE_CODE(patchBuffer, ("Link closure call slow path thunk"));
}
-// FIXME: We should distinguish between a megamorphic virtual call vs. a slow
-// path virtual call so that we can enable fast tail calls for megamorphic
-// virtual calls by using the shuffler.
-// https://bugs.webkit.org/show_bug.cgi?id=148831
-MacroAssemblerCodeRef virtualThunkFor(VM* vm, CallLinkInfo& callLinkInfo)
+static MacroAssemblerCodeRef virtualForThunkGenerator(
+ VM* vm, CodeSpecializationKind kind)
{
// The callee is in regT0 (for JSVALUE32_64, the tag is in regT1).
// The return address is on the stack, or in the link register. We will hence
@@ -169,149 +155,203 @@ MacroAssemblerCodeRef virtualThunkFor(VM* vm, CallLinkInfo& callLinkInfo)
CCallHelpers jit(vm);
CCallHelpers::JumpList slowCase;
-
- // This is a slow path execution, and regT2 contains the CallLinkInfo. Count the
- // slow path execution for the profiler.
- jit.add32(
- CCallHelpers::TrustedImm32(1),
- CCallHelpers::Address(GPRInfo::regT2, CallLinkInfo::offsetOfSlowPathCount()));
// FIXME: we should have a story for eliminating these checks. In many cases,
// the DFG knows that the value is definitely a cell, or definitely a function.
#if USE(JSVALUE64)
- jit.move(CCallHelpers::TrustedImm64(TagMask), GPRInfo::regT4);
-
slowCase.append(
jit.branchTest64(
- CCallHelpers::NonZero, GPRInfo::regT0, GPRInfo::regT4));
+ CCallHelpers::NonZero, GPRInfo::regT0, GPRInfo::tagMaskRegister));
#else
slowCase.append(
jit.branch32(
CCallHelpers::NotEqual, GPRInfo::regT1,
CCallHelpers::TrustedImm32(JSValue::CellTag)));
#endif
- AssemblyHelpers::emitLoadStructure(jit, GPRInfo::regT0, GPRInfo::regT4, GPRInfo::regT1);
+ jit.loadPtr(CCallHelpers::Address(GPRInfo::regT0, JSCell::structureOffset()), GPRInfo::nonArgGPR2);
slowCase.append(
jit.branchPtr(
CCallHelpers::NotEqual,
- CCallHelpers::Address(GPRInfo::regT4, Structure::classInfoOffset()),
+ CCallHelpers::Address(GPRInfo::nonArgGPR2, Structure::classInfoOffset()),
CCallHelpers::TrustedImmPtr(JSFunction::info())));
// Now we know we have a JSFunction.
jit.loadPtr(
CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfExecutable()),
- GPRInfo::regT4);
- jit.loadPtr(
- CCallHelpers::Address(
- GPRInfo::regT4, ExecutableBase::offsetOfJITCodeWithArityCheckFor(
- callLinkInfo.specializationKind())),
- GPRInfo::regT4);
- slowCase.append(jit.branchTestPtr(CCallHelpers::Zero, GPRInfo::regT4));
+ GPRInfo::nonArgGPR2);
+ slowCase.append(
+ jit.branch32(
+ CCallHelpers::LessThan,
+ CCallHelpers::Address(
+ GPRInfo::nonArgGPR2, ExecutableBase::offsetOfNumParametersFor(kind)),
+ CCallHelpers::TrustedImm32(0)));
// Now we know that we have a CodeBlock, and we're committed to making a fast
// call.
+ jit.loadPtr(
+ CCallHelpers::Address(GPRInfo::regT0, JSFunction::offsetOfScopeChain()),
+ GPRInfo::regT1);
+#if USE(JSVALUE64)
+ jit.store64(
+ GPRInfo::regT1,
+ CCallHelpers::Address(
+ GPRInfo::callFrameRegister,
+ static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain));
+#else
+ jit.storePtr(
+ GPRInfo::regT1,
+ CCallHelpers::Address(
+ GPRInfo::callFrameRegister,
+ static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain +
+ OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ jit.store32(
+ CCallHelpers::TrustedImm32(JSValue::CellTag),
+ CCallHelpers::Address(
+ GPRInfo::callFrameRegister,
+ static_cast<ptrdiff_t>(sizeof(Register)) * JSStack::ScopeChain +
+ OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+#endif
+
+ jit.loadPtr(
+ CCallHelpers::Address(GPRInfo::nonArgGPR2, ExecutableBase::offsetOfJITCodeWithArityCheckFor(kind)),
+ GPRInfo::regT0);
+
// Make a tail call. This will return back to JIT code.
- emitPointerValidation(jit, GPRInfo::regT4);
- if (callLinkInfo.isTailCall()) {
- jit.preserveReturnAddressAfterCall(GPRInfo::regT0);
- jit.prepareForTailCallSlow(GPRInfo::regT4);
- }
- jit.jump(GPRInfo::regT4);
+ emitPointerValidation(jit, GPRInfo::regT0);
+ jit.jump(GPRInfo::regT0);
slowCase.link(&jit);
// Here we don't know anything, so revert to the full slow path.
- slowPathFor(jit, vm, operationVirtualCall);
+ slowPathFor(jit, vm, kind == CodeForCall ? operationVirtualCall : operationVirtualConstruct);
- LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
return FINALIZE_CODE(
patchBuffer,
- ("Virtual %s slow path thunk",
- callLinkInfo.callMode() == CallMode::Regular ? "call" : callLinkInfo.callMode() == CallMode::Tail ? "tail call" : "construct"));
+ ("Virtual %s slow path thunk", kind == CodeForCall ? "call" : "construct"));
}
-enum ThunkEntryType { EnterViaCall, EnterViaJump };
+MacroAssemblerCodeRef virtualCallThunkGenerator(VM* vm)
+{
+ return virtualForThunkGenerator(vm, CodeForCall);
+}
-static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind, ThunkEntryType entryType = EnterViaCall)
+MacroAssemblerCodeRef virtualConstructThunkGenerator(VM* vm)
+{
+ return virtualForThunkGenerator(vm, CodeForConstruct);
+}
+
+static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind kind)
{
int executableOffsetToFunction = NativeExecutable::offsetOfNativeFunctionFor(kind);
JSInterfaceJIT jit(vm);
-
- if (entryType == EnterViaCall)
- jit.emitFunctionPrologue();
-#if USE(JSVALUE64)
- else if (entryType == EnterViaJump) {
- // We're coming from a specialized thunk that has saved the prior tag registers' contents.
- // Restore them now.
-#if CPU(ARM64)
- jit.popPair(JSInterfaceJIT::tagTypeNumberRegister, JSInterfaceJIT::tagMaskRegister);
-#else
- jit.pop(JSInterfaceJIT::tagMaskRegister);
- jit.pop(JSInterfaceJIT::tagTypeNumberRegister);
-#endif
- }
-#endif
-
- jit.emitPutToCallFrameHeader(0, JSStack::CodeBlock);
+
+ jit.emitPutImmediateToCallFrameHeader(0, JSStack::CodeBlock);
jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
#if CPU(X86)
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT0);
+ jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
+ jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
+
+ jit.peek(JSInterfaceJIT::regT1);
+ jit.emitPutReturnPCToCallFrameHeader(JSInterfaceJIT::regT1);
+
// Calling convention: f(ecx, edx, ...);
// Host function signature: f(ExecState*);
jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);
- jit.subPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister); // Align stack after prologue.
+ jit.subPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(void*)), JSInterfaceJIT::stackPointerRegister); // Align stack after call.
// call the function
jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::regT1);
jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT1);
+ jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT1, executableOffsetToFunction));
- jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::stackPointerRegister);
+ jit.addPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(void*)), JSInterfaceJIT::stackPointerRegister);
#elif CPU(X86_64)
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT0);
+ jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT0);
+ jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
+
+ jit.peek(JSInterfaceJIT::regT1);
+ jit.emitPutReturnPCToCallFrameHeader(JSInterfaceJIT::regT1);
+
#if !OS(WINDOWS)
// Calling convention: f(edi, esi, edx, ecx, ...);
// Host function signature: f(ExecState*);
jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::edi);
+ jit.subPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister); // Align stack after call.
+
jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::esi);
jit.loadPtr(JSInterfaceJIT::Address(X86Registers::esi, JSFunction::offsetOfExecutable()), X86Registers::r9);
+ jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));
+ jit.addPtr(JSInterfaceJIT::TrustedImm32(16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#else
// Calling convention: f(ecx, edx, r8, r9, ...);
// Host function signature: f(ExecState*);
jit.move(JSInterfaceJIT::callFrameRegister, X86Registers::ecx);
- // Leave space for the callee parameter home addresses.
- // At this point the stack is aligned to 16 bytes, but if this changes at some point, we need to emit code to align it.
- jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
+ // Leave space for the callee parameter home addresses and align the stack.
+ jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, X86Registers::edx);
jit.loadPtr(JSInterfaceJIT::Address(X86Registers::edx, JSFunction::offsetOfExecutable()), X86Registers::r9);
+ jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
jit.call(JSInterfaceJIT::Address(X86Registers::r9, executableOffsetToFunction));
- jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
+ jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t) + 16 - sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif
#elif CPU(ARM64)
+ COMPILE_ASSERT(ARM64Registers::x3 != JSInterfaceJIT::regT1, prev_callframe_not_trampled_by_T1);
+ COMPILE_ASSERT(ARM64Registers::x3 != JSInterfaceJIT::regT3, prev_callframe_not_trampled_by_T3);
COMPILE_ASSERT(ARM64Registers::x0 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_0);
COMPILE_ASSERT(ARM64Registers::x1 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_1);
COMPILE_ASSERT(ARM64Registers::x2 != JSInterfaceJIT::regT3, T3_not_trampled_by_arg_2);
+ // Load caller frame's scope chain into this callframe so that whatever we call can
+ // get to its global data.
+ jit.emitGetCallerFrameFromCallFrameHeaderPtr(ARM64Registers::x3);
+ jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, ARM64Registers::x3);
+ jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
+
+ jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
+ jit.emitPutReturnPCToCallFrameHeader(ARM64Registers::lr);
+
// Host function signature: f(ExecState*);
jit.move(JSInterfaceJIT::callFrameRegister, ARM64Registers::x0);
jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, ARM64Registers::x1);
jit.loadPtr(JSInterfaceJIT::Address(ARM64Registers::x1, JSFunction::offsetOfExecutable()), ARM64Registers::x2);
+ jit.move(ARM64Registers::x3, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
jit.call(JSInterfaceJIT::Address(ARM64Registers::x2, executableOffsetToFunction));
+
+ jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
+
#elif CPU(ARM) || CPU(SH4) || CPU(MIPS)
+ // Load caller frame's scope chain into this callframe so that whatever we call can get to its global data.
+ jit.emitGetCallerFrameFromCallFrameHeaderPtr(JSInterfaceJIT::regT2);
+ jit.emitGetFromCallFrameHeaderPtr(JSStack::ScopeChain, JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
+ jit.emitPutCellToCallFrameHeader(JSInterfaceJIT::regT1, JSStack::ScopeChain);
+
+ jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT3); // Callee preserved
+ jit.emitPutReturnPCToCallFrameHeader(JSInterfaceJIT::regT3);
+
#if CPU(MIPS)
// Allocate stack space for (unused) 16 bytes (8-byte aligned) for 4 arguments.
jit.subPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
@@ -322,6 +362,7 @@ static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind k
jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);
jit.emitGetFromCallFrameHeaderPtr(JSStack::Callee, JSInterfaceJIT::argumentGPR1);
+ jit.move(JSInterfaceJIT::regT2, JSInterfaceJIT::callFrameRegister); // Eagerly restore caller frame register to avoid loading from stack.
jit.loadPtr(JSInterfaceJIT::Address(JSInterfaceJIT::argumentGPR1, JSFunction::offsetOfExecutable()), JSInterfaceJIT::regT2);
jit.call(JSInterfaceJIT::Address(JSInterfaceJIT::regT2, executableOffsetToFunction));
@@ -329,10 +370,12 @@ static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind k
// Restore stack space
jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
#endif
+
+ jit.restoreReturnAddressBeforeReturn(JSInterfaceJIT::regT3);
#else
#error "JIT not supported on this platform."
UNUSED_PARAM(executableOffsetToFunction);
- abortWithReason(TGNotSupported);
+ breakpoint();
#endif
// Check for an exception
@@ -342,43 +385,40 @@ static MacroAssemblerCodeRef nativeForGenerator(VM* vm, CodeSpecializationKind k
#else
JSInterfaceJIT::Jump exceptionHandler = jit.branch32(
JSInterfaceJIT::NotEqual,
- JSInterfaceJIT::AbsoluteAddress(vm->addressOfException()),
- JSInterfaceJIT::TrustedImm32(0));
+ JSInterfaceJIT::AbsoluteAddress(reinterpret_cast<char*>(vm->addressOfException()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)),
+ JSInterfaceJIT::TrustedImm32(JSValue::EmptyValueTag));
#endif
- jit.emitFunctionEpilogue();
// Return.
jit.ret();
// Handle an exception
exceptionHandler.link(&jit);
- jit.copyCalleeSavesToVMCalleeSavesBuffer();
+ // Grab the return address.
+ jit.preserveReturnAddressAfterCall(JSInterfaceJIT::regT1);
+
+ jit.move(JSInterfaceJIT::TrustedImmPtr(&vm->exceptionLocation), JSInterfaceJIT::regT2);
+ jit.storePtr(JSInterfaceJIT::regT1, JSInterfaceJIT::regT2);
+
jit.storePtr(JSInterfaceJIT::callFrameRegister, &vm->topCallFrame);
#if CPU(X86) && USE(JSVALUE32_64)
jit.addPtr(JSInterfaceJIT::TrustedImm32(-12), JSInterfaceJIT::stackPointerRegister);
- jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT0);
- jit.push(JSInterfaceJIT::regT0);
+ jit.push(JSInterfaceJIT::callFrameRegister);
#else
-#if OS(WINDOWS)
- // Allocate space on stack for the 4 parameter registers.
- jit.subPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
-#endif
jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR0);
#endif
jit.move(JSInterfaceJIT::TrustedImmPtr(FunctionPtr(operationVMHandleException).value()), JSInterfaceJIT::regT3);
jit.call(JSInterfaceJIT::regT3);
#if CPU(X86) && USE(JSVALUE32_64)
jit.addPtr(JSInterfaceJIT::TrustedImm32(16), JSInterfaceJIT::stackPointerRegister);
-#elif OS(WINDOWS)
- jit.addPtr(JSInterfaceJIT::TrustedImm32(4 * sizeof(int64_t)), JSInterfaceJIT::stackPointerRegister);
#endif
jit.jumpToExceptionHandler();
- LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
- return FINALIZE_CODE(patchBuffer, ("native %s%s trampoline", entryType == EnterViaJump ? "Tail " : "", toCString(kind).data()));
+ LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
+ return FINALIZE_CODE(patchBuffer, ("native %s trampoline", toCString(kind).data()));
}
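Every architecture branch above emits the same logical host call; its machine-independent shape, as a hedged sketch (the cast and helper names are illustrative, the slots and offsets are the ones used above):

    static EncodedJSValue performHostCall(ExecState* exec)
    {
        JSFunction* callee = jsCast<JSFunction*>(exec->callee());   // JSStack::Callee slot
        NativeExecutable* executable = callee->nativeExecutable();  // JSFunction::offsetOfExecutable()
        return executable->function()(exec);                        // call at executableOffsetToFunction
    }
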
MacroAssemblerCodeRef nativeCallGenerator(VM* vm)
@@ -386,72 +426,43 @@ MacroAssemblerCodeRef nativeCallGenerator(VM* vm)
return nativeForGenerator(vm, CodeForCall);
}
-MacroAssemblerCodeRef nativeTailCallGenerator(VM* vm)
-{
- return nativeForGenerator(vm, CodeForCall, EnterViaJump);
-}
-
MacroAssemblerCodeRef nativeConstructGenerator(VM* vm)
{
return nativeForGenerator(vm, CodeForConstruct);
}
-MacroAssemblerCodeRef arityFixupGenerator(VM* vm)
+MacroAssemblerCodeRef arityFixup(VM* vm)
{
JSInterfaceJIT jit(vm);
- // We enter with fixup count in argumentGPR0
- // We have the guarantee that a0, a1, a2, t3, t4 and t5 (or t0 for Windows) are all distinct :-)
+ // We enter with fixup count in regT0
#if USE(JSVALUE64)
-#if OS(WINDOWS)
- const GPRReg extraTemp = JSInterfaceJIT::regT0;
-#else
- const GPRReg extraTemp = JSInterfaceJIT::regT5;
-#endif
# if CPU(X86_64)
jit.pop(JSInterfaceJIT::regT4);
# endif
+ jit.neg64(JSInterfaceJIT::regT0);
jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
- jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::argumentGPR2);
- jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::argumentGPR2);
-
- // Check to see if we have extra slots we can use
- jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR1);
- jit.and32(JSInterfaceJIT::TrustedImm32(stackAlignmentRegisters() - 1), JSInterfaceJIT::argumentGPR1);
- JSInterfaceJIT::Jump noExtraSlot = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR1);
- jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), extraTemp);
- JSInterfaceJIT::Label fillExtraSlots(jit.label());
- jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight));
- jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2);
- jit.branchSub32(JSInterfaceJIT::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR1).linkTo(fillExtraSlots, &jit);
- jit.and32(JSInterfaceJIT::TrustedImm32(-stackAlignmentRegisters()), JSInterfaceJIT::argumentGPR0);
- JSInterfaceJIT::Jump done = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR0);
- noExtraSlot.link(&jit);
-
- jit.neg64(JSInterfaceJIT::argumentGPR0);
-
- // Move current frame down argumentGPR0 number of slots
+ jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * 8), JSInterfaceJIT::regT2);
+ jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2);
+
+    // Move the current frame down by regT0 slots
JSInterfaceJIT::Label copyLoop(jit.label());
- jit.load64(JSInterfaceJIT::regT3, extraTemp);
- jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight));
+ jit.load64(JSInterfaceJIT::regT3, JSInterfaceJIT::regT1);
+ jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
- jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(copyLoop, &jit);
+ jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit);
- // Fill in argumentGPR0 missing arg slots with undefined
- jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR2);
- jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), extraTemp);
+    // Fill in the regT0 missing argument slots with undefined
+ jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2);
+ jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), JSInterfaceJIT::regT1);
JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
- jit.store64(extraTemp, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight));
+ jit.store64(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
- jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(fillUndefinedLoop, &jit);
-
- // Adjust call frame register and stack pointer to account for missing args
- jit.move(JSInterfaceJIT::argumentGPR0, extraTemp);
- jit.lshift64(JSInterfaceJIT::TrustedImm32(3), extraTemp);
- jit.addPtr(extraTemp, JSInterfaceJIT::callFrameRegister);
- jit.addPtr(extraTemp, JSInterfaceJIT::stackPointerRegister);
+ jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit);
- done.link(&jit);
+ // Adjust call frame register to account for missing args
+ jit.lshift64(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT0);
+ jit.addPtr(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister);
# if CPU(X86_64)
jit.push(JSInterfaceJIT::regT4);
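Stripped of register assignments, the 64-bit path is a downward slide of the live frame followed by an undefined fill. A hedged scalar model (Register assumed to be 8 bytes; the function and local names are invented for the sketch):

    static uint64_t* arityFixupModel(uint64_t* frame, int64_t fixupCount /* regT0 on entry */)
    {
        int64_t delta = -fixupCount;                                // neg64
        uint64_t* p = frame;                                        // regT3
        int32_t slots = int32_t(frame[JSStack::ArgumentCount])
            + JSStack::CallFrameHeaderSize;                         // regT2
        for (; slots; --slots, ++p)                                 // copyLoop
            p[delta] = *p;                                          // slide every live slot down
        for (int64_t n = delta; n; ++n, ++p)                        // fillUndefinedLoop
            p[delta] = ValueUndefined;                              // missing args read as undefined
        return frame + delta;                                       // lshift64 by 3, then addPtr
    }
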
@@ -461,54 +472,34 @@ MacroAssemblerCodeRef arityFixupGenerator(VM* vm)
# if CPU(X86)
jit.pop(JSInterfaceJIT::regT4);
# endif
+ jit.neg32(JSInterfaceJIT::regT0);
jit.move(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::regT3);
- jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * sizeof(Register)), JSInterfaceJIT::argumentGPR2);
- jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::argumentGPR2);
-
- // Check to see if we have extra slots we can use
- jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR1);
- jit.and32(JSInterfaceJIT::TrustedImm32(stackAlignmentRegisters() - 1), JSInterfaceJIT::argumentGPR1);
- JSInterfaceJIT::Jump noExtraSlot = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR1);
- JSInterfaceJIT::Label fillExtraSlots(jit.label());
- jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT5);
- jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight, PayloadOffset));
- jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT5);
- jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::callFrameRegister, JSInterfaceJIT::argumentGPR2, JSInterfaceJIT::TimesEight, TagOffset));
- jit.add32(JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2);
- jit.branchSub32(JSInterfaceJIT::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR1).linkTo(fillExtraSlots, &jit);
- jit.and32(JSInterfaceJIT::TrustedImm32(-stackAlignmentRegisters()), JSInterfaceJIT::argumentGPR0);
- JSInterfaceJIT::Jump done = jit.branchTest32(MacroAssembler::Zero, JSInterfaceJIT::argumentGPR0);
- noExtraSlot.link(&jit);
-
- jit.neg32(JSInterfaceJIT::argumentGPR0);
-
- // Move current frame down argumentGPR0 number of slots
+ jit.load32(JSInterfaceJIT::Address(JSInterfaceJIT::callFrameRegister, JSStack::ArgumentCount * 8), JSInterfaceJIT::regT2);
+ jit.add32(JSInterfaceJIT::TrustedImm32(JSStack::CallFrameHeaderSize), JSInterfaceJIT::regT2);
+
+    // Move the current frame down by regT0 slots
JSInterfaceJIT::Label copyLoop(jit.label());
- jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, PayloadOffset), JSInterfaceJIT::regT5);
- jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, PayloadOffset));
- jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, TagOffset), JSInterfaceJIT::regT5);
- jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, TagOffset));
+ jit.load32(JSInterfaceJIT::regT3, JSInterfaceJIT::regT1);
+ jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
+ jit.load32(MacroAssembler::Address(JSInterfaceJIT::regT3, 4), JSInterfaceJIT::regT1);
+ jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight, 4));
jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
- jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(copyLoop, &jit);
+ jit.branchSub32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(copyLoop, &jit);
- // Fill in argumentGPR0 missing arg slots with undefined
- jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::argumentGPR2);
+    // Fill in the regT0 missing argument slots with undefined
+ jit.move(JSInterfaceJIT::regT0, JSInterfaceJIT::regT2);
JSInterfaceJIT::Label fillUndefinedLoop(jit.label());
- jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT5);
- jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, PayloadOffset));
- jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT5);
- jit.store32(JSInterfaceJIT::regT5, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::TimesEight, TagOffset));
+ jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT1);
+ jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight));
+ jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT1);
+ jit.store32(JSInterfaceJIT::regT1, MacroAssembler::BaseIndex(JSInterfaceJIT::regT3, JSInterfaceJIT::regT0, JSInterfaceJIT::TimesEight, 4));
jit.addPtr(JSInterfaceJIT::TrustedImm32(8), JSInterfaceJIT::regT3);
- jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::argumentGPR2).linkTo(fillUndefinedLoop, &jit);
-
- // Adjust call frame register and stack pointer to account for missing args
- jit.move(JSInterfaceJIT::argumentGPR0, JSInterfaceJIT::regT5);
- jit.lshift32(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT5);
- jit.addPtr(JSInterfaceJIT::regT5, JSInterfaceJIT::callFrameRegister);
- jit.addPtr(JSInterfaceJIT::regT5, JSInterfaceJIT::stackPointerRegister);
+ jit.branchAdd32(MacroAssembler::NonZero, JSInterfaceJIT::TrustedImm32(1), JSInterfaceJIT::regT2).linkTo(fillUndefinedLoop, &jit);
- done.link(&jit);
+ // Adjust call frame register to account for missing args
+ jit.lshift32(JSInterfaceJIT::TrustedImm32(3), JSInterfaceJIT::regT0);
+ jit.addPtr(JSInterfaceJIT::regT0, JSInterfaceJIT::callFrameRegister);
# if CPU(X86)
jit.push(JSInterfaceJIT::regT4);
@@ -516,20 +507,10 @@ MacroAssemblerCodeRef arityFixupGenerator(VM* vm)
jit.ret();
#endif
- LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
+ LinkBuffer patchBuffer(*vm, &jit, GLOBAL_THUNK_ID);
return FINALIZE_CODE(patchBuffer, ("fixup arity"));
}
-MacroAssemblerCodeRef unreachableGenerator(VM* vm)
-{
- JSInterfaceJIT jit(vm);
-
- jit.breakpoint();
-
- LinkBuffer patchBuffer(*vm, jit, GLOBAL_THUNK_ID);
- return FINALIZE_CODE(patchBuffer, ("unreachable thunk"));
-}
-
static void stringCharLoad(SpecializedThunkJIT& jit, VM* vm)
{
// load string
@@ -573,7 +554,7 @@ MacroAssemblerCodeRef charCodeAtThunkGenerator(VM* vm)
SpecializedThunkJIT jit(vm, 1);
stringCharLoad(jit, vm);
jit.returnInt32(SpecializedThunkJIT::regT0);
- return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charCodeAt");
+ return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "charCodeAt");
}
MacroAssemblerCodeRef charAtThunkGenerator(VM* vm)
@@ -582,7 +563,7 @@ MacroAssemblerCodeRef charAtThunkGenerator(VM* vm)
stringCharLoad(jit, vm);
charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
jit.returnJSCell(SpecializedThunkJIT::regT0);
- return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "charAt");
+ return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "charAt");
}
MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM* vm)
@@ -592,28 +573,7 @@ MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM* vm)
jit.loadInt32Argument(0, SpecializedThunkJIT::regT0);
charToString(jit, vm, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
jit.returnJSCell(SpecializedThunkJIT::regT0);
- return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "fromCharCode");
-}
-
-MacroAssemblerCodeRef clz32ThunkGenerator(VM* vm)
-{
- SpecializedThunkJIT jit(vm, 1);
- MacroAssembler::Jump nonIntArgJump;
- jit.loadInt32Argument(0, SpecializedThunkJIT::regT0, nonIntArgJump);
-
- SpecializedThunkJIT::Label convertedArgumentReentry(&jit);
- jit.countLeadingZeros32(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
- jit.returnInt32(SpecializedThunkJIT::regT1);
-
- if (jit.supportsFloatingPointTruncate()) {
- nonIntArgJump.link(&jit);
- jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
- jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(convertedArgumentReentry, &jit);
- jit.appendFailure(jit.jump());
- } else
- jit.appendFailure(nonIntArgJump);
-
- return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "clz32");
+ return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "fromCharCode");
}
MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm)
@@ -625,15 +585,25 @@ MacroAssemblerCodeRef sqrtThunkGenerator(VM* vm)
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
jit.sqrtDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
- return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "sqrt");
+ return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "sqrt");
}
#define UnaryDoubleOpWrapper(function) function##Wrapper
enum MathThunkCallingConvention { };
typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);
+extern "C" {
+
+double jsRound(double) REFERENCED_FROM_ASM;
+double jsRound(double d)
+{
+ double integer = ceil(d);
+ return integer - (integer - d > 0.5);
+}
+
+}
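jsRound implements Math.round's tie-breaking rule (exact halves round toward +Infinity) without a libm round call. A few hedged sanity checks on the formula (<cassert> assumed):

    assert(jsRound(2.4) == 2.0);     // ceil = 3, 3 - 2.4 = 0.6 > 0.5, so 3 - 1
    assert(jsRound(2.5) == 3.0);     // ceil = 3, 0.5 is not > 0.5, tie rounds up
    assert(jsRound(-2.5) == -2.0);   // ceil = -2, tie again rounds toward +infinity
    assert(jsRound(-2.6) == -3.0);   // ceil = -2, -2 - (-2.6) = 0.6 > 0.5
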
-#if CPU(X86_64) && COMPILER(GCC_OR_CLANG) && (OS(DARWIN) || OS(LINUX))
+#if CPU(X86_64) && COMPILER(GCC) && (OS(DARWIN) || OS(LINUX))
#define defineUnaryDoubleOpWrapper(function) \
asm( \
@@ -641,9 +611,7 @@ typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);
".globl " SYMBOL_STRING(function##Thunk) "\n" \
HIDE_SYMBOL(function##Thunk) "\n" \
SYMBOL_STRING(function##Thunk) ":" "\n" \
- "pushq %rax\n" \
"call " GLOBAL_REFERENCE(function) "\n" \
- "popq %rcx\n" \
"ret\n" \
);\
extern "C" { \
@@ -651,7 +619,7 @@ typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);
} \
static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
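For concreteness, defineUnaryDoubleOpWrapper(floor) on this x86-64 path expands to roughly the following: a global floorThunk that keeps its argument and result in %xmm0 and forwards directly to the libc symbol. This is the macro's expansion spelled out, not additional source:

    asm(
        ".text\n"
        ".globl " SYMBOL_STRING(floorThunk) "\n"
        HIDE_SYMBOL(floorThunk) "\n"
        SYMBOL_STRING(floorThunk) ":" "\n"
        "call " GLOBAL_REFERENCE(floor) "\n"
        "ret\n"
    );
    extern "C" { MathThunkCallingConvention floorThunk(MathThunkCallingConvention); }
    static MathThunk floorWrapper = &floorThunk;   // i.e. UnaryDoubleOpWrapper(floor)
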
-#elif CPU(X86) && COMPILER(GCC_OR_CLANG) && OS(LINUX) && defined(__PIC__)
+#elif CPU(X86) && COMPILER(GCC) && OS(LINUX) && defined(__PIC__)
#define defineUnaryDoubleOpWrapper(function) \
asm( \
".text\n" \
@@ -675,19 +643,19 @@ typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);
} \
static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
-#elif CPU(X86) && COMPILER(GCC_OR_CLANG) && (OS(DARWIN) || OS(LINUX))
+#elif CPU(X86) && COMPILER(GCC) && (OS(DARWIN) || OS(LINUX))
#define defineUnaryDoubleOpWrapper(function) \
asm( \
".text\n" \
".globl " SYMBOL_STRING(function##Thunk) "\n" \
HIDE_SYMBOL(function##Thunk) "\n" \
SYMBOL_STRING(function##Thunk) ":" "\n" \
- "subl $20, %esp\n" \
+ "subl $8, %esp\n" \
"movsd %xmm0, (%esp) \n" \
"call " GLOBAL_REFERENCE(function) "\n" \
"fstpl (%esp) \n" \
"movsd (%esp), %xmm0 \n" \
- "addl $20, %esp\n" \
+ "addl $8, %esp\n" \
"ret\n" \
);\
extern "C" { \
@@ -695,7 +663,7 @@ typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);
} \
static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
-#elif CPU(ARM_THUMB2) && COMPILER(GCC_OR_CLANG) && PLATFORM(IOS)
+#elif CPU(ARM_THUMB2) && COMPILER(GCC) && PLATFORM(IOS)
#define defineUnaryDoubleOpWrapper(function) \
asm( \
@@ -728,38 +696,12 @@ typedef MathThunkCallingConvention(*MathThunk)(MathThunkCallingConvention);
HIDE_SYMBOL(function##Thunk) "\n" \
SYMBOL_STRING(function##Thunk) ":" "\n" \
"b " GLOBAL_REFERENCE(function) "\n" \
- ".previous" \
); \
extern "C" { \
MathThunkCallingConvention function##Thunk(MathThunkCallingConvention); \
} \
static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
-#elif CPU(X86) && COMPILER(MSVC) && OS(WINDOWS)
-
-// MSVC does not accept floor, etc, to be called directly from inline assembly, so we need to wrap these functions.
-static double (_cdecl *floorFunction)(double) = floor;
-static double (_cdecl *ceilFunction)(double) = ceil;
-static double (_cdecl *expFunction)(double) = exp;
-static double (_cdecl *logFunction)(double) = log;
-static double (_cdecl *jsRoundFunction)(double) = jsRound;
-
-#define defineUnaryDoubleOpWrapper(function) \
- extern "C" __declspec(naked) MathThunkCallingConvention function##Thunk(MathThunkCallingConvention) \
- { \
- __asm \
- { \
- __asm sub esp, 20 \
- __asm movsd mmword ptr [esp], xmm0 \
- __asm call function##Function \
- __asm fstp qword ptr [esp] \
- __asm movsd xmm0, mmword ptr [esp] \
- __asm add esp, 20 \
- __asm ret \
- } \
- } \
- static MathThunk UnaryDoubleOpWrapper(function) = &function##Thunk;
-
#else
#define defineUnaryDoubleOpWrapper(function) \
@@ -774,6 +716,7 @@ defineUnaryDoubleOpWrapper(ceil);
static const double oneConstant = 1.0;
static const double negativeHalfConstant = -0.5;
+static const double zeroConstant = 0.0;
static const double halfConstant = 0.5;
MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
@@ -797,7 +740,7 @@ MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
SpecializedThunkJIT::Jump intResult;
SpecializedThunkJIT::JumpList doubleResult;
if (jit.supportsFloatingPointTruncate()) {
- jit.moveZeroToDouble(SpecializedThunkJIT::fpRegT1);
+ jit.loadDouble(&zeroConstant, SpecializedThunkJIT::fpRegT1);
doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
SpecializedThunkJIT::JumpList slowPath;
// Handle the negative doubles in the slow path for now.
@@ -814,7 +757,7 @@ MacroAssemblerCodeRef floorThunkGenerator(VM* vm)
doubleResult.link(&jit);
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
#endif // CPU(ARM64)
- return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "floor");
+ return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "floor");
}
MacroAssemblerCodeRef ceilThunkGenerator(VM* vm)
@@ -827,17 +770,17 @@ MacroAssemblerCodeRef ceilThunkGenerator(VM* vm)
jit.returnInt32(SpecializedThunkJIT::regT0);
nonIntJump.link(&jit);
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
- if (jit.supportsFloatingPointRounding())
- jit.ceilDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
- else
- jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil));
-
+#if CPU(ARM64)
+ jit.ceilDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT0);
+#else
+ jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(ceil));
+#endif // CPU(ARM64)
SpecializedThunkJIT::JumpList doubleResult;
jit.branchConvertDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, doubleResult, SpecializedThunkJIT::fpRegT1);
jit.returnInt32(SpecializedThunkJIT::regT0);
doubleResult.link(&jit);
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
- return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "ceil");
+ return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "ceil");
}
MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
@@ -853,12 +796,12 @@ MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
SpecializedThunkJIT::Jump intResult;
SpecializedThunkJIT::JumpList doubleResult;
if (jit.supportsFloatingPointTruncate()) {
- jit.moveZeroToDouble(SpecializedThunkJIT::fpRegT1);
+ jit.loadDouble(&zeroConstant, SpecializedThunkJIT::fpRegT1);
doubleResult.append(jit.branchDouble(MacroAssembler::DoubleEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
SpecializedThunkJIT::JumpList slowPath;
// Handle the negative doubles in the slow path for now.
slowPath.append(jit.branchDouble(MacroAssembler::DoubleLessThanOrUnordered, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
- jit.loadDouble(MacroAssembler::TrustedImmPtr(&halfConstant), SpecializedThunkJIT::fpRegT1);
+ jit.loadDouble(&halfConstant, SpecializedThunkJIT::fpRegT1);
jit.addDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
slowPath.append(jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT1, SpecializedThunkJIT::regT0));
intResult = jit.jump();
@@ -871,7 +814,7 @@ MacroAssemblerCodeRef roundThunkGenerator(VM* vm)
jit.returnInt32(SpecializedThunkJIT::regT0);
doubleResult.link(&jit);
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
- return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "round");
+ return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "round");
}
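For non-negative inputs the fast path above agrees with jsRound by construction: adding halfConstant and truncating rounds half-up, and negative inputs were already diverted to the slow path before this point. A hedged spot check (<cmath>/<cassert> assumed):

    assert(std::trunc(2.4 + 0.5) == jsRound(2.4));   // both 2
    assert(std::trunc(2.5 + 0.5) == jsRound(2.5));   // both 3
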
MacroAssemblerCodeRef expThunkGenerator(VM* vm)
@@ -884,7 +827,7 @@ MacroAssemblerCodeRef expThunkGenerator(VM* vm)
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(exp));
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
- return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "exp");
+ return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "exp");
}
MacroAssemblerCodeRef logThunkGenerator(VM* vm)
@@ -897,7 +840,7 @@ MacroAssemblerCodeRef logThunkGenerator(VM* vm)
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
jit.callDoubleToDoublePreservingReturn(UnaryDoubleOpWrapper(log));
jit.returnDouble(SpecializedThunkJIT::fpRegT0);
- return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "log");
+ return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "log");
}
MacroAssemblerCodeRef absThunkGenerator(VM* vm)
@@ -910,14 +853,14 @@ MacroAssemblerCodeRef absThunkGenerator(VM* vm)
jit.rshift32(SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(31), SpecializedThunkJIT::regT1);
jit.add32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT0);
- jit.appendFailure(jit.branchTest32(MacroAssembler::Signed, SpecializedThunkJIT::regT0));
+ jit.appendFailure(jit.branch32(MacroAssembler::Equal, SpecializedThunkJIT::regT0, MacroAssembler::TrustedImm32(1 << 31)));
jit.returnInt32(SpecializedThunkJIT::regT0);
nonIntJump.link(&jit);
// Shame about the double-to-int conversion here.
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
jit.absDouble(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1);
jit.returnDouble(SpecializedThunkJIT::fpRegT1);
- return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "abs");
+ return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "abs");
}
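The three ALU ops in the integer path are the classic branchless abs; the only input they cannot fix up is INT32_MIN, which maps to itself and is exactly what the appendFailure comparison against 1 << 31 now catches. A hedged model using wrapping unsigned arithmetic, as the 32-bit ALU ops do:

    static int32_t branchlessAbs(int32_t x)
    {
        uint32_t mask = uint32_t(x >> 31);            // rshift32: 0 or 0xFFFFFFFF
        return int32_t((uint32_t(x) + mask) ^ mask);  // add32 then xor32
    }
    // branchlessAbs(7) == 7, branchlessAbs(-7) == 7,
    // branchlessAbs(INT32_MIN) == INT32_MIN  -> thunk bails to the slow path
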
MacroAssemblerCodeRef powThunkGenerator(VM* vm)
@@ -926,7 +869,7 @@ MacroAssemblerCodeRef powThunkGenerator(VM* vm)
if (!jit.supportsFloatingPoint())
return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
- jit.loadDouble(MacroAssembler::TrustedImmPtr(&oneConstant), SpecializedThunkJIT::fpRegT1);
+ jit.loadDouble(&oneConstant, SpecializedThunkJIT::fpRegT1);
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
MacroAssembler::Jump nonIntExponent;
jit.loadInt32Argument(1, SpecializedThunkJIT::regT0, nonIntExponent);
@@ -954,7 +897,7 @@ MacroAssemblerCodeRef powThunkGenerator(VM* vm)
if (jit.supportsFloatingPointSqrt()) {
nonIntExponent.link(&jit);
- jit.loadDouble(MacroAssembler::TrustedImmPtr(&negativeHalfConstant), SpecializedThunkJIT::fpRegT3);
+ jit.loadDouble(&negativeHalfConstant, SpecializedThunkJIT::fpRegT3);
jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::regT0);
jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleLessThanOrEqual, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::fpRegT1));
jit.appendFailure(jit.branchDouble(MacroAssembler::DoubleNotEqualOrUnordered, SpecializedThunkJIT::fpRegT2, SpecializedThunkJIT::fpRegT3));
@@ -969,7 +912,7 @@ MacroAssemblerCodeRef powThunkGenerator(VM* vm)
} else
jit.appendFailure(nonIntExponent);
- return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "pow");
+ return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "pow");
}
MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
@@ -988,7 +931,8 @@ MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
nonIntArg0Jump.link(&jit);
jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0);
jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg0, &jit);
- jit.appendFailure(jit.jump());
+ jit.xor32(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0);
+ jit.jump(doneLoadingArg0);
} else
jit.appendFailure(nonIntArg0Jump);
@@ -996,29 +940,117 @@ MacroAssemblerCodeRef imulThunkGenerator(VM* vm)
nonIntArg1Jump.link(&jit);
jit.loadDoubleArgument(1, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1);
jit.branchTruncateDoubleToInt32(SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::BranchIfTruncateSuccessful).linkTo(doneLoadingArg1, &jit);
- jit.appendFailure(jit.jump());
+ jit.xor32(SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT1);
+ jit.jump(doneLoadingArg1);
} else
jit.appendFailure(nonIntArg1Jump);
- return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "imul");
+ return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "imul");
}
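After this change a double argument that fails to truncate contributes 0 to the multiply instead of bailing to the slow path, which matches ToInt32 for the common failure cases NaN and +/-Infinity (e.g. Math.imul(NaN, 3) is 0). A hedged C model of the conversion; the exact failure condition of branchTruncateDoubleToInt32 varies by port, so treat the range test as approximate:

    static int32_t imulFastPathArg(double d)
    {
        double t = std::trunc(d);                     // <cmath>
        if (t >= -2147483648.0 && t <= 2147483647.0)
            return int32_t(t);                        // truncation succeeded
        return 0;                                     // xor32 reg, reg fallback
    }
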
-MacroAssemblerCodeRef randomThunkGenerator(VM* vm)
+static MacroAssemblerCodeRef arrayIteratorNextThunkGenerator(VM* vm, ArrayIterationKind kind)
{
- SpecializedThunkJIT jit(vm, 0);
- if (!jit.supportsFloatingPoint())
- return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
+ typedef SpecializedThunkJIT::TrustedImm32 TrustedImm32;
+ typedef SpecializedThunkJIT::TrustedImmPtr TrustedImmPtr;
+ typedef SpecializedThunkJIT::Address Address;
+ typedef SpecializedThunkJIT::BaseIndex BaseIndex;
+ typedef SpecializedThunkJIT::Jump Jump;
+
+ SpecializedThunkJIT jit(vm);
+    // Make sure we're being called on an array iterator, then load m_iteratedObject and m_nextIndex into regT0 and regT1, respectively
+ jit.loadArgumentWithSpecificClass(JSArrayIterator::info(), SpecializedThunkJIT::ThisArgument, SpecializedThunkJIT::regT4, SpecializedThunkJIT::regT1);
-#if USE(JSVALUE64)
- jit.emitRandomThunk(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1, SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT3, SpecializedThunkJIT::fpRegT0);
- jit.returnDouble(SpecializedThunkJIT::fpRegT0);
+ // Early exit if we don't have a thunk for this form of iteration
+ jit.appendFailure(jit.branch32(SpecializedThunkJIT::AboveOrEqual, Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfIterationKind()), TrustedImm32(ArrayIterateKeyValue)));
+
+ jit.loadPtr(Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfIteratedObject()), SpecializedThunkJIT::regT0);
+
+ jit.load32(Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()), SpecializedThunkJIT::regT1);
+
+ // Pull out the butterfly from iteratedObject
+ jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSCell::structureOffset()), SpecializedThunkJIT::regT2);
+
+ jit.load8(Address(SpecializedThunkJIT::regT2, Structure::indexingTypeOffset()), SpecializedThunkJIT::regT3);
+ jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);
+ Jump nullButterfly = jit.branchTestPtr(SpecializedThunkJIT::Zero, SpecializedThunkJIT::regT2);
+
+ Jump notDone = jit.branch32(SpecializedThunkJIT::Below, SpecializedThunkJIT::regT1, Address(SpecializedThunkJIT::regT2, Butterfly::offsetOfPublicLength()));
- return jit.finalize(vm->jitStubs->ctiNativeTailCall(vm), "random");
+ nullButterfly.link(&jit);
+
+ // Return the termination signal to indicate that we've finished
+ jit.move(TrustedImmPtr(vm->iterationTerminator.get()), SpecializedThunkJIT::regT0);
+ jit.returnJSCell(SpecializedThunkJIT::regT0);
+
+ notDone.link(&jit);
+
+ if (kind == ArrayIterateKey) {
+ jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
+ jit.returnInt32(SpecializedThunkJIT::regT1);
+ return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "array-iterator-next-key");
+
+ }
+ ASSERT(kind == ArrayIterateValue);
+
+    // Okay, now we're returning a value, so make sure the index is within the vector length
+ jit.appendFailure(jit.branch32(SpecializedThunkJIT::AboveOrEqual, SpecializedThunkJIT::regT1, Address(SpecializedThunkJIT::regT2, Butterfly::offsetOfVectorLength())));
+
+ // So now we perform inline loads for int32, value/undecided, and double storage
+ Jump undecidedStorage = jit.branch32(SpecializedThunkJIT::Equal, SpecializedThunkJIT::regT3, TrustedImm32(ArrayWithUndecided));
+ Jump notContiguousStorage = jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(ArrayWithContiguous));
+
+ undecidedStorage.link(&jit);
+
+ jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);
+
+#if USE(JSVALUE64)
+ jit.load64(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight), SpecializedThunkJIT::regT0);
+ Jump notHole = jit.branchTest64(SpecializedThunkJIT::NonZero, SpecializedThunkJIT::regT0);
+ jit.move(JSInterfaceJIT::TrustedImm64(ValueUndefined), JSInterfaceJIT::regT0);
+ notHole.link(&jit);
+ jit.addPtr(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
+ jit.returnJSValue(SpecializedThunkJIT::regT0);
#else
- return MacroAssemblerCodeRef::createSelfManagedCodeRef(vm->jitStubs->ctiNativeCall(vm));
+ jit.load32(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight, JSValue::offsetOfTag()), SpecializedThunkJIT::regT3);
+ Jump notHole = jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(JSValue::EmptyValueTag));
+ jit.move(JSInterfaceJIT::TrustedImm32(JSValue::UndefinedTag), JSInterfaceJIT::regT1);
+ jit.move(JSInterfaceJIT::TrustedImm32(0), JSInterfaceJIT::regT0);
+ jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
+ jit.returnJSValue(SpecializedThunkJIT::regT0, JSInterfaceJIT::regT1);
+ notHole.link(&jit);
+ jit.load32(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight, JSValue::offsetOfPayload()), SpecializedThunkJIT::regT0);
+ jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
+ jit.move(SpecializedThunkJIT::regT3, SpecializedThunkJIT::regT1);
+ jit.returnJSValue(SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1);
#endif
+ notContiguousStorage.link(&jit);
+
+ Jump notInt32Storage = jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(ArrayWithInt32));
+ jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);
+ jit.load32(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight, JSValue::offsetOfPayload()), SpecializedThunkJIT::regT0);
+ jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
+ jit.returnInt32(SpecializedThunkJIT::regT0);
+ notInt32Storage.link(&jit);
+
+ jit.appendFailure(jit.branch32(SpecializedThunkJIT::NotEqual, SpecializedThunkJIT::regT3, TrustedImm32(ArrayWithDouble)));
+ jit.loadPtr(Address(SpecializedThunkJIT::regT0, JSObject::butterflyOffset()), SpecializedThunkJIT::regT2);
+ jit.loadDouble(BaseIndex(SpecializedThunkJIT::regT2, SpecializedThunkJIT::regT1, SpecializedThunkJIT::TimesEight), SpecializedThunkJIT::fpRegT0);
+ jit.add32(TrustedImm32(1), Address(SpecializedThunkJIT::regT4, JSArrayIterator::offsetOfNextIndex()));
+ jit.returnDouble(SpecializedThunkJIT::fpRegT0);
+
+ return jit.finalize(vm->jitStubs->ctiNativeCall(vm), "array-iterator-next-value");
}
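Taken together, the generator above assembles the following fast path for each next() call; this is a hedged pseudocode rendering (slowPath and setNextIndex are stand-ins for appendFailure and the add32 to m_nextIndex, and the vector-length guard for the value case is elided):

    JSValue arrayIteratorNextFastPath(VM* vm, JSArrayIterator* it)
    {
        if (it->iterationKind() >= ArrayIterateKeyValue)
            return slowPath();                                 // no thunk for key+value iteration
        JSObject* array = it->iteratedObject();
        uint32_t index = it->nextIndex();
        Butterfly* butterfly = array->butterfly();
        if (!butterfly || index >= butterfly->publicLength())
            return vm->iterationTerminator.get();              // the "done" sentinel
        it->setNextIndex(index + 1);
        if (it->iterationKind() == ArrayIterateKey)
            return jsNumber(index);                            // key iteration returns the index
        JSValue value = butterfly->contiguous()[index].get();  // int32/contiguous/double load
        return value ? value : jsUndefined();                  // holes read back as undefined
    }
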
+MacroAssemblerCodeRef arrayIteratorNextKeyThunkGenerator(VM* vm)
+{
+ return arrayIteratorNextThunkGenerator(vm, ArrayIterateKey);
+}
+
+MacroAssemblerCodeRef arrayIteratorNextValueThunkGenerator(VM* vm)
+{
+ return arrayIteratorNextThunkGenerator(vm, ArrayIterateValue);
+}
+
}
#endif // ENABLE(JIT)
diff --git a/Source/JavaScriptCore/jit/ThunkGenerators.h b/Source/JavaScriptCore/jit/ThunkGenerators.h
index 9fb8abb44..0e2762890 100644
--- a/Source/JavaScriptCore/jit/ThunkGenerators.h
+++ b/Source/JavaScriptCore/jit/ThunkGenerators.h
@@ -26,32 +26,27 @@
#ifndef ThunkGenerators_h
#define ThunkGenerators_h
-#include "CodeSpecializationKind.h"
#include "ThunkGenerator.h"
#if ENABLE(JIT)
namespace JSC {
-class CallLinkInfo;
-class CCallHelpers;
-
MacroAssemblerCodeRef throwExceptionFromCallSlowPathGenerator(VM*);
-MacroAssemblerCodeRef linkCallThunk(VM*, CallLinkInfo&, CodeSpecializationKind);
MacroAssemblerCodeRef linkCallThunkGenerator(VM*);
-MacroAssemblerCodeRef linkPolymorphicCallThunkGenerator(VM*);
+MacroAssemblerCodeRef linkConstructThunkGenerator(VM*);
+
+MacroAssemblerCodeRef linkClosureCallThunkGenerator(VM*);
-MacroAssemblerCodeRef virtualThunkFor(VM*, CallLinkInfo&);
+MacroAssemblerCodeRef virtualCallThunkGenerator(VM*);
+MacroAssemblerCodeRef virtualConstructThunkGenerator(VM*);
MacroAssemblerCodeRef nativeCallGenerator(VM*);
MacroAssemblerCodeRef nativeConstructGenerator(VM*);
-MacroAssemblerCodeRef nativeTailCallGenerator(VM*);
-MacroAssemblerCodeRef arityFixupGenerator(VM*);
-MacroAssemblerCodeRef unreachableGenerator(VM*);
+MacroAssemblerCodeRef arityFixup(VM*);
MacroAssemblerCodeRef charCodeAtThunkGenerator(VM*);
MacroAssemblerCodeRef charAtThunkGenerator(VM*);
-MacroAssemblerCodeRef clz32ThunkGenerator(VM*);
MacroAssemblerCodeRef fromCharCodeThunkGenerator(VM*);
MacroAssemblerCodeRef absThunkGenerator(VM*);
MacroAssemblerCodeRef ceilThunkGenerator(VM*);
@@ -62,7 +57,8 @@ MacroAssemblerCodeRef roundThunkGenerator(VM*);
MacroAssemblerCodeRef sqrtThunkGenerator(VM*);
MacroAssemblerCodeRef powThunkGenerator(VM*);
MacroAssemblerCodeRef imulThunkGenerator(VM*);
-MacroAssemblerCodeRef randomThunkGenerator(VM*);
+MacroAssemblerCodeRef arrayIteratorNextKeyThunkGenerator(VM*);
+MacroAssemblerCodeRef arrayIteratorNextValueThunkGenerator(VM*);
}
#endif // ENABLE(JIT)