author    Lorry Tar Creator <lorry-tar-importer@lorry>    2017-06-27 06:07:23 +0000
committer Lorry Tar Creator <lorry-tar-importer@lorry>    2017-06-27 06:07:23 +0000
commit    1bf1084f2b10c3b47fd1a588d85d21ed0eb41d0c
tree      46dcd36c86e7fbc6e5df36deb463b33e9967a6f7 /Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
parent    32761a6cee1d0dee366b885b7b9c777e67885688
Diffstat (limited to 'Source/JavaScriptCore/dfg/DFGJITCompiler.cpp')
-rw-r--r--  Source/JavaScriptCore/dfg/DFGJITCompiler.cpp | 431
1 file changed, 338 insertions(+), 93 deletions(-)
diff --git a/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
index 2934d2ba9..db5e525ff 100644
--- a/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
+++ b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013-2016 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -39,9 +39,12 @@
#include "DFGSlowPathGenerator.h"
#include "DFGSpeculativeJIT.h"
#include "DFGThunks.h"
+#include "JSCInlines.h"
#include "JSCJSValueInlines.h"
-#include "VM.h"
#include "LinkBuffer.h"
+#include "MaxFrameExtentForSlowPathCall.h"
+#include "StructureStubInfo.h"
+#include "VM.h"
namespace JSC { namespace DFG {
@@ -50,9 +53,15 @@ JITCompiler::JITCompiler(Graph& dfg)
, m_graph(dfg)
, m_jitCode(adoptRef(new JITCode()))
, m_blockHeads(dfg.numBlocks())
+ , m_pcToCodeOriginMapBuilder(dfg.m_vm)
{
- if (shouldShowDisassembly() || m_graph.m_vm.m_perBytecodeProfiler)
- m_disassembler = adoptPtr(new Disassembler(dfg));
+ if (shouldDumpDisassembly() || m_graph.m_vm.m_perBytecodeProfiler)
+ m_disassembler = std::make_unique<Disassembler>(dfg);
+#if ENABLE(FTL_JIT)
+ m_jitCode->tierUpInLoopHierarchy = WTFMove(m_graph.m_plan.tierUpInLoopHierarchy);
+ for (unsigned tierUpBytecode : m_graph.m_plan.tierUpAndOSREnterBytecodes)
+ m_jitCode->tierUpEntryTriggers.add(tierUpBytecode, JITCode::TriggerReason::DontTrigger);
+#endif
}
JITCompiler::~JITCompiler()
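For context on the tier-up bookkeeping added to the constructor above: every bytecode index that may OSR-enter the FTL gets its own trigger slot, initialized to DontTrigger. A minimal sketch of that shape, using std::unordered_map in place of WTF::HashMap and with the TriggerReason values assumed rather than copied from JSC:

    #include <cstdint>
    #include <unordered_map>
    #include <vector>

    // Sketch only; JSC's JITCode keeps its own map type and enum.
    enum class TriggerReason : uint8_t { DontTrigger, CompilationDone, StartCompilation };

    // bytecode index of an OSR-enterable loop -> current trigger state
    using TierUpEntryTriggers = std::unordered_map<unsigned, TriggerReason>;

    void initializeTriggers(TierUpEntryTriggers& triggers, const std::vector<unsigned>& tierUpAndOSREnterBytecodes)
    {
        for (unsigned bytecodeIndex : tierUpAndOSREnterBytecodes)
            triggers.emplace(bytecodeIndex, TriggerReason::DontTrigger);
    }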
@@ -83,6 +92,7 @@ void JITCompiler::linkOSRExits()
failureJumps.link(this);
else
info.m_replacementDestination = label();
+
jitAssertHasValidCallFrame();
store32(TrustedImm32(i), &vm()->osrExitIndex);
exit.setPatchableCodeOffset(patchableJump());
@@ -92,15 +102,27 @@ void JITCompiler::linkOSRExits()
void JITCompiler::compileEntry()
{
// This code currently matches the old JIT. In the function header we need to
- // pop the return address (since we do not allow any recursion on the machine
- // stack), and perform a fast stack check.
+ // save the return address and call frame via the prologue, and perform a fast stack check.
// FIXME: https://bugs.webkit.org/show_bug.cgi?id=56292
// We'll need to convert the remaining cti_ style calls (specifically the stack
// check) which will be dependent on stack layout. (We'd need to account for this in
// both normal return code and when jumping to an exception handler).
- preserveReturnAddressAfterCall(GPRInfo::regT2);
- emitPutReturnPCToCallFrameHeader(GPRInfo::regT2);
- emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
+ emitFunctionPrologue();
+ emitPutToCallFrameHeader(m_codeBlock, CallFrameSlot::codeBlock);
+}
+
+void JITCompiler::compileSetupRegistersForEntry()
+{
+ emitSaveCalleeSaves();
+ emitMaterializeTagCheckRegisters();
+}
+
+void JITCompiler::compileEntryExecutionFlag()
+{
+#if ENABLE(FTL_JIT)
+ if (m_graph.m_plan.canTierUpAndOSREnter())
+ store8(TrustedImm32(0), &m_jitCode->neverExecutedEntry);
+#endif // ENABLE(FTL_JIT)
}
void JITCompiler::compileBody()
@@ -114,32 +136,44 @@ void JITCompiler::compileBody()
void JITCompiler::compileExceptionHandlers()
{
- if (m_exceptionChecks.empty() && m_exceptionChecksWithCallFrameRollback.empty())
- return;
-
- Jump doLookup;
-
if (!m_exceptionChecksWithCallFrameRollback.empty()) {
m_exceptionChecksWithCallFrameRollback.link(this);
- emitGetCallerFrameFromCallFrameHeaderPtr(GPRInfo::argumentGPR0);
- doLookup = jump();
+
+ copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
+
+ // lookupExceptionHandlerFromCallerFrame is passed two arguments, the VM and the exec (the CallFrame*).
+ move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
+ addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
+
+#if CPU(X86)
+ // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
+ poke(GPRInfo::argumentGPR0);
+ poke(GPRInfo::argumentGPR1, 1);
+#endif
+ m_calls.append(CallLinkRecord(call(), lookupExceptionHandlerFromCallerFrame));
+
+ jumpToExceptionHandler();
}
- if (!m_exceptionChecks.empty())
+ if (!m_exceptionChecks.empty()) {
m_exceptionChecks.link(this);
- // lookupExceptionHandler is passed one argument, the exec (the CallFrame*).
- move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
- if (doLookup.isSet())
- doLookup.link(this);
+ // lookupExceptionHandler is passed two arguments, the VM and the exec (the CallFrame*).
+ move(TrustedImmPtr(vm()), GPRInfo::argumentGPR0);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR1);
#if CPU(X86)
- // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
- poke(GPRInfo::argumentGPR0);
+ // FIXME: should use the call abstraction, but this is currently in the SpeculativeJIT layer!
+ poke(GPRInfo::argumentGPR0);
+ poke(GPRInfo::argumentGPR1, 1);
#endif
- m_calls.append(CallLinkRecord(call(), lookupExceptionHandler));
- jumpToExceptionHandler();
+ m_calls.append(CallLinkRecord(call(), lookupExceptionHandler));
+
+ jumpToExceptionHandler();
+ }
}
void JITCompiler::link(LinkBuffer& linkBuffer)
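A note on the convention change in compileExceptionHandlers() above: both lookup helpers now receive the VM as a first argument alongside the CallFrame, and on 32-bit x86, where arguments travel on the stack, the JIT pokes both into the outgoing argument slots before the call. A hedged sketch of the C-side shape being called into, with VM and ExecState reduced to opaque types (the exact JSC declarations differ):

    // Sketch of the two-argument calling convention targeted above.
    struct VM;        // opaque here; the real type lives in JSC
    struct ExecState; // JSC's CallFrame

    extern "C" {
        void lookupExceptionHandler(VM*, ExecState*);
        void lookupExceptionHandlerFromCallerFrame(VM*, ExecState*);
    }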
@@ -148,15 +182,18 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
m_jitCode->common.frameRegisterCount = m_graph.frameRegisterCount();
m_jitCode->common.requiredRegisterCountForExit = m_graph.requiredRegisterCountForExit();
- if (!m_graph.m_inlineCallFrames->isEmpty())
- m_jitCode->common.inlineCallFrames = m_graph.m_inlineCallFrames.release();
+ if (!m_graph.m_plan.inlineCallFrames->isEmpty())
+ m_jitCode->common.inlineCallFrames = m_graph.m_plan.inlineCallFrames;
+
+#if USE(JSVALUE32_64)
+ m_jitCode->common.doubleConstants = WTFMove(m_graph.m_doubleConstants);
+#endif
- m_jitCode->common.machineCaptureStart = m_graph.m_machineCaptureStart;
- m_jitCode->common.slowArguments = std::move(m_graph.m_slowArguments);
+ m_graph.registerFrozenValues();
BitVector usedJumpTables;
- for (unsigned i = m_graph.m_switchData.size(); i--;) {
- SwitchData& data = m_graph.m_switchData[i];
+ for (Bag<SwitchData>::iterator iter = m_graph.m_switchData.begin(); !!iter; ++iter) {
+ SwitchData& data = **iter;
if (!data.didUseJumpTable)
continue;
@@ -167,14 +204,14 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
usedJumpTables.set(data.switchTableIndex);
SimpleJumpTable& table = m_codeBlock->switchJumpTable(data.switchTableIndex);
- table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough->index]);
+ table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
table.ctiOffsets.grow(table.branchOffsets.size());
for (unsigned j = table.ctiOffsets.size(); j--;)
table.ctiOffsets[j] = table.ctiDefault;
for (unsigned j = data.cases.size(); j--;) {
SwitchCase& myCase = data.cases[j];
- table.ctiOffsets[myCase.value.switchLookupValue() - table.min] =
- linkBuffer.locationOf(m_blockHeads[myCase.target->index]);
+ table.ctiOffsets[myCase.value.switchLookupValue(data.kind) - table.min] =
+ linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
}
}
@@ -188,8 +225,8 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
// NOTE: we cannot clear string switch tables because (1) we're running concurrently
// and we cannot deref StringImpl's and (2) it would be weird to deref those
// StringImpl's since we refer to them.
- for (unsigned i = m_graph.m_switchData.size(); i--;) {
- SwitchData& data = m_graph.m_switchData[i];
+ for (Bag<SwitchData>::iterator switchDataIter = m_graph.m_switchData.begin(); !!switchDataIter; ++switchDataIter) {
+ SwitchData& data = **switchDataIter;
if (!data.didUseJumpTable)
continue;
@@ -197,7 +234,7 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
continue;
StringJumpTable& table = m_codeBlock->stringSwitchJumpTable(data.switchTableIndex);
- table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough->index]);
+ table.ctiDefault = linkBuffer.locationOf(m_blockHeads[data.fallThrough.block->index]);
StringJumpTable::StringOffsetTable::iterator iter;
StringJumpTable::StringOffsetTable::iterator end = table.offsetTable.end();
for (iter = table.offsetTable.begin(); iter != end; ++iter)
@@ -206,7 +243,7 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
SwitchCase& myCase = data.cases[j];
iter = table.offsetTable.find(myCase.value.stringImpl());
RELEASE_ASSERT(iter != end);
- iter->value.ctiOffset = linkBuffer.locationOf(m_blockHeads[myCase.target->index]);
+ iter->value.ctiOffset = linkBuffer.locationOf(m_blockHeads[myCase.target.block->index]);
}
}
@@ -221,24 +258,46 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
for (unsigned i = 0; i < m_ins.size(); ++i) {
StructureStubInfo& info = *m_ins[i].m_stubInfo;
- CodeLocationCall callReturnLocation = linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->call());
- info.patch.deltaCallToDone = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_done));
- info.patch.deltaCallToJump = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_jump));
- info.callReturnLocation = callReturnLocation;
- info.patch.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->label()));
+
+ CodeLocationLabel start = linkBuffer.locationOf(m_ins[i].m_jump);
+ info.patch.start = start;
+
+ ptrdiff_t inlineSize = MacroAssembler::differenceBetweenCodePtr(
+ start, linkBuffer.locationOf(m_ins[i].m_done));
+ RELEASE_ASSERT(inlineSize >= 0);
+ info.patch.inlineSize = inlineSize;
+
+ info.patch.deltaFromStartToSlowPathCallLocation = MacroAssembler::differenceBetweenCodePtr(
+ start, linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->call()));
+
+ info.patch.deltaFromStartToSlowPathStart = MacroAssembler::differenceBetweenCodePtr(
+ start, linkBuffer.locationOf(m_ins[i].m_slowPathGenerator->label()));
+ }
+
+ for (auto& record : m_jsCalls) {
+ CallLinkInfo& info = *record.info;
+ linkBuffer.link(record.slowCall, FunctionPtr(m_vm->getCTIStub(linkCallThunkGenerator).code().executableAddress()));
+ info.setCallLocations(
+ CodeLocationLabel(linkBuffer.locationOfNearCall(record.slowCall)),
+ CodeLocationLabel(linkBuffer.locationOf(record.targetToCheck)),
+ linkBuffer.locationOfNearCall(record.fastCall));
}
- m_codeBlock->setNumberOfCallLinkInfos(m_jsCalls.size());
- for (unsigned i = 0; i < m_jsCalls.size(); ++i) {
- CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
- info.callType = m_jsCalls[i].m_callType;
- info.isDFG = true;
- info.codeOrigin = m_jsCalls[i].m_codeOrigin;
- linkBuffer.link(m_jsCalls[i].m_slowCall, FunctionPtr((m_vm->getCTIStub(info.callType == CallLinkInfo::Construct ? linkConstructThunkGenerator : linkCallThunkGenerator)).code().executableAddress()));
- info.callReturnLocation = linkBuffer.locationOfNearCall(m_jsCalls[i].m_slowCall);
- info.hotPathBegin = linkBuffer.locationOf(m_jsCalls[i].m_targetToCheck);
- info.hotPathOther = linkBuffer.locationOfNearCall(m_jsCalls[i].m_fastCall);
- info.calleeGPR = static_cast<unsigned>(m_jsCalls[i].m_callee);
+ for (JSDirectCallRecord& record : m_jsDirectCalls) {
+ CallLinkInfo& info = *record.info;
+ linkBuffer.link(record.call, linkBuffer.locationOf(record.slowPath));
+ info.setCallLocations(
+ CodeLocationLabel(),
+ linkBuffer.locationOf(record.slowPath),
+ linkBuffer.locationOfNearCall(record.call));
+ }
+
+ for (JSDirectTailCallRecord& record : m_jsDirectTailCalls) {
+ CallLinkInfo& info = *record.info;
+ info.setCallLocations(
+ linkBuffer.locationOf(record.patchableJump),
+ linkBuffer.locationOf(record.slowPath),
+ linkBuffer.locationOfNearCall(record.call));
}
MacroAssemblerCodeRef osrExitThunk = vm()->getCTIStub(osrExitGenerationThunkGenerator);
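The stub-info rewrite above switches from call-relative deltas to start-relative geometry: each inline cache records where its inline fast path begins, how many bytes it spans, and where the slow-path call and slow-path code sit relative to that start. A self-contained sketch of the arithmetic (field names follow the diff; the surrounding StructureStubInfo layout is assumed):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>

    // Illustrative stand-in for the info.patch fields populated above.
    struct ICGeometry {
        const uint8_t* start;                                // first byte of the inline fast path
        std::ptrdiff_t inlineSize;                           // start -> "done" label
        std::ptrdiff_t deltaFromStartToSlowPathCallLocation; // start -> slow-path call
        std::ptrdiff_t deltaFromStartToSlowPathStart;        // start -> slow-path code
    };

    ICGeometry computeGeometry(const uint8_t* start, const uint8_t* done,
                               const uint8_t* slowCall, const uint8_t* slowStart)
    {
        ICGeometry g;
        g.start = start;
        g.inlineSize = done - start;
        assert(g.inlineSize >= 0); // mirrors the RELEASE_ASSERT above
        g.deltaFromStartToSlowPathCallLocation = slowCall - start;
        g.deltaFromStartToSlowPathStart = slowStart - start;
        return g;
    }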
@@ -266,23 +325,65 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
}
} else
ASSERT(!m_exitSiteLabels.size());
-
+
m_jitCode->common.compilation = m_graph.compilation();
+ // Link new DFG exception handlers and remove baseline JIT handlers.
+ m_codeBlock->clearExceptionHandlers();
+ for (unsigned i = 0; i < m_exceptionHandlerOSRExitCallSites.size(); i++) {
+ OSRExitCompilationInfo& info = m_exceptionHandlerOSRExitCallSites[i].exitInfo;
+ if (info.m_replacementDestination.isSet()) {
+ // If this is *not* set, it means that we already jumped to the OSR exit in pure generated control flow,
+ // i.e., we explicitly emitted an exceptionCheck that we know will be caught in this machine frame.
+ // If this *is* set, it means we will be landing at this code location via genericUnwind, from an
+ // exception thrown in a child call frame.
+ CodeLocationLabel catchLabel = linkBuffer.locationOf(info.m_replacementDestination);
+ HandlerInfo newExceptionHandler = m_exceptionHandlerOSRExitCallSites[i].baselineExceptionHandler;
+ CallSiteIndex callSite = m_exceptionHandlerOSRExitCallSites[i].callSiteIndex;
+ newExceptionHandler.start = callSite.bits();
+ newExceptionHandler.end = callSite.bits() + 1;
+ newExceptionHandler.nativeCode = catchLabel;
+ m_codeBlock->appendExceptionHandler(newExceptionHandler);
+ }
+ }
+
+ if (m_pcToCodeOriginMapBuilder.didBuildMapping())
+ m_codeBlock->setPCToCodeOriginMap(std::make_unique<PCToCodeOriginMap>(WTFMove(m_pcToCodeOriginMapBuilder), linkBuffer));
}
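The handler rewiring above repurposes HandlerInfo's start/end, which the baseline JIT keys on bytecode offsets, as a one-element CallSiteIndex range [callSite, callSite + 1). A minimal sketch of the lookup this enables during unwinding (illustrative types; JSC's HandlerInfo carries more fields):

    #include <vector>

    struct Handler {
        unsigned start;    // first call site covered (inclusive)
        unsigned end;      // last call site covered (exclusive)
        void* nativeCode;  // catch label to jump to
    };

    const Handler* findHandlerForCallSite(const std::vector<Handler>& handlers, unsigned callSiteIndex)
    {
        for (const Handler& handler : handlers) {
            if (callSiteIndex >= handler.start && callSiteIndex < handler.end)
                return &handler;
        }
        return nullptr;
    }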
void JITCompiler::compile()
{
- SamplingRegion samplingRegion("DFG Backend");
-
setStartOfCode();
compileEntry();
- m_speculative = adoptPtr(new SpeculativeJIT(*this));
+ m_speculative = std::make_unique<SpeculativeJIT>(*this);
+
+ // Plant a check that sufficient space is available in the JSStack.
+ addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
+ Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfSoftStackLimit()), GPRInfo::regT1);
+
+ addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
+ checkStackPointerAlignment();
+ compileSetupRegistersForEntry();
+ compileEntryExecutionFlag();
compileBody();
setEndOfMainPath();
+ // === Footer code generation ===
+ //
+ // Generate the stack overflow handling; if the stack check in the entry head fails,
+ // we need to call out to a helper function to throw the StackOverflowError.
+ stackOverflow.link(this);
+
+ emitStoreCodeOrigin(CodeOrigin(0));
+
+ if (maxFrameExtentForSlowPathCall)
+ addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
+
+ m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);
+
// Generate slow path code.
- m_speculative->runSlowPathGenerators();
+ m_speculative->runSlowPathGenerators(m_pcToCodeOriginMapBuilder);
+ m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), PCToCodeOriginMapBuilder::defaultCodeOrigin());
compileExceptionHandlers();
linkOSRExits();
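For readers tracing the stack check planted in compile() above: the machine stack grows down and DFG locals occupy negative Register-sized offsets from the call frame register, so the check computes the lowest address the frame will touch and takes the overflow path when the VM's soft stack limit lies above it. A sketch of that arithmetic under those assumptions:

    #include <cstdint>

    constexpr unsigned registerSize = 8; // sizeof(Register): one JSC stack slot

    bool stackWouldOverflow(std::uintptr_t callFrame, unsigned requiredRegisters, std::uintptr_t softStackLimit)
    {
        // virtualRegisterForLocal(requiredRegisters - 1).offset() is a negative
        // slot index; scaling by the slot size yields the frame's lowest byte.
        std::uintptr_t frameBottom = callFrame - std::uintptr_t(requiredRegisters) * registerSize;
        return softStackLimit > frameBottom; // mirrors branchPtr(Above, limit, regT1)
    }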
@@ -290,32 +391,27 @@ void JITCompiler::compile()
// Create OSR entry trampolines if necessary.
m_speculative->createOSREntries();
setEndOfCode();
-}
-void JITCompiler::link()
-{
- OwnPtr<LinkBuffer> linkBuffer = adoptPtr(new LinkBuffer(*m_vm, this, m_codeBlock, JITCompilationCanFail));
+ auto linkBuffer = std::make_unique<LinkBuffer>(*m_vm, *this, m_codeBlock, JITCompilationCanFail);
if (linkBuffer->didFailToAllocate()) {
- m_graph.m_plan.finalizer = adoptPtr(new FailedFinalizer(m_graph.m_plan));
+ m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
return;
}
link(*linkBuffer);
m_speculative->linkOSREntries(*linkBuffer);
-
+
m_jitCode->shrinkToFit();
codeBlock()->shrinkToFit(CodeBlock::LateShrink);
disassemble(*linkBuffer);
- m_graph.m_plan.finalizer = adoptPtr(new JITFinalizer(
- m_graph.m_plan, m_jitCode.release(), linkBuffer.release()));
+ m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
+ m_graph.m_plan, m_jitCode.releaseNonNull(), WTFMove(linkBuffer));
}
void JITCompiler::compileFunction()
{
- SamplingRegion samplingRegion("DFG Backend");
-
setStartOfCode();
compileEntry();
@@ -325,30 +421,36 @@ void JITCompiler::compileFunction()
// so enter after this.
Label fromArityCheck(this);
// Plant a check that sufficient space is available in the JSStack.
- addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit()).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
- Jump stackCheck = branchPtr(Above, AbsoluteAddress(m_vm->addressOfJSStackLimit()), GPRInfo::regT1);
- // Return here after stack check.
- Label fromStackCheck = label();
+ addPtr(TrustedImm32(virtualRegisterForLocal(m_graph.requiredRegisterCountForExecutionAndExit() - 1).offset() * sizeof(Register)), GPRInfo::callFrameRegister, GPRInfo::regT1);
+ Jump stackOverflow = branchPtr(Above, AbsoluteAddress(m_vm->addressOfSoftStackLimit()), GPRInfo::regT1);
+
+ // Move the stack pointer down to accommodate locals
+ addPtr(TrustedImm32(m_graph.stackPointerOffset() * sizeof(Register)), GPRInfo::callFrameRegister, stackPointerRegister);
+ checkStackPointerAlignment();
+ compileSetupRegistersForEntry();
+ compileEntryExecutionFlag();
// === Function body code generation ===
- m_speculative = adoptPtr(new SpeculativeJIT(*this));
+ m_speculative = std::make_unique<SpeculativeJIT>(*this);
compileBody();
setEndOfMainPath();
// === Function footer code generation ===
//
- // Generate code to perform the slow stack check (if the fast one in
+ // Generate code to perform the stack overflow handling (if the stack check in
// the function header fails), and generate the entry point with arity check.
//
- // Generate the stack check; if the fast check in the function head fails,
- // we need to call out to a helper function to check whether more space is available.
- // FIXME: change this from a cti call to a DFG style operation (normal C calling conventions).
- stackCheck.link(this);
+ // Generate the stack overflow handling; if the stack check in the function head fails,
+ // we need to call out to a helper function to throw the StackOverflowError.
+ stackOverflow.link(this);
emitStoreCodeOrigin(CodeOrigin(0));
- m_speculative->callOperationWithCallFrameRollbackOnException(operationStackCheck, m_codeBlock);
- jump(fromStackCheck);
+
+ if (maxFrameExtentForSlowPathCall)
+ addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
+
+ m_speculative->callOperationWithCallFrameRollbackOnException(operationThrowStackOverflowError, m_codeBlock);
// The fast entry point into a function does not check the correct number of arguments
// have been passed to the call (we only use the fast entry point where we can statically
@@ -358,17 +460,23 @@ void JITCompiler::compileFunction()
m_arityCheck = label();
compileEntry();
- load32(AssemblyHelpers::payloadFor((VirtualRegister)JSStack::ArgumentCount), GPRInfo::regT1);
+ load32(AssemblyHelpers::payloadFor((VirtualRegister)CallFrameSlot::argumentCount), GPRInfo::regT1);
branch32(AboveOrEqual, GPRInfo::regT1, TrustedImm32(m_codeBlock->numParameters())).linkTo(fromArityCheck, this);
emitStoreCodeOrigin(CodeOrigin(0));
+ if (maxFrameExtentForSlowPathCall)
+ addPtr(TrustedImm32(-maxFrameExtentForSlowPathCall), stackPointerRegister);
m_speculative->callOperationWithCallFrameRollbackOnException(m_codeBlock->m_isConstructor ? operationConstructArityCheck : operationCallArityCheck, GPRInfo::regT0);
- branchTest32(Zero, GPRInfo::regT0).linkTo(fromArityCheck, this);
+ if (maxFrameExtentForSlowPathCall)
+ addPtr(TrustedImm32(maxFrameExtentForSlowPathCall), stackPointerRegister);
+ branchTest32(Zero, GPRInfo::returnValueGPR).linkTo(fromArityCheck, this);
emitStoreCodeOrigin(CodeOrigin(0));
+ move(GPRInfo::returnValueGPR, GPRInfo::argumentGPR0);
m_callArityFixup = call();
jump(fromArityCheck);
// Generate slow path code.
- m_speculative->runSlowPathGenerators();
+ m_speculative->runSlowPathGenerators(m_pcToCodeOriginMapBuilder);
+ m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), PCToCodeOriginMapBuilder::defaultCodeOrigin());
compileExceptionHandlers();
linkOSRExits();
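A sketch of the arity path reshaped above, under the assumption (not confirmed by this diff) that the arity-check operation returns zero when the frame already fits and otherwise a fixup value that travels to the arityFixupGenerator thunk in argumentGPR0; the stack pointer excursions by maxFrameExtentForSlowPathCall bracket the slow call so the C helper has the frame extent it requires:

    // Illustrative control flow only; the real path is emitted as machine code.
    int arityCheckSlowPath(int argumentCount, int numParameters)
    {
        if (argumentCount >= numParameters)
            return 0; // branch32(AboveOrEqual, ...).linkTo(fromArityCheck, this)
        // Assumption: a nonzero return (in returnValueGPR) encodes the fixup the
        // arity fixup thunk must perform; zero means fall through to fromArityCheck.
        return numParameters - argumentCount;
    }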
@@ -376,14 +484,11 @@ void JITCompiler::compileFunction()
// Create OSR entry trampolines if necessary.
m_speculative->createOSREntries();
setEndOfCode();
-}
-void JITCompiler::linkFunction()
-{
// === Link ===
- OwnPtr<LinkBuffer> linkBuffer = adoptPtr(new LinkBuffer(*m_vm, this, m_codeBlock, JITCompilationCanFail));
+ auto linkBuffer = std::make_unique<LinkBuffer>(*m_vm, *this, m_codeBlock, JITCompilationCanFail);
if (linkBuffer->didFailToAllocate()) {
- m_graph.m_plan.finalizer = adoptPtr(new FailedFinalizer(m_graph.m_plan));
+ m_graph.m_plan.finalizer = std::make_unique<FailedFinalizer>(m_graph.m_plan);
return;
}
link(*linkBuffer);
@@ -392,25 +497,165 @@ void JITCompiler::linkFunction()
m_jitCode->shrinkToFit();
codeBlock()->shrinkToFit(CodeBlock::LateShrink);
- linkBuffer->link(m_callArityFixup, FunctionPtr((m_vm->getCTIStub(arityFixup)).code().executableAddress()));
+ linkBuffer->link(m_callArityFixup, FunctionPtr((m_vm->getCTIStub(arityFixupGenerator)).code().executableAddress()));
disassemble(*linkBuffer);
MacroAssemblerCodePtr withArityCheck = linkBuffer->locationOf(m_arityCheck);
- m_graph.m_plan.finalizer = adoptPtr(new JITFinalizer(
- m_graph.m_plan, m_jitCode.release(), linkBuffer.release(), withArityCheck));
+ m_graph.m_plan.finalizer = std::make_unique<JITFinalizer>(
+ m_graph.m_plan, m_jitCode.releaseNonNull(), WTFMove(linkBuffer), withArityCheck);
}
void JITCompiler::disassemble(LinkBuffer& linkBuffer)
{
- if (shouldShowDisassembly())
+ if (shouldDumpDisassembly()) {
m_disassembler->dump(linkBuffer);
+ linkBuffer.didAlreadyDisassemble();
+ }
if (m_graph.m_plan.compilation)
m_disassembler->reportToProfiler(m_graph.m_plan.compilation.get(), linkBuffer);
}
+#if USE(JSVALUE32_64)
+void* JITCompiler::addressOfDoubleConstant(Node* node)
+{
+ double value = node->asNumber();
+ int64_t valueBits = bitwise_cast<int64_t>(value);
+ auto it = m_graph.m_doubleConstantsMap.find(valueBits);
+ if (it != m_graph.m_doubleConstantsMap.end())
+ return it->second;
+
+ if (!m_graph.m_doubleConstants)
+ m_graph.m_doubleConstants = std::make_unique<Bag<double>>();
+
+ double* addressInConstantPool = m_graph.m_doubleConstants->add();
+ *addressInConstantPool = value;
+ m_graph.m_doubleConstantsMap[valueBits] = addressInConstantPool;
+ return addressInConstantPool;
+}
+#endif
+
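The constant-pool helper above dedupes by bit pattern rather than by double equality, so -0.0 and +0.0 get distinct slots and NaNs key stably, and it hands out addresses that must never move once embedded in code. A self-contained sketch of the same idea, with std::deque standing in for JSC's Bag<double> to provide the address stability:

    #include <cstdint>
    #include <cstring>
    #include <deque>
    #include <unordered_map>

    // Illustrative stand-in for m_doubleConstants / m_doubleConstantsMap above.
    class DoubleConstantPool {
    public:
        double* address(double value)
        {
            int64_t bits;
            std::memcpy(&bits, &value, sizeof(bits)); // bitwise_cast: key by bit pattern
            auto it = m_map.find(bits);
            if (it != m_map.end())
                return it->second;
            m_pool.push_back(value);       // deque never relocates existing elements
            double* slot = &m_pool.back();
            m_map.emplace(bits, slot);
            return slot;
        }
    private:
        std::deque<double> m_pool;
        std::unordered_map<int64_t, double*> m_map;
    };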
+void JITCompiler::noticeOSREntry(BasicBlock& basicBlock, JITCompiler::Label blockHead, LinkBuffer& linkBuffer)
+{
+ // OSR entry is not allowed into blocks deemed unreachable by control flow analysis.
+ if (!basicBlock.intersectionOfCFAHasVisited)
+ return;
+
+ OSREntryData* entry = m_jitCode->appendOSREntryData(basicBlock.bytecodeBegin, linkBuffer.offsetOf(blockHead));
+
+ entry->m_expectedValues = basicBlock.intersectionOfPastValuesAtHead;
+
+ // Fix the expected values: in our protocol, a dead variable will have an expected
+ // value of (None, []). But the old JIT may stash some values there. So we really
+ // need (Top, TOP).
+ for (size_t argument = 0; argument < basicBlock.variablesAtHead.numberOfArguments(); ++argument) {
+ Node* node = basicBlock.variablesAtHead.argument(argument);
+ if (!node || !node->shouldGenerate())
+ entry->m_expectedValues.argument(argument).makeHeapTop();
+ }
+ for (size_t local = 0; local < basicBlock.variablesAtHead.numberOfLocals(); ++local) {
+ Node* node = basicBlock.variablesAtHead.local(local);
+ if (!node || !node->shouldGenerate())
+ entry->m_expectedValues.local(local).makeHeapTop();
+ else {
+ VariableAccessData* variable = node->variableAccessData();
+ entry->m_machineStackUsed.set(variable->machineLocal().toLocal());
+
+ switch (variable->flushFormat()) {
+ case FlushedDouble:
+ entry->m_localsForcedDouble.set(local);
+ break;
+ case FlushedInt52:
+ entry->m_localsForcedAnyInt.set(local);
+ break;
+ default:
+ break;
+ }
+
+ if (variable->local() != variable->machineLocal()) {
+ entry->m_reshufflings.append(
+ OSREntryReshuffling(
+ variable->local().offset(), variable->machineLocal().offset()));
+ }
+ }
+ }
+
+ entry->m_reshufflings.shrinkToFit();
+}
+
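The widening step in noticeOSREntry() above exists because a variable that is dead at the block head must not constrain OSR entry: the baseline frame may hold any stale value there, so the expected value is forced to heap-top ("accept anything"). A minimal model of that rule:

    #include <cstddef>
    #include <vector>

    // Illustrative stand-in for AbstractValue::makeHeapTop() over a block's head.
    struct ExpectedValue { bool isHeapTop = false; /* the real AbstractValue is richer */ };

    void widenDeadVariables(std::vector<ExpectedValue>& expected, const std::vector<bool>& liveAtHead)
    {
        for (std::size_t i = 0; i < expected.size(); ++i) {
            if (!liveAtHead[i])
                expected[i].isHeapTop = true; // mirrors makeHeapTop() above
        }
    }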
+void JITCompiler::appendExceptionHandlingOSRExit(ExitKind kind, unsigned eventStreamIndex, CodeOrigin opCatchOrigin, HandlerInfo* exceptionHandler, CallSiteIndex callSite, MacroAssembler::JumpList jumpsToFail)
+{
+ OSRExit exit(kind, JSValueRegs(), MethodOfGettingAValueProfile(), m_speculative.get(), eventStreamIndex);
+ exit.m_codeOrigin = opCatchOrigin;
+ exit.m_exceptionHandlerCallSiteIndex = callSite;
+ OSRExitCompilationInfo& exitInfo = appendExitInfo(jumpsToFail);
+ jitCode()->appendOSRExit(exit);
+ m_exceptionHandlerOSRExitCallSites.append(ExceptionHandlingOSRExitInfo { exitInfo, *exceptionHandler, callSite });
+}
+
+void JITCompiler::exceptionCheck()
+{
+ // It's important that we use origin.forExit here. Consider the case where we
+ // hoist a string addition out of a loop and then exit at the point of that
+ // concatenation because of an out-of-memory exception.
+ // If the original loop had a try/catch around the string concatenation, and we
+ // "catch" that exception inside the loop, then the loop's induction variable
+ // will be undefined in the OSR exit value recovery. It's more defensible, then,
+ // for the string concatenation not to be caught by the for loop's try/catch.
+ // Here is the program I'm speaking about:
+ //
+ // >>>> let's presume "c = a + b" gets hoisted here.
+ // for (var i = 0; i < length; i++) {
+ // try {
+ // c = a + b
+ // } catch(e) {
+ // If we threw an out-of-memory error, and we caught the exception
+ // right here, then "i" would almost certainly be undefined, which
+ // would make no sense.
+ // ...
+ // }
+ // }
+ CodeOrigin opCatchOrigin;
+ HandlerInfo* exceptionHandler;
+ bool willCatchException = m_graph.willCatchExceptionInMachineFrame(m_speculative->m_currentNode->origin.forExit, opCatchOrigin, exceptionHandler);
+ if (willCatchException) {
+ unsigned streamIndex = m_speculative->m_outOfLineStreamIndex ? *m_speculative->m_outOfLineStreamIndex : m_speculative->m_stream->size();
+ MacroAssembler::Jump hadException = emitNonPatchableExceptionCheck();
+ // We assume here that this is called after callOperation()/appendCall() is called.
+ appendExceptionHandlingOSRExit(ExceptionCheck, streamIndex, opCatchOrigin, exceptionHandler, m_jitCode->common.lastCallSite(), hadException);
+ } else
+ m_exceptionChecks.append(emitExceptionCheck());
+}
+
+CallSiteIndex JITCompiler::recordCallSiteAndGenerateExceptionHandlingOSRExitIfNeeded(const CodeOrigin& callSiteCodeOrigin, unsigned eventStreamIndex)
+{
+ CodeOrigin opCatchOrigin;
+ HandlerInfo* exceptionHandler;
+ bool willCatchException = m_graph.willCatchExceptionInMachineFrame(callSiteCodeOrigin, opCatchOrigin, exceptionHandler);
+ CallSiteIndex callSite = addCallSite(callSiteCodeOrigin);
+ if (willCatchException)
+ appendExceptionHandlingOSRExit(GenericUnwind, eventStreamIndex, opCatchOrigin, exceptionHandler, callSite);
+ return callSite;
+}
+
+void JITCompiler::setEndOfMainPath()
+{
+ m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), m_speculative->m_origin.semantic);
+ if (LIKELY(!m_disassembler))
+ return;
+ m_disassembler->setEndOfMainPath(labelIgnoringWatchpoints());
+}
+
+void JITCompiler::setEndOfCode()
+{
+ m_pcToCodeOriginMapBuilder.appendItem(labelIgnoringWatchpoints(), PCToCodeOriginMapBuilder::defaultCodeOrigin());
+ if (LIKELY(!m_disassembler))
+ return;
+ m_disassembler->setEndOfCode(labelIgnoringWatchpoints());
+}
+
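Both helpers above feed the PCToCodeOriginMapBuilder: each appendItem() records that machine code from this label onward belongs to the given code origin, so the finished map is an ordered list of (PC offset, origin) items and a lookup finds the last item at or before a PC. A sketch of that lookup (illustrative types; the real map is compressed):

    #include <cstdint>
    #include <vector>

    struct PCOriginItem { uint32_t pcOffset; uint32_t bytecodeIndex; };

    const PCOriginItem* findOrigin(const std::vector<PCOriginItem>& items, uint32_t pcOffset)
    {
        const PCOriginItem* best = nullptr;
        for (const PCOriginItem& item : items) {
            if (item.pcOffset > pcOffset)
                break;       // items were appended in increasing PC order
            best = &item;
        }
        return best;
    }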
} } // namespace JSC::DFG
#endif // ENABLE(DFG_JIT)