author  Lorry Tar Creator <lorry-tar-importer@lorry>  2015-05-20 09:56:07 +0000
committer  Lorry Tar Creator <lorry-tar-importer@lorry>  2015-05-20 09:56:07 +0000
commit  41386e9cb918eed93b3f13648cbef387e371e451 (patch)
tree  a97f9d7bd1d9d091833286085f72da9d83fd0606 /Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
parent  e15dd966d523731101f70ccf768bba12435a0208 (diff)
download  WebKitGtk-tarball-41386e9cb918eed93b3f13648cbef387e371e451.tar.gz
webkitgtk-2.4.9
Diffstat (limited to 'Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp')
-rw-r--r--  Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp  227
1 file changed, 153 insertions, 74 deletions
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
index 0851a58cf..9402d115e 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2011, 2013-2015 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
@@ -31,25 +31,23 @@
#include "DFGOperations.h"
#include "DFGOSRExitCompilerCommon.h"
#include "DFGSpeculativeJIT.h"
-#include "JSCInlines.h"
+#include "Operations.h"
#include <wtf/DataLog.h>
namespace JSC { namespace DFG {
void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
{
- // Pro-forma stuff.
+ // 1) Pro-forma stuff.
if (Options::printEachOSRExit()) {
SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
debugInfo->codeBlock = m_jit.codeBlock();
- debugInfo->kind = exit.m_kind;
- debugInfo->bytecodeOffset = exit.m_codeOrigin.bytecodeIndex;
m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
}
- // Perform speculation recovery. This only comes into play when an operation
- // starts mutating state before verifying the speculation it has already made.
+ // 2) Perform speculation recovery. This only comes into play when an operation
+ // starts mutating state before verifying the speculation it has already made.
if (recovery) {
switch (recovery->type()) {
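
The recovery cases this hunk elides undo state that was mutated before a speculation check fired. The classic example is SpeculativeAdd: a speculative integer add writes its result, the overflow check fails, and the exit path must subtract the operand back out before values can be recovered. A standalone model of that undo step (names here are illustrative, not the DFG's):

    #include <cassert>
    #include <cstdint>

    // Model of the SpeculativeAdd undo: 'dest' was already mutated by the
    // add when the overflow check failed, so subtract 'src' back out.
    static int32_t undoSpeculativeAdd(int32_t dest, int32_t src)
    {
        return dest - src; // mirrors the emitted sub32(src, dest)
    }

    int main()
    {
        int32_t original = 41;
        int32_t src = 1;
        int32_t mutated = original + src; // state changed before the check
        assert(undoSpeculativeAdd(mutated, src) == original);
        return 0;
    }
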
@@ -65,7 +63,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
}
}
- // Refine some value profile, if appropriate.
+ // 3) Refine some value profile, if appropriate.
if (!!exit.m_jsValueSource) {
if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
@@ -102,8 +100,13 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2);
scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2, scratch1);
+#if CPU(ARM64)
+ m_jit.pushToSave(scratch1);
+ m_jit.pushToSave(scratch2);
+#else
m_jit.push(scratch1);
m_jit.push(scratch2);
+#endif
GPRReg value;
if (exit.m_jsValueSource.isAddress()) {
@@ -112,15 +115,20 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
} else
value = exit.m_jsValueSource.payloadGPR();
- m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureIDOffset()), scratch1);
- m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructureID());
+ m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureOffset()), scratch1);
+ m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructure());
m_jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingTypeOffset()), scratch1);
m_jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
m_jit.lshift32(scratch1, scratch2);
m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
+#if CPU(ARM64)
+ m_jit.popToRestore(scratch2);
+ m_jit.popToRestore(scratch1);
+#else
m_jit.pop(scratch2);
m_jit.pop(scratch1);
+#endif
}
}
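
The snippet above teaches the array profile two things: the structure the failing value actually had, and a one-hot bit for that structure's indexing type OR'd into the profile's arrayModes word. (The ARM64 pushToSave/popToRestore variants exist because AArch64 requires the stack pointer to stay 16-byte aligned, which a plain 8-byte push/pop pair would break.) A standalone model of the bit math, with mock types standing in for Structure and ArrayProfile:

    #include <cassert>
    #include <cstdint>

    // Mock stand-ins, just enough to show the update the emitted code performs.
    struct MockStructure { uint8_t indexingType; };
    struct MockArrayProfile {
        const MockStructure* lastSeenStructure = nullptr; // storePtr target
        uint32_t arrayModes = 0;                          // or32 target
    };

    static void recordArrayUse(MockArrayProfile& profile, const MockStructure& structure)
    {
        profile.lastSeenStructure = &structure;
        profile.arrayModes |= 1u << structure.indexingType; // move(1) + lshift32 + or32
    }

    int main()
    {
        MockStructure contiguous { 2 }; // illustrative indexing-type value
        MockArrayProfile profile;
        recordArrayUse(profile, contiguous);
        assert(profile.arrayModes == (1u << 2));
        return 0;
    }
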
@@ -131,14 +139,22 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
// Save a register so we can use it.
GPRReg scratch = AssemblyHelpers::selectScratchGPR(exit.m_jsValueSource.base());
+#if CPU(ARM64)
+ m_jit.pushToSave(scratch);
+#else
m_jit.push(scratch);
+#endif
m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), scratch);
m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), scratch);
m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
+#if CPU(ARM64)
+ m_jit.popToRestore(scratch);
+#else
m_jit.pop(scratch);
+#endif
} else if (exit.m_jsValueSource.hasKnownTag()) {
m_jit.store32(AssemblyHelpers::TrustedImm32(exit.m_jsValueSource.tag()), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
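
These stores go through two 32-bit halves because JSVALUE32_64 packs every value into one 64-bit slot holding a payload and a tag. A sketch of the layout the tag/payload accesses above assume (the tag constant is illustrative):

    #include <cstdint>
    #include <cstdio>

    // Sketch of the 32-bit EncodedJSValue layout: one 64-bit slot viewed
    // as a payload/tag pair (little-endian field order shown).
    union EncodedValueSketch {
        uint64_t encoded;
        struct {
            uint32_t payload; // cell pointer, int32, bool, ...
            uint32_t tag;     // says which kind of payload this is
        } asBits;
    };

    int main()
    {
        EncodedValueSketch bucket;
        bucket.asBits.tag = 0xFFFFFFFBu; // illustrative cell-style tag
        bucket.asBits.payload = 0x1234u; // illustrative payload bits
        std::printf("encoded = %016llx\n",
            static_cast<unsigned long long>(bucket.encoded));
        return 0;
    }
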
@@ -152,7 +168,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
// Do a simplified OSR exit. See DFGOSRExitCompiler64.cpp's comment regarding how and why we
// do this simple approach.
- // Save all state from GPRs into the scratch buffer.
+ // 4) Save all state from GPRs into the scratch buffer.
ScratchBuffer* scratchBuffer = m_jit.vm()->scratchBufferForSize(sizeof(EncodedJSValue) * operands.size());
EncodedJSValue* scratch = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
@@ -185,7 +201,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
// Now all GPRs are free to reuse.
- // Save all state from FPRs into the scratch buffer.
+ // 5) Save all state from FPRs into the scratch buffer.
for (size_t index = 0; index < operands.size(); ++index) {
const ValueRecovery& recovery = operands[index];
@@ -203,9 +219,9 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
// Now all FPRs are free to reuse.
- // Save all state from the stack into the scratch buffer. For simplicity we
- // do this even for state that's already in the right place on the stack.
- // It makes things simpler later.
+ // 6) Save all state from the stack into the scratch buffer. For simplicity we
+ // do this even for state that's already in the right place on the stack.
+ // It makes things simpler later.
for (size_t index = 0; index < operands.size(); ++index) {
const ValueRecovery& recovery = operands[index];
@@ -235,15 +251,9 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
}
}
- // Need to ensure that the stack pointer accounts for the worst-case stack usage at exit. This
- // could toast some stack that the DFG used. We need to do it before storing to stack offsets
- // used by baseline.
- m_jit.addPtr(
- CCallHelpers::TrustedImm32(
- -m_jit.codeBlock()->jitCode()->dfgCommon()->requiredRegisterCountForExit * sizeof(Register)),
- CCallHelpers::framePointerRegister, CCallHelpers::stackPointerRegister);
+ // 7) Do all data format conversions and store the results into the stack.
- // Do all data format conversions and store the results into the stack.
+ bool haveArguments = false;
for (size_t index = 0; index < operands.size(); ++index) {
const ValueRecovery& recovery = operands[index];
@@ -251,7 +261,9 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
switch (recovery.technique()) {
case InPair:
+ case InFPR:
case DisplacedInJSStack:
+ case DoubleDisplacedInJSStack:
m_jit.load32(
&bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.tag,
GPRInfo::regT0);
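
Folding InFPR and DoubleDisplacedInJSStack into this tag/payload copy works because, under JSVALUE32_64, a double's raw bit pattern is already its boxed encoding: once step 5 has spilled the double into a 64-bit scratch slot, copying the slot as two 32-bit halves is the same as copying the boxed value. A standalone sketch of that reinterpretation (memcpy used for well-defined type punning):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main()
    {
        double spilled = 3.14; // what storeDouble left in the scratch slot
        uint64_t slot;
        std::memcpy(&slot, &spilled, sizeof slot);

        // The exit path moves the slot as two 32-bit halves...
        uint32_t payload = static_cast<uint32_t>(slot);
        uint32_t tag = static_cast<uint32_t>(slot >> 32);

        // ...and reassembling them yields the original double, bit for bit.
        uint64_t reassembled = (static_cast<uint64_t>(tag) << 32) | payload;
        double recovered;
        std::memcpy(&recovered, &reassembled, sizeof recovered);
        assert(recovered == spilled);
        return 0;
    }
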
@@ -266,14 +278,6 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
AssemblyHelpers::payloadFor(operand));
break;
- case InFPR:
- case DoubleDisplacedInJSStack:
- m_jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0);
- m_jit.loadDouble(MacroAssembler::Address(GPRInfo::regT0), FPRInfo::fpRegT0);
- m_jit.purifyNaN(FPRInfo::fpRegT0);
- m_jit.storeDouble(FPRInfo::fpRegT0, AssemblyHelpers::addressFor(operand));
- break;
-
case UnboxedInt32InGPR:
case Int32DisplacedInJSStack:
m_jit.load32(
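
The purifyNaN call being dropped here matters on 32-bit because tags occupy the high word of the encoding: a NaN with an arbitrary payload could alias a tag value and be misread as a cell or int32. Purification collapses every NaN to one canonical quiet-NaN bit pattern that stays out of the tag range. A minimal model:

    #include <cassert>
    #include <cmath>
    #include <limits>

    // Minimal model of NaN purification: every NaN collapses to the one
    // canonical quiet NaN, so no NaN payload can alias a JSValue tag.
    static double purify(double value)
    {
        if (std::isnan(value))
            return std::numeric_limits<double>::quiet_NaN();
        return value;
    }

    int main()
    {
        assert(std::isnan(purify(std::nan("1")))); // NaNs stay NaN...
        assert(purify(2.5) == 2.5);                // ...others pass through
        return 0;
    }
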
@@ -322,9 +326,14 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
AssemblyHelpers::payloadFor(operand));
break;
- case DirectArgumentsThatWereNotCreated:
- case ClonedArgumentsThatWereNotCreated:
- // Don't do this, yet.
+ case ArgumentsThatWereNotCreated:
+ haveArguments = true;
+ m_jit.store32(
+ AssemblyHelpers::TrustedImm32(JSValue().tag()),
+ AssemblyHelpers::tagFor(operand));
+ m_jit.store32(
+ AssemblyHelpers::TrustedImm32(JSValue().payload()),
+ AssemblyHelpers::payloadFor(operand));
break;
default:
@@ -332,57 +341,127 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
}
}
- // Now that things on the stack are recovered, do the arguments recovery. We assume that arguments
- // recoveries don't recursively refer to each other. But, we don't try to assume that they only
- // refer to certain ranges of locals. Hence why we need to do this here, once the stack is sensible.
- // Note that we also roughly assume that the arguments might still be materialized outside of its
- // inline call frame scope - but for now the DFG wouldn't do that.
-
- emitRestoreArguments(operands);
-
- // Adjust the old JIT's execute counter. Since we are exiting OSR, we know
- // that all new calls into this code will go to the new JIT, so the execute
- // counter only affects call frames that performed OSR exit and call frames
- // that were still executing the old JIT at the time of another call frame's
- // OSR exit. We want to ensure that the following is true:
+ // 8) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
+ // that all new calls into this code will go to the new JIT, so the execute
+ // counter only affects call frames that performed OSR exit and call frames
+ // that were still executing the old JIT at the time of another call frame's
+ // OSR exit. We want to ensure that the following is true:
//
- // (a) Code that performs an OSR exit gets a chance to reenter optimized
- // code eventually, since optimized code is faster. But we don't
- // want to do such reentry too aggressively (see (c) below).
+ // (a) Code that performs an OSR exit gets a chance to reenter optimized
+ // code eventually, since optimized code is faster. But we don't
+ // want to do such reentry too aggressively (see (c) below).
//
- // (b) If there is code on the call stack that is still running the old
- // JIT's code and has never OSR'd, then it should get a chance to
- // perform OSR entry despite the fact that we've exited.
+ // (b) If there is code on the call stack that is still running the old
+ // JIT's code and has never OSR'd, then it should get a chance to
+ // perform OSR entry despite the fact that we've exited.
//
- // (c) Code that performs an OSR exit should not immediately retry OSR
- // entry, since both forms of OSR are expensive. OSR entry is
- // particularly expensive.
+ // (c) Code that performs an OSR exit should not immediately retry OSR
+ // entry, since both forms of OSR are expensive. OSR entry is
+ // particularly expensive.
//
- // (d) Frequent OSR failures, even those that do not result in the code
- // running in a hot loop, result in recompilation getting triggered.
+ // (d) Frequent OSR failures, even those that do not result in the code
+ // running in a hot loop, result in recompilation getting triggered.
//
- // To ensure (c), we'd like to set the execute counter to
- // counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
- // (a) and (b), since then every OSR exit would delay the opportunity for
- // every call frame to perform OSR entry. Essentially, if OSR exit happens
- // frequently and the function has few loops, then the counter will never
- // become non-negative and OSR entry will never be triggered. OSR entry
- // will only happen if a loop gets hot in the old JIT, which does a pretty
- // good job of ensuring (a) and (b). But that doesn't take care of (d),
- // since each speculation failure would reset the execute counter.
- // So we check here if the number of speculation failures is significantly
- // larger than the number of successes (we want 90% success rate), and if
- // there have been a large enough number of failures. If so, we set the
- // counter to 0; otherwise we set the counter to
- // counterValueForOptimizeAfterWarmUp().
+ // To ensure (c), we'd like to set the execute counter to
+ // counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
+ // (a) and (b), since then every OSR exit would delay the opportunity for
+ // every call frame to perform OSR entry. Essentially, if OSR exit happens
+ // frequently and the function has few loops, then the counter will never
+ // become non-negative and OSR entry will never be triggered. OSR entry
+ // will only happen if a loop gets hot in the old JIT, which does a pretty
+ // good job of ensuring (a) and (b). But that doesn't take care of (d),
+ // since each speculation failure would reset the execute counter.
+ // So we check here if the number of speculation failures is significantly
+ // larger than the number of successes (we want 90% success rate), and if
+ // there have been a large enough number of failures. If so, we set the
+ // counter to 0; otherwise we set the counter to
+ // counterValueForOptimizeAfterWarmUp().
handleExitCounts(m_jit, exit);
- // Reify inlined call frames.
+ // 9) Reify inlined call frames.
reifyInlinedCallFrames(m_jit, exit);
- // And finish.
+ // 10) Create arguments if necessary and place them into the appropriate aliased
+ // registers.
+
+ if (haveArguments) {
+ HashSet<InlineCallFrame*, DefaultHash<InlineCallFrame*>::Hash,
+ NullableHashTraits<InlineCallFrame*>> didCreateArgumentsObject;
+
+ for (size_t index = 0; index < operands.size(); ++index) {
+ const ValueRecovery& recovery = operands[index];
+ if (recovery.technique() != ArgumentsThatWereNotCreated)
+ continue;
+ int operand = operands.operandForIndex(index);
+ // Find the right inline call frame.
+ InlineCallFrame* inlineCallFrame = 0;
+ for (InlineCallFrame* current = exit.m_codeOrigin.inlineCallFrame;
+ current;
+ current = current->caller.inlineCallFrame) {
+ if (current->stackOffset >= operand) {
+ inlineCallFrame = current;
+ break;
+ }
+ }
+
+ if (!m_jit.baselineCodeBlockFor(inlineCallFrame)->usesArguments())
+ continue;
+ VirtualRegister argumentsRegister = m_jit.baselineArgumentsRegisterFor(inlineCallFrame);
+ if (didCreateArgumentsObject.add(inlineCallFrame).isNewEntry) {
+ // We know this call frame optimized out an arguments object that
+ // the baseline JIT would have created. Do that creation now.
+ if (inlineCallFrame) {
+ m_jit.setupArgumentsWithExecState(
+ AssemblyHelpers::TrustedImmPtr(inlineCallFrame));
+ m_jit.move(
+ AssemblyHelpers::TrustedImmPtr(
+ bitwise_cast<void*>(operationCreateInlinedArguments)),
+ GPRInfo::nonArgGPR0);
+ } else {
+ m_jit.setupArgumentsExecState();
+ m_jit.move(
+ AssemblyHelpers::TrustedImmPtr(
+ bitwise_cast<void*>(operationCreateArguments)),
+ GPRInfo::nonArgGPR0);
+ }
+ m_jit.call(GPRInfo::nonArgGPR0);
+ m_jit.store32(
+ AssemblyHelpers::TrustedImm32(JSValue::CellTag),
+ AssemblyHelpers::tagFor(argumentsRegister));
+ m_jit.store32(
+ GPRInfo::returnValueGPR,
+ AssemblyHelpers::payloadFor(argumentsRegister));
+ m_jit.store32(
+ AssemblyHelpers::TrustedImm32(JSValue::CellTag),
+ AssemblyHelpers::tagFor(unmodifiedArgumentsRegister(argumentsRegister)));
+ m_jit.store32(
+ GPRInfo::returnValueGPR,
+ AssemblyHelpers::payloadFor(unmodifiedArgumentsRegister(argumentsRegister)));
+ m_jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0); // no-op move on almost all platforms.
+ }
+
+ m_jit.load32(AssemblyHelpers::payloadFor(argumentsRegister), GPRInfo::regT0);
+ m_jit.store32(
+ AssemblyHelpers::TrustedImm32(JSValue::CellTag),
+ AssemblyHelpers::tagFor(operand));
+ m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor(operand));
+ }
+ }
+
+#if ENABLE(GGC)
+ // 11) Write barrier the owner executable because we're jumping into a different block.
+ for (CodeOrigin codeOrigin = exit.m_codeOrigin; ; codeOrigin = codeOrigin.inlineCallFrame->caller) {
+ CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(codeOrigin);
+ m_jit.move(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock->ownerExecutable()), GPRInfo::nonArgGPR0);
+ SpeculativeJIT::osrWriteBarrier(m_jit, GPRInfo::nonArgGPR0, GPRInfo::nonArgGPR1, GPRInfo::nonArgGPR2);
+ if (!codeOrigin.inlineCallFrame)
+ break;
+ }
+#endif
+
+ // 12) And finish.
adjustAndJumpToTarget(m_jit, exit);
}
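
handleExitCounts implements the policy spelled out in the step-8 comment: demand roughly a 90% speculation success rate, and only trigger reoptimization once failures are also numerous in absolute terms. A standalone sketch of the decision, with illustrative names and thresholds (the real ones live behind handleExitCounts and Options):

    #include <cstdint>
    #include <cstdio>

    // Sketch of the step-8 policy. Returns the new execute counter:
    // 0 means "reoptimize soon"; otherwise back off to the warm-up value.
    static int32_t nextExecuteCounter(
        uint32_t successes, uint32_t failures,
        uint32_t minimumFailures, int32_t counterValueForOptimizeAfterWarmUp)
    {
        bool below90PercentSuccess = failures * 10 > successes + failures;
        if (below90PercentSuccess && failures >= minimumFailures)
            return 0;
        return counterValueForOptimizeAfterWarmUp;
    }

    int main()
    {
        std::printf("%d\n", nextExecuteCounter(5, 95, 20, -1000)); // 0: reoptimize
        std::printf("%d\n", nextExecuteCounter(95, 5, 20, -1000)); // -1000: back off
        return 0;
    }
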
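In step 10, the didCreateArgumentsObject set is what keeps materialization idempotent: several exit operands may alias the same optimized-out arguments object, but each inline call frame must call operationCreateArguments (or operationCreateInlinedArguments) exactly once, after which every aliasing slot just receives the same cell. A standalone model of that bookkeeping, with an int standing in for InlineCallFrame*:

    #include <cstdio>
    #include <set>
    #include <vector>

    int main()
    {
        // Three exit operands alias the arguments objects of two frames (0 and 1).
        std::vector<int> operandFrames = { 0, 0, 1 };

        std::set<int> didCreate; // plays the role of didCreateArgumentsObject
        int creations = 0;
        for (int frame : operandFrames) {
            if (didCreate.insert(frame).second)
                ++creations; // first sighting: create the arguments object
            // every sighting: store the (now existing) object into the slot
        }
        std::printf("created %d arguments objects\n", creations); // prints 2
        return 0;
    }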