author     Lorry Tar Creator <lorry-tar-importer@lorry>   2017-06-27 06:07:23 +0000
committer  Lorry Tar Creator <lorry-tar-importer@lorry>   2017-06-27 06:07:23 +0000
commit     1bf1084f2b10c3b47fd1a588d85d21ed0eb41d0c (patch)
tree       46dcd36c86e7fbc6e5df36deb463b33e9967a6f7 /Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
parent     32761a6cee1d0dee366b885b7b9c777e67885688 (diff)
Diffstat (limited to 'Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp')

-rw-r--r--   Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp   287

1 file changed, 114 insertions, 173 deletions
diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
index 9402d115e..79eff7684 100644
--- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp
@@ -1,5 +1,5 @@
 /*
- * Copyright (C) 2011, 2013 Apple Inc. All rights reserved.
+ * Copyright (C) 2011, 2013-2016 Apple Inc. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -31,29 +31,35 @@
 #include "DFGOperations.h"
 #include "DFGOSRExitCompilerCommon.h"
 #include "DFGSpeculativeJIT.h"
-#include "Operations.h"
+#include "JSCInlines.h"
 #include <wtf/DataLog.h>
 
 namespace JSC { namespace DFG {
 
 void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery)
 {
-    // 1) Pro-forma stuff.
+    // Pro-forma stuff.
     if (Options::printEachOSRExit()) {
         SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo;
         debugInfo->codeBlock = m_jit.codeBlock();
+        debugInfo->kind = exit.m_kind;
+        debugInfo->bytecodeOffset = exit.m_codeOrigin.bytecodeIndex;
         
         m_jit.debugCall(debugOperationPrintSpeculationFailure, debugInfo);
     }
     
-    // 2) Perform speculation recovery. This only comes into play when an operation
-    //    starts mutating state before verifying the speculation it has already made.
+    // Perform speculation recovery. This only comes into play when an operation
+    // starts mutating state before verifying the speculation it has already made.
     
     if (recovery) {
         switch (recovery->type()) {
         case SpeculativeAdd:
             m_jit.sub32(recovery->src(), recovery->dest());
             break;
+            
+        case SpeculativeAddImmediate:
+            m_jit.sub32(AssemblyHelpers::Imm32(recovery->immediate()), recovery->dest());
+            break;
             
         case BooleanSpeculationCheck:
             break;
@@ -63,7 +69,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
         }
     }
     
-    // 3) Refine some value profile, if appropriate.
+    // Refine some value profile, if appropriate.
     
     if (!!exit.m_jsValueSource) {
         if (exit.m_kind == BadCache || exit.m_kind == BadIndexingType) {
@@ -100,13 +106,8 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
                 scratch1 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2);
                 scratch2 = AssemblyHelpers::selectScratchGPR(usedRegister1, usedRegister2, scratch1);
                 
-#if CPU(ARM64)
-                m_jit.pushToSave(scratch1);
-                m_jit.pushToSave(scratch2);
-#else
                 m_jit.push(scratch1);
                 m_jit.push(scratch2);
-#endif
                 
                 GPRReg value;
                 if (exit.m_jsValueSource.isAddress()) {
@@ -115,60 +116,49 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
                 } else
                     value = exit.m_jsValueSource.payloadGPR();
                 
-                m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureOffset()), scratch1);
-                m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructure());
-                m_jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingTypeOffset()), scratch1);
+                m_jit.loadPtr(AssemblyHelpers::Address(value, JSCell::structureIDOffset()), scratch1);
+                m_jit.storePtr(scratch1, arrayProfile->addressOfLastSeenStructureID());
+                m_jit.load8(AssemblyHelpers::Address(scratch1, Structure::indexingTypeIncludingHistoryOffset()), scratch1);
                 m_jit.move(AssemblyHelpers::TrustedImm32(1), scratch2);
                 m_jit.lshift32(scratch1, scratch2);
                 m_jit.or32(scratch2, AssemblyHelpers::AbsoluteAddress(arrayProfile->addressOfArrayModes()));
                 
-#if CPU(ARM64)
-                m_jit.popToRestore(scratch2);
-                m_jit.popToRestore(scratch1);
-#else
                 m_jit.pop(scratch2);
                 m_jit.pop(scratch1);
-#endif
            }
        }
        
-        if (!!exit.m_valueProfile) {
-            EncodedJSValue* bucket = exit.m_valueProfile.getSpecFailBucket(0);
-            
+        if (MethodOfGettingAValueProfile profile = exit.m_valueProfile) {
            if (exit.m_jsValueSource.isAddress()) {
                // Save a register so we can use it.
-                GPRReg scratch = AssemblyHelpers::selectScratchGPR(exit.m_jsValueSource.base());
-                
-#if CPU(ARM64)
-                m_jit.pushToSave(scratch);
-#else
-                m_jit.push(scratch);
-#endif
+                GPRReg scratchPayload = AssemblyHelpers::selectScratchGPR(exit.m_jsValueSource.base());
+                GPRReg scratchTag = AssemblyHelpers::selectScratchGPR(exit.m_jsValueSource.base(), scratchPayload);
+                m_jit.pushToSave(scratchPayload);
+                m_jit.pushToSave(scratchTag);
                
-                m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), scratch);
-                m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
-                m_jit.load32(exit.m_jsValueSource.asAddress(OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), scratch);
-                m_jit.store32(scratch, &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
+                JSValueRegs scratch(scratchTag, scratchPayload);
+                
+                m_jit.loadValue(exit.m_jsValueSource.asAddress(), scratch);
+                profile.emitReportValue(m_jit, scratch);
                
-#if CPU(ARM64)
-                m_jit.popToRestore(scratch);
-#else
-                m_jit.pop(scratch);
-#endif
+                m_jit.popToRestore(scratchTag);
+                m_jit.popToRestore(scratchPayload);
            } else if (exit.m_jsValueSource.hasKnownTag()) {
-                m_jit.store32(AssemblyHelpers::TrustedImm32(exit.m_jsValueSource.tag()), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
-                m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
-            } else {
-                m_jit.store32(exit.m_jsValueSource.tagGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.tag);
-                m_jit.store32(exit.m_jsValueSource.payloadGPR(), &bitwise_cast<EncodedValueDescriptor*>(bucket)->asBits.payload);
-            }
+                GPRReg scratchTag = AssemblyHelpers::selectScratchGPR(exit.m_jsValueSource.payloadGPR());
+                m_jit.pushToSave(scratchTag);
+                m_jit.move(AssemblyHelpers::TrustedImm32(exit.m_jsValueSource.tag()), scratchTag);
+                JSValueRegs value(scratchTag, exit.m_jsValueSource.payloadGPR());
+                profile.emitReportValue(m_jit, value);
+                m_jit.popToRestore(scratchTag);
+            } else
+                profile.emitReportValue(m_jit, exit.m_jsValueSource.regs());
        }
    }
    
    // Do a simplified OSR exit. See DFGOSRExitCompiler64.cpp's comment regarding how and wny we
    // do this simple approach.
    
-    // 4) Save all state from GPRs into the scratch buffer.
+    // Save all state from GPRs into the scratch buffer.
    
    ScratchBuffer* scratchBuffer = m_jit.vm()->scratchBufferForSize(sizeof(EncodedJSValue) * operands.size());
    EncodedJSValue* scratch = scratchBuffer ? static_cast<EncodedJSValue*>(scratchBuffer->dataBuffer()) : 0;
@@ -201,12 +191,13 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
    
    // Now all GPRs are free to reuse.
    
-    // 5) Save all state from FPRs into the scratch buffer.
+    // Save all state from FPRs into the scratch buffer.
    
    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
        
        switch (recovery.technique()) {
+        case UnboxedDoubleInFPR:
        case InFPR:
            m_jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0);
            m_jit.storeDouble(recovery.fpr(), MacroAssembler::Address(GPRInfo::regT0));
@@ -219,9 +210,9 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
    
    // Now all FPRs are free to reuse.
    
-    // 6) Save all state from the stack into the scratch buffer. For simplicity we
-    //    do this even for state that's already in the right place on the stack.
-    //    It makes things simpler later.
+    // Save all state from the stack into the scratch buffer. For simplicity we
+    // do this even for state that's already in the right place on the stack.
+    // It makes things simpler later.
    
    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
@@ -251,19 +242,36 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
        }
    }
    
-    // 7) Do all data format conversions and store the results into the stack.
+    // Need to ensure that the stack pointer accounts for the worst-case stack usage at exit. This
+    // could toast some stack that the DFG used. We need to do it before storing to stack offsets
+    // used by baseline.
+    m_jit.addPtr(
+        CCallHelpers::TrustedImm32(
+            -m_jit.codeBlock()->jitCode()->dfgCommon()->requiredRegisterCountForExit * sizeof(Register)),
+        CCallHelpers::framePointerRegister, CCallHelpers::stackPointerRegister);
    
-    bool haveArguments = false;
+    // Restore the DFG callee saves and then save the ones the baseline JIT uses.
+    m_jit.emitRestoreCalleeSaves();
+    m_jit.emitSaveCalleeSavesFor(m_jit.baselineCodeBlock());
+    
+    if (exit.isExceptionHandler())
+        m_jit.copyCalleeSavesToVMEntryFrameCalleeSavesBuffer();
+    
+    // Do all data format conversions and store the results into the stack.
    
    for (size_t index = 0; index < operands.size(); ++index) {
        const ValueRecovery& recovery = operands[index];
-        int operand = operands.operandForIndex(index);
-        
+        VirtualRegister reg = operands.virtualRegisterForIndex(index);
+
+        if (reg.isLocal() && reg.toLocal() < static_cast<int>(m_jit.baselineCodeBlock()->calleeSaveSpaceAsVirtualRegisters()))
+            continue;
+
+        int operand = reg.offset();
+        
        switch (recovery.technique()) {
        case InPair:
-        case InFPR:
        case DisplacedInJSStack:
-        case DoubleDisplacedInJSStack:
+        case InFPR:
            m_jit.load32(
                &bitwise_cast<EncodedValueDescriptor*>(scratch + index)->asBits.tag,
                GPRInfo::regT0);
@@ -278,6 +286,14 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
                AssemblyHelpers::payloadFor(operand));
            break;
            
+        case UnboxedDoubleInFPR:
+        case DoubleDisplacedInJSStack:
+            m_jit.move(AssemblyHelpers::TrustedImmPtr(scratch + index), GPRInfo::regT0);
+            m_jit.loadDouble(MacroAssembler::Address(GPRInfo::regT0), FPRInfo::fpRegT0);
+            m_jit.purifyNaN(FPRInfo::fpRegT0);
+            m_jit.storeDouble(FPRInfo::fpRegT0, AssemblyHelpers::addressFor(operand));
+            break;
+            
        case UnboxedInt32InGPR:
        case Int32DisplacedInJSStack:
            m_jit.load32(
@@ -326,14 +342,9 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
                AssemblyHelpers::payloadFor(operand));
            break;
            
-        case ArgumentsThatWereNotCreated:
-            haveArguments = true;
-            m_jit.store32(
-                AssemblyHelpers::TrustedImm32(JSValue().tag()),
-                AssemblyHelpers::tagFor(operand));
-            m_jit.store32(
-                AssemblyHelpers::TrustedImm32(JSValue().payload()),
-                AssemblyHelpers::payloadFor(operand));
+        case DirectArgumentsThatWereNotCreated:
+        case ClonedArgumentsThatWereNotCreated:
+            // Don't do this, yet.
            break;
            
        default:
@@ -341,127 +352,57 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecov
        }
    }
    
-    // 8) Adjust the old JIT's execute counter. Since we are exiting OSR, we know
-    //    that all new calls into this code will go to the new JIT, so the execute
-    //    counter only affects call frames that performed OSR exit and call frames
-    //    that were still executing the old JIT at the time of another call frame's
-    //    OSR exit. We want to ensure that the following is true:
+    // Now that things on the stack are recovered, do the arguments recovery. We assume that arguments
+    // recoveries don't recursively refer to each other. But, we don't try to assume that they only
+    // refer to certain ranges of locals. Hence why we need to do this here, once the stack is sensible.
+    // Note that we also roughly assume that the arguments might still be materialized outside of its
+    // inline call frame scope - but for now the DFG wouldn't do that.
+    
+    emitRestoreArguments(operands);
+    
+    // Adjust the old JIT's execute counter. Since we are exiting OSR, we know
+    // that all new calls into this code will go to the new JIT, so the execute
+    // counter only affects call frames that performed OSR exit and call frames
+    // that were still executing the old JIT at the time of another call frame's
+    // OSR exit. We want to ensure that the following is true:
    //
-    //    (a) Code the performs an OSR exit gets a chance to reenter optimized
-    //        code eventually, since optimized code is faster. But we don't
-    //        want to do such reentery too aggressively (see (c) below).
+    // (a) Code the performs an OSR exit gets a chance to reenter optimized
+    //     code eventually, since optimized code is faster. But we don't
+    //     want to do such reentery too aggressively (see (c) below).
    //
-    //    (b) If there is code on the call stack that is still running the old
-    //        JIT's code and has never OSR'd, then it should get a chance to
-    //        perform OSR entry despite the fact that we've exited.
+    // (b) If there is code on the call stack that is still running the old
+    //     JIT's code and has never OSR'd, then it should get a chance to
+    //     perform OSR entry despite the fact that we've exited.
    //
-    //    (c) Code the performs an OSR exit should not immediately retry OSR
-    //        entry, since both forms of OSR are expensive. OSR entry is
-    //        particularly expensive.
+    // (c) Code the performs an OSR exit should not immediately retry OSR
+    //     entry, since both forms of OSR are expensive. OSR entry is
+    //     particularly expensive.
    //
-    //    (d) Frequent OSR failures, even those that do not result in the code
-    //        running in a hot loop, result in recompilation getting triggered.
+    // (d) Frequent OSR failures, even those that do not result in the code
+    //     running in a hot loop, result in recompilation getting triggered.
    //
-    //    To ensure (c), we'd like to set the execute counter to
-    //    counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
-    //    (a) and (b), since then every OSR exit would delay the opportunity for
-    //    every call frame to perform OSR entry. Essentially, if OSR exit happens
-    //    frequently and the function has few loops, then the counter will never
-    //    become non-negative and OSR entry will never be triggered. OSR entry
-    //    will only happen if a loop gets hot in the old JIT, which does a pretty
-    //    good job of ensuring (a) and (b). But that doesn't take care of (d),
-    //    since each speculation failure would reset the execute counter.
-    //    So we check here if the number of speculation failures is significantly
-    //    larger than the number of successes (we want 90% success rate), and if
-    //    there have been a large enough number of failures. If so, we set the
-    //    counter to 0; otherwise we set the counter to
-    //    counterValueForOptimizeAfterWarmUp().
+    // To ensure (c), we'd like to set the execute counter to
+    // counterValueForOptimizeAfterWarmUp(). This seems like it would endanger
+    // (a) and (b), since then every OSR exit would delay the opportunity for
+    // every call frame to perform OSR entry. Essentially, if OSR exit happens
+    // frequently and the function has few loops, then the counter will never
+    // become non-negative and OSR entry will never be triggered. OSR entry
+    // will only happen if a loop gets hot in the old JIT, which does a pretty
+    // good job of ensuring (a) and (b). But that doesn't take care of (d),
+    // since each speculation failure would reset the execute counter.
+    // So we check here if the number of speculation failures is significantly
+    // larger than the number of successes (we want 90% success rate), and if
+    // there have been a large enough number of failures. If so, we set the
+    // counter to 0; otherwise we set the counter to
+    // counterValueForOptimizeAfterWarmUp().
    
    handleExitCounts(m_jit, exit);
    
-    // 9) Reify inlined call frames.
+    // Reify inlined call frames.
    
    reifyInlinedCallFrames(m_jit, exit);
    
-    // 10) Create arguments if necessary and place them into the appropriate aliased
-    //     registers.
-    
-    if (haveArguments) {
-        HashSet<InlineCallFrame*, DefaultHash<InlineCallFrame*>::Hash,
-            NullableHashTraits<InlineCallFrame*>> didCreateArgumentsObject;
-
-        for (size_t index = 0; index < operands.size(); ++index) {
-            const ValueRecovery& recovery = operands[index];
-            if (recovery.technique() != ArgumentsThatWereNotCreated)
-                continue;
-            int operand = operands.operandForIndex(index);
-            // Find the right inline call frame.
-            InlineCallFrame* inlineCallFrame = 0;
-            for (InlineCallFrame* current = exit.m_codeOrigin.inlineCallFrame;
-                 current;
-                 current = current->caller.inlineCallFrame) {
-                if (current->stackOffset >= operand) {
-                    inlineCallFrame = current;
-                    break;
-                }
-            }
-            
-            if (!m_jit.baselineCodeBlockFor(inlineCallFrame)->usesArguments())
-                continue;
-            VirtualRegister argumentsRegister = m_jit.baselineArgumentsRegisterFor(inlineCallFrame);
-            if (didCreateArgumentsObject.add(inlineCallFrame).isNewEntry) {
-                // We know this call frame optimized out an arguments object that
-                // the baseline JIT would have created. Do that creation now.
-                if (inlineCallFrame) {
-                    m_jit.setupArgumentsWithExecState(
-                        AssemblyHelpers::TrustedImmPtr(inlineCallFrame));
-                    m_jit.move(
-                        AssemblyHelpers::TrustedImmPtr(
-                            bitwise_cast<void*>(operationCreateInlinedArguments)),
-                        GPRInfo::nonArgGPR0);
-                } else {
-                    m_jit.setupArgumentsExecState();
-                    m_jit.move(
-                        AssemblyHelpers::TrustedImmPtr(
-                            bitwise_cast<void*>(operationCreateArguments)),
-                        GPRInfo::nonArgGPR0);
-                }
-                m_jit.call(GPRInfo::nonArgGPR0);
-                m_jit.store32(
-                    AssemblyHelpers::TrustedImm32(JSValue::CellTag),
-                    AssemblyHelpers::tagFor(argumentsRegister));
-                m_jit.store32(
-                    GPRInfo::returnValueGPR,
-                    AssemblyHelpers::payloadFor(argumentsRegister));
-                m_jit.store32(
-                    AssemblyHelpers::TrustedImm32(JSValue::CellTag),
-                    AssemblyHelpers::tagFor(unmodifiedArgumentsRegister(argumentsRegister)));
-                m_jit.store32(
-                    GPRInfo::returnValueGPR,
-                    AssemblyHelpers::payloadFor(unmodifiedArgumentsRegister(argumentsRegister)));
-                m_jit.move(GPRInfo::returnValueGPR, GPRInfo::regT0); // no-op move on almost all platforms.
-            }
-            
-            m_jit.load32(AssemblyHelpers::payloadFor(argumentsRegister), GPRInfo::regT0);
-            m_jit.store32(
-                AssemblyHelpers::TrustedImm32(JSValue::CellTag),
-                AssemblyHelpers::tagFor(operand));
-            m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor(operand));
-        }
-    }
-    
-#if ENABLE(GGC)
-    // 11) Write barrier the owner executable because we're jumping into a different block.
-    for (CodeOrigin codeOrigin = exit.m_codeOrigin; ; codeOrigin = codeOrigin.inlineCallFrame->caller) {
-        CodeBlock* baselineCodeBlock = m_jit.baselineCodeBlockFor(codeOrigin);
-        m_jit.move(AssemblyHelpers::TrustedImmPtr(baselineCodeBlock->ownerExecutable()), GPRInfo::nonArgGPR0);
-        SpeculativeJIT::osrWriteBarrier(m_jit, GPRInfo::nonArgGPR0, GPRInfo::nonArgGPR1, GPRInfo::nonArgGPR2);
-        if (!codeOrigin.inlineCallFrame)
-            break;
-    }
-#endif
-
-    // 12) And finish.
+    // And finish.
    adjustAndJumpToTarget(m_jit, exit);
 }
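Side note on the new UnboxedDoubleInFPR / DoubleDisplacedInJSStack path above: the patch routes recovered doubles through purifyNaN() before storing them into baseline stack slots. The standalone sketch below illustrates what that purification guards against on the 32-bit tag/payload JSValue layout. It is only an illustration of the idea, not the code the JIT emits, and the specific bit patterns (including the canonical quiet-NaN constant 0x7ff8000000000000) are assumptions chosen for the demonstration.

// purify_nan_sketch.cpp - illustrative only, not JavaScriptCore code.
#include <cinttypes>
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>

// Rewrite any NaN to a single canonical quiet NaN, the same idea as the
// purifyNaN() call added in the patch above.
static double purifyNaN(double value)
{
    if (std::isnan(value)) {
        const uint64_t canonicalQuietNaN = 0x7ff8000000000000ull;
        std::memcpy(&value, &canonicalQuietNaN, sizeof(value));
    }
    return value;
}

int main()
{
    // An "impure" NaN whose high word is 0xffffffff. On the 32-bit JSValue
    // encoding that high word is the Int32 tag, so if this bit pattern were
    // written to a stack slot unpurified, the slot could later be reloaded
    // as the integer 1 instead of a double.
    const uint64_t impureBits = 0xffffffff00000001ull;
    double impure;
    std::memcpy(&impure, &impureBits, sizeof(impure));

    const double pure = purifyNaN(impure);
    uint64_t pureBits;
    std::memcpy(&pureBits, &pure, sizeof(pureBits));

    std::printf("before: %016" PRIx64 "\nafter:  %016" PRIx64 "\n", impureBits, pureBits);
    return 0;
}

Purifying before the storeDouble keeps a NaN with a non-canonical bit pattern from being written back in a form that the baseline JIT would misread as a tagged immediate when it reinterprets the slot as a tag/payload pair.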