From 8995b83bcbfbb68245f779b64e5517627c6cc6ea Mon Sep 17 00:00:00 2001
From: Simon Hausmann
Date: Wed, 17 Oct 2012 16:21:14 +0200
Subject: Imported WebKit commit cf4f8fc6f19b0629f51860cb2d4b25e139d07e00
 (http://svn.webkit.org/repository/webkit/trunk@131592)

New snapshot that includes the build fixes for Mac OS X 10.6 and earlier
as well as the previously cherry-picked changes
---
 .../jit/ExecutableAllocatorFixedVMPool.cpp         |   1 -
 Source/JavaScriptCore/jit/HostCallReturnValue.h    |  16 +-
 Source/JavaScriptCore/jit/JIT.cpp                  |  47 +-
 Source/JavaScriptCore/jit/JIT.h                    |  82 ++-
 Source/JavaScriptCore/jit/JITCall.cpp              |  22 +-
 Source/JavaScriptCore/jit/JITCall32_64.cpp         |  34 +-
 Source/JavaScriptCore/jit/JITCode.h                |   6 +-
 Source/JavaScriptCore/jit/JITInlineMethods.h       |  83 +--
 Source/JavaScriptCore/jit/JITOpcodes.cpp           | 103 ++--
 Source/JavaScriptCore/jit/JITOpcodes32_64.cpp      | 132 ++---
 Source/JavaScriptCore/jit/JITPropertyAccess.cpp    | 557 ++++++++++++++++++++-
 .../JavaScriptCore/jit/JITPropertyAccess32_64.cpp  | 192 ++++++-
 Source/JavaScriptCore/jit/JITStubs.cpp             | 203 ++++++--
 Source/JavaScriptCore/jit/JITStubs.h               |  20 +-
 Source/JavaScriptCore/jit/JSInterfaceJIT.h         |   2 +-
 Source/JavaScriptCore/jit/SpecializedThunkJIT.h    |  10 +-
 16 files changed, 1182 insertions(+), 328 deletions(-)

(limited to 'Source/JavaScriptCore/jit')

diff --git a/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp b/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
index 2123f5a67..7ee3e0497 100644
--- a/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
+++ b/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp
@@ -31,7 +31,6 @@
 
 #include "CodeProfiling.h"
 #include
-#include
 #include
 #include
 #include
diff --git a/Source/JavaScriptCore/jit/HostCallReturnValue.h b/Source/JavaScriptCore/jit/HostCallReturnValue.h
index 0e17ca035..3f61179a3 100644
--- a/Source/JavaScriptCore/jit/HostCallReturnValue.h
+++ b/Source/JavaScriptCore/jit/HostCallReturnValue.h
@@ -30,10 +30,7 @@
 #include "MacroAssemblerCodeRef.h"
 #include
 
-// Unfortunately this only works on GCC-like compilers. And it's currently only used
-// by LLInt and DFG, which also are restricted to GCC-like compilers. We should
-// probably fix that at some point.
-#if COMPILER(GCC) && ENABLE(JIT)
+#if ENABLE(JIT)
 
 #if CALLING_CONVENTION_IS_STDCALL
 #define HOST_CALL_RETURN_VALUE_OPTION CDECL
@@ -45,6 +42,8 @@ namespace JSC {
 
 extern "C" EncodedJSValue HOST_CALL_RETURN_VALUE_OPTION getHostCallReturnValue() REFERENCED_FROM_ASM WTF_INTERNAL;
 
+#if COMPILER(GCC)
+
 // This is a public declaration only to convince CLANG not to elide it.
 extern "C" EncodedJSValue HOST_CALL_RETURN_VALUE_OPTION getHostCallReturnValueWithExecState(ExecState*) REFERENCED_FROM_ASM WTF_INTERNAL;
 
@@ -53,15 +52,14 @@ inline void initializeHostCallReturnValue()
 {
     getHostCallReturnValueWithExecState(0);
 }
 
-}
-
 #else // COMPILER(GCC)
 
-namespace JSC {
 inline void initializeHostCallReturnValue() { }
-}
 
 #endif // COMPILER(GCC)
 
-#endif // HostCallReturnValue_h
+} // namespace JSC
+
+#endif // ENABLE(JIT)
+
+#endif // HostCallReturnValue_h
diff --git a/Source/JavaScriptCore/jit/JIT.cpp b/Source/JavaScriptCore/jit/JIT.cpp
index bf5ac88dd..49f9ec3b5 100644
--- a/Source/JavaScriptCore/jit/JIT.cpp
+++ b/Source/JavaScriptCore/jit/JIT.cpp
@@ -75,10 +75,14 @@ JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
     , m_codeBlock(codeBlock)
     , m_labels(codeBlock ? codeBlock->numberOfInstructions() : 0)
     , m_bytecodeOffset((unsigned)-1)
+    , m_propertyAccessInstructionIndex(UINT_MAX)
+    , m_byValInstructionIndex(UINT_MAX)
+    , m_globalResolveInfoIndex(UINT_MAX)
+    , m_callLinkInfoIndex(UINT_MAX)
 #if USE(JSVALUE32_64)
     , m_jumpTargetIndex(0)
     , m_mappedBytecodeOffset((unsigned)-1)
-    , m_mappedVirtualRegisterIndex(RegisterFile::ReturnPC)
+    , m_mappedVirtualRegisterIndex(JSStack::ReturnPC)
    , m_mappedTag((RegisterID)-1)
     , m_mappedPayload((RegisterID)-1)
 #else
@@ -90,6 +94,10 @@ JIT::JIT(JSGlobalData* globalData, CodeBlock* codeBlock)
 #else
     , m_randomGenerator(static_cast(randomNumber() * 0xFFFFFFF))
 #endif
+#if ENABLE(VALUE_PROFILER)
+    , m_canBeOptimized(false)
+    , m_shouldEmitProfiling(false)
+#endif
 {
 }
 
@@ -400,6 +408,7 @@ void JIT::privateCompileSlowCases()
     Instruction* instructionsBegin = m_codeBlock->instructions().begin();
 
     m_propertyAccessInstructionIndex = 0;
+    m_byValInstructionIndex = 0;
     m_globalResolveInfoIndex = 0;
     m_callLinkInfoIndex = 0;
 
@@ -606,8 +615,8 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
         nop();
 
     preserveReturnAddressAfterCall(regT2);
-    emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);
-    emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);
+    emitPutToCallFrameHeader(regT2, JSStack::ReturnPC);
+    emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
 
     Label beginLabel(this);
 
@@ -616,7 +625,7 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
     sampleInstruction(m_codeBlock->instructions().begin());
 #endif
 
-    Jump registerFileCheck;
+    Jump stackCheck;
     if (m_codeBlock->codeType() == FunctionCode) {
 #if ENABLE(DFG_JIT)
 #if DFG_ENABLE(SUCCESS_STATS)
@@ -646,7 +655,7 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
 #endif
 
         addPtr(TrustedImm32(m_codeBlock->m_numCalleeRegisters * sizeof(Register)), callFrameRegister, regT1);
-        registerFileCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->registerFile().addressOfEnd()), regT1);
+        stackCheck = branchPtr(Below, AbsoluteAddress(m_globalData->interpreter->stack().addressOfEnd()), regT1);
     }
 
     Label functionBody = label();
@@ -662,9 +671,9 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
 
     Label arityCheck;
     if (m_codeBlock->codeType() == FunctionCode) {
-        registerFileCheck.link(this);
+        stackCheck.link(this);
         m_bytecodeOffset = 0;
-        JITStubCall(this, cti_register_file_check).call();
+        JITStubCall(this, cti_stack_check).call();
 #ifndef NDEBUG
         m_bytecodeOffset = (unsigned)-1; // Reset this, in order to guard its use with ASSERTs.
 #endif
@@ -672,10 +681,10 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
 
         arityCheck = label();
         preserveReturnAddressAfterCall(regT2);
-        emitPutToCallFrameHeader(regT2, RegisterFile::ReturnPC);
-        emitPutImmediateToCallFrameHeader(m_codeBlock, RegisterFile::CodeBlock);
+        emitPutToCallFrameHeader(regT2, JSStack::ReturnPC);
+        emitPutImmediateToCallFrameHeader(m_codeBlock, JSStack::CodeBlock);
 
-        load32(payloadFor(RegisterFile::ArgumentCount), regT1);
+        load32(payloadFor(JSStack::ArgumentCount), regT1);
         branch32(AboveOrEqual, regT1, TrustedImm32(m_codeBlock->m_numParameters)).linkTo(beginLabel, this);
 
         m_bytecodeOffset = 0;
@@ -715,8 +724,8 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
             StringJumpTable::StringOffsetTable::iterator end = record.jumpTable.stringJumpTable->offsetTable.end();
 
             for (StringJumpTable::StringOffsetTable::iterator it = record.jumpTable.stringJumpTable->offsetTable.begin(); it != end; ++it) {
-                unsigned offset = it->second.branchOffset;
-                it->second.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
+                unsigned offset = it->value.branchOffset;
+                it->value.ctiOffset = offset ? patchBuffer.locationOf(m_labels[bytecodeOffset + offset]) : record.jumpTable.stringJumpTable->ctiDefault;
             }
         }
     }
@@ -738,6 +747,20 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo
     m_codeBlock->setNumberOfStructureStubInfos(m_propertyAccessCompilationInfo.size());
     for (unsigned i = 0; i < m_propertyAccessCompilationInfo.size(); ++i)
         m_propertyAccessCompilationInfo[i].copyToStubInfo(m_codeBlock->structureStubInfo(i), patchBuffer);
+    m_codeBlock->setNumberOfByValInfos(m_byValCompilationInfo.size());
+    for (unsigned i = 0; i < m_byValCompilationInfo.size(); ++i) {
+        CodeLocationJump badTypeJump = CodeLocationJump(patchBuffer.locationOf(m_byValCompilationInfo[i].badTypeJump));
+        CodeLocationLabel doneTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].doneTarget);
+        CodeLocationLabel slowPathTarget = patchBuffer.locationOf(m_byValCompilationInfo[i].slowPathTarget);
+        CodeLocationCall returnAddress = patchBuffer.locationOf(m_byValCompilationInfo[i].returnAddress);
+
+        m_codeBlock->byValInfo(i) = ByValInfo(
+            m_byValCompilationInfo[i].bytecodeIndex,
+            badTypeJump,
+            m_byValCompilationInfo[i].arrayMode,
+            differenceBetweenCodePtr(badTypeJump, doneTarget),
+            differenceBetweenCodePtr(returnAddress, slowPathTarget));
+    }
     m_codeBlock->setNumberOfCallLinkInfos(m_callStructureStubCompilationInfo.size());
     for (unsigned i = 0; i < m_codeBlock->numberOfCallLinkInfos(); ++i) {
         CallLinkInfo& info = m_codeBlock->callLinkInfo(i);
diff --git a/Source/JavaScriptCore/jit/JIT.h b/Source/JavaScriptCore/jit/JIT.h
index 150aae9ea..3e16972e2 100644
--- a/Source/JavaScriptCore/jit/JIT.h
+++ b/Source/JavaScriptCore/jit/JIT.h
@@ -58,9 +58,9 @@ namespace JSC {
     class JIT;
     class JSPropertyNameIterator;
     class Interpreter;
-    class Register;
-    class RegisterFile;
     class JSScope;
+    class JSStack;
+    class Register;
     class StructureChain;
 
     struct CallLinkInfo;
@@ -264,6 +264,25 @@ namespace JSC {
         void copyToStubInfo(StructureStubInfo& info, LinkBuffer &patchBuffer);
     };
 
+    struct ByValCompilationInfo {
+        ByValCompilationInfo() { }
+
+        ByValCompilationInfo(unsigned bytecodeIndex, MacroAssembler::PatchableJump badTypeJump, JITArrayMode arrayMode, MacroAssembler::Label doneTarget)
+            : bytecodeIndex(bytecodeIndex)
+            , badTypeJump(badTypeJump)
+            , arrayMode(arrayMode)
+            , doneTarget(doneTarget)
+        {
+        }
+
+        unsigned bytecodeIndex;
+        MacroAssembler::PatchableJump badTypeJump;
+        JITArrayMode arrayMode;
+        MacroAssembler::Label doneTarget;
+        MacroAssembler::Label slowPathTarget;
+        MacroAssembler::Call returnAddress;
+    };
+
     struct StructureStubCompilationInfo {
         MacroAssembler::DataLabelPtr hotPathBegin;
         MacroAssembler::Call hotPathOther;
@@ -348,6 +367,20 @@ namespace JSC {
             jit.m_bytecodeOffset = stubInfo->bytecodeIndex;
             jit.privateCompilePutByIdTransition(stubInfo, oldStructure, newStructure, cachedOffset, chain, returnAddress, direct);
         }
+
+        static void compileGetByVal(JSGlobalData* globalData, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
+        {
+            JIT jit(globalData, codeBlock);
+            jit.m_bytecodeOffset = byValInfo->bytecodeIndex;
+            jit.privateCompileGetByVal(byValInfo, returnAddress, arrayMode);
+        }
+
+        static void compilePutByVal(JSGlobalData* globalData, CodeBlock* codeBlock, ByValInfo* byValInfo, ReturnAddressPtr returnAddress, JITArrayMode arrayMode)
+        {
+            JIT jit(globalData, codeBlock);
+            jit.m_bytecodeOffset = byValInfo->bytecodeIndex;
+            jit.privateCompilePutByVal(byValInfo, returnAddress, arrayMode);
+        }
 
         static PassRefPtr compileCTIMachineTrampolines(JSGlobalData* globalData, TrampolineStructure *trampolines)
         {
@@ -379,6 +412,10 @@ namespace JSC {
         static void compilePatchGetArrayLength(JSGlobalData* globalData, CodeBlock* codeBlock, ReturnAddressPtr returnAddress)
         {
             JIT jit(globalData, codeBlock);
+#if ENABLE(DFG_JIT)
+            // Force profiling to be enabled during stub generation.
+            jit.m_canBeOptimized = true;
+#endif // ENABLE(DFG_JIT)
             return jit.privateCompilePatchGetArrayLength(returnAddress);
         }
 
@@ -397,6 +434,9 @@ namespace JSC {
         void privateCompileGetByIdChainList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, StructureChain*, size_t count, const Identifier&, const PropertySlot&, PropertyOffset cachedOffset, CallFrame*);
         void privateCompileGetByIdChain(StructureStubInfo*, Structure*, StructureChain*, size_t count, const Identifier&, const PropertySlot&, PropertyOffset cachedOffset, ReturnAddressPtr, CallFrame*);
         void privateCompilePutByIdTransition(StructureStubInfo*, Structure*, Structure*, PropertyOffset cachedOffset, StructureChain*, ReturnAddressPtr, bool direct);
+
+        void privateCompileGetByVal(ByValInfo*, ReturnAddressPtr, JITArrayMode);
+        void privateCompilePutByVal(ByValInfo*, ReturnAddressPtr, JITArrayMode);
 
         PassRefPtr privateCompileCTIMachineTrampolines(JSGlobalData*, TrampolineStructure*);
         Label privateCompileCTINativeCall(JSGlobalData*, bool isConstruct = false);
@@ -434,7 +474,7 @@ namespace JSC {
         void emitWriteBarrier(RegisterID owner, RegisterID valueTag, RegisterID scratch, RegisterID scratch2, WriteBarrierMode, WriteBarrierUseKind);
         void emitWriteBarrier(JSCell* owner, RegisterID value, RegisterID scratch, WriteBarrierMode, WriteBarrierUseKind);
 
-        template void emitAllocateBasicJSObject(StructureType, RegisterID result, RegisterID storagePtr);
+        template void emitAllocateBasicJSObject(StructureType, RegisterID result, RegisterID storagePtr);
         void emitAllocateBasicStorage(size_t, ptrdiff_t offsetFromBase, RegisterID result);
         template void emitAllocateJSFinalObject(T structure, RegisterID result, RegisterID storagePtr);
         void emitAllocateJSArray(unsigned valuesRegister, unsigned length, RegisterID cellResult, RegisterID storageResult, RegisterID storagePtr, RegisterID scratch);
@@ -452,7 +492,27 @@ namespace JSC {
         void emitArrayProfilingSite(RegisterID structureAndIndexingType, RegisterID scratch, ArrayProfile*);
         void emitArrayProfilingSiteForBytecodeIndex(RegisterID structureAndIndexingType, RegisterID scratch, unsigned bytecodeIndex);
         void emitArrayProfileStoreToHoleSpecialCase(ArrayProfile*);
-        
+
+        JITArrayMode chooseArrayMode(ArrayProfile*);
+
+        // Property is in regT1, base is in regT0. regT2 contains indexing type.
+        // Property is int-checked and zero extended. Base is cell checked.
+        // Structure is already profiled. Returns the slow cases. Fall-through
+        // case contains result in regT0, and it is not yet profiled.
+        JumpList emitContiguousGetByVal(Instruction*, PatchableJump& badType);
+        JumpList emitArrayStorageGetByVal(Instruction*, PatchableJump& badType);
+        JumpList emitIntTypedArrayGetByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor&, size_t elementSize, TypedArraySignedness);
+        JumpList emitFloatTypedArrayGetByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor&, size_t elementSize);
+
+        // Property is in regT0, base is in regT0. regT2 contains indecing type.
+        // The value to store is not yet loaded. Property is int-checked and
+        // zero-extended. Base is cell checked. Structure is already profiled.
+        // returns the slow cases.
+        JumpList emitContiguousPutByVal(Instruction*, PatchableJump& badType);
+        JumpList emitArrayStoragePutByVal(Instruction*, PatchableJump& badType);
+        JumpList emitIntTypedArrayPutByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor&, size_t elementSize, TypedArraySignedness, TypedArrayRounding);
+        JumpList emitFloatTypedArrayPutByVal(Instruction*, PatchableJump& badType, const TypedArrayDescriptor&, size_t elementSize);
+
         enum FinalObjectMode { MayBeFinal, KnownNotFinal };
 
 #if USE(JSVALUE32_64)
@@ -778,12 +838,12 @@ namespace JSC {
 
         void emitInitRegister(unsigned dst);
 
-        void emitPutToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry entry);
-        void emitPutCellToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry);
-        void emitPutIntToCallFrameHeader(RegisterID from, RegisterFile::CallFrameHeaderEntry);
-        void emitPutImmediateToCallFrameHeader(void* value, RegisterFile::CallFrameHeaderEntry entry);
-        void emitGetFromCallFrameHeaderPtr(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from = callFrameRegister);
-        void emitGetFromCallFrameHeader32(RegisterFile::CallFrameHeaderEntry entry, RegisterID to, RegisterID from = callFrameRegister);
+        void emitPutToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry);
+        void emitPutCellToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry);
+        void emitPutIntToCallFrameHeader(RegisterID from, JSStack::CallFrameHeaderEntry);
+        void emitPutImmediateToCallFrameHeader(void* value, JSStack::CallFrameHeaderEntry);
+        void emitGetFromCallFrameHeaderPtr(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister);
+        void emitGetFromCallFrameHeader32(JSStack::CallFrameHeaderEntry, RegisterID to, RegisterID from = callFrameRegister);
 
         JSValue getConstantOperand(unsigned src);
         bool isOperandConstantImmediateInt(unsigned src);
@@ -870,6 +930,7 @@ namespace JSC {
         Vector m_calls;
        Vector