author    Simon Hausmann <simon.hausmann@nokia.com>    2012-07-18 13:59:13 +0200
committer Simon Hausmann <simon.hausmann@nokia.com>    2012-07-18 13:59:28 +0200
commit    4d6084feccab99c0a7b3ecef26bb49c41dd50201 (patch)
tree      fd1195897f551eee6d5a15d07ff5733b15aa2a5c /Source/JavaScriptCore
parent    ae901828d4689ab9e89113f6b6ea8042b37a9fda (diff)
Imported WebKit commit ff52235a78888e5cb8e286a828a8698042200e67 (http://svn.webkit.org/repository/webkit/trunk@122948)
New snapshot that should fix the rendering issues recently introduced
Diffstat (limited to 'Source/JavaScriptCore')
 Source/JavaScriptCore/ChangeLog                                | 463
 Source/JavaScriptCore/Configurations/FeatureDefines.xcconfig   |   3
 Source/JavaScriptCore/Configurations/Version.xcconfig          |   2
 Source/JavaScriptCore/GNUmakefile.list.am                      |   2
 Source/JavaScriptCore/JavaScriptCore.pri                       |   6
 Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj |   8
 Source/JavaScriptCore/assembler/MacroAssembler.h               |  15
 Source/JavaScriptCore/assembler/MacroAssemblerARM.h            |   7
 Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h          |   6
 Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h        |   7
 Source/JavaScriptCore/assembler/MacroAssemblerX86.h            |   7
 Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h         |  22
 Source/JavaScriptCore/assembler/X86Assembler.h                 |  19
 Source/JavaScriptCore/bytecode/StructureStubInfo.h             |   3
 Source/JavaScriptCore/create_jit_stubs                         |  22
 Source/JavaScriptCore/dfg/DFGAbstractState.cpp                 | 123
 Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp                |   5
 Source/JavaScriptCore/dfg/DFGCCallHelpers.h                    |  52
 Source/JavaScriptCore/dfg/DFGCSEPhase.cpp                      |  23
 Source/JavaScriptCore/dfg/DFGFixupPhase.cpp                    |  17
 Source/JavaScriptCore/dfg/DFGGPRInfo.h                         |   6
 Source/JavaScriptCore/dfg/DFGGraph.h                           |  39
 Source/JavaScriptCore/dfg/DFGJITCompiler.cpp                   |   2
 Source/JavaScriptCore/dfg/DFGJITCompiler.h                     |   9
 Source/JavaScriptCore/dfg/DFGNodeType.h                        |   7
 Source/JavaScriptCore/dfg/DFGOperations.cpp                    |   9
 Source/JavaScriptCore/dfg/DFGOperations.h                      |   1
 Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp    |   6
 Source/JavaScriptCore/dfg/DFGRegisterBank.h                    |   5
 Source/JavaScriptCore/dfg/DFGRegisterSet.h                     | 217
 Source/JavaScriptCore/dfg/DFGRepatch.cpp                       | 218
 Source/JavaScriptCore/dfg/DFGScratchRegisterAllocator.h        | 192
 Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp                |   8
 Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h                  |  20
 Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp           | 143
 Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp              | 152
 Source/JavaScriptCore/heap/CopiedAllocator.h                   | 114
 Source/JavaScriptCore/heap/CopiedBlock.h                       |  76
 Source/JavaScriptCore/heap/CopiedSpace.cpp                     |  32
 Source/JavaScriptCore/heap/CopiedSpace.h                       |   2
 Source/JavaScriptCore/heap/CopiedSpaceInlineMethods.h          |  30
 Source/JavaScriptCore/heap/MachineStackMarker.cpp              |  14
 Source/JavaScriptCore/heap/MachineStackMarker.h                |   4
 Source/JavaScriptCore/heap/MarkStack.cpp                       |  32
 Source/JavaScriptCore/heap/SlotVisitor.h                       |   3
 Source/JavaScriptCore/jit/JIT.h                                |   2
 Source/JavaScriptCore/jit/JITInlineMethods.h                   |  25
 47 files changed, 1735 insertions(+), 445 deletions(-)
diff --git a/Source/JavaScriptCore/ChangeLog b/Source/JavaScriptCore/ChangeLog
index 79b7789b2..0d7945765 100644
--- a/Source/JavaScriptCore/ChangeLog
+++ b/Source/JavaScriptCore/ChangeLog
@@ -1,20 +1,457 @@
-2012-06-13 Patrick Gansterer <paroga@webkit.org>
+2012-07-17 Filip Pizlo <fpizlo@apple.com>
- [WIN] Remove dependency on pthread from MachineStackMarker
- https://bugs.webkit.org/show_bug.cgi?id=68429
+ DFG 32-bit PutById transition stub passes the payload/tag arguments to a DFG operation in the wrong order
+ https://bugs.webkit.org/show_bug.cgi?id=91576
- Reviewed by NOBODY (OOPS!).
+ Reviewed by Gavin Barraclough.
+
+ * dfg/DFGRepatch.cpp:
+ (JSC::DFG::emitPutTransitionStub):
- Implement pthread TLS functionality with native windows functions.
+2012-07-17 Filip Pizlo <fpizlo@apple.com>
- * heap/MachineStackMarker.cpp: Use the new functions instead of pthread directly.
- * heap/MachineStackMarker.h:
- * wtf/ThreadSpecific.h:
- (WTF::ThreadSpecificKeyCreate): Added wrapper around pthread_key_create.
- (WTF::ThreadSpecificKeyDelete): Added wrapper around pthread_key_delete.
- (WTF::ThreadSpecificSet): Added wrapper around pthread_setspecific.
- (WTF::ThreadSpecificGet): Added wrapper around pthread_getspecific.
- * wtf/ThreadSpecificWin.cpp:
+ [Qt] REGRESSION(r122768, r122771): They broke jquery/data.html and inspector/elements/edit-dom-actions.html
+ https://bugs.webkit.org/show_bug.cgi?id=91476
+
+ Reviewed by Mark Hahnenberg.
+
+ The 32-bit repatching code was not correctly adapted to the new world where there may not always
+ be an available scratch register. Fixed it by ensuring that the scratch register we select does
+ not overlap with the value tag.
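A minimal sketch of the fix's idea (the RegisterSet type is the one added in this patch; the helper name and exact signatures here are assumptions for illustration):

    // Hypothetical helper: choose a scratch GPR that cannot alias the
    // registers holding the base object, the value payload, or the value tag.
    static GPRReg selectNonConflictingScratchGPR(GPRReg baseGPR, GPRReg valueGPR, GPRReg valueTagGPR)
    {
        DFG::RegisterSet used;
        used.set(baseGPR);
        used.set(valueGPR);
        used.set(valueTagGPR); // on 32-bit, clobbering the tag corrupts the value
        return used.getFreeGPR(); // some GPR whose bit is not set
    }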
+
+ * dfg/DFGRepatch.cpp:
+ (JSC::DFG::generateProtoChainAccessStub):
+ (JSC::DFG::tryCacheGetByID):
+ (JSC::DFG::tryBuildGetByIDList):
+ (JSC::DFG::emitPutReplaceStub):
+
+2012-07-17 Gabor Rapcsanyi <rgabor@webkit.org>
+
+ Unreviewed buildfix from Zoltan Herczeg after r122768.
+
+ * dfg/DFGCCallHelpers.h:
+ (JSC::DFG::CCallHelpers::setupArgumentsWithExecState):
+ (CCallHelpers):
+
+2012-07-17 David Barr <davidbarr@chromium.org>
+
+ Introduce ENABLE_CSS_IMAGE_ORIENTATION compile flag
+ https://bugs.webkit.org/show_bug.cgi?id=89055
+
+ Reviewed by Kent Tamura.
+
+ The css3-images module is at candidate recommendation.
+ http://www.w3.org/TR/2012/CR-css3-images-20120417/#the-image-orientation
+
+ Add a configuration option for CSS image-orientation support, disabling it by default.
+
+ * Configurations/FeatureDefines.xcconfig:
+
+2012-07-16 Filip Pizlo <fpizlo@apple.com>
+
+ Unreviewed, roll out r122790 because it broke the Windows build. I'm not
+ sure what to do with exported symbols that are predicated on NDEBUG.
+
+ * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def:
+ * bytecode/CodeBlock.cpp:
+ (JSC):
+ * bytecode/CodeBlock.h:
+ (CodeBlock):
+ * bytecompiler/BytecodeGenerator.cpp:
+ (JSC::BytecodeGenerator::generate):
+ * dfg/DFGJITCompiler.cpp:
+ (JSC::DFG::JITCompiler::link):
+ * interpreter/Interpreter.cpp:
+ (JSC):
+ (JSC::Interpreter::dumpRegisters):
+ (JSC::getCallerInfo):
+ (JSC::Interpreter::getStackTrace):
+ (JSC::Interpreter::retrieveCallerFromVMCode):
+ * interpreter/Interpreter.h:
+ (Interpreter):
+ * jsc.cpp:
+ (GlobalObject::finishCreation):
+
+2012-07-16 Oliver Hunt <oliver@apple.com>
+
+ dumpCallFrame is broken in ToT
+ https://bugs.webkit.org/show_bug.cgi?id=91444
+
+ Reviewed by Gavin Barraclough.
+
+ Various changes have been made to the SF calling convention, but
+ dumpCallFrame has not been updated to reflect these changes.
+ That resulted in bogus information as well as numerous
+ assertions of sadness.
+
+ This patch makes dumpCallFrame actually work again and adds the
+ wonderful feature of telling you the name of the variable that a
+ register reflects, or what value it contains.
+
+ * bytecode/CodeBlock.cpp:
+ (JSC::CodeBlock::nameForRegister):
+ A really inefficient mechanism for finding the name of a local register.
+ This should only ever be used by debug code so this should be okay.
+ * bytecode/CodeBlock.h:
+ (CodeBlock):
+ * bytecompiler/BytecodeGenerator.cpp:
+ (JSC::BytecodeGenerator::generate):
+ Debug builds no longer throw away a function's symbol table; this allows
+ us to actually perform a register-number-to-name mapping.
+ * dfg/DFGJITCompiler.cpp:
+ (JSC::DFG::JITCompiler::link):
+ We weren't propagating the bytecode offset here, leading to assertions
+ in debug builds when dumping the bytecode of DFG-compiled code.
+ * interpreter/Interpreter.cpp:
+ (JSC):
+ (JSC::Interpreter::dumpRegisters):
+ Rework to actually be correct.
+ (JSC::getCallerInfo):
+ Return the bytecode offset as well now, given that we have to determine it
+ anyway.
+ (JSC::Interpreter::getStackTrace):
+ (JSC::Interpreter::retrieveCallerFromVMCode):
+ * interpreter/Interpreter.h:
+ (Interpreter):
+ * jsc.cpp:
+ (GlobalObject::finishCreation):
+ (functionDumpCallFrame):
+ Give debug builds of JSC a method for calling dumpCallFrame so we can
+ inspect a callframe without requiring us to break in a debugger.
+
+2012-07-16 Filip Pizlo <fpizlo@apple.com>
+
+ Unreviewed, adding forgotten files.
+
+ * dfg/DFGRegisterSet.h: Added.
+ (DFG):
+ (RegisterSet):
+ (JSC::DFG::RegisterSet::RegisterSet):
+ (JSC::DFG::RegisterSet::asPOD):
+ (JSC::DFG::RegisterSet::copyInfo):
+ (JSC::DFG::RegisterSet::set):
+ (JSC::DFG::RegisterSet::setGPRByIndex):
+ (JSC::DFG::RegisterSet::clear):
+ (JSC::DFG::RegisterSet::get):
+ (JSC::DFG::RegisterSet::getGPRByIndex):
+ (JSC::DFG::RegisterSet::getFreeGPR):
+ (JSC::DFG::RegisterSet::setFPRByIndex):
+ (JSC::DFG::RegisterSet::getFPRByIndex):
+ (JSC::DFG::RegisterSet::setByIndex):
+ (JSC::DFG::RegisterSet::getByIndex):
+ (JSC::DFG::RegisterSet::numberOfSetGPRs):
+ (JSC::DFG::RegisterSet::numberOfSetFPRs):
+ (JSC::DFG::RegisterSet::numberOfSetRegisters):
+ (JSC::DFG::RegisterSet::setBit):
+ (JSC::DFG::RegisterSet::clearBit):
+ (JSC::DFG::RegisterSet::getBit):
+ * dfg/DFGScratchRegisterAllocator.h: Added.
+ (DFG):
+ (ScratchRegisterAllocator):
+ (JSC::DFG::ScratchRegisterAllocator::ScratchRegisterAllocator):
+ (JSC::DFG::ScratchRegisterAllocator::lock):
+ (JSC::DFG::ScratchRegisterAllocator::allocateScratch):
+ (JSC::DFG::ScratchRegisterAllocator::allocateScratchGPR):
+ (JSC::DFG::ScratchRegisterAllocator::allocateScratchFPR):
+ (JSC::DFG::ScratchRegisterAllocator::didReuseRegisters):
+ (JSC::DFG::ScratchRegisterAllocator::preserveReusedRegistersByPushing):
+ (JSC::DFG::ScratchRegisterAllocator::restoreReusedRegistersByPopping):
+ (JSC::DFG::ScratchRegisterAllocator::desiredScratchBufferSize):
+ (JSC::DFG::ScratchRegisterAllocator::preserveUsedRegistersToScratchBuffer):
+ (JSC::DFG::ScratchRegisterAllocator::restoreUsedRegistersFromScratchBuffer):
+
+2012-07-15 Filip Pizlo <fpizlo@apple.com>
+
+ DFG PutById transition should handle storage allocation, and inline it
+ https://bugs.webkit.org/show_bug.cgi?id=91337
+
+ Reviewed by Oliver Hunt.
+
+ This enables the patching of DFG PutById to handle the out-of-line storage
+ allocation case. Furthermore, it inlines out-of-line storage allocation (and
+ reallocation) into the generated stubs.
+
+ To do this, this patch adds the ability to store the relevant register
+ allocation state (i.e. the set of in-use registers) in the structure stub
+ info so that the stub generation code can more flexibly select scratch
+ registers: sometimes it needs none, sometimes one, and sometimes up to
+ three. Moreover, to make the stub generation register allocation simple and
+ maintainable, this patch introduces a reusable scratch register allocator
+ class. This register allocator understands that some registers are in use by
+ the main path code and so must be spilled as necessary, other registers are
+ locked for use in the stub itself and so cannot even be spilled, while still
+ others may be allocated for scratch purposes. A scratch register that is
+ used must be spilled. If a register is locked, it cannot be used as a
+ scratch register. If a register is used, it can be used as a scratch
+ register so long as it is spilled.
+
+ This is a sub-1% speed-up on V8 and neutral elsewhere.
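A minimal usage sketch of the allocator described above (the method names come from the new DFGScratchRegisterAllocator.h listed below; the surrounding stub-generation context and exact signatures are assumptions):

    // usedRegisters: registers live on the main path; they may be handed out
    // as scratch registers, but only if they are spilled first.
    ScratchRegisterAllocator allocator(usedRegisters);
    allocator.lock(baseGPR);   // locked: in use by the stub itself, never spillable
    allocator.lock(valueGPR);
    GPRReg scratchGPR = allocator.allocateScratchGPR(); // may reuse a used register
    if (allocator.didReuseRegisters())
        allocator.preserveReusedRegistersByPushing(stubJit); // spill reused registers
    // ... emit the stub body, freely clobbering scratchGPR ...
    if (allocator.didReuseRegisters())
        allocator.restoreReusedRegistersByPopping(stubJit);  // reload on the way out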
+
+ * GNUmakefile.list.am:
+ * JavaScriptCore.xcodeproj/project.pbxproj:
+ * assembler/MacroAssemblerCodeRef.h:
+ (FunctionPtr):
+ (JSC::FunctionPtr::FunctionPtr):
+ * bytecode/StructureStubInfo.h:
+ * dfg/DFGCCallHelpers.h:
+ (JSC::DFG::CCallHelpers::setupArgumentsWithExecState):
+ (CCallHelpers):
+ * dfg/DFGGPRInfo.h:
+ * dfg/DFGJITCompiler.cpp:
+ (JSC::DFG::JITCompiler::link):
+ * dfg/DFGJITCompiler.h:
+ (JSC::DFG::PropertyAccessRecord::PropertyAccessRecord):
+ (PropertyAccessRecord):
+ * dfg/DFGOperations.cpp:
+ * dfg/DFGOperations.h:
+ * dfg/DFGRegisterBank.h:
+ (JSC::DFG::RegisterBank::isInUse):
+ (RegisterBank):
+ * dfg/DFGRegisterSet.h: Added.
+ (DFG):
+ (RegisterSet):
+ (JSC::DFG::RegisterSet::RegisterSet):
+ (JSC::DFG::RegisterSet::asPOD):
+ (JSC::DFG::RegisterSet::copyInfo):
+ (JSC::DFG::RegisterSet::set):
+ (JSC::DFG::RegisterSet::setGPRByIndex):
+ (JSC::DFG::RegisterSet::clear):
+ (JSC::DFG::RegisterSet::get):
+ (JSC::DFG::RegisterSet::getGPRByIndex):
+ (JSC::DFG::RegisterSet::getFreeGPR):
+ (JSC::DFG::RegisterSet::setFPRByIndex):
+ (JSC::DFG::RegisterSet::getFPRByIndex):
+ (JSC::DFG::RegisterSet::setByIndex):
+ (JSC::DFG::RegisterSet::getByIndex):
+ (JSC::DFG::RegisterSet::numberOfSetGPRs):
+ (JSC::DFG::RegisterSet::numberOfSetFPRs):
+ (JSC::DFG::RegisterSet::numberOfSetRegisters):
+ (JSC::DFG::RegisterSet::setBit):
+ (JSC::DFG::RegisterSet::clearBit):
+ (JSC::DFG::RegisterSet::getBit):
+ * dfg/DFGRepatch.cpp:
+ (JSC::DFG::generateProtoChainAccessStub):
+ (JSC::DFG::tryCacheGetByID):
+ (JSC::DFG::tryBuildGetByIDList):
+ (JSC::DFG::emitPutReplaceStub):
+ (JSC::DFG::emitPutTransitionStub):
+ (JSC::DFG::tryCachePutByID):
+ (JSC::DFG::tryBuildPutByIdList):
+ * dfg/DFGScratchRegisterAllocator.h: Added.
+ (DFG):
+ (ScratchRegisterAllocator):
+ (JSC::DFG::ScratchRegisterAllocator::ScratchRegisterAllocator):
+ (JSC::DFG::ScratchRegisterAllocator::lock):
+ (JSC::DFG::ScratchRegisterAllocator::allocateScratch):
+ (JSC::DFG::ScratchRegisterAllocator::allocateScratchGPR):
+ (JSC::DFG::ScratchRegisterAllocator::allocateScratchFPR):
+ (JSC::DFG::ScratchRegisterAllocator::didReuseRegisters):
+ (JSC::DFG::ScratchRegisterAllocator::preserveReusedRegistersByPushing):
+ (JSC::DFG::ScratchRegisterAllocator::restoreReusedRegistersByPopping):
+ (JSC::DFG::ScratchRegisterAllocator::desiredScratchBufferSize):
+ (JSC::DFG::ScratchRegisterAllocator::preserveUsedRegistersToScratchBuffer):
+ (JSC::DFG::ScratchRegisterAllocator::restoreUsedRegistersFromScratchBuffer):
+ * dfg/DFGSpeculativeJIT.h:
+ (SpeculativeJIT):
+ (JSC::DFG::SpeculativeJIT::usedRegisters):
+ * dfg/DFGSpeculativeJIT32_64.cpp:
+ (JSC::DFG::SpeculativeJIT::cachedGetById):
+ (JSC::DFG::SpeculativeJIT::cachedPutById):
+ (JSC::DFG::SpeculativeJIT::compile):
+ * dfg/DFGSpeculativeJIT64.cpp:
+ (JSC::DFG::SpeculativeJIT::cachedGetById):
+ (JSC::DFG::SpeculativeJIT::cachedPutById):
+ (JSC::DFG::SpeculativeJIT::compile):
+ * heap/CopiedAllocator.h:
+ (CopiedAllocator):
+ (JSC::CopiedAllocator::fastPathShouldSucceed):
+ (JSC):
+
+2012-07-16 Patrick Gansterer <paroga@webkit.org>
+
+ Add dfg switch to create_jit_stubs script
+ https://bugs.webkit.org/show_bug.cgi?id=91256
+
+ Reviewed by Geoffrey Garen.
+
+ * create_jit_stubs: Add a switch to enable or disable the generation of
+ stub functions in #if ENABLE(DFG_JIT) conditions.
+
+2012-07-16 Gabor Rapcsanyi <rgabor@webkit.org>
+
+ Unreviewed buildfix after r122729. Typo fix.
+
+ * assembler/MacroAssemblerARM.h:
+ (JSC::MacroAssemblerARM::add32):
+
+2012-07-16 Gabor Rapcsanyi <rgabor@webkit.org>
+
+ Unreviewed buildfix from Zoltan Herczeg after r122677.
+ Implement the missing add32 function in MacroAssemblerARM.
+
+ * assembler/MacroAssemblerARM.h:
+ (JSC::MacroAssemblerARM::add32):
+ (MacroAssemblerARM):
+
+2012-07-14 Filip Pizlo <fpizlo@apple.com>
+
+ DFG PutByVal opcodes should accept more than 3 operands
+ https://bugs.webkit.org/show_bug.cgi?id=91332
+
+ Reviewed by Oliver Hunt.
+
+ Turned PutByVal/PutByValAlias into var-arg nodes, so that we can give them
+ 4 or more operands in the future.
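On the consumer side this looks as sketched below, using the varArgChild() accessor this patch adds to DFG::Graph:

    // With NodeHasVarArgs set, a PutByVal's operands live in m_varArgChildren
    // and are reached by index instead of the fixed child1()/child2()/child3().
    Edge base     = m_graph.varArgChild(node, 0); // array
    Edge property = m_graph.varArgChild(node, 1); // index
    Edge value    = m_graph.varArgChild(node, 2); // value to store
    // A hypothetical fourth operand would simply be varArgChild(node, 3).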
+
+ * dfg/DFGAbstractState.cpp:
+ (JSC::DFG::AbstractState::execute):
+ * dfg/DFGByteCodeParser.cpp:
+ (JSC::DFG::ByteCodeParser::parseBlock):
+ * dfg/DFGCSEPhase.cpp:
+ (JSC::DFG::CSEPhase::getByValLoadElimination):
+ (JSC::DFG::CSEPhase::getIndexedPropertyStorageLoadElimination):
+ (JSC::DFG::CSEPhase::performNodeCSE):
+ * dfg/DFGFixupPhase.cpp:
+ (JSC::DFG::FixupPhase::fixupNode):
+ (JSC::DFG::FixupPhase::fixDoubleEdge):
+ * dfg/DFGGraph.h:
+ (JSC::DFG::Graph::byValIsPure):
+ (JSC::DFG::Graph::varArgNumChildren):
+ (Graph):
+ (JSC::DFG::Graph::numChildren):
+ (JSC::DFG::Graph::varArgChild):
+ (JSC::DFG::Graph::child):
+ * dfg/DFGNodeType.h:
+ (DFG):
+ * dfg/DFGPredictionPropagationPhase.cpp:
+ (JSC::DFG::PredictionPropagationPhase::propagate):
+ * dfg/DFGSpeculativeJIT.cpp:
+ (JSC::DFG::SpeculativeJIT::compilePutByValForIntTypedArray):
+ (JSC::DFG::SpeculativeJIT::compilePutByValForFloatTypedArray):
+ * dfg/DFGSpeculativeJIT32_64.cpp:
+ (JSC::DFG::SpeculativeJIT::compile):
+ * dfg/DFGSpeculativeJIT64.cpp:
+ (JSC::DFG::SpeculativeJIT::compile):
+
+2012-07-14 Filip Pizlo <fpizlo@apple.com>
+
+ Rationalize and optimize storage allocation
+ https://bugs.webkit.org/show_bug.cgi?id=91303
+
+ Reviewed by Oliver Hunt.
+
+ This implements a backwards bump allocator for copied space storage
+ allocation, shown in pseudo-code below:
+
+ pointer bump(size) {
+ pointer tmp = allocator->remaining;
+ tmp -= size;
+ if (tmp < 0)
+ fail;
+ allocator->remaining = tmp;
+ return allocator->payloadEnd - tmp - size;
+ }
+
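The same logic as a standalone, compilable sketch (field names follow CopiedAllocator/CopiedBlock in this patch, but this is an illustrative rendering, not the shipped implementation):

    // Backwards bump allocation: m_remaining (signed) counts down from the
    // payload capacity, so the subtraction's sign doubles as the capacity check.
    char* tryBump(size_t size)
    {
        ptrdiff_t tmp = m_remaining - static_cast<ptrdiff_t>(size);
        if (tmp < 0)
            return 0; // fail: caller takes the slow path and gets a new block
        m_remaining = tmp;
        // Memory is handed out from low addresses to high, which keeps the
        // extend-in-place reallocation fast path cheap.
        return m_payloadEnd - tmp - size;
    }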
+ The advantage of this allocator is that it:
+
+ - Only requires one comparison in the common case where size is known to
+ not be huge, and this comparison can be done by checking the sign bit
+ of the subtraction.
+
+ - Can be implemented even when only one register is available. This
+ register is reused for both temporary storage during allocation and
+ for the result.
+
+ - Preserves the behavior that memory in a block is filled in from lowest
+ address to highest address, which allows for a cheap reallocation fast
+ path.
+
+ - Is resilient against the block used for allocation being the last one
+ in virtual memory, which would otherwise risk overflowing the bump
+ pointer, despite only doing one branch.
+
+ In order to implement this allocator using the smallest possible chunk
+ of code, I refactored the copied space code so that all of the allocation
+ logic is in CopiedAllocator, and all of the state is in either
+ CopiedBlock or CopiedAllocator. This should make changing the allocation
+ fast path easier in the future.
+
+ In order to do this, I needed to add some new assembler support,
+ particularly for various forms of add(address, register) and negPtr().
+
+ This is performance neutral. The purpose of this change is to facilitate
+ further inlining of storage allocation without having to reserve
+ additional registers or emit too much code.
+
+ * assembler/MacroAssembler.h:
+ (JSC::MacroAssembler::addPtr):
+ (MacroAssembler):
+ (JSC::MacroAssembler::negPtr):
+ * assembler/MacroAssemblerARMv7.h:
+ (MacroAssemblerARMv7):
+ (JSC::MacroAssemblerARMv7::add32):
+ * assembler/MacroAssemblerX86.h:
+ (JSC::MacroAssemblerX86::add32):
+ (MacroAssemblerX86):
+ * assembler/MacroAssemblerX86_64.h:
+ (MacroAssemblerX86_64):
+ (JSC::MacroAssemblerX86_64::addPtr):
+ (JSC::MacroAssemblerX86_64::negPtr):
+ * assembler/X86Assembler.h:
+ (X86Assembler):
+ (JSC::X86Assembler::addl_mr):
+ (JSC::X86Assembler::addq_mr):
+ (JSC::X86Assembler::negq_r):
+ * heap/CopiedAllocator.h:
+ (CopiedAllocator):
+ (JSC::CopiedAllocator::isValid):
+ (JSC::CopiedAllocator::CopiedAllocator):
+ (JSC::CopiedAllocator::tryAllocate):
+ (JSC):
+ (JSC::CopiedAllocator::tryReallocate):
+ (JSC::CopiedAllocator::forceAllocate):
+ (JSC::CopiedAllocator::resetCurrentBlock):
+ (JSC::CopiedAllocator::setCurrentBlock):
+ (JSC::CopiedAllocator::currentCapacity):
+ * heap/CopiedBlock.h:
+ (CopiedBlock):
+ (JSC::CopiedBlock::create):
+ (JSC::CopiedBlock::zeroFillWilderness):
+ (JSC::CopiedBlock::CopiedBlock):
+ (JSC::CopiedBlock::payloadEnd):
+ (JSC):
+ (JSC::CopiedBlock::payloadCapacity):
+ (JSC::CopiedBlock::data):
+ (JSC::CopiedBlock::dataEnd):
+ (JSC::CopiedBlock::dataSize):
+ (JSC::CopiedBlock::wilderness):
+ (JSC::CopiedBlock::wildernessEnd):
+ (JSC::CopiedBlock::wildernessSize):
+ (JSC::CopiedBlock::size):
+ * heap/CopiedSpace.cpp:
+ (JSC::CopiedSpace::tryAllocateSlowCase):
+ (JSC::CopiedSpace::tryAllocateOversize):
+ (JSC::CopiedSpace::tryReallocate):
+ (JSC::CopiedSpace::doneFillingBlock):
+ (JSC::CopiedSpace::doneCopying):
+ * heap/CopiedSpace.h:
+ (CopiedSpace):
+ * heap/CopiedSpaceInlineMethods.h:
+ (JSC::CopiedSpace::startedCopying):
+ (JSC::CopiedSpace::allocateBlockForCopyingPhase):
+ (JSC::CopiedSpace::allocateBlock):
+ (JSC::CopiedSpace::tryAllocate):
+ (JSC):
+ * heap/MarkStack.cpp:
+ (JSC::SlotVisitor::startCopying):
+ (JSC::SlotVisitor::allocateNewSpace):
+ (JSC::SlotVisitor::doneCopying):
+ * heap/SlotVisitor.h:
+ (JSC::SlotVisitor::SlotVisitor):
+ * jit/JIT.h:
+ * jit/JITInlineMethods.h:
+ (JSC::JIT::emitAllocateBasicStorage):
+ (JSC::JIT::emitAllocateJSArray):
2012-07-13 Mark Lam <mark.lam@apple.com>
diff --git a/Source/JavaScriptCore/Configurations/FeatureDefines.xcconfig b/Source/JavaScriptCore/Configurations/FeatureDefines.xcconfig
index 5fa30a6e9..dcea98333 100644
--- a/Source/JavaScriptCore/Configurations/FeatureDefines.xcconfig
+++ b/Source/JavaScriptCore/Configurations/FeatureDefines.xcconfig
@@ -41,6 +41,7 @@ ENABLE_CSS3_FLEXBOX = ENABLE_CSS3_FLEXBOX;
ENABLE_CSS_EXCLUSIONS = ENABLE_CSS_EXCLUSIONS;
ENABLE_CSS_FILTERS = ENABLE_CSS_FILTERS;
ENABLE_CSS_SHADERS = ENABLE_CSS_SHADERS;
+ENABLE_CSS_IMAGE_ORIENTATION = ;
ENABLE_CSS_IMAGE_RESOLUTION = ;
ENABLE_CSS_REGIONS = ENABLE_CSS_REGIONS;
ENABLE_CSS_VARIABLES = ;
@@ -133,4 +134,4 @@ ENABLE_WEB_TIMING = ;
ENABLE_WORKERS = ENABLE_WORKERS;
ENABLE_XSLT = ENABLE_XSLT;
-FEATURE_DEFINES = $(ENABLE_3D_RENDERING) $(ENABLE_ACCELERATED_2D_CANVAS) $(ENABLE_ANIMATION_API) $(ENABLE_BLOB) $(ENABLE_CHANNEL_MESSAGING) $(ENABLE_CSP_NEXT) $(ENABLE_CSS3_FLEXBOX) $(ENABLE_CSS_EXCLUSIONS) $(ENABLE_CSS_FILTERS) $(ENABLE_CSS_IMAGE_RESOLUTION) $(ENABLE_CSS_REGIONS) $(ENABLE_CSS_SHADERS) $(ENABLE_CSS_VARIABLES) $(ENABLE_CUSTOM_SCHEME_HANDLER) $(ENABLE_DASHBOARD_SUPPORT) $(ENABLE_DATALIST) $(ENABLE_DATA_TRANSFER_ITEMS) $(ENABLE_DETAILS) $(ENABLE_DEVICE_ORIENTATION) $(ENABLE_DIALOG_ELEMENT) $(ENABLE_DIRECTORY_UPLOAD) $(ENABLE_FILE_SYSTEM) $(ENABLE_FILTERS) $(ENABLE_FULLSCREEN_API) $(ENABLE_GAMEPAD) $(ENABLE_GEOLOCATION) $(ENABLE_HIGH_DPI_CANVAS) $(ENABLE_ICONDATABASE) $(ENABLE_IFRAME_SEAMLESS) $(ENABLE_INDEXED_DATABASE) $(ENABLE_INPUT_TYPE_COLOR) $(ENABLE_INPUT_SPEECH) $(ENABLE_INPUT_TYPE_DATE) $(ENABLE_INPUT_TYPE_DATETIME) $(ENABLE_INPUT_TYPE_DATETIMELOCAL) $(ENABLE_INPUT_TYPE_MONTH) $(ENABLE_INPUT_TYPE_TIME) $(ENABLE_INPUT_TYPE_WEEK) $(ENABLE_JAVASCRIPT_DEBUGGER) $(ENABLE_LEGACY_CSS_VENDOR_PREFIXES) $(ENABLE_LEGACY_NOTIFICATIONS) $(ENABLE_LINK_PREFETCH) $(ENABLE_LINK_PRERENDER) $(ENABLE_MATHML) $(ENABLE_MEDIA_SOURCE) $(ENABLE_MEDIA_STATISTICS) $(ENABLE_METER_TAG) $(ENABLE_MICRODATA) $(ENABLE_MUTATION_OBSERVERS) $(ENABLE_NOTIFICATIONS) $(ENABLE_PAGE_VISIBILITY_API) $(ENABLE_PROGRESS_TAG) $(ENABLE_QUOTA) $(ENABLE_REGISTER_PROTOCOL_HANDLER) $(ENABLE_REQUEST_ANIMATION_FRAME) $(ENABLE_SCRIPTED_SPEECH) $(ENABLE_SHADOW_DOM) $(ENABLE_SHARED_WORKERS) $(ENABLE_SQL_DATABASE) $(ENABLE_STYLE_SCOPED) $(ENABLE_SVG) $(ENABLE_SVG_DOM_OBJC_BINDINGS) $(ENABLE_SVG_FONTS) $(ENABLE_TEXT_AUTOSIZING) $(ENABLE_TEXT_NOTIFICATIONS_ONLY) $(ENABLE_TOUCH_ICON_LOADING) $(ENABLE_UNDO_MANAGER) $(ENABLE_VIDEO) $(ENABLE_VIDEO_TRACK) $(ENABLE_WEBGL) $(ENABLE_WEB_AUDIO) $(ENABLE_WEB_SOCKETS) $(ENABLE_WEB_TIMING) $(ENABLE_WORKERS) $(ENABLE_XSLT);
+FEATURE_DEFINES = $(ENABLE_3D_RENDERING) $(ENABLE_ACCELERATED_2D_CANVAS) $(ENABLE_ANIMATION_API) $(ENABLE_BLOB) $(ENABLE_CHANNEL_MESSAGING) $(ENABLE_CSP_NEXT) $(ENABLE_CSS3_FLEXBOX) $(ENABLE_CSS_EXCLUSIONS) $(ENABLE_CSS_FILTERS) $(ENABLE_CSS_IMAGE_ORIENTATION) $(ENABLE_CSS_IMAGE_RESOLUTION) $(ENABLE_CSS_REGIONS) $(ENABLE_CSS_SHADERS) $(ENABLE_CSS_VARIABLES) $(ENABLE_CUSTOM_SCHEME_HANDLER) $(ENABLE_DASHBOARD_SUPPORT) $(ENABLE_DATALIST) $(ENABLE_DATA_TRANSFER_ITEMS) $(ENABLE_DETAILS) $(ENABLE_DEVICE_ORIENTATION) $(ENABLE_DIALOG_ELEMENT) $(ENABLE_DIRECTORY_UPLOAD) $(ENABLE_FILE_SYSTEM) $(ENABLE_FILTERS) $(ENABLE_FULLSCREEN_API) $(ENABLE_GAMEPAD) $(ENABLE_GEOLOCATION) $(ENABLE_HIGH_DPI_CANVAS) $(ENABLE_ICONDATABASE) $(ENABLE_IFRAME_SEAMLESS) $(ENABLE_INDEXED_DATABASE) $(ENABLE_INPUT_TYPE_COLOR) $(ENABLE_INPUT_SPEECH) $(ENABLE_INPUT_TYPE_DATE) $(ENABLE_INPUT_TYPE_DATETIME) $(ENABLE_INPUT_TYPE_DATETIMELOCAL) $(ENABLE_INPUT_TYPE_MONTH) $(ENABLE_INPUT_TYPE_TIME) $(ENABLE_INPUT_TYPE_WEEK) $(ENABLE_JAVASCRIPT_DEBUGGER) $(ENABLE_LEGACY_CSS_VENDOR_PREFIXES) $(ENABLE_LEGACY_NOTIFICATIONS) $(ENABLE_LINK_PREFETCH) $(ENABLE_LINK_PRERENDER) $(ENABLE_MATHML) $(ENABLE_MEDIA_SOURCE) $(ENABLE_MEDIA_STATISTICS) $(ENABLE_METER_TAG) $(ENABLE_MICRODATA) $(ENABLE_MUTATION_OBSERVERS) $(ENABLE_NOTIFICATIONS) $(ENABLE_PAGE_VISIBILITY_API) $(ENABLE_PROGRESS_TAG) $(ENABLE_QUOTA) $(ENABLE_REGISTER_PROTOCOL_HANDLER) $(ENABLE_REQUEST_ANIMATION_FRAME) $(ENABLE_SCRIPTED_SPEECH) $(ENABLE_SHADOW_DOM) $(ENABLE_SHARED_WORKERS) $(ENABLE_SQL_DATABASE) $(ENABLE_STYLE_SCOPED) $(ENABLE_SVG) $(ENABLE_SVG_DOM_OBJC_BINDINGS) $(ENABLE_SVG_FONTS) $(ENABLE_TEXT_AUTOSIZING) $(ENABLE_TEXT_NOTIFICATIONS_ONLY) $(ENABLE_TOUCH_ICON_LOADING) $(ENABLE_UNDO_MANAGER) $(ENABLE_VIDEO) $(ENABLE_VIDEO_TRACK) $(ENABLE_WEBGL) $(ENABLE_WEB_AUDIO) $(ENABLE_WEB_SOCKETS) $(ENABLE_WEB_TIMING) $(ENABLE_WORKERS) $(ENABLE_XSLT);
diff --git a/Source/JavaScriptCore/Configurations/Version.xcconfig b/Source/JavaScriptCore/Configurations/Version.xcconfig
index 9760c9ada..39ad1b2be 100644
--- a/Source/JavaScriptCore/Configurations/Version.xcconfig
+++ b/Source/JavaScriptCore/Configurations/Version.xcconfig
@@ -22,7 +22,7 @@
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
MAJOR_VERSION = 537;
-MINOR_VERSION = 1;
+MINOR_VERSION = 2;
TINY_VERSION = 0;
FULL_VERSION = $(MAJOR_VERSION).$(MINOR_VERSION);
diff --git a/Source/JavaScriptCore/GNUmakefile.list.am b/Source/JavaScriptCore/GNUmakefile.list.am
index 751d5657a..e21ad80d7 100644
--- a/Source/JavaScriptCore/GNUmakefile.list.am
+++ b/Source/JavaScriptCore/GNUmakefile.list.am
@@ -216,9 +216,11 @@ javascriptcore_sources += \
Source/JavaScriptCore/dfg/DFGRedundantPhiEliminationPhase.cpp \
Source/JavaScriptCore/dfg/DFGRedundantPhiEliminationPhase.h \
Source/JavaScriptCore/dfg/DFGRegisterBank.h \
+ Source/JavaScriptCore/dfg/DFGRegisterSet.h \
Source/JavaScriptCore/dfg/DFGRepatch.cpp \
Source/JavaScriptCore/dfg/DFGRepatch.h \
Source/JavaScriptCore/dfg/DFGScoreBoard.h \
+ Source/JavaScriptCore/dfg/DFGScratchRegisterAllocator.h \
Source/JavaScriptCore/dfg/DFGSilentRegisterSavePlan.h \
Source/JavaScriptCore/dfg/DFGSlowPathGenerator.h \
Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp \
diff --git a/Source/JavaScriptCore/JavaScriptCore.pri b/Source/JavaScriptCore/JavaScriptCore.pri
index f6580c51f..380bbaf1b 100644
--- a/Source/JavaScriptCore/JavaScriptCore.pri
+++ b/Source/JavaScriptCore/JavaScriptCore.pri
@@ -34,6 +34,12 @@ INCLUDEPATH += \
win32-* {
LIBS += -lwinmm
+
+ win32-g++* {
+ LIBS += -lpthreadGC2
+ } else:win32-msvc* {
+ LIBS += -lpthreadVC2
+ }
}
wince* {
diff --git a/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj b/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
index f9548f184..a81416659 100644
--- a/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
+++ b/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj
@@ -150,6 +150,8 @@
0F766D3515AE253B008F363E /* JumpReplacementWatchpoint.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F766D3315AE2535008F363E /* JumpReplacementWatchpoint.h */; settings = {ATTRIBUTES = (Private, ); }; };
0F766D3815AE4A1C008F363E /* StructureStubClearingWatchpoint.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F766D3615AE4A1A008F363E /* StructureStubClearingWatchpoint.cpp */; };
0F766D3915AE4A1F008F363E /* StructureStubClearingWatchpoint.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F766D3715AE4A1A008F363E /* StructureStubClearingWatchpoint.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ 0F766D4415B2A3C0008F363E /* DFGRegisterSet.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F766D4215B2A3BD008F363E /* DFGRegisterSet.h */; settings = {ATTRIBUTES = (Private, ); }; };
+ 0F766D4615B3701F008F363E /* DFGScratchRegisterAllocator.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F766D4515B3701D008F363E /* DFGScratchRegisterAllocator.h */; settings = {ATTRIBUTES = (Private, ); }; };
0F7700921402FF3C0078EB39 /* SamplingCounter.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F7700911402FF280078EB39 /* SamplingCounter.cpp */; };
0F7B294A14C3CD29007C3DB1 /* DFGCCallHelpers.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F7B294814C3CD23007C3DB1 /* DFGCCallHelpers.h */; settings = {ATTRIBUTES = (Private, ); }; };
0F7B294B14C3CD2F007C3DB1 /* DFGCapabilities.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FD82E1F14172C2F00179C94 /* DFGCapabilities.h */; settings = {ATTRIBUTES = (Private, ); }; };
@@ -895,6 +897,8 @@
0F766D3315AE2535008F363E /* JumpReplacementWatchpoint.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JumpReplacementWatchpoint.h; sourceTree = "<group>"; };
0F766D3615AE4A1A008F363E /* StructureStubClearingWatchpoint.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = StructureStubClearingWatchpoint.cpp; sourceTree = "<group>"; };
0F766D3715AE4A1A008F363E /* StructureStubClearingWatchpoint.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = StructureStubClearingWatchpoint.h; sourceTree = "<group>"; };
+ 0F766D4215B2A3BD008F363E /* DFGRegisterSet.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGRegisterSet.h; path = dfg/DFGRegisterSet.h; sourceTree = "<group>"; };
+ 0F766D4515B3701D008F363E /* DFGScratchRegisterAllocator.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGScratchRegisterAllocator.h; path = dfg/DFGScratchRegisterAllocator.h; sourceTree = "<group>"; };
0F77008E1402FDD60078EB39 /* SamplingCounter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SamplingCounter.h; sourceTree = "<group>"; };
0F7700911402FF280078EB39 /* SamplingCounter.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = SamplingCounter.cpp; sourceTree = "<group>"; };
0F7B294814C3CD23007C3DB1 /* DFGCCallHelpers.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGCCallHelpers.h; path = dfg/DFGCCallHelpers.h; sourceTree = "<group>"; };
@@ -2304,9 +2308,11 @@
0A4337BA1506218800991C95 /* DFGRedundantPhiEliminationPhase.cpp */,
0A4337BD1506219B00991C95 /* DFGRedundantPhiEliminationPhase.h */,
86EC9DC11328DF82002B2AD7 /* DFGRegisterBank.h */,
+ 0F766D4215B2A3BD008F363E /* DFGRegisterSet.h */,
86BB09BE138E381B0056702F /* DFGRepatch.cpp */,
86BB09BF138E381B0056702F /* DFGRepatch.h */,
86ECA3F9132DF25A002B2AD7 /* DFGScoreBoard.h */,
+ 0F766D4515B3701D008F363E /* DFGScratchRegisterAllocator.h */,
0F1E3A65153A21DF000F9456 /* DFGSilentRegisterSavePlan.h */,
0F1E3A501537C2CB000F9456 /* DFGSlowPathGenerator.h */,
86EC9DC21328DF82002B2AD7 /* DFGSpeculativeJIT.cpp */,
@@ -2850,6 +2856,8 @@
0F766D3115AA8112008F363E /* JITStubRoutine.h in Headers */,
0F766D3515AE253B008F363E /* JumpReplacementWatchpoint.h in Headers */,
0F766D3915AE4A1F008F363E /* StructureStubClearingWatchpoint.h in Headers */,
+ 0F766D4415B2A3C0008F363E /* DFGRegisterSet.h in Headers */,
+ 0F766D4615B3701F008F363E /* DFGScratchRegisterAllocator.h in Headers */,
);
runOnlyForDeploymentPostprocessing = 0;
};
diff --git a/Source/JavaScriptCore/assembler/MacroAssembler.h b/Source/JavaScriptCore/assembler/MacroAssembler.h
index 516ffac16..1a9af2989 100644
--- a/Source/JavaScriptCore/assembler/MacroAssembler.h
+++ b/Source/JavaScriptCore/assembler/MacroAssembler.h
@@ -280,6 +280,16 @@ public:
// On 32-bit platforms (i.e. x86), these methods directly map onto their 32-bit equivalents.
// FIXME: should this use a test for 32-bitness instead of this specific exception?
#if !CPU(X86_64)
+ void addPtr(Address src, RegisterID dest)
+ {
+ add32(src, dest);
+ }
+
+ void addPtr(AbsoluteAddress src, RegisterID dest)
+ {
+ add32(src, dest);
+ }
+
void addPtr(RegisterID src, RegisterID dest)
{
add32(src, dest);
@@ -314,6 +324,11 @@ public:
{
and32(imm, srcDest);
}
+
+ void negPtr(RegisterID dest)
+ {
+ neg32(dest);
+ }
void orPtr(RegisterID src, RegisterID dest)
{
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARM.h b/Source/JavaScriptCore/assembler/MacroAssemblerARM.h
index 8e123d423..2773b022c 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerARM.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerARM.h
@@ -107,6 +107,13 @@ public:
m_assembler.adds_r(dest, dest, m_assembler.getImm(imm.m_value, ARMRegisters::S0));
}
+ void add32(AbsoluteAddress src, RegisterID dest)
+ {
+ move(TrustedImmPtr(src.m_ptr), ARMRegisters::S1);
+ m_assembler.dtr_u(ARMAssembler::LoadUint32, ARMRegisters::S1, ARMRegisters::S1, 0);
+ add32(ARMRegisters::S1, dest);
+ }
+
void add32(Address src, RegisterID dest)
{
load32(src, ARMRegisters::S1);
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h b/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h
index 3694c9163..cf6f02ca9 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h
@@ -157,6 +157,12 @@ public:
{
add32(imm, dest, dest);
}
+
+ void add32(AbsoluteAddress src, RegisterID dest)
+ {
+ load32(src.m_ptr, dataTempRegister);
+ add32(dataTempRegister, dest);
+ }
void add32(TrustedImm32 imm, RegisterID src, RegisterID dest)
{
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h b/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
index a1b3a8338..c6db26597 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerCodeRef.h
@@ -126,6 +126,13 @@ public:
ASSERT_VALID_CODE_POINTER(m_value);
}
+ template<typename returnType, typename argType1, typename argType2, typename argType3, typename argType4, typename argType5>
+ FunctionPtr(returnType(*value)(argType1, argType2, argType3, argType4, argType5))
+ : m_value((void*)value)
+ {
+ ASSERT_VALID_CODE_POINTER(m_value);
+ }
+
// MSVC doesn't seem to treat functions with different calling conventions as
// different types; these methods already defined for fastcall, below.
#if CALLING_CONVENTION_IS_STDCALL && !OS(WINDOWS)
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerX86.h b/Source/JavaScriptCore/assembler/MacroAssemblerX86.h
index 45de8139f..da9dd8f2a 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerX86.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerX86.h
@@ -63,6 +63,11 @@ public:
m_assembler.addl_im(imm.m_value, address.m_ptr);
}
+ void add32(AbsoluteAddress address, RegisterID dest)
+ {
+ m_assembler.addl_mr(address.m_ptr, dest);
+ }
+
void add64(TrustedImm32 imm, AbsoluteAddress address)
{
m_assembler.addl_im(imm.m_value, address.m_ptr);
@@ -78,7 +83,7 @@ public:
{
m_assembler.orl_im(imm.m_value, address.m_ptr);
}
-
+
void sub32(TrustedImm32 imm, AbsoluteAddress address)
{
m_assembler.subl_im(imm.m_value, address.m_ptr);
diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h b/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h
index 1fb574b51..43bcddb64 100644
--- a/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h
+++ b/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h
@@ -63,6 +63,12 @@ public:
and32(imm, Address(scratchRegister));
}
+ void add32(AbsoluteAddress address, RegisterID dest)
+ {
+ move(TrustedImmPtr(address.m_ptr), scratchRegister);
+ add32(Address(scratchRegister), dest);
+ }
+
void or32(TrustedImm32 imm, AbsoluteAddress address)
{
move(TrustedImmPtr(address.m_ptr), scratchRegister);
@@ -140,6 +146,17 @@ public:
{
m_assembler.addq_rr(src, dest);
}
+
+ void addPtr(Address src, RegisterID dest)
+ {
+ m_assembler.addq_mr(src.offset, src.base, dest);
+ }
+
+ void addPtr(AbsoluteAddress src, RegisterID dest)
+ {
+ move(TrustedImmPtr(src.m_ptr), scratchRegister);
+ addPtr(Address(scratchRegister), dest);
+ }
void addPtr(TrustedImm32 imm, RegisterID srcDest)
{
@@ -182,6 +199,11 @@ public:
{
m_assembler.andq_ir(imm.m_value, srcDest);
}
+
+ void negPtr(RegisterID dest)
+ {
+ m_assembler.negq_r(dest);
+ }
void orPtr(RegisterID src, RegisterID dest)
{
diff --git a/Source/JavaScriptCore/assembler/X86Assembler.h b/Source/JavaScriptCore/assembler/X86Assembler.h
index cf8133266..83d681cf7 100644
--- a/Source/JavaScriptCore/assembler/X86Assembler.h
+++ b/Source/JavaScriptCore/assembler/X86Assembler.h
@@ -304,6 +304,13 @@ public:
{
m_formatter.oneByteOp(OP_ADD_GvEv, dst, base, offset);
}
+
+#if !CPU(X86_64)
+ void addl_mr(const void* addr, RegisterID dst)
+ {
+ m_formatter.oneByteOp(OP_ADD_GvEv, dst, addr);
+ }
+#endif
void addl_rm(RegisterID src, int offset, RegisterID base)
{
@@ -338,6 +345,11 @@ public:
m_formatter.oneByteOp64(OP_ADD_EvGv, src, dst);
}
+ void addq_mr(int offset, RegisterID base, RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_ADD_GvEv, dst, base, offset);
+ }
+
void addq_ir(int imm, RegisterID dst)
{
if (CAN_SIGN_EXTEND_8_32(imm)) {
@@ -443,6 +455,13 @@ public:
m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, dst);
}
+#if CPU(X86_64)
+ void negq_r(RegisterID dst)
+ {
+ m_formatter.oneByteOp64(OP_GROUP3_Ev, GROUP3_OP_NEG, dst);
+ }
+#endif
+
void negl_m(int offset, RegisterID base)
{
m_formatter.oneByteOp(OP_GROUP3_Ev, GROUP3_OP_NEG, base, offset);
diff --git a/Source/JavaScriptCore/bytecode/StructureStubInfo.h b/Source/JavaScriptCore/bytecode/StructureStubInfo.h
index 737ea88c2..a1bbc9c37 100644
--- a/Source/JavaScriptCore/bytecode/StructureStubInfo.h
+++ b/Source/JavaScriptCore/bytecode/StructureStubInfo.h
@@ -31,6 +31,7 @@
#if ENABLE(JIT)
#include "CodeOrigin.h"
+#include "DFGRegisterSet.h"
#include "Instruction.h"
#include "JITStubRoutine.h"
#include "MacroAssembler.h"
@@ -212,7 +213,7 @@ namespace JSC {
int8_t valueTagGPR;
#endif
int8_t valueGPR;
- int8_t scratchGPR;
+ DFG::RegisterSetPOD usedRegisters;
int32_t deltaCallToDone;
int32_t deltaCallToStorageLoad;
int32_t deltaCallToStructCheck;
diff --git a/Source/JavaScriptCore/create_jit_stubs b/Source/JavaScriptCore/create_jit_stubs
index f0fcb53b0..d90fa8eb4 100644
--- a/Source/JavaScriptCore/create_jit_stubs
+++ b/Source/JavaScriptCore/create_jit_stubs
@@ -28,10 +28,12 @@ my $rtype_template = quotemeta("#rtype#");
my $op_template = quotemeta("#op#");
my $prefix;
+my $enable_dfg = 0;
my $file;
my $getOptionsResult = GetOptions(
- 'prefix=s' => \$prefix
+ 'prefix=s' => \$prefix,
+ 'dfg!' => \$enable_dfg
);
$file = $ARGV[0];
@@ -44,11 +46,25 @@ my $stub = "";
my $rtype = "";
my $op = "";
+my $if_counter = 0;
+my $dfg_begin = 0;
print STDERR "Creating JIT stubs for $file \n";
open(IN, $file) or die "No such file $file";
while ( $_ = <IN> ) {
+ if ( /^#if (.*)/ ) {
+ $if_counter++;
+ if ( $1 eq "ENABLE(DFG_JIT)" ) {
+ $dfg_begin = $if_counter;
+ }
+ }
+ if ( /^#endif/ ) {
+ if ( $if_counter == $dfg_begin ) {
+ $dfg_begin = 0;
+ }
+ $if_counter--;
+ }
if ( /^$prefix\_BEGIN\((.*)\)/ ) {
$stub = $1;
print $stub . "\n";
@@ -66,7 +82,9 @@ while ( $_ = <IN> ) {
$stub =~ s/$rtype_template/$rtype/g;
$stub =~ s/$op_template/$op/g;
$stub =~ s/\\\*/\*/g;
- print $stub;
+ if ( $enable_dfg == 1 || $dfg_begin == 0 ) {
+ print $stub;
+ }
}
}
diff --git a/Source/JavaScriptCore/dfg/DFGAbstractState.cpp b/Source/JavaScriptCore/dfg/DFGAbstractState.cpp
index 95f44c092..e4561da06 100644
--- a/Source/JavaScriptCore/dfg/DFGAbstractState.cpp
+++ b/Source/JavaScriptCore/dfg/DFGAbstractState.cpp
@@ -946,13 +946,18 @@ bool AbstractState::execute(unsigned indexInBlock)
case PutByVal:
case PutByValAlias: {
node.setCanExit(true);
- if (!m_graph[node.child1()].prediction() || !m_graph[node.child2()].prediction()) {
+
+ Edge child1 = m_graph.varArgChild(node, 0);
+ Edge child2 = m_graph.varArgChild(node, 1);
+ Edge child3 = m_graph.varArgChild(node, 2);
+
+ if (!m_graph[child1].prediction() || !m_graph[child2].prediction()) {
m_isValid = false;
break;
}
- if (!m_graph[node.child2()].shouldSpeculateInteger() || !isActionableMutableArraySpeculation(m_graph[node.child1()].prediction())
+ if (!m_graph[child2].shouldSpeculateInteger() || !isActionableMutableArraySpeculation(m_graph[child1].prediction())
#if USE(JSVALUE32_64)
- || m_graph[node.child1()].shouldSpeculateArguments()
+ || m_graph[child1].shouldSpeculateArguments()
#endif
) {
ASSERT(node.op() == PutByVal);
@@ -961,89 +966,89 @@ bool AbstractState::execute(unsigned indexInBlock)
break;
}
- if (m_graph[node.child1()].shouldSpeculateArguments()) {
- forNode(node.child1()).filter(SpecArguments);
- forNode(node.child2()).filter(SpecInt32);
+ if (m_graph[child1].shouldSpeculateArguments()) {
+ forNode(child1).filter(SpecArguments);
+ forNode(child2).filter(SpecInt32);
break;
}
- if (m_graph[node.child1()].shouldSpeculateInt8Array()) {
- forNode(node.child1()).filter(SpecInt8Array);
- forNode(node.child2()).filter(SpecInt32);
- if (m_graph[node.child3()].shouldSpeculateInteger())
- forNode(node.child3()).filter(SpecInt32);
+ if (m_graph[child1].shouldSpeculateInt8Array()) {
+ forNode(child1).filter(SpecInt8Array);
+ forNode(child2).filter(SpecInt32);
+ if (m_graph[child3].shouldSpeculateInteger())
+ forNode(child3).filter(SpecInt32);
else
- forNode(node.child3()).filter(SpecNumber);
+ forNode(child3).filter(SpecNumber);
break;
}
- if (m_graph[node.child1()].shouldSpeculateInt16Array()) {
- forNode(node.child1()).filter(SpecInt16Array);
- forNode(node.child2()).filter(SpecInt32);
- if (m_graph[node.child3()].shouldSpeculateInteger())
- forNode(node.child3()).filter(SpecInt32);
+ if (m_graph[child1].shouldSpeculateInt16Array()) {
+ forNode(child1).filter(SpecInt16Array);
+ forNode(child2).filter(SpecInt32);
+ if (m_graph[child3].shouldSpeculateInteger())
+ forNode(child3).filter(SpecInt32);
else
- forNode(node.child3()).filter(SpecNumber);
+ forNode(child3).filter(SpecNumber);
break;
}
- if (m_graph[node.child1()].shouldSpeculateInt32Array()) {
- forNode(node.child1()).filter(SpecInt32Array);
- forNode(node.child2()).filter(SpecInt32);
- if (m_graph[node.child3()].shouldSpeculateInteger())
- forNode(node.child3()).filter(SpecInt32);
+ if (m_graph[child1].shouldSpeculateInt32Array()) {
+ forNode(child1).filter(SpecInt32Array);
+ forNode(child2).filter(SpecInt32);
+ if (m_graph[child3].shouldSpeculateInteger())
+ forNode(child3).filter(SpecInt32);
else
- forNode(node.child3()).filter(SpecNumber);
+ forNode(child3).filter(SpecNumber);
break;
}
- if (m_graph[node.child1()].shouldSpeculateUint8Array()) {
- forNode(node.child1()).filter(SpecUint8Array);
- forNode(node.child2()).filter(SpecInt32);
- if (m_graph[node.child3()].shouldSpeculateInteger())
- forNode(node.child3()).filter(SpecInt32);
+ if (m_graph[child1].shouldSpeculateUint8Array()) {
+ forNode(child1).filter(SpecUint8Array);
+ forNode(child2).filter(SpecInt32);
+ if (m_graph[child3].shouldSpeculateInteger())
+ forNode(child3).filter(SpecInt32);
else
- forNode(node.child3()).filter(SpecNumber);
+ forNode(child3).filter(SpecNumber);
break;
}
- if (m_graph[node.child1()].shouldSpeculateUint8ClampedArray()) {
- forNode(node.child1()).filter(SpecUint8ClampedArray);
- forNode(node.child2()).filter(SpecInt32);
- if (m_graph[node.child3()].shouldSpeculateInteger())
- forNode(node.child3()).filter(SpecInt32);
+ if (m_graph[child1].shouldSpeculateUint8ClampedArray()) {
+ forNode(child1).filter(SpecUint8ClampedArray);
+ forNode(child2).filter(SpecInt32);
+ if (m_graph[child3].shouldSpeculateInteger())
+ forNode(child3).filter(SpecInt32);
else
- forNode(node.child3()).filter(SpecNumber);
+ forNode(child3).filter(SpecNumber);
break;
}
- if (m_graph[node.child1()].shouldSpeculateUint16Array()) {
- forNode(node.child1()).filter(SpecUint16Array);
- forNode(node.child2()).filter(SpecInt32);
- if (m_graph[node.child3()].shouldSpeculateInteger())
- forNode(node.child3()).filter(SpecInt32);
+ if (m_graph[child1].shouldSpeculateUint16Array()) {
+ forNode(child1).filter(SpecUint16Array);
+ forNode(child2).filter(SpecInt32);
+ if (m_graph[child3].shouldSpeculateInteger())
+ forNode(child3).filter(SpecInt32);
else
- forNode(node.child3()).filter(SpecNumber);
+ forNode(child3).filter(SpecNumber);
break;
}
- if (m_graph[node.child1()].shouldSpeculateUint32Array()) {
- forNode(node.child1()).filter(SpecUint32Array);
- forNode(node.child2()).filter(SpecInt32);
- if (m_graph[node.child3()].shouldSpeculateInteger())
- forNode(node.child3()).filter(SpecInt32);
+ if (m_graph[child1].shouldSpeculateUint32Array()) {
+ forNode(child1).filter(SpecUint32Array);
+ forNode(child2).filter(SpecInt32);
+ if (m_graph[child3].shouldSpeculateInteger())
+ forNode(child3).filter(SpecInt32);
else
- forNode(node.child3()).filter(SpecNumber);
+ forNode(child3).filter(SpecNumber);
break;
}
- if (m_graph[node.child1()].shouldSpeculateFloat32Array()) {
- forNode(node.child1()).filter(SpecFloat32Array);
- forNode(node.child2()).filter(SpecInt32);
- forNode(node.child3()).filter(SpecNumber);
+ if (m_graph[child1].shouldSpeculateFloat32Array()) {
+ forNode(child1).filter(SpecFloat32Array);
+ forNode(child2).filter(SpecInt32);
+ forNode(child3).filter(SpecNumber);
break;
}
- if (m_graph[node.child1()].shouldSpeculateFloat64Array()) {
- forNode(node.child1()).filter(SpecFloat64Array);
- forNode(node.child2()).filter(SpecInt32);
- forNode(node.child3()).filter(SpecNumber);
+ if (m_graph[child1].shouldSpeculateFloat64Array()) {
+ forNode(child1).filter(SpecFloat64Array);
+ forNode(child2).filter(SpecInt32);
+ forNode(child3).filter(SpecNumber);
break;
}
- ASSERT(m_graph[node.child1()].shouldSpeculateArray());
- forNode(node.child1()).filter(SpecArray);
- forNode(node.child2()).filter(SpecInt32);
+ ASSERT(m_graph[child1].shouldSpeculateArray());
+ forNode(child1).filter(SpecArray);
+ forNode(child2).filter(SpecInt32);
if (node.op() == PutByVal)
clobberWorld(node.codeOrigin, indexInBlock);
break;
diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
index 91b882399..1b1395934 100644
--- a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
+++ b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp
@@ -2152,7 +2152,10 @@ bool ByteCodeParser::parseBlock(unsigned limit)
NodeIndex property = get(currentInstruction[2].u.operand);
NodeIndex value = get(currentInstruction[3].u.operand);
- addToGraph(PutByVal, base, property, value);
+ addVarArgChild(base);
+ addVarArgChild(property);
+ addVarArgChild(value);
+ addToGraph(Node::VarArg, PutByVal, OpInfo(0), OpInfo(0));
NEXT_OPCODE(op_put_by_val);
}
diff --git a/Source/JavaScriptCore/dfg/DFGCCallHelpers.h b/Source/JavaScriptCore/dfg/DFGCCallHelpers.h
index 5985b251e..fd4e1cae0 100644
--- a/Source/JavaScriptCore/dfg/DFGCCallHelpers.h
+++ b/Source/JavaScriptCore/dfg/DFGCCallHelpers.h
@@ -229,6 +229,27 @@ public:
addCallArgument(arg3);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5)
+ {
+ resetCallArguments();
+ addCallArgument(GPRInfo::callFrameRegister);
+ addCallArgument(arg1);
+ addCallArgument(arg2);
+ addCallArgument(arg3);
+ addCallArgument(arg4);
+ addCallArgument(arg5);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, GPRReg arg3, GPRReg arg4)
{
resetCallArguments();
@@ -570,6 +591,14 @@ public:
move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3)
+ {
+ move(arg1, GPRInfo::argumentGPR1);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImmPtr arg3)
{
move(arg1, GPRInfo::argumentGPR1);
@@ -642,6 +671,19 @@ public:
setupArgumentsWithExecState(arg1, arg2, arg3);
}
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4)
+ {
+ poke(arg4);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4, GPRReg arg5)
+ {
+ poke(arg5, 1);
+ poke(arg4);
+ setupArgumentsWithExecState(arg1, arg2, arg3);
+ }
+
ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, GPRReg arg2, TrustedImm32 arg3, TrustedImm32 arg4)
{
poke(arg4);
@@ -722,6 +764,16 @@ public:
#endif // NUMBER_OF_ARGUMENT_REGISTERS == 4
+#if NUMBER_OF_ARGUMENT_REGISTERS >= 5
+ ALWAYS_INLINE void setupArgumentsWithExecState(GPRReg arg1, TrustedImmPtr arg2, TrustedImm32 arg3, GPRReg arg4)
+ {
+ setupTwoStubArgs<GPRInfo::argumentGPR1, GPRInfo::argumentGPR4>(arg1, arg4);
+ move(arg2, GPRInfo::argumentGPR2);
+ move(arg3, GPRInfo::argumentGPR3);
+ move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0);
+ }
+#endif
+
void setupResults(GPRReg destA, GPRReg destB)
{
GPRReg srcA = GPRInfo::returnValueGPR;
diff --git a/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp b/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp
index 108cf1965..04c3ebc66 100644
--- a/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp
@@ -284,15 +284,16 @@ private:
return index;
break;
case PutByVal:
- case PutByValAlias:
+ case PutByValAlias: {
if (!m_graph.byValIsPure(node))
return NoNode;
- if (node.child1() == child1 && canonicalize(node.child2()) == canonicalize(child2))
- return node.child3().index();
+ if (m_graph.varArgChild(node, 0) == child1 && canonicalize(m_graph.varArgChild(node, 1)) == canonicalize(child2))
+ return m_graph.varArgChild(node, 2).index();
// We must assume that the PutByVal will clobber the location we're getting from.
// FIXME: We can do better; if we know that the PutByVal is accessing an array of a
// different type than the GetByVal, then we know that they won't clobber each other.
return NoNode;
+ }
case PutStructure:
case PutByOffset:
// GetByVal currently always speculates that it's accessing an
@@ -634,7 +635,7 @@ private:
break;
case PutByVal:
- if (isFixedIndexedStorageObjectSpeculation(m_graph[node.child1()].prediction()) && m_graph.byValIsPure(node))
+ if (isFixedIndexedStorageObjectSpeculation(m_graph[m_graph.varArgChild(node, 0)].prediction()) && m_graph.byValIsPure(node))
break;
return NoNode;
@@ -1079,17 +1080,19 @@ private:
setReplacement(getByValLoadElimination(node.child1().index(), node.child2().index()));
break;
- case PutByVal:
- if (isActionableMutableArraySpeculation(m_graph[node.child1()].prediction())
- && m_graph[node.child2()].shouldSpeculateInteger()
- && !m_graph[node.child1()].shouldSpeculateArguments()) {
- NodeIndex nodeIndex = getByValLoadElimination(
- node.child1().index(), node.child2().index());
+ case PutByVal: {
+ Edge child1 = m_graph.varArgChild(node, 0);
+ Edge child2 = m_graph.varArgChild(node, 1);
+ if (isActionableMutableArraySpeculation(m_graph[child1].prediction())
+ && m_graph[child2].shouldSpeculateInteger()
+ && !m_graph[child1].shouldSpeculateArguments()) {
+ NodeIndex nodeIndex = getByValLoadElimination(child1.index(), child2.index());
if (nodeIndex == NoNode)
break;
node.setOp(PutByValAlias);
}
break;
+ }
case CheckStructure:
if (checkStructureLoadElimination(node.structureSet(), node.child1().index()))
diff --git a/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp b/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp
index 2e7389f21..a1954d7e0 100644
--- a/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp
@@ -315,19 +315,22 @@ private:
}
case PutByVal: {
- if (!m_graph[node.child1()].prediction() || !m_graph[node.child2()].prediction())
+ Edge child1 = m_graph.varArgChild(node, 0);
+ Edge child2 = m_graph.varArgChild(node, 1);
+ Edge child3 = m_graph.varArgChild(node, 2);
+ if (!m_graph[child1].prediction() || !m_graph[child2].prediction())
break;
- if (!m_graph[node.child2()].shouldSpeculateInteger())
+ if (!m_graph[child2].shouldSpeculateInteger())
break;
- if (isActionableIntMutableArraySpeculation(m_graph[node.child1()].prediction())) {
- if (m_graph[node.child3()].isConstant())
+ if (isActionableIntMutableArraySpeculation(m_graph[child1].prediction())) {
+ if (m_graph[child3].isConstant())
break;
- if (m_graph[node.child3()].shouldSpeculateInteger())
+ if (m_graph[child3].shouldSpeculateInteger())
break;
fixDoubleEdge(2);
break;
}
- if (isActionableFloatMutableArraySpeculation(m_graph[node.child1()].prediction())) {
+ if (isActionableFloatMutableArraySpeculation(m_graph[child1].prediction())) {
fixDoubleEdge(2);
break;
}
@@ -368,7 +371,7 @@ private:
void fixDoubleEdge(unsigned childIndex)
{
Node& source = m_graph[m_compileIndex];
- Edge& edge = source.children.child(childIndex);
+ Edge& edge = m_graph.child(source, childIndex);
if (!m_graph[edge].shouldSpeculateInteger()) {
edge.setUseKind(DoubleUse);
diff --git a/Source/JavaScriptCore/dfg/DFGGPRInfo.h b/Source/JavaScriptCore/dfg/DFGGPRInfo.h
index 23f1697a6..498b116ec 100644
--- a/Source/JavaScriptCore/dfg/DFGGPRInfo.h
+++ b/Source/JavaScriptCore/dfg/DFGGPRInfo.h
@@ -26,10 +26,12 @@
#ifndef DFGGPRInfo_h
#define DFGGPRInfo_h
+#include <wtf/Platform.h>
+
#if ENABLE(DFG_JIT)
-#include <assembler/MacroAssembler.h>
-#include <dfg/DFGRegisterBank.h>
+#include "DFGRegisterBank.h"
+#include "MacroAssembler.h"
namespace JSC { namespace DFG {
diff --git a/Source/JavaScriptCore/dfg/DFGGraph.h b/Source/JavaScriptCore/dfg/DFGGraph.h
index a9080d117..4091c48f7 100644
--- a/Source/JavaScriptCore/dfg/DFGGraph.h
+++ b/Source/JavaScriptCore/dfg/DFGGraph.h
@@ -463,26 +463,35 @@ public:
bool byValIsPure(Node& node)
{
- if (!at(node.child2()).shouldSpeculateInteger())
- return false;
- SpeculatedType prediction = at(node.child1()).prediction();
switch (node.op()) {
- case PutByVal:
+ case PutByVal: {
+ if (!at(varArgChild(node, 1)).shouldSpeculateInteger())
+ return false;
+ SpeculatedType prediction = at(varArgChild(node, 0)).prediction();
if (!isActionableMutableArraySpeculation(prediction))
return false;
if (isArraySpeculation(prediction))
return false;
return true;
+ }
- case PutByValAlias:
+ case PutByValAlias: {
+ if (!at(varArgChild(node, 1)).shouldSpeculateInteger())
+ return false;
+ SpeculatedType prediction = at(varArgChild(node, 0)).prediction();
if (!isActionableMutableArraySpeculation(prediction))
return false;
return true;
+ }
- case GetByVal:
+ case GetByVal: {
+ if (!at(node.child2()).shouldSpeculateInteger())
+ return false;
+ SpeculatedType prediction = at(node.child1()).prediction();
if (!isActionableArraySpeculation(prediction))
return false;
return true;
+ }
default:
ASSERT_NOT_REACHED();
@@ -524,17 +533,29 @@ public:
void resetExitStates();
+ unsigned varArgNumChildren(Node& node)
+ {
+ ASSERT(node.flags() & NodeHasVarArgs);
+ return node.numChildren();
+ }
+
unsigned numChildren(Node& node)
{
if (node.flags() & NodeHasVarArgs)
- return node.numChildren();
+ return varArgNumChildren(node);
return AdjacencyList::Size;
}
- Edge child(Node& node, unsigned index)
+ Edge& varArgChild(Node& node, unsigned index)
+ {
+ ASSERT(node.flags() & NodeHasVarArgs);
+ return m_varArgChildren[node.firstChild() + index];
+ }
+
+ Edge& child(Node& node, unsigned index)
{
if (node.flags() & NodeHasVarArgs)
- return m_varArgChildren[node.firstChild() + index];
+ return varArgChild(node, index);
return node.children.child(index);
}
diff --git a/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
index 2ebee13c1..5a9f972b8 100644
--- a/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
+++ b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp
@@ -179,7 +179,7 @@ void JITCompiler::link(LinkBuffer& linkBuffer)
info.patch.dfg.valueTagGPR = m_propertyAccesses[i].m_valueTagGPR;
info.patch.dfg.valueGPR = m_propertyAccesses[i].m_valueGPR;
#endif
- info.patch.dfg.scratchGPR = m_propertyAccesses[i].m_scratchGPR;
+ m_propertyAccesses[i].m_usedRegisters.copyInfo(info.patch.dfg.usedRegisters);
info.patch.dfg.registersFlushed = m_propertyAccesses[i].m_registerMode == PropertyAccessRecord::RegistersFlushed;
}
diff --git a/Source/JavaScriptCore/dfg/DFGJITCompiler.h b/Source/JavaScriptCore/dfg/DFGJITCompiler.h
index d6374b790..7ff399f78 100644
--- a/Source/JavaScriptCore/dfg/DFGJITCompiler.h
+++ b/Source/JavaScriptCore/dfg/DFGJITCompiler.h
@@ -35,6 +35,7 @@
#include "DFGGPRInfo.h"
#include "DFGGraph.h"
#include "DFGRegisterBank.h"
+#include "DFGRegisterSet.h"
#include "JITCode.h"
#include "LinkBuffer.h"
#include "MacroAssembler.h"
@@ -169,7 +170,7 @@ struct PropertyAccessRecord {
MacroAssembler::Label done,
int8_t baseGPR,
int8_t valueGPR,
- int8_t scratchGPR,
+ const RegisterSet& usedRegisters,
RegisterMode registerMode = RegistersInUse)
#elif USE(JSVALUE32_64)
PropertyAccessRecord(
@@ -184,7 +185,7 @@ struct PropertyAccessRecord {
int8_t baseGPR,
int8_t valueTagGPR,
int8_t valueGPR,
- int8_t scratchGPR,
+ const RegisterSet& usedRegisters,
RegisterMode registerMode = RegistersInUse)
#endif
: m_codeOrigin(codeOrigin)
@@ -204,7 +205,7 @@ struct PropertyAccessRecord {
, m_valueTagGPR(valueTagGPR)
#endif
, m_valueGPR(valueGPR)
- , m_scratchGPR(scratchGPR)
+ , m_usedRegisters(usedRegisters)
, m_registerMode(registerMode)
{
}
@@ -226,7 +227,7 @@ struct PropertyAccessRecord {
int8_t m_valueTagGPR;
#endif
int8_t m_valueGPR;
- int8_t m_scratchGPR;
+ RegisterSet m_usedRegisters;
RegisterMode m_registerMode;
};
diff --git a/Source/JavaScriptCore/dfg/DFGNodeType.h b/Source/JavaScriptCore/dfg/DFGNodeType.h
index 8c2f96222..7fcd2ec14 100644
--- a/Source/JavaScriptCore/dfg/DFGNodeType.h
+++ b/Source/JavaScriptCore/dfg/DFGNodeType.h
@@ -109,10 +109,11 @@ namespace JSC { namespace DFG {
/* Property access. */\
 /* PutByValAlias indicates that a 'put' aliases a prior write to the same property. */\
/* Since a put to 'length' may invalidate optimizations here, */\
- /* this must be the directly subsequent property put. */\
+ /* this must be the directly subsequent property put. Note that PutByVal */\
+ /* opcodes use VarArgs because they may have up to 4 children. */\
macro(GetByVal, NodeResultJS | NodeMustGenerate | NodeMightClobber) \
- macro(PutByVal, NodeMustGenerate | NodeMightClobber) \
- macro(PutByValAlias, NodeMustGenerate | NodeMightClobber) \
+ macro(PutByVal, NodeMustGenerate | NodeHasVarArgs | NodeMightClobber) \
+ macro(PutByValAlias, NodeMustGenerate | NodeHasVarArgs | NodeMightClobber) \
macro(GetById, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
macro(GetByIdFlush, NodeResultJS | NodeMustGenerate | NodeClobbersWorld) \
macro(PutById, NodeMustGenerate | NodeClobbersWorld) \
diff --git a/Source/JavaScriptCore/dfg/DFGOperations.cpp b/Source/JavaScriptCore/dfg/DFGOperations.cpp
index bbe55d351..03c0666b7 100644
--- a/Source/JavaScriptCore/dfg/DFGOperations.cpp
+++ b/Source/JavaScriptCore/dfg/DFGOperations.cpp
@@ -1232,6 +1232,15 @@ size_t DFG_OPERATION operationIsFunction(EncodedJSValue value)
return jsIsFunctionType(JSValue::decode(value));
}
+void DFG_OPERATION operationReallocateStorageAndFinishPut(ExecState* exec, JSObject* base, Structure* structure, PropertyOffset offset, EncodedJSValue value)
+{
+ JSGlobalData& globalData = exec->globalData();
+ ASSERT(structure->outOfLineCapacity() > base->structure()->outOfLineCapacity());
+ ASSERT(!globalData.heap.storageAllocator().fastPathShouldSucceed(structure->outOfLineCapacity() * sizeof(JSValue)));
+ base->setStructureAndReallocateStorageIfNecessary(globalData, structure);
+ base->putDirectOffset(globalData, offset, JSValue::decode(value));
+}
+
double DFG_OPERATION operationFModOnInts(int32_t a, int32_t b)
{
return fmod(a, b);
diff --git a/Source/JavaScriptCore/dfg/DFGOperations.h b/Source/JavaScriptCore/dfg/DFGOperations.h
index 3c85ee761..109dcb2eb 100644
--- a/Source/JavaScriptCore/dfg/DFGOperations.h
+++ b/Source/JavaScriptCore/dfg/DFGOperations.h
@@ -180,6 +180,7 @@ JSCell* DFG_OPERATION operationNewFunctionExpression(ExecState*, JSCell*) WTF_IN
double DFG_OPERATION operationFModOnInts(int32_t, int32_t) WTF_INTERNAL;
size_t DFG_OPERATION operationIsObject(EncodedJSValue) WTF_INTERNAL;
size_t DFG_OPERATION operationIsFunction(EncodedJSValue) WTF_INTERNAL;
+void DFG_OPERATION operationReallocateStorageAndFinishPut(ExecState*, JSObject*, Structure*, PropertyOffset, EncodedJSValue) WTF_INTERNAL;
 // This method is used to look up an exception handler, keyed by faultLocation, which is
// the return location from one of the calls out to one of the helper operations above.
diff --git a/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp b/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp
index 320eb6cb6..d23cd8265 100644
--- a/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp
+++ b/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp
@@ -635,9 +635,9 @@ private:
}
case PutByVal:
- changed |= m_graph[node.child1()].mergeFlags(NodeUsedAsValue);
- changed |= m_graph[node.child2()].mergeFlags(NodeUsedAsNumber | NodeUsedAsInt);
- changed |= m_graph[node.child3()].mergeFlags(NodeUsedAsValue);
+ changed |= m_graph[m_graph.varArgChild(node, 0)].mergeFlags(NodeUsedAsValue);
+ changed |= m_graph[m_graph.varArgChild(node, 1)].mergeFlags(NodeUsedAsNumber | NodeUsedAsInt);
+ changed |= m_graph[m_graph.varArgChild(node, 2)].mergeFlags(NodeUsedAsValue);
break;
case PutScopedVar:
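// Illustrative summary (not part of this patch) of the PutByVal child layout
// implied by the flag merges above; the meaning of any child beyond index 2 is
// not described in this patch:
//
//     m_graph.varArgChild(node, 0); // base object    -> NodeUsedAsValue
//     m_graph.varArgChild(node, 1); // property index -> NodeUsedAsNumber | NodeUsedAsInt
//     m_graph.varArgChild(node, 2); // value to store -> NodeUsedAsValue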
diff --git a/Source/JavaScriptCore/dfg/DFGRegisterBank.h b/Source/JavaScriptCore/dfg/DFGRegisterBank.h
index 85dc246f2..1d1d6fa52 100644
--- a/Source/JavaScriptCore/dfg/DFGRegisterBank.h
+++ b/Source/JavaScriptCore/dfg/DFGRegisterBank.h
@@ -226,6 +226,11 @@ public:
return nameAtIndex(BankInfo::toIndex(reg));
}
+ bool isInUse(RegID reg) const
+ {
+ return isLocked(reg) || name(reg) != InvalidVirtualRegister;
+ }
+
#ifndef NDEBUG
void dump()
{
diff --git a/Source/JavaScriptCore/dfg/DFGRegisterSet.h b/Source/JavaScriptCore/dfg/DFGRegisterSet.h
new file mode 100644
index 000000000..bb36359f0
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGRegisterSet.h
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGRegisterSet_h
+#define DFGRegisterSet_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGFPRInfo.h"
+#include "DFGGPRInfo.h"
+#include <wtf/Bitmap.h>
+
+namespace JSC { namespace DFG {
+
+static const unsigned totalNumberOfRegisters =
+ GPRInfo::numberOfRegisters + FPRInfo::numberOfRegisters;
+
+static const unsigned numberOfBytesInRegisterSet =
+ (totalNumberOfRegisters + 7) >> 3;
+
+typedef uint8_t RegisterSetPOD[numberOfBytesInRegisterSet];
+
+class RegisterSet {
+public:
+ RegisterSet()
+ {
+ for (unsigned i = numberOfBytesInRegisterSet; i--;)
+ m_set[i] = 0;
+ }
+
+ RegisterSet(const RegisterSetPOD& other)
+ {
+ for (unsigned i = numberOfBytesInRegisterSet; i--;)
+ m_set[i] = other[i];
+ }
+
+ const RegisterSetPOD& asPOD() const { return m_set; }
+
+ void copyInfo(RegisterSetPOD& other) const
+ {
+ for (unsigned i = numberOfBytesInRegisterSet; i--;)
+ other[i] = m_set[i];
+ }
+
+ void set(GPRReg reg)
+ {
+ setBit(GPRInfo::toIndex(reg));
+ }
+
+ void setGPRByIndex(unsigned index)
+ {
+ ASSERT(index < GPRInfo::numberOfRegisters);
+ setBit(index);
+ }
+
+ void clear(GPRReg reg)
+ {
+ clearBit(GPRInfo::toIndex(reg));
+ }
+
+ bool get(GPRReg reg) const
+ {
+ return getBit(GPRInfo::toIndex(reg));
+ }
+
+ bool getGPRByIndex(unsigned index) const
+ {
+ ASSERT(index < GPRInfo::numberOfRegisters);
+ return getBit(index);
+ }
+
+ // Return the index'th free GPR.
+ GPRReg getFreeGPR(unsigned index = 0) const
+ {
+ for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
+ if (!getGPRByIndex(i) && !index--)
+ return GPRInfo::toRegister(i);
+ }
+ return InvalidGPRReg;
+ }
+
+ void set(FPRReg reg)
+ {
+ setBit(GPRInfo::numberOfRegisters + FPRInfo::toIndex(reg));
+ }
+
+ void setFPRByIndex(unsigned index)
+ {
+ ASSERT(index < FPRInfo::numberOfRegisters);
+ setBit(GPRInfo::numberOfRegisters + index);
+ }
+
+ void clear(FPRReg reg)
+ {
+ clearBit(GPRInfo::numberOfRegisters + FPRInfo::toIndex(reg));
+ }
+
+ bool get(FPRReg reg) const
+ {
+ return getBit(GPRInfo::numberOfRegisters + FPRInfo::toIndex(reg));
+ }
+
+ bool getFPRByIndex(unsigned index) const
+ {
+ ASSERT(index < FPRInfo::numberOfRegisters);
+ return getBit(GPRInfo::numberOfRegisters + index);
+ }
+
+ template<typename BankInfo>
+ void setByIndex(unsigned index)
+ {
+ set(BankInfo::toRegister(index));
+ }
+
+ template<typename BankInfo>
+ bool getByIndex(unsigned index)
+ {
+ return get(BankInfo::toRegister(index));
+ }
+
+ unsigned numberOfSetGPRs() const
+ {
+ unsigned result = 0;
+ for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
+ if (!getBit(i))
+ continue;
+ result++;
+ }
+ return result;
+ }
+
+ unsigned numberOfSetFPRs() const
+ {
+ unsigned result = 0;
+ for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
+ if (!getBit(GPRInfo::numberOfRegisters + i))
+ continue;
+ result++;
+ }
+ return result;
+ }
+
+ unsigned numberOfSetRegisters() const
+ {
+ unsigned result = 0;
+ for (unsigned i = totalNumberOfRegisters; i--;) {
+ if (!getBit(i))
+ continue;
+ result++;
+ }
+ return result;
+ }
+
+private:
+ void setBit(unsigned i)
+ {
+ ASSERT(i < totalNumberOfRegisters);
+ m_set[i >> 3] |= (1 << (i & 7));
+ }
+
+ void clearBit(unsigned i)
+ {
+ ASSERT(i < totalNumberOfRegisters);
+ m_set[i >> 3] &= ~(1 << (i & 7));
+ }
+
+ bool getBit(unsigned i) const
+ {
+ ASSERT(i < totalNumberOfRegisters);
+ return !!(m_set[i >> 3] & (1 << (i & 7)));
+ }
+
+ RegisterSetPOD m_set;
+};
+
+} } // namespace JSC::DFG
+
+#else // !ENABLE(DFG_JIT), i.e. the DFG is disabled
+
+namespace JSC { namespace DFG {
+
+// Define RegisterSetPOD to something that is a POD, but is otherwise useless,
+// to make it easier to refer to this type in code that may be compiled when
+// the DFG is disabled.
+
+struct RegisterSetPOD { };
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGRegisterSet_h
+
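// Illustrative, self-contained sketch (not part of this patch) of the byte/bit
// packing RegisterSet uses above; kTotalRegisters is an assumed stand-in for
// totalNumberOfRegisters.
//
//     #include <stdint.h>
//
//     enum { kTotalRegisters = 16 };                   // assumption, for illustration
//     static uint8_t set[(kTotalRegisters + 7) >> 3];  // one bit per register
//
//     static void setBit(unsigned i)   { set[i >> 3] |= 1 << (i & 7); }
//     static void clearBit(unsigned i) { set[i >> 3] &= ~(1 << (i & 7)); }
//     static bool getBit(unsigned i)   { return !!(set[i >> 3] & (1 << (i & 7))); }
//
// FPR indices are offset by GPRInfo::numberOfRegisters, so one POD array covers
// both banks, and copyInfo() lets the JIT embed the set in a patch info struct.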
diff --git a/Source/JavaScriptCore/dfg/DFGRepatch.cpp b/Source/JavaScriptCore/dfg/DFGRepatch.cpp
index e25c6aa27..cfc2cd664 100644
--- a/Source/JavaScriptCore/dfg/DFGRepatch.cpp
+++ b/Source/JavaScriptCore/dfg/DFGRepatch.cpp
@@ -29,6 +29,7 @@
#if ENABLE(DFG_JIT)
#include "DFGCCallHelpers.h"
+#include "DFGScratchRegisterAllocator.h"
#include "DFGSpeculativeJIT.h"
#include "DFGThunks.h"
#include "GCAwareJITStubRoutine.h"
@@ -161,11 +162,15 @@ static void generateProtoChainAccessStub(ExecState* exec, StructureStubInfo& stu
GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.patch.dfg.valueTagGPR);
#endif
GPRReg resultGPR = static_cast<GPRReg>(stubInfo.patch.dfg.valueGPR);
- GPRReg scratchGPR = static_cast<GPRReg>(stubInfo.patch.dfg.scratchGPR);
+ GPRReg scratchGPR = RegisterSet(stubInfo.patch.dfg.usedRegisters).getFreeGPR();
bool needToRestoreScratch = false;
if (scratchGPR == InvalidGPRReg) {
+#if USE(JSVALUE64)
scratchGPR = SpeculativeJIT::selectScratchGPR(baseGPR, resultGPR);
+#else
+ scratchGPR = SpeculativeJIT::selectScratchGPR(baseGPR, resultGPR, resultTagGPR);
+#endif
stubJit.push(scratchGPR);
needToRestoreScratch = true;
}
@@ -231,13 +236,17 @@ static bool tryCacheGetByID(ExecState* exec, JSValue baseValue, const Identifier
GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.patch.dfg.valueTagGPR);
#endif
GPRReg resultGPR = static_cast<GPRReg>(stubInfo.patch.dfg.valueGPR);
- GPRReg scratchGPR = static_cast<GPRReg>(stubInfo.patch.dfg.scratchGPR);
+ GPRReg scratchGPR = RegisterSet(stubInfo.patch.dfg.usedRegisters).getFreeGPR();
bool needToRestoreScratch = false;
MacroAssembler stubJit;
if (scratchGPR == InvalidGPRReg) {
+#if USE(JSVALUE64)
scratchGPR = SpeculativeJIT::selectScratchGPR(baseGPR, resultGPR);
+#else
+ scratchGPR = SpeculativeJIT::selectScratchGPR(baseGPR, resultGPR, resultTagGPR);
+#endif
stubJit.push(scratchGPR);
needToRestoreScratch = true;
}
@@ -384,7 +393,7 @@ static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identi
GPRReg resultTagGPR = static_cast<GPRReg>(stubInfo.patch.dfg.valueTagGPR);
#endif
GPRReg resultGPR = static_cast<GPRReg>(stubInfo.patch.dfg.valueGPR);
- GPRReg scratchGPR = static_cast<GPRReg>(stubInfo.patch.dfg.scratchGPR);
+ GPRReg scratchGPR = RegisterSet(stubInfo.patch.dfg.usedRegisters).getFreeGPR();
CCallHelpers stubJit(globalData, codeBlock);
@@ -404,6 +413,7 @@ static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identi
if (slot.cachedPropertyType() == PropertySlot::Getter
|| slot.cachedPropertyType() == PropertySlot::Custom) {
if (slot.cachedPropertyType() == PropertySlot::Getter) {
+ ASSERT(scratchGPR != InvalidGPRReg);
ASSERT(baseGPR != scratchGPR);
if (isInlineOffset(slot.cachedOffset())) {
#if USE(JSVALUE64)
@@ -629,7 +639,7 @@ static void emitPutReplaceStub(
GPRReg valueTagGPR = static_cast<GPRReg>(stubInfo.patch.dfg.valueTagGPR);
#endif
GPRReg valueGPR = static_cast<GPRReg>(stubInfo.patch.dfg.valueGPR);
- GPRReg scratchGPR = static_cast<GPRReg>(stubInfo.patch.dfg.scratchGPR);
+ GPRReg scratchGPR = RegisterSet(stubInfo.patch.dfg.usedRegisters).getFreeGPR();
bool needToRestoreScratch = false;
#if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING)
GPRReg scratchGPR2;
@@ -641,7 +651,11 @@ static void emitPutReplaceStub(
MacroAssembler stubJit;
if (scratchGPR == InvalidGPRReg && (writeBarrierNeeded || isOutOfLineOffset(slot.cachedOffset()))) {
+#if USE(JSVALUE64)
scratchGPR = SpeculativeJIT::selectScratchGPR(baseGPR, valueGPR);
+#else
+ scratchGPR = SpeculativeJIT::selectScratchGPR(baseGPR, valueGPR, valueTagGPR);
+#endif
needToRestoreScratch = true;
stubJit.push(scratchGPR);
}
@@ -652,7 +666,11 @@ static void emitPutReplaceStub(
MacroAssembler::TrustedImmPtr(structure));
#if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING)
+#if USE(JSVALUE64)
scratchGPR2 = SpeculativeJIT::selectScratchGPR(baseGPR, valueGPR, scratchGPR);
+#else
+ scratchGPR2 = SpeculativeJIT::selectScratchGPR(baseGPR, valueGPR, valueTagGPR, scratchGPR);
+#endif
stubJit.push(scratchGPR2);
SpeculativeJIT::writeBarrier(stubJit, baseGPR, scratchGPR, scratchGPR2, WriteBarrierForPropertyAccess);
stubJit.pop(scratchGPR2);
@@ -722,89 +740,203 @@ static void emitPutTransitionStub(
GPRReg valueTagGPR = static_cast<GPRReg>(stubInfo.patch.dfg.valueTagGPR);
#endif
GPRReg valueGPR = static_cast<GPRReg>(stubInfo.patch.dfg.valueGPR);
- GPRReg scratchGPR = static_cast<GPRReg>(stubInfo.patch.dfg.scratchGPR);
- bool needToRestoreScratch = false;
-
- ASSERT(scratchGPR != baseGPR);
+
+ ScratchRegisterAllocator allocator(stubInfo.patch.dfg.usedRegisters);
+ allocator.lock(baseGPR);
+#if USE(JSVALUE32_64)
+ allocator.lock(valueTagGPR);
+#endif
+ allocator.lock(valueGPR);
+
+ CCallHelpers stubJit(globalData);
- MacroAssembler stubJit;
+ GPRReg scratchGPR1 = allocator.allocateScratchGPR();
+ ASSERT(scratchGPR1 != baseGPR);
+ ASSERT(scratchGPR1 != valueGPR);
+
+ bool needSecondScratch = false;
+ bool needThirdScratch = false;
+#if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING)
+ needSecondScratch = true;
+#endif
+ if (structure->outOfLineCapacity() != oldStructure->outOfLineCapacity()
+ && oldStructure->outOfLineCapacity()) {
+ needSecondScratch = true;
+ needThirdScratch = true;
+ }
+
+ GPRReg scratchGPR2;
+ if (needSecondScratch) {
+ scratchGPR2 = allocator.allocateScratchGPR();
+ ASSERT(scratchGPR2 != baseGPR);
+ ASSERT(scratchGPR2 != valueGPR);
+ ASSERT(scratchGPR2 != scratchGPR1);
+ } else
+ scratchGPR2 = InvalidGPRReg;
+ GPRReg scratchGPR3;
+ if (needThirdScratch) {
+ scratchGPR3 = allocator.allocateScratchGPR();
+ ASSERT(scratchGPR3 != baseGPR);
+ ASSERT(scratchGPR3 != valueGPR);
+ ASSERT(scratchGPR3 != scratchGPR1);
+ ASSERT(scratchGPR3 != scratchGPR2);
+ } else
+ scratchGPR3 = InvalidGPRReg;
+ allocator.preserveReusedRegistersByPushing(stubJit);
+
MacroAssembler::JumpList failureCases;
- if (scratchGPR == InvalidGPRReg) {
- scratchGPR = SpeculativeJIT::selectScratchGPR(baseGPR, valueGPR);
- stubJit.push(scratchGPR);
- needToRestoreScratch = true;
- }
-
ASSERT(oldStructure->transitionWatchpointSetHasBeenInvalidated());
failureCases.append(stubJit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseGPR, JSCell::structureOffset()), MacroAssembler::TrustedImmPtr(oldStructure)));
addStructureTransitionCheck(
oldStructure->storedPrototype(), exec->codeBlock(), stubInfo, stubJit, failureCases,
- scratchGPR);
+ scratchGPR1);
if (putKind == NotDirect) {
for (WriteBarrier<Structure>* it = prototypeChain->head(); *it; ++it) {
addStructureTransitionCheck(
(*it)->storedPrototype(), exec->codeBlock(), stubInfo, stubJit, failureCases,
- scratchGPR);
+ scratchGPR1);
}
}
#if ENABLE(GGC) || ENABLE(WRITE_BARRIER_PROFILING)
+ ASSERT(needSecondScratch);
+ ASSERT(scratchGPR2 != InvalidGPRReg);
// Must always emit this write barrier as the structure transition itself requires it
- GPRReg scratch2 = SpeculativeJIT::selectScratchGPR(baseGPR, valueGPR, scratchGPR);
- stubJit.push(scratch2);
- SpeculativeJIT::writeBarrier(stubJit, baseGPR, scratchGPR, scratch2, WriteBarrierForPropertyAccess);
- stubJit.pop(scratch2);
+ SpeculativeJIT::writeBarrier(stubJit, baseGPR, scratchGPR1, scratchGPR2, WriteBarrierForPropertyAccess);
#endif
+
+ MacroAssembler::JumpList slowPath;
+
+ bool scratchGPR1HasStorage = false;
+
+ if (structure->outOfLineCapacity() != oldStructure->outOfLineCapacity()) {
+ size_t newSize = structure->outOfLineCapacity() * sizeof(JSValue);
+ CopiedAllocator* copiedAllocator = &globalData->heap.storageAllocator();
+
+ if (!oldStructure->outOfLineCapacity()) {
+ stubJit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR1);
+ slowPath.append(stubJit.branchSubPtr(MacroAssembler::Signed, MacroAssembler::TrustedImm32(newSize), scratchGPR1));
+ stubJit.storePtr(scratchGPR1, &copiedAllocator->m_currentRemaining);
+ stubJit.negPtr(scratchGPR1);
+ stubJit.addPtr(MacroAssembler::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR1);
+ stubJit.subPtr(MacroAssembler::TrustedImm32(newSize), scratchGPR1);
+ } else {
+ size_t oldSize = oldStructure->outOfLineCapacity() * sizeof(JSValue);
+ ASSERT(newSize > oldSize);
+
+ // Optimistically assume that the old storage was the very last thing
+ // allocated.
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfOutOfLineStorage()), scratchGPR3);
+ stubJit.loadPtr(&copiedAllocator->m_currentPayloadEnd, scratchGPR2);
+ stubJit.loadPtr(&copiedAllocator->m_currentRemaining, scratchGPR1);
+ stubJit.subPtr(scratchGPR1, scratchGPR2);
+ stubJit.subPtr(MacroAssembler::TrustedImm32(oldSize), scratchGPR2);
+ MacroAssembler::Jump needFullRealloc =
+ stubJit.branchPtr(MacroAssembler::NotEqual, scratchGPR2, scratchGPR3);
+ slowPath.append(stubJit.branchSubPtr(MacroAssembler::Signed, MacroAssembler::TrustedImm32(newSize - oldSize), scratchGPR1));
+ stubJit.storePtr(scratchGPR1, &copiedAllocator->m_currentRemaining);
+ stubJit.move(scratchGPR2, scratchGPR1);
+ MacroAssembler::Jump doneRealloc = stubJit.jump();
+
+ needFullRealloc.link(&stubJit);
+ slowPath.append(stubJit.branchSubPtr(MacroAssembler::Signed, MacroAssembler::TrustedImm32(newSize), scratchGPR1));
+ stubJit.storePtr(scratchGPR1, &copiedAllocator->m_currentRemaining);
+ stubJit.negPtr(scratchGPR1);
+ stubJit.addPtr(MacroAssembler::AbsoluteAddress(&copiedAllocator->m_currentPayloadEnd), scratchGPR1);
+ stubJit.subPtr(MacroAssembler::TrustedImm32(newSize), scratchGPR1);
+ // We have scratchGPR1 = new storage, scratchGPR3 = old storage, scratchGPR2 = available
+ for (size_t offset = 0; offset < oldSize; offset += sizeof(JSValue)) {
+ stubJit.loadPtr(MacroAssembler::Address(scratchGPR3, offset), scratchGPR2);
+ stubJit.storePtr(scratchGPR2, MacroAssembler::Address(scratchGPR1, offset));
+ }
+
+ doneRealloc.link(&stubJit);
+ }
+
+ stubJit.storePtr(scratchGPR1, MacroAssembler::Address(baseGPR, JSObject::offsetOfOutOfLineStorage()));
+ scratchGPR1HasStorage = true;
+ }
stubJit.storePtr(MacroAssembler::TrustedImmPtr(structure), MacroAssembler::Address(baseGPR, JSCell::structureOffset()));
#if USE(JSVALUE64)
if (isInlineOffset(slot.cachedOffset()))
stubJit.storePtr(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue)));
else {
- stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfOutOfLineStorage()), scratchGPR);
- stubJit.storePtr(valueGPR, MacroAssembler::Address(scratchGPR, offsetInOutOfLineStorage(slot.cachedOffset()) * sizeof(JSValue)));
+ if (!scratchGPR1HasStorage)
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfOutOfLineStorage()), scratchGPR1);
+ stubJit.storePtr(valueGPR, MacroAssembler::Address(scratchGPR1, offsetInOutOfLineStorage(slot.cachedOffset()) * sizeof(JSValue)));
}
#elif USE(JSVALUE32_64)
if (isInlineOffset(slot.cachedOffset())) {
stubJit.store32(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
stubJit.store32(valueTagGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
} else {
- stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfOutOfLineStorage()), scratchGPR);
- stubJit.store32(valueGPR, MacroAssembler::Address(scratchGPR, offsetInOutOfLineStorage(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
- stubJit.store32(valueTagGPR, MacroAssembler::Address(scratchGPR, offsetInOutOfLineStorage(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
+ if (!scratchGPR1HasStorage)
+ stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfOutOfLineStorage()), scratchGPR1);
+ stubJit.store32(valueGPR, MacroAssembler::Address(scratchGPR1, offsetInOutOfLineStorage(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)));
+ stubJit.store32(valueTagGPR, MacroAssembler::Address(scratchGPR1, offsetInOutOfLineStorage(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)));
}
#endif
MacroAssembler::Jump success;
MacroAssembler::Jump failure;
- if (needToRestoreScratch) {
- stubJit.pop(scratchGPR);
+ if (allocator.didReuseRegisters()) {
+ allocator.restoreReusedRegistersByPopping(stubJit);
success = stubJit.jump();
failureCases.link(&stubJit);
- stubJit.pop(scratchGPR);
+ allocator.restoreReusedRegistersByPopping(stubJit);
failure = stubJit.jump();
} else
success = stubJit.jump();
-
+
+ MacroAssembler::Call operationCall;
+ MacroAssembler::Jump successInSlowPath;
+
+ if (structure->outOfLineCapacity() != oldStructure->outOfLineCapacity()) {
+ slowPath.link(&stubJit);
+
+ allocator.restoreReusedRegistersByPopping(stubJit);
+ ScratchBuffer* scratchBuffer = globalData->scratchBufferForSize(allocator.desiredScratchBufferSize());
+ allocator.preserveUsedRegistersToScratchBuffer(stubJit, scratchBuffer, scratchGPR1);
+#if USE(JSVALUE64)
+ stubJit.setupArgumentsWithExecState(baseGPR, MacroAssembler::TrustedImmPtr(structure), MacroAssembler::TrustedImm32(slot.cachedOffset()), valueGPR);
+#else
+ stubJit.setupArgumentsWithExecState(baseGPR, MacroAssembler::TrustedImmPtr(structure), MacroAssembler::TrustedImm32(slot.cachedOffset()), valueGPR, valueTagGPR);
+#endif
+ operationCall = stubJit.call();
+ allocator.restoreUsedRegistersFromScratchBuffer(stubJit, scratchBuffer, scratchGPR1);
+ successInSlowPath = stubJit.jump();
+ }
+
LinkBuffer patchBuffer(*globalData, &stubJit, exec->codeBlock());
patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.dfg.deltaCallToDone));
- if (needToRestoreScratch)
+ if (allocator.didReuseRegisters())
patchBuffer.link(failure, failureLabel);
else
patchBuffer.link(failureCases, failureLabel);
+ if (structure->outOfLineCapacity() != oldStructure->outOfLineCapacity()) {
+ patchBuffer.link(operationCall, operationReallocateStorageAndFinishPut);
+ patchBuffer.link(successInSlowPath, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.dfg.deltaCallToDone));
+ }
- stubRoutine = FINALIZE_CODE_FOR_STUB(
- patchBuffer,
- ("DFG PutById transition stub for CodeBlock %p, return point %p",
- exec->codeBlock(), stubInfo.callReturnLocation.labelAtOffset(
- stubInfo.patch.dfg.deltaCallToDone).executableAddress()));
+ stubRoutine =
+ createJITStubRoutine(
+ FINALIZE_CODE(
+ patchBuffer,
+ ("DFG PutById transition stub for CodeBlock %p, return point %p",
+ exec->codeBlock(), stubInfo.callReturnLocation.labelAtOffset(
+ stubInfo.patch.dfg.deltaCallToDone).executableAddress())),
+ *globalData,
+ exec->codeBlock()->ownerExecutable(),
+ structure->outOfLineCapacity() != oldStructure->outOfLineCapacity(),
+ structure);
}
static bool tryCachePutByID(ExecState* exec, JSValue baseValue, const Identifier& ident, const PutPropertySlot& slot, StructureStubInfo& stubInfo, PutKind putKind)
@@ -829,8 +961,11 @@ static bool tryCachePutByID(ExecState* exec, JSValue baseValue, const Identifier
if (structure->isDictionary())
return false;
- // skip optimizing the case where we need a realloc
- if (oldStructure->outOfLineCapacity() != structure->outOfLineCapacity())
+ // Skip optimizing the case where we need a realloc if we don't have
+ // enough registers to make it happen.
+ if (GPRInfo::numberOfRegisters < 6
+ && oldStructure->outOfLineCapacity() != structure->outOfLineCapacity()
+ && oldStructure->outOfLineCapacity())
return false;
normalizePrototypeChain(exec, baseCell);
@@ -892,8 +1027,11 @@ static bool tryBuildPutByIdList(ExecState* exec, JSValue baseValue, const Identi
if (structure->isDictionary())
return false;
- // skip optimizing the case where we need a realloc
- if (oldStructure->outOfLineCapacity() != structure->outOfLineCapacity())
+ // Skip optimizing the case where we need a realloc if we don't have
+ // enough registers to make it happen.
+ if (GPRInfo::numberOfRegisters < 6
+ && oldStructure->outOfLineCapacity() != structure->outOfLineCapacity()
+ && oldStructure->outOfLineCapacity())
return false;
normalizePrototypeChain(exec, baseCell);
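// A hedged reading (not stated in the patch itself) of the two
// "numberOfRegisters < 6" gates above: the copying realloc path in
// emitPutTransitionStub() needs baseGPR, valueGPR (plus a tag GPR on
// JSVALUE32_64) and three scratch GPRs, so register-poor targets bail out to
// the generic path. As a sketch:
//
//     static bool canEmitReallocTransition(Structure* oldS, Structure* newS) {
//         if (oldS->outOfLineCapacity() == newS->outOfLineCapacity())
//             return true;  // no storage resize needed
//         if (!oldS->outOfLineCapacity())
//             return true;  // fresh allocation: one scratch suffices
//         return GPRInfo::numberOfRegisters >= 6; // copy path: three scratches
//     }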
diff --git a/Source/JavaScriptCore/dfg/DFGScratchRegisterAllocator.h b/Source/JavaScriptCore/dfg/DFGScratchRegisterAllocator.h
new file mode 100644
index 000000000..9a65e8b7d
--- /dev/null
+++ b/Source/JavaScriptCore/dfg/DFGScratchRegisterAllocator.h
@@ -0,0 +1,192 @@
+/*
+ * Copyright (C) 2012 Apple Inc. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY
+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR
+ * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
+ * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
+ * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
+ * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef DFGScratchRegisterAllocator_h
+#define DFGScratchRegisterAllocator_h
+
+#include <wtf/Platform.h>
+
+#if ENABLE(DFG_JIT)
+
+#include "DFGRegisterSet.h"
+#include "MacroAssembler.h"
+
+namespace JSC { namespace DFG {
+
+// This class provides a low-level register allocator for use in stubs.
+
+class ScratchRegisterAllocator {
+public:
+ ScratchRegisterAllocator(const RegisterSet& usedRegisters)
+ : m_usedRegisters(usedRegisters)
+ , m_didReuseRegisters(false)
+ {
+ }
+
+ template<typename T>
+ void lock(T reg) { m_lockedRegisters.set(reg); }
+
+ template<typename BankInfo>
+ typename BankInfo::RegisterType allocateScratch()
+ {
+ // First try to allocate a register that is totally free.
+ for (unsigned i = 0; i < BankInfo::numberOfRegisters; ++i) {
+ typename BankInfo::RegisterType reg = BankInfo::toRegister(i);
+ if (!m_lockedRegisters.get(reg)
+ && !m_usedRegisters.get(reg)
+ && !m_scratchRegisters.get(reg)) {
+ m_scratchRegisters.set(reg);
+ return reg;
+ }
+ }
+
+ // Since that failed, try to allocate a register that is not yet
+ // locked or used for scratch.
+ for (unsigned i = 0; i < BankInfo::numberOfRegisters; ++i) {
+ typename BankInfo::RegisterType reg = BankInfo::toRegister(i);
+ if (!m_lockedRegisters.get(reg) && !m_scratchRegisters.get(reg)) {
+ m_scratchRegisters.set(reg);
+ m_didReuseRegisters = true;
+ return reg;
+ }
+ }
+
+ // We failed.
+ CRASH();
+ // Make some silly compilers happy.
+ return static_cast<typename BankInfo::RegisterType>(-1);
+ }
+
+ GPRReg allocateScratchGPR() { return allocateScratch<GPRInfo>(); }
+ FPRReg allocateScratchFPR() { return allocateScratch<FPRInfo>(); }
+
+ bool didReuseRegisters() const
+ {
+ return m_didReuseRegisters;
+ }
+
+ void preserveReusedRegistersByPushing(MacroAssembler& jit)
+ {
+ if (!m_didReuseRegisters)
+ return;
+
+ for (unsigned i = 0; i < FPRInfo::numberOfRegisters; ++i) {
+ if (m_scratchRegisters.getFPRByIndex(i) && m_usedRegisters.getFPRByIndex(i)) {
+ jit.subPtr(MacroAssembler::TrustedImm32(8), MacroAssembler::stackPointerRegister);
+ jit.storeDouble(FPRInfo::toRegister(i), MacroAssembler::stackPointerRegister);
+ }
+ }
+ for (unsigned i = 0; i < GPRInfo::numberOfRegisters; ++i) {
+ if (m_scratchRegisters.getGPRByIndex(i) && m_usedRegisters.getGPRByIndex(i))
+ jit.push(GPRInfo::toRegister(i));
+ }
+ }
+
+ void restoreReusedRegistersByPopping(MacroAssembler& jit)
+ {
+ if (!m_didReuseRegisters)
+ return;
+
+ for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
+ if (m_scratchRegisters.getGPRByIndex(i) && m_usedRegisters.getGPRByIndex(i))
+ jit.pop(GPRInfo::toRegister(i));
+ }
+ for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
+ if (m_scratchRegisters.getFPRByIndex(i) && m_usedRegisters.getFPRByIndex(i)) {
+ jit.loadDouble(MacroAssembler::stackPointerRegister, FPRInfo::toRegister(i));
+ jit.addPtr(MacroAssembler::TrustedImm32(8), MacroAssembler::stackPointerRegister);
+ }
+ }
+ }
+
+ unsigned desiredScratchBufferSize() const { return m_usedRegisters.numberOfSetRegisters() * sizeof(JSValue); }
+
+ void preserveUsedRegistersToScratchBuffer(MacroAssembler& jit, ScratchBuffer* scratchBuffer, GPRReg scratchGPR = InvalidGPRReg)
+ {
+ unsigned count = 0;
+ for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
+ if (m_usedRegisters.getGPRByIndex(i))
+ jit.storePtr(GPRInfo::toRegister(i), scratchBuffer->m_buffer + (count++));
+ if (scratchGPR == InvalidGPRReg && !m_lockedRegisters.getGPRByIndex(i) && !m_scratchRegisters.getGPRByIndex(i))
+ scratchGPR = GPRInfo::toRegister(i);
+ }
+ ASSERT(scratchGPR != InvalidGPRReg);
+ for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
+ if (m_usedRegisters.getFPRByIndex(i)) {
+ jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->m_buffer + (count++)), scratchGPR);
+ jit.storeDouble(FPRInfo::toRegister(i), scratchGPR);
+ }
+ }
+ ASSERT(count * sizeof(JSValue) == desiredScratchBufferSize());
+
+ jit.move(MacroAssembler::TrustedImmPtr(&scratchBuffer->m_activeLength), scratchGPR);
+ jit.storePtr(MacroAssembler::TrustedImmPtr(static_cast<size_t>(count * sizeof(JSValue))), scratchGPR);
+ }
+
+ void restoreUsedRegistersFromScratchBuffer(MacroAssembler& jit, ScratchBuffer* scratchBuffer, GPRReg scratchGPR = InvalidGPRReg)
+ {
+ if (scratchGPR == InvalidGPRReg) {
+ // Find a scratch register.
+ for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
+ if (m_lockedRegisters.getGPRByIndex(i) || m_scratchRegisters.getGPRByIndex(i))
+ continue;
+ scratchGPR = GPRInfo::toRegister(i);
+ break;
+ }
+ }
+ ASSERT(scratchGPR != InvalidGPRReg);
+
+ jit.move(MacroAssembler::TrustedImmPtr(&scratchBuffer->m_activeLength), scratchGPR);
+ jit.storePtr(MacroAssembler::TrustedImmPtr(0), scratchGPR);
+
+ // Restore double registers first.
+ unsigned count = m_usedRegisters.numberOfSetGPRs();
+ for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
+ if (m_usedRegisters.getFPRByIndex(i)) {
+ jit.move(MacroAssembler::TrustedImmPtr(scratchBuffer->m_buffer + (count++)), scratchGPR);
+ jit.loadDouble(scratchGPR, FPRInfo::toRegister(i));
+ }
+ }
+
+ count = 0;
+ for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
+ if (m_usedRegisters.getGPRByIndex(i))
+ jit.loadPtr(scratchBuffer->m_buffer + (count++), GPRInfo::toRegister(i));
+ }
+ }
+
+private:
+ RegisterSet m_usedRegisters;
+ RegisterSet m_lockedRegisters;
+ RegisterSet m_scratchRegisters;
+ bool m_didReuseRegisters;
+};
+
+} } // namespace JSC::DFG
+
+#endif // ENABLE(DFG_JIT)
+
+#endif // DFGScratchRegisterAllocator_h
+
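// Illustrative usage sketch (not part of this patch), matching the pattern
// emitPutTransitionStub() follows in DFGRepatch.cpp above; "jit" and the GPR
// names are assumed locals.
//
//     ScratchRegisterAllocator allocator(stubInfo.patch.dfg.usedRegisters);
//     allocator.lock(baseGPR);                         // never hand these out
//     allocator.lock(valueGPR);
//     GPRReg scratch = allocator.allocateScratchGPR(); // free reg, else reuse one
//     allocator.preserveReusedRegistersByPushing(jit); // spill any reused regs
//     // ... emit the fast path using scratch ...
//     allocator.restoreReusedRegistersByPopping(jit);  // mirrors the pushes
//
// The restore loops walk the register banks in the opposite direction from the
// preserve loops so that pops exactly mirror pushes.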
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
index c6ec62129..e8824b832 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp
@@ -1911,8 +1911,8 @@ void SpeculativeJIT::compileGetByValOnIntTypedArray(const TypedArrayDescriptor&
void SpeculativeJIT::compilePutByValForIntTypedArray(const TypedArrayDescriptor& descriptor, GPRReg base, GPRReg property, Node& node, size_t elementSize, TypedArraySpeculationRequirements speculationRequirements, TypedArraySignedness signedness, TypedArrayRounding rounding)
{
- Edge baseUse = node.child1();
- Edge valueUse = node.child3();
+ Edge baseUse = m_jit.graph().varArgChild(node, 0);
+ Edge valueUse = m_jit.graph().varArgChild(node, 2);
if (speculationRequirements != NoTypedArrayTypeSpecCheck)
speculationCheck(BadType, JSValueSource::unboxedCell(base), baseUse, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(base, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(descriptor.m_classInfo)));
@@ -2052,8 +2052,8 @@ void SpeculativeJIT::compileGetByValOnFloatTypedArray(const TypedArrayDescriptor
void SpeculativeJIT::compilePutByValForFloatTypedArray(const TypedArrayDescriptor& descriptor, GPRReg base, GPRReg property, Node& node, size_t elementSize, TypedArraySpeculationRequirements speculationRequirements)
{
- Edge baseUse = node.child1();
- Edge valueUse = node.child3();
+ Edge baseUse = m_jit.graph().varArgChild(node, 0);
+ Edge valueUse = m_jit.graph().varArgChild(node, 2);
SpeculateDoubleOperand valueOp(this, valueUse);
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
index 28d8033cb..487addd7f 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h
@@ -272,6 +272,22 @@ public:
{
use(nodeUse.index());
}
+
+ RegisterSet usedRegisters()
+ {
+ RegisterSet result;
+ for (unsigned i = GPRInfo::numberOfRegisters; i--;) {
+ GPRReg gpr = GPRInfo::toRegister(i);
+ if (m_gprs.isInUse(gpr))
+ result.set(gpr);
+ }
+ for (unsigned i = FPRInfo::numberOfRegisters; i--;) {
+ FPRReg fpr = FPRInfo::toRegister(i);
+ if (m_fprs.isInUse(fpr))
+ result.set(fpr);
+ }
+ return result;
+ }
static void markCellCard(MacroAssembler&, GPRReg ownerGPR, GPRReg scratchGPR1, GPRReg scratchGPR2);
static void writeBarrier(MacroAssembler&, GPRReg ownerGPR, GPRReg scratchGPR1, GPRReg scratchGPR2, WriteBarrierUseKind);
@@ -942,10 +958,10 @@ public:
void nonSpeculativeUInt32ToNumber(Node&);
#if USE(JSVALUE64)
- void cachedGetById(CodeOrigin, GPRReg baseGPR, GPRReg resultGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
+ void cachedGetById(CodeOrigin, GPRReg baseGPR, GPRReg resultGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
void cachedPutById(CodeOrigin, GPRReg base, GPRReg value, Edge valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
#elif USE(JSVALUE32_64)
- void cachedGetById(CodeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
+ void cachedGetById(CodeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget = JITCompiler::Jump(), SpillRegistersMode = NeedToSpill);
void cachedPutById(CodeOrigin, GPRReg basePayloadGPR, GPRReg valueTagGPR, GPRReg valuePayloadGPR, Edge valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind, JITCompiler::Jump slowPathTarget = JITCompiler::Jump());
#endif
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
index ec2377389..ed98e0800 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp
@@ -503,7 +503,7 @@ void SpeculativeJIT::nonSpeculativeUInt32ToNumber(Node& node)
jsValueResult(resultTag.gpr(), resultPayload.gpr(), m_compileIndex);
}
-void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
+void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseTagGPROrNone, GPRReg basePayloadGPR, GPRReg resultTagGPR, GPRReg resultPayloadGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
{
JITCompiler::DataLabelPtr structureToCompare;
JITCompiler::PatchableJump structureCheck = m_jit.patchableBranchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(basePayloadGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1)));
@@ -553,7 +553,7 @@ void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseTagGPROrNon
codeOrigin, structureToCompare, structureCheck, propertyStorageLoad,
tagLoadWithPatch, payloadLoadWithPatch, slowPath.get(), doneLabel,
safeCast<int8_t>(basePayloadGPR), safeCast<int8_t>(resultTagGPR),
- safeCast<int8_t>(resultPayloadGPR), safeCast<int8_t>(scratchGPR),
+ safeCast<int8_t>(resultPayloadGPR), usedRegisters(),
spillMode == NeedToSpill ? PropertyAccessRecord::RegistersInUse : PropertyAccessRecord::RegistersFlushed));
addSlowPathGenerator(slowPath.release());
}
@@ -595,6 +595,11 @@ void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg basePayloadGPR,
slowCases, this, optimizedCall, NoResult, valueTagGPR, valuePayloadGPR,
basePayloadGPR, identifier(identifierNumber));
}
+ RegisterSet currentlyUsedRegisters = usedRegisters();
+ currentlyUsedRegisters.clear(scratchGPR);
+ ASSERT(currentlyUsedRegisters.get(basePayloadGPR));
+ ASSERT(currentlyUsedRegisters.get(valueTagGPR));
+ ASSERT(currentlyUsedRegisters.get(valuePayloadGPR));
m_jit.addPropertyAccess(
PropertyAccessRecord(
codeOrigin, structureToCompare, structureCheck, propertyStorageLoad,
@@ -602,7 +607,7 @@ void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg basePayloadGPR,
JITCompiler::DataLabelCompact(payloadStoreWithPatch.label()),
slowPath.get(), doneLabel, safeCast<int8_t>(basePayloadGPR),
safeCast<int8_t>(valueTagGPR), safeCast<int8_t>(valuePayloadGPR),
- safeCast<int8_t>(scratchGPR)));
+ usedRegisters()));
addSlowPathGenerator(slowPath.release());
}
@@ -2471,17 +2476,21 @@ void SpeculativeJIT::compile(Node& node)
}
case PutByVal: {
- if (!at(node.child1()).prediction() || !at(node.child2()).prediction()) {
+ Edge child1 = m_jit.graph().varArgChild(node, 0);
+ Edge child2 = m_jit.graph().varArgChild(node, 1);
+ Edge child3 = m_jit.graph().varArgChild(node, 2);
+
+ if (!at(child1).prediction() || !at(child2).prediction()) {
terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
- if (!at(node.child2()).shouldSpeculateInteger()
- || !isActionableMutableArraySpeculation(at(node.child1()).prediction())
- || at(node.child1()).shouldSpeculateArguments()) {
- SpeculateCellOperand base(this, node.child1()); // Save a register, speculate cell. We'll probably be right.
- JSValueOperand property(this, node.child2());
- JSValueOperand value(this, node.child3());
+ if (!at(child2).shouldSpeculateInteger()
+ || !isActionableMutableArraySpeculation(at(child1).prediction())
+ || at(child1).shouldSpeculateArguments()) {
+ SpeculateCellOperand base(this, child1); // Save a register, speculate cell. We'll probably be right.
+ JSValueOperand property(this, child2);
+ JSValueOperand value(this, child3);
GPRReg baseGPR = base.gpr();
GPRReg propertyTagGPR = property.tagGPR();
GPRReg propertyPayloadGPR = property.payloadGPR();
@@ -2495,74 +2504,74 @@ void SpeculativeJIT::compile(Node& node)
break;
}
- SpeculateCellOperand base(this, node.child1());
- SpeculateStrictInt32Operand property(this, node.child2());
- if (at(node.child1()).shouldSpeculateInt8Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->int8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int8_t), isInt8ArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
+ SpeculateCellOperand base(this, child1);
+ SpeculateStrictInt32Operand property(this, child2);
+ if (at(child1).shouldSpeculateInt8Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->int8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int8_t), isInt8ArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateInt16Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->int16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int16_t), isInt16ArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
+ if (at(child1).shouldSpeculateInt16Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->int16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int16_t), isInt16ArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateInt32Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->int32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int32_t), isInt32ArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
+ if (at(child1).shouldSpeculateInt32Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->int32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int32_t), isInt32ArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateUint8Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->uint8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), isUint8ArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
+ if (at(child1).shouldSpeculateUint8Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->uint8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), isUint8ArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateUint8ClampedArray()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->uint8ClampedArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), isUint8ClampedArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray, ClampRounding);
+ if (at(child1).shouldSpeculateUint8ClampedArray()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->uint8ClampedArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), isUint8ClampedArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray, ClampRounding);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateUint16Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->uint16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint16_t), isUint16ArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
+ if (at(child1).shouldSpeculateUint16Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->uint16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint16_t), isUint16ArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateUint32Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->uint32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint32_t), isUint32ArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
+ if (at(child1).shouldSpeculateUint32Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->uint32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint32_t), isUint32ArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateFloat32Array()) {
- compilePutByValForFloatTypedArray(m_jit.globalData()->float32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(float), isFloat32ArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks);
+ if (at(child1).shouldSpeculateFloat32Array()) {
+ compilePutByValForFloatTypedArray(m_jit.globalData()->float32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(float), isFloat32ArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateFloat64Array()) {
- compilePutByValForFloatTypedArray(m_jit.globalData()->float64ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(double), isFloat64ArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks);
+ if (at(child1).shouldSpeculateFloat64Array()) {
+ compilePutByValForFloatTypedArray(m_jit.globalData()->float64ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(double), isFloat64ArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks);
if (!m_compileOkay)
return;
break;
}
- ASSERT(at(node.child1()).shouldSpeculateArray());
+ ASSERT(at(child1).shouldSpeculateArray());
- JSValueOperand value(this, node.child3());
+ JSValueOperand value(this, child3);
GPRTemporary scratch(this);
// Map base, property & value into registers, allocate a scratch register.
@@ -2575,12 +2584,12 @@ void SpeculativeJIT::compile(Node& node)
if (!m_compileOkay)
return;
- writeBarrier(baseReg, valueTagReg, node.child3(), WriteBarrierForPropertyAccess, scratchReg);
+ writeBarrier(baseReg, valueTagReg, child3, WriteBarrierForPropertyAccess, scratchReg);
// Check that base is an array, and that property is contained within m_vector (< m_vectorLength).
// If we have predicted the base to be type array, we can skip the check.
- if (!isArraySpeculation(m_state.forNode(node.child1()).m_type))
- speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
+ if (!isArraySpeculation(m_state.forNode(child1).m_type))
+ speculationCheck(BadType, JSValueSource::unboxedCell(baseReg), child1, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
base.use();
property.use();
@@ -2620,89 +2629,93 @@ void SpeculativeJIT::compile(Node& node)
}
case PutByValAlias: {
- if (!at(node.child1()).prediction() || !at(node.child2()).prediction()) {
+ Edge child1 = m_jit.graph().varArgChild(node, 0);
+ Edge child2 = m_jit.graph().varArgChild(node, 1);
+ Edge child3 = m_jit.graph().varArgChild(node, 2);
+
+ if (!at(child1).prediction() || !at(child2).prediction()) {
terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
- ASSERT(isActionableMutableArraySpeculation(at(node.child1()).prediction()));
- ASSERT(at(node.child2()).shouldSpeculateInteger());
+ ASSERT(isActionableMutableArraySpeculation(at(child1).prediction()));
+ ASSERT(at(child2).shouldSpeculateInteger());
- SpeculateCellOperand base(this, node.child1());
- SpeculateStrictInt32Operand property(this, node.child2());
+ SpeculateCellOperand base(this, child1);
+ SpeculateStrictInt32Operand property(this, child2);
- if (at(node.child1()).shouldSpeculateInt8Array()) {
+ if (at(child1).shouldSpeculateInt8Array()) {
compilePutByValForIntTypedArray(m_jit.globalData()->int8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int8_t), NoTypedArraySpecCheck, SignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateInt16Array()) {
+ if (at(child1).shouldSpeculateInt16Array()) {
compilePutByValForIntTypedArray(m_jit.globalData()->int16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int16_t), NoTypedArraySpecCheck, SignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateInt32Array()) {
+ if (at(child1).shouldSpeculateInt32Array()) {
compilePutByValForIntTypedArray(m_jit.globalData()->int32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int32_t), NoTypedArraySpecCheck, SignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateUint8Array()) {
+ if (at(child1).shouldSpeculateUint8Array()) {
compilePutByValForIntTypedArray(m_jit.globalData()->uint8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), NoTypedArraySpecCheck, UnsignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateUint8ClampedArray()) {
+ if (at(child1).shouldSpeculateUint8ClampedArray()) {
compilePutByValForIntTypedArray(m_jit.globalData()->uint8ClampedArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), NoTypedArraySpecCheck, UnsignedTypedArray, ClampRounding);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateUint16Array()) {
+ if (at(child1).shouldSpeculateUint16Array()) {
compilePutByValForIntTypedArray(m_jit.globalData()->uint16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint16_t), NoTypedArraySpecCheck, UnsignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateUint32Array()) {
+ if (at(child1).shouldSpeculateUint32Array()) {
compilePutByValForIntTypedArray(m_jit.globalData()->uint32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint32_t), NoTypedArraySpecCheck, UnsignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateFloat32Array()) {
+ if (at(child1).shouldSpeculateFloat32Array()) {
compilePutByValForFloatTypedArray(m_jit.globalData()->float32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(float), NoTypedArraySpecCheck);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateFloat64Array()) {
+ if (at(child1).shouldSpeculateFloat64Array()) {
compilePutByValForFloatTypedArray(m_jit.globalData()->float64ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(double), NoTypedArraySpecCheck);
if (!m_compileOkay)
return;
break;
}
- ASSERT(at(node.child1()).shouldSpeculateArray());
+ ASSERT(at(child1).shouldSpeculateArray());
- JSValueOperand value(this, node.child3());
+ JSValueOperand value(this, child3);
GPRTemporary scratch(this, base);
GPRReg baseReg = base.gpr();
GPRReg scratchReg = scratch.gpr();
- writeBarrier(baseReg, value.tagGPR(), node.child3(), WriteBarrierForPropertyAccess, scratchReg);
+ writeBarrier(baseReg, value.tagGPR(), child3, WriteBarrierForPropertyAccess, scratchReg);
// Get the array storage.
GPRReg storageReg = scratchReg;
@@ -3290,16 +3303,10 @@ void SpeculativeJIT::compile(Node& node)
GPRReg baseGPR = base.gpr();
GPRReg resultTagGPR = resultTag.gpr();
GPRReg resultPayloadGPR = resultPayload.gpr();
- GPRReg scratchGPR;
-
- if (resultTagGPR == baseGPR)
- scratchGPR = resultPayloadGPR;
- else
- scratchGPR = resultTagGPR;
-
+
base.use();
- cachedGetById(node.codeOrigin, InvalidGPRReg, baseGPR, resultTagGPR, resultPayloadGPR, scratchGPR, node.identifierNumber());
+ cachedGetById(node.codeOrigin, InvalidGPRReg, baseGPR, resultTagGPR, resultPayloadGPR, node.identifierNumber());
jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
break;
@@ -3313,18 +3320,12 @@ void SpeculativeJIT::compile(Node& node)
GPRReg basePayloadGPR = base.payloadGPR();
GPRReg resultTagGPR = resultTag.gpr();
GPRReg resultPayloadGPR = resultPayload.gpr();
- GPRReg scratchGPR;
-
- if (resultTagGPR == basePayloadGPR)
- scratchGPR = resultPayloadGPR;
- else
- scratchGPR = resultTagGPR;
base.use();
JITCompiler::Jump notCell = m_jit.branch32(JITCompiler::NotEqual, baseTagGPR, TrustedImm32(JSValue::CellTag));
- cachedGetById(node.codeOrigin, baseTagGPR, basePayloadGPR, resultTagGPR, resultPayloadGPR, scratchGPR, node.identifierNumber(), notCell);
+ cachedGetById(node.codeOrigin, baseTagGPR, basePayloadGPR, resultTagGPR, resultPayloadGPR, node.identifierNumber(), notCell);
jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
break;
@@ -3346,13 +3347,11 @@ void SpeculativeJIT::compile(Node& node)
GPRReg resultTagGPR = resultTag.gpr();
GPRReg resultPayloadGPR = resultPayload.gpr();
- GPRReg scratchGPR = selectScratchGPR(baseGPR, resultTagGPR, resultPayloadGPR);
-
base.use();
flushRegisters();
- cachedGetById(node.codeOrigin, InvalidGPRReg, baseGPR, resultTagGPR, resultPayloadGPR, scratchGPR, node.identifierNumber(), JITCompiler::Jump(), DontSpill);
+ cachedGetById(node.codeOrigin, InvalidGPRReg, baseGPR, resultTagGPR, resultPayloadGPR, node.identifierNumber(), JITCompiler::Jump(), DontSpill);
jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
break;
@@ -3367,15 +3366,13 @@ void SpeculativeJIT::compile(Node& node)
GPRReg resultTagGPR = resultTag.gpr();
GPRReg resultPayloadGPR = resultPayload.gpr();
- GPRReg scratchGPR = selectScratchGPR(baseTagGPR, basePayloadGPR, resultTagGPR, resultPayloadGPR);
-
base.use();
flushRegisters();
JITCompiler::Jump notCell = m_jit.branch32(JITCompiler::NotEqual, baseTagGPR, TrustedImm32(JSValue::CellTag));
- cachedGetById(node.codeOrigin, baseTagGPR, basePayloadGPR, resultTagGPR, resultPayloadGPR, scratchGPR, node.identifierNumber(), notCell, DontSpill);
+ cachedGetById(node.codeOrigin, baseTagGPR, basePayloadGPR, resultTagGPR, resultPayloadGPR, node.identifierNumber(), notCell, DontSpill);
jsValueResult(resultTagGPR, resultPayloadGPR, m_compileIndex, UseChildrenCalledExplicitly);
break;
diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
index b5058e35a..9e468e758 100644
--- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
+++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp
@@ -492,7 +492,7 @@ void SpeculativeJIT::nonSpeculativeUInt32ToNumber(Node& node)
jsValueResult(result.gpr(), m_compileIndex);
}
-void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg resultGPR, GPRReg scratchGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
+void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg resultGPR, unsigned identifierNumber, JITCompiler::Jump slowPathTarget, SpillRegistersMode spillMode)
{
JITCompiler::DataLabelPtr structureToCompare;
JITCompiler::PatchableJump structureCheck = m_jit.patchableBranchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(baseGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1)));
@@ -520,13 +520,9 @@ void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg
PropertyAccessRecord(
codeOrigin, structureToCompare, structureCheck, propertyStorageLoad, loadWithPatch,
slowPath.get(), doneLabel, safeCast<int8_t>(baseGPR), safeCast<int8_t>(resultGPR),
- safeCast<int8_t>(scratchGPR),
+ usedRegisters(),
spillMode == NeedToSpill ? PropertyAccessRecord::RegistersInUse : PropertyAccessRecord::RegistersFlushed));
addSlowPathGenerator(slowPath.release());
-
-
- if (scratchGPR != resultGPR && scratchGPR != InvalidGPRReg && spillMode == NeedToSpill)
- unlock(scratchGPR);
}
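The PropertyAccessRecord now records usedRegisters() instead of a single caller-chosen scratch, so the repatching code can derive scratch space on its own. A minimal sketch of what such a register set needs to support, assuming only bitvector-by-register-index semantics (SketchRegisterSet and its members are illustrative names, not the actual DFGRegisterSet.h API):

#include <stdint.h>

// Illustrative stand-in for a register set: one bit per register index.
class SketchRegisterSet {
public:
    SketchRegisterSet() : m_bits(0) { }
    void set(unsigned reg) { m_bits |= 1u << reg; }       // mark register live
    void clear(unsigned reg) { m_bits &= ~(1u << reg); }  // mark register free
    bool get(unsigned reg) const { return !!(m_bits & (1u << reg)); }
private:
    uint32_t m_bits;
};

With that shape, the manipulation in cachedPutById below (clear the scratch, assert that base and value are still marked) reads as plain bit operations.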
void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg valueGPR, Edge valueUse, GPRReg scratchGPR, unsigned identifierNumber, PutKind putKind, JITCompiler::Jump slowPathTarget)
@@ -568,11 +564,15 @@ void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg
slowCases, this, optimizedCall, NoResult, valueGPR, baseGPR,
identifier(identifierNumber));
}
+ RegisterSet currentlyUsedRegisters = usedRegisters();
+ currentlyUsedRegisters.clear(scratchGPR);
+ ASSERT(currentlyUsedRegisters.get(baseGPR));
+ ASSERT(currentlyUsedRegisters.get(valueGPR));
m_jit.addPropertyAccess(
PropertyAccessRecord(
codeOrigin, structureToCompare, structureCheck, propertyStorageLoad,
JITCompiler::DataLabelCompact(storeWithPatch.label()), slowPath.get(), doneLabel,
- safeCast<int8_t>(baseGPR), safeCast<int8_t>(valueGPR), safeCast<int8_t>(scratchGPR)));
+ safeCast<int8_t>(baseGPR), safeCast<int8_t>(valueGPR), currentlyUsedRegisters));
addSlowPathGenerator(slowPath.release());
}
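Recording the full live set lets a later patch-time allocator pick any register not in use at the access site, instead of being handed one scratch up front. A hypothetical picker under that assumption (the real logic is in the new DFGScratchRegisterAllocator.h, which can also spill when every register is live):

#include <stdint.h>

// Hypothetical: return the first register not marked live in a 32-bit mask,
// or numRegisters if all are live (a real allocator would spill one instead).
unsigned pickScratch(uint32_t liveBits, unsigned numRegisters)
{
    for (unsigned reg = 0; reg < numRegisters; ++reg) {
        if (!(liveBits & (1u << reg)))
            return reg;
    }
    return numRegisters;
}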
@@ -2501,15 +2501,19 @@ void SpeculativeJIT::compile(Node& node)
}
case PutByVal: {
- if (!at(node.child1()).prediction() || !at(node.child2()).prediction()) {
+ Edge child1 = m_jit.graph().varArgChild(node, 0);
+ Edge child2 = m_jit.graph().varArgChild(node, 1);
+ Edge child3 = m_jit.graph().varArgChild(node, 2);
+
+ if (!at(child1).prediction() || !at(child2).prediction()) {
terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
- if (!at(node.child2()).shouldSpeculateInteger() || !isActionableMutableArraySpeculation(at(node.child1()).prediction())) {
- JSValueOperand arg1(this, node.child1());
- JSValueOperand arg2(this, node.child2());
- JSValueOperand arg3(this, node.child3());
+ if (!at(child2).shouldSpeculateInteger() || !isActionableMutableArraySpeculation(at(child1).prediction())) {
+ JSValueOperand arg1(this, child1);
+ JSValueOperand arg2(this, child2);
+ JSValueOperand arg3(this, child3);
GPRReg arg1GPR = arg1.gpr();
GPRReg arg2GPR = arg2.gpr();
GPRReg arg3GPR = arg3.gpr();
@@ -2521,12 +2525,12 @@ void SpeculativeJIT::compile(Node& node)
break;
}
- SpeculateCellOperand base(this, node.child1());
- SpeculateStrictInt32Operand property(this, node.child2());
- if (at(node.child1()).shouldSpeculateArguments()) {
- JSValueOperand value(this, node.child3());
- SpeculateCellOperand base(this, node.child1());
- SpeculateStrictInt32Operand property(this, node.child2());
+ SpeculateCellOperand base(this, child1);
+ SpeculateStrictInt32Operand property(this, child2);
+ if (at(child1).shouldSpeculateArguments()) {
+ JSValueOperand value(this, child3);
+ SpeculateCellOperand base(this, child1);
+ SpeculateStrictInt32Operand property(this, child2);
GPRTemporary scratch(this);
GPRTemporary scratch2(this);
@@ -2539,9 +2543,9 @@ void SpeculativeJIT::compile(Node& node)
if (!m_compileOkay)
return;
- if (!isArgumentsSpeculation(m_state.forNode(node.child1()).m_type)) {
+ if (!isArgumentsSpeculation(m_state.forNode(child1).m_type)) {
speculationCheck(
- BadType, JSValueSource::unboxedCell(baseReg), node.child1(),
+ BadType, JSValueSource::unboxedCell(baseReg), child1,
m_jit.branchPtr(
MacroAssembler::NotEqual,
MacroAssembler::Address(baseReg, JSCell::classInfoOffset()),
@@ -2582,70 +2586,70 @@ void SpeculativeJIT::compile(Node& node)
break;
}
- if (at(node.child1()).shouldSpeculateInt8Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->int8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int8_t), isInt8ArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
+ if (at(child1).shouldSpeculateInt8Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->int8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int8_t), isInt8ArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateInt16Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->int16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int16_t), isInt16ArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
+ if (at(child1).shouldSpeculateInt16Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->int16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int16_t), isInt16ArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateInt32Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->int32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int32_t), isInt32ArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
+ if (at(child1).shouldSpeculateInt32Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->int32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int32_t), isInt32ArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, SignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateUint8Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->uint8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), isUint8ArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
+ if (at(child1).shouldSpeculateUint8Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->uint8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), isUint8ArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateUint8ClampedArray()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->uint8ClampedArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), isUint8ClampedArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray, ClampRounding);
+ if (at(child1).shouldSpeculateUint8ClampedArray()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->uint8ClampedArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), isUint8ClampedArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray, ClampRounding);
break;
}
- if (at(node.child1()).shouldSpeculateUint16Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->uint16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint16_t), isUint16ArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
+ if (at(child1).shouldSpeculateUint16Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->uint16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint16_t), isUint16ArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateUint32Array()) {
- compilePutByValForIntTypedArray(m_jit.globalData()->uint32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint32_t), isUint32ArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
+ if (at(child1).shouldSpeculateUint32Array()) {
+ compilePutByValForIntTypedArray(m_jit.globalData()->uint32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint32_t), isUint32ArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks, UnsignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateFloat32Array()) {
- compilePutByValForFloatTypedArray(m_jit.globalData()->float32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(float), isFloat32ArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks);
+ if (at(child1).shouldSpeculateFloat32Array()) {
+ compilePutByValForFloatTypedArray(m_jit.globalData()->float32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(float), isFloat32ArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateFloat64Array()) {
- compilePutByValForFloatTypedArray(m_jit.globalData()->float64ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(double), isFloat64ArraySpeculation(m_state.forNode(node.child1()).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks);
+ if (at(child1).shouldSpeculateFloat64Array()) {
+ compilePutByValForFloatTypedArray(m_jit.globalData()->float64ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(double), isFloat64ArraySpeculation(m_state.forNode(child1).m_type) ? NoTypedArrayTypeSpecCheck : AllTypedArraySpecChecks);
if (!m_compileOkay)
return;
break;
}
- ASSERT(at(node.child1()).shouldSpeculateArray());
+ ASSERT(at(child1).shouldSpeculateArray());
- JSValueOperand value(this, node.child3());
+ JSValueOperand value(this, child3);
GPRTemporary scratch(this);
// Map base, property & value into registers, allocate a scratch register.
@@ -2657,12 +2661,12 @@ void SpeculativeJIT::compile(Node& node)
if (!m_compileOkay)
return;
- writeBarrier(baseReg, value.gpr(), node.child3(), WriteBarrierForPropertyAccess, scratchReg);
+ writeBarrier(baseReg, value.gpr(), child3, WriteBarrierForPropertyAccess, scratchReg);
// Check that base is an array, and that property is contained within m_vector (< m_vectorLength).
// If we have predicted the base to be type array, we can skip the check.
- if (!isArraySpeculation(m_state.forNode(node.child1()).m_type))
- speculationCheck(BadType, JSValueRegs(baseReg), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
+ if (!isArraySpeculation(m_state.forNode(child1).m_type))
+ speculationCheck(BadType, JSValueRegs(baseReg), child1, m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(baseReg, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info)));
base.use();
property.use();
@@ -2701,88 +2705,92 @@ void SpeculativeJIT::compile(Node& node)
}
case PutByValAlias: {
- if (!at(node.child1()).prediction() || !at(node.child2()).prediction()) {
+ Edge child1 = m_jit.graph().varArgChild(node, 0);
+ Edge child2 = m_jit.graph().varArgChild(node, 1);
+ Edge child3 = m_jit.graph().varArgChild(node, 2);
+
+ if (!at(child1).prediction() || !at(child2).prediction()) {
terminateSpeculativeExecution(InadequateCoverage, JSValueRegs(), NoNode);
break;
}
- ASSERT(isActionableMutableArraySpeculation(at(node.child1()).prediction()));
- ASSERT(at(node.child2()).shouldSpeculateInteger());
+ ASSERT(isActionableMutableArraySpeculation(at(child1).prediction()));
+ ASSERT(at(child2).shouldSpeculateInteger());
- SpeculateCellOperand base(this, node.child1());
- SpeculateStrictInt32Operand property(this, node.child2());
- if (at(node.child1()).shouldSpeculateInt8Array()) {
+ SpeculateCellOperand base(this, child1);
+ SpeculateStrictInt32Operand property(this, child2);
+ if (at(child1).shouldSpeculateInt8Array()) {
compilePutByValForIntTypedArray(m_jit.globalData()->int8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int8_t), NoTypedArraySpecCheck, SignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateInt16Array()) {
+ if (at(child1).shouldSpeculateInt16Array()) {
compilePutByValForIntTypedArray(m_jit.globalData()->int16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int16_t), NoTypedArraySpecCheck, SignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateInt32Array()) {
+ if (at(child1).shouldSpeculateInt32Array()) {
compilePutByValForIntTypedArray(m_jit.globalData()->int32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(int32_t), NoTypedArraySpecCheck, SignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateUint8Array()) {
+ if (at(child1).shouldSpeculateUint8Array()) {
compilePutByValForIntTypedArray(m_jit.globalData()->uint8ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), NoTypedArraySpecCheck, UnsignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateUint8ClampedArray()) {
+ if (at(child1).shouldSpeculateUint8ClampedArray()) {
compilePutByValForIntTypedArray(m_jit.globalData()->uint8ClampedArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint8_t), NoTypedArraySpecCheck, UnsignedTypedArray, ClampRounding);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateUint16Array()) {
+ if (at(child1).shouldSpeculateUint16Array()) {
compilePutByValForIntTypedArray(m_jit.globalData()->uint16ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint16_t), NoTypedArraySpecCheck, UnsignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateUint32Array()) {
+ if (at(child1).shouldSpeculateUint32Array()) {
compilePutByValForIntTypedArray(m_jit.globalData()->uint32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(uint32_t), NoTypedArraySpecCheck, UnsignedTypedArray);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateFloat32Array()) {
+ if (at(child1).shouldSpeculateFloat32Array()) {
compilePutByValForFloatTypedArray(m_jit.globalData()->float32ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(float), NoTypedArraySpecCheck);
if (!m_compileOkay)
return;
break;
}
- if (at(node.child1()).shouldSpeculateFloat64Array()) {
+ if (at(child1).shouldSpeculateFloat64Array()) {
compilePutByValForFloatTypedArray(m_jit.globalData()->float64ArrayDescriptor(), base.gpr(), property.gpr(), node, sizeof(double), NoTypedArraySpecCheck);
if (!m_compileOkay)
return;
break;
}
- ASSERT(at(node.child1()).shouldSpeculateArray());
+ ASSERT(at(child1).shouldSpeculateArray());
- JSValueOperand value(this, node.child3());
+ JSValueOperand value(this, child3);
GPRTemporary scratch(this);
GPRReg baseReg = base.gpr();
GPRReg scratchReg = scratch.gpr();
- writeBarrier(base.gpr(), value.gpr(), node.child3(), WriteBarrierForPropertyAccess, scratchReg);
+ writeBarrier(base.gpr(), value.gpr(), child3, WriteBarrierForPropertyAccess, scratchReg);
// Get the array storage.
GPRReg storageReg = scratchReg;
@@ -3319,16 +3327,10 @@ void SpeculativeJIT::compile(Node& node)
GPRReg baseGPR = base.gpr();
GPRReg resultGPR = result.gpr();
- GPRReg scratchGPR;
-
- if (resultGPR == baseGPR)
- scratchGPR = tryAllocate();
- else
- scratchGPR = resultGPR;
base.use();
- cachedGetById(node.codeOrigin, baseGPR, resultGPR, scratchGPR, node.identifierNumber());
+ cachedGetById(node.codeOrigin, baseGPR, resultGPR, node.identifierNumber());
jsValueResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly);
break;
@@ -3339,18 +3341,12 @@ void SpeculativeJIT::compile(Node& node)
GPRReg baseGPR = base.gpr();
GPRReg resultGPR = result.gpr();
- GPRReg scratchGPR;
-
- if (resultGPR == baseGPR)
- scratchGPR = tryAllocate();
- else
- scratchGPR = resultGPR;
base.use();
JITCompiler::Jump notCell = m_jit.branchTestPtr(JITCompiler::NonZero, baseGPR, GPRInfo::tagMaskRegister);
- cachedGetById(node.codeOrigin, baseGPR, resultGPR, scratchGPR, node.identifierNumber(), notCell);
+ cachedGetById(node.codeOrigin, baseGPR, resultGPR, node.identifierNumber(), notCell);
jsValueResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly);
@@ -3371,13 +3367,11 @@ void SpeculativeJIT::compile(Node& node)
GPRReg resultGPR = result.gpr();
- GPRReg scratchGPR = selectScratchGPR(baseGPR, resultGPR);
-
base.use();
flushRegisters();
- cachedGetById(node.codeOrigin, baseGPR, resultGPR, scratchGPR, node.identifierNumber(), JITCompiler::Jump(), DontSpill);
+ cachedGetById(node.codeOrigin, baseGPR, resultGPR, node.identifierNumber(), JITCompiler::Jump(), DontSpill);
jsValueResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly);
break;
@@ -3389,14 +3383,12 @@ void SpeculativeJIT::compile(Node& node)
GPRResult result(this);
GPRReg resultGPR = result.gpr();
- GPRReg scratchGPR = selectScratchGPR(baseGPR, resultGPR);
-
base.use();
flushRegisters();
JITCompiler::Jump notCell = m_jit.branchTestPtr(JITCompiler::NonZero, baseGPR, GPRInfo::tagMaskRegister);
- cachedGetById(node.codeOrigin, baseGPR, resultGPR, scratchGPR, node.identifierNumber(), notCell, DontSpill);
+ cachedGetById(node.codeOrigin, baseGPR, resultGPR, node.identifierNumber(), notCell, DontSpill);
jsValueResult(resultGPR, m_compileIndex, UseChildrenCalledExplicitly);
diff --git a/Source/JavaScriptCore/heap/CopiedAllocator.h b/Source/JavaScriptCore/heap/CopiedAllocator.h
index 7455ec816..32b84f008 100644
--- a/Source/JavaScriptCore/heap/CopiedAllocator.h
+++ b/Source/JavaScriptCore/heap/CopiedAllocator.h
@@ -27,72 +27,128 @@
#define CopiedAllocator_h
#include "CopiedBlock.h"
+#include <wtf/CheckedBoolean.h>
+#include <wtf/DataLog.h>
namespace JSC {
class CopiedAllocator {
- friend class JIT;
public:
CopiedAllocator();
- void* allocate(size_t);
- bool fitsInCurrentBlock(size_t);
- bool wasLastAllocation(void*, size_t);
- void startedCopying();
- void resetCurrentBlock(CopiedBlock*);
+
+ bool fastPathShouldSucceed(size_t bytes) const;
+ CheckedBoolean tryAllocate(size_t bytes, void** outPtr);
+ CheckedBoolean tryReallocate(void* oldPtr, size_t oldBytes, size_t newBytes);
+ void* forceAllocate(size_t bytes);
+ CopiedBlock* resetCurrentBlock();
+ void setCurrentBlock(CopiedBlock*);
size_t currentCapacity();
+
+ bool isValid() { return !!m_currentBlock; }
-private:
CopiedBlock* currentBlock() { return m_currentBlock; }
- char* m_currentOffset;
+ // Yes, these are public. No, that doesn't mean you can play with them.
+ // If I had made them private then I'd have to list off all of the JIT
+ // classes and functions that are entitled to modify these directly, and
+ // that would have been gross.
+ size_t m_currentRemaining;
+ char* m_currentPayloadEnd;
CopiedBlock* m_currentBlock;
};
inline CopiedAllocator::CopiedAllocator()
- : m_currentOffset(0)
+ : m_currentRemaining(0)
+ , m_currentPayloadEnd(0)
, m_currentBlock(0)
{
}
-inline void* CopiedAllocator::allocate(size_t bytes)
+inline bool CopiedAllocator::fastPathShouldSucceed(size_t bytes) const
{
- ASSERT(m_currentOffset);
ASSERT(is8ByteAligned(reinterpret_cast<void*>(bytes)));
- ASSERT(fitsInCurrentBlock(bytes));
- void* ptr = static_cast<void*>(m_currentOffset);
- m_currentOffset += bytes;
- ASSERT(is8ByteAligned(ptr));
- return ptr;
+
+ return bytes <= m_currentRemaining;
}
-inline bool CopiedAllocator::fitsInCurrentBlock(size_t bytes)
+inline CheckedBoolean CopiedAllocator::tryAllocate(size_t bytes, void** outPtr)
{
- return m_currentOffset + bytes < reinterpret_cast<char*>(m_currentBlock) + HeapBlock::s_blockSize && m_currentOffset + bytes > m_currentOffset;
+ ASSERT(is8ByteAligned(reinterpret_cast<void*>(bytes)));
+
+ // This code is written in a gratuitously low-level manner, in order to
+ // serve as a kind of template for what the JIT would do. Note that, as
+ // written, it ought to require only one register, which doubles as the
+ // result, provided that the compiler does a minimal amount of control
+ // flow simplification and the bytes argument is a constant.
+
+ size_t currentRemaining = m_currentRemaining;
+ if (bytes > currentRemaining)
+ return false;
+ currentRemaining -= bytes;
+ m_currentRemaining = currentRemaining;
+ *outPtr = m_currentPayloadEnd - currentRemaining - bytes;
+
+ ASSERT(is8ByteAligned(*outPtr));
+
+ return true;
+}
+
+inline CheckedBoolean CopiedAllocator::tryReallocate(
+ void* oldPtr, size_t oldBytes, size_t newBytes)
+{
+ ASSERT(is8ByteAligned(oldPtr));
+ ASSERT(is8ByteAligned(reinterpret_cast<void*>(oldBytes)));
+ ASSERT(is8ByteAligned(reinterpret_cast<void*>(newBytes)));
+
+ ASSERT(newBytes > oldBytes);
+
+ size_t additionalBytes = newBytes - oldBytes;
+
+ size_t currentRemaining = m_currentRemaining;
+ if (m_currentPayloadEnd - currentRemaining - oldBytes != static_cast<char*>(oldPtr))
+ return false;
+
+ if (additionalBytes > currentRemaining)
+ return false;
+
+ m_currentRemaining = currentRemaining - additionalBytes;
+
+ return true;
}
-inline bool CopiedAllocator::wasLastAllocation(void* ptr, size_t size)
+inline void* CopiedAllocator::forceAllocate(size_t bytes)
{
- return static_cast<char*>(ptr) + size == m_currentOffset && ptr > m_currentBlock && ptr < reinterpret_cast<char*>(m_currentBlock) + HeapBlock::s_blockSize;
+ void* result = 0; // Needed because compilers don't realize this will always be assigned.
+ CheckedBoolean didSucceed = tryAllocate(bytes, &result);
+ ASSERT(didSucceed);
+ return result;
}
-inline void CopiedAllocator::startedCopying()
+inline CopiedBlock* CopiedAllocator::resetCurrentBlock()
{
- if (m_currentBlock)
- m_currentBlock->m_offset = static_cast<void*>(m_currentOffset);
- m_currentOffset = 0;
- m_currentBlock = 0;
+ CopiedBlock* result = m_currentBlock;
+ if (result) {
+ result->m_remaining = m_currentRemaining;
+ m_currentBlock = 0;
+ m_currentRemaining = 0;
+ m_currentPayloadEnd = 0;
+ }
+ return result;
}
-inline void CopiedAllocator::resetCurrentBlock(CopiedBlock* newBlock)
+inline void CopiedAllocator::setCurrentBlock(CopiedBlock* newBlock)
{
- if (m_currentBlock)
- m_currentBlock->m_offset = static_cast<void*>(m_currentOffset);
+ ASSERT(!m_currentBlock);
m_currentBlock = newBlock;
- m_currentOffset = static_cast<char*>(newBlock->m_offset);
+ ASSERT(newBlock);
+ m_currentRemaining = newBlock->m_remaining;
+ m_currentPayloadEnd = newBlock->payloadEnd();
}
inline size_t CopiedAllocator::currentCapacity()
{
+ if (!m_currentBlock)
+ return 0;
return m_currentBlock->capacity();
}
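The reworked allocator tracks bytes remaining rather than a cursor position, so the fast path is one comparison and the object pointer is recomputed from the end of the block. A self-contained toy with the same arithmetic over a plain buffer, for illustration only (SketchBumpDown is not a JSC type):

#include <stddef.h>

struct SketchBumpDown {
    char* payloadEnd;  // one past the last usable byte of the block
    size_t remaining;  // free bytes, counting down toward zero

    bool tryAllocate(size_t bytes, void** outPtr)
    {
        if (bytes > remaining)
            return false;
        remaining -= bytes;
        // payloadEnd - remaining is the first free byte, so the new object
        // starts 'bytes' before it.
        *outPtr = payloadEnd - remaining - bytes;
        return true;
    }
};

Counting down is what enables the one-register JIT sequence in JITInlineMethods.h later in this diff: subtracting the request size from the remaining count goes negative exactly when the block is exhausted.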
diff --git a/Source/JavaScriptCore/heap/CopiedBlock.h b/Source/JavaScriptCore/heap/CopiedBlock.h
index 5ed58008e..6717a6835 100644
--- a/Source/JavaScriptCore/heap/CopiedBlock.h
+++ b/Source/JavaScriptCore/heap/CopiedBlock.h
@@ -42,15 +42,30 @@ public:
static CopiedBlock* createNoZeroFill(const PageAllocationAligned&);
static PageAllocationAligned destroy(CopiedBlock*);
+ // The payload is the region of the block that is usable for allocations.
char* payload();
+ char* payloadEnd();
+ size_t payloadCapacity();
+
+ // The data is the region of the block that has been used for allocations.
+ char* data();
+ char* dataEnd();
+ size_t dataSize();
+
+ // The wilderness is the region of the block that is usable for allocations
+ // but has not yet been used.
+ char* wilderness();
+ char* wildernessEnd();
+ size_t wildernessSize();
+
size_t size();
size_t capacity();
private:
CopiedBlock(const PageAllocationAligned&);
- void zeroFillToEnd(); // Can be called at any time to zero-fill to the end of the block.
+ void zeroFillWilderness(); // Can be called at any time to zero-fill to the end of the block.
- void* m_offset;
+ size_t m_remaining;
uintptr_t m_isPinned;
};
@@ -62,19 +77,18 @@ inline CopiedBlock* CopiedBlock::createNoZeroFill(const PageAllocationAligned& a
inline CopiedBlock* CopiedBlock::create(const PageAllocationAligned& allocation)
{
CopiedBlock* block = createNoZeroFill(allocation);
- block->zeroFillToEnd();
+ block->zeroFillWilderness();
return block;
}
-inline void CopiedBlock::zeroFillToEnd()
+inline void CopiedBlock::zeroFillWilderness()
{
#if USE(JSVALUE64)
- char* offset = static_cast<char*>(m_offset);
- memset(static_cast<void*>(offset), 0, static_cast<size_t>((reinterpret_cast<char*>(this) + m_allocation.size()) - offset));
+ memset(wilderness(), 0, wildernessSize());
#else
JSValue emptyValue;
- JSValue* limit = reinterpret_cast_ptr<JSValue*>(reinterpret_cast<char*>(this) + m_allocation.size());
- for (JSValue* currentValue = reinterpret_cast<JSValue*>(m_offset); currentValue < limit; currentValue++)
+ JSValue* limit = reinterpret_cast_ptr<JSValue*>(wildernessEnd());
+ for (JSValue* currentValue = reinterpret_cast<JSValue*>(wilderness()); currentValue < limit; currentValue++)
*currentValue = emptyValue;
#endif
}
@@ -90,10 +104,10 @@ inline PageAllocationAligned CopiedBlock::destroy(CopiedBlock* block)
inline CopiedBlock::CopiedBlock(const PageAllocationAligned& allocation)
: HeapBlock(allocation)
- , m_offset(payload())
+ , m_remaining(payloadCapacity())
, m_isPinned(false)
{
- ASSERT(is8ByteAligned(static_cast<void*>(m_offset)));
+ ASSERT(is8ByteAligned(reinterpret_cast<void*>(m_remaining)));
}
inline char* CopiedBlock::payload()
@@ -101,9 +115,49 @@ inline char* CopiedBlock::payload()
return reinterpret_cast<char*>(this) + ((sizeof(CopiedBlock) + 7) & ~7);
}
+inline char* CopiedBlock::payloadEnd()
+{
+ return reinterpret_cast<char*>(this) + m_allocation.size();
+}
+
+inline size_t CopiedBlock::payloadCapacity()
+{
+ return payloadEnd() - payload();
+}
+
+inline char* CopiedBlock::data()
+{
+ return payload();
+}
+
+inline char* CopiedBlock::dataEnd()
+{
+ return payloadEnd() - m_remaining;
+}
+
+inline size_t CopiedBlock::dataSize()
+{
+ return dataEnd() - data();
+}
+
+inline char* CopiedBlock::wilderness()
+{
+ return dataEnd();
+}
+
+inline char* CopiedBlock::wildernessEnd()
+{
+ return payloadEnd();
+}
+
+inline size_t CopiedBlock::wildernessSize()
+{
+ return wildernessEnd() - wilderness();
+}
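A comment sketch of the layout the payload/data/wilderness accessors describe, derived from the definitions above (dataEnd() is payloadEnd() - m_remaining):

//  |<------------------------ payload ------------------------->|
//  |<-------- data (used) -------->|<--- wilderness (free) ---->|
//  ^ payload()                     ^ dataEnd() == wilderness()  ^ payloadEnd()
//
// Hence dataSize() + wildernessSize() == payloadCapacity(), and m_remaining
// is always wildernessSize().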
+
inline size_t CopiedBlock::size()
{
- return static_cast<size_t>(static_cast<char*>(m_offset) - payload());
+ return dataSize();
}
inline size_t CopiedBlock::capacity()
diff --git a/Source/JavaScriptCore/heap/CopiedSpace.cpp b/Source/JavaScriptCore/heap/CopiedSpace.cpp
index 9eb70a556..147dfa4b3 100644
--- a/Source/JavaScriptCore/heap/CopiedSpace.cpp
+++ b/Source/JavaScriptCore/heap/CopiedSpace.cpp
@@ -71,8 +71,7 @@ CheckedBoolean CopiedSpace::tryAllocateSlowCase(size_t bytes, void** outPtr)
allocateBlock();
- *outPtr = m_allocator.allocate(bytes);
- ASSERT(*outPtr);
+ *outPtr = m_allocator.forceAllocate(bytes);
return true;
}
@@ -93,7 +92,10 @@ CheckedBoolean CopiedSpace::tryAllocateOversize(size_t bytes, void** outPtr)
m_blockFilter.add(reinterpret_cast<Bits>(block));
m_blockSet.add(block);
- *outPtr = allocateFromBlock(block, bytes);
+ CopiedAllocator allocator;
+ allocator.setCurrentBlock(block);
+ *outPtr = allocator.forceAllocate(bytes);
+ allocator.resetCurrentBlock();
m_heap->didAllocate(blockSize);
@@ -107,17 +109,12 @@ CheckedBoolean CopiedSpace::tryReallocate(void** ptr, size_t oldSize, size_t new
void* oldPtr = *ptr;
ASSERT(!m_heap->globalData()->isInitializingObject());
-
+
if (isOversize(oldSize) || isOversize(newSize))
return tryReallocateOversize(ptr, oldSize, newSize);
-
- if (m_allocator.wasLastAllocation(oldPtr, oldSize)) {
- size_t delta = newSize - oldSize;
- if (m_allocator.fitsInCurrentBlock(delta)) {
- (void)m_allocator.allocate(delta);
- return true;
- }
- }
+
+ if (m_allocator.tryReallocate(oldPtr, oldSize, newSize))
+ return true;
void* result = 0;
if (!tryAllocate(newSize, &result)) {
@@ -157,16 +154,17 @@ CheckedBoolean CopiedSpace::tryReallocateOversize(void** ptr, size_t oldSize, si
void CopiedSpace::doneFillingBlock(CopiedBlock* block)
{
- ASSERT(block);
- ASSERT(block->m_offset < reinterpret_cast<char*>(block) + HeapBlock::s_blockSize);
ASSERT(m_inCopyingPhase);
+
+ if (!block)
+ return;
- if (block->m_offset == block->payload()) {
+ if (!block->dataSize()) {
recycleBlock(block);
return;
}
- block->zeroFillToEnd();
+ block->zeroFillWilderness();
{
SpinLockHolder locker(&m_toSpaceLock);
@@ -226,7 +224,7 @@ void CopiedSpace::doneCopying()
if (!m_toSpace->head())
allocateBlock();
else
- m_allocator.resetCurrentBlock(static_cast<CopiedBlock*>(m_toSpace->head()));
+ m_allocator.setCurrentBlock(static_cast<CopiedBlock*>(m_toSpace->head()));
}
size_t CopiedSpace::size()
diff --git a/Source/JavaScriptCore/heap/CopiedSpace.h b/Source/JavaScriptCore/heap/CopiedSpace.h
index 530e989da..de682a4c1 100644
--- a/Source/JavaScriptCore/heap/CopiedSpace.h
+++ b/Source/JavaScriptCore/heap/CopiedSpace.h
@@ -77,9 +77,7 @@ public:
static CopiedBlock* blockFor(void*);
private:
- static void* allocateFromBlock(CopiedBlock*, size_t);
static bool isOversize(size_t);
- static bool fitsInBlock(CopiedBlock*, size_t);
static CopiedBlock* oversizeBlockFor(void* ptr);
CheckedBoolean tryAllocateSlowCase(size_t, void**);
diff --git a/Source/JavaScriptCore/heap/CopiedSpaceInlineMethods.h b/Source/JavaScriptCore/heap/CopiedSpaceInlineMethods.h
index 1366cd8a7..f702e1dd9 100644
--- a/Source/JavaScriptCore/heap/CopiedSpaceInlineMethods.h
+++ b/Source/JavaScriptCore/heap/CopiedSpaceInlineMethods.h
@@ -64,7 +64,7 @@ inline void CopiedSpace::startedCopying()
m_toSpace = temp;
m_blockFilter.reset();
- m_allocator.startedCopying();
+ m_allocator.resetCurrentBlock();
ASSERT(!m_inCopyingPhase);
ASSERT(!m_numberOfLoanedBlocks);
@@ -94,7 +94,7 @@ inline CopiedBlock* CopiedSpace::allocateBlockForCopyingPhase()
m_numberOfLoanedBlocks++;
}
- ASSERT(block->m_offset == block->payload());
+ ASSERT(!block->dataSize());
return block;
}
@@ -103,45 +103,27 @@ inline void CopiedSpace::allocateBlock()
if (m_heap->shouldCollect())
m_heap->collect(Heap::DoNotSweep);
+ m_allocator.resetCurrentBlock();
+
CopiedBlock* block = CopiedBlock::create(m_heap->blockAllocator().allocate());
m_toSpace->push(block);
m_blockFilter.add(reinterpret_cast<Bits>(block));
m_blockSet.add(block);
- m_allocator.resetCurrentBlock(block);
-}
-
-inline bool CopiedSpace::fitsInBlock(CopiedBlock* block, size_t bytes)
-{
- return static_cast<char*>(block->m_offset) + bytes < reinterpret_cast<char*>(block) + block->capacity() && static_cast<char*>(block->m_offset) + bytes > block->m_offset;
+ m_allocator.setCurrentBlock(block);
}
inline CheckedBoolean CopiedSpace::tryAllocate(size_t bytes, void** outPtr)
{
ASSERT(!m_heap->globalData()->isInitializingObject());
- if (isOversize(bytes) || !m_allocator.fitsInCurrentBlock(bytes))
+ if (isOversize(bytes) || !m_allocator.tryAllocate(bytes, outPtr))
return tryAllocateSlowCase(bytes, outPtr);
- *outPtr = m_allocator.allocate(bytes);
ASSERT(*outPtr);
return true;
}
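tryAllocate now returns the pointer in-band and reports failure, so the inline path above is a single call, with oversize requests and block exhaustion both funneled to the slow case. A sketch of the refill pattern under those definitions (allocateOrRefill and newBlock are hypothetical; the types and methods are the ones from this diff):

// Hypothetical driver: try the current block, otherwise retire it and
// install a fresh one, after which forceAllocate must succeed for any
// non-oversize request. A real caller hands the retired block back to
// the space (cf. doneFillingBlock) rather than dropping it.
void* allocateOrRefill(CopiedAllocator& allocator, CopiedBlock* newBlock, size_t bytes)
{
    void* result = 0;
    if (allocator.tryAllocate(bytes, &result))  // fast path
        return result;
    allocator.resetCurrentBlock();              // detach the exhausted block
    allocator.setCurrentBlock(newBlock);        // requires no current block
    return allocator.forceAllocate(bytes);
}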
-inline void* CopiedSpace::allocateFromBlock(CopiedBlock* block, size_t bytes)
-{
- ASSERT(fitsInBlock(block, bytes));
- ASSERT(is8ByteAligned(block->m_offset));
-
- void* ptr = block->m_offset;
- ASSERT(block->m_offset >= block->payload() && block->m_offset < reinterpret_cast<char*>(block) + block->capacity());
- block->m_offset = static_cast<void*>((static_cast<char*>(ptr) + bytes));
- ASSERT(block->m_offset >= block->payload() && block->m_offset < reinterpret_cast<char*>(block) + block->capacity());
-
- ASSERT(is8ByteAligned(ptr));
- return ptr;
-}
-
inline bool CopiedSpace::isOversize(size_t bytes)
{
return bytes > s_maxAllocationSize;
diff --git a/Source/JavaScriptCore/heap/MachineStackMarker.cpp b/Source/JavaScriptCore/heap/MachineStackMarker.cpp
index 8e0c57b6a..7eb57479b 100644
--- a/Source/JavaScriptCore/heap/MachineStackMarker.cpp
+++ b/Source/JavaScriptCore/heap/MachineStackMarker.cpp
@@ -141,8 +141,10 @@ MachineThreads::MachineThreads(Heap* heap)
MachineThreads::~MachineThreads()
{
- if (m_threadSpecific)
- ThreadSpecificKeyDelete(m_threadSpecific);
+ if (m_threadSpecific) {
+ int error = pthread_key_delete(m_threadSpecific);
+ ASSERT_UNUSED(error, !error);
+ }
MutexLocker registeredThreadsLock(m_registeredThreadsMutex);
for (Thread* t = m_registeredThreads; t;) {
@@ -179,17 +181,19 @@ void MachineThreads::makeUsableFromMultipleThreads()
if (m_threadSpecific)
return;
- ThreadSpecificKeyCreate(&m_threadSpecific, removeThread);
+ int error = pthread_key_create(&m_threadSpecific, removeThread);
+ if (error)
+ CRASH();
}
void MachineThreads::addCurrentThread()
{
ASSERT(!m_heap->globalData()->exclusiveThread || m_heap->globalData()->exclusiveThread == currentThread());
- if (!m_threadSpecific || ThreadSpecificGet(m_threadSpecific))
+ if (!m_threadSpecific || pthread_getspecific(m_threadSpecific))
return;
- ThreadSpecificSet(m_threadSpecific, this);
+ pthread_setspecific(m_threadSpecific, this);
Thread* thread = new Thread(getCurrentPlatformThread(), wtfThreadData().stack().origin());
MutexLocker lock(m_registeredThreadsMutex);
diff --git a/Source/JavaScriptCore/heap/MachineStackMarker.h b/Source/JavaScriptCore/heap/MachineStackMarker.h
index 3d4aa22d4..5c7705fcf 100644
--- a/Source/JavaScriptCore/heap/MachineStackMarker.h
+++ b/Source/JavaScriptCore/heap/MachineStackMarker.h
@@ -22,8 +22,8 @@
#ifndef MachineThreads_h
#define MachineThreads_h
+#include <pthread.h>
#include <wtf/Noncopyable.h>
-#include <wtf/ThreadSpecific.h>
#include <wtf/ThreadingPrimitives.h>
namespace JSC {
@@ -55,7 +55,7 @@ namespace JSC {
Heap* m_heap;
Mutex m_registeredThreadsMutex;
Thread* m_registeredThreads;
- WTF::ThreadSpecificKey m_threadSpecific;
+ pthread_key_t m_threadSpecific;
};
} // namespace JSC
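The marker now drives the pthread TLS API directly instead of going through wtf/ThreadSpecific.h. A minimal standalone sketch of the key lifecycle these hunks rely on, using only standard pthread calls:

#include <pthread.h>

static pthread_key_t key;

// Runs at exit of any thread that set a non-null value; the diff's
// equivalent destructor is removeThread().
static void destructor(void* value)
{
    (void)value;
}

int main()
{
    if (pthread_key_create(&key, destructor))  // cf. makeUsableFromMultipleThreads()
        return 1;
    if (!pthread_getspecific(key))             // cf. the guard in addCurrentThread()
        pthread_setspecific(key, &key);        // any non-null marker will do
    return pthread_key_delete(key) ? 1 : 0;    // cf. ~MachineThreads()
}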
diff --git a/Source/JavaScriptCore/heap/MarkStack.cpp b/Source/JavaScriptCore/heap/MarkStack.cpp
index 9d9130026..9c679b0ed 100644
--- a/Source/JavaScriptCore/heap/MarkStack.cpp
+++ b/Source/JavaScriptCore/heap/MarkStack.cpp
@@ -515,9 +515,8 @@ void MarkStack::mergeOpaqueRoots()
void SlotVisitor::startCopying()
{
- ASSERT(!m_copyBlock);
- m_copyBlock = m_shared.m_copiedSpace->allocateBlockForCopyingPhase();
-}
+ ASSERT(!m_copiedAllocator.isValid());
+}
void* SlotVisitor::allocateNewSpace(void* ptr, size_t bytes)
{
@@ -528,18 +527,17 @@ void* SlotVisitor::allocateNewSpace(void* ptr, size_t bytes)
if (m_shared.m_copiedSpace->isPinned(ptr))
return 0;
+
+ void* result = 0; // Compilers don't realize that this will be assigned.
+ if (m_copiedAllocator.tryAllocate(bytes, &result))
+ return result;
+
+ m_shared.m_copiedSpace->doneFillingBlock(m_copiedAllocator.resetCurrentBlock());
+ m_copiedAllocator.setCurrentBlock(m_shared.m_copiedSpace->allocateBlockForCopyingPhase());
- // The only time it's possible to have a null copy block is if we have just started copying.
- if (!m_copyBlock)
- startCopying();
-
- if (!CopiedSpace::fitsInBlock(m_copyBlock, bytes)) {
- // We don't need to lock across these two calls because the master thread won't
- // call doneCopying() because this thread is considered active.
- m_shared.m_copiedSpace->doneFillingBlock(m_copyBlock);
- m_copyBlock = m_shared.m_copiedSpace->allocateBlockForCopyingPhase();
- }
- return CopiedSpace::allocateFromBlock(m_copyBlock, bytes);
+ CheckedBoolean didSucceed = m_copiedAllocator.tryAllocate(bytes, &result);
+ ASSERT(didSucceed);
+ return result;
}
ALWAYS_INLINE bool JSString::tryHashConstLock()
@@ -639,12 +637,10 @@ void SlotVisitor::copyAndAppend(void** ptr, size_t bytes, JSValue* values, unsig
void SlotVisitor::doneCopying()
{
- if (!m_copyBlock)
+ if (!m_copiedAllocator.isValid())
return;
- m_shared.m_copiedSpace->doneFillingBlock(m_copyBlock);
-
- m_copyBlock = 0;
+ m_shared.m_copiedSpace->doneFillingBlock(m_copiedAllocator.resetCurrentBlock());
}
void SlotVisitor::harvestWeakReferences()
diff --git a/Source/JavaScriptCore/heap/SlotVisitor.h b/Source/JavaScriptCore/heap/SlotVisitor.h
index 70d68bb04..d16602f15 100644
--- a/Source/JavaScriptCore/heap/SlotVisitor.h
+++ b/Source/JavaScriptCore/heap/SlotVisitor.h
@@ -70,12 +70,11 @@ private:
void donateKnownParallel();
- CopiedBlock* m_copyBlock;
+ CopiedAllocator m_copiedAllocator;
};
inline SlotVisitor::SlotVisitor(MarkStackThreadSharedData& shared)
: MarkStack(shared)
- , m_copyBlock(0)
{
}
diff --git a/Source/JavaScriptCore/jit/JIT.h b/Source/JavaScriptCore/jit/JIT.h
index 987c4a163..5529551d6 100644
--- a/Source/JavaScriptCore/jit/JIT.h
+++ b/Source/JavaScriptCore/jit/JIT.h
@@ -435,7 +435,7 @@ namespace JSC {
void emitWriteBarrier(JSCell* owner, RegisterID value, RegisterID scratch, WriteBarrierMode, WriteBarrierUseKind);
template<typename ClassType, bool destructor, typename StructureType> void emitAllocateBasicJSObject(StructureType, RegisterID result, RegisterID storagePtr);
- void emitAllocateBasicStorage(size_t, RegisterID result, RegisterID storagePtr);
+ void emitAllocateBasicStorage(size_t, RegisterID result);
template<typename T> void emitAllocateJSFinalObject(T structure, RegisterID result, RegisterID storagePtr);
void emitAllocateJSArray(unsigned valuesRegister, unsigned length, RegisterID cellResult, RegisterID storageResult, RegisterID storagePtr);
diff --git a/Source/JavaScriptCore/jit/JITInlineMethods.h b/Source/JavaScriptCore/jit/JITInlineMethods.h
index d1cee7ef7..42a61ecdb 100644
--- a/Source/JavaScriptCore/jit/JITInlineMethods.h
+++ b/Source/JavaScriptCore/jit/JITInlineMethods.h
@@ -437,25 +437,16 @@ template <typename T> inline void JIT::emitAllocateJSFinalObject(T structure, Re
emitAllocateBasicJSObject<JSFinalObject, false, T>(structure, result, scratch);
}
-inline void JIT::emitAllocateBasicStorage(size_t size, RegisterID result, RegisterID storagePtr)
+inline void JIT::emitAllocateBasicStorage(size_t size, RegisterID result)
{
CopiedAllocator* allocator = &m_globalData->heap.storageAllocator();
- // FIXME: We need to check for wrap-around.
- // Check to make sure that the allocation will fit in the current block.
- loadPtr(&allocator->m_currentOffset, result);
- addPtr(TrustedImm32(size), result);
- loadPtr(&allocator->m_currentBlock, storagePtr);
- addPtr(TrustedImm32(HeapBlock::s_blockSize), storagePtr);
- addSlowCase(branchPtr(AboveOrEqual, result, storagePtr));
-
- // Load the original offset.
- loadPtr(&allocator->m_currentOffset, result);
-
- // Bump the pointer forward.
- move(result, storagePtr);
- addPtr(TrustedImm32(size), storagePtr);
- storePtr(storagePtr, &allocator->m_currentOffset);
+ loadPtr(&allocator->m_currentRemaining, result);
+ addSlowCase(branchSubPtr(Signed, TrustedImm32(size), result));
+ storePtr(result, &allocator->m_currentRemaining);
+ negPtr(result);
+ addPtr(AbsoluteAddress(&allocator->m_currentPayloadEnd), result);
+ subPtr(TrustedImm32(size), result);
}
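The sequence above is the JIT rendering of CopiedAllocator::tryAllocate from earlier in this diff, kept to a single register; the public m_currentRemaining and m_currentPayloadEnd fields exist precisely so it can poke them. Plain C++ with each macroassembler step noted, as a check on the arithmetic (illustration, not JSC code):

#include <stddef.h>

// One-register equivalent of emitAllocateBasicStorage; 'slowCase' stands in
// for the addSlowCase branch taken when the subtraction goes negative.
char* allocateBasicStorageEquivalent(CopiedAllocator& allocator, size_t size, bool& slowCase)
{
    ptrdiff_t result = allocator.m_currentRemaining;   // loadPtr(&allocator->m_currentRemaining, result)
    result -= size;                                    // branchSubPtr(Signed, size, result)
    if ((slowCase = result < 0))
        return 0;                                      // not enough room left in the block
    allocator.m_currentRemaining = result;             // storePtr(result, &allocator->m_currentRemaining)
    result = -result;                                  // negPtr(result)
    char* p = allocator.m_currentPayloadEnd + result;  // addPtr(AbsoluteAddress(&allocator->m_currentPayloadEnd), result)
    p -= size;                                         // subPtr(size, result)
    return p;  // == m_currentPayloadEnd - newRemaining - size, matching tryAllocate
}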
inline void JIT::emitAllocateJSArray(unsigned valuesRegister, unsigned length, RegisterID cellResult, RegisterID storageResult, RegisterID storagePtr)
@@ -465,7 +456,7 @@ inline void JIT::emitAllocateJSArray(unsigned valuesRegister, unsigned length, R
// We allocate the backing store first to ensure that garbage collection
// doesn't happen during JSArray initialization.
- emitAllocateBasicStorage(initialStorage, storageResult, storagePtr);
+ emitAllocateBasicStorage(initialStorage, storageResult);
// Allocate the cell for the array.
emitAllocateBasicJSObject<JSArray, false>(TrustedImmPtr(m_codeBlock->globalObject()->arrayStructure()), cellResult, storagePtr);