diff options
Diffstat (limited to 'Source/JavaScriptCore')
187 files changed, 8926 insertions, 2736 deletions
diff --git a/Source/JavaScriptCore/API/APIShims.h b/Source/JavaScriptCore/API/APIShims.h index 02495110b..ef5f10466 100644 --- a/Source/JavaScriptCore/API/APIShims.h +++ b/Source/JavaScriptCore/API/APIShims.h @@ -28,31 +28,40 @@ #include "CallFrame.h" #include "GCActivityCallback.h" +#include "IncrementalSweeper.h" #include "JSLock.h" #include <wtf/WTFThreadData.h> namespace JSC { class APIEntryShimWithoutLock { +public: + enum RefGlobalDataTag { DontRefGlobalData = 0, RefGlobalData }; + protected: - APIEntryShimWithoutLock(JSGlobalData* globalData, bool registerThread) - : m_globalData(globalData) + APIEntryShimWithoutLock(JSGlobalData* globalData, bool registerThread, RefGlobalDataTag shouldRefGlobalData) + : m_shouldRefGlobalData(shouldRefGlobalData) + , m_globalData(globalData) , m_entryIdentifierTable(wtfThreadData().setCurrentIdentifierTable(globalData->identifierTable)) { + if (shouldRefGlobalData) + m_globalData->ref(); UNUSED_PARAM(registerThread); if (registerThread) globalData->heap.machineThreads().addCurrentThread(); m_globalData->heap.activityCallback()->synchronize(); - m_globalData->timeoutChecker.start(); + m_globalData->heap.sweeper()->synchronize(); } ~APIEntryShimWithoutLock() { - m_globalData->timeoutChecker.stop(); wtfThreadData().setCurrentIdentifierTable(m_entryIdentifierTable); + if (m_shouldRefGlobalData) + m_globalData->deref(); } -private: +protected: + RefGlobalDataTag m_shouldRefGlobalData; JSGlobalData* m_globalData; IdentifierTable* m_entryIdentifierTable; }; @@ -61,20 +70,38 @@ class APIEntryShim : public APIEntryShimWithoutLock { public: // Normal API entry APIEntryShim(ExecState* exec, bool registerThread = true) - : APIEntryShimWithoutLock(&exec->globalData(), registerThread) - , m_lock(exec) + : APIEntryShimWithoutLock(&exec->globalData(), registerThread, RefGlobalData) + { + init(); + } + + // This constructor is necessary for HeapTimer to prevent it from accidentally resurrecting + // the ref count of a "dead" JSGlobalData. 
+ APIEntryShim(JSGlobalData* globalData, RefGlobalDataTag refGlobalData, bool registerThread = true) + : APIEntryShimWithoutLock(globalData, registerThread, refGlobalData) { + init(); } // JSPropertyNameAccumulator only has a globalData. APIEntryShim(JSGlobalData* globalData, bool registerThread = true) - : APIEntryShimWithoutLock(globalData, registerThread) - , m_lock(globalData->isSharedInstance() ? LockForReal : SilenceAssertionsOnly) + : APIEntryShimWithoutLock(globalData, registerThread, RefGlobalData) { + init(); + } + + ~APIEntryShim() + { + m_globalData->timeoutChecker.stop(); + m_globalData->apiLock().unlock(); } private: - JSLock m_lock; + void init() + { + m_globalData->apiLock().lock(); + m_globalData->timeoutChecker.start(); + } }; class APICallbackShim { @@ -88,7 +115,6 @@ public: ~APICallbackShim() { - m_globalData->heap.activityCallback()->synchronize(); wtfThreadData().setCurrentIdentifierTable(m_globalData->identifierTable); } diff --git a/Source/JavaScriptCore/API/JSContextRef.cpp b/Source/JavaScriptCore/API/JSContextRef.cpp index 92e03a671..7a57287de 100644 --- a/Source/JavaScriptCore/API/JSContextRef.cpp +++ b/Source/JavaScriptCore/API/JSContextRef.cpp @@ -78,7 +78,6 @@ JSGlobalContextRef JSGlobalContextCreate(JSClassRef globalObjectClass) // If the application was linked before JSGlobalContextCreate was changed to use a unique JSGlobalData, // we use a shared one for backwards compatibility. if (NSVersionOfLinkTimeLibrary("JavaScriptCore") <= webkitFirstVersionWithConcurrentGlobalContexts) { - JSLock lock(LockForReal); return JSGlobalContextCreateInGroup(toRef(&JSGlobalData::sharedInstance()), globalObjectClass); } #endif // OS(DARWIN) @@ -90,11 +89,9 @@ JSGlobalContextRef JSGlobalContextCreateInGroup(JSContextGroupRef group, JSClass { initializeThreading(); - JSLock lock(LockForReal); RefPtr<JSGlobalData> globalData = group ? 
PassRefPtr<JSGlobalData>(toJS(group)) : JSGlobalData::createContextGroup(ThreadStackTypeSmall); APIEntryShim entryShim(globalData.get(), false); - globalData->makeUsableFromMultipleThreads(); if (!globalObjectClass) { @@ -124,18 +121,19 @@ JSGlobalContextRef JSGlobalContextRetain(JSGlobalContextRef ctx) void JSGlobalContextRelease(JSGlobalContextRef ctx) { + IdentifierTable* savedIdentifierTable; ExecState* exec = toJS(ctx); - JSLock lock(exec); + { + JSLockHolder lock(exec); - JSGlobalData& globalData = exec->globalData(); - IdentifierTable* savedIdentifierTable = wtfThreadData().setCurrentIdentifierTable(globalData.identifierTable); + JSGlobalData& globalData = exec->globalData(); + savedIdentifierTable = wtfThreadData().setCurrentIdentifierTable(globalData.identifierTable); - bool protectCountIsZero = Heap::heap(exec->dynamicGlobalObject())->unprotect(exec->dynamicGlobalObject()); - if (protectCountIsZero) { - globalData.heap.activityCallback()->synchronize(); - globalData.heap.reportAbandonedObjectGraph(); + bool protectCountIsZero = Heap::heap(exec->dynamicGlobalObject())->unprotect(exec->dynamicGlobalObject()); + if (protectCountIsZero) + globalData.heap.reportAbandonedObjectGraph(); + globalData.deref(); } - globalData.deref(); wtfThreadData().setCurrentIdentifierTable(savedIdentifierTable); } @@ -166,7 +164,7 @@ JSGlobalContextRef JSContextGetGlobalContext(JSContextRef ctx) JSStringRef JSContextCreateBacktrace(JSContextRef ctx, unsigned maxStackSize) { ExecState* exec = toJS(ctx); - JSLock lock(exec); + JSLockHolder lock(exec); unsigned count = 0; UStringBuilder builder; diff --git a/Source/JavaScriptCore/API/JSObjectRef.cpp b/Source/JavaScriptCore/API/JSObjectRef.cpp index 91aa3c6bd..e6c0c528a 100644 --- a/Source/JavaScriptCore/API/JSObjectRef.cpp +++ b/Source/JavaScriptCore/API/JSObjectRef.cpp @@ -428,6 +428,8 @@ JSValueRef JSObjectCallAsFunction(JSContextRef ctx, JSObjectRef object, JSObject if (!jsThisObject) jsThisObject = exec->globalThisValue(); + 
jsThisObject = jsThisObject->methodTable()->toThisObject(jsThisObject, exec); + MarkedArgumentBuffer argList; for (size_t i = 0; i < argumentCount; i++) argList.append(toJS(exec, arguments[i])); diff --git a/Source/JavaScriptCore/CMakeLists.txt b/Source/JavaScriptCore/CMakeLists.txt index 06139a4d2..412be293f 100644 --- a/Source/JavaScriptCore/CMakeLists.txt +++ b/Source/JavaScriptCore/CMakeLists.txt @@ -78,6 +78,7 @@ SET(JavaScriptCore_SOURCES dfg/DFGFixupPhase.cpp dfg/DFGGraph.cpp dfg/DFGJITCompiler.cpp + dfg/DFGMinifiedNode.cpp dfg/DFGNodeFlags.cpp dfg/DFGOSREntry.cpp dfg/DFGOSRExit.cpp @@ -93,6 +94,9 @@ SET(JavaScriptCore_SOURCES dfg/DFGSpeculativeJIT32_64.cpp dfg/DFGSpeculativeJIT64.cpp dfg/DFGThunks.cpp + dfg/DFGValueSource.cpp + dfg/DFGVariableEvent.cpp + dfg/DFGVariableEventStream.cpp dfg/DFGValidate.cpp dfg/DFGVirtualRegisterAllocationPhase.cpp @@ -105,6 +109,7 @@ SET(JavaScriptCore_SOURCES heap/Heap.cpp heap/HeapTimer.cpp heap/IncrementalSweeper.cpp + heap/JITStubRoutineSet.cpp heap/MachineStackMarker.cpp heap/MarkedAllocator.cpp heap/MarkedBlock.cpp @@ -125,6 +130,7 @@ SET(JavaScriptCore_SOURCES jit/ExecutableAllocator.cpp jit/HostCallReturnValue.cpp + jit/GCAwareJITStubRoutine.cpp jit/JITArithmetic32_64.cpp jit/JITArithmetic.cpp jit/JITCall32_64.cpp @@ -135,6 +141,7 @@ SET(JavaScriptCore_SOURCES jit/JITOpcodes.cpp jit/JITPropertyAccess32_64.cpp jit/JITPropertyAccess.cpp + jit/JITStubRoutine.cpp jit/JITStubs.cpp jit/ThunkGenerators.cpp diff --git a/Source/JavaScriptCore/ChangeLog b/Source/JavaScriptCore/ChangeLog index 5de8f1d11..a1cbefa36 100644 --- a/Source/JavaScriptCore/ChangeLog +++ b/Source/JavaScriptCore/ChangeLog @@ -1,33 +1,2074 @@ -2012-06-19 Joel Dillon <joel.dillon@codethink.co.uk> Jocelyn Turcotte <jocelyn.turcotte@nokia.com> +2012-07-10 Filip Pizlo <fpizlo@apple.com> + + REGRESSION(r122166): It made 170 tests crash on 32 bit platforms + https://bugs.webkit.org/show_bug.cgi?id=90852 + + Reviewed by Zoltan Herczeg. 
+ + If we can't use the range filter, we should still make sure that the + address is remotely sane, otherwise the hashtables will assert. + + * jit/JITStubRoutine.h: + (JSC::JITStubRoutine::passesFilter): + +2012-07-10 Filip Pizlo <fpizlo@apple.com> + + DFG recompilation heuristics should be based on count, not rate + https://bugs.webkit.org/show_bug.cgi?id=90146 + + Reviewed by Oliver Hunt. + + Rolling r121511 back in after fixing the DFG's interpretation of op_div + profiling, with Gavin's rubber stamp. + + This removes a bunch of code that was previously trying to prevent spurious + reoptimizations if a large enough majority of executions of a code block did + not result in OSR exit. It turns out that this code was purely harmful. This + patch removes all of that logic and replaces it with a dead-simple + heuristic: if you exit more than N times (where N is an exponential function + of the number of times the code block has already been recompiled) then we + will recompile. + + This appears to be a broad ~1% win on many benchmarks large and small. 
+ + * bytecode/CodeBlock.cpp: + (JSC::CodeBlock::CodeBlock): + * bytecode/CodeBlock.h: + (JSC::CodeBlock::couldTakeSpecialFastCase): + (CodeBlock): + (JSC::CodeBlock::osrExitCounter): + (JSC::CodeBlock::countOSRExit): + (JSC::CodeBlock::addressOfOSRExitCounter): + (JSC::CodeBlock::offsetOfOSRExitCounter): + (JSC::CodeBlock::adjustedExitCountThreshold): + (JSC::CodeBlock::exitCountThresholdForReoptimization): + (JSC::CodeBlock::exitCountThresholdForReoptimizationFromLoop): + (JSC::CodeBlock::shouldReoptimizeNow): + (JSC::CodeBlock::shouldReoptimizeFromLoopNow): + * bytecode/ExecutionCounter.cpp: + (JSC::ExecutionCounter::setThreshold): + * bytecode/ExecutionCounter.h: + (ExecutionCounter): + (JSC::ExecutionCounter::clippedThreshold): + * dfg/DFGByteCodeParser.cpp: + (JSC::DFG::ByteCodeParser::makeDivSafe): + * dfg/DFGJITCompiler.cpp: + (JSC::DFG::JITCompiler::compileBody): + * dfg/DFGOSRExit.cpp: + (JSC::DFG::OSRExit::considerAddingAsFrequentExitSiteSlow): + * dfg/DFGOSRExitCompiler.cpp: + (JSC::DFG::OSRExitCompiler::handleExitCounts): + * dfg/DFGOperations.cpp: + * jit/JITStubs.cpp: + (JSC::DEFINE_STUB_FUNCTION): + * runtime/Options.h: + (JSC): + +2012-07-09 Matt Falkenhagen <falken@chromium.org> + + Add ENABLE_DIALOG_ELEMENT and skeleton files + https://bugs.webkit.org/show_bug.cgi?id=90521 + + Reviewed by Kent Tamura. + + * Configurations/FeatureDefines.xcconfig: + +2012-07-09 Filip Pizlo <fpizlo@apple.com> + + Unreviewed, roll out http://trac.webkit.org/changeset/121511 + It made in-browser V8v7 10% slower. 
+ + * bytecode/CodeBlock.cpp: + (JSC::CodeBlock::CodeBlock): + * bytecode/CodeBlock.h: + (CodeBlock): + (JSC::CodeBlock::countSpeculationSuccess): + (JSC::CodeBlock::countSpeculationFailure): + (JSC::CodeBlock::speculativeSuccessCounter): + (JSC::CodeBlock::speculativeFailCounter): + (JSC::CodeBlock::forcedOSRExitCounter): + (JSC::CodeBlock::addressOfSpeculativeSuccessCounter): + (JSC::CodeBlock::addressOfSpeculativeFailCounter): + (JSC::CodeBlock::addressOfForcedOSRExitCounter): + (JSC::CodeBlock::offsetOfSpeculativeSuccessCounter): + (JSC::CodeBlock::offsetOfSpeculativeFailCounter): + (JSC::CodeBlock::offsetOfForcedOSRExitCounter): + (JSC::CodeBlock::largeFailCountThreshold): + (JSC::CodeBlock::largeFailCountThresholdForLoop): + (JSC::CodeBlock::shouldReoptimizeNow): + (JSC::CodeBlock::shouldReoptimizeFromLoopNow): + * bytecode/ExecutionCounter.cpp: + (JSC::ExecutionCounter::setThreshold): + * bytecode/ExecutionCounter.h: + (ExecutionCounter): + * dfg/DFGJITCompiler.cpp: + (JSC::DFG::JITCompiler::compileBody): + * dfg/DFGOSRExit.cpp: + (JSC::DFG::OSRExit::considerAddingAsFrequentExitSiteSlow): + * dfg/DFGOSRExitCompiler.cpp: + (JSC::DFG::OSRExitCompiler::handleExitCounts): + * dfg/DFGOperations.cpp: + * jit/JITStubs.cpp: + (JSC::DEFINE_STUB_FUNCTION): + * runtime/Options.h: + (JSC): + +2012-07-09 Filip Pizlo <fpizlo@apple.com> + + DFG may get stuck in an infinite fix point if it constant folds a mispredicted node + https://bugs.webkit.org/show_bug.cgi?id=90829 + <rdar://problem/11823843> + + Reviewed by Oliver Hunt. + + If a node is shown to have been mispredicted during CFA, then don't allow constant + folding to make the graph even more degenerate. Instead, pull back on constant folding + and allow the normal OSR machinery to fix our profiling so that a future recompilation + doesn't see the same mistake. 
+ + * dfg/DFGAbstractState.cpp: + (JSC::DFG::AbstractState::execute): + * dfg/DFGAbstractState.h: + (JSC::DFG::AbstractState::trySetConstant): + (AbstractState): + * dfg/DFGPhase.h: + (JSC::DFG::Phase::name): + (Phase): + (JSC::DFG::runAndLog): + (DFG): + (JSC::DFG::runPhase): + +2012-07-09 Filip Pizlo <fpizlo@apple.com> + + It should be possible to jettison JIT stub routines even if they are currently running + https://bugs.webkit.org/show_bug.cgi?id=90731 + + Reviewed by Gavin Barraclough. + + This gives the GC awareness of all JIT-generated stubs for inline caches. That + means that if you want to delete a JIT-generated stub, you don't have to worry + about whether or not it is currently running: if there is a chance that it might + be, the GC will kindly defer deletion until non-running-ness is proved. + + * CMakeLists.txt: + * GNUmakefile.list.am: + * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj: + * JavaScriptCore.xcodeproj/project.pbxproj: + * Target.pri: + * bytecode/Instruction.h: + (JSC): + (PolymorphicStubInfo): + (JSC::PolymorphicAccessStructureList::PolymorphicStubInfo::set): + (JSC::PolymorphicAccessStructureList::PolymorphicAccessStructureList): + * bytecode/PolymorphicPutByIdList.cpp: + (JSC::PutByIdAccess::fromStructureStubInfo): + * bytecode/PolymorphicPutByIdList.h: + (JSC::PutByIdAccess::transition): + (JSC::PutByIdAccess::replace): + (JSC::PutByIdAccess::stubRoutine): + (PutByIdAccess): + (JSC::PolymorphicPutByIdList::currentSlowPathTarget): + * bytecode/StructureStubInfo.h: + (JSC::StructureStubInfo::reset): + * dfg/DFGRepatch.cpp: + (JSC::DFG::generateProtoChainAccessStub): + (JSC::DFG::tryCacheGetByID): + (JSC::DFG::tryBuildGetByIDList): + (JSC::DFG::tryBuildGetByIDProtoList): + (JSC::DFG::emitPutReplaceStub): + (JSC::DFG::emitPutTransitionStub): + (JSC::DFG::tryCachePutByID): + (JSC::DFG::tryBuildPutByIdList): + * heap/ConservativeRoots.cpp: + (JSC): + (DummyMarkHook): + (JSC::DummyMarkHook::mark): + 
(JSC::ConservativeRoots::add): + (CompositeMarkHook): + (JSC::CompositeMarkHook::CompositeMarkHook): + (JSC::CompositeMarkHook::mark): + * heap/ConservativeRoots.h: + (JSC): + (ConservativeRoots): + * heap/Heap.cpp: + (JSC::Heap::markRoots): + (JSC::Heap::deleteUnmarkedCompiledCode): + * heap/Heap.h: + (JSC): + (Heap): + * heap/JITStubRoutineSet.cpp: Added. + (JSC): + (JSC::JITStubRoutineSet::JITStubRoutineSet): + (JSC::JITStubRoutineSet::~JITStubRoutineSet): + (JSC::JITStubRoutineSet::add): + (JSC::JITStubRoutineSet::clearMarks): + (JSC::JITStubRoutineSet::markSlow): + (JSC::JITStubRoutineSet::deleteUnmarkedJettisonedStubRoutines): + (JSC::JITStubRoutineSet::traceMarkedStubRoutines): + * heap/JITStubRoutineSet.h: Added. + (JSC): + (JITStubRoutineSet): + (JSC::JITStubRoutineSet::mark): + * heap/MachineStackMarker.h: + (JSC): + * interpreter/RegisterFile.cpp: + (JSC::RegisterFile::gatherConservativeRoots): + * interpreter/RegisterFile.h: + (JSC): + * jit/ExecutableAllocator.cpp: + (JSC::DemandExecutableAllocator::DemandExecutableAllocator): + * jit/ExecutableAllocator.h: + (JSC): + * jit/ExecutableAllocatorFixedVMPool.cpp: + (JSC): + (JSC::FixedVMPoolExecutableAllocator::FixedVMPoolExecutableAllocator): + * jit/GCAwareJITStubRoutine.cpp: Added. + (JSC): + (JSC::GCAwareJITStubRoutine::GCAwareJITStubRoutine): + (JSC::GCAwareJITStubRoutine::~GCAwareJITStubRoutine): + (JSC::GCAwareJITStubRoutine::observeZeroRefCount): + (JSC::GCAwareJITStubRoutine::deleteFromGC): + (JSC::GCAwareJITStubRoutine::markRequiredObjectsInternal): + (JSC::MarkingGCAwareJITStubRoutineWithOneObject::MarkingGCAwareJITStubRoutineWithOneObject): + (JSC::MarkingGCAwareJITStubRoutineWithOneObject::~MarkingGCAwareJITStubRoutineWithOneObject): + (JSC::MarkingGCAwareJITStubRoutineWithOneObject::markRequiredObjectsInternal): + (JSC::createJITStubRoutine): + * jit/GCAwareJITStubRoutine.h: Added. 
+ (JSC): + (GCAwareJITStubRoutine): + (JSC::GCAwareJITStubRoutine::markRequiredObjects): + (MarkingGCAwareJITStubRoutineWithOneObject): + * jit/JITPropertyAccess.cpp: + (JSC::JIT::privateCompilePutByIdTransition): + (JSC::JIT::privateCompilePatchGetArrayLength): + (JSC::JIT::privateCompileGetByIdProto): + (JSC::JIT::privateCompileGetByIdSelfList): + (JSC::JIT::privateCompileGetByIdProtoList): + (JSC::JIT::privateCompileGetByIdChainList): + (JSC::JIT::privateCompileGetByIdChain): + * jit/JITPropertyAccess32_64.cpp: + (JSC::JIT::privateCompilePutByIdTransition): + (JSC::JIT::privateCompilePatchGetArrayLength): + (JSC::JIT::privateCompileGetByIdProto): + (JSC::JIT::privateCompileGetByIdSelfList): + (JSC::JIT::privateCompileGetByIdProtoList): + (JSC::JIT::privateCompileGetByIdChainList): + (JSC::JIT::privateCompileGetByIdChain): + * jit/JITStubRoutine.cpp: Added. + (JSC): + (JSC::JITStubRoutine::~JITStubRoutine): + (JSC::JITStubRoutine::observeZeroRefCount): + * jit/JITStubRoutine.h: Added. + (JSC): + (JITStubRoutine): + (JSC::JITStubRoutine::JITStubRoutine): + (JSC::JITStubRoutine::createSelfManagedRoutine): + (JSC::JITStubRoutine::code): + (JSC::JITStubRoutine::asCodePtr): + (JSC::JITStubRoutine::ref): + (JSC::JITStubRoutine::deref): + (JSC::JITStubRoutine::startAddress): + (JSC::JITStubRoutine::endAddress): + (JSC::JITStubRoutine::addressStep): + (JSC::JITStubRoutine::canPerformRangeFilter): + (JSC::JITStubRoutine::filteringStartAddress): + (JSC::JITStubRoutine::filteringExtentSize): + (JSC::JITStubRoutine::passesFilter): + * jit/JITStubs.cpp: + (JSC::DEFINE_STUB_FUNCTION): + (JSC::getPolymorphicAccessStructureListSlot): + +2012-07-09 Sheriff Bot <webkit.review.bot@gmail.com> + + Unreviewed, rolling out r122107. + http://trac.webkit.org/changeset/122107 + https://bugs.webkit.org/show_bug.cgi?id=90794 + + Build failure on Mac debug bots (Requested by falken_ on + #webkit). 
+ + * Configurations/FeatureDefines.xcconfig: + +2012-07-09 Matt Falkenhagen <falken@chromium.org> + + Add ENABLE_DIALOG_ELEMENT and skeleton files + https://bugs.webkit.org/show_bug.cgi?id=90521 + + Reviewed by Kent Tamura. + + * Configurations/FeatureDefines.xcconfig: + +2012-07-08 Ryosuke Niwa <rniwa@webkit.org> + + gcc build fix after r121925. + + * runtime/JSObject.h: + (JSC::JSFinalObject::finishCreation): + +2012-07-08 Zoltan Herczeg <zherczeg@webkit.org> + + [Qt][ARM] Implementing missing macro assembler instructions after r121925 + https://bugs.webkit.org/show_bug.cgi?id=90657 + + Reviewed by Csaba Osztrogonác. + + Implementing convertibleLoadPtr, replaceWithLoad and + replaceWithAddressComputation. + + * assembler/ARMAssembler.h: + (JSC::ARMAssembler::replaceWithLoad): + (ARMAssembler): + (JSC::ARMAssembler::replaceWithAddressComputation): + * assembler/MacroAssemblerARM.h: + (JSC::MacroAssemblerARM::convertibleLoadPtr): + (MacroAssemblerARM): + +2012-07-06 Filip Pizlo <fpizlo@apple.com> + + WebKit Version 5.1.7 (6534.57.2, r121935): Double-click no longer works on OpenStreetMap + https://bugs.webkit.org/show_bug.cgi?id=90703 + + Reviewed by Michael Saboff. + + It turns out that in my object model refactoring, I managed to fix get_by_pname in all + execution engines except 64-bit baseline JIT. + + * jit/JITPropertyAccess.cpp: + (JSC::JIT::emit_op_get_by_pname): + +2012-07-06 Pravin D <pravind.2k4@gmail.com> + + Build Error on Qt Linux build + https://bugs.webkit.org/show_bug.cgi?id=90699 + + Reviewed by Laszlo Gombos. + + * parser/Parser.cpp: + (JSC::::parseForStatement): + Removed unused boolean variable as this was causing build error on Qt Linux. + +2012-07-06 Nuno Lopes <nlopes@apple.com> + + Fix build with recent clang. + https://bugs.webkit.org/show_bug.cgi?id=90634 + + Reviewed by Oliver Hunt. 
+ + * jit/SpecializedThunkJIT.h: + (JSC::SpecializedThunkJIT::SpecializedThunkJIT): + (SpecializedThunkJIT): + * jit/ThunkGenerators.cpp: + (JSC::charCodeAtThunkGenerator): + (JSC::charAtThunkGenerator): + (JSC::fromCharCodeThunkGenerator): + (JSC::sqrtThunkGenerator): + (JSC::floorThunkGenerator): + (JSC::ceilThunkGenerator): + (JSC::roundThunkGenerator): + (JSC::expThunkGenerator): + (JSC::logThunkGenerator): + (JSC::absThunkGenerator): + (JSC::powThunkGenerator): + * parser/ASTBuilder.h: + (JSC::ASTBuilder::createAssignResolve): + (JSC::ASTBuilder::createForLoop): + (JSC::ASTBuilder::createForInLoop): + (JSC::ASTBuilder::makeAssignNode): + (JSC::ASTBuilder::makePrefixNode): + (JSC::ASTBuilder::makePostfixNode): + * parser/NodeConstructors.h: + (JSC::PostfixErrorNode::PostfixErrorNode): + (JSC::PrefixErrorNode::PrefixErrorNode): + (JSC::AssignResolveNode::AssignResolveNode): + (JSC::AssignErrorNode::AssignErrorNode): + (JSC::ForNode::ForNode): + (JSC::ForInNode::ForInNode): + * parser/Nodes.h: + (FunctionCallResolveNode): + (PostfixErrorNode): + (PrefixErrorNode): + (ReadModifyResolveNode): + (AssignResolveNode): + (AssignErrorNode): + (ForNode): + (ForInNode): + * parser/Parser.cpp: + (JSC::::parseVarDeclarationList): + (JSC::::parseForStatement): + * parser/SyntaxChecker.h: + (JSC::SyntaxChecker::createAssignResolve): + (JSC::SyntaxChecker::createForLoop): + +2012-07-06 Zoltan Herczeg <zherczeg@webkit.org> + + [Qt][ARM] REGRESSION(r121885): It broke 30 jsc tests, 500+ layout tests + https://bugs.webkit.org/show_bug.cgi?id=90656 + + Reviewed by Csaba Osztrogonác. + + Typo fixes. + + * assembler/MacroAssemblerARM.cpp: + (JSC::MacroAssemblerARM::load32WithUnalignedHalfWords): + Rename getOp2Byte() -> getOp2Half() + * assembler/MacroAssemblerARMv7.h: + (JSC::MacroAssemblerARMv7::convertibleLoadPtr): + Add a necessary space. + * jit/JITStubs.cpp: + (JSC): + Revert INLINE_ARM_FUNCTION macro. 
+ +2012-07-05 Filip Pizlo <fpizlo@apple.com> + + REGRESSION(r121925): It broke 5 sputnik tests on x86 platforms + https://bugs.webkit.org/show_bug.cgi?id=90658 + + Reviewed by Zoltan Herczeg. + + Under the new object model, out-of-line property accesses such as those + in ResolveGlobal must account for the fact that the offset to the Kth + property is represented by K + inlineStorageCapacity. Hence, the property + loads in ResolveGlobal must have an additional -inlineStorageCapacity * + sizeof(JSValue) offset. + + * dfg/DFGSpeculativeJIT32_64.cpp: + (JSC::DFG::SpeculativeJIT::compile): + +2012-07-05 Csaba Osztrogonác <ossy@webkit.org> + + [Qt] Unreviewed 64 bit buildfix after r121925. + + * bytecode/PutByIdStatus.cpp: + (JSC::PutByIdStatus::computeFromLLInt): + +2012-07-05 Michael Saboff <msaboff@apple.com> + + JSString::tryHashConstLock() fails to get exclusive lock + https://bugs.webkit.org/show_bug.cgi?id=90639 + + Reviewed by Oliver Hunt. + + Added check that the string is already locked even before compare and swap. + + * heap/MarkStack.cpp: + (JSC::JSString::tryHashConstLock): + +2012-07-04 Filip Pizlo <fpizlo@apple.com> + + Inline property storage should not be wasted when it is exhausted + https://bugs.webkit.org/show_bug.cgi?id=90347 + + Reviewed by Gavin Barraclough. + + Previously, if we switched an object from using inline storage to out-of-line + storage, we would abandon the inline storage. This would have two main implications: + (i) all accesses to the object, even for properties that were previously in inline + storage, must now take an extra indirection; and (ii) we waste a non-trivial amount + of space since we must allocate additional out-of-line storage to hold properties + that would have fit in the inline storage. There's also the copying cost when + switching to out-of-line storage - we must copy all inline properties into out-of-line + storage. 
+ + This patch changes the way that object property storage works so that we can use both + inline and out-of-line storage concurrently. This is accomplished by introducing a + new notion of property offset. This PropertyOffset is a 32-bit signed integer and it + behaves as follows: + + offset == -1: invalid offset, indicating a property that does not exist. + + 0 <= offset <= inlineStorageCapacity: offset into inline storage. + + inlineStorageCapacity < offset: offset into out-of-line storage. + + Because non-final objects don't have inline storage, the only valid PropertyOffsets + for those objects' properties are -1 or > inlineStorageCapacity. + + This now means that the decision to use inline or out-of-line storage for an access is + made based on the offset, rather than the structure. It also means that any access + where the offset is a variable must have an extra branch, unless the type of the + object is also known (if it's known to be a non-final object then we can just assert + that the offset is >= inlineStorageCapacity). + + This looks like a big Kraken speed-up and a slight V8 speed-up. 
+ + * GNUmakefile.list.am: + * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def: + * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj: + * JavaScriptCore.xcodeproj/project.pbxproj: + * assembler/ARMv7Assembler.h: + (ARMv7Assembler): + (JSC::ARMv7Assembler::ldrWide8BitImmediate): + (JSC::ARMv7Assembler::replaceWithLoad): + (JSC::ARMv7Assembler::replaceWithAddressComputation): + * assembler/AbstractMacroAssembler.h: + (AbstractMacroAssembler): + (ConvertibleLoadLabel): + (JSC::AbstractMacroAssembler::ConvertibleLoadLabel::ConvertibleLoadLabel): + (JSC::AbstractMacroAssembler::ConvertibleLoadLabel::isSet): + (JSC::AbstractMacroAssembler::labelIgnoringWatchpoints): + (JSC::AbstractMacroAssembler::replaceWithLoad): + (JSC::AbstractMacroAssembler::replaceWithAddressComputation): + * assembler/CodeLocation.h: + (JSC): + (CodeLocationCommon): + (CodeLocationConvertibleLoad): + (JSC::CodeLocationConvertibleLoad::CodeLocationConvertibleLoad): + (JSC::CodeLocationCommon::convertibleLoadAtOffset): + * assembler/LinkBuffer.cpp: + (JSC::LinkBuffer::finalizeCodeWithDisassembly): + * assembler/LinkBuffer.h: + (LinkBuffer): + (JSC::LinkBuffer::locationOf): + * assembler/MacroAssemblerARMv7.h: + (MacroAssemblerARMv7): + (JSC::MacroAssemblerARMv7::convertibleLoadPtr): + * assembler/MacroAssemblerX86.h: + (JSC::MacroAssemblerX86::convertibleLoadPtr): + (MacroAssemblerX86): + * assembler/MacroAssemblerX86_64.h: + (JSC::MacroAssemblerX86_64::convertibleLoadPtr): + (MacroAssemblerX86_64): + * assembler/RepatchBuffer.h: + (RepatchBuffer): + (JSC::RepatchBuffer::replaceWithLoad): + (JSC::RepatchBuffer::replaceWithAddressComputation): + (JSC::RepatchBuffer::setLoadInstructionIsActive): + * assembler/X86Assembler.h: + (JSC::X86Assembler::replaceWithLoad): + (X86Assembler): + (JSC::X86Assembler::replaceWithAddressComputation): + * bytecode/CodeBlock.cpp: + (JSC::CodeBlock::printGetByIdOp): + (JSC::CodeBlock::dump): + (JSC::CodeBlock::finalizeUnconditionally): + * 
bytecode/GetByIdStatus.cpp: + (JSC::GetByIdStatus::computeFromLLInt): + (JSC::GetByIdStatus::computeForChain): + (JSC::GetByIdStatus::computeFor): + * bytecode/GetByIdStatus.h: + (JSC::GetByIdStatus::GetByIdStatus): + (JSC::GetByIdStatus::offset): + (GetByIdStatus): + * bytecode/Opcode.h: + (JSC): + (JSC::padOpcodeName): + * bytecode/PutByIdStatus.cpp: + (JSC::PutByIdStatus::computeFromLLInt): + (JSC::PutByIdStatus::computeFor): + * bytecode/PutByIdStatus.h: + (JSC::PutByIdStatus::PutByIdStatus): + (JSC::PutByIdStatus::offset): + (PutByIdStatus): + * bytecode/ResolveGlobalStatus.cpp: + (JSC): + (JSC::computeForStructure): + * bytecode/ResolveGlobalStatus.h: + (JSC::ResolveGlobalStatus::ResolveGlobalStatus): + (JSC::ResolveGlobalStatus::offset): + (ResolveGlobalStatus): + * bytecode/StructureSet.h: + (StructureSet): + * bytecode/StructureStubInfo.h: + * dfg/DFGByteCodeParser.cpp: + (ByteCodeParser): + (JSC::DFG::ByteCodeParser::handleGetByOffset): + (JSC::DFG::ByteCodeParser::handleGetById): + (JSC::DFG::ByteCodeParser::parseBlock): + * dfg/DFGCapabilities.h: + (JSC::DFG::canCompileOpcode): + * dfg/DFGJITCompiler.cpp: + (JSC::DFG::JITCompiler::link): + * dfg/DFGJITCompiler.h: + (JSC::DFG::PropertyAccessRecord::PropertyAccessRecord): + (PropertyAccessRecord): + * dfg/DFGRepatch.cpp: + (JSC::DFG::dfgRepatchByIdSelfAccess): + (JSC::DFG::generateProtoChainAccessStub): + (JSC::DFG::tryCacheGetByID): + (JSC::DFG::tryBuildGetByIDList): + (JSC::DFG::tryBuildGetByIDProtoList): + (JSC::DFG::emitPutReplaceStub): + (JSC::DFG::emitPutTransitionStub): + (JSC::DFG::tryCachePutByID): + (JSC::DFG::tryBuildPutByIdList): + * dfg/DFGSpeculativeJIT.h: + (JSC::DFG::SpeculativeJIT::emitAllocateBasicJSObject): + * dfg/DFGSpeculativeJIT32_64.cpp: + (JSC::DFG::SpeculativeJIT::cachedGetById): + (JSC::DFG::SpeculativeJIT::cachedPutById): + (JSC::DFG::SpeculativeJIT::compile): + * dfg/DFGSpeculativeJIT64.cpp: + (JSC::DFG::SpeculativeJIT::cachedGetById): + 
(JSC::DFG::SpeculativeJIT::cachedPutById): + (JSC::DFG::SpeculativeJIT::compile): + * heap/MarkStack.cpp: + (JSC::visitChildren): + * interpreter/Interpreter.cpp: + (JSC::Interpreter::tryCacheGetByID): + (JSC::Interpreter::privateExecute): + * jit/JIT.cpp: + (JSC::JIT::privateCompileMainPass): + (JSC::JIT::privateCompileSlowCases): + (JSC::PropertyStubCompilationInfo::copyToStubInfo): + * jit/JIT.h: + (JSC::PropertyStubCompilationInfo::PropertyStubCompilationInfo): + (JSC::JIT::compileGetByIdProto): + (JSC::JIT::compileGetByIdSelfList): + (JSC::JIT::compileGetByIdProtoList): + (JSC::JIT::compileGetByIdChainList): + (JSC::JIT::compileGetByIdChain): + (JSC::JIT::compilePutByIdTransition): + (JIT): + * jit/JITInlineMethods.h: + (JSC::JIT::emitAllocateBasicJSObject): + * jit/JITOpcodes.cpp: + (JSC::JIT::emit_op_resolve_global): + * jit/JITOpcodes32_64.cpp: + (JSC::JIT::emit_op_resolve_global): + * jit/JITPropertyAccess.cpp: + (JSC::JIT::compileGetDirectOffset): + (JSC::JIT::emit_op_method_check): + (JSC::JIT::compileGetByIdHotPath): + (JSC::JIT::emit_op_put_by_id): + (JSC::JIT::compilePutDirectOffset): + (JSC::JIT::privateCompilePutByIdTransition): + (JSC::JIT::patchGetByIdSelf): + (JSC::JIT::patchPutByIdReplace): + (JSC::JIT::privateCompileGetByIdProto): + (JSC::JIT::privateCompileGetByIdSelfList): + (JSC::JIT::privateCompileGetByIdProtoList): + (JSC::JIT::privateCompileGetByIdChainList): + (JSC::JIT::privateCompileGetByIdChain): + * jit/JITPropertyAccess32_64.cpp: + (JSC::JIT::emit_op_method_check): + (JSC::JIT::compileGetByIdHotPath): + (JSC::JIT::emit_op_put_by_id): + (JSC::JIT::compilePutDirectOffset): + (JSC::JIT::compileGetDirectOffset): + (JSC::JIT::privateCompilePutByIdTransition): + (JSC::JIT::patchGetByIdSelf): + (JSC::JIT::patchPutByIdReplace): + (JSC::JIT::privateCompileGetByIdProto): + (JSC::JIT::privateCompileGetByIdSelfList): + (JSC::JIT::privateCompileGetByIdProtoList): + (JSC::JIT::privateCompileGetByIdChainList): + 
(JSC::JIT::privateCompileGetByIdChain): + (JSC::JIT::emit_op_get_by_pname): + * jit/JITStubs.cpp: + (JSC::JITThunks::tryCacheGetByID): + (JSC::DEFINE_STUB_FUNCTION): + * llint/LLIntSlowPaths.cpp: + (JSC::LLInt::LLINT_SLOW_PATH_DECL): + * llint/LowLevelInterpreter.asm: + * llint/LowLevelInterpreter32_64.asm: + * llint/LowLevelInterpreter64.asm: + * offlineasm/x86.rb: + * runtime/JSGlobalObject.h: + (JSGlobalObject): + (JSC::JSGlobalObject::functionNameOffset): + * runtime/JSObject.cpp: + (JSC::JSObject::visitChildren): + (JSC): + (JSC::JSFinalObject::visitChildren): + (JSC::JSObject::put): + (JSC::JSObject::deleteProperty): + (JSC::JSObject::getPropertySpecificValue): + (JSC::JSObject::removeDirect): + (JSC::JSObject::growOutOfLineStorage): + (JSC::JSObject::getOwnPropertyDescriptor): + * runtime/JSObject.h: + (JSObject): + (JSC::JSObject::getDirect): + (JSC::JSObject::getDirectLocation): + (JSC::JSObject::hasInlineStorage): + (JSC::JSObject::inlineStorageUnsafe): + (JSC::JSObject::inlineStorage): + (JSC::JSObject::outOfLineStorage): + (JSC::JSObject::locationForOffset): + (JSC::JSObject::offsetForLocation): + (JSC::JSObject::getDirectOffset): + (JSC::JSObject::putDirectOffset): + (JSC::JSObject::putUndefinedAtDirectOffset): + (JSC::JSObject::addressOfOutOfLineStorage): + (JSC::JSObject::finishCreation): + (JSC::JSNonFinalObject::JSNonFinalObject): + (JSC::JSNonFinalObject::finishCreation): + (JSFinalObject): + (JSC::JSFinalObject::finishCreation): + (JSC::JSFinalObject::JSFinalObject): + (JSC::JSObject::offsetOfOutOfLineStorage): + (JSC::JSObject::setOutOfLineStorage): + (JSC::JSObject::JSObject): + (JSC): + (JSC::JSCell::fastGetOwnProperty): + (JSC::JSObject::putDirectInternal): + (JSC::JSObject::setStructureAndReallocateStorageIfNecessary): + (JSC::JSObject::putDirectWithoutTransition): + (JSC::offsetRelativeToPatchedStorage): + (JSC::indexRelativeToBase): + (JSC::offsetRelativeToBase): + * runtime/JSPropertyNameIterator.cpp: + 
(JSC::JSPropertyNameIterator::create): + * runtime/JSPropertyNameIterator.h: + (JSPropertyNameIterator): + (JSC::JSPropertyNameIterator::getOffset): + (JSC::JSPropertyNameIterator::finishCreation): + * runtime/JSValue.cpp: + (JSC::JSValue::putToPrimitive): + * runtime/Operations.h: + (JSC::normalizePrototypeChain): + * runtime/Options.cpp: + (JSC): + (JSC::Options::initialize): + * runtime/PropertyMapHashTable.h: + (PropertyMapEntry): + (JSC::PropertyMapEntry::PropertyMapEntry): + (PropertyTable): + (JSC::PropertyTable::PropertyTable): + (JSC::PropertyTable::getDeletedOffset): + (JSC::PropertyTable::addDeletedOffset): + (JSC::PropertyTable::nextOffset): + (JSC): + (JSC::PropertyTable::sizeInMemory): + * runtime/PropertyOffset.h: Added. + (JSC): + (JSC::checkOffset): + (JSC::validateOffset): + (JSC::isValidOffset): + (JSC::isInlineOffset): + (JSC::isOutOfLineOffset): + (JSC::offsetInInlineStorage): + (JSC::offsetInOutOfLineStorage): + (JSC::offsetInRespectiveStorage): + (JSC::numberOfOutOfLineSlotsForLastOffset): + (JSC::numberOfSlotsForLastOffset): + (JSC::nextPropertyOffsetFor): + (JSC::firstPropertyOffsetFor): + * runtime/PropertySlot.h: + (JSC::PropertySlot::cachedOffset): + (JSC::PropertySlot::setValue): + (JSC::PropertySlot::setCacheableGetterSlot): + (JSC::PropertySlot::clearOffset): + * runtime/PutPropertySlot.h: + (JSC::PutPropertySlot::setExistingProperty): + (JSC::PutPropertySlot::setNewProperty): + (JSC::PutPropertySlot::cachedOffset): + (PutPropertySlot): + * runtime/Structure.cpp: + (JSC::Structure::Structure): + (JSC::Structure::materializePropertyMap): + (JSC::nextOutOfLineStorageCapacity): + (JSC::Structure::growOutOfLineCapacity): + (JSC::Structure::suggestedNewOutOfLineStorageCapacity): + (JSC::Structure::addPropertyTransitionToExistingStructure): + (JSC::Structure::addPropertyTransition): + (JSC::Structure::removePropertyTransition): + (JSC::Structure::flattenDictionaryStructure): + (JSC::Structure::addPropertyWithoutTransition): + 
(JSC::Structure::removePropertyWithoutTransition): + (JSC::Structure::copyPropertyTableForPinning): + (JSC::Structure::get): + (JSC::Structure::putSpecificValue): + (JSC::Structure::remove): + * runtime/Structure.h: + (Structure): + (JSC::Structure::putWillGrowOutOfLineStorage): + (JSC::Structure::previousID): + (JSC::Structure::outOfLineCapacity): + (JSC::Structure::outOfLineSizeForKnownFinalObject): + (JSC::Structure::outOfLineSizeForKnownNonFinalObject): + (JSC::Structure::outOfLineSize): + (JSC::Structure::hasInlineStorage): + (JSC::Structure::inlineCapacity): + (JSC::Structure::inlineSizeForKnownFinalObject): + (JSC::Structure::inlineSize): + (JSC::Structure::totalStorageSize): + (JSC::Structure::totalStorageCapacity): + (JSC::Structure::firstValidOffset): + (JSC::Structure::lastValidOffset): + (JSC::Structure::isValidOffset): + (JSC::Structure::isEmpty): + (JSC::Structure::transitionCount): + (JSC::Structure::get): + +2012-07-05 Oliver Hunt <oliver@apple.com> + + JSObjectCallAsFunction should thisConvert the provided thisObject + https://bugs.webkit.org/show_bug.cgi?id=90628 + + Reviewed by Gavin Barraclough. + + Perform this conversion on the provided this object. + + * API/JSObjectRef.cpp: + (JSObjectCallAsFunction): + +2012-07-05 Zoltan Herczeg <zherczeg@webkit.org> + + [Qt] Unreviewed buildfix after r121886. Typo fix. + + * assembler/MacroAssemblerARM.cpp: + (JSC::MacroAssemblerARM::load32WithUnalignedHalfWords): + +2012-07-05 Zoltan Herczeg <zherczeg@webkit.org> + + Port DFG JIT to traditional ARM + https://bugs.webkit.org/show_bug.cgi?id=90198 + + Reviewed by Filip Pizlo. + + This patch contains the macro assembler part of the + DFG JIT support on ARM systems with fixed 32 bit instruction + width. A large amount of old code was refactored, and the ARMv4 + or lower support is removed from the macro assembler. + + Sunspider is improved by 8%, and V8 by 92%. 
+ + * assembler/ARMAssembler.cpp: + (JSC::ARMAssembler::dataTransfer32): + (JSC::ARMAssembler::baseIndexTransfer32): + (JSC): + (JSC::ARMAssembler::dataTransfer16): + (JSC::ARMAssembler::baseIndexTransfer16): + (JSC::ARMAssembler::dataTransferFloat): + (JSC::ARMAssembler::baseIndexTransferFloat): + (JSC::ARMAssembler::executableCopy): + * assembler/ARMAssembler.h: + (JSC::ARMAssembler::ARMAssembler): + (JSC::ARMAssembler::emitInst): + (JSC::ARMAssembler::vmov_f64_r): + (ARMAssembler): + (JSC::ARMAssembler::vabs_f64_r): + (JSC::ARMAssembler::vneg_f64_r): + (JSC::ARMAssembler::ldr_imm): + (JSC::ARMAssembler::ldr_un_imm): + (JSC::ARMAssembler::dtr_u): + (JSC::ARMAssembler::dtr_ur): + (JSC::ARMAssembler::dtr_d): + (JSC::ARMAssembler::dtr_dr): + (JSC::ARMAssembler::dtrh_u): + (JSC::ARMAssembler::dtrh_ur): + (JSC::ARMAssembler::dtrh_d): + (JSC::ARMAssembler::dtrh_dr): + (JSC::ARMAssembler::fdtr_u): + (JSC::ARMAssembler::fdtr_d): + (JSC::ARMAssembler::push_r): + (JSC::ARMAssembler::pop_r): + (JSC::ARMAssembler::poke_r): + (JSC::ARMAssembler::peek_r): + (JSC::ARMAssembler::vmov_vfp64_r): + (JSC::ARMAssembler::vmov_arm64_r): + (JSC::ARMAssembler::vmov_vfp32_r): + (JSC::ARMAssembler::vmov_arm32_r): + (JSC::ARMAssembler::vcvt_u32_f64_r): + (JSC::ARMAssembler::vcvt_f64_f32_r): + (JSC::ARMAssembler::vcvt_f32_f64_r): + (JSC::ARMAssembler::clz_r): + (JSC::ARMAssembler::bkpt): + (JSC::ARMAssembler::bx): + (JSC::ARMAssembler::blx): + (JSC::ARMAssembler::labelIgnoringWatchpoints): + (JSC::ARMAssembler::labelForWatchpoint): + (JSC::ARMAssembler::label): + (JSC::ARMAssembler::getLdrImmAddress): + (JSC::ARMAssembler::replaceWithJump): + (JSC::ARMAssembler::maxJumpReplacementSize): + (JSC::ARMAssembler::getOp2Byte): + (JSC::ARMAssembler::getOp2Half): + (JSC::ARMAssembler::RM): + (JSC::ARMAssembler::RS): + (JSC::ARMAssembler::RD): + (JSC::ARMAssembler::RN): + * assembler/AssemblerBufferWithConstantPool.h: + (JSC::AssemblerBufferWithConstantPool::ensureSpaceForAnyInstruction): + * 
assembler/MacroAssemblerARM.cpp: + (JSC::MacroAssemblerARM::load32WithUnalignedHalfWords): + * assembler/MacroAssemblerARM.h: + (JSC::MacroAssemblerARM::add32): + (MacroAssemblerARM): + (JSC::MacroAssemblerARM::and32): + (JSC::MacroAssemblerARM::lshift32): + (JSC::MacroAssemblerARM::mul32): + (JSC::MacroAssemblerARM::neg32): + (JSC::MacroAssemblerARM::rshift32): + (JSC::MacroAssemblerARM::urshift32): + (JSC::MacroAssemblerARM::xor32): + (JSC::MacroAssemblerARM::load8): + (JSC::MacroAssemblerARM::load8Signed): + (JSC::MacroAssemblerARM::load16): + (JSC::MacroAssemblerARM::load16Signed): + (JSC::MacroAssemblerARM::load32): + (JSC::MacroAssemblerARM::load32WithAddressOffsetPatch): + (JSC::MacroAssemblerARM::store32WithAddressOffsetPatch): + (JSC::MacroAssemblerARM::store8): + (JSC::MacroAssemblerARM::store16): + (JSC::MacroAssemblerARM::store32): + (JSC::MacroAssemblerARM::move): + (JSC::MacroAssemblerARM::jump): + (JSC::MacroAssemblerARM::branchAdd32): + (JSC::MacroAssemblerARM::mull32): + (JSC::MacroAssemblerARM::branchMul32): + (JSC::MacroAssemblerARM::nearCall): + (JSC::MacroAssemblerARM::compare32): + (JSC::MacroAssemblerARM::test32): + (JSC::MacroAssemblerARM::sub32): + (JSC::MacroAssemblerARM::call): + (JSC::MacroAssemblerARM::loadFloat): + (JSC::MacroAssemblerARM::loadDouble): + (JSC::MacroAssemblerARM::storeFloat): + (JSC::MacroAssemblerARM::storeDouble): + (JSC::MacroAssemblerARM::moveDouble): + (JSC::MacroAssemblerARM::addDouble): + (JSC::MacroAssemblerARM::divDouble): + (JSC::MacroAssemblerARM::subDouble): + (JSC::MacroAssemblerARM::mulDouble): + (JSC::MacroAssemblerARM::absDouble): + (JSC::MacroAssemblerARM::negateDouble): + (JSC::MacroAssemblerARM::convertInt32ToDouble): + (JSC::MacroAssemblerARM::convertFloatToDouble): + (JSC::MacroAssemblerARM::convertDoubleToFloat): + (JSC::MacroAssemblerARM::branchTruncateDoubleToInt32): + (JSC::MacroAssemblerARM::branchTruncateDoubleToUint32): + (JSC::MacroAssemblerARM::truncateDoubleToInt32): + 
(JSC::MacroAssemblerARM::truncateDoubleToUint32): + (JSC::MacroAssemblerARM::branchConvertDoubleToInt32): + (JSC::MacroAssemblerARM::branchDoubleNonZero): + (JSC::MacroAssemblerARM::branchDoubleZeroOrNaN): + (JSC::MacroAssemblerARM::invert): + (JSC::MacroAssemblerARM::replaceWithJump): + (JSC::MacroAssemblerARM::maxJumpReplacementSize): + (JSC::MacroAssemblerARM::call32): + * assembler/SH4Assembler.h: + (JSC::SH4Assembler::label): + * dfg/DFGAssemblyHelpers.h: + (JSC::DFG::AssemblyHelpers::debugCall): + (JSC::DFG::AssemblyHelpers::boxDouble): + (JSC::DFG::AssemblyHelpers::unboxDouble): + * dfg/DFGCCallHelpers.h: + (CCallHelpers): + (JSC::DFG::CCallHelpers::setupArguments): + * dfg/DFGFPRInfo.h: + (DFG): + * dfg/DFGGPRInfo.h: + (DFG): + (GPRInfo): + * dfg/DFGOperations.cpp: + (JSC): + * dfg/DFGSpeculativeJIT.h: + (SpeculativeJIT): + (JSC::DFG::SpeculativeJIT::appendCallWithExceptionCheckSetResult): + (JSC::DFG::SpeculativeJIT::appendCallSetResult): + * jit/JITStubs.cpp: + (JSC): + * jit/JITStubs.h: + (JITStackFrame): + * jit/JSInterfaceJIT.h: + (JSInterfaceJIT): + +2012-07-04 Anthony Scian <ascian@rim.com> + + Web Inspector [JSC]: Implement ScriptCallStack::stackTrace + https://bugs.webkit.org/show_bug.cgi?id=40118 + + Reviewed by Yong Li. + + Added member functions to expose function name, urlString, and line #. + Refactored toString to make use of these member functions to reduce + duplicated code for future maintenance. + + Manually tested refactoring of toString by tracing thrown exceptions. + + * interpreter/Interpreter.h: + (JSC::StackFrame::toString): + (JSC::StackFrame::friendlySourceURL): + (JSC::StackFrame::friendlyFunctionName): + (JSC::StackFrame::friendlyLineNumber): + +2012-07-04 Andy Wingo <wingo@igalia.com> + + [GTK] Enable parallel GC + https://bugs.webkit.org/show_bug.cgi?id=90568 + + Reviewed by Martin Robinson. + + * runtime/Options.cpp: Include <algorithm.h> for std::min. 
+ +2012-07-04 John Mellor <johnme@chromium.org> + + Text Autosizing: Add compile flag and runtime setting + https://bugs.webkit.org/show_bug.cgi?id=87394 + + This patch renames Font Boosting to Text Autosizing. + + Reviewed by Adam Barth. + + * Configurations/FeatureDefines.xcconfig: + +2012-07-03 Michael Saboff <msaboff@apple.com> + + Enh: Hash Const JSString in Backing Stores to Save Memory + https://bugs.webkit.org/show_bug.cgi?id=86024 + + Reviewed by Oliver Hunt. + + During garbage collection, each marking thread keeps a HashMap of + strings. While visiting via MarkStack::copyAndAppend(), we check to + see if the string we are visiting is already in the HashMap. If not + we add it. If so, we change the reference to the current string we're + visiting to the prior string. + + To reduce the performance impact of this change, two throttles have + been added. 1) We only try hash consting if a significant number of new + strings have been created since the last hash const. Currently this is + set at 100 strings. 2) If a string is unique at the end of a marking + it will not be checked during further GC phases. In some cases this + won't catch all duplicates, but we are trying to catch the growth of + duplicate strings. + + * heap/Heap.cpp: + (JSC::Heap::markRoots): + * heap/MarkStack.cpp: + (JSC::MarkStackThreadSharedData::resetChildren): + (JSC::MarkStackThreadSharedData::MarkStackThreadSharedData): + (JSC::MarkStackThreadSharedData::reset): + (JSC::MarkStack::setup): Check to see if enough strings have been created + to hash const. + (JSC::MarkStack::reset): Added call to clear m_uniqueStrings. + (JSC::JSString::tryHashConstLock): New method to lock JSString for + hash consting. + (JSC::JSString::releaseHashConstLock): New unlock method. + (JSC::JSString::shouldTryHashConst): Set of checks to see if we should + try to hash const the string. + (JSC::MarkStack::internalAppend): New method that performs the hash consting. 
+ (JSC::SlotVisitor::copyAndAppend): Changed to call the new hash + consting internalAppend(). + * heap/MarkStack.h: + (MarkStackThreadSharedData): + (MarkStack): + * runtime/JSGlobalData.cpp: + (JSC::JSGlobalData::JSGlobalData): + * runtime/JSGlobalData.h: + (JSGlobalData): + (JSC::JSGlobalData::haveEnoughNewStringsToHashConst): + (JSC::JSGlobalData::resetNewStringsSinceLastHashConst): + * runtime/JSString.h: + (JSString): Changed from using bool flags to using an unsigned + m_flags field. This works better with the weakCompareAndSwap in + JSString::tryHashConstLock(). Changed the 8bitness setting and + checking to use new accessors. + (JSC::JSString::JSString): + (JSC::JSString::finishCreation): + (JSC::JSString::is8Bit): Updated for new m_flags. + (JSC::JSString::setIs8Bit): New setter. + New hash const flags accessors: + (JSC::JSString::isHashConstSingleton): + (JSC::JSString::clearHashConstSingleton): + (JSC::JSString::setHashConstSingleton): + (JSC::JSRopeString::finishCreation): + (JSC::JSRopeString::append): + +2012-07-03 Tony Chang <tony@chromium.org> + + [chromium] Unreviewed, update .gitignore to handle VS2010 files. + + * JavaScriptCore.gyp/.gitignore: + +2012-07-03 Mark Lam <mark.lam@apple.com> + + Add ability to symbolically set and dump JSC VM options. + See comments in runtime/Options.h for details on how the options work. + https://bugs.webkit.org/show_bug.cgi?id=90420 + + Reviewed by Filip Pizlo. 
+ + * assembler/LinkBuffer.cpp: + (JSC::LinkBuffer::finalizeCodeWithDisassembly): + * assembler/LinkBuffer.h: + (JSC): + * bytecode/CodeBlock.cpp: + (JSC::CodeBlock::shouldOptimizeNow): + * bytecode/CodeBlock.h: + (JSC::CodeBlock::likelyToTakeSlowCase): + (JSC::CodeBlock::couldTakeSlowCase): + (JSC::CodeBlock::likelyToTakeSpecialFastCase): + (JSC::CodeBlock::likelyToTakeDeepestSlowCase): + (JSC::CodeBlock::likelyToTakeAnySlowCase): + (JSC::CodeBlock::jitAfterWarmUp): + (JSC::CodeBlock::jitSoon): + (JSC::CodeBlock::reoptimizationRetryCounter): + (JSC::CodeBlock::countReoptimization): + (JSC::CodeBlock::counterValueForOptimizeAfterWarmUp): + (JSC::CodeBlock::counterValueForOptimizeAfterLongWarmUp): + (JSC::CodeBlock::optimizeSoon): + (JSC::CodeBlock::exitCountThresholdForReoptimization): + (JSC::CodeBlock::exitCountThresholdForReoptimizationFromLoop): + * bytecode/ExecutionCounter.h: + (JSC::ExecutionCounter::clippedThreshold): + * dfg/DFGByteCodeParser.cpp: + (JSC::DFG::ByteCodeParser::handleInlining): + * dfg/DFGCapabilities.h: + (JSC::DFG::mightCompileEval): + (JSC::DFG::mightCompileProgram): + (JSC::DFG::mightCompileFunctionForCall): + (JSC::DFG::mightCompileFunctionForConstruct): + (JSC::DFG::mightInlineFunctionForCall): + (JSC::DFG::mightInlineFunctionForConstruct): + * dfg/DFGCommon.h: + (JSC::DFG::shouldShowDisassembly): + * dfg/DFGDriver.cpp: + (JSC::DFG::compile): + * dfg/DFGOSRExit.cpp: + (JSC::DFG::OSRExit::considerAddingAsFrequentExitSiteSlow): + * dfg/DFGVariableAccessData.h: + (JSC::DFG::VariableAccessData::shouldUseDoubleFormatAccordingToVote): + * heap/MarkStack.cpp: + (JSC::MarkStackSegmentAllocator::allocate): + (JSC::MarkStackSegmentAllocator::shrinkReserve): + (JSC::MarkStackArray::MarkStackArray): + (JSC::MarkStackThreadSharedData::MarkStackThreadSharedData): + (JSC::SlotVisitor::donateKnownParallel): + (JSC::SlotVisitor::drain): + (JSC::SlotVisitor::drainFromShared): + * heap/MarkStack.h: + (JSC::MarkStack::mergeOpaqueRootsIfProfitable): + 
(JSC::MarkStack::addOpaqueRoot): + * heap/SlotVisitor.h: + (JSC::SlotVisitor::donate): + * jit/JIT.cpp: + (JSC::JIT::emitOptimizationCheck): + * jsc.cpp: + (printUsageStatement): + (parseArguments): + * runtime/InitializeThreading.cpp: + (JSC::initializeThreadingOnce): + * runtime/JSGlobalData.cpp: + (JSC::enableAssembler): + * runtime/JSGlobalObject.cpp: + (JSC::JSGlobalObject::JSGlobalObject): + * runtime/Options.cpp: + (JSC): + (JSC::overrideOptionWithHeuristic): + (JSC::Options::initialize): + (JSC::Options::setOption): + (JSC::Options::dumpAllOptions): + (JSC::Options::dumpOption): + * runtime/Options.h: + (JSC): + (Options): + (EntryInfo): + +2012-07-03 Jocelyn Turcotte <jocelyn.turcotte@nokia.com> Joel Dillon <joel.dillon@codethink.co.uk> [Qt][Win] Fix broken QtWebKit5.lib linking https://bugs.webkit.org/show_bug.cgi?id=88321 - Reviewed by NOBODY (OOPS!). + Reviewed by Kenneth Rohde Christiansen. - Also update the Wx build to use the new define. + The goal is to have different ports build systems define STATICALLY_LINKED_WITH_WTF + when building JavaScriptCore, if both are packaged in the same DLL, instead + of relying on the code to handle this. + The effects of BUILDING_* and STATICALLY_LINKED_WITH_* are currently the same + except for a check in Source/JavaScriptCore/config.h. + + Keeping the old way for the WX port as requested by the port's contributors. + For non-Windows ports there is no difference between IMPORT and EXPORT, no + change is needed. * API/JSBase.h: + JS symbols shouldn't be included by WTF objects anymore. Remove the export when BUILDING_WTF. + * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCoreCommon.vsprops: + Make sure that JavaScriptCore uses import symbols of WTF for the Win port. 
* runtime/JSExportMacros.h: - * wscript: -2012-06-13 Patrick Gansterer <paroga@webkit.org> +2012-07-02 Filip Pizlo <fpizlo@apple.com> + + DFG OSR exit value recoveries should be computed lazily + https://bugs.webkit.org/show_bug.cgi?id=82155 + + Reviewed by Gavin Barraclough. + + This change aims to reduce one aspect of DFG compile times: the fact + that we currently compute the value recoveries for each local and + argument on every speculation check. We compile many speculation checks, + so this can add up quick. The strategy that this change takes is to + have the DFG save just enough information about how the compiler is + choosing to represent state, that the DFG::OSRExitCompiler can reify + the value recoveries lazily. + + This appears to be an 0.3% SunSpider speed-up and is neutral elsewhere. + + I also took the opportunity to fix the sampling regions profiler (it + was missing an export macro) and to put in more sampling regions in + the DFG (which are disabled so long as ENABLE(SAMPLING_REGIONS) is + false). 
+ + * CMakeLists.txt: + * GNUmakefile.list.am: + * JavaScriptCore.xcodeproj/project.pbxproj: + * Target.pri: + * bytecode/CodeBlock.cpp: + (JSC): + (JSC::CodeBlock::shrinkDFGDataToFit): + * bytecode/CodeBlock.h: + (CodeBlock): + (JSC::CodeBlock::minifiedDFG): + (JSC::CodeBlock::variableEventStream): + (DFGData): + * bytecode/Operands.h: + (JSC::Operands::hasOperand): + (Operands): + (JSC::Operands::size): + (JSC::Operands::at): + (JSC::Operands::operator[]): + (JSC::Operands::isArgument): + (JSC::Operands::isVariable): + (JSC::Operands::argumentForIndex): + (JSC::Operands::variableForIndex): + (JSC::Operands::operandForIndex): + (JSC): + (JSC::dumpOperands): + * bytecode/SamplingTool.h: + (SamplingRegion): + * dfg/DFGByteCodeParser.cpp: + (JSC::DFG::parse): + * dfg/DFGCFAPhase.cpp: + (JSC::DFG::performCFA): + * dfg/DFGCSEPhase.cpp: + (JSC::DFG::performCSE): + * dfg/DFGFixupPhase.cpp: + (JSC::DFG::performFixup): + * dfg/DFGGenerationInfo.h: + (JSC::DFG::GenerationInfo::GenerationInfo): + (JSC::DFG::GenerationInfo::initConstant): + (JSC::DFG::GenerationInfo::initInteger): + (JSC::DFG::GenerationInfo::initJSValue): + (JSC::DFG::GenerationInfo::initCell): + (JSC::DFG::GenerationInfo::initBoolean): + (JSC::DFG::GenerationInfo::initDouble): + (JSC::DFG::GenerationInfo::initStorage): + (GenerationInfo): + (JSC::DFG::GenerationInfo::noticeOSRBirth): + (JSC::DFG::GenerationInfo::use): + (JSC::DFG::GenerationInfo::spill): + (JSC::DFG::GenerationInfo::setSpilled): + (JSC::DFG::GenerationInfo::fillJSValue): + (JSC::DFG::GenerationInfo::fillCell): + (JSC::DFG::GenerationInfo::fillInteger): + (JSC::DFG::GenerationInfo::fillBoolean): + (JSC::DFG::GenerationInfo::fillDouble): + (JSC::DFG::GenerationInfo::fillStorage): + (JSC::DFG::GenerationInfo::appendFill): + (JSC::DFG::GenerationInfo::appendSpill): + * dfg/DFGJITCompiler.cpp: + (JSC::DFG::JITCompiler::link): + (JSC::DFG::JITCompiler::compile): + (JSC::DFG::JITCompiler::compileFunction): + * dfg/DFGMinifiedGraph.h: Added. 
+ (DFG): + (MinifiedGraph): + (JSC::DFG::MinifiedGraph::MinifiedGraph): + (JSC::DFG::MinifiedGraph::at): + (JSC::DFG::MinifiedGraph::append): + (JSC::DFG::MinifiedGraph::prepareAndShrink): + (JSC::DFG::MinifiedGraph::setOriginalGraphSize): + (JSC::DFG::MinifiedGraph::originalGraphSize): + * dfg/DFGMinifiedNode.cpp: Added. + (DFG): + (JSC::DFG::MinifiedNode::fromNode): + * dfg/DFGMinifiedNode.h: Added. + (DFG): + (JSC::DFG::belongsInMinifiedGraph): + (MinifiedNode): + (JSC::DFG::MinifiedNode::MinifiedNode): + (JSC::DFG::MinifiedNode::index): + (JSC::DFG::MinifiedNode::op): + (JSC::DFG::MinifiedNode::hasChild1): + (JSC::DFG::MinifiedNode::child1): + (JSC::DFG::MinifiedNode::hasConstant): + (JSC::DFG::MinifiedNode::hasConstantNumber): + (JSC::DFG::MinifiedNode::constantNumber): + (JSC::DFG::MinifiedNode::hasWeakConstant): + (JSC::DFG::MinifiedNode::weakConstant): + (JSC::DFG::MinifiedNode::getIndex): + (JSC::DFG::MinifiedNode::compareByNodeIndex): + (JSC::DFG::MinifiedNode::hasChild): + * dfg/DFGNode.h: + (Node): + * dfg/DFGOSRExit.cpp: + (JSC::DFG::OSRExit::OSRExit): + * dfg/DFGOSRExit.h: + (OSRExit): + * dfg/DFGOSRExitCompiler.cpp: + * dfg/DFGOSRExitCompiler.h: + (OSRExitCompiler): + * dfg/DFGOSRExitCompiler32_64.cpp: + (JSC::DFG::OSRExitCompiler::compileExit): + * dfg/DFGOSRExitCompiler64.cpp: + (JSC::DFG::OSRExitCompiler::compileExit): + * dfg/DFGPredictionPropagationPhase.cpp: + (JSC::DFG::performPredictionPropagation): + * dfg/DFGRedundantPhiEliminationPhase.cpp: + (JSC::DFG::performRedundantPhiElimination): + * dfg/DFGSpeculativeJIT.cpp: + (JSC::DFG::SpeculativeJIT::SpeculativeJIT): + (DFG): + (JSC::DFG::SpeculativeJIT::fillStorage): + (JSC::DFG::SpeculativeJIT::noticeOSRBirth): + (JSC::DFG::SpeculativeJIT::compileMovHint): + (JSC::DFG::SpeculativeJIT::compile): + (JSC::DFG::SpeculativeJIT::computeValueRecoveryFor): + * dfg/DFGSpeculativeJIT.h: + (DFG): + (JSC::DFG::SpeculativeJIT::use): + (SpeculativeJIT): + (JSC::DFG::SpeculativeJIT::spill): + 
(JSC::DFG::SpeculativeJIT::speculationCheck): + (JSC::DFG::SpeculativeJIT::forwardSpeculationCheck): + (JSC::DFG::SpeculativeJIT::recordSetLocal): + * dfg/DFGSpeculativeJIT32_64.cpp: + (JSC::DFG::SpeculativeJIT::fillInteger): + (JSC::DFG::SpeculativeJIT::fillDouble): + (JSC::DFG::SpeculativeJIT::fillJSValue): + (JSC::DFG::SpeculativeJIT::fillSpeculateIntInternal): + (JSC::DFG::SpeculativeJIT::fillSpeculateDouble): + (JSC::DFG::SpeculativeJIT::fillSpeculateCell): + (JSC::DFG::SpeculativeJIT::fillSpeculateBoolean): + (JSC::DFG::SpeculativeJIT::compile): + * dfg/DFGSpeculativeJIT64.cpp: + (JSC::DFG::SpeculativeJIT::fillInteger): + (JSC::DFG::SpeculativeJIT::fillDouble): + (JSC::DFG::SpeculativeJIT::fillJSValue): + (JSC::DFG::SpeculativeJIT::fillSpeculateIntInternal): + (JSC::DFG::SpeculativeJIT::fillSpeculateDouble): + (JSC::DFG::SpeculativeJIT::fillSpeculateCell): + (JSC::DFG::SpeculativeJIT::fillSpeculateBoolean): + (JSC::DFG::SpeculativeJIT::compile): + * dfg/DFGValueRecoveryOverride.h: Added. + (DFG): + (ValueRecoveryOverride): + (JSC::DFG::ValueRecoveryOverride::ValueRecoveryOverride): + * dfg/DFGValueSource.cpp: Added. + (DFG): + (JSC::DFG::ValueSource::dump): + * dfg/DFGValueSource.h: Added. + (DFG): + (JSC::DFG::dataFormatToValueSourceKind): + (JSC::DFG::valueSourceKindToDataFormat): + (JSC::DFG::isInRegisterFile): + (ValueSource): + (JSC::DFG::ValueSource::ValueSource): + (JSC::DFG::ValueSource::forPrediction): + (JSC::DFG::ValueSource::forDataFormat): + (JSC::DFG::ValueSource::isSet): + (JSC::DFG::ValueSource::kind): + (JSC::DFG::ValueSource::isInRegisterFile): + (JSC::DFG::ValueSource::dataFormat): + (JSC::DFG::ValueSource::valueRecovery): + (JSC::DFG::ValueSource::nodeIndex): + (JSC::DFG::ValueSource::nodeIndexFromKind): + (JSC::DFG::ValueSource::kindFromNodeIndex): + * dfg/DFGVariableEvent.cpp: Added. 
+ (DFG): + (JSC::DFG::VariableEvent::dump): + (JSC::DFG::VariableEvent::dumpFillInfo): + (JSC::DFG::VariableEvent::dumpSpillInfo): + * dfg/DFGVariableEvent.h: Added. + (DFG): + (VariableEvent): + (JSC::DFG::VariableEvent::VariableEvent): + (JSC::DFG::VariableEvent::reset): + (JSC::DFG::VariableEvent::fillGPR): + (JSC::DFG::VariableEvent::fillPair): + (JSC::DFG::VariableEvent::fillFPR): + (JSC::DFG::VariableEvent::spill): + (JSC::DFG::VariableEvent::death): + (JSC::DFG::VariableEvent::setLocal): + (JSC::DFG::VariableEvent::movHint): + (JSC::DFG::VariableEvent::kind): + (JSC::DFG::VariableEvent::nodeIndex): + (JSC::DFG::VariableEvent::dataFormat): + (JSC::DFG::VariableEvent::gpr): + (JSC::DFG::VariableEvent::tagGPR): + (JSC::DFG::VariableEvent::payloadGPR): + (JSC::DFG::VariableEvent::fpr): + (JSC::DFG::VariableEvent::virtualRegister): + (JSC::DFG::VariableEvent::operand): + (JSC::DFG::VariableEvent::variableRepresentation): + * dfg/DFGVariableEventStream.cpp: Added. + (DFG): + (JSC::DFG::VariableEventStream::logEvent): + (MinifiedGenerationInfo): + (JSC::DFG::MinifiedGenerationInfo::MinifiedGenerationInfo): + (JSC::DFG::MinifiedGenerationInfo::update): + (JSC::DFG::VariableEventStream::reconstruct): + * dfg/DFGVariableEventStream.h: Added. + (DFG): + (VariableEventStream): + (JSC::DFG::VariableEventStream::appendAndLog): + * dfg/DFGVirtualRegisterAllocationPhase.cpp: + (JSC::DFG::performVirtualRegisterAllocation): + +2012-07-02 Filip Pizlo <fpizlo@apple.com> + + DFG::ArgumentsSimplificationPhase should assert that the PhantomArguments nodes it creates are not shouldGenerate() + https://bugs.webkit.org/show_bug.cgi?id=90407 - [WIN] Remove dependency on pthread from MachineStackMarker - https://bugs.webkit.org/show_bug.cgi?id=68429 + Reviewed by Mark Hahnenberg. - Reviewed by NOBODY (OOPS!). + * dfg/DFGArgumentsSimplificationPhase.cpp: + (JSC::DFG::ArgumentsSimplificationPhase::run): - Implement pthread TLS functionality with native windows functions. 
+2012-07-02 Gavin Barraclough <barraclough@apple.com> - * heap/MachineStackMarker.cpp: Use the new functions instead of pthread directly. - * heap/MachineStackMarker.h: - * wtf/ThreadSpecific.h: - (WTF::ThreadSpecificKeyCreate): Added wrapper around pthread_key_create. - (WTF::ThreadSpecificKeyDelete): Added wrapper around pthread_key_delete. - (WTF::ThreadSpecificSet): Added wrapper around pthread_setspecific. - (WTF::ThreadSpecificGet): Added wrapper around pthread_getspecific. - * wtf/ThreadSpecificWin.cpp: + Array.prototype.pop should throw if property is not configurable + https://bugs.webkit.org/show_bug.cgi?id=75788 + + Rubber Stamped by Oliver Hunt. + + No real bug here any more, but the error we throw sometimes has a misleading message. + + * runtime/JSArray.cpp: + (JSC::JSArray::pop): + +2012-06-29 Filip Pizlo <fpizlo@apple.com> + + JSObject wastes too much memory on unused property slots + https://bugs.webkit.org/show_bug.cgi?id=90255 + + Reviewed by Mark Hahnenberg. + + Rolling back in after applying a simple fix: it appears that + JSObject::setStructureAndReallocateStorageIfNecessary() was allocating more + property storage than necessary. Fixing this appears to resolve the crash. + + This does a few things: + + - JSNonFinalObject no longer has inline property storage. + + - Initial out-of-line property storage size is 4 slots for JSNonFinalObject, + or 2x the inline storage for JSFinalObject. + + - Property storage is only reallocated if it needs to be. Previously, we + would reallocate the property storage on any transition where the original + structure said shouldGrowProperyStorage(), but this led to spurious + reallocations when doing transitionless property adds and there are + deleted property slots available. That in turn led to crashes, because we + would switch to out-of-line storage even if the capacity matched the + criteria for inline storage. 
+ + - Inline JSFunction allocation is killed off because we don't have a good + way of inlining property storage allocation. This didn't hurt performance. + Killing off code is better than fixing it if that code wasn't doing any + good. + + This looks like a 1% progression on V8. + + * interpreter/Interpreter.cpp: + (JSC::Interpreter::privateExecute): + * jit/JIT.cpp: + (JSC::JIT::privateCompileSlowCases): + * jit/JIT.h: + * jit/JITInlineMethods.h: + (JSC::JIT::emitAllocateBasicJSObject): + (JSC): + * jit/JITOpcodes.cpp: + (JSC::JIT::emit_op_new_func): + (JSC): + (JSC::JIT::emit_op_new_func_exp): + * runtime/JSFunction.cpp: + (JSC::JSFunction::finishCreation): + * runtime/JSObject.h: + (JSC::JSObject::isUsingInlineStorage): + (JSObject): + (JSC::JSObject::finishCreation): + (JSC): + (JSC::JSNonFinalObject::hasInlineStorage): + (JSNonFinalObject): + (JSC::JSNonFinalObject::JSNonFinalObject): + (JSC::JSNonFinalObject::finishCreation): + (JSC::JSFinalObject::hasInlineStorage): + (JSC::JSFinalObject::finishCreation): + (JSC::JSObject::offsetOfInlineStorage): + (JSC::JSObject::setPropertyStorage): + (JSC::Structure::inlineStorageCapacity): + (JSC::Structure::isUsingInlineStorage): + (JSC::JSObject::putDirectInternal): + (JSC::JSObject::setStructureAndReallocateStorageIfNecessary): + (JSC::JSObject::putDirectWithoutTransition): + * runtime/Structure.cpp: + (JSC::Structure::Structure): + (JSC::nextPropertyStorageCapacity): + (JSC): + (JSC::Structure::growPropertyStorageCapacity): + (JSC::Structure::suggestedNewPropertyStorageSize): + * runtime/Structure.h: + (JSC::Structure::putWillGrowPropertyStorage): + (Structure): + +2012-06-29 Filip Pizlo <fpizlo@apple.com> + + Webkit crashes in DFG on Google Docs when creating a new document + https://bugs.webkit.org/show_bug.cgi?id=90209 + + Reviewed by Gavin Barraclough. + + Don't attempt to short-circuit Phantom(GetLocal) if the GetLocal is for a + captured variable. 
+ + * dfg/DFGCFGSimplificationPhase.cpp: + (JSC::DFG::CFGSimplificationPhase::mergeBlocks): + +2012-06-30 Zan Dobersek <zandobersek@gmail.com> + + Unreviewed, rolling out r121605. + http://trac.webkit.org/changeset/121605 + https://bugs.webkit.org/show_bug.cgi?id=90336 + + Changes caused flaky crashes in sputnik/Unicode tests on Apple + WK1 and GTK Linux builders + + * interpreter/Interpreter.cpp: + (JSC::Interpreter::privateExecute): + * jit/JIT.cpp: + (JSC::JIT::privateCompileSlowCases): + * jit/JIT.h: + * jit/JITInlineMethods.h: + (JSC::JIT::emitAllocateBasicJSObject): + (JSC::JIT::emitAllocateJSFinalObject): + (JSC): + (JSC::JIT::emitAllocateJSFunction): + * jit/JITOpcodes.cpp: + (JSC::JIT::emit_op_new_func): + (JSC::JIT::emitSlow_op_new_func): + (JSC): + (JSC::JIT::emit_op_new_func_exp): + (JSC::JIT::emitSlow_op_new_func_exp): + * runtime/JSFunction.cpp: + (JSC::JSFunction::finishCreation): + * runtime/JSObject.h: + (JSC::JSObject::isUsingInlineStorage): + (JSObject): + (JSC::JSObject::finishCreation): + (JSC): + (JSNonFinalObject): + (JSC::JSNonFinalObject::JSNonFinalObject): + (JSC::JSNonFinalObject::finishCreation): + (JSFinalObject): + (JSC::JSFinalObject::finishCreation): + (JSC::JSObject::offsetOfInlineStorage): + (JSC::JSObject::setPropertyStorage): + (JSC::Structure::isUsingInlineStorage): + (JSC::JSObject::putDirectInternal): + (JSC::JSObject::putDirectWithoutTransition): + (JSC::JSObject::transitionTo): + * runtime/Structure.cpp: + (JSC::Structure::Structure): + (JSC): + (JSC::Structure::growPropertyStorageCapacity): + (JSC::Structure::suggestedNewPropertyStorageSize): + * runtime/Structure.h: + (JSC::Structure::shouldGrowPropertyStorage): + (JSC::Structure::propertyStorageSize): + +2012-06-29 Mark Hahnenberg <mhahnenberg@apple.com> + + Remove warning about protected values when the Heap is being destroyed + https://bugs.webkit.org/show_bug.cgi?id=90302 + + Reviewed by Geoffrey Garen. 
+ + Having to do book-keeping about whether values allocated from a certain + VM are or are not protected makes the JSC API much more difficult to use + correctly. Clients should be able to throw an entire VM away and not have + to worry about unprotecting all of the values that they protected earlier. + + * heap/Heap.cpp: + (JSC::Heap::lastChanceToFinalize): + +2012-06-29 Filip Pizlo <fpizlo@apple.com> + + JSObject wastes too much memory on unused property slots + https://bugs.webkit.org/show_bug.cgi?id=90255 + + Reviewed by Mark Hahnenberg. + + This does a few things: + + - JSNonFinalObject no longer has inline property storage. + + - Initial out-of-line property storage size is 4 slots for JSNonFinalObject, + or 2x the inline storage for JSFinalObject. + + - Property storage is only reallocated if it needs to be. Previously, we + would reallocate the property storage on any transition where the original + structure said shouldGrowProperyStorage(), but this led to spurious + reallocations when doing transitionless property adds and there are + deleted property slots available. That in turn led to crashes, because we + would switch to out-of-line storage even if the capacity matched the + criteria for inline storage. + + - Inline JSFunction allocation is killed off because we don't have a good + way of inlining property storage allocation. This didn't hurt performance. + Killing off code is better than fixing it if that code wasn't doing any + good. + + This looks like a 1% progression on V8. 
+ + * interpreter/Interpreter.cpp: + (JSC::Interpreter::privateExecute): + * jit/JIT.cpp: + (JSC::JIT::privateCompileSlowCases): + * jit/JIT.h: + * jit/JITInlineMethods.h: + (JSC::JIT::emitAllocateBasicJSObject): + (JSC): + * jit/JITOpcodes.cpp: + (JSC::JIT::emit_op_new_func): + (JSC): + (JSC::JIT::emit_op_new_func_exp): + * runtime/JSFunction.cpp: + (JSC::JSFunction::finishCreation): + * runtime/JSObject.h: + (JSC::JSObject::isUsingInlineStorage): + (JSObject): + (JSC::JSObject::finishCreation): + (JSC): + (JSC::JSNonFinalObject::hasInlineStorage): + (JSNonFinalObject): + (JSC::JSNonFinalObject::JSNonFinalObject): + (JSC::JSNonFinalObject::finishCreation): + (JSC::JSFinalObject::hasInlineStorage): + (JSC::JSFinalObject::finishCreation): + (JSC::JSObject::offsetOfInlineStorage): + (JSC::JSObject::setPropertyStorage): + (JSC::Structure::inlineStorageCapacity): + (JSC::Structure::isUsingInlineStorage): + (JSC::JSObject::putDirectInternal): + (JSC::JSObject::setStructureAndReallocateStorageIfNecessary): + (JSC::JSObject::putDirectWithoutTransition): + * runtime/Structure.cpp: + (JSC::Structure::Structure): + (JSC::nextPropertyStorageCapacity): + (JSC): + (JSC::Structure::growPropertyStorageCapacity): + (JSC::Structure::suggestedNewPropertyStorageSize): + * runtime/Structure.h: + (JSC::Structure::putWillGrowPropertyStorage): + (Structure): + +2012-06-28 Filip Pizlo <fpizlo@apple.com> + + DFG recompilation heuristics should be based on count, not rate + https://bugs.webkit.org/show_bug.cgi?id=90146 + + Reviewed by Oliver Hunt. + + This removes a bunch of code that was previously trying to prevent spurious + reoptimizations if a large enough majority of executions of a code block did + not result in OSR exit. It turns out that this code was purely harmful. 
This + patch removes all of that logic and replaces it with a dead-simple + heuristic: if you exit more than N times (where N is an exponential function + of the number of times the code block has already been recompiled) then we + will recompile. + + This appears to be a broad ~1% win on many benchmarks large and small. + + * bytecode/CodeBlock.cpp: + (JSC::CodeBlock::CodeBlock): + * bytecode/CodeBlock.h: + (JSC::CodeBlock::osrExitCounter): + (JSC::CodeBlock::countOSRExit): + (CodeBlock): + (JSC::CodeBlock::addressOfOSRExitCounter): + (JSC::CodeBlock::offsetOfOSRExitCounter): + (JSC::CodeBlock::adjustedExitCountThreshold): + (JSC::CodeBlock::exitCountThresholdForReoptimization): + (JSC::CodeBlock::exitCountThresholdForReoptimizationFromLoop): + (JSC::CodeBlock::shouldReoptimizeNow): + (JSC::CodeBlock::shouldReoptimizeFromLoopNow): + * bytecode/ExecutionCounter.cpp: + (JSC::ExecutionCounter::setThreshold): + * bytecode/ExecutionCounter.h: + (ExecutionCounter): + (JSC::ExecutionCounter::clippedThreshold): + * dfg/DFGJITCompiler.cpp: + (JSC::DFG::JITCompiler::compileBody): + * dfg/DFGOSRExit.cpp: + (JSC::DFG::OSRExit::considerAddingAsFrequentExitSiteSlow): + * dfg/DFGOSRExitCompiler.cpp: + (JSC::DFG::OSRExitCompiler::handleExitCounts): + * dfg/DFGOperations.cpp: + * jit/JITStubs.cpp: + (JSC::DEFINE_STUB_FUNCTION): + * runtime/Options.cpp: + (Options): + (JSC::Options::initializeOptions): + * runtime/Options.h: + (Options): + +2012-06-28 Mark Lam <mark.lam@apple.com> + + Adding a commenting utility to record BytecodeGenerator comments + with opcodes that are emitted. Presently, the comments can only + be constant strings. Adding comments for opcodes is optional. + If a comment is added, the comment will be printed following the + opcode when CodeBlock::dump() is called. + + This utility is disabled by default, and is only meant for VM + development purposes. It should not be enabled for product builds. 
+ + To enable this utility, set ENABLE_BYTECODE_COMMENTS in CodeBlock.h + to 1. + + https://bugs.webkit.org/show_bug.cgi?id=90095 + + Reviewed by Geoffrey Garen. + + * GNUmakefile.list.am: + * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj: + * JavaScriptCore.xcodeproj/project.pbxproj: + * bytecode/CodeBlock.cpp: + (JSC::CodeBlock::dumpBytecodeCommentAndNewLine): Dumps the comment. + (JSC): + (JSC::CodeBlock::printUnaryOp): Add comment dumps. + (JSC::CodeBlock::printBinaryOp): Add comment dumps. + (JSC::CodeBlock::printConditionalJump): Add comment dumps. + (JSC::CodeBlock::printCallOp): Add comment dumps. + (JSC::CodeBlock::printPutByIdOp): Add comment dumps. + (JSC::CodeBlock::dump): Add comment dumps. + (JSC::CodeBlock::CodeBlock): + (JSC::CodeBlock::commentForBytecodeOffset): + Finds the comment for an opcode if available. + (JSC::CodeBlock::dumpBytecodeComments): + For debugging whether comments are collected. + It is not being called anywhere. + * bytecode/CodeBlock.h: + (CodeBlock): + (JSC::CodeBlock::bytecodeComments): + * bytecode/Comment.h: Added. + (JSC): + (Comment): + * bytecompiler/BytecodeGenerator.cpp: + (JSC::BytecodeGenerator::BytecodeGenerator): + (JSC::BytecodeGenerator::emitOpcode): Calls emitComment(). + (JSC): + (JSC::BytecodeGenerator::emitComment): Adds comment to CodeBlock. + (JSC::BytecodeGenerator::prependComment): + Registers a comment for emitComment() to use later. + * bytecompiler/BytecodeGenerator.h: + (BytecodeGenerator): + (JSC::BytecodeGenerator::emitComment): + (JSC::BytecodeGenerator::prependComment): + These are inlined versions of these functions that nullify them + when ENABLE_BYTECODE_COMMENTS is 0. + (JSC::BytecodeGenerator::comments): + +2012-06-28 Oliver Hunt <oliver@apple.com> + + 32bit DFG incorrectly claims an fpr is fillable even if it has not been proven double + https://bugs.webkit.org/show_bug.cgi?id=90127 + + Reviewed by Filip Pizlo. 
+ + The 32-bit version of fillSpeculateDouble doesn't handle Number->fpr loads + correctly. This patch fixes this by killing the fill info in the GenerationInfo + when the spillFormat doesn't guarantee the value is a double. + + * dfg/DFGSpeculativeJIT32_64.cpp: + (JSC::DFG::SpeculativeJIT::fillSpeculateDouble): + +2012-06-28 Kent Tamura <tkent@chromium.org> + + Classify form control states by their owner forms + https://bugs.webkit.org/show_bug.cgi?id=89950 + + Reviewed by Hajime Morita. + + * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def: + Expose WTF::StringBuilder::canShrink() + +2012-06-27 Michael Saboff <msaboff@apple.com> + + [Win] jscore-tests flakey + https://bugs.webkit.org/show_bug.cgi?id=88118 + + Reviewed by Jessie Berlin. + + jsDriver.pl on windows intermittently doesn't get the returned value from jsc, + instead it gets 126. Added a new option to jsc (-x) which prints the exit + code before exiting. jsDriver.pl uses this option on Windows and parses the + exit code output for the exit code, removing it before comparing the actual + and expected outputs. Filed a follow on "FIXME" defect: + [WIN] Intermittent failure for jsc return value to propagate through jsDriver.pl + https://bugs.webkit.org/show_bug.cgi?id=90119 + + * jsc.cpp: + (CommandLine::CommandLine): + (CommandLine): + (printUsageStatement): + (parseArguments): + (jscmain): + * tests/mozilla/jsDriver.pl: + (execute_tests): + +2012-06-27 Sheriff Bot <webkit.review.bot@gmail.com> + + Unreviewed, rolling out r121359. + http://trac.webkit.org/changeset/121359 + https://bugs.webkit.org/show_bug.cgi?id=90115 + + Broke many inspector tests (Requested by jpfau on #webkit). 
+ + * interpreter/Interpreter.h: + (JSC::StackFrame::toString): + +2012-06-27 Filip Pizlo <fpizlo@apple.com> + + Javascript SHA-512 gives wrong hash on second and subsequent runs unless Web Inspector Javascript Debugging is on + https://bugs.webkit.org/show_bug.cgi?id=90053 + <rdar://problem/11764613> + + Reviewed by Mark Hahnenberg. + + The problem is that the code was assuming that the recovery should be Undefined if the source of + the SetLocal was !shouldGenerate(). But that's wrong, since the DFG optimizer may skip around a + UInt32ToNumber node (hence making it !shouldGenerate()) and keep the source of that node alive. + In that case we should base the recovery on the source of the UInt32ToNumber. The logic for this + was already in place but the fast check for !shouldGenerate() broke it. + + * dfg/DFGSpeculativeJIT.cpp: + (JSC::DFG::SpeculativeJIT::computeValueRecoveryFor): + +2012-06-27 Filip Pizlo <fpizlo@apple.com> + + DFG disassembly should be easier to read + https://bugs.webkit.org/show_bug.cgi?id=90106 + + Reviewed by Mark Hahnenberg. + + Did a few things: + + - Options::showDFGDisassembly now shows OSR exit disassembly as well. + + - Phi node dumping doesn't attempt to do line wrapping since it just made the dump harder + to read. + + - DFG graph disassembly view shows a few additional node types that turn out to be + essential for understanding OSR exits. + + Put together, these changes reinforce the philosophy that anything needed for computing + OSR exit is just as important as the machine code itself. Of course, we still don't take + that philosophy to its full extreme - for example Phantom nodes are not dumped. We may + revisit that in the future. 
+ + * assembler/LinkBuffer.cpp: + (JSC::LinkBuffer::finalizeCodeWithDisassembly): + * assembler/LinkBuffer.h: + (JSC): + * dfg/DFGDisassembler.cpp: + (JSC::DFG::Disassembler::dump): + * dfg/DFGGraph.cpp: + (JSC::DFG::Graph::dumpBlockHeader): + * dfg/DFGNode.h: + (JSC::DFG::Node::willHaveCodeGenOrOSR): + * dfg/DFGOSRExitCompiler.cpp: + * jit/JIT.cpp: + (JSC::JIT::privateCompile): + +2012-06-25 Mark Hahnenberg <mhahnenberg@apple.com> + + JSLock should be per-JSGlobalData + https://bugs.webkit.org/show_bug.cgi?id=89123 + + Reviewed by Geoffrey Garen. + + * API/APIShims.h: + (APIEntryShimWithoutLock): + (JSC::APIEntryShimWithoutLock::APIEntryShimWithoutLock): Added an extra parameter to the constructor to + determine whether we should ref the JSGlobalData or not. We want to ref all the time except for in the + HeapTimer class because timerDidFire could run after somebody has started to tear down that particular + JSGlobalData, so we wouldn't want to resurrect the ref count of that JSGlobalData from 0 back to 1 after + its destruction has begun. + (JSC::APIEntryShimWithoutLock::~APIEntryShimWithoutLock): + (JSC::APIEntryShim::APIEntryShim): + (APIEntryShim): + (JSC::APIEntryShim::~APIEntryShim): + (JSC::APIEntryShim::init): Factored out common initialization code for the various APIEntryShim constructors. + Also moved the timeoutChecker stop and start here because we need to start after we've grabbed the API lock + and before we've released it, which can only be done in APIEntryShim. + (JSC::APICallbackShim::~APICallbackShim): We no longer need to synchronize here. 
+ * API/JSContextRef.cpp: + (JSGlobalContextCreate): + (JSGlobalContextCreateInGroup): + (JSGlobalContextRelease): + (JSContextCreateBacktrace): + * JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def: + * heap/CopiedSpace.cpp: + (JSC::CopiedSpace::tryAllocateSlowCase): + * heap/Heap.cpp: + (JSC::Heap::protect): + (JSC::Heap::unprotect): + (JSC::Heap::collect): + (JSC::Heap::setActivityCallback): + (JSC::Heap::activityCallback): + (JSC::Heap::sweeper): + * heap/Heap.h: Changed m_activityCallback and m_sweeper to be raw pointers rather than OwnPtrs because they + are now responsible for their own lifetime. Also changed the order of declaration of the GCActivityCallback + and the IncrementalSweeper to make sure they're the last things that get initialized during construction to + prevent any issues with uninitialized memory in the JSGlobalData/Heap they might care about. + (Heap): + * heap/HeapTimer.cpp: Refactored to allow for thread-safe operation and shutdown. + (JSC::HeapTimer::~HeapTimer): + (JSC::HeapTimer::invalidate): + (JSC): + (JSC::HeapTimer::didStartVMShutdown): Called at the beginning of ~JSGlobalData. If we're on the same thread + that the HeapTimer is running on, we kill the HeapTimer ourselves. If not, then we set some state in the + HeapTimer and schedule it to fire immediately so that it can notice and kill itself. + (JSC::HeapTimer::timerDidFire): We grab our mutex and check our JSGlobalData pointer. If it has been zero-ed + out, then we know the VM has started to shutdown and we should kill ourselves. Otherwise, grab the APIEntryShim, + but without ref-ing the JSGlobalData (we don't want to bring the JSGlobalData's ref-count from 0 to 1) in case + we were interrupted between releasing our mutex and trying to grab the APILock. + * heap/HeapTimer.h: + (HeapTimer): + * heap/IncrementalSweeper.cpp: + (JSC::IncrementalSweeper::doWork): We no longer need the API shim here since HeapTimer::timerDidFire handles + all of that for us. 
+ (JSC::IncrementalSweeper::create): + * heap/IncrementalSweeper.h: + (IncrementalSweeper): + * heap/MarkedAllocator.cpp: + (JSC::MarkedAllocator::allocateSlowCase): + * heap/WeakBlock.cpp: + (JSC::WeakBlock::reap): + * jsc.cpp: + (functionGC): + (functionReleaseExecutableMemory): + (jscmain): + * runtime/Completion.cpp: + (JSC::checkSyntax): + (JSC::evaluate): + * runtime/GCActivityCallback.h: + (DefaultGCActivityCallback): + (JSC::DefaultGCActivityCallback::create): + * runtime/JSGlobalData.cpp: + (JSC::JSGlobalData::JSGlobalData): + (JSC::JSGlobalData::~JSGlobalData): Signals to the two HeapTimers (GCActivityCallback and IncrementalSweeper) + that the VM has started shutting down. It then waits until the HeapTimer is done with whatever activity + it needs to do before continuing with any further destruction. Also asserts that we do not currently hold the + APILock because this could potentially cause deadlock when we try to signal to the HeapTimers using their mutexes. + (JSC::JSGlobalData::sharedInstance): Protect the initialization for the shared instance with the GlobalJSLock. + (JSC::JSGlobalData::sharedInstanceInternal): + * runtime/JSGlobalData.h: Change to be ThreadSafeRefCounted so that we don't have to worry about refing and + de-refing JSGlobalDatas on separate threads since we don't do it that often anyways. + (JSGlobalData): + (JSC::JSGlobalData::apiLock): + * runtime/JSGlobalObject.cpp: + (JSC::JSGlobalObject::~JSGlobalObject): + (JSC::JSGlobalObject::init): + * runtime/JSLock.cpp: + (JSC): + (JSC::GlobalJSLock::GlobalJSLock): For accessing the shared instance. + (JSC::GlobalJSLock::~GlobalJSLock): + (JSC::JSLockHolder::JSLockHolder): MutexLocker for JSLock. Also refs the JSGlobalData to keep it alive so that + it can successfully unlock it later without it disappearing from underneath it. 
+ (JSC::JSLockHolder::~JSLockHolder): + (JSC::JSLock::JSLock): + (JSC::JSLock::~JSLock): + (JSC::JSLock::lock): Uses the spin lock for guarding the lock count and owner thread fields. Uses the mutex for + actually waiting for long periods. + (JSC::JSLock::unlock): + (JSC::JSLock::currentThreadIsHoldingLock): + (JSC::JSLock::dropAllLocks): + (JSC::JSLock::dropAllLocksUnconditionally): + (JSC::JSLock::grabAllLocks): + (JSC::JSLock::DropAllLocks::DropAllLocks): + (JSC::JSLock::DropAllLocks::~DropAllLocks): + * runtime/JSLock.h: + (JSC): + (GlobalJSLock): + (JSLockHolder): + (JSLock): + (DropAllLocks): + * runtime/WeakGCMap.h: + (JSC::WeakGCMap::set): + * testRegExp.cpp: + (realMain): + +2012-06-27 Filip Pizlo <fpizlo@apple.com> + + x86 disassembler confuses immediates with addresses + https://bugs.webkit.org/show_bug.cgi?id=90099 + + Reviewed by Mark Hahnenberg. + + Prepend "$" to immediates to disambiguate between immediates and addresses. This is in + accordance with the gas and AT&T syntax. + + * disassembler/udis86/udis86_syn-att.c: + (gen_operand): + +2012-06-27 Filip Pizlo <fpizlo@apple.com> + + Add a comment clarifying Options::showDisassembly versus Options::showDFGDisassembly. + + Rubber stamped by Mark Hahnenberg. + + * runtime/Options.cpp: + (JSC::Options::initializeOptions): + +2012-06-27 Anthony Scian <ascian@rim.com> + + Web Inspector [JSC]: Implement ScriptCallStack::stackTrace + https://bugs.webkit.org/show_bug.cgi?id=40118 + + Reviewed by Yong Li. + + Added member functions to expose function name, urlString, and line #. + Refactored toString to make use of these member functions to reduce + duplicated code for future maintenance. + + Manually tested refactoring of toString by tracing thrown exceptions. 
+ + * interpreter/Interpreter.h: + (StackFrame): + (JSC::StackFrame::toString): + (JSC::StackFrame::friendlySourceURL): + (JSC::StackFrame::friendlyFunctionName): + (JSC::StackFrame::friendlyLineNumber): + +2012-06-27 Oswald Buddenhagen <oswald.buddenhagen@nokia.com> + + [Qt] Remove redundant c++11 warning suppression code + + This is already handled in default_post. + + Reviewed by Tor Arne Vestbø. + + * Target.pri: + +2012-06-26 Tor Arne Vestbø <tor.arne.vestbo@nokia.com> + + [Qt] Add missing heades to HEADERS + + For JavaScriptCore there aren't any Qt specific files, so we include all + headers for easy editing in Qt Creator. + + Reviewed by Simon Hausmann. + + * Target.pri: + +2012-06-26 Dominic Cooney <dominicc@chromium.org> + + [Chromium] Remove unused build scripts and empty folders for JavaScriptCore w/ gyp + https://bugs.webkit.org/show_bug.cgi?id=90029 + + Reviewed by Adam Barth. + + * gyp: Removed. + * gyp/generate-derived-sources.sh: Removed. + * gyp/generate-dtrace-header.sh: Removed. + * gyp/run-if-exists.sh: Removed. + * gyp/update-info-plist.sh: Removed. 
2012-06-26 Geoffrey Garen <ggaren@apple.com> diff --git a/Source/JavaScriptCore/Configurations/FeatureDefines.xcconfig b/Source/JavaScriptCore/Configurations/FeatureDefines.xcconfig index ae2c393b1..5fa30a6e9 100644 --- a/Source/JavaScriptCore/Configurations/FeatureDefines.xcconfig +++ b/Source/JavaScriptCore/Configurations/FeatureDefines.xcconfig @@ -51,11 +51,11 @@ ENABLE_DATALIST = ; ENABLE_DATA_TRANSFER_ITEMS = ; ENABLE_DETAILS = ENABLE_DETAILS; ENABLE_DEVICE_ORIENTATION = ; +ENABLE_DIALOG_ELEMENT = ; ENABLE_DIRECTORY_UPLOAD = ; ENABLE_FILE_SYSTEM = ; ENABLE_FILTERS = $(ENABLE_FILTERS_$(REAL_PLATFORM_NAME)); ENABLE_FILTERS_macosx = ENABLE_FILTERS; -ENABLE_FONT_BOOSTING = ; ENABLE_FULLSCREEN_API = ENABLE_FULLSCREEN_API; ENABLE_GAMEPAD = ; ENABLE_GEOLOCATION = ENABLE_GEOLOCATION; @@ -119,6 +119,7 @@ ENABLE_SVG = ENABLE_SVG; ENABLE_SVG_DOM_OBJC_BINDINGS = $(ENABLE_SVG_DOM_OBJC_BINDINGS_$(REAL_PLATFORM_NAME)); ENABLE_SVG_DOM_OBJC_BINDINGS_macosx = ENABLE_SVG_DOM_OBJC_BINDINGS; ENABLE_SVG_FONTS = ENABLE_SVG_FONTS; +ENABLE_TEXT_AUTOSIZING = ; ENABLE_TEXT_NOTIFICATIONS_ONLY = ENABLE_TEXT_NOTIFICATIONS_ONLY; ENABLE_TOUCH_ICON_LOADING = ; ENABLE_UNDO_MANAGER = ; @@ -132,4 +133,4 @@ ENABLE_WEB_TIMING = ; ENABLE_WORKERS = ENABLE_WORKERS; ENABLE_XSLT = ENABLE_XSLT; -FEATURE_DEFINES = $(ENABLE_3D_RENDERING) $(ENABLE_ACCELERATED_2D_CANVAS) $(ENABLE_ANIMATION_API) $(ENABLE_BLOB) $(ENABLE_CHANNEL_MESSAGING) $(ENABLE_CSP_NEXT) $(ENABLE_CSS3_FLEXBOX) $(ENABLE_CSS_EXCLUSIONS) $(ENABLE_CSS_FILTERS) $(ENABLE_CSS_IMAGE_RESOLUTION) $(ENABLE_CSS_REGIONS) $(ENABLE_CSS_SHADERS) $(ENABLE_CSS_VARIABLES) $(ENABLE_CUSTOM_SCHEME_HANDLER) $(ENABLE_DASHBOARD_SUPPORT) $(ENABLE_DATALIST) $(ENABLE_DATA_TRANSFER_ITEMS) $(ENABLE_DETAILS) $(ENABLE_DEVICE_ORIENTATION) $(ENABLE_DIRECTORY_UPLOAD) $(ENABLE_FILE_SYSTEM) $(ENABLE_FILTERS) $(ENABLE_FONT_BOOSTING) $(ENABLE_FULLSCREEN_API) $(ENABLE_GAMEPAD) $(ENABLE_GEOLOCATION) $(ENABLE_HIGH_DPI_CANVAS) $(ENABLE_ICONDATABASE) $(ENABLE_IFRAME_SEAMLESS) 
$(ENABLE_INDEXED_DATABASE) $(ENABLE_INPUT_TYPE_COLOR) $(ENABLE_INPUT_SPEECH) $(ENABLE_INPUT_TYPE_DATE) $(ENABLE_INPUT_TYPE_DATETIME) $(ENABLE_INPUT_TYPE_DATETIMELOCAL) $(ENABLE_INPUT_TYPE_MONTH) $(ENABLE_INPUT_TYPE_TIME) $(ENABLE_INPUT_TYPE_WEEK) $(ENABLE_JAVASCRIPT_DEBUGGER) $(ENABLE_LEGACY_CSS_VENDOR_PREFIXES) $(ENABLE_LEGACY_NOTIFICATIONS) $(ENABLE_LINK_PREFETCH) $(ENABLE_LINK_PRERENDER) $(ENABLE_MATHML) $(ENABLE_MEDIA_SOURCE) $(ENABLE_MEDIA_STATISTICS) $(ENABLE_METER_TAG) $(ENABLE_MICRODATA) $(ENABLE_MUTATION_OBSERVERS) $(ENABLE_NOTIFICATIONS) $(ENABLE_PAGE_VISIBILITY_API) $(ENABLE_PROGRESS_TAG) $(ENABLE_QUOTA) $(ENABLE_REGISTER_PROTOCOL_HANDLER) $(ENABLE_REQUEST_ANIMATION_FRAME) $(ENABLE_SCRIPTED_SPEECH) $(ENABLE_SHADOW_DOM) $(ENABLE_SHARED_WORKERS) $(ENABLE_SQL_DATABASE) $(ENABLE_STYLE_SCOPED) $(ENABLE_SVG) $(ENABLE_SVG_DOM_OBJC_BINDINGS) $(ENABLE_SVG_FONTS) $(ENABLE_TEXT_NOTIFICATIONS_ONLY) $(ENABLE_TOUCH_ICON_LOADING) $(ENABLE_UNDO_MANAGER) $(ENABLE_VIDEO) $(ENABLE_VIDEO_TRACK) $(ENABLE_WEBGL) $(ENABLE_WEB_AUDIO) $(ENABLE_WEB_SOCKETS) $(ENABLE_WEB_TIMING) $(ENABLE_WORKERS) $(ENABLE_XSLT); +FEATURE_DEFINES = $(ENABLE_3D_RENDERING) $(ENABLE_ACCELERATED_2D_CANVAS) $(ENABLE_ANIMATION_API) $(ENABLE_BLOB) $(ENABLE_CHANNEL_MESSAGING) $(ENABLE_CSP_NEXT) $(ENABLE_CSS3_FLEXBOX) $(ENABLE_CSS_EXCLUSIONS) $(ENABLE_CSS_FILTERS) $(ENABLE_CSS_IMAGE_RESOLUTION) $(ENABLE_CSS_REGIONS) $(ENABLE_CSS_SHADERS) $(ENABLE_CSS_VARIABLES) $(ENABLE_CUSTOM_SCHEME_HANDLER) $(ENABLE_DASHBOARD_SUPPORT) $(ENABLE_DATALIST) $(ENABLE_DATA_TRANSFER_ITEMS) $(ENABLE_DETAILS) $(ENABLE_DEVICE_ORIENTATION) $(ENABLE_DIALOG_ELEMENT) $(ENABLE_DIRECTORY_UPLOAD) $(ENABLE_FILE_SYSTEM) $(ENABLE_FILTERS) $(ENABLE_FULLSCREEN_API) $(ENABLE_GAMEPAD) $(ENABLE_GEOLOCATION) $(ENABLE_HIGH_DPI_CANVAS) $(ENABLE_ICONDATABASE) $(ENABLE_IFRAME_SEAMLESS) $(ENABLE_INDEXED_DATABASE) $(ENABLE_INPUT_TYPE_COLOR) $(ENABLE_INPUT_SPEECH) $(ENABLE_INPUT_TYPE_DATE) $(ENABLE_INPUT_TYPE_DATETIME) $(ENABLE_INPUT_TYPE_DATETIMELOCAL) 
$(ENABLE_INPUT_TYPE_MONTH) $(ENABLE_INPUT_TYPE_TIME) $(ENABLE_INPUT_TYPE_WEEK) $(ENABLE_JAVASCRIPT_DEBUGGER) $(ENABLE_LEGACY_CSS_VENDOR_PREFIXES) $(ENABLE_LEGACY_NOTIFICATIONS) $(ENABLE_LINK_PREFETCH) $(ENABLE_LINK_PRERENDER) $(ENABLE_MATHML) $(ENABLE_MEDIA_SOURCE) $(ENABLE_MEDIA_STATISTICS) $(ENABLE_METER_TAG) $(ENABLE_MICRODATA) $(ENABLE_MUTATION_OBSERVERS) $(ENABLE_NOTIFICATIONS) $(ENABLE_PAGE_VISIBILITY_API) $(ENABLE_PROGRESS_TAG) $(ENABLE_QUOTA) $(ENABLE_REGISTER_PROTOCOL_HANDLER) $(ENABLE_REQUEST_ANIMATION_FRAME) $(ENABLE_SCRIPTED_SPEECH) $(ENABLE_SHADOW_DOM) $(ENABLE_SHARED_WORKERS) $(ENABLE_SQL_DATABASE) $(ENABLE_STYLE_SCOPED) $(ENABLE_SVG) $(ENABLE_SVG_DOM_OBJC_BINDINGS) $(ENABLE_SVG_FONTS) $(ENABLE_TEXT_AUTOSIZING) $(ENABLE_TEXT_NOTIFICATIONS_ONLY) $(ENABLE_TOUCH_ICON_LOADING) $(ENABLE_UNDO_MANAGER) $(ENABLE_VIDEO) $(ENABLE_VIDEO_TRACK) $(ENABLE_WEBGL) $(ENABLE_WEB_AUDIO) $(ENABLE_WEB_SOCKETS) $(ENABLE_WEB_TIMING) $(ENABLE_WORKERS) $(ENABLE_XSLT); diff --git a/Source/JavaScriptCore/GNUmakefile.list.am b/Source/JavaScriptCore/GNUmakefile.list.am index 84d6e60ff..7e6056e97 100644 --- a/Source/JavaScriptCore/GNUmakefile.list.am +++ b/Source/JavaScriptCore/GNUmakefile.list.am @@ -92,6 +92,7 @@ javascriptcore_sources += \ Source/JavaScriptCore/bytecode/CodeBlock.cpp \ Source/JavaScriptCore/bytecode/CodeBlock.h \ Source/JavaScriptCore/bytecode/CodeOrigin.h \ + Source/JavaScriptCore/bytecode/Comment.h \ Source/JavaScriptCore/bytecode/DataFormat.h \ Source/JavaScriptCore/bytecode/DFGExitProfile.cpp \ Source/JavaScriptCore/bytecode/DFGExitProfile.h \ @@ -187,6 +188,9 @@ javascriptcore_sources += \ Source/JavaScriptCore/dfg/DFGInsertionSet.h \ Source/JavaScriptCore/dfg/DFGJITCompiler.cpp \ Source/JavaScriptCore/dfg/DFGJITCompiler.h \ + Source/JavaScriptCore/dfg/DFGMinifiedGraph.h \ + Source/JavaScriptCore/dfg/DFGMinifiedNode.cpp \ + Source/JavaScriptCore/dfg/DFGMinifiedNode.h \ Source/JavaScriptCore/dfg/DFGNode.h \ Source/JavaScriptCore/dfg/DFGNodeFlags.cpp \ 
Source/JavaScriptCore/dfg/DFGNodeFlags.h \ @@ -221,6 +225,13 @@ javascriptcore_sources += \ Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h \ Source/JavaScriptCore/dfg/DFGThunks.cpp \ Source/JavaScriptCore/dfg/DFGThunks.h \ + Source/JavaScriptCore/dfg/DFGValueRecoveryOverride.h \ + Source/JavaScriptCore/dfg/DFGValueSource.cpp \ + Source/JavaScriptCore/dfg/DFGValueSource.h \ + Source/JavaScriptCore/dfg/DFGVariableEvent.cpp \ + Source/JavaScriptCore/dfg/DFGVariableEvent.h \ + Source/JavaScriptCore/dfg/DFGVariableEventStream.cpp \ + Source/JavaScriptCore/dfg/DFGVariableEventStream.h \ Source/JavaScriptCore/dfg/DFGValidate.cpp \ Source/JavaScriptCore/dfg/DFGValidate.h \ Source/JavaScriptCore/dfg/DFGVariableAccessData.h \ @@ -254,6 +265,8 @@ javascriptcore_sources += \ Source/JavaScriptCore/heap/BlockAllocator.h \ Source/JavaScriptCore/heap/Heap.cpp \ Source/JavaScriptCore/heap/Heap.h \ + Source/JavaScriptCore/heap/JITStubRoutineSet.cpp \ + Source/JavaScriptCore/heap/JITStubRoutineSet.h \ Source/JavaScriptCore/heap/ListableHandler.h \ Source/JavaScriptCore/heap/Local.h \ Source/JavaScriptCore/heap/LocalScope.h \ @@ -343,6 +356,8 @@ javascriptcore_sources += \ Source/JavaScriptCore/jit/CompactJITCodeMap.h \ Source/JavaScriptCore/jit/ExecutableAllocator.cpp \ Source/JavaScriptCore/jit/ExecutableAllocator.h \ + Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp \ + Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h \ Source/JavaScriptCore/jit/HostCallReturnValue.cpp \ Source/JavaScriptCore/jit/HostCallReturnValue.h \ Source/JavaScriptCore/jit/JITArithmetic32_64.cpp \ @@ -362,6 +377,8 @@ javascriptcore_sources += \ Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp \ Source/JavaScriptCore/jit/JITPropertyAccess.cpp \ Source/JavaScriptCore/jit/JITStubCall.h \ + Source/JavaScriptCore/jit/JITStubRoutine.cpp \ + Source/JavaScriptCore/jit/JITStubRoutine.h \ Source/JavaScriptCore/jit/JITStubs.cpp \ Source/JavaScriptCore/jit/JITStubs.h \ Source/JavaScriptCore/jit/JITWriteBarrier.h 
\ @@ -571,6 +588,7 @@ javascriptcore_sources += \ Source/JavaScriptCore/runtime/PropertyName.h \ Source/JavaScriptCore/runtime/PropertyNameArray.cpp \ Source/JavaScriptCore/runtime/PropertyNameArray.h \ + Source/JavaScriptCore/runtime/PropertyOffset.h \ Source/JavaScriptCore/runtime/PropertySlot.cpp \ Source/JavaScriptCore/runtime/PropertySlot.h \ Source/JavaScriptCore/runtime/Protect.h \ diff --git a/Source/JavaScriptCore/JavaScriptCore.gyp/.gitignore b/Source/JavaScriptCore/JavaScriptCore.gyp/.gitignore index 9b06f58bf..f6c1348f3 100644 --- a/Source/JavaScriptCore/JavaScriptCore.gyp/.gitignore +++ b/Source/JavaScriptCore/JavaScriptCore.gyp/.gitignore @@ -2,4 +2,5 @@ *.mk *.sln *.vcproj* +*.vcxproj* JavaScriptCore.xcodeproj diff --git a/Source/JavaScriptCore/JavaScriptCore.pri b/Source/JavaScriptCore/JavaScriptCore.pri index f6580c51f..380bbaf1b 100644 --- a/Source/JavaScriptCore/JavaScriptCore.pri +++ b/Source/JavaScriptCore/JavaScriptCore.pri @@ -34,6 +34,12 @@ INCLUDEPATH += \ win32-* { LIBS += -lwinmm + + win32-g++* { + LIBS += -lpthreadGC2 + } else:win32-msvc* { + LIBS += -lpthreadVC2 + } } wince* { diff --git a/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def b/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def index 6a8a89372..c50013ac1 100755 --- a/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def +++ b/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.def @@ -6,11 +6,14 @@ EXPORTS ??0Collator@WTF@@QAE@PBD@Z ??0DateInstance@JSC@@IAE@PAVExecState@1@PAVStructure@1@@Z ??0DefaultGCActivityCallback@JSC@@QAE@PAVHeap@1@@Z - ??0DropAllLocks@JSLock@JSC@@QAE@W4JSLockBehavior@2@@Z + ??0DropAllLocks@JSLock@JSC@@QAE@PAVExecState@2@@Z + ??0DropAllLocks@JSLock@JSC@@QAE@PAVJSGlobalData@2@@Z ??0DynamicGlobalObjectScope@JSC@@QAE@AAVJSGlobalData@1@PAVJSGlobalObject@1@@Z ??0InternalFunction@JSC@@IAE@PAVJSGlobalObject@1@PAVStructure@1@@Z 
??0JSGlobalObject@JSC@@IAE@AAVJSGlobalData@1@PAVStructure@1@PBUGlobalObjectMethodTable@1@@Z - ??0JSLock@JSC@@QAE@PAVExecState@1@@Z + ??0JSLockHolder@JSC@@QAE@AAVJSGlobalData@1@@Z + ??0JSLockHolder@JSC@@QAE@PAVExecState@1@@Z + ??0JSLockHolder@JSC@@QAE@PAVJSGlobalData@1@@Z ??0MD5@WTF@@QAE@XZ ??0Mutex@WTF@@QAE@XZ ??0ParallelEnvironment@WTF@@QAE@P6AXPAX@ZIH@Z @@ -33,6 +36,7 @@ EXPORTS ??1DropAllLocks@JSLock@JSC@@QAE@XZ ??1JSGlobalData@JSC@@QAE@XZ ??1JSGlobalObject@JSC@@QAE@XZ + ??1JSLockHolder@JSC@@QAE@XZ ??1Mutex@WTF@@QAE@XZ ??1RefCountedLeakCounter@WTF@@QAE@XZ ??1SourceProviderCache@JSC@@QAE@XZ @@ -57,9 +61,9 @@ EXPORTS ?addBytes@SHA1@WTF@@QAEXPBEI@Z ?addCurrentThread@MachineThreads@JSC@@QAEXXZ ?addFinalizer@Heap@JSC@@QAEXPAVJSCell@2@P6AX0@Z@Z - ?addPropertyTransition@Structure@JSC@@SAPAV12@AAVJSGlobalData@2@PAV12@VPropertyName@2@IPAVJSCell@2@AAI@Z - ?addPropertyTransitionToExistingStructure@Structure@JSC@@SAPAV12@PAV12@VPropertyName@2@IPAVJSCell@2@AAI@Z - ?addPropertyWithoutTransition@Structure@JSC@@QAEIAAVJSGlobalData@2@VPropertyName@2@IPAVJSCell@2@@Z + ?addPropertyTransition@Structure@JSC@@SAPAV12@AAVJSGlobalData@2@PAV12@VPropertyName@2@IPAVJSCell@2@AAH@Z + ?addPropertyTransitionToExistingStructure@Structure@JSC@@SAPAV12@PAV12@VPropertyName@2@IPAVJSCell@2@AAH@Z + ?addPropertyWithoutTransition@Structure@JSC@@QAEHAAVJSGlobalData@2@VPropertyName@2@IPAVJSCell@2@@Z ?addSlowCase@Identifier@JSC@@CA?AV?$PassRefPtr@VStringImpl@WTF@@@WTF@@PAVExecState@2@PAVStringImpl@4@@Z ?addSlowCase@Identifier@JSC@@CA?AV?$PassRefPtr@VStringImpl@WTF@@@WTF@@PAVJSGlobalData@2@PAVStringImpl@4@@Z ?addStaticGlobals@JSGlobalObject@JSC@@IAEXPAUGlobalPropertyInfo@12@H@Z @@ -81,6 +85,7 @@ EXPORTS ?callHostFunctionAsConstructor@JSC@@YI_JPAVExecState@1@@Z ?callOnMainThread@WTF@@YAXP6AXPAX@Z0@Z ?callOnMainThreadAndWait@WTF@@YAXP6AXPAX@Z0@Z + ?canShrink@StringBuilder@WTF@@QBE_NXZ ?cancelCallOnMainThread@WTF@@YAXP6AXPAX@Z0@Z ?capacity@Heap@JSC@@QAEIXZ 
?changePrototypeTransition@Structure@JSC@@SAPAV12@AAVJSGlobalData@2@PAV12@VJSValue@2@@Z @@ -124,7 +129,6 @@ EXPORTS ?cryptographicallyRandomNumber@WTF@@YAIXZ ?cryptographicallyRandomValues@WTF@@YAXPAXI@Z ?currentThread@WTF@@YAIXZ - ?currentThreadIsHoldingLock@JSLock@JSC@@SA_NXZ ?currentTime@WTF@@YANXZ ?data@CString@WTF@@QBEPBDXZ ?dataLog@WTF@@YAXPBDZZ @@ -155,6 +159,7 @@ EXPORTS ?deleteAllCompiledCode@Heap@JSC@@QAEXXZ ?displayName@JSFunction@JSC@@QAE?BVUString@2@PAVExecState@2@@Z ?dtoa@WTF@@YAXQADNAA_NAAHAAI@Z + ?dumpAllOptions@Options@JSC@@SAXPAU_iobuf@@@Z ?dumpSampleData@JSGlobalData@JSC@@QAEXPAVExecState@2@@Z ?empty@StringImpl@WTF@@SAPAV12@XZ ?enumerable@PropertyDescriptor@JSC@@QBE_NXZ @@ -187,7 +192,6 @@ EXPORTS ?from@Identifier@JSC@@SA?AV12@PAVExecState@2@I@Z ?functionGetter@PropertySlot@JSC@@ABE?AVJSValue@2@PAVExecState@2@@Z ?functionName@DebuggerCallFrame@JSC@@QBEPBVUString@2@XZ - ?get@Structure@JSC@@QAEIAAVJSGlobalData@2@VPropertyName@2@AAIAAPAVJSCell@2@@Z ?getCalculatedDisplayName@JSC@@YA?AVUString@1@PAVExecState@1@PAVJSObject@1@@Z ?getCallData@JSCell@JSC@@SA?AW4CallType@2@PAV12@AATCallData@2@@Z ?getCallableObjectSlow@JSC@@YAPAVJSCell@1@PAV21@@Z @@ -206,11 +210,12 @@ EXPORTS ?getStackTrace@Interpreter@JSC@@SAXPAVJSGlobalData@2@AAV?$Vector@UStackFrame@JSC@@$0A@@WTF@@@Z ?getString@JSCell@JSC@@QBE?AVUString@2@PAVExecState@2@@Z ?getString@JSCell@JSC@@QBE_NPAVExecState@2@AAVUString@2@@Z + ?get@Structure@JSC@@QAEHAAVJSGlobalData@2@VPropertyName@2@AAIAAPAVJSCell@2@@Z ?getter@PropertyDescriptor@JSC@@QBE?AVJSValue@2@XZ ?globalExec@JSGlobalObject@JSC@@QAEPAVExecState@2@XZ ?globalObjectCount@Heap@JSC@@QAEIXZ + ?growOutOfLineStorage@JSObject@JSC@@QAEPAV?$WriteBarrierBase@W4Unknown@JSC@@@2@AAVJSGlobalData@2@II@Z ?grow@HandleSet@JSC@@AAEXXZ - ?growPropertyStorage@JSObject@JSC@@QAEPAV?$WriteBarrierBase@W4Unknown@JSC@@@2@AAVJSGlobalData@2@II@Z ?hasInstance@JSObject@JSC@@SA_NPAV12@PAVExecState@2@VJSValue@2@2@Z ?hasProperty@JSObject@JSC@@QBE_NPAVExecState@2@I@Z 
?hasProperty@JSObject@JSC@@QBE_NPAVExecState@2@VPropertyName@2@@Z @@ -236,10 +241,9 @@ EXPORTS ?jsOwnedString@JSC@@YAPAVJSString@1@PAVJSGlobalData@1@ABVUString@1@@Z ?jsString@JSC@@YAPAVJSString@1@PAVJSGlobalData@1@ABVUString@1@@Z ?length@CString@WTF@@QBEIXZ - ?lock@JSLock@JSC@@SAXW4JSLockBehavior@2@@Z + ?lock@JSLock@JSC@@QAEXXZ ?lock@Mutex@WTF@@QAEXXZ ?lockAtomicallyInitializedStaticMutex@WTF@@YAXXZ - ?lockCount@JSLock@JSC@@SAHXZ ?match@RegExp@JSC@@QAEHAAVJSGlobalData@2@ABVUString@2@IAAV?$Vector@H$0CA@@WTF@@@Z ?materializePropertyMap@Structure@JSC@@AAEXAAVJSGlobalData@2@@Z ?monotonicallyIncreasingTime@WTF@@YANXZ @@ -302,6 +306,7 @@ EXPORTS ?setGetter@PropertyDescriptor@JSC@@QAEXVJSValue@2@@Z ?setLoc@StatementNode@JSC@@QAEXHH@Z ?setMainThreadCallbacksPaused@WTF@@YAX_N@Z + ?setOption@Options@JSC@@SA_NPBD@Z ?setOrderLowerFirst@Collator@WTF@@QAEX_N@Z ?setPrototype@JSObject@JSC@@QAEXAAVJSGlobalData@2@VJSValue@2@@Z ?setSetter@PropertyDescriptor@JSC@@QAEXVJSValue@2@@Z @@ -323,7 +328,8 @@ EXPORTS ?stopProfiling@Profiler@JSC@@QAE?AV?$PassRefPtr@VProfile@JSC@@@WTF@@PAVExecState@2@ABVUString@2@@Z ?stopSampling@JSGlobalData@JSC@@QAEXXZ ?substringSharingImpl@UString@JSC@@QBE?AV12@II@Z - ?suggestedNewPropertyStorageSize@Structure@JSC@@QAEIXZ + ?suggestedNewOutOfLineStorageCapacity@Structure@JSC@@QAEIXZ + ?sweeper@Heap@JSC@@QAEPAVIncrementalSweeper@2@XZ ?synthesizePrototype@JSValue@JSC@@QBEPAVJSObject@2@PAVExecState@2@@Z ?thisObject@DebuggerCallFrame@JSC@@QBEPAVJSObject@2@XZ ?throwError@JSC@@YA?AVJSValue@1@PAVExecState@1@V21@@Z @@ -353,7 +359,7 @@ EXPORTS ?tryFinishCreationUninitialized@JSArray@JSC@@IAEPAV12@AAVJSGlobalData@2@I@Z ?tryLock@Mutex@WTF@@QAE_NXZ ?type@DebuggerCallFrame@JSC@@QBE?AW4Type@12@XZ - ?unlock@JSLock@JSC@@SAXW4JSLockBehavior@2@@Z + ?unlock@JSLock@JSC@@QAEXXZ ?unlock@Mutex@WTF@@QAEXXZ ?unlockAtomicallyInitializedStaticMutex@WTF@@YAXXZ ?unprotect@Heap@JSC@@QAE_NVJSValue@2@@Z diff --git 
a/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj b/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj index 78ca7dd6e..92749eb75 100644 --- a/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj +++ b/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCore.vcproj @@ -1094,6 +1094,10 @@ > </File> <File + RelativePath="..\..\runtime\PropertyOffset.h" + > + </File> + <File RelativePath="..\..\runtime\PropertySlot.cpp" > </File> @@ -1562,6 +1566,10 @@ > </File> <File + RelativePath="..\..\bytecode\Comment.h" + > + </File> + <File RelativePath="..\..\bytecode\EvalCodeCache.h" > </File> @@ -1850,6 +1858,14 @@ > </File> <File + RelativePath="..\..\jit\GCAwareJITStubRoutine.cpp" + > + </File> + <File + RelativePath="..\..\jit\GCAwareJITStubRoutine.h" + > + </File> + <File RelativePath="..\..\jit\JIT.cpp" > </File> @@ -1910,6 +1926,14 @@ > </File> <File + RelativePath="..\..\jit\JITStubRoutine.cpp" + > + </File> + <File + RelativePath="..\..\jit\JITStubRoutine.h" + > + </File> + <File RelativePath="..\..\jit\JITStubs.cpp" > </File> @@ -2206,6 +2230,14 @@ > </File> <File + RelativePath="..\..\heap\JITStubRoutineSet.cpp" + > + </File> + <File + RelativePath="..\..\heap\JITStubRoutineSet.h" + > + </File> + <File RelativePath="..\..\heap\IncrementalSweeper.h" > </File> diff --git a/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCoreCommon.vsprops b/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCoreCommon.vsprops index df0724e7c..7d2461d11 100644 --- a/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCoreCommon.vsprops +++ b/Source/JavaScriptCore/JavaScriptCore.vcproj/JavaScriptCore/JavaScriptCoreCommon.vsprops @@ -7,7 +7,7 @@ <Tool Name="VCCLCompilerTool" 
AdditionalIncludeDirectories=""$(ConfigurationBuildDir)\obj\JavaScriptCore\DerivedSources\";../../;../../API/;../../parser/;../../bytecompiler/;../../dfg/;../../disassembler;../../jit/;../../llint/;../../runtime/;../../tools/;../../bytecode/;../../interpreter/;../../wtf/;../../profiler;../../assembler/;../../debugger/;../../heap/;"$(WebKitLibrariesDir)\include";"$(WebKitLibrariesDir)\include\private";"$(ConfigurationBuildDir)\include";"$(ConfigurationBuildDir)\include\JavaScriptCore";"$(ConfigurationBuildDir)\include\private";"$(ConfigurationBuildDir)\include\private\JavaScriptCore";"$(WebKitLibrariesDir)\include\pthreads"" - PreprocessorDefinitions="__STD_C" + PreprocessorDefinitions="STATICALLY_LINKED_WITH_WTF;__STD_C" ForcedIncludeFiles="ICUVersion.h" /> <Tool diff --git a/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj b/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj index a64a1065f..188c1ffe7 100644 --- a/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj +++ b/Source/JavaScriptCore/JavaScriptCore.xcodeproj/project.pbxproj @@ -92,6 +92,16 @@ 0F2BDC16151C5D4F00CD8910 /* DFGFixupPhase.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F2BDC13151C5D4A00CD8910 /* DFGFixupPhase.h */; settings = {ATTRIBUTES = (Private, ); }; }; 0F2BDC21151E803B00CD8910 /* DFGInsertionSet.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F2BDC1F151E803800CD8910 /* DFGInsertionSet.h */; settings = {ATTRIBUTES = (Private, ); }; }; 0F2BDC2C151FDE9100CD8910 /* Operands.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F2BDC2B151FDE8B00CD8910 /* Operands.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 0F2BDC451522801B00CD8910 /* DFGMinifiedGraph.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F2BDC3D1522801700CD8910 /* DFGMinifiedGraph.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 0F2BDC461522802000CD8910 /* DFGMinifiedNode.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F2BDC3E1522801700CD8910 /* DFGMinifiedNode.h */; settings = 
{ATTRIBUTES = (Private, ); }; }; + 0F2BDC471522802500CD8910 /* DFGValueRecoveryOverride.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F2BDC3F1522801700CD8910 /* DFGValueRecoveryOverride.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 0F2BDC481522802900CD8910 /* DFGValueSource.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F2BDC401522801700CD8910 /* DFGValueSource.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 0F2BDC491522809600CD8910 /* DFGVariableEvent.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F2BDC411522801700CD8910 /* DFGVariableEvent.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 0F2BDC4A1522809A00CD8910 /* DFGVariableEventStream.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F2BDC421522801700CD8910 /* DFGVariableEventStream.cpp */; }; + 0F2BDC4B1522809D00CD8910 /* DFGVariableEventStream.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F2BDC431522801700CD8910 /* DFGVariableEventStream.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 0F2BDC4D1522818600CD8910 /* DFGMinifiedNode.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F2BDC4C1522818300CD8910 /* DFGMinifiedNode.cpp */; }; + 0F2BDC4F15228BF300CD8910 /* DFGValueSource.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F2BDC4E15228BE700CD8910 /* DFGValueSource.cpp */; }; + 0F2BDC5115228FFD00CD8910 /* DFGVariableEvent.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F2BDC5015228FFA00CD8910 /* DFGVariableEvent.cpp */; }; 0F2C556F14738F3100121E4F /* DFGCodeBlocks.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F2C556E14738F2E00121E4F /* DFGCodeBlocks.h */; settings = {ATTRIBUTES = (Private, ); }; }; 0F2C557014738F3500121E4F /* DFGCodeBlocks.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F2C556D14738F2E00121E4F /* DFGCodeBlocks.cpp */; }; 0F3B3A1A153E68F2003ED0FF /* DFGConstantFoldingPhase.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F3B3A17153E68EF003ED0FF /* DFGConstantFoldingPhase.cpp */; }; @@ -130,6 +140,12 @@ 0F620179143FCD480068B77C /* 
DFGAbstractState.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F62016D143FCD2F0068B77C /* DFGAbstractState.cpp */; }; 0F66E16B14DF3F1600B7B2E4 /* DFGAdjacencyList.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F66E16814DF3F1300B7B2E4 /* DFGAdjacencyList.h */; settings = {ATTRIBUTES = (Private, ); }; }; 0F66E16C14DF3F1600B7B2E4 /* DFGEdge.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F66E16914DF3F1300B7B2E4 /* DFGEdge.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 0F766D2815A8CC1E008F363E /* JITStubRoutine.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F766D2615A8CC1B008F363E /* JITStubRoutine.cpp */; }; + 0F766D2B15A8CC38008F363E /* JITStubRoutineSet.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F766D2915A8CC34008F363E /* JITStubRoutineSet.cpp */; }; + 0F766D2C15A8CC3A008F363E /* JITStubRoutineSet.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F766D2A15A8CC34008F363E /* JITStubRoutineSet.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 0F766D2F15A8DCE0008F363E /* GCAwareJITStubRoutine.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F766D2D15A8DCDD008F363E /* GCAwareJITStubRoutine.cpp */; }; + 0F766D3015A8DCE2008F363E /* GCAwareJITStubRoutine.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F766D2E15A8DCDD008F363E /* GCAwareJITStubRoutine.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 0F766D3115AA8112008F363E /* JITStubRoutine.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F766D1C15A5028D008F363E /* JITStubRoutine.h */; settings = {ATTRIBUTES = (Private, ); }; }; 0F7700921402FF3C0078EB39 /* SamplingCounter.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F7700911402FF280078EB39 /* SamplingCounter.cpp */; }; 0F7B294A14C3CD29007C3DB1 /* DFGCCallHelpers.h in Headers */ = {isa = PBXBuildFile; fileRef = 0F7B294814C3CD23007C3DB1 /* DFGCCallHelpers.h */; settings = {ATTRIBUTES = (Private, ); }; }; 0F7B294B14C3CD2F007C3DB1 /* DFGCapabilities.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FD82E1F14172C2F00179C94 /* 
DFGCapabilities.h */; settings = {ATTRIBUTES = (Private, ); }; }; @@ -213,6 +229,7 @@ 0FF427651591A1CE004CB9FF /* DFGDisassembler.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FF427621591A1C9004CB9FF /* DFGDisassembler.h */; settings = {ATTRIBUTES = (Private, ); }; }; 0FF42771159275D5004CB9FF /* ResolveGlobalStatus.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FF4276E159275D2004CB9FF /* ResolveGlobalStatus.cpp */; }; 0FF42772159275D8004CB9FF /* ResolveGlobalStatus.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FF4276F159275D2004CB9FF /* ResolveGlobalStatus.h */; settings = {ATTRIBUTES = (Private, ); }; }; + 0FF7168C15A3B235008F5DAA /* PropertyOffset.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FF7168A15A3B231008F5DAA /* PropertyOffset.h */; settings = {ATTRIBUTES = (Private, ); }; }; 0FF922D414F46B410041A24E /* LLIntOffsetsExtractor.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0F4680A114BA7F8200BFE272 /* LLIntOffsetsExtractor.cpp */; }; 0FFFC95714EF90A000C72532 /* DFGCFAPhase.cpp in Sources */ = {isa = PBXBuildFile; fileRef = 0FFFC94B14EF909500C72532 /* DFGCFAPhase.cpp */; }; 0FFFC95814EF90A200C72532 /* DFGCFAPhase.h in Headers */ = {isa = PBXBuildFile; fileRef = 0FFFC94C14EF909500C72532 /* DFGCFAPhase.h */; settings = {ATTRIBUTES = (Private, ); }; }; @@ -814,6 +831,16 @@ 0F2BDC13151C5D4A00CD8910 /* DFGFixupPhase.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGFixupPhase.h; path = dfg/DFGFixupPhase.h; sourceTree = "<group>"; }; 0F2BDC1F151E803800CD8910 /* DFGInsertionSet.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGInsertionSet.h; path = dfg/DFGInsertionSet.h; sourceTree = "<group>"; }; 0F2BDC2B151FDE8B00CD8910 /* Operands.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Operands.h; sourceTree = "<group>"; }; + 0F2BDC3D1522801700CD8910 /* DFGMinifiedGraph.h */ = {isa = PBXFileReference; fileEncoding = 
4; lastKnownFileType = sourcecode.c.h; name = DFGMinifiedGraph.h; path = dfg/DFGMinifiedGraph.h; sourceTree = "<group>"; }; + 0F2BDC3E1522801700CD8910 /* DFGMinifiedNode.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGMinifiedNode.h; path = dfg/DFGMinifiedNode.h; sourceTree = "<group>"; }; + 0F2BDC3F1522801700CD8910 /* DFGValueRecoveryOverride.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGValueRecoveryOverride.h; path = dfg/DFGValueRecoveryOverride.h; sourceTree = "<group>"; }; + 0F2BDC401522801700CD8910 /* DFGValueSource.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGValueSource.h; path = dfg/DFGValueSource.h; sourceTree = "<group>"; }; + 0F2BDC411522801700CD8910 /* DFGVariableEvent.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGVariableEvent.h; path = dfg/DFGVariableEvent.h; sourceTree = "<group>"; }; + 0F2BDC421522801700CD8910 /* DFGVariableEventStream.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGVariableEventStream.cpp; path = dfg/DFGVariableEventStream.cpp; sourceTree = "<group>"; }; + 0F2BDC431522801700CD8910 /* DFGVariableEventStream.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGVariableEventStream.h; path = dfg/DFGVariableEventStream.h; sourceTree = "<group>"; }; + 0F2BDC4C1522818300CD8910 /* DFGMinifiedNode.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGMinifiedNode.cpp; path = dfg/DFGMinifiedNode.cpp; sourceTree = "<group>"; }; + 0F2BDC4E15228BE700CD8910 /* DFGValueSource.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGValueSource.cpp; path = dfg/DFGValueSource.cpp; sourceTree = "<group>"; }; + 0F2BDC5015228FFA00CD8910 /* DFGVariableEvent.cpp */ = 
{isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGVariableEvent.cpp; path = dfg/DFGVariableEvent.cpp; sourceTree = "<group>"; }; 0F2C556D14738F2E00121E4F /* DFGCodeBlocks.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = DFGCodeBlocks.cpp; sourceTree = "<group>"; }; 0F2C556E14738F2E00121E4F /* DFGCodeBlocks.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = DFGCodeBlocks.h; sourceTree = "<group>"; }; 0F3B3A17153E68EF003ED0FF /* DFGConstantFoldingPhase.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGConstantFoldingPhase.cpp; path = dfg/DFGConstantFoldingPhase.cpp; sourceTree = "<group>"; }; @@ -854,6 +881,12 @@ 0F620172143FCD2F0068B77C /* DFGVariableAccessData.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGVariableAccessData.h; path = dfg/DFGVariableAccessData.h; sourceTree = "<group>"; }; 0F66E16814DF3F1300B7B2E4 /* DFGAdjacencyList.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGAdjacencyList.h; path = dfg/DFGAdjacencyList.h; sourceTree = "<group>"; }; 0F66E16914DF3F1300B7B2E4 /* DFGEdge.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGEdge.h; path = dfg/DFGEdge.h; sourceTree = "<group>"; }; + 0F766D1C15A5028D008F363E /* JITStubRoutine.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JITStubRoutine.h; sourceTree = "<group>"; }; + 0F766D2615A8CC1B008F363E /* JITStubRoutine.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JITStubRoutine.cpp; sourceTree = "<group>"; }; + 0F766D2915A8CC34008F363E /* JITStubRoutineSet.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JITStubRoutineSet.cpp; sourceTree = 
"<group>"; }; + 0F766D2A15A8CC34008F363E /* JITStubRoutineSet.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = JITStubRoutineSet.h; sourceTree = "<group>"; }; + 0F766D2D15A8DCDD008F363E /* GCAwareJITStubRoutine.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = GCAwareJITStubRoutine.cpp; sourceTree = "<group>"; }; + 0F766D2E15A8DCDD008F363E /* GCAwareJITStubRoutine.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = GCAwareJITStubRoutine.h; sourceTree = "<group>"; }; 0F77008E1402FDD60078EB39 /* SamplingCounter.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = SamplingCounter.h; sourceTree = "<group>"; }; 0F7700911402FF280078EB39 /* SamplingCounter.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = SamplingCounter.cpp; sourceTree = "<group>"; }; 0F7B294814C3CD23007C3DB1 /* DFGCCallHelpers.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGCCallHelpers.h; path = dfg/DFGCCallHelpers.h; sourceTree = "<group>"; }; @@ -937,6 +970,7 @@ 0FF427621591A1C9004CB9FF /* DFGDisassembler.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGDisassembler.h; path = dfg/DFGDisassembler.h; sourceTree = "<group>"; }; 0FF4276E159275D2004CB9FF /* ResolveGlobalStatus.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; path = ResolveGlobalStatus.cpp; sourceTree = "<group>"; }; 0FF4276F159275D2004CB9FF /* ResolveGlobalStatus.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = ResolveGlobalStatus.h; sourceTree = "<group>"; }; + 0FF7168A15A3B231008F5DAA /* PropertyOffset.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = PropertyOffset.h; sourceTree = "<group>"; }; 
0FF922CF14F46B130041A24E /* JSCLLIntOffsetsExtractor */ = {isa = PBXFileReference; explicitFileType = "compiled.mach-o.executable"; includeInIndex = 0; path = JSCLLIntOffsetsExtractor; sourceTree = BUILT_PRODUCTS_DIR; }; 0FFFC94B14EF909500C72532 /* DFGCFAPhase.cpp */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.cpp.cpp; name = DFGCFAPhase.cpp; path = dfg/DFGCFAPhase.cpp; sourceTree = "<group>"; }; 0FFFC94C14EF909500C72532 /* DFGCFAPhase.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; name = DFGCFAPhase.h; path = dfg/DFGCFAPhase.h; sourceTree = "<group>"; }; @@ -1446,6 +1480,7 @@ F692A8850255597D01FF60F7 /* UString.cpp */ = {isa = PBXFileReference; fileEncoding = 30; indentWidth = 4; lastKnownFileType = sourcecode.cpp.cpp; path = UString.cpp; sourceTree = "<group>"; tabWidth = 8; }; F692A8860255597D01FF60F7 /* UString.h */ = {isa = PBXFileReference; fileEncoding = 30; indentWidth = 4; lastKnownFileType = sourcecode.c.h; path = UString.h; sourceTree = "<group>"; tabWidth = 8; }; F692A8870255597D01FF60F7 /* JSValue.cpp */ = {isa = PBXFileReference; fileEncoding = 30; indentWidth = 4; lastKnownFileType = sourcecode.cpp.cpp; path = JSValue.cpp; sourceTree = "<group>"; tabWidth = 8; }; + FEB63AA2159B9DA3008932A6 /* Comment.h */ = {isa = PBXFileReference; fileEncoding = 4; lastKnownFileType = sourcecode.c.h; path = Comment.h; sourceTree = "<group>"; }; /* End PBXFileReference section */ /* Begin PBXFrameworksBuildPhase section */ @@ -1669,6 +1704,8 @@ A7B48DB60EE74CFC00DCBDB6 /* ExecutableAllocator.cpp */, A7B48DB50EE74CFC00DCBDB6 /* ExecutableAllocator.h */, 86DB64630F95C6FC00D7D921 /* ExecutableAllocatorFixedVMPool.cpp */, + 0F766D2D15A8DCDD008F363E /* GCAwareJITStubRoutine.cpp */, + 0F766D2E15A8DCDD008F363E /* GCAwareJITStubRoutine.h */, 0F4680D014BBC5F800BFE272 /* HostCallReturnValue.cpp */, 0F4680D114BBC5F800BFE272 /* HostCallReturnValue.h */, 1429D92D0ED22D7000B89619 /* JIT.cpp */, @@ -1688,6 
+1725,8 @@ 86CC85C30EE7A89400288682 /* JITPropertyAccess.cpp */, A7C1E8C8112E701C00A37F98 /* JITPropertyAccess32_64.cpp */, 960626950FB8EC02009798AB /* JITStubCall.h */, + 0F766D2615A8CC1B008F363E /* JITStubRoutine.cpp */, + 0F766D1C15A5028D008F363E /* JITStubRoutine.h */, 14A23D6C0F4E19CE0023CDAD /* JITStubs.cpp */, 14A6581A0F4E36F4000150FD /* JITStubs.h */, A76F54A213B28AAB00EF2BCE /* JITWriteBarrier.h */, @@ -1729,6 +1768,8 @@ 14BA7A9613AADFF8005B7C2C /* Heap.h */, C2C8D02F14A3CEFC00578E65 /* HeapBlock.h */, 14F97446138C853E00DA1C67 /* HeapRootVisitor.h */, + 0F766D2915A8CC34008F363E /* JITStubRoutineSet.cpp */, + 0F766D2A15A8CC34008F363E /* JITStubRoutineSet.h */, 0F431736146BAC65007E3890 /* ListableHandler.h */, 142E3130134FF0A600AFADB5 /* Local.h */, 142E3131134FF0A600AFADB5 /* LocalScope.h */, @@ -2086,6 +2127,7 @@ 86158AB2155C8B3F00B45C9C /* PropertyName.h */, 65400C0F0A69BAF200509887 /* PropertyNameArray.cpp */, 65400C100A69BAF200509887 /* PropertyNameArray.h */, + 0FF7168A15A3B231008F5DAA /* PropertyOffset.h */, 65621E6B089E859700760F35 /* PropertySlot.cpp */, 65621E6C089E859700760F35 /* PropertySlot.h */, 65C02FBB0637462A003E7EE6 /* Protect.h */, @@ -2226,6 +2268,9 @@ 0F2BDC1F151E803800CD8910 /* DFGInsertionSet.h */, 86EC9DBB1328DF82002B2AD7 /* DFGJITCompiler.cpp */, 86EC9DBC1328DF82002B2AD7 /* DFGJITCompiler.h */, + 0F2BDC3D1522801700CD8910 /* DFGMinifiedGraph.h */, + 0F2BDC4C1522818300CD8910 /* DFGMinifiedNode.cpp */, + 0F2BDC3E1522801700CD8910 /* DFGMinifiedNode.h */, 86ECA3E9132DEF1C002B2AD7 /* DFGNode.h */, 0FA581B7150E952A00B9A2D9 /* DFGNodeFlags.cpp */, 0FA581B8150E952A00B9A2D9 /* DFGNodeFlags.h */, @@ -2260,9 +2305,16 @@ 86880F4C14353B2100B08D42 /* DFGSpeculativeJIT64.cpp */, 0FC0979F146B28C700CF2442 /* DFGThunks.cpp */, 0FC097A0146B28C700CF2442 /* DFGThunks.h */, + 0F2BDC3F1522801700CD8910 /* DFGValueRecoveryOverride.h */, + 0F2BDC4E15228BE700CD8910 /* DFGValueSource.cpp */, + 0F2BDC401522801700CD8910 /* DFGValueSource.h */, 
0F3B3A2915474FF4003ED0FF /* DFGValidate.cpp */, 0F3B3A2A15474FF4003ED0FF /* DFGValidate.h */, 0F620172143FCD2F0068B77C /* DFGVariableAccessData.h */, + 0F2BDC5015228FFA00CD8910 /* DFGVariableEvent.cpp */, + 0F2BDC411522801700CD8910 /* DFGVariableEvent.h */, + 0F2BDC421522801700CD8910 /* DFGVariableEventStream.cpp */, + 0F2BDC431522801700CD8910 /* DFGVariableEventStream.h */, 0FFFC95314EF909500C72532 /* DFGVirtualRegisterAllocationPhase.cpp */, 0FFFC95414EF909500C72532 /* DFGVirtualRegisterAllocationPhase.h */, ); @@ -2360,6 +2412,7 @@ 0F0B83B314BCF85E00885B4F /* MethodCallLinkInfo.h */, 0F0B83AE14BCF71400885B4F /* CallLinkInfo.cpp */, 0F0B83AF14BCF71400885B4F /* CallLinkInfo.h */, + FEB63AA2159B9DA3008932A6 /* Comment.h */, 0F0B83AC14BCF60200885B4F /* LineInfo.h */, 0F0B83AA14BCF5B900885B4F /* ExpressionRangeInfo.h */, 0F0B83A814BCF55E00885B4F /* HandlerInfo.h */, @@ -2745,6 +2798,12 @@ 0F2BDC16151C5D4F00CD8910 /* DFGFixupPhase.h in Headers */, 0F2BDC21151E803B00CD8910 /* DFGInsertionSet.h in Headers */, 0F2BDC2C151FDE9100CD8910 /* Operands.h in Headers */, + 0F2BDC451522801B00CD8910 /* DFGMinifiedGraph.h in Headers */, + 0F2BDC461522802000CD8910 /* DFGMinifiedNode.h in Headers */, + 0F2BDC471522802500CD8910 /* DFGValueRecoveryOverride.h in Headers */, + 0F2BDC481522802900CD8910 /* DFGValueSource.h in Headers */, + 0F2BDC491522809600CD8910 /* DFGVariableEvent.h in Headers */, + 0F2BDC4B1522809D00CD8910 /* DFGVariableEventStream.h in Headers */, 8612E4CD152389EC00C836BE /* MatchResult.h in Headers */, 0F1E3A461534CBAF000F9456 /* DFGArgumentPosition.h in Headers */, 0F3B3A1B153E68F4003ED0FF /* DFGConstantFoldingPhase.h in Headers */, @@ -2773,6 +2832,10 @@ 0FF4274B158EBE91004CB9FF /* udis86.h in Headers */, 0FF427651591A1CE004CB9FF /* DFGDisassembler.h in Headers */, 0FF42772159275D8004CB9FF /* ResolveGlobalStatus.h in Headers */, + 0FF7168C15A3B235008F5DAA /* PropertyOffset.h in Headers */, + 0F766D2C15A8CC3A008F363E /* JITStubRoutineSet.h in Headers */, + 
0F766D3015A8DCE2008F363E /* GCAwareJITStubRoutine.h in Headers */, + 0F766D3115AA8112008F363E /* JITStubRoutine.h in Headers */, ); runOnlyForDeploymentPostprocessing = 0; }; @@ -3334,6 +3397,10 @@ 8642C510151C06A90046D4EF /* RegExpCachedResult.cpp in Sources */, 8642C512151C083D0046D4EF /* RegExpMatchesArray.cpp in Sources */, 863C6D9C1521111A00585E4E /* YarrCanonicalizeUCS2.cpp in Sources */, + 0F2BDC4A1522809A00CD8910 /* DFGVariableEventStream.cpp in Sources */, + 0F2BDC4D1522818600CD8910 /* DFGMinifiedNode.cpp in Sources */, + 0F2BDC4F15228BF300CD8910 /* DFGValueSource.cpp in Sources */, + 0F2BDC5115228FFD00CD8910 /* DFGVariableEvent.cpp in Sources */, 14816E1B154CC56C00B8054C /* BlockAllocator.cpp in Sources */, 86EBF2FF1560F06A008E9222 /* NameConstructor.cpp in Sources */, 86EBF3011560F06A008E9222 /* NameInstance.cpp in Sources */, @@ -3361,6 +3428,9 @@ C2D58C3415912FEE0021A844 /* GCActivityCallback.cpp in Sources */, 0FF427641591A1CC004CB9FF /* DFGDisassembler.cpp in Sources */, 0FF42771159275D5004CB9FF /* ResolveGlobalStatus.cpp in Sources */, + 0F766D2815A8CC1E008F363E /* JITStubRoutine.cpp in Sources */, + 0F766D2B15A8CC38008F363E /* JITStubRoutineSet.cpp in Sources */, + 0F766D2F15A8DCE0008F363E /* GCAwareJITStubRoutine.cpp in Sources */, ); runOnlyForDeploymentPostprocessing = 0; }; diff --git a/Source/JavaScriptCore/Target.pri b/Source/JavaScriptCore/Target.pri index 2f479222a..b019e417e 100644 --- a/Source/JavaScriptCore/Target.pri +++ b/Source/JavaScriptCore/Target.pri @@ -83,6 +83,7 @@ SOURCES += \ heap/Heap.cpp \ heap/HeapTimer.cpp \ heap/IncrementalSweeper.cpp \ + heap/JITStubRoutineSet.cpp \ heap/MachineStackMarker.cpp \ heap/MarkStack.cpp \ heap/MarkedAllocator.cpp \ @@ -109,6 +110,7 @@ SOURCES += \ dfg/DFGFixupPhase.cpp \ dfg/DFGGraph.cpp \ dfg/DFGJITCompiler.cpp \ + dfg/DFGMinifiedNode.cpp \ dfg/DFGNodeFlags.cpp \ dfg/DFGOperations.cpp \ dfg/DFGOSREntry.cpp \ @@ -124,6 +126,9 @@ SOURCES += \ dfg/DFGSpeculativeJIT32_64.cpp \ 
dfg/DFGSpeculativeJIT64.cpp \ dfg/DFGThunks.cpp \ + dfg/DFGValueSource.cpp \ + dfg/DFGVariableEvent.cpp \ + dfg/DFGVariableEventStream.cpp \ dfg/DFGValidate.cpp \ dfg/DFGVirtualRegisterAllocationPhase.cpp \ interpreter/AbstractPC.cpp \ @@ -133,6 +138,7 @@ SOURCES += \ jit/ExecutableAllocatorFixedVMPool.cpp \ jit/ExecutableAllocator.cpp \ jit/HostCallReturnValue.cpp \ + jit/GCAwareJITStubRoutine.cpp \ jit/JITArithmetic.cpp \ jit/JITArithmetic32_64.cpp \ jit/JITCall.cpp \ @@ -143,6 +149,7 @@ SOURCES += \ jit/JITOpcodes32_64.cpp \ jit/JITPropertyAccess.cpp \ jit/JITPropertyAccess32_64.cpp \ + jit/JITStubRoutine.cpp \ jit/JITStubs.cpp \ jit/ThunkGenerators.cpp \ parser/Lexer.cpp \ @@ -248,6 +255,8 @@ SOURCES += \ tools/CodeProfiling.cpp \ yarr/YarrJIT.cpp \ +HEADERS += $$files(*.h, true) + *sh4* { QMAKE_CXXFLAGS += -mieee -w QMAKE_CFLAGS += -mieee -w @@ -259,15 +268,4 @@ lessThan(QT_GCC_MAJOR_VERSION, 5) { # Disable C++0x mode in JSC for those who enabled it in their Qt's mkspec. *-g++*:QMAKE_CXXFLAGS -= -std=c++0x -std=gnu++0x } - - # GCC 4.6 and after. - greaterThan(QT_GCC_MINOR_VERSION, 5) { - if (!contains(QMAKE_CXXFLAGS, -std=c++0x) && !contains(QMAKE_CXXFLAGS, -std=gnu++0x)) { - # We need to deactivate those warnings because some names conflicts with upcoming c++0x types (e.g.nullptr). 
- QMAKE_CFLAGS_WARN_ON += -Wno-c++0x-compat - QMAKE_CXXFLAGS_WARN_ON += -Wno-c++0x-compat - QMAKE_CFLAGS += -Wno-c++0x-compat - QMAKE_CXXFLAGS += -Wno-c++0x-compat - } - } } diff --git a/Source/JavaScriptCore/assembler/ARMAssembler.cpp b/Source/JavaScriptCore/assembler/ARMAssembler.cpp index 74809cadb..362fcc630 100644 --- a/Source/JavaScriptCore/assembler/ARMAssembler.cpp +++ b/Source/JavaScriptCore/assembler/ARMAssembler.cpp @@ -262,86 +262,117 @@ ARMWord ARMAssembler::encodeComplexImm(ARMWord imm, int dest) // Memory load/store helpers -void ARMAssembler::dataTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, int32_t offset, bool bytes) +void ARMAssembler::dataTransfer32(DataTransferTypeA transferType, RegisterID srcDst, RegisterID base, int32_t offset) { - ARMWord transferFlag = bytes ? DT_BYTE : 0; if (offset >= 0) { if (offset <= 0xfff) - dtr_u(isLoad, srcDst, base, offset | transferFlag); + dtr_u(transferType, srcDst, base, offset); else if (offset <= 0xfffff) { add_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 12) | (10 << 8)); - dtr_u(isLoad, srcDst, ARMRegisters::S0, (offset & 0xfff) | transferFlag); + dtr_u(transferType, srcDst, ARMRegisters::S0, (offset & 0xfff)); } else { moveImm(offset, ARMRegisters::S0); - dtr_ur(isLoad, srcDst, base, ARMRegisters::S0 | transferFlag); + dtr_ur(transferType, srcDst, base, ARMRegisters::S0); } } else { if (offset >= -0xfff) - dtr_d(isLoad, srcDst, base, -offset | transferFlag); + dtr_d(transferType, srcDst, base, -offset); else if (offset >= -0xfffff) { sub_r(ARMRegisters::S0, base, OP2_IMM | (-offset >> 12) | (10 << 8)); - dtr_d(isLoad, srcDst, ARMRegisters::S0, (-offset & 0xfff) | transferFlag); + dtr_d(transferType, srcDst, ARMRegisters::S0, (-offset & 0xfff)); } else { moveImm(offset, ARMRegisters::S0); - dtr_ur(isLoad, srcDst, base, ARMRegisters::S0 | transferFlag); + dtr_ur(transferType, srcDst, base, ARMRegisters::S0); } } } -void ARMAssembler::baseIndexTransfer32(bool isLoad, RegisterID srcDst, 
RegisterID base, RegisterID index, int scale, int32_t offset, bool bytes) +void ARMAssembler::baseIndexTransfer32(DataTransferTypeA transferType, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset) { - ARMWord op2; - ARMWord transferFlag = bytes ? DT_BYTE : 0; - ASSERT(scale >= 0 && scale <= 3); - op2 = lsl(index, scale); + ARMWord op2 = lsl(index, scale); - if (offset >= 0 && offset <= 0xfff) { - add_r(ARMRegisters::S0, base, op2); - dtr_u(isLoad, srcDst, ARMRegisters::S0, offset | transferFlag); + if (!offset) { + dtr_ur(transferType, srcDst, base, op2); return; } - if (offset <= 0 && offset >= -0xfff) { - add_r(ARMRegisters::S0, base, op2); - dtr_d(isLoad, srcDst, ARMRegisters::S0, (-offset & 0xfff) | transferFlag); + + add_r(ARMRegisters::S1, base, op2); + dataTransfer32(transferType, srcDst, ARMRegisters::S1, offset); +} + +void ARMAssembler::dataTransfer16(DataTransferTypeB transferType, RegisterID srcDst, RegisterID base, int32_t offset) +{ + if (offset >= 0) { + if (offset <= 0xff) + dtrh_u(transferType, srcDst, base, getOp2Half(offset)); + else if (offset <= 0xffff) { + add_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 8) | (12 << 8)); + dtrh_u(transferType, srcDst, ARMRegisters::S0, getOp2Half(offset & 0xff)); + } else { + moveImm(offset, ARMRegisters::S0); + dtrh_ur(transferType, srcDst, base, ARMRegisters::S0); + } + } else { + if (offset >= -0xff) + dtrh_d(transferType, srcDst, base, getOp2Half(-offset)); + else if (offset >= -0xffff) { + sub_r(ARMRegisters::S0, base, OP2_IMM | (-offset >> 8) | (12 << 8)); + dtrh_d(transferType, srcDst, ARMRegisters::S0, getOp2Half(-offset & 0xff)); + } else { + moveImm(offset, ARMRegisters::S0); + dtrh_ur(transferType, srcDst, base, ARMRegisters::S0); + } + } +} + +void ARMAssembler::baseIndexTransfer16(DataTransferTypeB transferType, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset) +{ + if (!scale && !offset) { + dtrh_ur(transferType, srcDst, base, index); 
return; } - ldr_un_imm(ARMRegisters::S0, offset); - add_r(ARMRegisters::S0, ARMRegisters::S0, op2); - dtr_ur(isLoad, srcDst, base, ARMRegisters::S0 | transferFlag); + add_r(ARMRegisters::S1, base, lsl(index, scale)); + dataTransfer16(transferType, srcDst, ARMRegisters::S1, offset); } -void ARMAssembler::doubleTransfer(bool isLoad, FPRegisterID srcDst, RegisterID base, int32_t offset) +void ARMAssembler::dataTransferFloat(DataTransferTypeFloat transferType, FPRegisterID srcDst, RegisterID base, int32_t offset) { // VFP cannot directly access memory that is not four-byte-aligned if (!(offset & 0x3)) { if (offset <= 0x3ff && offset >= 0) { - fdtr_u(isLoad, srcDst, base, offset >> 2); + fdtr_u(transferType, srcDst, base, offset >> 2); return; } if (offset <= 0x3ffff && offset >= 0) { add_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 10) | (11 << 8)); - fdtr_u(isLoad, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff); + fdtr_u(transferType, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff); return; } offset = -offset; if (offset <= 0x3ff && offset >= 0) { - fdtr_d(isLoad, srcDst, base, offset >> 2); + fdtr_d(transferType, srcDst, base, offset >> 2); return; } if (offset <= 0x3ffff && offset >= 0) { sub_r(ARMRegisters::S0, base, OP2_IMM | (offset >> 10) | (11 << 8)); - fdtr_d(isLoad, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff); + fdtr_d(transferType, srcDst, ARMRegisters::S0, (offset >> 2) & 0xff); return; } offset = -offset; } - ldr_un_imm(ARMRegisters::S0, offset); + moveImm(offset, ARMRegisters::S0); add_r(ARMRegisters::S0, ARMRegisters::S0, base); - fdtr_u(isLoad, srcDst, ARMRegisters::S0, 0); + fdtr_u(transferType, srcDst, ARMRegisters::S0, 0); +} + +void ARMAssembler::baseIndexTransferFloat(DataTransferTypeFloat transferType, FPRegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset) +{ + add_r(ARMRegisters::S1, base, lsl(index, scale)); + dataTransferFloat(transferType, srcDst, ARMRegisters::S1, offset); } 
PassRefPtr<ExecutableMemoryHandle> ARMAssembler::executableCopy(JSGlobalData& globalData, void* ownerUID, JITCompilationEffort effort) @@ -361,10 +392,10 @@ PassRefPtr<ExecutableMemoryHandle> ARMAssembler::executableCopy(JSGlobalData& gl ARMWord* addr = getLdrImmAddress(ldrAddr); if (*addr != InvalidBranchTarget) { if (!(iter->m_offset & 1)) { - int diff = reinterpret_cast_ptr<ARMWord*>(data + *addr) - (ldrAddr + DefaultPrefetching); + intptr_t difference = reinterpret_cast_ptr<ARMWord*>(data + *addr) - (ldrAddr + DefaultPrefetching); - if ((diff <= BOFFSET_MAX && diff >= BOFFSET_MIN)) { - *ldrAddr = B | getConditionalField(*ldrAddr) | (diff & BRANCH_MASK); + if ((difference <= BOFFSET_MAX && difference >= BOFFSET_MIN)) { + *ldrAddr = B | getConditionalField(*ldrAddr) | (difference & BRANCH_MASK); continue; } } diff --git a/Source/JavaScriptCore/assembler/ARMAssembler.h b/Source/JavaScriptCore/assembler/ARMAssembler.h index 16dc0cfc2..a0d7d27c3 100644 --- a/Source/JavaScriptCore/assembler/ARMAssembler.h +++ b/Source/JavaScriptCore/assembler/ARMAssembler.h @@ -41,16 +41,16 @@ namespace JSC { r0 = 0, r1, r2, - r3, S0 = r3, + r3, S0 = r3, /* Same as thumb assembler. */ r4, r5, r6, r7, - r8, S1 = r8, + r8, r9, r10, r11, - r12, + r12, S1 = r12, r13, sp = r13, r14, lr = r14, r15, pc = r15 @@ -60,11 +60,11 @@ namespace JSC { d0, d1, d2, - d3, SD0 = d3, + d3, d4, d5, d6, - d7, + d7, SD0 = d7, /* Same as thumb assembler. 
*/ d8, d9, d10, @@ -100,7 +100,10 @@ namespace JSC { typedef AssemblerBufferWithConstantPool<2048, 4, 4, ARMAssembler> ARMBuffer; typedef SegmentedVector<AssemblerLabel, 64> Jumps; - ARMAssembler() { } + ARMAssembler() + : m_indexOfTailOfLastWatchpoint(1) + { + } // ARM conditional constants typedef enum { @@ -141,33 +144,33 @@ namespace JSC { MVN = (0xf << 21), MUL = 0x00000090, MULL = 0x00c00090, + VMOV_F64 = 0x0eb00b40, VADD_F64 = 0x0e300b00, VDIV_F64 = 0x0e800b00, VSUB_F64 = 0x0e300b40, VMUL_F64 = 0x0e200b00, VCMP_F64 = 0x0eb40b40, VSQRT_F64 = 0x0eb10bc0, - DTR = 0x05000000, - LDRH = 0x00100090, - STRH = 0x00000090, + VABS_F64 = 0x0eb00bc0, + VNEG_F64 = 0x0eb10b40, STMDB = 0x09200000, LDMIA = 0x08b00000, - FDTR = 0x0d000b00, B = 0x0a000000, BL = 0x0b000000, -#if WTF_ARM_ARCH_AT_LEAST(5) || defined(__ARM_ARCH_4T__) BX = 0x012fff10, -#endif - VMOV_VFP = 0x0e000a10, - VMOV_ARM = 0x0e100a10, + VMOV_VFP64 = 0x0c400a10, + VMOV_ARM64 = 0x0c500a10, + VMOV_VFP32 = 0x0e000a10, + VMOV_ARM32 = 0x0e100a10, VCVT_F64_S32 = 0x0eb80bc0, VCVT_S32_F64 = 0x0ebd0b40, + VCVT_U32_F64 = 0x0ebc0b40, + VCVT_F32_F64 = 0x0eb70bc0, + VCVT_F64_F32 = 0x0eb70ac0, VMRS_APSR = 0x0ef1fa10, -#if WTF_ARM_ARCH_AT_LEAST(5) CLZ = 0x016f0f10, BKPT = 0xe1200070, BLX = 0x012fff30, -#endif #if WTF_ARM_ARCH_AT_LEAST(7) MOVW = 0x03000000, MOVT = 0x03400000, @@ -177,17 +180,37 @@ namespace JSC { enum { OP2_IMM = (1 << 25), - OP2_IMMh = (1 << 22), + OP2_IMM_HALF = (1 << 22), OP2_INV_IMM = (1 << 26), SET_CC = (1 << 20), OP2_OFSREG = (1 << 25), + // Data transfer flags. 
DT_UP = (1 << 23), - DT_BYTE = (1 << 22), DT_WB = (1 << 21), - // This flag is inlcuded in LDR and STR DT_PRE = (1 << 24), - HDT_UH = (1 << 5), DT_LOAD = (1 << 20), + DT_BYTE = (1 << 22), + }; + + enum DataTransferTypeA { + LoadUint32 = 0x05000000 | DT_LOAD, + LoadUint8 = 0x05400000 | DT_LOAD, + StoreUint32 = 0x05000000, + StoreUint8 = 0x05400000, + }; + + enum DataTransferTypeB { + LoadUint16 = 0x010000b0 | DT_LOAD, + LoadInt16 = 0x010000f0 | DT_LOAD, + LoadInt8 = 0x010000d0 | DT_LOAD, + StoreUint16 = 0x010000b0, + }; + + enum DataTransferTypeFloat { + LoadFloat = 0x0d000a00 | DT_LOAD, + LoadDouble = 0x0d000b00 | DT_LOAD, + StoreFloat = 0x0d000a00, + StoreDouble = 0x0d000b00, }; // Masks of ARM instructions @@ -218,7 +241,7 @@ namespace JSC { void emitInst(ARMWord op, int rd, int rn, ARMWord op2) { - ASSERT(((op2 & ~OP2_IMM) <= 0xfff) || (((op2 & ~OP2_IMMh) <= 0xfff))); + ASSERT(((op2 & ~OP2_IMM) <= 0xfff) || (((op2 & ~OP2_IMM_HALF) <= 0xfff))); m_buffer.putInt(op | RN(rn) | RD(rd) | op2); } @@ -407,6 +430,11 @@ namespace JSC { m_buffer.putInt(static_cast<ARMWord>(cc) | MULL | RN(rdhi) | RD(rdlo) | RS(rn) | RM(rm)); } + void vmov_f64_r(int dd, int dm, Condition cc = AL) + { + emitDoublePrecisionInst(static_cast<ARMWord>(cc) | VMOV_F64, dd, 0, dm); + } + void vadd_f64_r(int dd, int dn, int dm, Condition cc = AL) { emitDoublePrecisionInst(static_cast<ARMWord>(cc) | VADD_F64, dd, dn, dm); @@ -437,100 +465,124 @@ namespace JSC { emitDoublePrecisionInst(static_cast<ARMWord>(cc) | VSQRT_F64, dd, 0, dm); } + void vabs_f64_r(int dd, int dm, Condition cc = AL) + { + emitDoublePrecisionInst(static_cast<ARMWord>(cc) | VABS_F64, dd, 0, dm); + } + + void vneg_f64_r(int dd, int dm, Condition cc = AL) + { + emitDoublePrecisionInst(static_cast<ARMWord>(cc) | VNEG_F64, dd, 0, dm); + } + void ldr_imm(int rd, ARMWord imm, Condition cc = AL) { - m_buffer.putIntWithConstantInt(static_cast<ARMWord>(cc) | DTR | DT_LOAD | DT_UP | RN(ARMRegisters::pc) | RD(rd), imm, true); + 
m_buffer.putIntWithConstantInt(static_cast<ARMWord>(cc) | LoadUint32 | DT_UP | RN(ARMRegisters::pc) | RD(rd), imm, true); } void ldr_un_imm(int rd, ARMWord imm, Condition cc = AL) { - m_buffer.putIntWithConstantInt(static_cast<ARMWord>(cc) | DTR | DT_LOAD | DT_UP | RN(ARMRegisters::pc) | RD(rd), imm); + m_buffer.putIntWithConstantInt(static_cast<ARMWord>(cc) | LoadUint32 | DT_UP | RN(ARMRegisters::pc) | RD(rd), imm); } - void dtr_u(bool isLoad, int rd, int rb, ARMWord op2, Condition cc = AL) + void dtr_u(DataTransferTypeA transferType, int rd, int rb, ARMWord op2, Condition cc = AL) { - emitInst(static_cast<ARMWord>(cc) | DTR | (isLoad ? DT_LOAD : 0) | DT_UP, rd, rb, op2); + emitInst(static_cast<ARMWord>(cc) | transferType | DT_UP, rd, rb, op2); } - void dtr_ur(bool isLoad, int rd, int rb, int rm, Condition cc = AL) + void dtr_ur(DataTransferTypeA transferType, int rd, int rb, int rm, Condition cc = AL) { - emitInst(static_cast<ARMWord>(cc) | DTR | (isLoad ? DT_LOAD : 0) | DT_UP | OP2_OFSREG, rd, rb, rm); + emitInst(static_cast<ARMWord>(cc) | transferType | DT_UP | OP2_OFSREG, rd, rb, rm); } - void dtr_d(bool isLoad, int rd, int rb, ARMWord op2, Condition cc = AL) + void dtr_d(DataTransferTypeA transferType, int rd, int rb, ARMWord op2, Condition cc = AL) { - emitInst(static_cast<ARMWord>(cc) | DTR | (isLoad ? DT_LOAD : 0), rd, rb, op2); + emitInst(static_cast<ARMWord>(cc) | transferType, rd, rb, op2); } - void dtr_dr(bool isLoad, int rd, int rb, int rm, Condition cc = AL) + void dtr_dr(DataTransferTypeA transferType, int rd, int rb, int rm, Condition cc = AL) { - emitInst(static_cast<ARMWord>(cc) | DTR | (isLoad ? 
DT_LOAD : 0) | OP2_OFSREG, rd, rb, rm); + emitInst(static_cast<ARMWord>(cc) | transferType | OP2_OFSREG, rd, rb, rm); } - void ldrh_r(int rd, int rn, int rm, Condition cc = AL) + void dtrh_u(DataTransferTypeB transferType, int rd, int rb, ARMWord op2, Condition cc = AL) { - emitInst(static_cast<ARMWord>(cc) | LDRH | HDT_UH | DT_UP | DT_PRE, rd, rn, rm); + emitInst(static_cast<ARMWord>(cc) | transferType | DT_UP, rd, rb, op2); } - void ldrh_d(int rd, int rb, ARMWord op2, Condition cc = AL) + void dtrh_ur(DataTransferTypeB transferType, int rd, int rn, int rm, Condition cc = AL) { - emitInst(static_cast<ARMWord>(cc) | LDRH | HDT_UH | DT_PRE, rd, rb, op2); + emitInst(static_cast<ARMWord>(cc) | transferType | DT_UP, rd, rn, rm); } - void ldrh_u(int rd, int rb, ARMWord op2, Condition cc = AL) + void dtrh_d(DataTransferTypeB transferType, int rd, int rb, ARMWord op2, Condition cc = AL) { - emitInst(static_cast<ARMWord>(cc) | LDRH | HDT_UH | DT_UP | DT_PRE, rd, rb, op2); + emitInst(static_cast<ARMWord>(cc) | transferType, rd, rb, op2); } - void strh_r(int rn, int rm, int rd, Condition cc = AL) + void dtrh_dr(DataTransferTypeB transferType, int rd, int rn, int rm, Condition cc = AL) { - emitInst(static_cast<ARMWord>(cc) | STRH | HDT_UH | DT_UP | DT_PRE, rd, rn, rm); + emitInst(static_cast<ARMWord>(cc) | transferType, rd, rn, rm); } - void fdtr_u(bool isLoad, int rd, int rb, ARMWord op2, Condition cc = AL) + void fdtr_u(DataTransferTypeFloat type, int rd, int rb, ARMWord op2, Condition cc = AL) { - ASSERT(op2 <= 0xff); - emitInst(static_cast<ARMWord>(cc) | FDTR | DT_UP | (isLoad ? DT_LOAD : 0), rd, rb, op2); + ASSERT(op2 <= 0xff && rd <= 15); + /* Only d0-d15 and s0, s2, s4 ... s30 are supported. 
*/ + m_buffer.putInt(static_cast<ARMWord>(cc) | DT_UP | type | (rd << 12) | RN(rb) | op2); } - void fdtr_d(bool isLoad, int rd, int rb, ARMWord op2, Condition cc = AL) + void fdtr_d(DataTransferTypeFloat type, int rd, int rb, ARMWord op2, Condition cc = AL) { - ASSERT(op2 <= 0xff); - emitInst(static_cast<ARMWord>(cc) | FDTR | (isLoad ? DT_LOAD : 0), rd, rb, op2); + ASSERT(op2 <= 0xff && rd <= 15); + /* Only d0-d15 and s0, s2, s4 ... s30 are supported. */ + m_buffer.putInt(static_cast<ARMWord>(cc) | type | (rd << 12) | RN(rb) | op2); } void push_r(int reg, Condition cc = AL) { ASSERT(ARMWord(reg) <= 0xf); - m_buffer.putInt(cc | DTR | DT_WB | RN(ARMRegisters::sp) | RD(reg) | 0x4); + m_buffer.putInt(static_cast<ARMWord>(cc) | StoreUint32 | DT_WB | RN(ARMRegisters::sp) | RD(reg) | 0x4); } void pop_r(int reg, Condition cc = AL) { ASSERT(ARMWord(reg) <= 0xf); - m_buffer.putInt(cc | (DTR ^ DT_PRE) | DT_LOAD | DT_UP | RN(ARMRegisters::sp) | RD(reg) | 0x4); + m_buffer.putInt(static_cast<ARMWord>(cc) | (LoadUint32 ^ DT_PRE) | DT_UP | RN(ARMRegisters::sp) | RD(reg) | 0x4); } inline void poke_r(int reg, Condition cc = AL) { - dtr_d(false, ARMRegisters::sp, 0, reg, cc); + dtr_d(StoreUint32, ARMRegisters::sp, 0, reg, cc); } inline void peek_r(int reg, Condition cc = AL) { - dtr_u(true, reg, ARMRegisters::sp, 0, cc); + dtr_u(LoadUint32, reg, ARMRegisters::sp, 0, cc); } - void vmov_vfp_r(int sn, int rt, Condition cc = AL) + void vmov_vfp64_r(int sm, int rt, int rt2, Condition cc = AL) + { + ASSERT(rt != rt2); + m_buffer.putInt(static_cast<ARMWord>(cc) | VMOV_VFP64 | RN(rt2) | RD(rt) | (sm & 0xf) | ((sm & 0x10) << (5 - 4))); + } + + void vmov_arm64_r(int rt, int rt2, int sm, Condition cc = AL) + { + ASSERT(rt != rt2); + m_buffer.putInt(static_cast<ARMWord>(cc) | VMOV_ARM64 | RN(rt2) | RD(rt) | (sm & 0xf) | ((sm & 0x10) << (5 - 4))); + } + + void vmov_vfp32_r(int sn, int rt, Condition cc = AL) { ASSERT(rt <= 15); - emitSinglePrecisionInst(static_cast<ARMWord>(cc) | VMOV_VFP, rt << 
1, sn, 0); + emitSinglePrecisionInst(static_cast<ARMWord>(cc) | VMOV_VFP32, rt << 1, sn, 0); } - void vmov_arm_r(int rt, int sn, Condition cc = AL) + void vmov_arm32_r(int rt, int sn, Condition cc = AL) { ASSERT(rt <= 15); - emitSinglePrecisionInst(static_cast<ARMWord>(cc) | VMOV_ARM, rt << 1, sn, 0); + emitSinglePrecisionInst(static_cast<ARMWord>(cc) | VMOV_ARM32, rt << 1, sn, 0); } void vcvt_f64_s32_r(int dd, int sm, Condition cc = AL) @@ -545,26 +597,37 @@ namespace JSC { emitDoublePrecisionInst(static_cast<ARMWord>(cc) | VCVT_S32_F64, (sd >> 1), 0, dm); } + void vcvt_u32_f64_r(int sd, int dm, Condition cc = AL) + { + ASSERT(!(sd & 0x1)); // sd must be divisible by 2 + emitDoublePrecisionInst(static_cast<ARMWord>(cc) | VCVT_U32_F64, (sd >> 1), 0, dm); + } + + void vcvt_f64_f32_r(int dd, int sm, Condition cc = AL) + { + ASSERT(dd <= 15 && sm <= 15); + emitDoublePrecisionInst(static_cast<ARMWord>(cc) | VCVT_F64_F32, dd, 0, sm); + } + + void vcvt_f32_f64_r(int dd, int sm, Condition cc = AL) + { + ASSERT(dd <= 15 && sm <= 15); + emitDoublePrecisionInst(static_cast<ARMWord>(cc) | VCVT_F32_F64, dd, 0, sm); + } + void vmrs_apsr(Condition cc = AL) { m_buffer.putInt(static_cast<ARMWord>(cc) | VMRS_APSR); } -#if WTF_ARM_ARCH_AT_LEAST(5) void clz_r(int rd, int rm, Condition cc = AL) { m_buffer.putInt(static_cast<ARMWord>(cc) | CLZ | RD(rd) | RM(rm)); } -#endif void bkpt(ARMWord value) { -#if WTF_ARM_ARCH_AT_LEAST(5) m_buffer.putInt(BKPT | ((value & 0xff0) << 4) | (value & 0xf)); -#else - // Cannot access to Zero memory address - dtr_dr(true, ARMRegisters::S0, ARMRegisters::S0, ARMRegisters::S0); -#endif } void nop() @@ -574,23 +637,12 @@ namespace JSC { void bx(int rm, Condition cc = AL) { -#if WTF_ARM_ARCH_AT_LEAST(5) || defined(__ARM_ARCH_4T__) emitInst(static_cast<ARMWord>(cc) | BX, 0, 0, RM(rm)); -#else - mov_r(ARMRegisters::pc, RM(rm), cc); -#endif } AssemblerLabel blx(int rm, Condition cc = AL) { -#if WTF_ARM_ARCH_AT_LEAST(5) emitInst(static_cast<ARMWord>(cc) | BLX, 
0, 0, RM(rm)); -#else - ASSERT(rm != 14); - ensureSpace(2 * sizeof(ARMWord), 0); - mov_r(ARMRegisters::lr, ARMRegisters::pc, cc); - bx(rm, cc); -#endif return m_buffer.label(); } @@ -653,12 +705,33 @@ namespace JSC { return m_buffer.sizeOfConstantPool(); } - AssemblerLabel label() + AssemblerLabel labelIgnoringWatchpoints() { - m_buffer.ensureSpaceForAnyOneInstruction(); + m_buffer.ensureSpaceForAnyInstruction(); return m_buffer.label(); } + AssemblerLabel labelForWatchpoint() + { + m_buffer.ensureSpaceForAnyInstruction(maxJumpReplacementSize() / sizeof(ARMWord)); + AssemblerLabel result = m_buffer.label(); + if (result.m_offset != (m_indexOfTailOfLastWatchpoint - maxJumpReplacementSize())) + result = label(); + m_indexOfTailOfLastWatchpoint = result.m_offset + maxJumpReplacementSize(); + return label(); + } + + AssemblerLabel label() + { + AssemblerLabel result = labelIgnoringWatchpoints(); + while (result.m_offset + 1 < m_indexOfTailOfLastWatchpoint) { + nop(); + // The available number of instructions are ensured by labelForWatchpoint. + result = m_buffer.label(); + } + return result; + } + AssemblerLabel align(int alignment) { while (!m_buffer.isAligned(alignment)) @@ -684,18 +757,28 @@ namespace JSC { unsigned debugOffset() { return m_buffer.debugOffset(); } + // DFG assembly helpers for moving data between fp and registers. 
+ void vmov(RegisterID rd1, RegisterID rd2, FPRegisterID rn) + { + vmov_arm64_r(rd1, rd2, rn); + } + + void vmov(FPRegisterID rd, RegisterID rn1, RegisterID rn2) + { + vmov_vfp64_r(rd, rn1, rn2); + } + // Patching helpers static ARMWord* getLdrImmAddress(ARMWord* insn) { -#if WTF_ARM_ARCH_AT_LEAST(5) // Check for call if ((*insn & 0x0f7f0000) != 0x051f0000) { // Must be BLX ASSERT((*insn & 0x012fff30) == 0x012fff30); insn--; } -#endif + // Must be an ldr ..., [pc +/- imm] ASSERT((*insn & 0x0f7f0000) == 0x051f0000); @@ -799,6 +882,56 @@ namespace JSC { return reinterpret_cast<void*>(readPointer(reinterpret_cast<void*>(getAbsoluteJumpAddress(from)))); } + static void replaceWithJump(void* instructionStart, void* to) + { + ARMWord* instruction = reinterpret_cast<ARMWord*>(instructionStart) - 1; + intptr_t difference = reinterpret_cast<intptr_t>(to) - (reinterpret_cast<intptr_t>(instruction) + DefaultPrefetching * sizeof(ARMWord)); + + if (!(difference & 1)) { + difference >>= 2; + if ((difference <= BOFFSET_MAX && difference >= BOFFSET_MIN)) { + // Direct branch. + instruction[0] = B | AL | (difference & BRANCH_MASK); + cacheFlush(instruction, sizeof(ARMWord)); + return; + } + } + + // Load target. 
+ instruction[0] = LoadUint32 | AL | RN(ARMRegisters::pc) | RD(ARMRegisters::pc) | 4; + instruction[1] = reinterpret_cast<ARMWord>(to); + cacheFlush(instruction, sizeof(ARMWord) * 2); + } + + static ptrdiff_t maxJumpReplacementSize() + { + return sizeof(ARMWord) * 2; + } + + static void replaceWithLoad(void* instructionStart) + { + ARMWord* instruction = reinterpret_cast<ARMWord*>(instructionStart); + cacheFlush(instruction, sizeof(ARMWord)); + + ASSERT((*instruction & 0x0ff00000) == 0x02800000 || (*instruction & 0x0ff00000) == 0x05900000); + if ((*instruction & 0x0ff00000) == 0x02800000) { + *instruction = (*instruction & 0xf00fffff) | 0x05900000; + cacheFlush(instruction, sizeof(ARMWord)); + } + } + + static void replaceWithAddressComputation(void* instructionStart) + { + ARMWord* instruction = reinterpret_cast<ARMWord*>(instructionStart); + cacheFlush(instruction, sizeof(ARMWord)); + + ASSERT((*instruction & 0x0ff00000) == 0x02800000 || (*instruction & 0x0ff00000) == 0x05900000); + if ((*instruction & 0x0ff00000) == 0x05900000) { + *instruction = (*instruction & 0xf00fffff) | 0x02800000; + cacheFlush(instruction, sizeof(ARMWord)); + } + } + // Address operations static void* getRelocatedAddress(void* code, AssemblerLabel label) @@ -820,13 +953,20 @@ namespace JSC { // Handle immediates + static ARMWord getOp2(ARMWord imm); + + // Fast case if imm is known to be between 0 and 0xff static ARMWord getOp2Byte(ARMWord imm) { ASSERT(imm <= 0xff); - return OP2_IMMh | (imm & 0x0f) | ((imm & 0xf0) << 4) ; + return OP2_IMM | imm; } - static ARMWord getOp2(ARMWord imm); + static ARMWord getOp2Half(ARMWord imm) + { + ASSERT(imm <= 0xff); + return OP2_IMM_HALF | (imm & 0x0f) | ((imm & 0xf0) << 4); + } #if WTF_ARM_ARCH_AT_LEAST(7) static ARMWord getImm16Op2(ARMWord imm) @@ -840,20 +980,14 @@ namespace JSC { void moveImm(ARMWord imm, int dest); ARMWord encodeComplexImm(ARMWord imm, int dest); - ARMWord getOffsetForHalfwordDataTransfer(ARMWord imm, int tmpReg) - { - // Encode 
immediate data in the instruction if it is possible - if (imm <= 0xff) - return getOp2Byte(imm); - // Otherwise, store the data in a temporary register - return encodeComplexImm(imm, tmpReg); - } - // Memory load/store helpers - void dataTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, int32_t offset, bool bytes = false); - void baseIndexTransfer32(bool isLoad, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset, bool bytes = false); - void doubleTransfer(bool isLoad, FPRegisterID srcDst, RegisterID base, int32_t offset); + void dataTransfer32(DataTransferTypeA, RegisterID srcDst, RegisterID base, int32_t offset); + void baseIndexTransfer32(DataTransferTypeA, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset); + void dataTransfer16(DataTransferTypeB, RegisterID srcDst, RegisterID base, int32_t offset); + void baseIndexTransfer16(DataTransferTypeB, RegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset); + void dataTransferFloat(DataTransferTypeFloat, FPRegisterID srcDst, RegisterID base, int32_t offset); + void baseIndexTransferFloat(DataTransferTypeFloat, FPRegisterID srcDst, RegisterID base, RegisterID index, int scale, int32_t offset); // Constant pool hnadlers @@ -901,25 +1035,25 @@ namespace JSC { #endif private: - ARMWord RM(int reg) + static ARMWord RM(int reg) { ASSERT(reg <= ARMRegisters::pc); return reg; } - ARMWord RS(int reg) + static ARMWord RS(int reg) { ASSERT(reg <= ARMRegisters::pc); return reg << 8; } - ARMWord RD(int reg) + static ARMWord RD(int reg) { ASSERT(reg <= ARMRegisters::pc); return reg << 12; } - ARMWord RN(int reg) + static ARMWord RN(int reg) { ASSERT(reg <= ARMRegisters::pc); return reg << 16; @@ -934,6 +1068,7 @@ namespace JSC { ARMBuffer m_buffer; Jumps m_jumps; + uint32_t m_indexOfTailOfLastWatchpoint; }; } // namespace JSC diff --git a/Source/JavaScriptCore/assembler/ARMv7Assembler.h b/Source/JavaScriptCore/assembler/ARMv7Assembler.h 
index 81977e2bd..eef0ba8a7 100644 --- a/Source/JavaScriptCore/assembler/ARMv7Assembler.h +++ b/Source/JavaScriptCore/assembler/ARMv7Assembler.h @@ -1018,6 +1018,12 @@ public: else m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, imm.getUInt12()); } + + ALWAYS_INLINE void ldrWide8BitImmediate(RegisterID rt, RegisterID rn, uint8_t immediate) + { + ASSERT(rn != ARMRegisters::pc); + m_formatter.twoWordOp12Reg4Reg4Imm12(OP_LDR_imm_T3, rn, rt, immediate); + } ALWAYS_INLINE void ldrCompact(RegisterID rt, RegisterID rn, ARMThumbImmediate imm) { @@ -2117,6 +2123,46 @@ public: { return 6; } + + static void replaceWithLoad(void* instructionStart) + { + ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1)); + uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart); + switch (ptr[0] & 0xFFF0) { + case OP_LDR_imm_T3: + break; + case OP_ADD_imm_T3: + ASSERT(!(ptr[1] & 0xF000)); + ptr[0] &= 0x000F; + ptr[0] |= OP_LDR_imm_T3; + ptr[1] |= (ptr[1] & 0x0F00) << 4; + ptr[1] &= 0xF0FF; + break; + default: + ASSERT_NOT_REACHED(); + } + cacheFlush(ptr, sizeof(uint16_t) * 2); + } + + static void replaceWithAddressComputation(void* instructionStart) + { + ASSERT(!(bitwise_cast<uintptr_t>(instructionStart) & 1)); + uint16_t* ptr = reinterpret_cast<uint16_t*>(instructionStart); + switch (ptr[0] & 0xFFF0) { + case OP_LDR_imm_T3: + ASSERT(!(ptr[1] & 0x0F00)); + ptr[0] &= 0x000F; + ptr[0] |= OP_ADD_imm_T3; + ptr[1] |= (ptr[1] & 0xF000) >> 4; + ptr[1] &= 0x0FFF; + break; + case OP_ADD_imm_T3: + break; + default: + ASSERT_NOT_REACHED(); + } + cacheFlush(ptr, sizeof(uint16_t) * 2); + } unsigned debugOffset() { return m_formatter.debugOffset(); } diff --git a/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h b/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h index 0080446c2..a24f7573a 100644 --- a/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h +++ b/Source/JavaScriptCore/assembler/AbstractMacroAssembler.h @@ -295,6 +295,36 @@ public: private: 
AssemblerLabel m_label; }; + + // ConvertibleLoadLabel: + // + // A ConvertibleLoadLabel records a loadPtr instruction that can be patched to an addPtr + // so that: + // + // loadPtr(Address(a, i), b) + // + // becomes: + // + // addPtr(TrustedImmPtr(i), a, b) + class ConvertibleLoadLabel { + template<class TemplateAssemblerType> + friend class AbstractMacroAssembler; + friend class LinkBuffer; + + public: + ConvertibleLoadLabel() + { + } + + ConvertibleLoadLabel(AbstractMacroAssembler<AssemblerType>* masm) + : m_label(masm->m_assembler.labelIgnoringWatchpoints()) + { + } + + bool isSet() const { return m_label.isSet(); } + private: + AssemblerLabel m_label; + }; // DataLabelPtr: // @@ -562,6 +592,11 @@ public: result.m_label = m_assembler.labelIgnoringWatchpoints(); return result; } +#else + Label labelIgnoringWatchpoints() + { + return label(); + } #endif Label label() @@ -672,6 +707,16 @@ protected: { return AssemblerType::readPointer(dataLabelPtr.dataLocation()); } + + static void replaceWithLoad(CodeLocationConvertibleLoad label) + { + AssemblerType::replaceWithLoad(label.dataLocation()); + } + + static void replaceWithAddressComputation(CodeLocationConvertibleLoad label) + { + AssemblerType::replaceWithAddressComputation(label.dataLocation()); + } }; } // namespace JSC diff --git a/Source/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h b/Source/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h index e2ea261ee..430147280 100644 --- a/Source/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h +++ b/Source/JavaScriptCore/assembler/AssemblerBufferWithConstantPool.h @@ -127,9 +127,9 @@ public: AssemblerBuffer::ensureSpace(insnSpace); } - void ensureSpaceForAnyOneInstruction() + void ensureSpaceForAnyInstruction(int amount = 1) { - flushIfNoSpaceFor(maxInstructionSize, sizeof(uint64_t)); + flushIfNoSpaceFor(amount * maxInstructionSize, amount * sizeof(uint64_t)); } bool isAligned(int alignment) diff --git 
a/Source/JavaScriptCore/assembler/CodeLocation.h b/Source/JavaScriptCore/assembler/CodeLocation.h index 9500b1ee4..86d1f2b75 100644 --- a/Source/JavaScriptCore/assembler/CodeLocation.h +++ b/Source/JavaScriptCore/assembler/CodeLocation.h @@ -40,6 +40,7 @@ class CodeLocationNearCall; class CodeLocationDataLabelCompact; class CodeLocationDataLabel32; class CodeLocationDataLabelPtr; +class CodeLocationConvertibleLoad; // The CodeLocation* types are all pretty much do-nothing wrappers around // CodePtr (or MacroAssemblerCodePtr, to give it its full name). These @@ -62,6 +63,7 @@ public: CodeLocationDataLabelPtr dataLabelPtrAtOffset(int offset); CodeLocationDataLabel32 dataLabel32AtOffset(int offset); CodeLocationDataLabelCompact dataLabelCompactAtOffset(int offset); + CodeLocationConvertibleLoad convertibleLoadAtOffset(int offset); protected: CodeLocationCommon() @@ -146,6 +148,15 @@ public: : CodeLocationCommon(MacroAssemblerCodePtr(location)) {} }; +class CodeLocationConvertibleLoad : public CodeLocationCommon { +public: + CodeLocationConvertibleLoad() { } + explicit CodeLocationConvertibleLoad(MacroAssemblerCodePtr location) + : CodeLocationCommon(location) { } + explicit CodeLocationConvertibleLoad(void* location) + : CodeLocationCommon(MacroAssemblerCodePtr(location)) { } +}; + inline CodeLocationInstruction CodeLocationCommon::instructionAtOffset(int offset) { ASSERT_VALID_CODE_OFFSET(offset); @@ -194,6 +205,12 @@ inline CodeLocationDataLabelCompact CodeLocationCommon::dataLabelCompactAtOffset return CodeLocationDataLabelCompact(reinterpret_cast<char*>(dataLocation()) + offset); } +inline CodeLocationConvertibleLoad CodeLocationCommon::convertibleLoadAtOffset(int offset) +{ + ASSERT_VALID_CODE_OFFSET(offset); + return CodeLocationConvertibleLoad(reinterpret_cast<char*>(dataLocation()) + offset); +} + } // namespace JSC #endif // ENABLE(ASSEMBLER) diff --git a/Source/JavaScriptCore/assembler/LinkBuffer.cpp b/Source/JavaScriptCore/assembler/LinkBuffer.cpp index 
58030ba7d..0176e4307 100644 --- a/Source/JavaScriptCore/assembler/LinkBuffer.cpp +++ b/Source/JavaScriptCore/assembler/LinkBuffer.cpp @@ -41,7 +41,7 @@ LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithoutDisassembly() LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithDisassembly(const char* format, ...) { - ASSERT(Options::showDisassembly); + ASSERT(Options::showDisassembly() || Options::showDFGDisassembly()); CodeRef result = finalizeCodeWithoutDisassembly(); @@ -54,7 +54,7 @@ LinkBuffer::CodeRef LinkBuffer::finalizeCodeWithDisassembly(const char* format, dataLog(" Code at [%p, %p):\n", result.code().executableAddress(), static_cast<char*>(result.code().executableAddress()) + result.size()); if (!tryToDisassemble(result.code(), m_size, " ", WTF::dataFile())) - dataLog(" <no disassembly available>"); + dataLog(" <no disassembly available>\n"); return result; } diff --git a/Source/JavaScriptCore/assembler/LinkBuffer.h b/Source/JavaScriptCore/assembler/LinkBuffer.h index c6e003142..484d3a73f 100644 --- a/Source/JavaScriptCore/assembler/LinkBuffer.h +++ b/Source/JavaScriptCore/assembler/LinkBuffer.h @@ -69,6 +69,7 @@ class LinkBuffer { typedef MacroAssembler::DataLabelCompact DataLabelCompact; typedef MacroAssembler::DataLabel32 DataLabel32; typedef MacroAssembler::DataLabelPtr DataLabelPtr; + typedef MacroAssembler::ConvertibleLoadLabel ConvertibleLoadLabel; #if ENABLE(BRANCH_COMPACTION) typedef MacroAssembler::LinkRecord LinkRecord; typedef MacroAssembler::JumpLinkType JumpLinkType; @@ -180,6 +181,11 @@ public: return CodeLocationDataLabelCompact(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label))); } + CodeLocationConvertibleLoad locationOf(ConvertibleLoadLabel label) + { + return CodeLocationConvertibleLoad(MacroAssembler::getLinkerAddress(code(), applyOffset(label.m_label))); + } + // This method obtains the return address of the call, given as an offset from // the start of the code. 
unsigned returnAddressOffset(Call call) @@ -257,6 +263,11 @@ private: #endif }; +#define FINALIZE_CODE_IF(condition, linkBufferReference, dataLogArgumentsForHeading) \ + (UNLIKELY((condition)) \ + ? ((linkBufferReference).finalizeCodeWithDisassembly dataLogArgumentsForHeading) \ + : (linkBufferReference).finalizeCodeWithoutDisassembly()) + // Use this to finalize code, like so: // // CodeRef code = FINALIZE_CODE(linkBuffer, ("my super thingy number %d", number)); @@ -274,9 +285,7 @@ private: // is true, so you can hide expensive disassembly-only computations inside there. #define FINALIZE_CODE(linkBufferReference, dataLogArgumentsForHeading) \ - (UNLIKELY(Options::showDisassembly) \ - ? ((linkBufferReference).finalizeCodeWithDisassembly dataLogArgumentsForHeading) \ - : (linkBufferReference).finalizeCodeWithoutDisassembly()) + FINALIZE_CODE_IF(Options::showDisassembly(), linkBufferReference, dataLogArgumentsForHeading) } // namespace JSC diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARM.cpp b/Source/JavaScriptCore/assembler/MacroAssemblerARM.cpp index 2db5df1f8..3408c1230 100644 --- a/Source/JavaScriptCore/assembler/MacroAssemblerARM.cpp +++ b/Source/JavaScriptCore/assembler/MacroAssemblerARM.cpp @@ -77,18 +77,18 @@ void MacroAssemblerARM::load32WithUnalignedHalfWords(BaseIndex address, Register if (address.offset >= 0 && address.offset + 0x2 <= 0xff) { m_assembler.add_r(ARMRegisters::S0, address.base, op2); - m_assembler.ldrh_u(dest, ARMRegisters::S0, ARMAssembler::getOp2Byte(address.offset)); - m_assembler.ldrh_u(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(address.offset + 0x2)); + m_assembler.dtrh_u(ARMAssembler::LoadUint16, dest, ARMRegisters::S0, ARMAssembler::getOp2Half(address.offset)); + m_assembler.dtrh_u(ARMAssembler::LoadUint16, ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Half(address.offset + 0x2)); } else if (address.offset < 0 && address.offset >= -0xff) { m_assembler.add_r(ARMRegisters::S0, address.base, 
op2); - m_assembler.ldrh_d(dest, ARMRegisters::S0, ARMAssembler::getOp2Byte(-address.offset)); - m_assembler.ldrh_d(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(-address.offset - 0x2)); + m_assembler.dtrh_d(ARMAssembler::LoadUint16, dest, ARMRegisters::S0, ARMAssembler::getOp2Half(-address.offset)); + m_assembler.dtrh_d(ARMAssembler::LoadUint16, ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Half(-address.offset - 0x2)); } else { - m_assembler.ldr_un_imm(ARMRegisters::S0, address.offset); + m_assembler.moveImm(address.offset, ARMRegisters::S0); m_assembler.add_r(ARMRegisters::S0, ARMRegisters::S0, op2); - m_assembler.ldrh_r(dest, address.base, ARMRegisters::S0); + m_assembler.dtrh_ur(ARMAssembler::LoadUint16, dest, address.base, ARMRegisters::S0); m_assembler.add_r(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::OP2_IMM | 0x2); - m_assembler.ldrh_r(ARMRegisters::S0, address.base, ARMRegisters::S0); + m_assembler.dtrh_ur(ARMAssembler::LoadUint16, ARMRegisters::S0, address.base, ARMRegisters::S0); } m_assembler.orr_r(dest, dest, m_assembler.lsl(ARMRegisters::S0, 16)); } diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARM.h b/Source/JavaScriptCore/assembler/MacroAssemblerARM.h index 8ea29e3a0..8e123d423 100644 --- a/Source/JavaScriptCore/assembler/MacroAssemblerARM.h +++ b/Source/JavaScriptCore/assembler/MacroAssemblerARM.h @@ -90,6 +90,11 @@ public: m_assembler.adds_r(dest, dest, src); } + void add32(RegisterID op1, RegisterID op2, RegisterID dest) + { + m_assembler.adds_r(dest, op1, op2); + } + void add32(TrustedImm32 imm, Address address) { load32(address, ARMRegisters::S1); @@ -118,6 +123,11 @@ public: m_assembler.ands_r(dest, dest, src); } + void and32(RegisterID op1, RegisterID op2, RegisterID dest) + { + m_assembler.ands_r(dest, op1, op2); + } + void and32(TrustedImm32 imm, RegisterID dest) { ARMWord w = m_assembler.getImm(imm.m_value, ARMRegisters::S0, true); @@ -136,13 +146,17 @@ public: m_assembler.ands_r(dest, src, w); 
} - void lshift32(RegisterID shift_amount, RegisterID dest) + void lshift32(RegisterID shiftAmount, RegisterID dest) { - ARMWord w = ARMAssembler::getOp2(0x1f); - ASSERT(w != ARMAssembler::INVALID_IMM); - m_assembler.and_r(ARMRegisters::S0, shift_amount, w); + lshift32(dest, shiftAmount, dest); + } + + void lshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest) + { + ARMWord w = ARMAssembler::getOp2Byte(0x1f); + m_assembler.and_r(ARMRegisters::S0, shiftAmount, w); - m_assembler.movs_r(dest, m_assembler.lsl_r(dest, ARMRegisters::S0)); + m_assembler.movs_r(dest, m_assembler.lsl_r(src, ARMRegisters::S0)); } void lshift32(TrustedImm32 imm, RegisterID dest) @@ -155,13 +169,25 @@ public: m_assembler.movs_r(dest, m_assembler.lsl(src, imm.m_value & 0x1f)); } - void mul32(RegisterID src, RegisterID dest) + void mul32(RegisterID op1, RegisterID op2, RegisterID dest) { - if (src == dest) { - move(src, ARMRegisters::S0); - src = ARMRegisters::S0; + if (op2 == dest) { + if (op1 == dest) { + move(op2, ARMRegisters::S0); + op2 = ARMRegisters::S0; + } else { + // Swap the operands. 
+ RegisterID tmp = op1; + op1 = op2; + op2 = tmp; + } } - m_assembler.muls_r(dest, dest, src); + m_assembler.muls_r(dest, op1, op2); + } + + void mul32(RegisterID src, RegisterID dest) + { + mul32(src, dest, dest); } void mul32(TrustedImm32 imm, RegisterID src, RegisterID dest) @@ -172,7 +198,7 @@ public: void neg32(RegisterID srcDest) { - m_assembler.rsbs_r(srcDest, srcDest, ARMAssembler::getOp2(0)); + m_assembler.rsbs_r(srcDest, srcDest, ARMAssembler::getOp2Byte(0)); } void or32(RegisterID src, RegisterID dest) @@ -195,15 +221,19 @@ public: m_assembler.orrs_r(dest, op1, op2); } - void rshift32(RegisterID shift_amount, RegisterID dest) + void rshift32(RegisterID shiftAmount, RegisterID dest) { - ARMWord w = ARMAssembler::getOp2(0x1f); - ASSERT(w != ARMAssembler::INVALID_IMM); - m_assembler.and_r(ARMRegisters::S0, shift_amount, w); + rshift32(dest, shiftAmount, dest); + } + + void rshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest) + { + ARMWord w = ARMAssembler::getOp2Byte(0x1f); + m_assembler.and_r(ARMRegisters::S0, shiftAmount, w); - m_assembler.movs_r(dest, m_assembler.asr_r(dest, ARMRegisters::S0)); + m_assembler.movs_r(dest, m_assembler.asr_r(src, ARMRegisters::S0)); } - + void rshift32(TrustedImm32 imm, RegisterID dest) { rshift32(dest, imm, dest); @@ -213,16 +243,20 @@ public: { m_assembler.movs_r(dest, m_assembler.asr(src, imm.m_value & 0x1f)); } - - void urshift32(RegisterID shift_amount, RegisterID dest) + + void urshift32(RegisterID shiftAmount, RegisterID dest) { - ARMWord w = ARMAssembler::getOp2(0x1f); - ASSERT(w != ARMAssembler::INVALID_IMM); - m_assembler.and_r(ARMRegisters::S0, shift_amount, w); - - m_assembler.movs_r(dest, m_assembler.lsr_r(dest, ARMRegisters::S0)); + urshift32(dest, shiftAmount, dest); } - + + void urshift32(RegisterID src, RegisterID shiftAmount, RegisterID dest) + { + ARMWord w = ARMAssembler::getOp2Byte(0x1f); + m_assembler.and_r(ARMRegisters::S0, shiftAmount, w); + + m_assembler.movs_r(dest, 
m_assembler.lsr_r(src, ARMRegisters::S0)); + } + void urshift32(TrustedImm32 imm, RegisterID dest) { m_assembler.movs_r(dest, m_assembler.lsr(dest, imm.m_value & 0x1f)); @@ -266,6 +300,11 @@ public: m_assembler.eors_r(dest, dest, src); } + void xor32(RegisterID op1, RegisterID op2, RegisterID dest) + { + m_assembler.eors_r(dest, op1, op2); + } + void xor32(TrustedImm32 imm, RegisterID dest) { if (imm.m_value == -1) @@ -295,22 +334,42 @@ public: void load8(ImplicitAddress address, RegisterID dest) { - m_assembler.dataTransfer32(true, dest, address.base, address.offset, true); + m_assembler.dataTransfer32(ARMAssembler::LoadUint8, dest, address.base, address.offset); } void load8(BaseIndex address, RegisterID dest) { - m_assembler.baseIndexTransfer32(true, dest, address.base, address.index, static_cast<int>(address.scale), address.offset, true); + m_assembler.baseIndexTransfer32(ARMAssembler::LoadUint8, dest, address.base, address.index, static_cast<int>(address.scale), address.offset); + } + + void load8Signed(BaseIndex address, RegisterID dest) + { + m_assembler.baseIndexTransfer16(ARMAssembler::LoadInt8, dest, address.base, address.index, static_cast<int>(address.scale), address.offset); + } + + void load16(ImplicitAddress address, RegisterID dest) + { + m_assembler.dataTransfer16(ARMAssembler::LoadUint16, dest, address.base, address.offset); + } + + void load16(BaseIndex address, RegisterID dest) + { + m_assembler.baseIndexTransfer16(ARMAssembler::LoadUint16, dest, address.base, address.index, static_cast<int>(address.scale), address.offset); + } + + void load16Signed(BaseIndex address, RegisterID dest) + { + m_assembler.baseIndexTransfer16(ARMAssembler::LoadInt16, dest, address.base, address.index, static_cast<int>(address.scale), address.offset); } void load32(ImplicitAddress address, RegisterID dest) { - m_assembler.dataTransfer32(true, dest, address.base, address.offset); + m_assembler.dataTransfer32(ARMAssembler::LoadUint32, dest, address.base, 
address.offset); } void load32(BaseIndex address, RegisterID dest) { - m_assembler.baseIndexTransfer32(true, dest, address.base, address.index, static_cast<int>(address.scale), address.offset); + m_assembler.baseIndexTransfer32(ARMAssembler::LoadUint32, dest, address.base, address.index, static_cast<int>(address.scale), address.offset); } #if CPU(ARMV5_OR_LOWER) @@ -327,11 +386,19 @@ public: load16(address, dest); } + ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest) + { + ConvertibleLoadLabel result(this); + ASSERT(address.offset >= 0 && address.offset <= 255); + m_assembler.dtr_u(ARMAssembler::LoadUint32, dest, address.base, address.offset); + return result; + } + DataLabel32 load32WithAddressOffsetPatch(Address address, RegisterID dest) { DataLabel32 dataLabel(this); m_assembler.ldr_un_imm(ARMRegisters::S0, 0); - m_assembler.dtr_ur(true, dest, address.base, ARMRegisters::S0); + m_assembler.dtr_ur(ARMAssembler::LoadUint32, dest, address.base, ARMRegisters::S0); return dataLabel; } @@ -342,36 +409,32 @@ public: return dataLabel; } - void load16(BaseIndex address, RegisterID dest) + DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address) { - m_assembler.add_r(ARMRegisters::S1, address.base, m_assembler.lsl(address.index, address.scale)); - load16(Address(ARMRegisters::S1, address.offset), dest); + DataLabel32 dataLabel(this); + m_assembler.ldr_un_imm(ARMRegisters::S0, 0); + m_assembler.dtr_ur(ARMAssembler::StoreUint32, src, address.base, ARMRegisters::S0); + return dataLabel; } - - void load16(ImplicitAddress address, RegisterID dest) + + void store8(RegisterID src, BaseIndex address) { - if (address.offset >= 0) - m_assembler.ldrh_u(dest, address.base, m_assembler.getOffsetForHalfwordDataTransfer(address.offset, ARMRegisters::S0)); - else - m_assembler.ldrh_d(dest, address.base, m_assembler.getOffsetForHalfwordDataTransfer(-address.offset, ARMRegisters::S0)); + m_assembler.baseIndexTransfer32(ARMAssembler::StoreUint8, 
src, address.base, address.index, static_cast<int>(address.scale), address.offset); } - DataLabel32 store32WithAddressOffsetPatch(RegisterID src, Address address) + void store16(RegisterID src, BaseIndex address) { - DataLabel32 dataLabel(this); - m_assembler.ldr_un_imm(ARMRegisters::S0, 0); - m_assembler.dtr_ur(false, src, address.base, ARMRegisters::S0); - return dataLabel; + m_assembler.baseIndexTransfer16(ARMAssembler::StoreUint16, src, address.base, address.index, static_cast<int>(address.scale), address.offset); } void store32(RegisterID src, ImplicitAddress address) { - m_assembler.dataTransfer32(false, src, address.base, address.offset); + m_assembler.dataTransfer32(ARMAssembler::StoreUint32, src, address.base, address.offset); } void store32(RegisterID src, BaseIndex address) { - m_assembler.baseIndexTransfer32(false, src, address.base, address.index, static_cast<int>(address.scale), address.offset); + m_assembler.baseIndexTransfer32(ARMAssembler::StoreUint32, src, address.base, address.index, static_cast<int>(address.scale), address.offset); } void store32(TrustedImm32 imm, ImplicitAddress address) @@ -380,17 +443,23 @@ public: store32(ARMRegisters::S1, address); } + void store32(TrustedImm32 imm, BaseIndex address) + { + move(imm, ARMRegisters::S1); + m_assembler.baseIndexTransfer32(ARMAssembler::StoreUint32, ARMRegisters::S1, address.base, address.index, static_cast<int>(address.scale), address.offset); + } + void store32(RegisterID src, void* address) { m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address)); - m_assembler.dtr_u(false, src, ARMRegisters::S0, 0); + m_assembler.dtr_u(ARMAssembler::StoreUint32, src, ARMRegisters::S0, 0); } void store32(TrustedImm32 imm, void* address) { m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address)); m_assembler.moveImm(imm.m_value, ARMRegisters::S1); - m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0); + m_assembler.dtr_u(ARMAssembler::StoreUint32, 
ARMRegisters::S1, ARMRegisters::S0, 0); } void pop(RegisterID dest) @@ -422,7 +491,8 @@ public: void move(RegisterID src, RegisterID dest) { - m_assembler.mov_r(dest, src); + if (src != dest) + m_assembler.mov_r(dest, src); } void move(TrustedImmPtr imm, RegisterID dest) @@ -566,6 +636,12 @@ public: load32(address, ARMRegisters::pc); } + void jump(AbsoluteAddress address) + { + move(TrustedImmPtr(address.m_ptr), ARMRegisters::S0); + load32(Address(ARMRegisters::S0, 0), ARMRegisters::pc); + } + Jump branchAdd32(ResultCondition cond, RegisterID src, RegisterID dest) { ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero)); @@ -573,6 +649,13 @@ public: return Jump(m_assembler.jmp(ARMCondition(cond))); } + Jump branchAdd32(ResultCondition cond, RegisterID op1, RegisterID op2, RegisterID dest) + { + ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero)); + add32(op1, op2, dest); + return Jump(m_assembler.jmp(ARMCondition(cond))); + } + Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, RegisterID dest) { ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero)); @@ -587,28 +670,47 @@ public: return Jump(m_assembler.jmp(ARMCondition(cond))); } - void mull32(RegisterID src1, RegisterID src2, RegisterID dest) + Jump branchAdd32(ResultCondition cond, TrustedImm32 imm, AbsoluteAddress dest) { - if (src1 == dest) { - move(src1, ARMRegisters::S0); - src1 = ARMRegisters::S0; + ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero)); + add32(imm, dest); + return Jump(m_assembler.jmp(ARMCondition(cond))); + } + + void mull32(RegisterID op1, RegisterID op2, RegisterID dest) + { + if (op2 == dest) { + if (op1 == dest) { + move(op2, ARMRegisters::S0); + op2 = ARMRegisters::S0; + } else { + // Swap the operands. 
+ RegisterID tmp = op1; + op1 = op2; + op2 = tmp; + } } - m_assembler.mull_r(ARMRegisters::S1, dest, src2, src1); + m_assembler.mull_r(ARMRegisters::S1, dest, op1, op2); m_assembler.cmp_r(ARMRegisters::S1, m_assembler.asr(dest, 31)); } - Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest) + Jump branchMul32(ResultCondition cond, RegisterID src1, RegisterID src2, RegisterID dest) { ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero)); if (cond == Overflow) { - mull32(src, dest, dest); + mull32(src1, src2, dest); cond = NonZero; } else - mul32(src, dest); + mul32(src1, src2, dest); return Jump(m_assembler.jmp(ARMCondition(cond))); } + Jump branchMul32(ResultCondition cond, RegisterID src, RegisterID dest) + { + return branchMul32(cond, src, dest, dest); + } + Jump branchMul32(ResultCondition cond, TrustedImm32 imm, RegisterID src, RegisterID dest) { ASSERT((cond == Overflow) || (cond == Signed) || (cond == Zero) || (cond == NonZero)); @@ -671,14 +773,8 @@ public: Call nearCall() { -#if WTF_ARM_ARCH_AT_LEAST(5) - ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord)); m_assembler.loadBranchTarget(ARMRegisters::S1, ARMAssembler::AL, true); return Call(m_assembler.blx(ARMRegisters::S1), Call::LinkableNear); -#else - prepareCall(); - return Call(m_assembler.jmp(ARMAssembler::AL, true), Call::LinkableNear); -#endif } Call call(RegisterID target) @@ -699,15 +795,15 @@ public: void compare32(RelationalCondition cond, RegisterID left, RegisterID right, RegisterID dest) { m_assembler.cmp_r(left, right); - m_assembler.mov_r(dest, ARMAssembler::getOp2(0)); - m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond)); + m_assembler.mov_r(dest, ARMAssembler::getOp2Byte(0)); + m_assembler.mov_r(dest, ARMAssembler::getOp2Byte(1), ARMCondition(cond)); } void compare32(RelationalCondition cond, RegisterID left, TrustedImm32 right, RegisterID dest) { m_assembler.cmp_r(left, m_assembler.getImm(right.m_value, ARMRegisters::S0)); 
- m_assembler.mov_r(dest, ARMAssembler::getOp2(0)); - m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond)); + m_assembler.mov_r(dest, ARMAssembler::getOp2Byte(0)); + m_assembler.mov_r(dest, ARMAssembler::getOp2Byte(1), ARMCondition(cond)); } void compare8(RelationalCondition cond, Address left, TrustedImm32 right, RegisterID dest) @@ -722,8 +818,8 @@ public: m_assembler.cmp_r(0, reg); else m_assembler.tst_r(reg, m_assembler.getImm(mask.m_value, ARMRegisters::S0)); - m_assembler.mov_r(dest, ARMAssembler::getOp2(0)); - m_assembler.mov_r(dest, ARMAssembler::getOp2(1), ARMCondition(cond)); + m_assembler.mov_r(dest, ARMAssembler::getOp2Byte(0)); + m_assembler.mov_r(dest, ARMAssembler::getOp2Byte(1), ARMCondition(cond)); } void test32(ResultCondition cond, Address address, TrustedImm32 mask, RegisterID dest) @@ -746,25 +842,25 @@ public: void add32(TrustedImm32 imm, AbsoluteAddress address) { m_assembler.ldr_un_imm(ARMRegisters::S1, reinterpret_cast<ARMWord>(address.m_ptr)); - m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0); + m_assembler.dtr_u(ARMAssembler::LoadUint32, ARMRegisters::S1, ARMRegisters::S1, 0); add32(imm, ARMRegisters::S1); m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address.m_ptr)); - m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0); + m_assembler.dtr_u(ARMAssembler::StoreUint32, ARMRegisters::S1, ARMRegisters::S0, 0); } void sub32(TrustedImm32 imm, AbsoluteAddress address) { m_assembler.ldr_un_imm(ARMRegisters::S1, reinterpret_cast<ARMWord>(address.m_ptr)); - m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0); + m_assembler.dtr_u(ARMAssembler::LoadUint32, ARMRegisters::S1, ARMRegisters::S1, 0); sub32(imm, ARMRegisters::S1); m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address.m_ptr)); - m_assembler.dtr_u(false, ARMRegisters::S1, ARMRegisters::S0, 0); + m_assembler.dtr_u(ARMAssembler::StoreUint32, ARMRegisters::S1, ARMRegisters::S0, 0); } void 
load32(const void* address, RegisterID dest) { m_assembler.ldr_un_imm(ARMRegisters::S0, reinterpret_cast<ARMWord>(address)); - m_assembler.dtr_u(true, dest, ARMRegisters::S0, 0); + m_assembler.dtr_u(ARMAssembler::LoadUint32, dest, ARMRegisters::S0, 0); } Jump branch32(RelationalCondition cond, AbsoluteAddress left, RegisterID right) @@ -790,14 +886,9 @@ public: Call call() { -#if WTF_ARM_ARCH_AT_LEAST(5) ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord)); m_assembler.loadBranchTarget(ARMRegisters::S1, ARMAssembler::AL, true); return Call(m_assembler.blx(ARMRegisters::S1), Call::Linkable); -#else - prepareCall(); - return Call(m_assembler.jmp(ARMAssembler::AL, true), Call::Linkable); -#endif } Call tailRecursiveCall() @@ -861,20 +952,52 @@ public: } static bool supportsFloatingPointAbs() { return false; } + void loadFloat(BaseIndex address, FPRegisterID dest) + { + m_assembler.baseIndexTransferFloat(ARMAssembler::LoadFloat, dest, address.base, address.index, static_cast<int>(address.scale), address.offset); + } + void loadDouble(ImplicitAddress address, FPRegisterID dest) { - m_assembler.doubleTransfer(true, dest, address.base, address.offset); + m_assembler.dataTransferFloat(ARMAssembler::LoadDouble, dest, address.base, address.offset); + } + + void loadDouble(BaseIndex address, FPRegisterID dest) + { + m_assembler.baseIndexTransferFloat(ARMAssembler::LoadDouble, dest, address.base, address.index, static_cast<int>(address.scale), address.offset); } void loadDouble(const void* address, FPRegisterID dest) { - m_assembler.ldr_un_imm(ARMRegisters::S0, (ARMWord)address); - m_assembler.fdtr_u(true, dest, ARMRegisters::S0, 0); + move(TrustedImm32(reinterpret_cast<ARMWord>(address)), ARMRegisters::S0); + m_assembler.fdtr_u(ARMAssembler::LoadDouble, dest, ARMRegisters::S0, 0); + } + + void storeFloat(FPRegisterID src, BaseIndex address) + { + m_assembler.baseIndexTransferFloat(ARMAssembler::StoreFloat, src, address.base, address.index, static_cast<int>(address.scale), 
address.offset); } void storeDouble(FPRegisterID src, ImplicitAddress address) { - m_assembler.doubleTransfer(false, src, address.base, address.offset); + m_assembler.dataTransferFloat(ARMAssembler::StoreDouble, src, address.base, address.offset); + } + + void storeDouble(FPRegisterID src, BaseIndex address) + { + m_assembler.baseIndexTransferFloat(ARMAssembler::StoreDouble, src, address.base, address.index, static_cast<int>(address.scale), address.offset); + } + + void storeDouble(FPRegisterID src, const void* address) + { + move(TrustedImm32(reinterpret_cast<ARMWord>(address)), ARMRegisters::S0); + m_assembler.dataTransferFloat(ARMAssembler::StoreDouble, src, ARMRegisters::S0, 0); + } + + void moveDouble(FPRegisterID src, FPRegisterID dest) + { + if (src != dest) + m_assembler.vmov_f64_r(dest, src); } void addDouble(FPRegisterID src, FPRegisterID dest) @@ -882,17 +1005,33 @@ public: m_assembler.vadd_f64_r(dest, dest, src); } + void addDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + m_assembler.vadd_f64_r(dest, op1, op2); + } + void addDouble(Address src, FPRegisterID dest) { loadDouble(src, ARMRegisters::SD0); addDouble(ARMRegisters::SD0, dest); } + void addDouble(AbsoluteAddress address, FPRegisterID dest) + { + loadDouble(address.m_ptr, ARMRegisters::SD0); + addDouble(ARMRegisters::SD0, dest); + } + void divDouble(FPRegisterID src, FPRegisterID dest) { m_assembler.vdiv_f64_r(dest, dest, src); } + void divDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + m_assembler.vdiv_f64_r(dest, op1, op2); + } + void divDouble(Address src, FPRegisterID dest) { ASSERT_NOT_REACHED(); // Untested @@ -905,6 +1044,11 @@ public: m_assembler.vsub_f64_r(dest, dest, src); } + void subDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + m_assembler.vsub_f64_r(dest, op1, op2); + } + void subDouble(Address src, FPRegisterID dest) { loadDouble(src, ARMRegisters::SD0); @@ -922,39 +1066,55 @@ public: mulDouble(ARMRegisters::SD0, dest); } 
+ void mulDouble(FPRegisterID op1, FPRegisterID op2, FPRegisterID dest) + { + m_assembler.vmul_f64_r(dest, op1, op2); + } + void sqrtDouble(FPRegisterID src, FPRegisterID dest) { m_assembler.vsqrt_f64_r(dest, src); } - void absDouble(FPRegisterID, FPRegisterID) + void absDouble(FPRegisterID src, FPRegisterID dest) { - ASSERT_NOT_REACHED(); + m_assembler.vabs_f64_r(dest, src); + } + + void negateDouble(FPRegisterID src, FPRegisterID dest) + { + m_assembler.vneg_f64_r(dest, src); } void convertInt32ToDouble(RegisterID src, FPRegisterID dest) { - m_assembler.vmov_vfp_r(dest << 1, src); + m_assembler.vmov_vfp32_r(dest << 1, src); m_assembler.vcvt_f64_s32_r(dest, dest << 1); } void convertInt32ToDouble(Address src, FPRegisterID dest) { - ASSERT_NOT_REACHED(); // Untested - // flds does not worth the effort here load32(src, ARMRegisters::S1); convertInt32ToDouble(ARMRegisters::S1, dest); } void convertInt32ToDouble(AbsoluteAddress src, FPRegisterID dest) { - ASSERT_NOT_REACHED(); // Untested - // flds does not worth the effort here - m_assembler.ldr_un_imm(ARMRegisters::S1, (ARMWord)src.m_ptr); - m_assembler.dtr_u(true, ARMRegisters::S1, ARMRegisters::S1, 0); + move(TrustedImmPtr(src.m_ptr), ARMRegisters::S1); + load32(Address(ARMRegisters::S1), ARMRegisters::S1); convertInt32ToDouble(ARMRegisters::S1, dest); } + void convertFloatToDouble(FPRegisterID src, FPRegisterID dst) + { + m_assembler.vcvt_f64_f32_r(dst, src); + } + + void convertDoubleToFloat(FPRegisterID src, FPRegisterID dst) + { + m_assembler.vcvt_f32_f64_r(dst, src); + } + Jump branchDouble(DoubleCondition cond, FPRegisterID left, FPRegisterID right) { m_assembler.vcmp_f64_r(left, right); @@ -968,12 +1128,42 @@ public: // If the result is not representable as a 32 bit value, branch. // May also branch for some values that are representable in 32 bits // (specifically, in this case, INT_MIN). 
- Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest) + enum BranchTruncateType { BranchIfTruncateFailed, BranchIfTruncateSuccessful }; + Jump branchTruncateDoubleToInt32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed) { - UNUSED_PARAM(src); - UNUSED_PARAM(dest); - ASSERT_NOT_REACHED(); - return jump(); + truncateDoubleToInt32(src, dest); + + m_assembler.add_r(ARMRegisters::S0, dest, ARMAssembler::getOp2Byte(1)); + m_assembler.bic_r(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(1)); + + ARMWord w = ARMAssembler::getOp2(0x80000000); + ASSERT(w != ARMAssembler::INVALID_IMM); + m_assembler.cmp_r(ARMRegisters::S0, w); + return Jump(m_assembler.jmp(branchType == BranchIfTruncateFailed ? ARMAssembler::EQ : ARMAssembler::NE)); + } + + Jump branchTruncateDoubleToUint32(FPRegisterID src, RegisterID dest, BranchTruncateType branchType = BranchIfTruncateFailed) + { + truncateDoubleToUint32(src, dest); + + m_assembler.add_r(ARMRegisters::S0, dest, ARMAssembler::getOp2Byte(1)); + m_assembler.bic_r(ARMRegisters::S0, ARMRegisters::S0, ARMAssembler::getOp2Byte(1)); + + m_assembler.cmp_r(ARMRegisters::S0, ARMAssembler::getOp2Byte(0)); + return Jump(m_assembler.jmp(branchType == BranchIfTruncateFailed ? ARMAssembler::EQ : ARMAssembler::NE)); + } + + // Result is undefined if the value is outside of the integer range. + void truncateDoubleToInt32(FPRegisterID src, RegisterID dest) + { + m_assembler.vcvt_s32_f64_r(ARMRegisters::SD0 << 1, src); + m_assembler.vmov_arm32_r(dest, ARMRegisters::SD0 << 1); + } + + void truncateDoubleToUint32(FPRegisterID src, RegisterID dest) + { + m_assembler.vcvt_u32_f64_r(ARMRegisters::SD0 << 1, src); + m_assembler.vmov_arm32_r(dest, ARMRegisters::SD0 << 1); } // Convert 'src' to an integer, and places the resulting 'dest'. 
@@ -983,7 +1173,7 @@ public: void branchConvertDoubleToInt32(FPRegisterID src, RegisterID dest, JumpList& failureCases, FPRegisterID fpTemp) { m_assembler.vcvt_s32_f64_r(ARMRegisters::SD0 << 1, src); - m_assembler.vmov_arm_r(dest, ARMRegisters::SD0 << 1); + m_assembler.vmov_arm32_r(dest, ARMRegisters::SD0 << 1); // Convert the integer result back to float & compare to the original value - if not equal or unordered (NaN) then jump. m_assembler.vcvt_f64_s32_r(ARMRegisters::SD0, ARMRegisters::SD0 << 1); @@ -995,18 +1185,25 @@ public: Jump branchDoubleNonZero(FPRegisterID reg, FPRegisterID scratch) { - m_assembler.mov_r(ARMRegisters::S0, ARMAssembler::getOp2(0)); + m_assembler.mov_r(ARMRegisters::S0, ARMAssembler::getOp2Byte(0)); convertInt32ToDouble(ARMRegisters::S0, scratch); return branchDouble(DoubleNotEqual, reg, scratch); } Jump branchDoubleZeroOrNaN(FPRegisterID reg, FPRegisterID scratch) { - m_assembler.mov_r(ARMRegisters::S0, ARMAssembler::getOp2(0)); + m_assembler.mov_r(ARMRegisters::S0, ARMAssembler::getOp2Byte(0)); convertInt32ToDouble(ARMRegisters::S0, scratch); return branchDouble(DoubleEqualOrUnordered, reg, scratch); } + // Invert a relational condition, e.g. == becomes !=, < becomes >=, etc. 
+ static RelationalCondition invert(RelationalCondition cond) + { + ASSERT((static_cast<uint32_t>(cond & 0x0fffffff)) == 0 && static_cast<uint32_t>(cond) < static_cast<uint32_t>(ARMAssembler::AL)); + return static_cast<RelationalCondition>(cond ^ 0x10000000); + } + void nop() { m_assembler.nop(); @@ -1019,12 +1216,12 @@ public: static void replaceWithJump(CodeLocationLabel instructionStart, CodeLocationLabel destination) { - ASSERT_NOT_REACHED(); + ARMAssembler::replaceWithJump(instructionStart.dataLocation(), destination.dataLocation()); } static ptrdiff_t maxJumpReplacementSize() { - ASSERT_NOT_REACHED(); + ARMAssembler::maxJumpReplacementSize(); return 0; } @@ -1049,58 +1246,10 @@ protected: return m_assembler.sizeOfConstantPool(); } - void prepareCall() - { -#if WTF_ARM_ARCH_VERSION < 5 - ensureSpace(2 * sizeof(ARMWord), sizeof(ARMWord)); - - m_assembler.mov_r(linkRegister, ARMRegisters::pc); -#endif - } - void call32(RegisterID base, int32_t offset) { -#if WTF_ARM_ARCH_AT_LEAST(5) - int targetReg = ARMRegisters::S1; -#else - int targetReg = ARMRegisters::pc; -#endif - int tmpReg = ARMRegisters::S1; - - if (base == ARMRegisters::sp) - offset += 4; - - if (offset >= 0) { - if (offset <= 0xfff) { - prepareCall(); - m_assembler.dtr_u(true, targetReg, base, offset); - } else if (offset <= 0xfffff) { - m_assembler.add_r(tmpReg, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8)); - prepareCall(); - m_assembler.dtr_u(true, targetReg, tmpReg, offset & 0xfff); - } else { - m_assembler.moveImm(offset, tmpReg); - prepareCall(); - m_assembler.dtr_ur(true, targetReg, base, tmpReg); - } - } else { - offset = -offset; - if (offset <= 0xfff) { - prepareCall(); - m_assembler.dtr_d(true, targetReg, base, offset); - } else if (offset <= 0xfffff) { - m_assembler.sub_r(tmpReg, base, ARMAssembler::OP2_IMM | (offset >> 12) | (10 << 8)); - prepareCall(); - m_assembler.dtr_d(true, targetReg, tmpReg, offset & 0xfff); - } else { - m_assembler.moveImm(offset, tmpReg); - 
prepareCall(); - m_assembler.dtr_dr(true, targetReg, base, tmpReg); - } - } -#if WTF_ARM_ARCH_AT_LEAST(5) - m_assembler.blx(targetReg); -#endif + load32(Address(base, offset), ARMRegisters::S1); + m_assembler.blx(ARMRegisters::S1); } private: diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h b/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h index 6c0feffcf..3694c9163 100644 --- a/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h +++ b/Source/JavaScriptCore/assembler/MacroAssemblerARMv7.h @@ -600,6 +600,14 @@ public: move(TrustedImmPtr(address), addressTempRegister); m_assembler.ldr(dest, addressTempRegister, ARMThumbImmediate::makeUInt16(0)); } + + ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest) + { + ConvertibleLoadLabel result(this); + ASSERT(address.offset >= 0 && address.offset <= 255); + m_assembler.ldrWide8BitImmediate(dest, address.base, address.offset); + return result; + } void load8(ImplicitAddress address, RegisterID dest) { diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerX86.h b/Source/JavaScriptCore/assembler/MacroAssemblerX86.h index 3ea40c967..45de8139f 100644 --- a/Source/JavaScriptCore/assembler/MacroAssemblerX86.h +++ b/Source/JavaScriptCore/assembler/MacroAssemblerX86.h @@ -89,6 +89,13 @@ public: m_assembler.movl_mr(address, dest); } + ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest) + { + ConvertibleLoadLabel result = ConvertibleLoadLabel(this); + m_assembler.movl_mr(address.offset, address.base, dest); + return result; + } + void addDouble(AbsoluteAddress address, FPRegisterID dest) { m_assembler.addsd_mr(address.m_ptr, dest); diff --git a/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h b/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h index fa95b335b..1fb574b51 100644 --- a/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h +++ b/Source/JavaScriptCore/assembler/MacroAssemblerX86_64.h @@ -258,6 +258,13 @@ public: 
m_assembler.movq_mr(address.offset, address.base, dest); } + ConvertibleLoadLabel convertibleLoadPtr(Address address, RegisterID dest) + { + ConvertibleLoadLabel result = ConvertibleLoadLabel(this); + m_assembler.movq_mr(address.offset, address.base, dest); + return result; + } + void loadPtr(BaseIndex address, RegisterID dest) { m_assembler.movq_mr(address.offset, address.base, address.index, address.scale, dest); diff --git a/Source/JavaScriptCore/assembler/RepatchBuffer.h b/Source/JavaScriptCore/assembler/RepatchBuffer.h index a87294b1b..531dda934 100644 --- a/Source/JavaScriptCore/assembler/RepatchBuffer.h +++ b/Source/JavaScriptCore/assembler/RepatchBuffer.h @@ -122,6 +122,24 @@ public: { relinkNearCallerToTrampoline(returnAddress, CodeLocationLabel(newCalleeFunction)); } + + void replaceWithLoad(CodeLocationConvertibleLoad label) + { + MacroAssembler::replaceWithLoad(label); + } + + void replaceWithAddressComputation(CodeLocationConvertibleLoad label) + { + MacroAssembler::replaceWithAddressComputation(label); + } + + void setLoadInstructionIsActive(CodeLocationConvertibleLoad label, bool isActive) + { + if (isActive) + replaceWithLoad(label); + else + replaceWithAddressComputation(label); + } private: void* m_start; diff --git a/Source/JavaScriptCore/assembler/SH4Assembler.h b/Source/JavaScriptCore/assembler/SH4Assembler.h index 59d042244..d55d393f2 100644 --- a/Source/JavaScriptCore/assembler/SH4Assembler.h +++ b/Source/JavaScriptCore/assembler/SH4Assembler.h @@ -1241,7 +1241,7 @@ public: AssemblerLabel label() { - m_buffer.ensureSpaceForAnyOneInstruction(); + m_buffer.ensureSpaceForAnyInstruction(); return m_buffer.label(); } diff --git a/Source/JavaScriptCore/assembler/X86Assembler.h b/Source/JavaScriptCore/assembler/X86Assembler.h index 9c35be8b5..cf8133266 100644 --- a/Source/JavaScriptCore/assembler/X86Assembler.h +++ b/Source/JavaScriptCore/assembler/X86Assembler.h @@ -1839,6 +1839,42 @@ public: return 5; } + static void replaceWithLoad(void* 
instructionStart) + { + uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart); +#if CPU(X86_64) + if ((*ptr & ~15) == PRE_REX) + ptr++; +#endif + switch (*ptr) { + case OP_MOV_GvEv: + break; + case OP_LEA: + *ptr = OP_MOV_GvEv; + break; + default: + ASSERT_NOT_REACHED(); + } + } + + static void replaceWithAddressComputation(void* instructionStart) + { + uint8_t* ptr = reinterpret_cast<uint8_t*>(instructionStart); +#if CPU(X86_64) + if ((*ptr & ~15) == PRE_REX) + ptr++; +#endif + switch (*ptr) { + case OP_MOV_GvEv: + *ptr = OP_LEA; + break; + case OP_LEA: + break; + default: + ASSERT_NOT_REACHED(); + } + } + static unsigned getCallReturnOffset(AssemblerLabel call) { ASSERT(call.isSet()); diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.cpp b/Source/JavaScriptCore/bytecode/CodeBlock.cpp index e0a4da71d..48d0fe728 100644 --- a/Source/JavaScriptCore/bytecode/CodeBlock.cpp +++ b/Source/JavaScriptCore/bytecode/CodeBlock.cpp @@ -48,6 +48,7 @@ #include "UStringConcatenate.h" #include <stdio.h> #include <wtf/StringExtras.h> +#include <wtf/UnusedParam.h> #if ENABLE(DFG_JIT) #include "DFGOperations.h" @@ -93,6 +94,18 @@ static CString idName(int id0, const Identifier& ident) return makeUString(ident.ustring(), "(@id", UString::number(id0), ")").utf8(); } +void CodeBlock::dumpBytecodeCommentAndNewLine(int location) +{ +#if ENABLE(BYTECODE_COMMENTS) + const char* comment = commentForBytecodeOffset(location); + if (comment) + dataLog("\t\t ; %s", comment); +#else + UNUSED_PARAM(location); +#endif + dataLog("\n"); +} + CString CodeBlock::registerName(ExecState* exec, int r) const { if (r == missingThisObjectMarker()) @@ -156,7 +169,8 @@ void CodeBlock::printUnaryOp(ExecState* exec, int location, Vector<Instruction>: int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; - dataLog("[%4d] %s\t\t %s, %s\n", location, op, registerName(exec, r0).data(), registerName(exec, r1).data()); + dataLog("[%4d] %s\t\t %s, %s", location, op, registerName(exec, r0).data(), 
registerName(exec, r1).data()); + dumpBytecodeCommentAndNewLine(location); } void CodeBlock::printBinaryOp(ExecState* exec, int location, Vector<Instruction>::const_iterator& it, const char* op) @@ -164,14 +178,16 @@ void CodeBlock::printBinaryOp(ExecState* exec, int location, Vector<Instruction> int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int r2 = (++it)->u.operand; - dataLog("[%4d] %s\t\t %s, %s, %s\n", location, op, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data()); + dataLog("[%4d] %s\t\t %s, %s, %s", location, op, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data()); + dumpBytecodeCommentAndNewLine(location); } void CodeBlock::printConditionalJump(ExecState* exec, const Vector<Instruction>::const_iterator&, Vector<Instruction>::const_iterator& it, int location, const char* op) { int r0 = (++it)->u.operand; int offset = (++it)->u.operand; - dataLog("[%4d] %s\t\t %s, %d(->%d)\n", location, op, registerName(exec, r0).data(), offset, location + offset); + dataLog("[%4d] %s\t\t %s, %d(->%d)", location, op, registerName(exec, r0).data(), offset, location + offset); + dumpBytecodeCommentAndNewLine(location); } void CodeBlock::printGetByIdOp(ExecState* exec, int location, Vector<Instruction>::const_iterator& it) @@ -181,6 +197,9 @@ void CodeBlock::printGetByIdOp(ExecState* exec, int location, Vector<Instruction case op_get_by_id: op = "get_by_id"; break; + case op_get_by_id_out_of_line: + op = "get_by_id_out_of_line"; + break; case op_get_by_id_self: op = "get_by_id_self"; break; @@ -402,7 +421,7 @@ void CodeBlock::printCallOp(ExecState* exec, int location, Vector<Instruction>:: } #endif } - dataLog("\n"); + dumpBytecodeCommentAndNewLine(location); it += 2; } @@ -411,7 +430,8 @@ void CodeBlock::printPutByIdOp(ExecState* exec, int location, Vector<Instruction int r0 = (++it)->u.operand; int id0 = (++it)->u.operand; int r1 = (++it)->u.operand; - dataLog("[%4d] %s\t %s, %s, 
%s\n", location, op, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), registerName(exec, r1).data()); + dataLog("[%4d] %s\t %s, %s, %s", location, op, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), registerName(exec, r1).data()); + dumpBytecodeCommentAndNewLine(location); it += 5; } @@ -650,52 +670,61 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator& int location = it - begin; switch (exec->interpreter()->getOpcodeID(it->u.opcode)) { case op_enter: { - dataLog("[%4d] enter\n", location); + dataLog("[%4d] enter", location); + dumpBytecodeCommentAndNewLine(location); break; } case op_create_activation: { int r0 = (++it)->u.operand; - dataLog("[%4d] create_activation %s\n", location, registerName(exec, r0).data()); + dataLog("[%4d] create_activation %s", location, registerName(exec, r0).data()); + dumpBytecodeCommentAndNewLine(location); break; } case op_create_arguments: { int r0 = (++it)->u.operand; - dataLog("[%4d] create_arguments\t %s\n", location, registerName(exec, r0).data()); + dataLog("[%4d] create_arguments\t %s", location, registerName(exec, r0).data()); + dumpBytecodeCommentAndNewLine(location); break; } case op_init_lazy_reg: { int r0 = (++it)->u.operand; - dataLog("[%4d] init_lazy_reg\t %s\n", location, registerName(exec, r0).data()); + dataLog("[%4d] init_lazy_reg\t %s", location, registerName(exec, r0).data()); + dumpBytecodeCommentAndNewLine(location); break; } case op_create_this: { int r0 = (++it)->u.operand; - dataLog("[%4d] create_this %s\n", location, registerName(exec, r0).data()); + dataLog("[%4d] create_this %s", location, registerName(exec, r0).data()); + dumpBytecodeCommentAndNewLine(location); break; } case op_convert_this: { int r0 = (++it)->u.operand; - dataLog("[%4d] convert_this\t %s\n", location, registerName(exec, r0).data()); + dataLog("[%4d] convert_this\t %s", location, registerName(exec, r0).data()); + dumpBytecodeCommentAndNewLine(location); ++it; // 
Skip value profile. break; } case op_new_object: { int r0 = (++it)->u.operand; - dataLog("[%4d] new_object\t %s\n", location, registerName(exec, r0).data()); + dataLog("[%4d] new_object\t %s", location, registerName(exec, r0).data()); + dumpBytecodeCommentAndNewLine(location); break; } case op_new_array: { int dst = (++it)->u.operand; int argv = (++it)->u.operand; int argc = (++it)->u.operand; - dataLog("[%4d] new_array\t %s, %s, %d\n", location, registerName(exec, dst).data(), registerName(exec, argv).data(), argc); + dataLog("[%4d] new_array\t %s, %s, %d", location, registerName(exec, dst).data(), registerName(exec, argv).data(), argc); + dumpBytecodeCommentAndNewLine(location); break; } case op_new_array_buffer: { int dst = (++it)->u.operand; int argv = (++it)->u.operand; int argc = (++it)->u.operand; - dataLog("[%4d] new_array_buffer %s, %d, %d\n", location, registerName(exec, dst).data(), argv, argc); + dataLog("[%4d] new_array_buffer %s, %d, %d", location, registerName(exec, dst).data(), argv, argc); + dumpBytecodeCommentAndNewLine(location); break; } case op_new_regexp: { @@ -703,15 +732,17 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator& int re0 = (++it)->u.operand; dataLog("[%4d] new_regexp\t %s, ", location, registerName(exec, r0).data()); if (r0 >=0 && r0 < (int)numberOfRegExps()) - dataLog("%s\n", regexpName(re0, regexp(re0)).data()); + dataLog("%s", regexpName(re0, regexp(re0)).data()); else - dataLog("bad_regexp(%d)\n", re0); + dataLog("bad_regexp(%d)", re0); + dumpBytecodeCommentAndNewLine(location); break; } case op_mov: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; - dataLog("[%4d] mov\t\t %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data()); + dataLog("[%4d] mov\t\t %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data()); + dumpBytecodeCommentAndNewLine(location); break; } case op_not: { @@ -760,12 +791,14 @@ void CodeBlock::dump(ExecState* exec, 
const Vector<Instruction>::const_iterator& } case op_pre_inc: { int r0 = (++it)->u.operand; - dataLog("[%4d] pre_inc\t\t %s\n", location, registerName(exec, r0).data()); + dataLog("[%4d] pre_inc\t\t %s", location, registerName(exec, r0).data()); + dumpBytecodeCommentAndNewLine(location); break; } case op_pre_dec: { int r0 = (++it)->u.operand; - dataLog("[%4d] pre_dec\t\t %s\n", location, registerName(exec, r0).data()); + dataLog("[%4d] pre_dec\t\t %s", location, registerName(exec, r0).data()); + dumpBytecodeCommentAndNewLine(location); break; } case op_post_inc: { @@ -837,7 +870,8 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator& } case op_check_has_instance: { int base = (++it)->u.operand; - dataLog("[%4d] check_has_instance\t\t %s\n", location, registerName(exec, base).data()); + dataLog("[%4d] check_has_instance\t\t %s", location, registerName(exec, base).data()); + dumpBytecodeCommentAndNewLine(location); break; } case op_instanceof: { @@ -845,7 +879,8 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator& int r1 = (++it)->u.operand; int r2 = (++it)->u.operand; int r3 = (++it)->u.operand; - dataLog("[%4d] instanceof\t\t %s, %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data(), registerName(exec, r3).data()); + dataLog("[%4d] instanceof\t\t %s, %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data(), registerName(exec, r3).data()); + dumpBytecodeCommentAndNewLine(location); break; } case op_typeof: { @@ -883,7 +918,8 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator& case op_resolve: { int r0 = (++it)->u.operand; int id0 = (++it)->u.operand; - dataLog("[%4d] resolve\t\t %s, %s\n", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data()); + dataLog("[%4d] resolve\t\t %s, %s", location, registerName(exec, r0).data(), 
idName(id0, m_identifiers[id0]).data()); + dumpBytecodeCommentAndNewLine(location); it++; break; } @@ -891,14 +927,16 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator& int r0 = (++it)->u.operand; int id0 = (++it)->u.operand; int skipLevels = (++it)->u.operand; - dataLog("[%4d] resolve_skip\t %s, %s, %d\n", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), skipLevels); + dataLog("[%4d] resolve_skip\t %s, %s, %d", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), skipLevels); + dumpBytecodeCommentAndNewLine(location); it++; break; } case op_resolve_global: { int r0 = (++it)->u.operand; int id0 = (++it)->u.operand; - dataLog("[%4d] resolve_global\t %s, %s\n", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data()); + dataLog("[%4d] resolve_global\t %s, %s", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data()); + dumpBytecodeCommentAndNewLine(location); it += 3; break; } @@ -908,7 +946,8 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator& JSValue scope = JSValue((++it)->u.jsCell.get()); ++it; int depth = (++it)->u.operand; - dataLog("[%4d] resolve_global_dynamic\t %s, %s, %s, %d\n", location, registerName(exec, r0).data(), valueToSourceString(exec, scope).utf8().data(), idName(id0, m_identifiers[id0]).data(), depth); + dataLog("[%4d] resolve_global_dynamic\t %s, %s, %s, %d", location, registerName(exec, r0).data(), valueToSourceString(exec, scope).utf8().data(), idName(id0, m_identifiers[id0]).data(), depth); + dumpBytecodeCommentAndNewLine(location); ++it; break; } @@ -916,7 +955,8 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator& int r0 = (++it)->u.operand; int index = (++it)->u.operand; int skipLevels = (++it)->u.operand; - dataLog("[%4d] get_scoped_var\t %s, %d, %d\n", location, registerName(exec, r0).data(), index, skipLevels); + dataLog("[%4d] 
get_scoped_var\t %s, %d, %d", location, registerName(exec, r0).data(), index, skipLevels); + dumpBytecodeCommentAndNewLine(location); it++; break; } @@ -924,20 +964,23 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator& int index = (++it)->u.operand; int skipLevels = (++it)->u.operand; int r0 = (++it)->u.operand; - dataLog("[%4d] put_scoped_var\t %d, %d, %s\n", location, index, skipLevels, registerName(exec, r0).data()); + dataLog("[%4d] put_scoped_var\t %d, %d, %s", location, index, skipLevels, registerName(exec, r0).data()); + dumpBytecodeCommentAndNewLine(location); break; } case op_get_global_var: { int r0 = (++it)->u.operand; WriteBarrier<Unknown>* registerPointer = (++it)->u.registerPointer; - dataLog("[%4d] get_global_var\t %s, g%d(%p)\n", location, registerName(exec, r0).data(), m_globalObject->findRegisterIndex(registerPointer), registerPointer); + dataLog("[%4d] get_global_var\t %s, g%d(%p)", location, registerName(exec, r0).data(), m_globalObject->findRegisterIndex(registerPointer), registerPointer); + dumpBytecodeCommentAndNewLine(location); it++; break; } case op_get_global_var_watchable: { int r0 = (++it)->u.operand; WriteBarrier<Unknown>* registerPointer = (++it)->u.registerPointer; - dataLog("[%4d] get_global_var_watchable\t %s, g%d(%p)\n", location, registerName(exec, r0).data(), m_globalObject->findRegisterIndex(registerPointer), registerPointer); + dataLog("[%4d] get_global_var_watchable\t %s, g%d(%p)", location, registerName(exec, r0).data(), m_globalObject->findRegisterIndex(registerPointer), registerPointer); + dumpBytecodeCommentAndNewLine(location); it++; it++; break; @@ -945,13 +988,15 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator& case op_put_global_var: { WriteBarrier<Unknown>* registerPointer = (++it)->u.registerPointer; int r0 = (++it)->u.operand; - dataLog("[%4d] put_global_var\t g%d(%p), %s\n", location, m_globalObject->findRegisterIndex(registerPointer), 
registerPointer, registerName(exec, r0).data()); + dataLog("[%4d] put_global_var\t g%d(%p), %s", location, m_globalObject->findRegisterIndex(registerPointer), registerPointer, registerName(exec, r0).data()); + dumpBytecodeCommentAndNewLine(location); break; } case op_put_global_var_check: { WriteBarrier<Unknown>* registerPointer = (++it)->u.registerPointer; int r0 = (++it)->u.operand; - dataLog("[%4d] put_global_var_check\t g%d(%p), %s\n", location, m_globalObject->findRegisterIndex(registerPointer), registerPointer, registerName(exec, r0).data()); + dataLog("[%4d] put_global_var_check\t g%d(%p), %s", location, m_globalObject->findRegisterIndex(registerPointer), registerPointer, registerName(exec, r0).data()); + dumpBytecodeCommentAndNewLine(location); it++; it++; break; @@ -960,21 +1005,24 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator& int r0 = (++it)->u.operand; int id0 = (++it)->u.operand; int isStrict = (++it)->u.operand; - dataLog("[%4d] resolve_base%s\t %s, %s\n", location, isStrict ? "_strict" : "", registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data()); + dataLog("[%4d] resolve_base%s\t %s, %s", location, isStrict ? 
"_strict" : "", registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data()); + dumpBytecodeCommentAndNewLine(location); it++; break; } case op_ensure_property_exists: { int r0 = (++it)->u.operand; int id0 = (++it)->u.operand; - dataLog("[%4d] ensure_property_exists\t %s, %s\n", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data()); + dataLog("[%4d] ensure_property_exists\t %s, %s", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data()); + dumpBytecodeCommentAndNewLine(location); break; } case op_resolve_with_base: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int id0 = (++it)->u.operand; - dataLog("[%4d] resolve_with_base %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data()); + dataLog("[%4d] resolve_with_base %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data()); + dumpBytecodeCommentAndNewLine(location); it++; break; } @@ -982,11 +1030,13 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator& int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int id0 = (++it)->u.operand; - dataLog("[%4d] resolve_with_this %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data()); + dataLog("[%4d] resolve_with_this %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data()); + dumpBytecodeCommentAndNewLine(location); it++; break; } case op_get_by_id: + case op_get_by_id_out_of_line: case op_get_by_id_self: case op_get_by_id_proto: case op_get_by_id_chain: @@ -1001,7 +1051,7 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator& case op_get_string_length: { printGetByIdOp(exec, location, it); printGetByIdCacheStatus(exec, location); - dataLog("\n"); + 
dumpBytecodeCommentAndNewLine(location); break; } case op_get_arguments_length: { @@ -1013,6 +1063,10 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator& printPutByIdOp(exec, location, it, "put_by_id"); break; } + case op_put_by_id_out_of_line: { + printPutByIdOp(exec, location, it, "put_by_id_out_of_line"); + break; + } case op_put_by_id_replace: { printPutByIdOp(exec, location, it, "put_by_id_replace"); break; @@ -1025,10 +1079,18 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator& printPutByIdOp(exec, location, it, "put_by_id_transition_direct"); break; } + case op_put_by_id_transition_direct_out_of_line: { + printPutByIdOp(exec, location, it, "put_by_id_transition_direct_out_of_line"); + break; + } case op_put_by_id_transition_normal: { printPutByIdOp(exec, location, it, "put_by_id_transition_normal"); break; } + case op_put_by_id_transition_normal_out_of_line: { + printPutByIdOp(exec, location, it, "put_by_id_transition_normal_out_of_line"); + break; + } case op_put_by_id_generic: { printPutByIdOp(exec, location, it, "put_by_id_generic"); break; @@ -1038,7 +1100,8 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator& int id0 = (++it)->u.operand; int r1 = (++it)->u.operand; int r2 = (++it)->u.operand; - dataLog("[%4d] put_getter_setter\t %s, %s, %s, %s\n", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), registerName(exec, r1).data(), registerName(exec, r2).data()); + dataLog("[%4d] put_getter_setter\t %s, %s, %s, %s", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), registerName(exec, r1).data(), registerName(exec, r2).data()); + dumpBytecodeCommentAndNewLine(location); break; } case op_method_check: { @@ -1070,7 +1133,7 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator& dataLog(")"); } #endif - dataLog("\n"); + dumpBytecodeCommentAndNewLine(location); ++it; 
printGetByIdOp(exec, location, it); printGetByIdCacheStatus(exec, location); @@ -1081,14 +1144,16 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator& int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int id0 = (++it)->u.operand; - dataLog("[%4d] del_by_id\t %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data()); + dataLog("[%4d] del_by_id\t %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), idName(id0, m_identifiers[id0]).data()); + dumpBytecodeCommentAndNewLine(location); break; } case op_get_by_val: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int r2 = (++it)->u.operand; - dataLog("[%4d] get_by_val\t %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data()); + dataLog("[%4d] get_by_val\t %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data()); + dumpBytecodeCommentAndNewLine(location); it++; break; } @@ -1096,7 +1161,8 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator& int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int r2 = (++it)->u.operand; - dataLog("[%4d] get_argument_by_val\t %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data()); + dataLog("[%4d] get_argument_by_val\t %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data()); + dumpBytecodeCommentAndNewLine(location); ++it; break; } @@ -1107,38 +1173,44 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator& int r3 = (++it)->u.operand; int r4 = (++it)->u.operand; int r5 = (++it)->u.operand; - dataLog("[%4d] get_by_pname\t %s, %s, %s, %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, 
r2).data(), registerName(exec, r3).data(), registerName(exec, r4).data(), registerName(exec, r5).data()); + dataLog("[%4d] get_by_pname\t %s, %s, %s, %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data(), registerName(exec, r3).data(), registerName(exec, r4).data(), registerName(exec, r5).data()); + dumpBytecodeCommentAndNewLine(location); break; } case op_put_by_val: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int r2 = (++it)->u.operand; - dataLog("[%4d] put_by_val\t %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data()); + dataLog("[%4d] put_by_val\t %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data()); + dumpBytecodeCommentAndNewLine(location); break; } case op_del_by_val: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int r2 = (++it)->u.operand; - dataLog("[%4d] del_by_val\t %s, %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data()); + dataLog("[%4d] del_by_val\t %s, %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data()); + dumpBytecodeCommentAndNewLine(location); break; } case op_put_by_index: { int r0 = (++it)->u.operand; unsigned n0 = (++it)->u.operand; int r1 = (++it)->u.operand; - dataLog("[%4d] put_by_index\t %s, %u, %s\n", location, registerName(exec, r0).data(), n0, registerName(exec, r1).data()); + dataLog("[%4d] put_by_index\t %s, %u, %s", location, registerName(exec, r0).data(), n0, registerName(exec, r1).data()); + dumpBytecodeCommentAndNewLine(location); break; } case op_jmp: { int offset = (++it)->u.operand; - dataLog("[%4d] jmp\t\t %d(->%d)\n", location, offset, location + offset); + dataLog("[%4d] jmp\t\t %d(->%d)", location, offset, location + offset); + dumpBytecodeCommentAndNewLine(location); break; } case op_loop: { int 
offset = (++it)->u.operand; - dataLog("[%4d] loop\t\t %d(->%d)\n", location, offset, location + offset); + dataLog("[%4d] loop\t\t %d(->%d)", location, offset, location + offset); + dumpBytecodeCommentAndNewLine(location); break; } case op_jtrue: { @@ -1169,129 +1241,148 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator& int r0 = (++it)->u.operand; void* pointer = (++it)->u.pointer; int offset = (++it)->u.operand; - dataLog("[%4d] jneq_ptr\t\t %s, %p, %d(->%d)\n", location, registerName(exec, r0).data(), pointer, offset, location + offset); + dataLog("[%4d] jneq_ptr\t\t %s, %p, %d(->%d)", location, registerName(exec, r0).data(), pointer, offset, location + offset); + dumpBytecodeCommentAndNewLine(location); break; } case op_jless: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int offset = (++it)->u.operand; - dataLog("[%4d] jless\t\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset); + dataLog("[%4d] jless\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset); + dumpBytecodeCommentAndNewLine(location); break; } case op_jlesseq: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int offset = (++it)->u.operand; - dataLog("[%4d] jlesseq\t\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset); + dataLog("[%4d] jlesseq\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset); + dumpBytecodeCommentAndNewLine(location); break; } case op_jgreater: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int offset = (++it)->u.operand; - dataLog("[%4d] jgreater\t\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset); + dataLog("[%4d] jgreater\t\t %s, %s, %d(->%d)", location, 
registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset); + dumpBytecodeCommentAndNewLine(location); break; } case op_jgreatereq: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int offset = (++it)->u.operand; - dataLog("[%4d] jgreatereq\t\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset); + dataLog("[%4d] jgreatereq\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset); + dumpBytecodeCommentAndNewLine(location); break; } case op_jnless: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int offset = (++it)->u.operand; - dataLog("[%4d] jnless\t\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset); + dataLog("[%4d] jnless\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset); + dumpBytecodeCommentAndNewLine(location); break; } case op_jnlesseq: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int offset = (++it)->u.operand; - dataLog("[%4d] jnlesseq\t\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset); + dataLog("[%4d] jnlesseq\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset); + dumpBytecodeCommentAndNewLine(location); break; } case op_jngreater: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int offset = (++it)->u.operand; - dataLog("[%4d] jngreater\t\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset); + dataLog("[%4d] jngreater\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset); + dumpBytecodeCommentAndNewLine(location); break; } case 
op_jngreatereq: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int offset = (++it)->u.operand; - dataLog("[%4d] jngreatereq\t\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset); + dataLog("[%4d] jngreatereq\t\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset); + dumpBytecodeCommentAndNewLine(location); break; } case op_loop_if_less: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int offset = (++it)->u.operand; - dataLog("[%4d] loop_if_less\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset); + dataLog("[%4d] loop_if_less\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset); + dumpBytecodeCommentAndNewLine(location); break; } case op_loop_if_lesseq: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int offset = (++it)->u.operand; - dataLog("[%4d] loop_if_lesseq\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset); + dataLog("[%4d] loop_if_lesseq\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset); + dumpBytecodeCommentAndNewLine(location); break; } case op_loop_if_greater: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int offset = (++it)->u.operand; - dataLog("[%4d] loop_if_greater\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset); + dataLog("[%4d] loop_if_greater\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset); + dumpBytecodeCommentAndNewLine(location); break; } case op_loop_if_greatereq: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int offset = (++it)->u.operand; 
- dataLog("[%4d] loop_if_greatereq\t %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset); + dataLog("[%4d] loop_if_greatereq\t %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), offset, location + offset); + dumpBytecodeCommentAndNewLine(location); break; } case op_loop_hint: { - dataLog("[%4d] loop_hint\n", location); + dataLog("[%4d] loop_hint", location); + dumpBytecodeCommentAndNewLine(location); break; } case op_switch_imm: { int tableIndex = (++it)->u.operand; int defaultTarget = (++it)->u.operand; int scrutineeRegister = (++it)->u.operand; - dataLog("[%4d] switch_imm\t %d, %d(->%d), %s\n", location, tableIndex, defaultTarget, location + defaultTarget, registerName(exec, scrutineeRegister).data()); + dataLog("[%4d] switch_imm\t %d, %d(->%d), %s", location, tableIndex, defaultTarget, location + defaultTarget, registerName(exec, scrutineeRegister).data()); + dumpBytecodeCommentAndNewLine(location); break; } case op_switch_char: { int tableIndex = (++it)->u.operand; int defaultTarget = (++it)->u.operand; int scrutineeRegister = (++it)->u.operand; - dataLog("[%4d] switch_char\t %d, %d(->%d), %s\n", location, tableIndex, defaultTarget, location + defaultTarget, registerName(exec, scrutineeRegister).data()); + dataLog("[%4d] switch_char\t %d, %d(->%d), %s", location, tableIndex, defaultTarget, location + defaultTarget, registerName(exec, scrutineeRegister).data()); + dumpBytecodeCommentAndNewLine(location); break; } case op_switch_string: { int tableIndex = (++it)->u.operand; int defaultTarget = (++it)->u.operand; int scrutineeRegister = (++it)->u.operand; - dataLog("[%4d] switch_string\t %d, %d(->%d), %s\n", location, tableIndex, defaultTarget, location + defaultTarget, registerName(exec, scrutineeRegister).data()); + dataLog("[%4d] switch_string\t %d, %d(->%d), %s", location, tableIndex, defaultTarget, location + defaultTarget, registerName(exec, 
scrutineeRegister).data()); + dumpBytecodeCommentAndNewLine(location); break; } case op_new_func: { int r0 = (++it)->u.operand; int f0 = (++it)->u.operand; int shouldCheck = (++it)->u.operand; - dataLog("[%4d] new_func\t\t %s, f%d, %s\n", location, registerName(exec, r0).data(), f0, shouldCheck ? "<Checked>" : "<Unchecked>"); + dataLog("[%4d] new_func\t\t %s, f%d, %s", location, registerName(exec, r0).data(), f0, shouldCheck ? "<Checked>" : "<Unchecked>"); + dumpBytecodeCommentAndNewLine(location); break; } case op_new_func_exp: { int r0 = (++it)->u.operand; int f0 = (++it)->u.operand; - dataLog("[%4d] new_func_exp\t %s, f%d\n", location, registerName(exec, r0).data(), f0); + dataLog("[%4d] new_func_exp\t %s, f%d", location, registerName(exec, r0).data(), f0); + dumpBytecodeCommentAndNewLine(location); break; } case op_call: { @@ -1307,35 +1398,41 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator& int thisValue = (++it)->u.operand; int arguments = (++it)->u.operand; int firstFreeRegister = (++it)->u.operand; - dataLog("[%4d] call_varargs\t %s, %s, %s, %d\n", location, registerName(exec, callee).data(), registerName(exec, thisValue).data(), registerName(exec, arguments).data(), firstFreeRegister); + dataLog("[%4d] call_varargs\t %s, %s, %s, %d", location, registerName(exec, callee).data(), registerName(exec, thisValue).data(), registerName(exec, arguments).data(), firstFreeRegister); + dumpBytecodeCommentAndNewLine(location); break; } case op_tear_off_activation: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; - dataLog("[%4d] tear_off_activation\t %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data()); + dataLog("[%4d] tear_off_activation\t %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data()); + dumpBytecodeCommentAndNewLine(location); break; } case op_tear_off_arguments: { int r0 = (++it)->u.operand; - dataLog("[%4d] tear_off_arguments %s\n", location, 
registerName(exec, r0).data()); + dataLog("[%4d] tear_off_arguments %s", location, registerName(exec, r0).data()); + dumpBytecodeCommentAndNewLine(location); break; } case op_ret: { int r0 = (++it)->u.operand; - dataLog("[%4d] ret\t\t %s\n", location, registerName(exec, r0).data()); + dataLog("[%4d] ret\t\t %s", location, registerName(exec, r0).data()); + dumpBytecodeCommentAndNewLine(location); break; } case op_call_put_result: { int r0 = (++it)->u.operand; - dataLog("[%4d] call_put_result\t\t %s\n", location, registerName(exec, r0).data()); + dataLog("[%4d] call_put_result\t\t %s", location, registerName(exec, r0).data()); + dumpBytecodeCommentAndNewLine(location); it++; break; } case op_ret_object_or_this: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; - dataLog("[%4d] constructor_ret\t\t %s %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data()); + dataLog("[%4d] constructor_ret\t\t %s %s", location, registerName(exec, r0).data(), registerName(exec, r1).data()); + dumpBytecodeCommentAndNewLine(location); break; } case op_construct: { @@ -1346,13 +1443,15 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator& int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; int count = (++it)->u.operand; - dataLog("[%4d] strcat\t\t %s, %s, %d\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), count); + dataLog("[%4d] strcat\t\t %s, %s, %d", location, registerName(exec, r0).data(), registerName(exec, r1).data(), count); + dumpBytecodeCommentAndNewLine(location); break; } case op_to_primitive: { int r0 = (++it)->u.operand; int r1 = (++it)->u.operand; - dataLog("[%4d] to_primitive\t %s, %s\n", location, registerName(exec, r0).data(), registerName(exec, r1).data()); + dataLog("[%4d] to_primitive\t %s, %s", location, registerName(exec, r0).data(), registerName(exec, r1).data()); + dumpBytecodeCommentAndNewLine(location); break; } case op_get_pnames: { @@ -1361,7 +1460,8 @@ void 
CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator& int r2 = it[3].u.operand; int r3 = it[4].u.operand; int offset = it[5].u.operand; - dataLog("[%4d] get_pnames\t %s, %s, %s, %s, %d(->%d)\n", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data(), registerName(exec, r3).data(), offset, location + offset); + dataLog("[%4d] get_pnames\t %s, %s, %s, %s, %d(->%d)", location, registerName(exec, r0).data(), registerName(exec, r1).data(), registerName(exec, r2).data(), registerName(exec, r3).data(), offset, location + offset); + dumpBytecodeCommentAndNewLine(location); it += OPCODE_LENGTH(op_get_pnames) - 1; break; } @@ -1372,67 +1472,79 @@ void CodeBlock::dump(ExecState* exec, const Vector<Instruction>::const_iterator& int size = it[4].u.operand; int iter = it[5].u.operand; int offset = it[6].u.operand; - dataLog("[%4d] next_pname\t %s, %s, %s, %s, %s, %d(->%d)\n", location, registerName(exec, dest).data(), registerName(exec, base).data(), registerName(exec, i).data(), registerName(exec, size).data(), registerName(exec, iter).data(), offset, location + offset); + dataLog("[%4d] next_pname\t %s, %s, %s, %s, %s, %d(->%d)", location, registerName(exec, dest).data(), registerName(exec, base).data(), registerName(exec, i).data(), registerName(exec, size).data(), registerName(exec, iter).data(), offset, location + offset); + dumpBytecodeCommentAndNewLine(location); it += OPCODE_LENGTH(op_next_pname) - 1; break; } case op_push_scope: { int r0 = (++it)->u.operand; - dataLog("[%4d] push_scope\t %s\n", location, registerName(exec, r0).data()); + dataLog("[%4d] push_scope\t %s", location, registerName(exec, r0).data()); + dumpBytecodeCommentAndNewLine(location); break; } case op_pop_scope: { - dataLog("[%4d] pop_scope\n", location); + dataLog("[%4d] pop_scope", location); + dumpBytecodeCommentAndNewLine(location); break; } case op_push_new_scope: { int r0 = (++it)->u.operand; int id0 = (++it)->u.operand; int r1 
= (++it)->u.operand; - dataLog("[%4d] push_new_scope \t%s, %s, %s\n", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), registerName(exec, r1).data()); + dataLog("[%4d] push_new_scope \t%s, %s, %s", location, registerName(exec, r0).data(), idName(id0, m_identifiers[id0]).data(), registerName(exec, r1).data()); + dumpBytecodeCommentAndNewLine(location); break; } case op_jmp_scopes: { int scopeDelta = (++it)->u.operand; int offset = (++it)->u.operand; - dataLog("[%4d] jmp_scopes\t^%d, %d(->%d)\n", location, scopeDelta, offset, location + offset); + dataLog("[%4d] jmp_scopes\t^%d, %d(->%d)", location, scopeDelta, offset, location + offset); + dumpBytecodeCommentAndNewLine(location); break; } case op_catch: { int r0 = (++it)->u.operand; - dataLog("[%4d] catch\t\t %s\n", location, registerName(exec, r0).data()); + dataLog("[%4d] catch\t\t %s", location, registerName(exec, r0).data()); + dumpBytecodeCommentAndNewLine(location); break; } case op_throw: { int r0 = (++it)->u.operand; - dataLog("[%4d] throw\t\t %s\n", location, registerName(exec, r0).data()); + dataLog("[%4d] throw\t\t %s", location, registerName(exec, r0).data()); + dumpBytecodeCommentAndNewLine(location); break; } case op_throw_reference_error: { int k0 = (++it)->u.operand; - dataLog("[%4d] throw_reference_error\t %s\n", location, constantName(exec, k0, getConstant(k0)).data()); + dataLog("[%4d] throw_reference_error\t %s", location, constantName(exec, k0, getConstant(k0)).data()); + dumpBytecodeCommentAndNewLine(location); break; } case op_debug: { int debugHookID = (++it)->u.operand; int firstLine = (++it)->u.operand; int lastLine = (++it)->u.operand; - dataLog("[%4d] debug\t\t %s, %d, %d\n", location, debugHookName(debugHookID), firstLine, lastLine); + dataLog("[%4d] debug\t\t %s, %d, %d", location, debugHookName(debugHookID), firstLine, lastLine); + dumpBytecodeCommentAndNewLine(location); break; } case op_profile_will_call: { int function = (++it)->u.operand; - 
dataLog("[%4d] profile_will_call %s\n", location, registerName(exec, function).data()); + dataLog("[%4d] profile_will_call %s", location, registerName(exec, function).data()); + dumpBytecodeCommentAndNewLine(location); break; } case op_profile_did_call: { int function = (++it)->u.operand; - dataLog("[%4d] profile_did_call\t %s\n", location, registerName(exec, function).data()); + dataLog("[%4d] profile_did_call\t %s", location, registerName(exec, function).data()); + dumpBytecodeCommentAndNewLine(location); break; } case op_end: { int r0 = (++it)->u.operand; - dataLog("[%4d] end\t\t %s\n", location, registerName(exec, r0).data()); + dataLog("[%4d] end\t\t %s", location, registerName(exec, r0).data()); + dumpBytecodeCommentAndNewLine(location); break; } } @@ -1595,12 +1707,13 @@ CodeBlock::CodeBlock(CopyParsedBlockTag, CodeBlock& other, SymbolTable* symTab) , m_functionDecls(other.m_functionDecls) , m_functionExprs(other.m_functionExprs) , m_symbolTable(symTab) - , m_speculativeSuccessCounter(0) - , m_speculativeFailCounter(0) - , m_forcedOSRExitCounter(0) + , m_osrExitCounter(0) , m_optimizationDelayCounter(0) , m_reoptimizationRetryCounter(0) , m_lineInfo(other.m_lineInfo) +#if ENABLE(BYTECODE_COMMENTS) + , m_bytecodeCommentIterator(0) +#endif #if ENABLE(JIT) , m_canCompileWithDFGState(DFG::CapabilityLevelNotSet) #endif @@ -1649,10 +1762,12 @@ CodeBlock::CodeBlock(ScriptExecutable* ownerExecutable, CodeType codeType, JSGlo #endif , m_symbolTable(symTab) , m_alternative(alternative) - , m_speculativeSuccessCounter(0) - , m_speculativeFailCounter(0) + , m_osrExitCounter(0) , m_optimizationDelayCounter(0) , m_reoptimizationRetryCounter(0) +#if ENABLE(BYTECODE_COMMENTS) + , m_bytecodeCommentIterator(0) +#endif { ASSERT(m_source); @@ -1930,7 +2045,9 @@ void CodeBlock::finalizeUnconditionally() Instruction* curInstruction = &instructions()[m_propertyAccessInstructions[i]]; switch (interpreter->getOpcodeID(curInstruction[0].u.opcode)) { case op_get_by_id: + case 
op_get_by_id_out_of_line: case op_put_by_id: + case op_put_by_id_out_of_line: if (!curInstruction[4].u.structure || Heap::isMarked(curInstruction[4].u.structure.get())) break; if (verboseUnlinking) @@ -1940,6 +2057,8 @@ void CodeBlock::finalizeUnconditionally() break; case op_put_by_id_transition_direct: case op_put_by_id_transition_normal: + case op_put_by_id_transition_direct_out_of_line: + case op_put_by_id_transition_normal_out_of_line: if (Heap::isMarked(curInstruction[4].u.structure.get()) && Heap::isMarked(curInstruction[6].u.structure.get()) && Heap::isMarked(curInstruction[7].u.structureChain.get())) @@ -2127,6 +2246,82 @@ void CodeBlock::stronglyVisitWeakReferences(SlotVisitor& visitor) #endif } +#if ENABLE(BYTECODE_COMMENTS) +// Finds the comment string for the specified bytecode offset/PC is available. +const char* CodeBlock::commentForBytecodeOffset(unsigned bytecodeOffset) +{ + ASSERT(bytecodeOffset < instructions().size()); + + Vector<Comment>& comments = m_bytecodeComments; + size_t numberOfComments = comments.size(); + const char* result = 0; + + if (!numberOfComments) + return 0; // No comments to match with. + + // The next match is most likely the next comment in the list. + // Do a quick check to see if that is a match first. + // m_bytecodeCommentIterator should already be pointing to the + // next comment we should check. + + ASSERT(m_bytecodeCommentIterator < comments.size()); + + size_t i = m_bytecodeCommentIterator; + size_t commentPC = comments[i].pc; + if (commentPC == bytecodeOffset) { + // We've got a match. All done! + m_bytecodeCommentIterator = i; + result = comments[i].string; + } else if (commentPC > bytecodeOffset) { + // The current comment is already greater than the requested PC. + // Start searching from the first comment. + i = 0; + } else { + // Otherwise, the current comment's PC is less than the requested PC. + // Hence, we can just start searching from the next comment in the + // list. 
+ i++; + } + + // If the result is still not found, do a linear search in the range + // that we've determined above. + if (!result) { + for (; i < comments.size(); ++i) { + commentPC = comments[i].pc; + if (commentPC == bytecodeOffset) { + result = comments[i].string; + break; + } + if (comments[i].pc > bytecodeOffset) { + // The current comment PC is already past the requested + // bytecodeOffset. Hence, there are no more possible + // matches. Just fail. + break; + } + } + } + + // Update the iterator to point to the next comment. + if (++i >= numberOfComments) { + // At most point to the last comment entry. This ensures that the + // next time we call this function, the quick checks will at least + // have one entry to check and can fail fast if appropriate. + i = numberOfComments - 1; + } + m_bytecodeCommentIterator = i; + return result; +} + +void CodeBlock::dumpBytecodeComments() +{ + Vector<Comment>& comments = m_bytecodeComments; + printf("Comments for codeblock %p: size %lu\n", this, comments.size()); + for (size_t i = 0; i < comments.size(); ++i) + printf(" pc %lu : '%s'\n", comments[i].pc, comments[i].string); + printf("End of comments for codeblock %p\n", this); +} +#endif // ENABLE_BYTECODE_COMMENTS + HandlerInfo* CodeBlock::handlerForBytecodeOffset(unsigned bytecodeOffset) { ASSERT(bytecodeOffset < instructions().size()); @@ -2303,6 +2498,8 @@ void CodeBlock::shrinkToFit(ShrinkMode shrinkMode) m_dfgData->speculationRecovery.shrinkToFit(); m_dfgData->weakReferences.shrinkToFit(); m_dfgData->transitions.shrinkToFit(); + m_dfgData->minifiedDFG.prepareAndShrink(); + m_dfgData->variableEventStream.shrinkToFit(); } #endif } @@ -2596,7 +2793,7 @@ bool CodeBlock::shouldOptimizeNow() dumpValueProfiles(); #endif - if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay) + if (m_optimizationDelayCounter >= Options::maximumOptimizationDelay()) return true; unsigned numberOfLiveNonArgumentValueProfiles; @@ -2607,9 +2804,9 @@ bool 
CodeBlock::shouldOptimizeNow() dataLog("Profile hotness: %lf, %lf\n", (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles(), (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / numberOfValueProfiles()); #endif - if ((!numberOfValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles() >= Options::desiredProfileLivenessRate) - && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate) - && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay) + if ((!numberOfValueProfiles() || (double)numberOfLiveNonArgumentValueProfiles / numberOfValueProfiles() >= Options::desiredProfileLivenessRate()) + && (!totalNumberOfValueProfiles() || (double)numberOfSamplesInProfiles / ValueProfile::numberOfBuckets / totalNumberOfValueProfiles() >= Options::desiredProfileFullnessRate()) + && static_cast<unsigned>(m_optimizationDelayCounter) + 1 >= Options::minimumOptimizationDelay()) return true; ASSERT(m_optimizationDelayCounter < std::numeric_limits<uint8_t>::max()); @@ -2670,7 +2867,7 @@ void CodeBlock::dumpValueProfiles() dataLog(" bc = %d: %u\n", profile->m_bytecodeOffset, profile->m_counter); } } -#endif +#endif // ENABLE(VERBOSE_VALUE_PROFILE) size_t CodeBlock::predictedMachineCodeSize() { diff --git a/Source/JavaScriptCore/bytecode/CodeBlock.h b/Source/JavaScriptCore/bytecode/CodeBlock.h index 4e4fee2b7..ed072f832 100644 --- a/Source/JavaScriptCore/bytecode/CodeBlock.h +++ b/Source/JavaScriptCore/bytecode/CodeBlock.h @@ -35,12 +35,15 @@ #include "CallReturnOffsetToBytecodeOffset.h" #include "CodeOrigin.h" #include "CodeType.h" +#include "Comment.h" #include "CompactJITCodeMap.h" #include "DFGCodeBlocks.h" #include "DFGCommon.h" #include "DFGExitProfile.h" +#include "DFGMinifiedGraph.h" #include "DFGOSREntry.h" #include "DFGOSRExit.h" +#include "DFGVariableEventStream.h" 
#include "EvalCodeCache.h" #include "ExecutionCounter.h" #include "ExpressionRangeInfo.h" @@ -66,10 +69,35 @@ #include <wtf/RefCountedArray.h> #include <wtf/FastAllocBase.h> #include <wtf/PassOwnPtr.h> +#include <wtf/Platform.h> #include <wtf/RefPtr.h> #include <wtf/SegmentedVector.h> #include <wtf/Vector.h> +// Set ENABLE_BYTECODE_COMMENTS to 1 to enable recording bytecode generator +// comments for the bytecodes that it generates. This will allow +// CodeBlock::dump() to provide some contextual info about the bytecodes. +// +// The way this comment system works is as follows: +// 1. The BytecodeGenerator calls prependComment() with a constant comment +// string in .text. The string must not be a stack or heap allocated +// string. +// 2. When the BytecodeGenerator's emitOpcode() is called, the last +// prepended comment will be recorded with the PC of the opcode being +// emitted. This comment is being recorded in the CodeBlock's +// m_bytecodeComments. +// 3. When CodeBlock::dump() is called, it will pair up the comments with +// their corresponding bytecodes based on the bytecode and comment's +// PC. If a matching pair is found, the comment will be printed after +// the bytecode. If not, no comment is printed. +// +// NOTE: Enabling this will consume additional memory at runtime to store +// the comments. Since these comments are only useful for VM debugging +// (as opposed to app debugging), this feature is to be disabled by default, +// and can be enabled as needed for VM development use only. 
+ +#define ENABLE_BYTECODE_COMMENTS 0 + namespace JSC { class DFGCodeBlocks; @@ -155,6 +183,12 @@ namespace JSC { return index >= m_numVars; } + void dumpBytecodeCommentAndNewLine(int location); +#if ENABLE(BYTECODE_COMMENTS) + const char* commentForBytecodeOffset(unsigned bytecodeOffset); + void dumpBytecodeComments(); +#endif + HandlerInfo* handlerForBytecodeOffset(unsigned bytecodeOffset); int lineNumberForBytecodeOffset(unsigned bytecodeOffset); void expressionRangeForBytecodeOffset(unsigned bytecodeOffset, int& divot, int& startOffset, int& endOffset); @@ -350,6 +384,18 @@ namespace JSC { m_dfgData->transitions.append( WeakReferenceTransition(*globalData(), ownerExecutable(), codeOrigin, from, to)); } + + DFG::MinifiedGraph& minifiedDFG() + { + createDFGDataIfNecessary(); + return m_dfgData->minifiedDFG; + } + + DFG::VariableEventStream& variableEventStream() + { + createDFGDataIfNecessary(); + return m_dfgData->variableEventStream; + } #endif unsigned bytecodeOffset(Instruction* returnAddress) @@ -365,6 +411,10 @@ namespace JSC { RefCountedArray<Instruction>& instructions() { return m_instructions; } const RefCountedArray<Instruction>& instructions() const { return m_instructions; } +#if ENABLE(BYTECODE_COMMENTS) + Vector<Comment>& bytecodeComments() { return m_bytecodeComments; } +#endif + size_t predictedMachineCodeSize(); bool usesOpcode(OpcodeID); @@ -635,7 +685,7 @@ namespace JSC { if (!numberOfRareCaseProfiles()) return false; unsigned value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; - return value >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold; + return value >= Options::likelyToTakeSlowCaseMinimumCount() && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold(); } bool couldTakeSlowCase(int bytecodeOffset) @@ -643,7 +693,7 @@ namespace JSC { if (!numberOfRareCaseProfiles()) return false; unsigned 
value = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; - return value >= Options::couldTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::couldTakeSlowCaseThreshold; + return value >= Options::couldTakeSlowCaseMinimumCount() && static_cast<double>(value) / m_executionEntryCount >= Options::couldTakeSlowCaseThreshold(); } RareCaseProfile* addSpecialFastCaseProfile(int bytecodeOffset) @@ -663,7 +713,15 @@ namespace JSC { if (!numberOfRareCaseProfiles()) return false; unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; - return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(specialFastCaseCount) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold; + return specialFastCaseCount >= Options::likelyToTakeSlowCaseMinimumCount() && static_cast<double>(specialFastCaseCount) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold(); + } + + bool couldTakeSpecialFastCase(int bytecodeOffset) + { + if (!numberOfRareCaseProfiles()) + return false; + unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; + return specialFastCaseCount >= Options::couldTakeSlowCaseMinimumCount() && static_cast<double>(specialFastCaseCount) / m_executionEntryCount >= Options::couldTakeSlowCaseThreshold(); } bool likelyToTakeDeepestSlowCase(int bytecodeOffset) @@ -673,7 +731,7 @@ namespace JSC { unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; unsigned value = slowCaseCount - specialFastCaseCount; - return value >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold; + return value >= Options::likelyToTakeSlowCaseMinimumCount() && static_cast<double>(value) / 
m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold(); } bool likelyToTakeAnySlowCase(int bytecodeOffset) @@ -683,7 +741,7 @@ namespace JSC { unsigned slowCaseCount = rareCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; unsigned specialFastCaseCount = specialFastCaseProfileForBytecodeOffset(bytecodeOffset)->m_counter; unsigned value = slowCaseCount + specialFastCaseCount; - return value >= Options::likelyToTakeSlowCaseMinimumCount && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold; + return value >= Options::likelyToTakeSlowCaseMinimumCount() && static_cast<double>(value) / m_executionEntryCount >= Options::likelyToTakeSlowCaseThreshold(); } unsigned executionEntryCount() const { return m_executionEntryCount; } @@ -905,12 +963,12 @@ namespace JSC { void jitAfterWarmUp() { - m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITAfterWarmUp, this); + m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITAfterWarmUp(), this); } void jitSoon() { - m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITSoon, this); + m_llintExecuteCounter.setNewThreshold(Options::thresholdForJITSoon(), this); } const ExecutionCounter& llintExecuteCounter() const @@ -941,25 +999,25 @@ namespace JSC { // to avoid thrashing. 
unsigned reoptimizationRetryCounter() const { - ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax); + ASSERT(m_reoptimizationRetryCounter <= Options::reoptimizationRetryCounterMax()); return m_reoptimizationRetryCounter; } void countReoptimization() { m_reoptimizationRetryCounter++; - if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax) - m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax; + if (m_reoptimizationRetryCounter > Options::reoptimizationRetryCounterMax()) + m_reoptimizationRetryCounter = Options::reoptimizationRetryCounterMax(); } int32_t counterValueForOptimizeAfterWarmUp() { - return Options::thresholdForOptimizeAfterWarmUp << reoptimizationRetryCounter(); + return Options::thresholdForOptimizeAfterWarmUp() << reoptimizationRetryCounter(); } int32_t counterValueForOptimizeAfterLongWarmUp() { - return Options::thresholdForOptimizeAfterLongWarmUp << reoptimizationRetryCounter(); + return Options::thresholdForOptimizeAfterLongWarmUp() << reoptimizationRetryCounter(); } int32_t* addressOfJITExecuteCounter() @@ -1039,62 +1097,51 @@ namespace JSC { // in the baseline code. void optimizeSoon() { - m_jitExecuteCounter.setNewThreshold(Options::thresholdForOptimizeSoon << reoptimizationRetryCounter(), this); + m_jitExecuteCounter.setNewThreshold(Options::thresholdForOptimizeSoon() << reoptimizationRetryCounter(), this); } - // The speculative JIT tracks its success rate, so that we can - // decide when to reoptimize. It's interesting to note that these - // counters may overflow without any protection. The success - // counter will overflow before the fail one does, becuase the - // fail one is used as a trigger to reoptimize. So the worst case - // is that the success counter overflows and we reoptimize without - // needing to. But this is harmless. If a method really did - // execute 2^32 times then compiling it again probably won't hurt - // anyone. 
+ uint32_t osrExitCounter() const { return m_osrExitCounter; } - void countSpeculationSuccess() - { - m_speculativeSuccessCounter++; + void countOSRExit() { m_osrExitCounter++; } + + uint32_t* addressOfOSRExitCounter() { return &m_osrExitCounter; } + + static ptrdiff_t offsetOfOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_osrExitCounter); } + +#if ENABLE(JIT) + uint32_t adjustedExitCountThreshold(uint32_t desiredThreshold) + { + ASSERT(getJITType() == JITCode::DFGJIT); + // Compute this the lame way so we don't saturate. This is called infrequently + // enough that this loop won't hurt us. + unsigned result = desiredThreshold; + for (unsigned n = baselineVersion()->reoptimizationRetryCounter(); n--;) { + unsigned newResult = result << 1; + if (newResult < result) + return std::numeric_limits<uint32_t>::max(); + result = newResult; + } + return result; } - void countSpeculationFailure() + uint32_t exitCountThresholdForReoptimization() { - m_speculativeFailCounter++; + return adjustedExitCountThreshold(Options::osrExitCountForReoptimization()); } - uint32_t speculativeSuccessCounter() const { return m_speculativeSuccessCounter; } - uint32_t speculativeFailCounter() const { return m_speculativeFailCounter; } - uint32_t forcedOSRExitCounter() const { return m_forcedOSRExitCounter; } - - uint32_t* addressOfSpeculativeSuccessCounter() { return &m_speculativeSuccessCounter; } - uint32_t* addressOfSpeculativeFailCounter() { return &m_speculativeFailCounter; } - uint32_t* addressOfForcedOSRExitCounter() { return &m_forcedOSRExitCounter; } - - static ptrdiff_t offsetOfSpeculativeSuccessCounter() { return OBJECT_OFFSETOF(CodeBlock, m_speculativeSuccessCounter); } - static ptrdiff_t offsetOfSpeculativeFailCounter() { return OBJECT_OFFSETOF(CodeBlock, m_speculativeFailCounter); } - static ptrdiff_t offsetOfForcedOSRExitCounter() { return OBJECT_OFFSETOF(CodeBlock, m_forcedOSRExitCounter); } - -#if ENABLE(JIT) - // The number of failures that triggers the use of the 
ratio. - unsigned largeFailCountThreshold() { return Options::largeFailCountThresholdBase << baselineVersion()->reoptimizationRetryCounter(); } - unsigned largeFailCountThresholdForLoop() { return Options::largeFailCountThresholdBaseForLoop << baselineVersion()->reoptimizationRetryCounter(); } + uint32_t exitCountThresholdForReoptimizationFromLoop() + { + return adjustedExitCountThreshold(Options::osrExitCountForReoptimizationFromLoop()); + } bool shouldReoptimizeNow() { - return (Options::desiredSpeculativeSuccessFailRatio * - speculativeFailCounter() >= speculativeSuccessCounter() - && speculativeFailCounter() >= largeFailCountThreshold()) - || forcedOSRExitCounter() >= - Options::forcedOSRExitCountForReoptimization; + return osrExitCounter() >= exitCountThresholdForReoptimization(); } - + bool shouldReoptimizeFromLoopNow() { - return (Options::desiredSpeculativeSuccessFailRatio * - speculativeFailCounter() >= speculativeSuccessCounter() - && speculativeFailCounter() >= largeFailCountThresholdForLoop()) - || forcedOSRExitCounter() >= - Options::forcedOSRExitCountForReoptimization; + return osrExitCounter() >= exitCountThresholdForReoptimizationFromLoop(); } #endif @@ -1255,6 +1302,8 @@ namespace JSC { SegmentedVector<Watchpoint, 1, 0> watchpoints; Vector<WeakReferenceTransition> transitions; Vector<WriteBarrier<JSCell> > weakReferences; + DFG::VariableEventStream variableEventStream; + DFG::MinifiedGraph minifiedDFG; bool mayBeExecuting; bool isJettisoned; bool livenessHasBeenProved; // Initialized and used on every GC. 
@@ -1295,13 +1344,15 @@ namespace JSC { ExecutionCounter m_jitExecuteCounter; int32_t m_totalJITExecutions; - uint32_t m_speculativeSuccessCounter; - uint32_t m_speculativeFailCounter; - uint32_t m_forcedOSRExitCounter; + uint32_t m_osrExitCounter; uint16_t m_optimizationDelayCounter; uint16_t m_reoptimizationRetryCounter; Vector<LineInfo> m_lineInfo; +#if ENABLE(BYTECODE_COMMENTS) + Vector<Comment> m_bytecodeComments; + size_t m_bytecodeCommentIterator; +#endif struct RareData { WTF_MAKE_FAST_ALLOCATED; diff --git a/Source/JavaScriptCore/bytecode/Comment.h b/Source/JavaScriptCore/bytecode/Comment.h new file mode 100644 index 000000000..c28f3a068 --- /dev/null +++ b/Source/JavaScriptCore/bytecode/Comment.h @@ -0,0 +1,37 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE + * DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR ITS CONTRIBUTORS BE LIABLE FOR ANY + * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; + * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON + * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS + * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef Comment_h +#define Comment_h + +namespace JSC { + +struct Comment { + size_t pc; + const char* string; +}; + +} // namespace JSC + +#endif // Comment_h diff --git a/Source/JavaScriptCore/bytecode/DataFormat.h b/Source/JavaScriptCore/bytecode/DataFormat.h index 4f015486a..51c8afbf6 100644 --- a/Source/JavaScriptCore/bytecode/DataFormat.h +++ b/Source/JavaScriptCore/bytecode/DataFormat.h @@ -47,7 +47,14 @@ enum DataFormat { DataFormatJSInteger = DataFormatJS | DataFormatInteger, DataFormatJSDouble = DataFormatJS | DataFormatDouble, DataFormatJSCell = DataFormatJS | DataFormatCell, - DataFormatJSBoolean = DataFormatJS | DataFormatBoolean + DataFormatJSBoolean = DataFormatJS | DataFormatBoolean, + + // Marker deliminating ordinary data formats and OSR-only data formats. + DataFormatOSRMarker = 32, + + // Special data formats used only for OSR. + DataFormatDead = 33, // Implies jsUndefined(). + DataFormatArguments = 34 // Implies that the arguments object must be reified. 
}; inline const char* dataFormatToString(DataFormat dataFormat) diff --git a/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp b/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp index 1f2e8260a..12a404981 100644 --- a/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp +++ b/Source/JavaScriptCore/bytecode/ExecutionCounter.cpp @@ -144,13 +144,7 @@ bool ExecutionCounter::setThreshold(CodeBlock* codeBlock) return true; } - int32_t maxThreshold; - if (Options::randomizeExecutionCountsBetweenCheckpoints) - maxThreshold = codeBlock->globalObject()->weakRandomInteger() % Options::maximumExecutionCountsBetweenCheckpoints; - else - maxThreshold = Options::maximumExecutionCountsBetweenCheckpoints; - if (threshold > maxThreshold) - threshold = maxThreshold; + threshold = clippedThreshold(codeBlock->globalObject(), threshold); m_counter = static_cast<int32_t>(-threshold); diff --git a/Source/JavaScriptCore/bytecode/ExecutionCounter.h b/Source/JavaScriptCore/bytecode/ExecutionCounter.h index 1c0d23f0f..f40650a31 100644 --- a/Source/JavaScriptCore/bytecode/ExecutionCounter.h +++ b/Source/JavaScriptCore/bytecode/ExecutionCounter.h @@ -26,6 +26,8 @@ #ifndef ExecutionCounter_h #define ExecutionCounter_h +#include "JSGlobalObject.h" +#include "Options.h" #include <wtf/SimpleStats.h> namespace JSC { @@ -42,6 +44,18 @@ public: const char* status() const; static double applyMemoryUsageHeuristics(int32_t value, CodeBlock*); static int32_t applyMemoryUsageHeuristicsAndConvertToInt(int32_t value, CodeBlock*); + template<typename T> + static T clippedThreshold(JSGlobalObject* globalObject, T threshold) + { + int32_t maxThreshold; + if (Options::randomizeExecutionCountsBetweenCheckpoints()) + maxThreshold = globalObject->weakRandomInteger() % Options::maximumExecutionCountsBetweenCheckpoints(); + else + maxThreshold = Options::maximumExecutionCountsBetweenCheckpoints(); + if (threshold > maxThreshold) + threshold = maxThreshold; + return threshold; + } static int32_t 
formattedTotalCount(float value) { diff --git a/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp b/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp index a62a43f7f..cb3e8e8b6 100644 --- a/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp +++ b/Source/JavaScriptCore/bytecode/GetByIdStatus.cpp @@ -48,9 +48,9 @@ GetByIdStatus GetByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned unsigned attributesIgnored; JSCell* specificValue; - size_t offset = structure->get( + PropertyOffset offset = structure->get( *profiledBlock->globalData(), ident, attributesIgnored, specificValue); - if (offset == notFound) + if (!isValidOffset(offset)) return GetByIdStatus(NoInformation, false); return GetByIdStatus(Simple, false, StructureSet(structure), offset, specificValue); @@ -88,7 +88,7 @@ void GetByIdStatus::computeForChain(GetByIdStatus& result, CodeBlock* profiledBl result.m_offset = currentStructure->get( *profiledBlock->globalData(), ident, attributesIgnored, specificValue); - if (result.m_offset == notFound) + if (!isValidOffset(result.m_offset)) return; result.m_structureSet.add(structure); @@ -156,12 +156,12 @@ GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, unsigned bytec result.m_offset = structure->get( *profiledBlock->globalData(), ident, attributesIgnored, specificValue); - if (result.m_offset != notFound) { + if (isValidOffset(result.m_offset)) { result.m_structureSet.add(structure); result.m_specificValue = JSValue(specificValue); } - if (result.m_offset != notFound) + if (isValidOffset(result.m_offset)) ASSERT(result.m_structureSet.size()); break; } @@ -176,11 +176,11 @@ GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, unsigned bytec unsigned attributesIgnored; JSCell* specificValue; - size_t myOffset = structure->get( + PropertyOffset myOffset = structure->get( *profiledBlock->globalData(), ident, attributesIgnored, specificValue); - if (myOffset == notFound) { - result.m_offset = notFound; + if (!isValidOffset(myOffset)) 
{ + result.m_offset = invalidOffset; break; } @@ -188,7 +188,7 @@ GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, unsigned bytec result.m_offset = myOffset; result.m_specificValue = JSValue(specificValue); } else if (result.m_offset != myOffset) { - result.m_offset = notFound; + result.m_offset = invalidOffset; break; } else if (result.m_specificValue != JSValue(specificValue)) result.m_specificValue = JSValue(); @@ -196,7 +196,7 @@ GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, unsigned bytec result.m_structureSet.add(structure); } - if (result.m_offset != notFound) + if (isValidOffset(result.m_offset)) ASSERT(result.m_structureSet.size()); break; } @@ -223,11 +223,11 @@ GetByIdStatus GetByIdStatus::computeFor(CodeBlock* profiledBlock, unsigned bytec } default: - ASSERT(result.m_offset == notFound); + ASSERT(!isValidOffset(result.m_offset)); break; } - if (result.m_offset == notFound) { + if (!isValidOffset(result.m_offset)) { result.m_state = TakesSlowPath; result.m_structureSet.clear(); result.m_chain.clear(); diff --git a/Source/JavaScriptCore/bytecode/GetByIdStatus.h b/Source/JavaScriptCore/bytecode/GetByIdStatus.h index 42eadfd68..297ec335f 100644 --- a/Source/JavaScriptCore/bytecode/GetByIdStatus.h +++ b/Source/JavaScriptCore/bytecode/GetByIdStatus.h @@ -26,6 +26,7 @@ #ifndef GetByIdStatus_h #define GetByIdStatus_h +#include "PropertyOffset.h" #include "StructureSet.h" #include <wtf/NotFound.h> @@ -46,13 +47,13 @@ public: GetByIdStatus() : m_state(NoInformation) - , m_offset(notFound) + , m_offset(invalidOffset) { } GetByIdStatus( State state, bool wasSeenInJIT, const StructureSet& structureSet = StructureSet(), - size_t offset = notFound, JSValue specificValue = JSValue(), Vector<Structure*> chain = Vector<Structure*>()) + size_t offset = invalidOffset, JSValue specificValue = JSValue(), Vector<Structure*> chain = Vector<Structure*>()) : m_state(state) , m_structureSet(structureSet) , m_chain(chain) @@ -76,7 +77,7 @@ 
public: const StructureSet& structureSet() const { return m_structureSet; } const Vector<Structure*>& chain() const { return m_chain; } // Returns empty vector if this is a direct access. JSValue specificValue() const { return m_specificValue; } // Returns JSValue() if there is no specific value. - size_t offset() const { return m_offset; } + PropertyOffset offset() const { return m_offset; } bool wasSeenInJIT() const { return m_wasSeenInJIT; } @@ -88,7 +89,7 @@ private: StructureSet m_structureSet; Vector<Structure*> m_chain; JSValue m_specificValue; - size_t m_offset; + PropertyOffset m_offset; bool m_wasSeenInJIT; }; diff --git a/Source/JavaScriptCore/bytecode/Instruction.h b/Source/JavaScriptCore/bytecode/Instruction.h index 6e76512ff..2e94b452c 100644 --- a/Source/JavaScriptCore/bytecode/Instruction.h +++ b/Source/JavaScriptCore/bytecode/Instruction.h @@ -29,6 +29,7 @@ #ifndef Instruction_h #define Instruction_h +#include "JITStubRoutine.h" #include "MacroAssembler.h" #include "Opcode.h" #include "PropertySlot.h" @@ -52,8 +53,6 @@ namespace JSC { struct ValueProfile; #if ENABLE(JIT) - typedef MacroAssemblerCodeRef PolymorphicAccessStructureListStubRoutineType; - // Structure used by op_get_by_id_self_list and op_get_by_id_proto_list instruction to hold data off the main opcode stream. 
struct PolymorphicAccessStructureList { WTF_MAKE_FAST_ALLOCATED; @@ -61,7 +60,7 @@ namespace JSC { struct PolymorphicStubInfo { bool isChain; bool isDirect; - PolymorphicAccessStructureListStubRoutineType stubRoutine; + RefPtr<JITStubRoutine> stubRoutine; WriteBarrier<Structure> base; union { WriteBarrierBase<Structure> proto; @@ -73,7 +72,7 @@ namespace JSC { u.proto.clear(); } - void set(JSGlobalData& globalData, JSCell* owner, PolymorphicAccessStructureListStubRoutineType _stubRoutine, Structure* _base, bool isDirect) + void set(JSGlobalData& globalData, JSCell* owner, PassRefPtr<JITStubRoutine> _stubRoutine, Structure* _base, bool isDirect) { stubRoutine = _stubRoutine; base.set(globalData, owner, _base); @@ -82,7 +81,7 @@ namespace JSC { this->isDirect = isDirect; } - void set(JSGlobalData& globalData, JSCell* owner, PolymorphicAccessStructureListStubRoutineType _stubRoutine, Structure* _base, Structure* _proto, bool isDirect) + void set(JSGlobalData& globalData, JSCell* owner, PassRefPtr<JITStubRoutine> _stubRoutine, Structure* _base, Structure* _proto, bool isDirect) { stubRoutine = _stubRoutine; base.set(globalData, owner, _base); @@ -91,7 +90,7 @@ namespace JSC { this->isDirect = isDirect; } - void set(JSGlobalData& globalData, JSCell* owner, PolymorphicAccessStructureListStubRoutineType _stubRoutine, Structure* _base, StructureChain* _chain, bool isDirect) + void set(JSGlobalData& globalData, JSCell* owner, PassRefPtr<JITStubRoutine> _stubRoutine, Structure* _base, StructureChain* _chain, bool isDirect) { stubRoutine = _stubRoutine; base.set(globalData, owner, _base); @@ -105,17 +104,17 @@ namespace JSC { { } - PolymorphicAccessStructureList(JSGlobalData& globalData, JSCell* owner, PolymorphicAccessStructureListStubRoutineType stubRoutine, Structure* firstBase, bool isDirect) + PolymorphicAccessStructureList(JSGlobalData& globalData, JSCell* owner, PassRefPtr<JITStubRoutine> stubRoutine, Structure* firstBase, bool isDirect) { list[0].set(globalData, 
owner, stubRoutine, firstBase, isDirect); } - PolymorphicAccessStructureList(JSGlobalData& globalData, JSCell* owner, PolymorphicAccessStructureListStubRoutineType stubRoutine, Structure* firstBase, Structure* firstProto, bool isDirect) + PolymorphicAccessStructureList(JSGlobalData& globalData, JSCell* owner, PassRefPtr<JITStubRoutine> stubRoutine, Structure* firstBase, Structure* firstProto, bool isDirect) { list[0].set(globalData, owner, stubRoutine, firstBase, firstProto, isDirect); } - PolymorphicAccessStructureList(JSGlobalData& globalData, JSCell* owner, PolymorphicAccessStructureListStubRoutineType stubRoutine, Structure* firstBase, StructureChain* firstChain, bool isDirect) + PolymorphicAccessStructureList(JSGlobalData& globalData, JSCell* owner, PassRefPtr<JITStubRoutine> stubRoutine, Structure* firstBase, StructureChain* firstChain, bool isDirect) { list[0].set(globalData, owner, stubRoutine, firstBase, firstChain, isDirect); } diff --git a/Source/JavaScriptCore/bytecode/Opcode.h b/Source/JavaScriptCore/bytecode/Opcode.h index e0cff165a..14cefb9d9 100644 --- a/Source/JavaScriptCore/bytecode/Opcode.h +++ b/Source/JavaScriptCore/bytecode/Opcode.h @@ -108,6 +108,7 @@ namespace JSC { macro(op_resolve_with_base, 5) /* has value profiling */ \ macro(op_resolve_with_this, 5) /* has value profiling */ \ macro(op_get_by_id, 9) /* has value profiling */ \ + macro(op_get_by_id_out_of_line, 9) /* has value profiling */ \ macro(op_get_by_id_self, 9) /* has value profiling */ \ macro(op_get_by_id_proto, 9) /* has value profiling */ \ macro(op_get_by_id_chain, 9) /* has value profiling */ \ @@ -122,9 +123,12 @@ namespace JSC { macro(op_get_string_length, 9) /* has value profiling */ \ macro(op_get_arguments_length, 4) \ macro(op_put_by_id, 9) \ + macro(op_put_by_id_out_of_line, 9) \ macro(op_put_by_id_transition, 9) \ macro(op_put_by_id_transition_direct, 9) \ + macro(op_put_by_id_transition_direct_out_of_line, 9) \ macro(op_put_by_id_transition_normal, 9) \ + 
macro(op_put_by_id_transition_normal_out_of_line, 9) \ macro(op_put_by_id_replace, 9) \ macro(op_put_by_id_generic, 9) \ macro(op_del_by_id, 4) \ diff --git a/Source/JavaScriptCore/bytecode/Operands.h b/Source/JavaScriptCore/bytecode/Operands.h index 05a24d0fd..8ea3e5b60 100644 --- a/Source/JavaScriptCore/bytecode/Operands.h +++ b/Source/JavaScriptCore/bytecode/Operands.h @@ -115,6 +115,13 @@ public: const T& operand(int operand) const { return const_cast<const T&>(const_cast<Operands*>(this)->operand(operand)); } + bool hasOperand(int operand) const + { + if (operandIsArgument(operand)) + return true; + return static_cast<size_t>(operand) < numberOfLocals(); + } + void setOperand(int operand, const T& value) { if (operandIsArgument(operand)) { @@ -126,6 +133,39 @@ public: setLocal(operand, value); } + size_t size() const { return numberOfArguments() + numberOfLocals(); } + const T& at(size_t index) const + { + if (index < numberOfArguments()) + return m_arguments[index]; + return m_locals[index - numberOfArguments()]; + } + T& at(size_t index) + { + if (index < numberOfArguments()) + return m_arguments[index]; + return m_locals[index - numberOfArguments()]; + } + const T& operator[](size_t index) const { return at(index); } + T& operator[](size_t index) { return at(index); } + + bool isArgument(size_t index) const { return index < numberOfArguments(); } + bool isVariable(size_t index) const { return !isArgument(index); } + int argumentForIndex(size_t index) const + { + return index; + } + int variableForIndex(size_t index) const + { + return index - m_arguments.size(); + } + int operandForIndex(size_t index) const + { + if (index < numberOfArguments()) + return argumentToOperand(index); + return index - numberOfArguments(); + } + void setOperandFirstTime(int operand, const T& value) { if (operandIsArgument(operand)) { @@ -165,6 +205,16 @@ void dumpOperands(Operands<T, Traits>& operands, FILE* out) } } +template<typename T, typename Traits> +void dumpOperands(const 
Operands<T, Traits>& operands, FILE* out) +{ + // Use const-cast because: + // 1) I don't feel like writing this code twice, and + // 2) Some dump() methods may not be const, and I don't really care if that's + // the case. + dumpOperands(*const_cast<Operands<T, Traits>*>(&operands), out); +} + } // namespace JSC #endif // Operands_h diff --git a/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.cpp b/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.cpp index 170615b73..3a87567d8 100644 --- a/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.cpp +++ b/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.cpp @@ -42,7 +42,7 @@ PutByIdAccess PutByIdAccess::fromStructureStubInfo( case access_put_by_id_replace: result.m_type = Replace; result.m_oldStructure.copyFrom(stubInfo.u.putByIdReplace.baseObjectStructure); - result.m_stubRoutine = MacroAssemblerCodeRef::createSelfManagedCodeRef(initialSlowPath); + result.m_stubRoutine = JITStubRoutine::createSelfManagedRoutine(initialSlowPath); break; case access_put_by_id_transition_direct: diff --git a/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.h b/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.h index 60b632d52..4a20b6d1c 100644 --- a/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.h +++ b/Source/JavaScriptCore/bytecode/PolymorphicPutByIdList.h @@ -60,7 +60,7 @@ public: Structure* oldStructure, Structure* newStructure, StructureChain* chain, - MacroAssemblerCodeRef stubRoutine) + PassRefPtr<JITStubRoutine> stubRoutine) { PutByIdAccess result; result.m_type = Transition; @@ -75,7 +75,7 @@ public: JSGlobalData& globalData, JSCell* owner, Structure* structure, - MacroAssemblerCodeRef stubRoutine) + PassRefPtr<JITStubRoutine> stubRoutine) { PutByIdAccess result; result.m_type = Replace; @@ -123,7 +123,7 @@ public: return m_chain.get(); } - MacroAssemblerCodeRef stubRoutine() const + PassRefPtr<JITStubRoutine> stubRoutine() const { ASSERT(isTransition() || isReplace()); return m_stubRoutine; @@ 
-136,7 +136,7 @@ private: WriteBarrier<Structure> m_oldStructure; WriteBarrier<Structure> m_newStructure; WriteBarrier<StructureChain> m_chain; - MacroAssemblerCodeRef m_stubRoutine; + RefPtr<JITStubRoutine> m_stubRoutine; }; class PolymorphicPutByIdList { @@ -161,7 +161,7 @@ public: MacroAssemblerCodePtr currentSlowPathTarget() const { - return m_list.last().stubRoutine().code(); + return m_list.last().stubRoutine()->code().code(); } void addAccess(const PutByIdAccess&); diff --git a/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp b/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp index 3715606fe..e9456313a 100644 --- a/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp +++ b/Source/JavaScriptCore/bytecode/PutByIdStatus.cpp @@ -43,12 +43,13 @@ PutByIdStatus PutByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned Structure* structure = instruction[4].u.structure.get(); if (!structure) - return PutByIdStatus(NoInformation, 0, 0, 0, notFound); + return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset); - if (instruction[0].u.opcode == llint_op_put_by_id) { - size_t offset = structure->get(*profiledBlock->globalData(), ident); - if (offset == notFound) - return PutByIdStatus(NoInformation, 0, 0, 0, notFound); + if (instruction[0].u.opcode == llint_op_put_by_id + || instruction[0].u.opcode == llint_op_put_by_id_out_of_line) { + PropertyOffset offset = structure->get(*profiledBlock->globalData(), ident); + if (!isValidOffset(offset)) + return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset); return PutByIdStatus(SimpleReplace, structure, 0, 0, offset); } @@ -56,20 +57,22 @@ PutByIdStatus PutByIdStatus::computeFromLLInt(CodeBlock* profiledBlock, unsigned ASSERT(structure->transitionWatchpointSetHasBeenInvalidated()); ASSERT(instruction[0].u.opcode == llint_op_put_by_id_transition_direct - || instruction[0].u.opcode == llint_op_put_by_id_transition_normal); + || instruction[0].u.opcode == llint_op_put_by_id_transition_normal + || instruction[0].u.opcode 
== llint_op_put_by_id_transition_direct_out_of_line + || instruction[0].u.opcode == llint_op_put_by_id_transition_normal_out_of_line); Structure* newStructure = instruction[6].u.structure.get(); StructureChain* chain = instruction[7].u.structureChain.get(); ASSERT(newStructure); ASSERT(chain); - size_t offset = newStructure->get(*profiledBlock->globalData(), ident); - if (offset == notFound) - return PutByIdStatus(NoInformation, 0, 0, 0, notFound); + PropertyOffset offset = newStructure->get(*profiledBlock->globalData(), ident); + if (!isValidOffset(offset)) + return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset); return PutByIdStatus(SimpleTransition, structure, newStructure, chain, offset); #else - return PutByIdStatus(NoInformation, 0, 0, 0, notFound); + return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset); #endif } @@ -83,7 +86,7 @@ PutByIdStatus PutByIdStatus::computeFor(CodeBlock* profiledBlock, unsigned bytec return computeFromLLInt(profiledBlock, bytecodeIndex, ident); if (profiledBlock->likelyToTakeSlowCase(bytecodeIndex)) - return PutByIdStatus(TakesSlowPath, 0, 0, 0, notFound); + return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset); StructureStubInfo& stubInfo = profiledBlock->getStubInfo(bytecodeIndex); if (!stubInfo.seen) @@ -94,24 +97,24 @@ PutByIdStatus PutByIdStatus::computeFor(CodeBlock* profiledBlock, unsigned bytec return computeFromLLInt(profiledBlock, bytecodeIndex, ident); case access_put_by_id_replace: { - size_t offset = stubInfo.u.putByIdReplace.baseObjectStructure->get( + PropertyOffset offset = stubInfo.u.putByIdReplace.baseObjectStructure->get( *profiledBlock->globalData(), ident); - if (offset != notFound) { + if (isValidOffset(offset)) { return PutByIdStatus( SimpleReplace, stubInfo.u.putByIdReplace.baseObjectStructure.get(), 0, 0, offset); } - return PutByIdStatus(TakesSlowPath, 0, 0, 0, notFound); + return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset); } case access_put_by_id_transition_normal: case 
access_put_by_id_transition_direct: { ASSERT(stubInfo.u.putByIdTransition.previousStructure->transitionWatchpointSetHasBeenInvalidated()); - size_t offset = stubInfo.u.putByIdTransition.structure->get( + PropertyOffset offset = stubInfo.u.putByIdTransition.structure->get( *profiledBlock->globalData(), ident); - if (offset != notFound) { + if (isValidOffset(offset)) { return PutByIdStatus( SimpleTransition, stubInfo.u.putByIdTransition.previousStructure.get(), @@ -119,14 +122,14 @@ PutByIdStatus PutByIdStatus::computeFor(CodeBlock* profiledBlock, unsigned bytec stubInfo.u.putByIdTransition.chain.get(), offset); } - return PutByIdStatus(TakesSlowPath, 0, 0, 0, notFound); + return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset); } default: - return PutByIdStatus(TakesSlowPath, 0, 0, 0, notFound); + return PutByIdStatus(TakesSlowPath, 0, 0, 0, invalidOffset); } #else // ENABLE(JIT) - return PutByIdStatus(NoInformation, 0, 0, 0, notFound); + return PutByIdStatus(NoInformation, 0, 0, 0, invalidOffset); #endif // ENABLE(JIT) } diff --git a/Source/JavaScriptCore/bytecode/PutByIdStatus.h b/Source/JavaScriptCore/bytecode/PutByIdStatus.h index a6d95a449..694915244 100644 --- a/Source/JavaScriptCore/bytecode/PutByIdStatus.h +++ b/Source/JavaScriptCore/bytecode/PutByIdStatus.h @@ -26,6 +26,7 @@ #ifndef PutByIdStatus_h #define PutByIdStatus_h +#include "PropertyOffset.h" #include <wtf/NotFound.h> namespace JSC { @@ -55,7 +56,7 @@ public: , m_oldStructure(0) , m_newStructure(0) , m_structureChain(0) - , m_offset(notFound) + , m_offset(invalidOffset) { } @@ -64,7 +65,7 @@ public: Structure* oldStructure, Structure* newStructure, StructureChain* structureChain, - size_t offset) + PropertyOffset offset) : m_state(state) , m_oldStructure(oldStructure) , m_newStructure(newStructure) @@ -74,7 +75,7 @@ public: ASSERT((m_state == NoInformation || m_state == TakesSlowPath) == !m_oldStructure); ASSERT((m_state != SimpleTransition) == !m_newStructure); ASSERT((m_state != 
SimpleTransition) == !m_structureChain); - ASSERT((m_state == NoInformation || m_state == TakesSlowPath) == (m_offset == notFound)); + ASSERT((m_state == NoInformation || m_state == TakesSlowPath) == (m_offset == invalidOffset)); } static PutByIdStatus computeFor(CodeBlock*, unsigned bytecodeIndex, Identifier&); @@ -90,7 +91,7 @@ public: Structure* oldStructure() const { return m_oldStructure; } Structure* newStructure() const { return m_newStructure; } StructureChain* structureChain() const { return m_structureChain; } - size_t offset() const { return m_offset; } + PropertyOffset offset() const { return m_offset; } private: static PutByIdStatus computeFromLLInt(CodeBlock*, unsigned bytecodeIndex, Identifier&); @@ -99,7 +100,7 @@ private: Structure* m_oldStructure; Structure* m_newStructure; StructureChain* m_structureChain; - size_t m_offset; + PropertyOffset m_offset; }; } // namespace JSC diff --git a/Source/JavaScriptCore/bytecode/ResolveGlobalStatus.cpp b/Source/JavaScriptCore/bytecode/ResolveGlobalStatus.cpp index ff138704c..4afee248d 100644 --- a/Source/JavaScriptCore/bytecode/ResolveGlobalStatus.cpp +++ b/Source/JavaScriptCore/bytecode/ResolveGlobalStatus.cpp @@ -32,17 +32,19 @@ namespace JSC { +#if ENABLE(LLINT) || ENABLE(JIT) static ResolveGlobalStatus computeForStructure(CodeBlock* codeBlock, Structure* structure, Identifier& identifier) { unsigned attributesIgnored; JSCell* specificValue; - size_t offset = structure->get( + PropertyOffset offset = structure->get( *codeBlock->globalData(), identifier, attributesIgnored, specificValue); - if (offset == notFound) + if (!isValidOffset(offset)) return ResolveGlobalStatus(); return ResolveGlobalStatus(ResolveGlobalStatus::Simple, structure, offset, specificValue); } +#endif // ENABLE(LLINT) || ENABLE(JIT) static ResolveGlobalStatus computeForLLInt(CodeBlock* codeBlock, unsigned bytecodeIndex, Identifier& identifier) { diff --git a/Source/JavaScriptCore/bytecode/ResolveGlobalStatus.h 
b/Source/JavaScriptCore/bytecode/ResolveGlobalStatus.h index 4698332f7..cbe4d3b5f 100644 --- a/Source/JavaScriptCore/bytecode/ResolveGlobalStatus.h +++ b/Source/JavaScriptCore/bytecode/ResolveGlobalStatus.h @@ -27,6 +27,7 @@ #define ResolveGlobalStatus_h #include "JSValue.h" +#include "PropertyOffset.h" #include <wtf/NotFound.h> namespace JSC { @@ -46,12 +47,12 @@ public: ResolveGlobalStatus() : m_state(NoInformation) , m_structure(0) - , m_offset(notFound) + , m_offset(invalidOffset) { } ResolveGlobalStatus( - State state, Structure* structure = 0, size_t offset = notFound, + State state, Structure* structure = 0, PropertyOffset offset = invalidOffset, JSValue specificValue = JSValue()) : m_state(state) , m_structure(structure) @@ -70,13 +71,13 @@ public: bool takesSlowPath() const { return m_state == TakesSlowPath; } Structure* structure() const { return m_structure; } - size_t offset() const { return m_offset; } + PropertyOffset offset() const { return m_offset; } JSValue specificValue() const { return m_specificValue; } private: State m_state; Structure* m_structure; - size_t m_offset; + PropertyOffset m_offset; JSValue m_specificValue; }; // class ResolveGlobalStatus diff --git a/Source/JavaScriptCore/bytecode/StructureSet.h b/Source/JavaScriptCore/bytecode/StructureSet.h index 2bbc50cad..ebde9779f 100644 --- a/Source/JavaScriptCore/bytecode/StructureSet.h +++ b/Source/JavaScriptCore/bytecode/StructureSet.h @@ -113,15 +113,6 @@ public: size_t size() const { return m_structures.size(); } - bool allAreUsingInlinePropertyStorage() const - { - for (size_t i = 0; i < m_structures.size(); ++i) { - if (!m_structures[i]->isUsingInlineStorage()) - return false; - } - return true; - } - // Call this if you know that the structure set must consist of exactly // one structure. 
Structure* singletonStructure() const diff --git a/Source/JavaScriptCore/bytecode/StructureStubInfo.h b/Source/JavaScriptCore/bytecode/StructureStubInfo.h index 573f6e975..807966cf3 100644 --- a/Source/JavaScriptCore/bytecode/StructureStubInfo.h +++ b/Source/JavaScriptCore/bytecode/StructureStubInfo.h @@ -32,6 +32,7 @@ #include "CodeOrigin.h" #include "Instruction.h" +#include "JITStubRoutine.h" #include "MacroAssembler.h" #include "Opcode.h" #include "Structure.h" @@ -168,7 +169,7 @@ namespace JSC { { deref(); accessType = access_unset; - stubRoutine = MacroAssemblerCodeRef(); + stubRoutine.clear(); } void deref(); @@ -204,6 +205,7 @@ namespace JSC { int8_t valueGPR; int8_t scratchGPR; int32_t deltaCallToDone; + int32_t deltaCallToStorageLoad; int32_t deltaCallToStructCheck; int32_t deltaCallToSlowCase; int32_t deltaCheckImmToCall; @@ -219,6 +221,7 @@ namespace JSC { struct { int16_t structureToCompare; int16_t structureCheck; + int16_t propertyStorageLoad; #if USE(JSVALUE64) int16_t displacementLabel; #else @@ -230,6 +233,7 @@ namespace JSC { } get; struct { int16_t structureToCompare; + int16_t propertyStorageLoad; #if USE(JSVALUE64) int16_t displacementLabel; #else @@ -283,7 +287,7 @@ namespace JSC { } putByIdList; } u; - MacroAssemblerCodeRef stubRoutine; + RefPtr<JITStubRoutine> stubRoutine; CodeLocationCall callReturnLocation; CodeLocationLabel hotPathBegin; }; diff --git a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp index 8969a7f25..b104788c8 100644 --- a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp +++ b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.cpp @@ -32,6 +32,7 @@ #include "BytecodeGenerator.h" #include "BatchedTransitionOptimizer.h" +#include "Comment.h" #include "JSActivation.h" #include "JSFunction.h" #include "Interpreter.h" @@ -245,6 +246,9 @@ BytecodeGenerator::BytecodeGenerator(ProgramNode* programNode, ScopeChainNode* s , 
m_shouldEmitRichSourceInfo(scopeChain->globalObject->globalObjectMethodTable()->supportsRichSourceInfo(scopeChain->globalObject.get())) , m_scopeChain(*scopeChain->globalData, scopeChain) , m_symbolTable(symbolTable) +#if ENABLE(BYTECODE_COMMENTS) + , m_currentCommentString(0) +#endif , m_scopeNode(programNode) , m_codeBlock(codeBlock) , m_thisRegister(CallFrame::thisArgumentOffset()) @@ -270,6 +274,7 @@ BytecodeGenerator::BytecodeGenerator(ProgramNode* programNode, ScopeChainNode* s if (m_shouldEmitDebugHooks) m_codeBlock->setNeedsFullScopeChain(true); + prependComment("entering Program block"); emitOpcode(op_enter); codeBlock->setGlobalData(m_globalData); @@ -322,6 +327,9 @@ BytecodeGenerator::BytecodeGenerator(FunctionBodyNode* functionBody, ScopeChainN , m_shouldEmitRichSourceInfo(scopeChain->globalObject->globalObjectMethodTable()->supportsRichSourceInfo(scopeChain->globalObject.get())) , m_scopeChain(*scopeChain->globalData, scopeChain) , m_symbolTable(symbolTable) +#if ENABLE(BYTECODE_COMMENTS) + , m_currentCommentString(0) +#endif , m_scopeNode(functionBody) , m_codeBlock(codeBlock) , m_activationRegister(0) @@ -349,9 +357,11 @@ BytecodeGenerator::BytecodeGenerator(FunctionBodyNode* functionBody, ScopeChainN codeBlock->setGlobalData(m_globalData); + prependComment("entering Function block"); emitOpcode(op_enter); if (m_codeBlock->needsFullScopeChain()) { m_activationRegister = addVar(); + prependComment("activation for Full Scope Chain"); emitInitLazyRegister(m_activationRegister); m_codeBlock->setActivationRegister(m_activationRegister->index()); } @@ -368,10 +378,13 @@ BytecodeGenerator::BytecodeGenerator(FunctionBodyNode* functionBody, ScopeChainN codeBlock->setArgumentsRegister(argumentsRegister->index()); ASSERT_UNUSED(unmodifiedArgumentsRegister, unmodifiedArgumentsRegister->index() == JSC::unmodifiedArgumentsRegister(codeBlock->argumentsRegister())); + prependComment("arguments for Full Scope Chain"); emitInitLazyRegister(argumentsRegister); + 
prependComment("unmodified arguments for Full Scope Chain"); emitInitLazyRegister(unmodifiedArgumentsRegister); if (m_codeBlock->isStrictMode()) { + prependComment("create arguments for strict mode"); emitOpcode(op_create_arguments); instructions().append(argumentsRegister->index()); } @@ -380,6 +393,7 @@ BytecodeGenerator::BytecodeGenerator(FunctionBodyNode* functionBody, ScopeChainN // it from a call frame. In the long-term it should stop doing that (<rdar://problem/6911886>), // but for now we force eager creation of the arguments object when debugging. if (m_shouldEmitDebugHooks) { + prependComment("create arguments for debug hooks"); emitOpcode(op_create_arguments); instructions().append(argumentsRegister->index()); } @@ -398,10 +412,12 @@ BytecodeGenerator::BytecodeGenerator(FunctionBodyNode* functionBody, ScopeChainN if (functionBody->captures(ident)) { if (!m_hasCreatedActivation) { m_hasCreatedActivation = true; + prependComment("activation for captured vars"); emitOpcode(op_create_activation); instructions().append(m_activationRegister->index()); } m_functions.add(ident.impl()); + prependComment("captured function var"); emitNewFunction(addVar(ident, false), function); } } @@ -414,6 +430,7 @@ BytecodeGenerator::BytecodeGenerator(FunctionBodyNode* functionBody, ScopeChainN bool canLazilyCreateFunctions = !functionBody->needsActivationForMoreThanVariables() && !m_shouldEmitDebugHooks; if (!canLazilyCreateFunctions && !m_hasCreatedActivation) { m_hasCreatedActivation = true; + prependComment("cannot lazily create functions"); emitOpcode(op_create_activation); instructions().append(m_activationRegister->index()); } @@ -428,6 +445,7 @@ BytecodeGenerator::BytecodeGenerator(FunctionBodyNode* functionBody, ScopeChainN RefPtr<RegisterID> reg = addVar(ident, false); // Don't lazily create functions that override the name 'arguments' // as this would complicate lazy instantiation of actual arguments. 
+ prependComment("a function that override 'arguments'"); if (!canLazilyCreateFunctions || ident == propertyNames().arguments) emitNewFunction(reg.get(), function); else { @@ -460,6 +478,7 @@ BytecodeGenerator::BytecodeGenerator(FunctionBodyNode* functionBody, ScopeChainN preserveLastVar(); if (isConstructor()) { + prependComment("'this' because we are a Constructor function"); emitOpcode(op_create_this); instructions().append(m_thisRegister.index()); } else if (!codeBlock->isStrictMode() && (functionBody->usesThis() || codeBlock->usesEval() || m_shouldEmitDebugHooks)) { @@ -475,6 +494,9 @@ BytecodeGenerator::BytecodeGenerator(EvalNode* evalNode, ScopeChainNode* scopeCh , m_shouldEmitRichSourceInfo(scopeChain->globalObject->globalObjectMethodTable()->supportsRichSourceInfo(scopeChain->globalObject.get())) , m_scopeChain(*scopeChain->globalData, scopeChain) , m_symbolTable(symbolTable) +#if ENABLE(BYTECODE_COMMENTS) + , m_currentCommentString(0) +#endif , m_scopeNode(evalNode) , m_codeBlock(codeBlock) , m_thisRegister(CallFrame::thisArgumentOffset()) @@ -500,6 +522,7 @@ BytecodeGenerator::BytecodeGenerator(EvalNode* evalNode, ScopeChainNode* scopeCh if (m_shouldEmitDebugHooks || m_baseScopeDepth) m_codeBlock->setNeedsFullScopeChain(true); + prependComment("entering Eval block"); emitOpcode(op_enter); codeBlock->setGlobalData(m_globalData); m_codeBlock->setNumParameters(1); @@ -658,10 +681,31 @@ void BytecodeGenerator::emitOpcode(OpcodeID opcodeID) ASSERT(opcodePosition - m_lastOpcodePosition == opcodeLength(m_lastOpcodeID) || m_lastOpcodeID == op_end); m_lastOpcodePosition = opcodePosition; #endif + emitComment(); instructions().append(globalData()->interpreter->getOpcode(opcodeID)); m_lastOpcodeID = opcodeID; } +#if ENABLE(BYTECODE_COMMENTS) +// Record a comment in the CodeBlock's comments list for the current opcode +// that is about to be emitted. 
+void BytecodeGenerator::emitComment() +{ + if (m_currentCommentString) { + size_t opcodePosition = instructions().size(); + Comment comment = { opcodePosition, m_currentCommentString }; + m_codeBlock->bytecodeComments().append(comment); + m_currentCommentString = 0; + } +} + +// Register a comment to be associated with the next opcode that will be emitted. +void BytecodeGenerator::prependComment(const char* string) +{ + m_currentCommentString = string; +} +#endif + ValueProfile* BytecodeGenerator::emitProfiledOpcode(OpcodeID opcodeID) { #if ENABLE(VALUE_PROFILER) diff --git a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h index 8b1d1d744..52fd7e83c 100644 --- a/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h +++ b/Source/JavaScriptCore/bytecompiler/BytecodeGenerator.h @@ -542,6 +542,18 @@ namespace JSC { private: friend class Label; +#if ENABLE(BYTECODE_COMMENTS) + // Record a comment in the CodeBlock's comments list for the current + // opcode that is about to be emitted. + void emitComment(); + // Register a comment to be associated with the next opcode that will + // be emitted. 
+ void prependComment(const char* string); +#else + ALWAYS_INLINE void emitComment() { } + ALWAYS_INLINE void prependComment(const char*) { } +#endif + void emitOpcode(OpcodeID); ValueProfile* emitProfiledOpcode(OpcodeID); void retrieveLastBinaryOp(int& dstIndex, int& src1Index, int& src2Index); @@ -623,6 +635,9 @@ namespace JSC { Vector<Instruction>& instructions() { return m_instructions; } SymbolTable& symbolTable() { return *m_symbolTable; } +#if ENABLE(BYTECODE_COMMENTS) + Vector<Comment>& comments() { return m_comments; } +#endif bool shouldOptimizeLocals() { @@ -664,6 +679,11 @@ namespace JSC { Strong<ScopeChainNode> m_scopeChain; SymbolTable* m_symbolTable; +#if ENABLE(BYTECODE_COMMENTS) + Vector<Comment> m_comments; + const char *m_currentCommentString; +#endif + ScopeNode* m_scopeNode; CodeBlock* m_codeBlock; diff --git a/Source/JavaScriptCore/dfg/DFGAbstractState.cpp b/Source/JavaScriptCore/dfg/DFGAbstractState.cpp index c2d49f7ee..4cd31f2a8 100644 --- a/Source/JavaScriptCore/dfg/DFGAbstractState.cpp +++ b/Source/JavaScriptCore/dfg/DFGAbstractState.cpp @@ -311,31 +311,35 @@ bool AbstractState::execute(unsigned indexInBlock) if (left && right && left.isInt32() && right.isInt32()) { int32_t a = left.asInt32(); int32_t b = right.asInt32(); + bool constantWasSet; switch (node.op()) { case BitAnd: - forNode(nodeIndex).set(JSValue(a & b)); + constantWasSet = trySetConstant(nodeIndex, JSValue(a & b)); break; case BitOr: - forNode(nodeIndex).set(JSValue(a | b)); + constantWasSet = trySetConstant(nodeIndex, JSValue(a | b)); break; case BitXor: - forNode(nodeIndex).set(JSValue(a ^ b)); + constantWasSet = trySetConstant(nodeIndex, JSValue(a ^ b)); break; case BitRShift: - forNode(nodeIndex).set(JSValue(a >> static_cast<uint32_t>(b))); + constantWasSet = trySetConstant(nodeIndex, JSValue(a >> static_cast<uint32_t>(b))); break; case BitLShift: - forNode(nodeIndex).set(JSValue(a << static_cast<uint32_t>(b))); + constantWasSet = trySetConstant(nodeIndex, JSValue(a << 
static_cast<uint32_t>(b))); break; case BitURShift: - forNode(nodeIndex).set(JSValue(static_cast<uint32_t>(a) >> static_cast<uint32_t>(b))); + constantWasSet = trySetConstant(nodeIndex, JSValue(static_cast<uint32_t>(a) >> static_cast<uint32_t>(b))); break; default: ASSERT_NOT_REACHED(); + constantWasSet = false; + } + if (constantWasSet) { + m_foundConstants = true; + node.setCanExit(false); + break; } - m_foundConstants = true; - node.setCanExit(false); - break; } speculateInt32Binary(node); forNode(nodeIndex).set(SpecInt32); @@ -346,10 +350,11 @@ bool AbstractState::execute(unsigned indexInBlock) JSValue child = forNode(node.child1()).value(); if (child && child.isNumber()) { ASSERT(child.isInt32()); - forNode(nodeIndex).set(JSValue(child.asUInt32())); - m_foundConstants = true; - node.setCanExit(false); - break; + if (trySetConstant(nodeIndex, JSValue(child.asUInt32()))) { + m_foundConstants = true; + node.setCanExit(false); + break; + } } if (!node.canSpeculateInteger()) { forNode(nodeIndex).set(SpecDouble); @@ -367,8 +372,8 @@ bool AbstractState::execute(unsigned indexInBlock) if (child && child.isNumber()) { double asDouble = child.asNumber(); int32_t asInt = JSC::toInt32(asDouble); - if (bitwise_cast<int64_t>(static_cast<double>(asInt)) == bitwise_cast<int64_t>(asDouble)) { - forNode(nodeIndex).set(JSValue(asInt)); + if (bitwise_cast<int64_t>(static_cast<double>(asInt)) == bitwise_cast<int64_t>(asDouble) + && trySetConstant(nodeIndex, JSValue(asInt))) { m_foundConstants = true; break; } @@ -382,13 +387,16 @@ bool AbstractState::execute(unsigned indexInBlock) case ValueToInt32: { JSValue child = forNode(node.child1()).value(); if (child && child.isNumber()) { + bool constantWasSet; if (child.isInt32()) - forNode(nodeIndex).set(child); + constantWasSet = trySetConstant(nodeIndex, child); else - forNode(nodeIndex).set(JSValue(JSC::toInt32(child.asDouble()))); - m_foundConstants = true; - node.setCanExit(false); - break; + constantWasSet = 
trySetConstant(nodeIndex, JSValue(JSC::toInt32(child.asDouble()))); + if (constantWasSet) { + m_foundConstants = true; + node.setCanExit(false); + break; + } } if (m_graph[node.child1()].shouldSpeculateInteger()) speculateInt32Unary(node); @@ -405,8 +413,8 @@ bool AbstractState::execute(unsigned indexInBlock) case Int32ToDouble: { JSValue child = forNode(node.child1()).value(); - if (child && child.isNumber()) { - forNode(nodeIndex).set(JSValue(JSValue::EncodeAsDouble, child.asNumber())); + if (child && child.isNumber() + && trySetConstant(nodeIndex, JSValue(JSValue::EncodeAsDouble, child.asNumber()))) { m_foundConstants = true; node.setCanExit(false); break; @@ -424,8 +432,8 @@ bool AbstractState::execute(unsigned indexInBlock) case ArithAdd: { JSValue left = forNode(node.child1()).value(); JSValue right = forNode(node.child2()).value(); - if (left && right && left.isNumber() && right.isNumber()) { - forNode(nodeIndex).set(JSValue(left.asNumber() + right.asNumber())); + if (left && right && left.isNumber() && right.isNumber() + && trySetConstant(nodeIndex, JSValue(left.asNumber() + right.asNumber()))) { m_foundConstants = true; node.setCanExit(false); break; @@ -456,8 +464,8 @@ bool AbstractState::execute(unsigned indexInBlock) case ArithSub: { JSValue left = forNode(node.child1()).value(); JSValue right = forNode(node.child2()).value(); - if (left && right && left.isNumber() && right.isNumber()) { - forNode(nodeIndex).set(JSValue(left.asNumber() - right.asNumber())); + if (left && right && left.isNumber() && right.isNumber() + && trySetConstant(nodeIndex, JSValue(left.asNumber() - right.asNumber()))) { m_foundConstants = true; node.setCanExit(false); break; @@ -475,8 +483,8 @@ bool AbstractState::execute(unsigned indexInBlock) case ArithNegate: { JSValue child = forNode(node.child1()).value(); - if (child && child.isNumber()) { - forNode(nodeIndex).set(JSValue(-child.asNumber())); + if (child && child.isNumber() + && trySetConstant(nodeIndex, 
JSValue(-child.asNumber()))) { m_foundConstants = true; node.setCanExit(false); break; @@ -495,8 +503,8 @@ bool AbstractState::execute(unsigned indexInBlock) case ArithMul: { JSValue left = forNode(node.child1()).value(); JSValue right = forNode(node.child2()).value(); - if (left && right && left.isNumber() && right.isNumber()) { - forNode(nodeIndex).set(JSValue(left.asNumber() * right.asNumber())); + if (left && right && left.isNumber() && right.isNumber() + && trySetConstant(nodeIndex, JSValue(left.asNumber() * right.asNumber()))) { m_foundConstants = true; node.setCanExit(false); break; @@ -523,26 +531,30 @@ bool AbstractState::execute(unsigned indexInBlock) if (left && right && left.isNumber() && right.isNumber()) { double a = left.asNumber(); double b = right.asNumber(); + bool constantWasSet; switch (node.op()) { case ArithDiv: - forNode(nodeIndex).set(JSValue(a / b)); + constantWasSet = trySetConstant(nodeIndex, JSValue(a / b)); break; case ArithMin: - forNode(nodeIndex).set(JSValue(a < b ? a : (b <= a ? b : a + b))); + constantWasSet = trySetConstant(nodeIndex, JSValue(a < b ? a : (b <= a ? b : a + b))); break; case ArithMax: - forNode(nodeIndex).set(JSValue(a > b ? a : (b >= a ? b : a + b))); + constantWasSet = trySetConstant(nodeIndex, JSValue(a > b ? a : (b >= a ? 
b : a + b))); break; case ArithMod: - forNode(nodeIndex).set(JSValue(fmod(a, b))); + constantWasSet = trySetConstant(nodeIndex, JSValue(fmod(a, b))); break; default: ASSERT_NOT_REACHED(); + constantWasSet = false; + break; + } + if (constantWasSet) { + m_foundConstants = true; + node.setCanExit(false); break; } - m_foundConstants = true; - node.setCanExit(false); - break; } if (Node::shouldSpeculateInteger( m_graph[node.child1()], m_graph[node.child2()]) @@ -558,8 +570,8 @@ bool AbstractState::execute(unsigned indexInBlock) case ArithAbs: { JSValue child = forNode(node.child1()).value(); - if (child && child.isNumber()) { - forNode(nodeIndex).set(JSValue(fabs(child.asNumber()))); + if (child && child.isNumber() + && trySetConstant(nodeIndex, JSValue(fabs(child.asNumber())))) { m_foundConstants = true; node.setCanExit(false); break; @@ -577,8 +589,8 @@ bool AbstractState::execute(unsigned indexInBlock) case ArithSqrt: { JSValue child = forNode(node.child1()).value(); - if (child && child.isNumber()) { - forNode(nodeIndex).set(JSValue(sqrt(child.asNumber()))); + if (child && child.isNumber() + && trySetConstant(nodeIndex, JSValue(sqrt(child.asNumber())))) { m_foundConstants = true; node.setCanExit(false); break; @@ -590,8 +602,7 @@ bool AbstractState::execute(unsigned indexInBlock) case LogicalNot: { JSValue childConst = forNode(node.child1()).value(); - if (childConst) { - forNode(nodeIndex).set(jsBoolean(!childConst.toBoolean())); + if (childConst && trySetConstant(nodeIndex, jsBoolean(!childConst.toBoolean()))) { m_foundConstants = true; node.setCanExit(false); break; @@ -626,27 +637,28 @@ bool AbstractState::execute(unsigned indexInBlock) node.setCanExit(false); JSValue child = forNode(node.child1()).value(); if (child) { - bool foundConstant = true; + bool constantWasSet; switch (node.op()) { case IsUndefined: - forNode(nodeIndex).set(jsBoolean( + constantWasSet = trySetConstant(nodeIndex, jsBoolean( child.isCell() ? 
child.asCell()->structure()->typeInfo().masqueradesAsUndefined() : child.isUndefined())); break; case IsBoolean: - forNode(nodeIndex).set(jsBoolean(child.isBoolean())); + constantWasSet = trySetConstant(nodeIndex, jsBoolean(child.isBoolean())); break; case IsNumber: - forNode(nodeIndex).set(jsBoolean(child.isNumber())); + constantWasSet = trySetConstant(nodeIndex, jsBoolean(child.isNumber())); break; case IsString: - forNode(nodeIndex).set(jsBoolean(isJSString(child))); + constantWasSet = trySetConstant(nodeIndex, jsBoolean(isJSString(child))); break; default: + constantWasSet = false; break; } - if (foundConstant) { + if (constantWasSet) { m_foundConstants = true; break; } @@ -665,29 +677,33 @@ bool AbstractState::execute(unsigned indexInBlock) if (leftConst && rightConst && leftConst.isNumber() && rightConst.isNumber()) { double a = leftConst.asNumber(); double b = rightConst.asNumber(); + bool constantWasSet; switch (node.op()) { case CompareLess: - forNode(nodeIndex).set(jsBoolean(a < b)); + constantWasSet = trySetConstant(nodeIndex, jsBoolean(a < b)); break; case CompareLessEq: - forNode(nodeIndex).set(jsBoolean(a <= b)); + constantWasSet = trySetConstant(nodeIndex, jsBoolean(a <= b)); break; case CompareGreater: - forNode(nodeIndex).set(jsBoolean(a > b)); + constantWasSet = trySetConstant(nodeIndex, jsBoolean(a > b)); break; case CompareGreaterEq: - forNode(nodeIndex).set(jsBoolean(a >= b)); + constantWasSet = trySetConstant(nodeIndex, jsBoolean(a >= b)); break; case CompareEq: - forNode(nodeIndex).set(jsBoolean(a == b)); + constantWasSet = trySetConstant(nodeIndex, jsBoolean(a == b)); break; default: ASSERT_NOT_REACHED(); + constantWasSet = false; + break; + } + if (constantWasSet) { + m_foundConstants = true; + node.setCanExit(false); break; } - m_foundConstants = true; - node.setCanExit(false); - break; } forNode(nodeIndex).set(SpecBoolean); @@ -767,8 +783,8 @@ bool AbstractState::execute(unsigned indexInBlock) case CompareStrictEq: { JSValue left = 
forNode(node.child1()).value(); JSValue right = forNode(node.child2()).value(); - if (left && right && left.isNumber() && right.isNumber()) { - forNode(nodeIndex).set(jsBoolean(left.asNumber() == right.asNumber())); + if (left && right && left.isNumber() && right.isNumber() + && trySetConstant(nodeIndex, jsBoolean(left.asNumber() == right.asNumber()))) { m_foundConstants = true; node.setCanExit(false); break; @@ -1106,8 +1122,7 @@ bool AbstractState::execute(unsigned indexInBlock) case ToPrimitive: { JSValue childConst = forNode(node.child1()).value(); - if (childConst && childConst.isNumber()) { - forNode(nodeIndex).set(childConst); + if (childConst && childConst.isNumber() && trySetConstant(nodeIndex, childConst)) { m_foundConstants = true; node.setCanExit(false); break; diff --git a/Source/JavaScriptCore/dfg/DFGAbstractState.h b/Source/JavaScriptCore/dfg/DFGAbstractState.h index 9bb74cd86..95cadecbb 100644 --- a/Source/JavaScriptCore/dfg/DFGAbstractState.h +++ b/Source/JavaScriptCore/dfg/DFGAbstractState.h @@ -267,6 +267,23 @@ private: childValue2.filter(SpecNumber); } + bool trySetConstant(NodeIndex nodeIndex, JSValue value) + { + // Make sure we don't constant fold something that will produce values that contravene + // predictions. If that happens then we know that the code will OSR exit, forcing + // recompilation. But if we tried to constant fold then we'll have a very degenerate + // IR: namely we'll have a JSConstant that contravenes its own prediction. There's a + // lot of subtle code that assumes that + // speculationFromValue(jsConstant) == jsConstant.prediction(). "Hardening" that code + // is probably less sane than just pulling back on constant folding. 
+ SpeculatedType oldType = m_graph[nodeIndex].prediction(); + if (mergeSpeculations(speculationFromValue(value), oldType) != oldType) + return false; + + forNode(nodeIndex).set(value); + return true; + } + CodeBlock* m_codeBlock; Graph& m_graph; diff --git a/Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.cpp b/Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.cpp index 28e686aef..9208cde1b 100644 --- a/Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGArgumentsSimplificationPhase.cpp @@ -627,8 +627,9 @@ public: continue; // If this is a CreateArguments for an InlineCallFrame* that does // not create arguments, then replace it with a PhantomArguments. - // PhantomArguments is a constant that represents JSValue() (the - // empty value) in DFG and arguments creation for OSR exit. + // PhantomArguments is a non-executing node that just indicates + // that the node should be reified as an arguments object on OSR + // exit. if (m_createsArguments.contains(node.codeOrigin.inlineCallFrame)) continue; if (node.shouldGenerate()) { @@ -641,12 +642,30 @@ public: } node.setOpAndDefaultFlags(PhantomArguments); node.children.reset(); + changed = true; } insertionSet.execute(*block); } - if (changed) + if (changed) { m_graph.collectGarbage(); + + // Verify that PhantomArguments nodes are not shouldGenerate(). 
+#if !ASSERT_DISABLED + for (BlockIndex blockIndex = 0; blockIndex < m_graph.m_blocks.size(); ++blockIndex) { + BasicBlock* block = m_graph.m_blocks[blockIndex].get(); + if (!block) + continue; + for (unsigned indexInBlock = 0; indexInBlock < block->size(); ++indexInBlock) { + NodeIndex nodeIndex = block->at(indexInBlock); + Node& node = m_graph[nodeIndex]; + if (node.op() != PhantomArguments) + continue; + ASSERT(!node.shouldGenerate()); + } + } +#endif + } return changed; } @@ -815,6 +834,7 @@ private: bool performArgumentsSimplification(Graph& graph) { + SamplingRegion samplingRegion("DFG Arguments Simplification Phase"); return runPhase<ArgumentsSimplificationPhase>(graph); } diff --git a/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h b/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h index f86c15e65..4bea292f3 100644 --- a/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h +++ b/Source/JavaScriptCore/dfg/DFGAssemblyHelpers.h @@ -182,7 +182,7 @@ public: move(TrustedImmPtr(scratchBuffer->activeLengthPtr()), GPRInfo::regT0); storePtr(TrustedImmPtr(scratchSize), GPRInfo::regT0); -#if CPU(X86_64) || CPU(ARM_THUMB2) +#if CPU(X86_64) || CPU(ARM) move(TrustedImmPtr(argument), GPRInfo::argumentGPR1); move(GPRInfo::callFrameRegister, GPRInfo::argumentGPR0); GPRReg scratch = selectScratchGPR(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1); diff --git a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp index cdb0b639a..91b882399 100644 --- a/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp +++ b/Source/JavaScriptCore/dfg/DFGByteCodeParser.cpp @@ -99,7 +99,7 @@ private: bool handleConstantInternalFunction(bool usesResult, int resultOperand, InternalFunction*, int registerOffset, int argumentCountIncludingThis, SpeculatedType prediction, CodeSpecializationKind); void handleGetByOffset( int destinationOperand, SpeculatedType, NodeIndex base, unsigned identifierNumber, - bool useInlineStorage, size_t offset); + PropertyOffset); void 
handleGetById( int destinationOperand, SpeculatedType, NodeIndex base, unsigned identifierNumber, const GetByIdStatus&); @@ -871,7 +871,7 @@ private: // care about when the outcome of the division is not an integer, which // is what the special fast case counter tells us. - if (!m_inlineStackTop->m_profiledBlock->likelyToTakeSpecialFastCase(m_currentIndex) + if (!m_inlineStackTop->m_profiledBlock->couldTakeSpecialFastCase(m_currentIndex) && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, Overflow) && !m_inlineStackTop->m_exitProfile.hasExitSite(m_currentIndex, NegativeZero)) return nodeIndex; @@ -1273,7 +1273,7 @@ bool ByteCodeParser::handleInlining(bool usesResult, int callTarget, NodeIndex c unsigned depth = 0; for (InlineStackEntry* entry = m_inlineStackTop; entry; entry = entry->m_caller) { ++depth; - if (depth >= Options::maximumInliningDepth) + if (depth >= Options::maximumInliningDepth()) return false; // Depth exceeded. if (entry->executable() == executable) @@ -1630,25 +1630,20 @@ bool ByteCodeParser::handleConstantInternalFunction( void ByteCodeParser::handleGetByOffset( int destinationOperand, SpeculatedType prediction, NodeIndex base, unsigned identifierNumber, - bool useInlineStorage, size_t offset) + PropertyOffset offset) { NodeIndex propertyStorage; - size_t offsetOffset; - if (useInlineStorage) { + if (isInlineOffset(offset)) propertyStorage = base; - ASSERT(!(sizeof(JSObject) % sizeof(EncodedJSValue))); - offsetOffset = sizeof(JSObject) / sizeof(EncodedJSValue); - } else { + else propertyStorage = addToGraph(GetPropertyStorage, base); - offsetOffset = 0; - } set(destinationOperand, addToGraph( GetByOffset, OpInfo(m_graph.m_storageAccessData.size()), OpInfo(prediction), propertyStorage)); StorageAccessData storageAccessData; - storageAccessData.offset = offset + offsetOffset; + storageAccessData.offset = indexRelativeToBase(offset); storageAccessData.identifierNumber = identifierNumber; 
m_graph.m_storageAccessData.append(storageAccessData); } @@ -1677,7 +1672,6 @@ void ByteCodeParser::handleGetById( addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(getByIdStatus.structureSet())), base); - bool useInlineStorage; if (!getByIdStatus.chain().isEmpty()) { Structure* currentStructure = getByIdStatus.structureSet().singletonStructure(); JSObject* currentObject = 0; @@ -1686,9 +1680,7 @@ void ByteCodeParser::handleGetById( currentStructure = getByIdStatus.chain()[i]; base = addStructureTransitionCheck(currentObject, currentStructure); } - useInlineStorage = currentStructure->isUsingInlineStorage(); - } else - useInlineStorage = getByIdStatus.structureSet().allAreUsingInlinePropertyStorage(); + } // Unless we want bugs like https://bugs.webkit.org/show_bug.cgi?id=88783, we need to // ensure that the base of the original get_by_id is kept alive until we're done with @@ -1707,8 +1699,7 @@ void ByteCodeParser::handleGetById( } handleGetByOffset( - destinationOperand, prediction, base, identifierNumber, useInlineStorage, - getByIdStatus.offset()); + destinationOperand, prediction, base, identifierNumber, getByIdStatus.offset()); } void ByteCodeParser::prepareToParseBlock() @@ -2172,7 +2163,8 @@ bool ByteCodeParser::parseBlock(unsigned limit) SpeculatedType prediction = getPrediction(); - ASSERT(interpreter->getOpcodeID(getInstruction->u.opcode) == op_get_by_id); + ASSERT(interpreter->getOpcodeID(getInstruction->u.opcode) == op_get_by_id + || interpreter->getOpcodeID(getInstruction->u.opcode) == op_get_by_id_out_of_line); NodeIndex base = get(getInstruction[2].u.operand); unsigned identifier = m_inlineStackTop->m_identifierRemap[getInstruction[3].u.operand]; @@ -2225,7 +2217,8 @@ bool ByteCodeParser::parseBlock(unsigned limit) addToGraph(PutScopedVar, OpInfo(slot), getScopeChain, get(source)); NEXT_OPCODE(op_put_scoped_var); } - case op_get_by_id: { + case op_get_by_id: + case op_get_by_id_out_of_line: { SpeculatedType prediction = 
getPredictionWithoutOSRExit(); NodeIndex base = get(currentInstruction[2].u.operand); @@ -2241,8 +2234,11 @@ bool ByteCodeParser::parseBlock(unsigned limit) NEXT_OPCODE(op_get_by_id); } case op_put_by_id: + case op_put_by_id_out_of_line: case op_put_by_id_transition_direct: - case op_put_by_id_transition_normal: { + case op_put_by_id_transition_normal: + case op_put_by_id_transition_direct_out_of_line: + case op_put_by_id_transition_normal_out_of_line: { NodeIndex value = get(currentInstruction[3].u.operand); NodeIndex base = get(currentInstruction[1].u.operand); unsigned identifierNumber = m_inlineStackTop->m_identifierRemap[currentInstruction[2].u.operand]; @@ -2259,25 +2255,20 @@ bool ByteCodeParser::parseBlock(unsigned limit) if (!hasExitSite && putByIdStatus.isSimpleReplace()) { addToGraph(CheckStructure, OpInfo(m_graph.addStructureSet(putByIdStatus.oldStructure())), base); - size_t offsetOffset; NodeIndex propertyStorage; - if (putByIdStatus.oldStructure()->isUsingInlineStorage()) { + if (isInlineOffset(putByIdStatus.offset())) propertyStorage = base; - ASSERT(!(sizeof(JSObject) % sizeof(EncodedJSValue))); - offsetOffset = sizeof(JSObject) / sizeof(EncodedJSValue); - } else { + else propertyStorage = addToGraph(GetPropertyStorage, base); - offsetOffset = 0; - } addToGraph(PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), propertyStorage, base, value); StorageAccessData storageAccessData; - storageAccessData.offset = putByIdStatus.offset() + offsetOffset; + storageAccessData.offset = indexRelativeToBase(putByIdStatus.offset()); storageAccessData.identifierNumber = identifierNumber; m_graph.m_storageAccessData.append(storageAccessData); } else if (!hasExitSite && putByIdStatus.isSimpleTransition() - && putByIdStatus.oldStructure()->propertyStorageCapacity() == putByIdStatus.newStructure()->propertyStorageCapacity() + && putByIdStatus.oldStructure()->outOfLineCapacity() == putByIdStatus.newStructure()->outOfLineCapacity() && structureChainIsStillValid( 
direct, putByIdStatus.oldStructure(), @@ -2308,16 +2299,11 @@ bool ByteCodeParser::parseBlock(unsigned limit) putByIdStatus.newStructure()))), base); - size_t offsetOffset; NodeIndex propertyStorage; - if (putByIdStatus.newStructure()->isUsingInlineStorage()) { + if (isInlineOffset(putByIdStatus.offset())) propertyStorage = base; - ASSERT(!(sizeof(JSObject) % sizeof(EncodedJSValue))); - offsetOffset = sizeof(JSObject) / sizeof(EncodedJSValue); - } else { + else propertyStorage = addToGraph(GetPropertyStorage, base); - offsetOffset = 0; - } addToGraph( PutByOffset, OpInfo(m_graph.m_storageAccessData.size()), @@ -2326,7 +2312,7 @@ bool ByteCodeParser::parseBlock(unsigned limit) value); StorageAccessData storageAccessData; - storageAccessData.offset = putByIdStatus.offset() + offsetOffset; + storageAccessData.offset = indexRelativeToBase(putByIdStatus.offset()); storageAccessData.identifierNumber = identifierNumber; m_graph.m_storageAccessData.append(storageAccessData); } else { @@ -2738,8 +2724,7 @@ bool ByteCodeParser::parseBlock(unsigned limit) } else { handleGetByOffset( currentInstruction[1].u.operand, prediction, globalObject, - identifierNumber, status.structure()->isUsingInlineStorage(), - status.offset()); + identifierNumber, status.offset()); } m_globalResolveNumber++; // Skip over the unused global resolve info. 
@@ -3341,6 +3326,7 @@ bool ByteCodeParser::parse() bool parse(ExecState* exec, Graph& graph) { + SamplingRegion samplingRegion("DFG Parsing"); #if DFG_DEBUG_LOCAL_DISBALE UNUSED_PARAM(exec); UNUSED_PARAM(graph); diff --git a/Source/JavaScriptCore/dfg/DFGCCallHelpers.h b/Source/JavaScriptCore/dfg/DFGCCallHelpers.h index b60290870..9c1718bdb 100644 --- a/Source/JavaScriptCore/dfg/DFGCCallHelpers.h +++ b/Source/JavaScriptCore/dfg/DFGCCallHelpers.h @@ -434,7 +434,7 @@ public: { setupTwoStubArgs<FPRInfo::argumentFPR0, FPRInfo::argumentFPR1>(arg1, arg2); } -#else +#elif CPU(ARM) ALWAYS_INLINE void setupArguments(FPRReg arg1) { assembler().vmov(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, arg1); @@ -445,6 +445,8 @@ public: assembler().vmov(GPRInfo::argumentGPR0, GPRInfo::argumentGPR1, arg1); assembler().vmov(GPRInfo::argumentGPR2, GPRInfo::argumentGPR3, arg2); } +#else +#error "DFG JIT not supported on this platform." #endif ALWAYS_INLINE void setupArguments(GPRReg arg1) diff --git a/Source/JavaScriptCore/dfg/DFGCFAPhase.cpp b/Source/JavaScriptCore/dfg/DFGCFAPhase.cpp index c6042448a..c52349645 100644 --- a/Source/JavaScriptCore/dfg/DFGCFAPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGCFAPhase.cpp @@ -132,6 +132,7 @@ private: bool performCFA(Graph& graph) { + SamplingRegion samplingRegion("DFG CFA Phase"); return runPhase<CFAPhase>(graph); } diff --git a/Source/JavaScriptCore/dfg/DFGCFGSimplificationPhase.cpp b/Source/JavaScriptCore/dfg/DFGCFGSimplificationPhase.cpp index 161f51e30..c234e6e4e 100644 --- a/Source/JavaScriptCore/dfg/DFGCFGSimplificationPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGCFGSimplificationPhase.cpp @@ -613,7 +613,7 @@ private: ASSERT(node.shouldGenerate()); Node& possibleLocalOp = m_graph[node.child1()]; - if (possibleLocalOp.hasLocal()) { + if (possibleLocalOp.hasLocal() && !possibleLocalOp.variableAccessData()->isCaptured()) { NodeIndex setLocalIndex = firstBlock->variablesAtTail.operand(possibleLocalOp.local()); Node& setLocal = 
m_graph[setLocalIndex]; @@ -745,6 +745,7 @@ private: bool performCFGSimplification(Graph& graph) { + SamplingRegion samplingRegion("DFG CFG Simplification Phase"); return runPhase<CFGSimplificationPhase>(graph); } diff --git a/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp b/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp index be0012f56..108cf1965 100644 --- a/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGCSEPhase.cpp @@ -1172,6 +1172,7 @@ private: bool performCSE(Graph& graph, OptimizationFixpointState fixpointState) { + SamplingRegion samplingRegion("DFG CSE Phase"); return runPhase<CSEPhase>(graph, fixpointState); } diff --git a/Source/JavaScriptCore/dfg/DFGCapabilities.h b/Source/JavaScriptCore/dfg/DFGCapabilities.h index 1aec0bca1..2bc9b2965 100644 --- a/Source/JavaScriptCore/dfg/DFGCapabilities.h +++ b/Source/JavaScriptCore/dfg/DFGCapabilities.h @@ -41,29 +41,29 @@ namespace JSC { namespace DFG { // check opcodes. inline bool mightCompileEval(CodeBlock* codeBlock) { - return codeBlock->instructionCount() <= Options::maximumOptimizationCandidateInstructionCount; + return codeBlock->instructionCount() <= Options::maximumOptimizationCandidateInstructionCount(); } inline bool mightCompileProgram(CodeBlock* codeBlock) { - return codeBlock->instructionCount() <= Options::maximumOptimizationCandidateInstructionCount; + return codeBlock->instructionCount() <= Options::maximumOptimizationCandidateInstructionCount(); } inline bool mightCompileFunctionForCall(CodeBlock* codeBlock) { - return codeBlock->instructionCount() <= Options::maximumOptimizationCandidateInstructionCount; + return codeBlock->instructionCount() <= Options::maximumOptimizationCandidateInstructionCount(); } inline bool mightCompileFunctionForConstruct(CodeBlock* codeBlock) { - return codeBlock->instructionCount() <= Options::maximumOptimizationCandidateInstructionCount; + return codeBlock->instructionCount() <= Options::maximumOptimizationCandidateInstructionCount(); } inline 
bool mightInlineFunctionForCall(CodeBlock* codeBlock) { - return codeBlock->instructionCount() <= Options::maximumFunctionForCallInlineCandidateInstructionCount + return codeBlock->instructionCount() <= Options::maximumFunctionForCallInlineCandidateInstructionCount() && !codeBlock->ownerExecutable()->needsActivation(); } inline bool mightInlineFunctionForConstruct(CodeBlock* codeBlock) { - return codeBlock->instructionCount() <= Options::maximumFunctionForConstructInlineCandidateInstructionCount + return codeBlock->instructionCount() <= Options::maximumFunctionForConstructInlineCandidateInstructionCount() && !codeBlock->ownerExecutable()->needsActivation(); } @@ -119,9 +119,13 @@ inline CapabilityLevel canCompileOpcode(OpcodeID opcodeID, CodeBlock*, Instructi case op_get_scoped_var: case op_put_scoped_var: case op_get_by_id: + case op_get_by_id_out_of_line: case op_put_by_id: + case op_put_by_id_out_of_line: case op_put_by_id_transition_direct: + case op_put_by_id_transition_direct_out_of_line: case op_put_by_id_transition_normal: + case op_put_by_id_transition_normal_out_of_line: case op_get_global_var: case op_get_global_var_watchable: case op_put_global_var: diff --git a/Source/JavaScriptCore/dfg/DFGCommon.h b/Source/JavaScriptCore/dfg/DFGCommon.h index c9d3cbc32..1a64a248c 100644 --- a/Source/JavaScriptCore/dfg/DFGCommon.h +++ b/Source/JavaScriptCore/dfg/DFGCommon.h @@ -136,7 +136,7 @@ enum OptimizationFixpointState { FixpointConverged, FixpointNotConverged }; inline bool shouldShowDisassembly() { - return Options::showDisassembly || Options::showDFGDisassembly; + return Options::showDisassembly() || Options::showDFGDisassembly(); } } } // namespace JSC::DFG diff --git a/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp b/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp index 9e6720c80..d3029b39a 100644 --- a/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGConstantFoldingPhase.cpp @@ -159,6 +159,7 @@ public: 
bool performConstantFolding(Graph& graph) { + SamplingRegion samplingRegion("DFG Constant Folding Phase"); return runPhase<ConstantFoldingPhase>(graph); } diff --git a/Source/JavaScriptCore/dfg/DFGDisassembler.cpp b/Source/JavaScriptCore/dfg/DFGDisassembler.cpp index 1dde37cf2..cfbb936b8 100644 --- a/Source/JavaScriptCore/dfg/DFGDisassembler.cpp +++ b/Source/JavaScriptCore/dfg/DFGDisassembler.cpp @@ -43,7 +43,7 @@ void Disassembler::dump(LinkBuffer& linkBuffer) { m_graph.m_dominators.computeIfNecessary(m_graph); - dataLog("Generated JIT code for DFG CodeBlock %p:\n", m_graph.m_codeBlock); + dataLog("Generated JIT code for DFG CodeBlock %p, instruction count = %u:\n", m_graph.m_codeBlock, m_graph.m_codeBlock->instructionCount()); dataLog(" Code at [%p, %p):\n", linkBuffer.debugAddress(), static_cast<char*>(linkBuffer.debugAddress()) + linkBuffer.debugSize()); const char* prefix = " "; @@ -59,7 +59,7 @@ void Disassembler::dump(LinkBuffer& linkBuffer) m_graph.dumpBlockHeader(prefix, blockIndex, Graph::DumpLivePhisOnly); NodeIndex lastNodeIndexForDisassembly = block->at(0); for (size_t i = 0; i < block->size(); ++i) { - if (!m_graph[block->at(i)].willHaveCodeGen()) + if (!m_graph[block->at(i)].willHaveCodeGenOrOSR()) continue; MacroAssembler::Label currentLabel; if (m_labelForNodeIndex[block->at(i)].isSet()) diff --git a/Source/JavaScriptCore/dfg/DFGDriver.cpp b/Source/JavaScriptCore/dfg/DFGDriver.cpp index 5033aa2c0..64fc0c7e5 100644 --- a/Source/JavaScriptCore/dfg/DFGDriver.cpp +++ b/Source/JavaScriptCore/dfg/DFGDriver.cpp @@ -40,6 +40,7 @@ #include "DFGRedundantPhiEliminationPhase.h" #include "DFGValidate.h" #include "DFGVirtualRegisterAllocationPhase.h" +#include "Options.h" namespace JSC { namespace DFG { @@ -60,7 +61,10 @@ inline bool compile(CompileMode compileMode, ExecState* exec, CodeBlock* codeBlo ASSERT(codeBlock); ASSERT(codeBlock->alternative()); ASSERT(codeBlock->alternative()->getJITType() == JITCode::BaselineJIT); - + + if (!Options::useDFGJIT()) + 
return false; + #if DFG_ENABLE(DEBUG_VERBOSE) dataLog("DFG compiling code block %p(%p) for executable %p, number of instructions = %u.\n", codeBlock, codeBlock->alternative(), codeBlock->ownerExecutable(), codeBlock->instructionCount()); #endif diff --git a/Source/JavaScriptCore/dfg/DFGFPRInfo.h b/Source/JavaScriptCore/dfg/DFGFPRInfo.h index 6af45dd81..e817ed396 100644 --- a/Source/JavaScriptCore/dfg/DFGFPRInfo.h +++ b/Source/JavaScriptCore/dfg/DFGFPRInfo.h @@ -102,7 +102,7 @@ public: #endif -#if CPU(ARM_THUMB2) +#if CPU(ARM) class FPRInfo { public: diff --git a/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp b/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp index f6e3c0a96..2e7389f21 100644 --- a/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGFixupPhase.cpp @@ -402,6 +402,7 @@ private: bool performFixup(Graph& graph) { + SamplingRegion samplingRegion("DFG Fixup Phase"); return runPhase<FixupPhase>(graph); } diff --git a/Source/JavaScriptCore/dfg/DFGGPRInfo.h b/Source/JavaScriptCore/dfg/DFGGPRInfo.h index bd4fa32d1..89faef94b 100644 --- a/Source/JavaScriptCore/dfg/DFGGPRInfo.h +++ b/Source/JavaScriptCore/dfg/DFGGPRInfo.h @@ -384,7 +384,7 @@ private: #endif -#if CPU(ARM_THUMB2) +#if CPU(ARM) #define NUMBER_OF_ARGUMENT_REGISTERS 4 class GPRInfo { @@ -410,7 +410,7 @@ public: static const GPRReg argumentGPR1 = ARMRegisters::r1; // regT1 static const GPRReg argumentGPR2 = ARMRegisters::r2; // regT2 // FIXME: r3 is currently used be the MacroAssembler as a temporary - it seems - // This could threoretically be a problem if theis is used in code generation + // This could theoretically be a problem if this is used in code generation // between the arguments being set up, and the call being made. That said, // any change introducing a problem here is likely to be immediately apparent! static const GPRReg argumentGPR3 = ARMRegisters::r3; // FIXME! 
diff --git a/Source/JavaScriptCore/dfg/DFGGenerationInfo.h b/Source/JavaScriptCore/dfg/DFGGenerationInfo.h index 125a5a4f9..905c5c5fb 100644 --- a/Source/JavaScriptCore/dfg/DFGGenerationInfo.h +++ b/Source/JavaScriptCore/dfg/DFGGenerationInfo.h @@ -29,8 +29,10 @@ #if ENABLE(DFG_JIT) +#include "DFGJITCompiler.h" +#include "DFGVariableEvent.h" +#include "DFGVariableEventStream.h" #include "DataFormat.h" -#include <dfg/DFGJITCompiler.h> namespace JSC { namespace DFG { @@ -51,6 +53,7 @@ public: , m_registerFormat(DataFormatNone) , m_spillFormat(DataFormatNone) , m_canFill(false) + , m_bornForOSR(false) { } @@ -61,6 +64,7 @@ public: m_registerFormat = DataFormatNone; m_spillFormat = DataFormatNone; m_canFill = true; + m_bornForOSR = false; ASSERT(m_useCount); } void initInteger(NodeIndex nodeIndex, uint32_t useCount, GPRReg gpr) @@ -71,6 +75,7 @@ public: m_spillFormat = DataFormatNone; m_canFill = false; u.gpr = gpr; + m_bornForOSR = false; ASSERT(m_useCount); } #if USE(JSVALUE64) @@ -84,6 +89,7 @@ public: m_spillFormat = DataFormatNone; m_canFill = false; u.gpr = gpr; + m_bornForOSR = false; ASSERT(m_useCount); } #elif USE(JSVALUE32_64) @@ -98,6 +104,7 @@ public: m_canFill = false; u.v.tagGPR = tagGPR; u.v.payloadGPR = payloadGPR; + m_bornForOSR = false; ASSERT(m_useCount); } #endif @@ -109,6 +116,7 @@ public: m_spillFormat = DataFormatNone; m_canFill = false; u.gpr = gpr; + m_bornForOSR = false; ASSERT(m_useCount); } void initBoolean(NodeIndex nodeIndex, uint32_t useCount, GPRReg gpr) @@ -119,6 +127,7 @@ public: m_spillFormat = DataFormatNone; m_canFill = false; u.gpr = gpr; + m_bornForOSR = false; ASSERT(m_useCount); } void initDouble(NodeIndex nodeIndex, uint32_t useCount, FPRReg fpr) @@ -130,6 +139,7 @@ public: m_spillFormat = DataFormatNone; m_canFill = false; u.fpr = fpr; + m_bornForOSR = false; ASSERT(m_useCount); } void initStorage(NodeIndex nodeIndex, uint32_t useCount, GPRReg gpr) @@ -140,19 +150,44 @@ public: m_spillFormat = DataFormatNone; m_canFill = 
false; u.gpr = gpr; + m_bornForOSR = false; ASSERT(m_useCount); } // Get the index of the node that produced this value. NodeIndex nodeIndex() { return m_nodeIndex; } + + void noticeOSRBirth(VariableEventStream& stream, NodeIndex nodeIndex, VirtualRegister virtualRegister) + { + if (m_nodeIndex != nodeIndex) + return; + if (!alive()) + return; + if (m_bornForOSR) + return; + + m_bornForOSR = true; + + if (m_registerFormat != DataFormatNone) + appendFill(BirthToFill, stream); + else if (m_spillFormat != DataFormatNone) + appendSpill(BirthToSpill, stream, virtualRegister); + } // Mark the value as having been used (decrement the useCount). // Returns true if this was the last use of the value, and any // associated machine registers may be freed. - bool use() + bool use(VariableEventStream& stream) { ASSERT(m_useCount); - return !--m_useCount; + bool result = !--m_useCount; + + if (result && m_bornForOSR) { + ASSERT(m_nodeIndex != NoNode); + stream.appendAndLog(VariableEvent::death(m_nodeIndex)); + } + + return result; } // Used to check the operands of operations to see if they are on @@ -225,7 +260,7 @@ public: } // Called when a VirtualRegister is being spilled to the RegisterFile for the first time. - void spill(DataFormat spillFormat) + void spill(VariableEventStream& stream, VirtualRegister virtualRegister, DataFormat spillFormat) { // We shouldn't be spill values that don't need spilling. ASSERT(!m_canFill); @@ -236,15 +271,21 @@ public: m_registerFormat = DataFormatNone; m_spillFormat = spillFormat; m_canFill = true; + + if (m_bornForOSR) + appendSpill(Spill, stream, virtualRegister); } // Called on values that don't need spilling (constants and values that have // already been spilled), to mark them as no longer being in machine registers. - void setSpilled() + void setSpilled(VariableEventStream& stream, VirtualRegister virtualRegister) { // Should only be called on values that don't need spilling, and are currently in registers. 
ASSERT(m_canFill && m_registerFormat != DataFormatNone); m_registerFormat = DataFormatNone; + + if (m_bornForOSR) + appendSpill(Spill, stream, virtualRegister); } void killSpilled() @@ -256,46 +297,67 @@ public: // Record that this value is filled into machine registers, // tracking which registers, and what format the value has. #if USE(JSVALUE64) - void fillJSValue(GPRReg gpr, DataFormat format = DataFormatJS) + void fillJSValue(VariableEventStream& stream, GPRReg gpr, DataFormat format = DataFormatJS) { ASSERT(format & DataFormatJS); m_registerFormat = format; u.gpr = gpr; + + if (m_bornForOSR) + appendFill(Fill, stream); } #elif USE(JSVALUE32_64) - void fillJSValue(GPRReg tagGPR, GPRReg payloadGPR, DataFormat format = DataFormatJS) + void fillJSValue(VariableEventStream& stream, GPRReg tagGPR, GPRReg payloadGPR, DataFormat format = DataFormatJS) { ASSERT(format & DataFormatJS); m_registerFormat = format; u.v.tagGPR = tagGPR; // FIXME: for JSValues with known type (boolean, integer, cell etc.) no tagGPR is needed? 
u.v.payloadGPR = payloadGPR; + + if (m_bornForOSR) + appendFill(Fill, stream); } - void fillCell(GPRReg gpr) + void fillCell(VariableEventStream& stream, GPRReg gpr) { m_registerFormat = DataFormatCell; u.gpr = gpr; + + if (m_bornForOSR) + appendFill(Fill, stream); } #endif - void fillInteger(GPRReg gpr) + void fillInteger(VariableEventStream& stream, GPRReg gpr) { m_registerFormat = DataFormatInteger; u.gpr = gpr; + + if (m_bornForOSR) + appendFill(Fill, stream); } - void fillBoolean(GPRReg gpr) + void fillBoolean(VariableEventStream& stream, GPRReg gpr) { m_registerFormat = DataFormatBoolean; u.gpr = gpr; + + if (m_bornForOSR) + appendFill(Fill, stream); } - void fillDouble(FPRReg fpr) + void fillDouble(VariableEventStream& stream, FPRReg fpr) { ASSERT(fpr != InvalidFPRReg); m_registerFormat = DataFormatDouble; u.fpr = fpr; + + if (m_bornForOSR) + appendFill(Fill, stream); } - void fillStorage(GPRReg gpr) + void fillStorage(VariableEventStream& stream, GPRReg gpr) { m_registerFormat = DataFormatStorage; u.gpr = gpr; + + if (m_bornForOSR) + appendFill(Fill, stream); } bool alive() @@ -304,12 +366,33 @@ public: } private: + void appendFill(VariableEventKind kind, VariableEventStream& stream) + { + if (m_registerFormat == DataFormatDouble) { + stream.appendAndLog(VariableEvent::fillFPR(kind, m_nodeIndex, u.fpr)); + return; + } +#if USE(JSVALUE32_64) + if (m_registerFormat & DataFormatJS) { + stream.appendAndLog(VariableEvent::fillPair(kind, m_nodeIndex, u.v.tagGPR, u.v.payloadGPR)); + return; + } +#endif + stream.appendAndLog(VariableEvent::fillGPR(kind, m_nodeIndex, u.gpr, m_registerFormat)); + } + + void appendSpill(VariableEventKind kind, VariableEventStream& stream, VirtualRegister virtualRegister) + { + stream.appendAndLog(VariableEvent::spill(kind, m_nodeIndex, virtualRegister, m_spillFormat)); + } + // The index of the node whose result is stored in this virtual register. 
NodeIndex m_nodeIndex; uint32_t m_useCount; DataFormat m_registerFormat; DataFormat m_spillFormat; bool m_canFill; + bool m_bornForOSR; union { GPRReg gpr; FPRReg fpr; diff --git a/Source/JavaScriptCore/dfg/DFGGraph.cpp b/Source/JavaScriptCore/dfg/DFGGraph.cpp index 4689470c8..c7a4d94d2 100644 --- a/Source/JavaScriptCore/dfg/DFGGraph.cpp +++ b/Source/JavaScriptCore/dfg/DFGGraph.cpp @@ -327,14 +327,11 @@ void Graph::dumpBlockHeader(const char* prefix, BlockIndex blockIndex, PhiNodeDu dataLog("\n"); } dataLog("%s Phi Nodes:", prefix); - unsigned count = 0; for (size_t i = 0; i < block->phis.size(); ++i) { NodeIndex phiNodeIndex = block->phis[i]; Node& phiNode = at(phiNodeIndex); if (!phiNode.shouldGenerate() && phiNodeDumpMode == DumpLivePhisOnly) continue; - if (!((++count) % 4)) - dataLog("\n%s ", prefix); dataLog(" @%u->(", phiNodeIndex); if (phiNode.child1()) { dataLog("@%u", phiNode.child1().index()); diff --git a/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp index 3c85cc77c..497fc346f 100644 --- a/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp +++ b/Source/JavaScriptCore/dfg/DFGJITCompiler.cpp @@ -88,8 +88,6 @@ void JITCompiler::compileBody(SpeculativeJIT& speculative) breakpoint(); #endif - addPtr(TrustedImm32(1), AbsoluteAddress(codeBlock()->addressOfSpeculativeSuccessCounter())); - bool compiledSpeculative = speculative.compile(); ASSERT_UNUSED(compiledSpeculative, compiledSpeculative); } @@ -174,6 +172,7 @@ void JITCompiler::link(LinkBuffer& linkBuffer) #endif info.patch.dfg.deltaCallToSlowCase = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_slowPathGenerator->label())); info.patch.dfg.deltaCallToDone = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_done)); + info.patch.dfg.deltaCallToStorageLoad = differenceBetweenCodePtr(callReturnLocation, linkBuffer.locationOf(m_propertyAccesses[i].m_propertyStorageLoad)); 
info.patch.dfg.baseGPR = m_propertyAccesses[i].m_baseGPR; #if USE(JSVALUE64) info.patch.dfg.valueGPR = m_propertyAccesses[i].m_valueGPR; @@ -205,11 +204,14 @@ void JITCompiler::link(LinkBuffer& linkBuffer) codeBlock()->watchpoint(exit.m_watchpointIndex).correctLabels(linkBuffer); } + codeBlock()->minifiedDFG().setOriginalGraphSize(m_graph.size()); codeBlock()->shrinkToFit(CodeBlock::LateShrink); } bool JITCompiler::compile(JITCode& entry) { + SamplingRegion samplingRegion("DFG Backend"); + setStartOfCode(); compileEntry(); SpeculativeJIT speculative(*this); @@ -243,6 +245,8 @@ bool JITCompiler::compile(JITCode& entry) bool JITCompiler::compileFunction(JITCode& entry, MacroAssemblerCodePtr& entryWithArityCheck) { + SamplingRegion samplingRegion("DFG Backend"); + setStartOfCode(); compileEntry(); diff --git a/Source/JavaScriptCore/dfg/DFGJITCompiler.h b/Source/JavaScriptCore/dfg/DFGJITCompiler.h index ed16459cc..24dbbdcd0 100644 --- a/Source/JavaScriptCore/dfg/DFGJITCompiler.h +++ b/Source/JavaScriptCore/dfg/DFGJITCompiler.h @@ -136,6 +136,7 @@ struct PropertyAccessRecord { CodeOrigin codeOrigin, MacroAssembler::DataLabelPtr structureImm, MacroAssembler::PatchableJump structureCheck, + MacroAssembler::ConvertibleLoadLabel propertyStorageLoad, MacroAssembler::DataLabelCompact loadOrStore, SlowPathGenerator* slowPathGenerator, MacroAssembler::Label done, @@ -148,6 +149,7 @@ struct PropertyAccessRecord { CodeOrigin codeOrigin, MacroAssembler::DataLabelPtr structureImm, MacroAssembler::PatchableJump structureCheck, + MacroAssembler::ConvertibleLoadLabel propertyStorageLoad, MacroAssembler::DataLabelCompact tagLoadOrStore, MacroAssembler::DataLabelCompact payloadLoadOrStore, SlowPathGenerator* slowPathGenerator, @@ -161,6 +163,7 @@ struct PropertyAccessRecord { : m_codeOrigin(codeOrigin) , m_structureImm(structureImm) , m_structureCheck(structureCheck) + , m_propertyStorageLoad(propertyStorageLoad) #if USE(JSVALUE64) , m_loadOrStore(loadOrStore) #elif USE(JSVALUE32_64) @@ 
-182,6 +185,7 @@ struct PropertyAccessRecord { CodeOrigin m_codeOrigin; MacroAssembler::DataLabelPtr m_structureImm; MacroAssembler::PatchableJump m_structureCheck; + MacroAssembler::ConvertibleLoadLabel m_propertyStorageLoad; #if USE(JSVALUE64) MacroAssembler::DataLabelCompact m_loadOrStore; #elif USE(JSVALUE32_64) diff --git a/Source/JavaScriptCore/dfg/DFGMinifiedGraph.h b/Source/JavaScriptCore/dfg/DFGMinifiedGraph.h new file mode 100644 index 000000000..b38ef07ed --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGMinifiedGraph.h @@ -0,0 +1,81 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef DFGMinifiedGraph_h +#define DFGMinifiedGraph_h + +#include <wtf/Platform.h> + +#if ENABLE(DFG_JIT) + +#include "DFGMinifiedNode.h" +#include <algorithm> +#include <wtf/StdLibExtras.h> +#include <wtf/Vector.h> + +namespace JSC { namespace DFG { + +class MinifiedGraph { +public: + MinifiedGraph() { } + + MinifiedNode* at(NodeIndex nodeIndex) + { + if (!m_list.size()) + return 0; + MinifiedNode* entry = + binarySearch<MinifiedNode, NodeIndex, MinifiedNode::getIndex>( + m_list.begin(), m_list.size(), nodeIndex, WTF::KeyMustNotBePresentInArray); + if (entry->index() != nodeIndex) + return 0; + return entry; + } + + void append(const MinifiedNode& node) + { + m_list.append(node); + } + + void prepareAndShrink() + { + std::sort(m_list.begin(), m_list.end(), MinifiedNode::compareByNodeIndex); + m_list.shrinkToFit(); + } + + void setOriginalGraphSize(size_t size) { m_size = size; } + + size_t originalGraphSize() const { return m_size; } + +private: + Vector<MinifiedNode> m_list; + size_t m_size; +}; + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGMinifiedGraph_h + diff --git a/Source/JavaScriptCore/dfg/DFGMinifiedNode.cpp b/Source/JavaScriptCore/dfg/DFGMinifiedNode.cpp new file mode 100644 index 000000000..6362344fb --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGMinifiedNode.cpp @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. 
``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGMinifiedNode.h" + +#if ENABLE(DFG_JIT) + +#include "DFGNode.h" + +namespace JSC { namespace DFG { + +MinifiedNode MinifiedNode::fromNode(NodeIndex nodeIndex, Node& node) +{ + ASSERT(belongsInMinifiedGraph(node.op())); + MinifiedNode result; + result.m_index = nodeIndex; + result.m_op = node.op(); + if (hasChild(node.op())) + result.m_childOrInfo = node.child1().index(); + else if (hasConstantNumber(node.op())) + result.m_childOrInfo = node.constantNumber(); + else if (hasWeakConstant(node.op())) + result.m_childOrInfo = bitwise_cast<uintptr_t>(node.weakConstant()); + else { + ASSERT(node.op() == PhantomArguments); + result.m_childOrInfo = 0; + } + return result; +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGMinifiedNode.h b/Source/JavaScriptCore/dfg/DFGMinifiedNode.h new file mode 100644 index 000000000..b80cbd777 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGMinifiedNode.h @@ -0,0 +1,129 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef DFGMinifiedNode_h +#define DFGMinifiedNode_h + +#include <wtf/Platform.h> + +#if ENABLE(DFG_JIT) + +#include "DFGCommon.h" +#include "DFGNodeType.h" + +namespace JSC { namespace DFG { + +struct Node; + +inline bool belongsInMinifiedGraph(NodeType type) +{ + switch (type) { + case JSConstant: + case WeakJSConstant: + case ValueToInt32: + case Int32ToDouble: + case UInt32ToNumber: + case DoubleAsInt32: + case PhantomArguments: + return true; + default: + return false; + } +} + +class MinifiedNode { +public: + MinifiedNode() { } + + static MinifiedNode fromNode(NodeIndex, Node&); + + NodeIndex index() const { return m_index; } + NodeType op() const { return m_op; } + + bool hasChild1() const { return hasChild(m_op); } + + NodeIndex child1() const + { + ASSERT(hasChild(m_op)); + return m_childOrInfo; + } + + bool hasConstant() const { return hasConstantNumber() || hasWeakConstant(); } + + bool hasConstantNumber() const { return hasConstantNumber(m_op); } + + unsigned constantNumber() const + { + ASSERT(hasConstantNumber(m_op)); + return m_childOrInfo; + } + + bool hasWeakConstant() const { return hasWeakConstant(m_op); } + + JSCell* weakConstant() const + { + ASSERT(hasWeakConstant(m_op)); + return bitwise_cast<JSCell*>(m_childOrInfo); + } + + static NodeIndex getIndex(MinifiedNode* node) { return node->index(); } + static bool compareByNodeIndex(const MinifiedNode& a, const MinifiedNode& b) + { + return a.m_index < b.m_index; + } + +private: + static bool hasChild(NodeType type) + { + switch (type) { + case ValueToInt32: + case Int32ToDouble: + case UInt32ToNumber: + case DoubleAsInt32: + return true; + default: + return false; + } + } + static bool hasConstantNumber(NodeType type) + { + return type == JSConstant; + } + static bool hasWeakConstant(NodeType type) + { + return type == WeakJSConstant; + } + + NodeIndex m_index; + NodeType m_op; + uintptr_t m_childOrInfo; // Nodes in the minified graph have only one child each. 
+}; + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGMinifiedNode_h + diff --git a/Source/JavaScriptCore/dfg/DFGNode.h b/Source/JavaScriptCore/dfg/DFGNode.h index 40701c3bd..ae07d5512 100644 --- a/Source/JavaScriptCore/dfg/DFGNode.h +++ b/Source/JavaScriptCore/dfg/DFGNode.h @@ -40,6 +40,7 @@ #include "JSValue.h" #include "Operands.h" #include "SpeculatedType.h" +#include "StructureSet.h" #include "ValueProfile.h" namespace JSC { namespace DFG { @@ -707,7 +708,7 @@ struct Node { ASSERT(m_virtualRegister != InvalidVirtualRegister); return m_virtualRegister; } - + void setVirtualRegister(VirtualRegister virtualRegister) { ASSERT(hasResult()); @@ -731,9 +732,21 @@ struct Node { return m_refCount; } - bool willHaveCodeGen() + bool willHaveCodeGenOrOSR() { - return shouldGenerate() && op() != Phantom && op() != Nop; + switch (op()) { + case SetLocal: + case Int32ToDouble: + case ValueToInt32: + case UInt32ToNumber: + case DoubleAsInt32: + return true; + case Phantom: + case Nop: + return false; + default: + return shouldGenerate(); + } } unsigned refCount() diff --git a/Source/JavaScriptCore/dfg/DFGOSRExit.cpp b/Source/JavaScriptCore/dfg/DFGOSRExit.cpp index d0e0de9da..e9b02b2e3 100644 --- a/Source/JavaScriptCore/dfg/DFGOSRExit.cpp +++ b/Source/JavaScriptCore/dfg/DFGOSRExit.cpp @@ -33,17 +33,7 @@ namespace JSC { namespace DFG { -static unsigned computeNumVariablesForCodeOrigin( - CodeBlock* codeBlock, const CodeOrigin& codeOrigin) -{ - if (!codeOrigin.inlineCallFrame) - return codeBlock->m_numCalleeRegisters; - return - codeOrigin.inlineCallFrame->stackOffset + - baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame)->m_numCalleeRegisters; -} - -OSRExit::OSRExit(ExitKind kind, JSValueSource jsValueSource, MethodOfGettingAValueProfile valueProfile, MacroAssembler::Jump check, SpeculativeJIT* jit, unsigned recoveryIndex) +OSRExit::OSRExit(ExitKind kind, JSValueSource jsValueSource, MethodOfGettingAValueProfile valueProfile, 
MacroAssembler::Jump check, SpeculativeJIT* jit, unsigned streamIndex, unsigned recoveryIndex) : m_jsValueSource(jsValueSource) , m_valueProfile(valueProfile) , m_check(check) @@ -54,29 +44,15 @@ OSRExit::OSRExit(ExitKind kind, JSValueSource jsValueSource, MethodOfGettingAVal , m_watchpointIndex(std::numeric_limits<unsigned>::max()) , m_kind(kind) , m_count(0) - , m_arguments(jit->m_arguments.size()) - , m_variables(computeNumVariablesForCodeOrigin(jit->m_jit.graph().m_profiledBlock, jit->m_codeOriginForOSR)) + , m_streamIndex(streamIndex) , m_lastSetOperand(jit->m_lastSetOperand) { ASSERT(m_codeOrigin.isSet()); - for (unsigned argument = 0; argument < m_arguments.size(); ++argument) - m_arguments[argument] = jit->computeValueRecoveryFor(jit->m_arguments[argument]); - for (unsigned variable = 0; variable < m_variables.size(); ++variable) - m_variables[variable] = jit->computeValueRecoveryFor(jit->m_variables[variable]); -} - -void OSRExit::dump(FILE* out) const -{ - for (unsigned argument = 0; argument < m_arguments.size(); ++argument) - m_arguments[argument].dump(out); - fprintf(out, " : "); - for (unsigned variable = 0; variable < m_variables.size(); ++variable) - m_variables[variable].dump(out); } bool OSRExit::considerAddingAsFrequentExitSiteSlow(CodeBlock* dfgCodeBlock, CodeBlock* profiledCodeBlock) { - if (static_cast<double>(m_count) / dfgCodeBlock->speculativeFailCounter() <= Options::osrExitProminenceForFrequentExitSite) + if (static_cast<double>(m_count) / dfgCodeBlock->osrExitCounter() <= Options::osrExitProminenceForFrequentExitSite()) return false; FrequentExitSite exitSite; diff --git a/Source/JavaScriptCore/dfg/DFGOSRExit.h b/Source/JavaScriptCore/dfg/DFGOSRExit.h index 683f260f1..cd2434c11 100644 --- a/Source/JavaScriptCore/dfg/DFGOSRExit.h +++ b/Source/JavaScriptCore/dfg/DFGOSRExit.h @@ -35,6 +35,7 @@ #include "DFGCorrectableJumpPoint.h" #include "DFGExitProfile.h" #include "DFGGPRInfo.h" +#include "DFGValueRecoveryOverride.h" #include 
"MacroAssembler.h" #include "MethodOfGettingAValueProfile.h" #include "Operands.h" @@ -83,7 +84,7 @@ private: // This structure describes how to exit the speculative path by // going into baseline code. struct OSRExit { - OSRExit(ExitKind, JSValueSource, MethodOfGettingAValueProfile, MacroAssembler::Jump, SpeculativeJIT*, unsigned recoveryIndex = 0); + OSRExit(ExitKind, JSValueSource, MethodOfGettingAValueProfile, MacroAssembler::Jump, SpeculativeJIT*, unsigned streamIndex, unsigned recoveryIndex = 0); MacroAssemblerCodeRef m_code; @@ -101,38 +102,6 @@ struct OSRExit { ExitKind m_kind; uint32_t m_count; - // Convenient way of iterating over ValueRecoveries while being - // generic over argument versus variable. - int numberOfRecoveries() const { return m_arguments.size() + m_variables.size(); } - const ValueRecovery& valueRecovery(int index) const - { - if (index < (int)m_arguments.size()) - return m_arguments[index]; - return m_variables[index - m_arguments.size()]; - } - ValueRecovery& valueRecoveryForOperand(int operand) - { - if (operandIsArgument(operand)) - return m_arguments[operandToArgument(operand)]; - return m_variables[operand]; - } - bool isArgument(int index) const { return index < (int)m_arguments.size(); } - bool isVariable(int index) const { return !isArgument(index); } - int argumentForIndex(int index) const - { - return index; - } - int variableForIndex(int index) const - { - return index - m_arguments.size(); - } - int operandForIndex(int index) const - { - if (index < (int)m_arguments.size()) - return operandToArgument(index); - return index - m_arguments.size(); - } - bool considerAddingAsFrequentExitSite(CodeBlock* dfgCodeBlock, CodeBlock* profiledCodeBlock) { if (!m_count || !exitKindIsCountable(m_kind)) @@ -140,11 +109,10 @@ struct OSRExit { return considerAddingAsFrequentExitSiteSlow(dfgCodeBlock, profiledCodeBlock); } - void dump(FILE* out) const; - - Vector<ValueRecovery, 0> m_arguments; - Vector<ValueRecovery, 0> m_variables; + unsigned 
m_streamIndex; int m_lastSetOperand; + + RefPtr<ValueRecoveryOverride> m_valueRecoveryOverride; private: bool considerAddingAsFrequentExitSiteSlow(CodeBlock* dfgCodeBlock, CodeBlock* profiledCodeBlock); diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp index e617b5479..2ce1c887b 100644 --- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp +++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.cpp @@ -29,6 +29,7 @@ #if ENABLE(DFG_JIT) #include "CallFrame.h" +#include "DFGCommon.h" #include "LinkBuffer.h" #include "RepatchBuffer.h" @@ -38,6 +39,8 @@ extern "C" { void compileOSRExit(ExecState* exec) { + SamplingRegion samplingRegion("DFG OSR Exit Compilation"); + CodeBlock* codeBlock = exec->codeBlock(); ASSERT(codeBlock); @@ -63,12 +66,22 @@ void compileOSRExit(ExecState* exec) ->jitCompile(exec); } + // Compute the value recoveries. + Operands<ValueRecovery> operands; + codeBlock->variableEventStream().reconstruct(codeBlock, exit.m_codeOrigin, codeBlock->minifiedDFG(), exit.m_streamIndex, operands); + + // There may be an override, for forward speculations. 
+ if (!!exit.m_valueRecoveryOverride) { + operands.setOperand( + exit.m_valueRecoveryOverride->operand, exit.m_valueRecoveryOverride->recovery); + } + SpeculationRecovery* recovery = 0; if (exit.m_recoveryIndex) recovery = &codeBlock->speculationRecovery(exit.m_recoveryIndex - 1); #if DFG_ENABLE(DEBUG_VERBOSE) - dataLog("Generating OSR exit #%u (bc#%u, @%u, %s) for code block %p.\n", exitIndex, exit.m_codeOrigin.bytecodeIndex, exit.m_nodeIndex, exitKindToString(exit.m_kind), codeBlock); + dataLog("Generating OSR exit #%u (seq#%u, bc#%u, @%u, %s) for code block %p.\n", exitIndex, exit.m_streamIndex, exit.m_codeOrigin.bytecodeIndex, exit.m_nodeIndex, exitKindToString(exit.m_kind), codeBlock); #endif { @@ -76,10 +89,11 @@ void compileOSRExit(ExecState* exec) OSRExitCompiler exitCompiler(jit); jit.jitAssertHasValidCallFrame(); - exitCompiler.compileExit(exit, recovery); + exitCompiler.compileExit(exit, operands, recovery); LinkBuffer patchBuffer(*globalData, &jit, codeBlock); - exit.m_code = FINALIZE_CODE( + exit.m_code = FINALIZE_CODE_IF( + shouldShowDisassembly(), patchBuffer, ("DFG OSR exit #%u (bc#%u, @%u, %s) from CodeBlock %p", exitIndex, exit.m_codeOrigin.bytecodeIndex, exit.m_nodeIndex, @@ -102,42 +116,14 @@ void OSRExitCompiler::handleExitCounts(const OSRExit& exit) m_jit.move(AssemblyHelpers::TrustedImmPtr(m_jit.codeBlock()), GPRInfo::regT0); - AssemblyHelpers::JumpList tooFewFails; + AssemblyHelpers::Jump tooFewFails; - if (exit.m_kind == InadequateCoverage) { - // Proceed based on the assumption that we can profitably optimize this code once - // it has executed enough times. 
- - m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfForcedOSRExitCounter()), GPRInfo::regT2); - m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()), GPRInfo::regT1); - m_jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2); - m_jit.add32(AssemblyHelpers::TrustedImm32(-1), GPRInfo::regT1); - m_jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfForcedOSRExitCounter())); - m_jit.store32(GPRInfo::regT1, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter())); - - m_jit.move(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), GPRInfo::regT0); - - tooFewFails.append(m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::TrustedImm32(Options::forcedOSRExitCountForReoptimization))); - - } else { - // Proceed based on the assumption that we can handle these exits so long as they - // don't get too frequent. - - m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter()), GPRInfo::regT2); - m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter()), GPRInfo::regT1); - m_jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2); - m_jit.add32(AssemblyHelpers::TrustedImm32(-1), GPRInfo::regT1); - m_jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeFailCounter())); - m_jit.store32(GPRInfo::regT1, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfSpeculativeSuccessCounter())); - - m_jit.move(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), GPRInfo::regT0); + m_jit.load32(AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter()), GPRInfo::regT2); + m_jit.add32(AssemblyHelpers::TrustedImm32(1), GPRInfo::regT2); + m_jit.store32(GPRInfo::regT2, AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfOSRExitCounter())); + 
m_jit.move(AssemblyHelpers::TrustedImmPtr(m_jit.baselineCodeBlock()), GPRInfo::regT0); + tooFewFails = m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::TrustedImm32(m_jit.codeBlock()->exitCountThresholdForReoptimization())); - tooFewFails.append(m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, AssemblyHelpers::TrustedImm32(m_jit.codeBlock()->largeFailCountThreshold()))); - m_jit.mul32(AssemblyHelpers::TrustedImm32(Options::desiredSpeculativeSuccessFailRatio), GPRInfo::regT2, GPRInfo::regT2); - - tooFewFails.append(m_jit.branch32(AssemblyHelpers::BelowOrEqual, GPRInfo::regT2, GPRInfo::regT1)); - } - // Reoptimize as soon as possible. #if !NUMBER_OF_ARGUMENT_REGISTERS m_jit.poke(GPRInfo::regT0); @@ -157,6 +143,7 @@ void OSRExitCompiler::handleExitCounts(const OSRExit& exit) m_jit.baselineCodeBlock()->counterValueForOptimizeAfterLongWarmUp(), m_jit.baselineCodeBlock()); m_jit.store32(AssemblyHelpers::TrustedImm32(-targetValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecuteCounter())); + targetValue = ExecutionCounter::clippedThreshold(m_jit.codeBlock()->globalObject(), targetValue); m_jit.store32(AssemblyHelpers::TrustedImm32(targetValue), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionActiveThreshold())); m_jit.store32(AssemblyHelpers::TrustedImm32(ExecutionCounter::formattedTotalCount(targetValue)), AssemblyHelpers::Address(GPRInfo::regT0, CodeBlock::offsetOfJITExecutionTotalCount())); diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.h b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.h index ae29a92d5..a2be5b849 100644 --- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.h +++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler.h @@ -48,7 +48,7 @@ public: { } - void compileExit(const OSRExit&, SpeculationRecovery*); + void compileExit(const OSRExit&, const Operands<ValueRecovery>&, SpeculationRecovery*); private: #if !ASSERT_DISABLED diff --git 
a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp index 09912b3e5..6bc136da4 100644 --- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp +++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler32_64.cpp @@ -29,10 +29,11 @@ #if ENABLE(DFG_JIT) && USE(JSVALUE32_64) #include "DFGOperations.h" +#include <wtf/DataLog.h> namespace JSC { namespace DFG { -void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* recovery) +void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery) { // 1) Pro-forma stuff. #if DFG_ENABLE(DEBUG_VERBOSE) @@ -44,7 +45,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco dataLog(" -> %p ", codeOrigin.inlineCallFrame->executable.get()); } dataLog(") at JIT offset 0x%x ", m_jit.debugOffset()); - exit.dump(WTF::dataFile()); + dumpOperands(operands, WTF::dataFile()); #endif #if DFG_ENABLE(VERBOSE_SPECULATION_FAILURE) SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo; @@ -113,7 +114,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco // GPRInfo::numberOfRegisters of them. Also see if there are any constants, // any undefined slots, any FPR slots, and any unboxed ints. 
- Vector<bool> poisonedVirtualRegisters(exit.m_variables.size()); + Vector<bool> poisonedVirtualRegisters(operands.numberOfLocals()); for (unsigned i = 0; i < poisonedVirtualRegisters.size(); ++i) poisonedVirtualRegisters[i] = false; @@ -133,8 +134,8 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco bool haveUndefined = false; bool haveArguments = false; - for (int index = 0; index < exit.numberOfRecoveries(); ++index) { - const ValueRecovery& recovery = exit.valueRecovery(index); + for (size_t index = 0; index < operands.size(); ++index) { + const ValueRecovery& recovery = operands[index]; switch (recovery.technique()) { case DisplacedInRegisterFile: case Int32DisplacedInRegisterFile: @@ -150,8 +151,8 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco // to ensure this happens efficiently. Note that we expect this case // to be rare, so the handling of it is optimized for the cases in // which it does not happen. - if (recovery.virtualRegister() < (int)exit.m_variables.size()) { - switch (exit.m_variables[recovery.virtualRegister()].technique()) { + if (recovery.virtualRegister() < (int)operands.numberOfLocals()) { + switch (operands.local(recovery.virtualRegister()).technique()) { case InGPR: case UnboxedInt32InGPR: case UnboxedBooleanInGPR: @@ -214,19 +215,19 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco // 5) Perform all reboxing of integers and cells, except for those in registers. 
if (haveUnboxedInt32InRegisterFile || haveUnboxedCellInRegisterFile || haveUnboxedBooleanInRegisterFile) { - for (int index = 0; index < exit.numberOfRecoveries(); ++index) { - const ValueRecovery& recovery = exit.valueRecovery(index); + for (size_t index = 0; index < operands.size(); ++index) { + const ValueRecovery& recovery = operands[index]; switch (recovery.technique()) { case AlreadyInRegisterFileAsUnboxedInt32: - m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(exit.operandForIndex(index)))); + m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(operands.operandForIndex(index)))); break; case AlreadyInRegisterFileAsUnboxedCell: - m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(exit.operandForIndex(index)))); + m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(operands.operandForIndex(index)))); break; case AlreadyInRegisterFileAsUnboxedBoolean: - m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::BooleanTag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(exit.operandForIndex(index)))); + m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::BooleanTag), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(operands.operandForIndex(index)))); break; default: @@ -239,19 +240,19 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco // Note that GPRs do not have a fast change (like haveFPRs) because we expect that // most OSR failure points will have at least one GPR that needs to be dumped. 
- initializePoisoned(exit.m_variables.size()); + initializePoisoned(operands.numberOfLocals()); unsigned currentPoisonIndex = 0; - for (int index = 0; index < exit.numberOfRecoveries(); ++index) { - const ValueRecovery& recovery = exit.valueRecovery(index); - int operand = exit.operandForIndex(index); + for (size_t index = 0; index < operands.size(); ++index) { + const ValueRecovery& recovery = operands[index]; + int operand = operands.operandForIndex(index); switch (recovery.technique()) { case InGPR: case UnboxedInt32InGPR: case UnboxedBooleanInGPR: - if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) { + if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) { m_jit.store32(recovery.gpr(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)); - m_poisonScratchIndices[exit.variableForIndex(index)] = currentPoisonIndex; + m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex; currentPoisonIndex++; } else { uint32_t tag = JSValue::EmptyValueTag; @@ -266,10 +267,10 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco } break; case InPair: - if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) { + if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) { m_jit.store32(recovery.tagGPR(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)); m_jit.store32(recovery.payloadGPR(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)); - m_poisonScratchIndices[exit.variableForIndex(index)] = currentPoisonIndex; + m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex; currentPoisonIndex++; } else { m_jit.store32(recovery.tagGPR(), 
AssemblyHelpers::tagFor((VirtualRegister)operand)); @@ -291,7 +292,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco m_jit.convertInt32ToDouble(recovery.gpr(), FPRInfo::fpRegT0); m_jit.addDouble(AssemblyHelpers::AbsoluteAddress(&AssemblyHelpers::twoToThe32), FPRInfo::fpRegT0); - if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) { + if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) { m_jit.move(AssemblyHelpers::TrustedImmPtr(scratchDataBuffer + currentPoisonIndex), addressGPR); m_jit.storeDouble(FPRInfo::fpRegT0, addressGPR); } else @@ -301,7 +302,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco positive.link(&m_jit); - if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) { + if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) { m_jit.store32(recovery.gpr(), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)); m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), reinterpret_cast<char*>(scratchDataBuffer + currentPoisonIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)); } else { @@ -315,8 +316,8 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco m_jit.loadDouble(addressGPR, FPRInfo::fpRegT0); m_jit.loadPtr(myScratch, addressGPR); - if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) { - m_poisonScratchIndices[exit.variableForIndex(index)] = currentPoisonIndex; + if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) { + m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex; currentPoisonIndex++; } break; @@ -329,16 +330,16 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco // 7) Dump all 
doubles into the register file, or to the scratch storage if the // destination virtual register is poisoned. if (haveFPRs) { - for (int index = 0; index < exit.numberOfRecoveries(); ++index) { - const ValueRecovery& recovery = exit.valueRecovery(index); + for (size_t index = 0; index < operands.size(); ++index) { + const ValueRecovery& recovery = operands[index]; if (recovery.technique() != InFPR) continue; - if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) { + if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) { m_jit.storeDouble(recovery.fpr(), scratchDataBuffer + currentPoisonIndex); - m_poisonScratchIndices[exit.variableForIndex(index)] = currentPoisonIndex; + m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex; currentPoisonIndex++; } else - m_jit.storeDouble(recovery.fpr(), AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index))); + m_jit.storeDouble(recovery.fpr(), AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index))); } } @@ -356,8 +357,8 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco // that is far from guaranteed. 
unsigned displacementIndex = 0; - for (int index = 0; index < exit.numberOfRecoveries(); ++index) { - const ValueRecovery& recovery = exit.valueRecovery(index); + for (size_t index = 0; index < operands.size(); ++index) { + const ValueRecovery& recovery = operands[index]; switch (recovery.technique()) { case DisplacedInRegisterFile: m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++)); @@ -381,15 +382,15 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco } displacementIndex = 0; - for (int index = 0; index < exit.numberOfRecoveries(); ++index) { - const ValueRecovery& recovery = exit.valueRecovery(index); + for (size_t index = 0; index < operands.size(); ++index) { + const ValueRecovery& recovery = operands[index]; switch (recovery.technique()) { case DisplacedInRegisterFile: case Int32DisplacedInRegisterFile: case CellDisplacedInRegisterFile: case BooleanDisplacedInRegisterFile: - m_jit.store32(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index))); - m_jit.store32(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index))); + m_jit.store32(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index))); + m_jit.store32(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index))); break; default: break; @@ -414,8 +415,8 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco // to their new (old JIT) locations. 
unsigned scratchIndex = numberOfPoisonedVirtualRegisters; - for (int index = 0; index < exit.numberOfRecoveries(); ++index) { - const ValueRecovery& recovery = exit.valueRecovery(index); + for (size_t index = 0; index < operands.size(); ++index) { + const ValueRecovery& recovery = operands[index]; switch (recovery.technique()) { case DisplacedInRegisterFile: m_jit.load32(AssemblyHelpers::payloadFor(recovery.virtualRegister()), GPRInfo::regT0); @@ -436,30 +437,30 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco } scratchIndex = numberOfPoisonedVirtualRegisters; - for (int index = 0; index < exit.numberOfRecoveries(); ++index) { - const ValueRecovery& recovery = exit.valueRecovery(index); + for (size_t index = 0; index < operands.size(); ++index) { + const ValueRecovery& recovery = operands[index]; switch (recovery.technique()) { case DisplacedInRegisterFile: m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0); m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag), GPRInfo::regT1); - m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index))); - m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index))); + m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index))); + m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index))); scratchIndex++; break; case Int32DisplacedInRegisterFile: m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0); - m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index))); - 
m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index))); + m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::Int32Tag), AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index))); + m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index))); break; case CellDisplacedInRegisterFile: m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0); - m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index))); - m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index))); + m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::CellTag), AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index))); + m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index))); break; case BooleanDisplacedInRegisterFile: m_jit.load32(reinterpret_cast<char*>(scratchDataBuffer + scratchIndex++) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload), GPRInfo::regT0); - m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::BooleanTag), AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index))); - m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index))); + m_jit.store32(AssemblyHelpers::TrustedImm32(JSValue::BooleanTag), AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index))); + m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index))); break; default: break; @@ -473,11 +474,11 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco // 9) Dump all poisoned virtual registers. 
if (numberOfPoisonedVirtualRegisters) { - for (int virtualRegister = 0; virtualRegister < (int)exit.m_variables.size(); ++virtualRegister) { + for (int virtualRegister = 0; virtualRegister < (int)operands.numberOfLocals(); ++virtualRegister) { if (!poisonedVirtualRegisters[virtualRegister]) continue; - const ValueRecovery& recovery = exit.m_variables[virtualRegister]; + const ValueRecovery& recovery = operands.local(virtualRegister); switch (recovery.technique()) { case InGPR: case UnboxedInt32InGPR: @@ -519,16 +520,16 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco m_jit.move(AssemblyHelpers::TrustedImm32(jsUndefined().tag()), GPRInfo::regT1); } - for (int index = 0; index < exit.numberOfRecoveries(); ++index) { - const ValueRecovery& recovery = exit.valueRecovery(index); + for (size_t index = 0; index < operands.size(); ++index) { + const ValueRecovery& recovery = operands[index]; if (recovery.technique() != Constant) continue; if (recovery.constant().isUndefined()) { - m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index))); - m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index))); + m_jit.store32(GPRInfo::regT0, AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index))); + m_jit.store32(GPRInfo::regT1, AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index))); } else { - m_jit.store32(AssemblyHelpers::TrustedImm32(recovery.constant().payload()), AssemblyHelpers::payloadFor((VirtualRegister)exit.operandForIndex(index))); - m_jit.store32(AssemblyHelpers::TrustedImm32(recovery.constant().tag()), AssemblyHelpers::tagFor((VirtualRegister)exit.operandForIndex(index))); + m_jit.store32(AssemblyHelpers::TrustedImm32(recovery.constant().payload()), AssemblyHelpers::payloadFor((VirtualRegister)operands.operandForIndex(index))); + m_jit.store32(AssemblyHelpers::TrustedImm32(recovery.constant().tag()), 
AssemblyHelpers::tagFor((VirtualRegister)operands.operandForIndex(index))); } } } @@ -611,11 +612,11 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco // registers. if (haveArguments) { - for (int index = 0; index < exit.numberOfRecoveries(); ++index) { - const ValueRecovery& recovery = exit.valueRecovery(index); + for (size_t index = 0; index < operands.size(); ++index) { + const ValueRecovery& recovery = operands[index]; if (recovery.technique() != ArgumentsThatWereNotCreated) continue; - int operand = exit.operandForIndex(index); + int operand = operands.operandForIndex(index); // Find the right inline call frame. InlineCallFrame* inlineCallFrame = 0; for (InlineCallFrame* current = exit.m_codeOrigin.inlineCallFrame; diff --git a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp index 33ba69a35..2f38ba79b 100644 --- a/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp +++ b/Source/JavaScriptCore/dfg/DFGOSRExitCompiler64.cpp @@ -29,10 +29,11 @@ #if ENABLE(DFG_JIT) && USE(JSVALUE64) #include "DFGOperations.h" +#include <wtf/DataLog.h> namespace JSC { namespace DFG { -void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* recovery) +void OSRExitCompiler::compileExit(const OSRExit& exit, const Operands<ValueRecovery>& operands, SpeculationRecovery* recovery) { // 1) Pro-forma stuff. #if DFG_ENABLE(DEBUG_VERBOSE) @@ -44,7 +45,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco dataLog(" -> %p ", codeOrigin.inlineCallFrame->executable.get()); } dataLog(") "); - exit.dump(WTF::dataFile()); + dumpOperands(operands, WTF::dataFile()); #endif #if DFG_ENABLE(VERBOSE_SPECULATION_FAILURE) SpeculationFailureDebugInfo* debugInfo = new SpeculationFailureDebugInfo; @@ -110,7 +111,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco // GPRInfo::numberOfRegisters of them. 
Also see if there are any constants, // any undefined slots, any FPR slots, and any unboxed ints. - Vector<bool> poisonedVirtualRegisters(exit.m_variables.size()); + Vector<bool> poisonedVirtualRegisters(operands.numberOfLocals()); for (unsigned i = 0; i < poisonedVirtualRegisters.size(); ++i) poisonedVirtualRegisters[i] = false; @@ -129,8 +130,8 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco bool haveUInt32s = false; bool haveArguments = false; - for (int index = 0; index < exit.numberOfRecoveries(); ++index) { - const ValueRecovery& recovery = exit.valueRecovery(index); + for (size_t index = 0; index < operands.size(); ++index) { + const ValueRecovery& recovery = operands[index]; switch (recovery.technique()) { case Int32DisplacedInRegisterFile: case DoubleDisplacedInRegisterFile: @@ -145,8 +146,8 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco // to ensure this happens efficiently. Note that we expect this case // to be rare, so the handling of it is optimized for the cases in // which it does not happen. - if (recovery.virtualRegister() < (int)exit.m_variables.size()) { - switch (exit.m_variables[recovery.virtualRegister()].technique()) { + if (recovery.virtualRegister() < (int)operands.numberOfLocals()) { + switch (operands.local(recovery.virtualRegister()).technique()) { case InGPR: case UnboxedInt32InGPR: case UInt32InGPR: @@ -224,8 +225,8 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco // 5) Perform all reboxing of integers. 
if (haveUnboxedInt32s || haveUInt32s) { - for (int index = 0; index < exit.numberOfRecoveries(); ++index) { - const ValueRecovery& recovery = exit.valueRecovery(index); + for (size_t index = 0; index < operands.size(); ++index) { + const ValueRecovery& recovery = operands[index]; switch (recovery.technique()) { case UnboxedInt32InGPR: if (recovery.gpr() != alreadyBoxed) @@ -233,7 +234,7 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco break; case AlreadyInRegisterFileAsUnboxedInt32: - m_jit.store32(AssemblyHelpers::TrustedImm32(static_cast<uint32_t>(TagTypeNumber >> 32)), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(exit.operandForIndex(index)))); + m_jit.store32(AssemblyHelpers::TrustedImm32(static_cast<uint32_t>(TagTypeNumber >> 32)), AssemblyHelpers::tagFor(static_cast<VirtualRegister>(operands.operandForIndex(index)))); break; case UInt32InGPR: { @@ -284,19 +285,19 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco // Note that GPRs do not have a fast change (like haveFPRs) because we expect that // most OSR failure points will have at least one GPR that needs to be dumped. 
- initializePoisoned(exit.m_variables.size()); + initializePoisoned(operands.numberOfLocals()); unsigned currentPoisonIndex = 0; - for (int index = 0; index < exit.numberOfRecoveries(); ++index) { - const ValueRecovery& recovery = exit.valueRecovery(index); - int operand = exit.operandForIndex(index); + for (size_t index = 0; index < operands.size(); ++index) { + const ValueRecovery& recovery = operands[index]; + int operand = operands.operandForIndex(index); switch (recovery.technique()) { case InGPR: case UnboxedInt32InGPR: case UInt32InGPR: - if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) { + if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) { m_jit.storePtr(recovery.gpr(), scratchDataBuffer + currentPoisonIndex); - m_poisonScratchIndices[exit.variableForIndex(index)] = currentPoisonIndex; + m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex; currentPoisonIndex++; } else m_jit.storePtr(recovery.gpr(), AssemblyHelpers::addressFor((VirtualRegister)operand)); @@ -311,8 +312,8 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco if (haveFPRs) { // 7) Box all doubles (relies on there being more GPRs than FPRs) - for (int index = 0; index < exit.numberOfRecoveries(); ++index) { - const ValueRecovery& recovery = exit.valueRecovery(index); + for (size_t index = 0; index < operands.size(); ++index) { + const ValueRecovery& recovery = operands[index]; if (recovery.technique() != InFPR) continue; FPRReg fpr = recovery.fpr(); @@ -323,17 +324,17 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco // 8) Dump all doubles into the register file, or to the scratch storage if // the destination virtual register is poisoned. 
- for (int index = 0; index < exit.numberOfRecoveries(); ++index) { - const ValueRecovery& recovery = exit.valueRecovery(index); + for (size_t index = 0; index < operands.size(); ++index) { + const ValueRecovery& recovery = operands[index]; if (recovery.technique() != InFPR) continue; GPRReg gpr = GPRInfo::toRegister(FPRInfo::toIndex(recovery.fpr())); - if (exit.isVariable(index) && poisonedVirtualRegisters[exit.variableForIndex(index)]) { + if (operands.isVariable(index) && poisonedVirtualRegisters[operands.variableForIndex(index)]) { m_jit.storePtr(gpr, scratchDataBuffer + currentPoisonIndex); - m_poisonScratchIndices[exit.variableForIndex(index)] = currentPoisonIndex; + m_poisonScratchIndices[operands.variableForIndex(index)] = currentPoisonIndex; currentPoisonIndex++; } else - m_jit.storePtr(gpr, AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index))); + m_jit.storePtr(gpr, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index))); } } @@ -341,13 +342,13 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco // 9) Box all unboxed doubles in the register file. 
if (haveUnboxedDoubles) { - for (int index = 0; index < exit.numberOfRecoveries(); ++index) { - const ValueRecovery& recovery = exit.valueRecovery(index); + for (size_t index = 0; index < operands.size(); ++index) { + const ValueRecovery& recovery = operands[index]; if (recovery.technique() != AlreadyInRegisterFileAsUnboxedDouble) continue; - m_jit.loadDouble(AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index)), FPRInfo::fpRegT0); + m_jit.loadDouble(AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index)), FPRInfo::fpRegT0); m_jit.boxDouble(FPRInfo::fpRegT0, GPRInfo::regT0); - m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index))); + m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index))); } } @@ -363,8 +364,8 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco // that is far from guaranteed. unsigned displacementIndex = 0; - for (int index = 0; index < exit.numberOfRecoveries(); ++index) { - const ValueRecovery& recovery = exit.valueRecovery(index); + for (size_t index = 0; index < operands.size(); ++index) { + const ValueRecovery& recovery = operands[index]; switch (recovery.technique()) { case DisplacedInRegisterFile: m_jit.loadPtr(AssemblyHelpers::addressFor(recovery.virtualRegister()), GPRInfo::toRegister(displacementIndex++)); @@ -390,13 +391,13 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco } displacementIndex = 0; - for (int index = 0; index < exit.numberOfRecoveries(); ++index) { - const ValueRecovery& recovery = exit.valueRecovery(index); + for (size_t index = 0; index < operands.size(); ++index) { + const ValueRecovery& recovery = operands[index]; switch (recovery.technique()) { case DisplacedInRegisterFile: case Int32DisplacedInRegisterFile: case DoubleDisplacedInRegisterFile: - m_jit.storePtr(GPRInfo::toRegister(displacementIndex++), 
AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index))); + m_jit.storePtr(GPRInfo::toRegister(displacementIndex++), AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index))); break; default: @@ -422,8 +423,8 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco // to their new (old JIT) locations. unsigned scratchIndex = numberOfPoisonedVirtualRegisters; - for (int index = 0; index < exit.numberOfRecoveries(); ++index) { - const ValueRecovery& recovery = exit.valueRecovery(index); + for (size_t index = 0; index < operands.size(); ++index) { + const ValueRecovery& recovery = operands[index]; switch (recovery.technique()) { case DisplacedInRegisterFile: @@ -451,14 +452,14 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco } scratchIndex = numberOfPoisonedVirtualRegisters; - for (int index = 0; index < exit.numberOfRecoveries(); ++index) { - const ValueRecovery& recovery = exit.valueRecovery(index); + for (size_t index = 0; index < operands.size(); ++index) { + const ValueRecovery& recovery = operands[index]; switch (recovery.technique()) { case DisplacedInRegisterFile: case Int32DisplacedInRegisterFile: case DoubleDisplacedInRegisterFile: m_jit.loadPtr(scratchDataBuffer + scratchIndex++, GPRInfo::regT0); - m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index))); + m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index))); break; default: @@ -473,11 +474,11 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco // 11) Dump all poisoned virtual registers. 
if (numberOfPoisonedVirtualRegisters) { - for (int virtualRegister = 0; virtualRegister < (int)exit.m_variables.size(); ++virtualRegister) { + for (int virtualRegister = 0; virtualRegister < (int)operands.numberOfLocals(); ++virtualRegister) { if (!poisonedVirtualRegisters[virtualRegister]) continue; - const ValueRecovery& recovery = exit.m_variables[virtualRegister]; + const ValueRecovery& recovery = operands.local(virtualRegister); switch (recovery.technique()) { case InGPR: case UnboxedInt32InGPR: @@ -500,14 +501,14 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco if (haveUndefined) m_jit.move(AssemblyHelpers::TrustedImmPtr(JSValue::encode(jsUndefined())), GPRInfo::regT0); - for (int index = 0; index < exit.numberOfRecoveries(); ++index) { - const ValueRecovery& recovery = exit.valueRecovery(index); + for (size_t index = 0; index < operands.size(); ++index) { + const ValueRecovery& recovery = operands[index]; if (recovery.technique() != Constant) continue; if (recovery.constant().isUndefined()) - m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index))); + m_jit.storePtr(GPRInfo::regT0, AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index))); else - m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(JSValue::encode(recovery.constant())), AssemblyHelpers::addressFor((VirtualRegister)exit.operandForIndex(index))); + m_jit.storePtr(AssemblyHelpers::TrustedImmPtr(JSValue::encode(recovery.constant())), AssemblyHelpers::addressFor((VirtualRegister)operands.operandForIndex(index))); } } @@ -586,11 +587,11 @@ void OSRExitCompiler::compileExit(const OSRExit& exit, SpeculationRecovery* reco // registers. 
if (haveArguments) { - for (int index = 0; index < exit.numberOfRecoveries(); ++index) { - const ValueRecovery& recovery = exit.valueRecovery(index); + for (size_t index = 0; index < operands.size(); ++index) { + const ValueRecovery& recovery = operands[index]; if (recovery.technique() != ArgumentsThatWereNotCreated) continue; - int operand = exit.operandForIndex(index); + int operand = operands.operandForIndex(index); // Find the right inline call frame. InlineCallFrame* inlineCallFrame = 0; for (InlineCallFrame* current = exit.m_codeOrigin.inlineCallFrame; diff --git a/Source/JavaScriptCore/dfg/DFGOperations.cpp b/Source/JavaScriptCore/dfg/DFGOperations.cpp index 11362f432..5d6575a6f 100644 --- a/Source/JavaScriptCore/dfg/DFGOperations.cpp +++ b/Source/JavaScriptCore/dfg/DFGOperations.cpp @@ -140,6 +140,62 @@ "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \ ); +#elif COMPILER(GCC) && CPU(ARM_TRADITIONAL) + +#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \ + asm ( \ + ".text" "\n" \ + ".globl " SYMBOL_STRING(function) "\n" \ + HIDE_SYMBOL(function) "\n" \ + INLINE_ARM_FUNCTION(function) \ + SYMBOL_STRING(function) ":" "\n" \ + "mov a2, lr" "\n" \ + "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \ + ); + +#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_ECI(function) \ + asm ( \ + ".text" "\n" \ + ".globl " SYMBOL_STRING(function) "\n" \ + HIDE_SYMBOL(function) "\n" \ + INLINE_ARM_FUNCTION(function) \ + SYMBOL_STRING(function) ":" "\n" \ + "mov a4, lr" "\n" \ + "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \ + ); + +// EncodedJSValue in JSVALUE32_64 is a 64-bit integer. When being compiled in ARM EABI, it must be aligned even-numbered register (r0, r2 or [sp]). +// As a result, return address will be at a 4-byte further location in the following cases. 
+#if COMPILER_SUPPORTS(EABI) && CPU(ARM) +#define INSTRUCTION_STORE_RETURN_ADDRESS_EJI "str lr, [sp, #4]" +#define INSTRUCTION_STORE_RETURN_ADDRESS_EJCI "str lr, [sp, #8]" +#else +#define INSTRUCTION_STORE_RETURN_ADDRESS_EJI "str lr, [sp, #0]" +#define INSTRUCTION_STORE_RETURN_ADDRESS_EJCI "str lr, [sp, #4]" +#endif + +#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJI(function) \ + asm ( \ + ".text" "\n" \ + ".globl " SYMBOL_STRING(function) "\n" \ + HIDE_SYMBOL(function) "\n" \ + INLINE_ARM_FUNCTION(function) \ + SYMBOL_STRING(function) ":" "\n" \ + INSTRUCTION_STORE_RETURN_ADDRESS_EJI "\n" \ + "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \ + ); + +#define FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_EJCI(function) \ + asm ( \ + ".text" "\n" \ + ".globl " SYMBOL_STRING(function) "\n" \ + HIDE_SYMBOL(function) "\n" \ + INLINE_ARM_FUNCTION(function) \ + SYMBOL_STRING(function) ":" "\n" \ + INSTRUCTION_STORE_RETURN_ADDRESS_EJCI "\n" \ + "b " LOCAL_REFERENCE(function) "WithReturnAddress" "\n" \ + ); + #endif #define P_FUNCTION_WRAPPER_WITH_RETURN_ADDRESS_E(function) \ @@ -1250,15 +1306,13 @@ void DFG_OPERATION debugOperationPrintSpeculationFailure(ExecState* exec, void* CodeBlock* alternative = codeBlock->alternative(); dataLog("Speculation failure in %p at @%u with executeCounter = %s, " "reoptimizationRetryCounter = %u, optimizationDelayCounter = %u, " - "success/fail %u/(%u+%u)\n", + "osrExitCounter = %u\n", codeBlock, debugInfo->nodeIndex, alternative ? alternative->jitExecuteCounter().status() : 0, alternative ? alternative->reoptimizationRetryCounter() : 0, alternative ? 
alternative->optimizationDelayCounter() : 0, - codeBlock->speculativeSuccessCounter(), - codeBlock->speculativeFailCounter(), - codeBlock->forcedOSRExitCounter()); + codeBlock->osrExitCounter()); } #endif @@ -1324,6 +1378,17 @@ SYMBOL_STRING(getHostCallReturnValue) ":" "\n" "mov r0, r5" "\n" "b " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n" ); +#elif CPU(ARM_TRADITIONAL) +asm ( +".text" "\n" +".globl " SYMBOL_STRING(getHostCallReturnValue) "\n" +HIDE_SYMBOL(getHostCallReturnValue) "\n" +INLINE_ARM_FUNCTION(getHostCallReturnValue) +SYMBOL_STRING(getHostCallReturnValue) ":" "\n" + "ldr r5, [r5, #-40]" "\n" + "mov r0, r5" "\n" + "b " LOCAL_REFERENCE(getHostCallReturnValueWithExecState) "\n" +); #endif extern "C" EncodedJSValue HOST_CALL_RETURN_VALUE_OPTION getHostCallReturnValueWithExecState(ExecState* exec) diff --git a/Source/JavaScriptCore/dfg/DFGPhase.h b/Source/JavaScriptCore/dfg/DFGPhase.h index 53055a215..80fd6914a 100644 --- a/Source/JavaScriptCore/dfg/DFGPhase.h +++ b/Source/JavaScriptCore/dfg/DFGPhase.h @@ -49,6 +49,8 @@ public: endPhase(); } + const char* name() const { return m_name; } + // Each phase must have a run() method. 
protected: @@ -76,17 +78,28 @@ private: }; template<typename PhaseType> +bool runAndLog(PhaseType& phase) +{ + bool result = phase.run(); +#if DFG_ENABLE(DEBUG_VERBOSE) + if (result) + dataLog("Phase %s changed the IR.\n", phase.name()); +#endif + return result; +} + +template<typename PhaseType> bool runPhase(Graph& graph) { PhaseType phase(graph); - return phase.run(); + return runAndLog(phase); } template<typename PhaseType, typename ArgumentType1> bool runPhase(Graph& graph, ArgumentType1 arg1) { PhaseType phase(graph, arg1); - return phase.run(); + return runAndLog(phase); } } } // namespace JSC::DFG diff --git a/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp b/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp index 0bd81ec44..320eb6cb6 100644 --- a/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGPredictionPropagationPhase.cpp @@ -908,6 +908,7 @@ private: bool performPredictionPropagation(Graph& graph) { + SamplingRegion samplingRegion("DFG Prediction Propagation Phase"); return runPhase<PredictionPropagationPhase>(graph); } diff --git a/Source/JavaScriptCore/dfg/DFGRedundantPhiEliminationPhase.cpp b/Source/JavaScriptCore/dfg/DFGRedundantPhiEliminationPhase.cpp index 5453469fe..32e4ef157 100644 --- a/Source/JavaScriptCore/dfg/DFGRedundantPhiEliminationPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGRedundantPhiEliminationPhase.cpp @@ -169,6 +169,7 @@ private: bool performRedundantPhiElimination(Graph& graph) { + SamplingRegion samplingRegion("DFG Redundant Phi Elimination Phase"); return runPhase<RedundantPhiEliminationPhase>(graph); } diff --git a/Source/JavaScriptCore/dfg/DFGRepatch.cpp b/Source/JavaScriptCore/dfg/DFGRepatch.cpp index 9c3391be5..752316f9c 100644 --- a/Source/JavaScriptCore/dfg/DFGRepatch.cpp +++ b/Source/JavaScriptCore/dfg/DFGRepatch.cpp @@ -30,6 +30,7 @@ #include "DFGCCallHelpers.h" #include "DFGSpeculativeJIT.h" +#include "GCAwareJITStubRoutine.h" #include "LinkBuffer.h" 
#include "Operations.h" #include "PolymorphicPutByIdList.h" @@ -43,7 +44,7 @@ static void dfgRepatchCall(CodeBlock* codeblock, CodeLocationCall call, Function repatchBuffer.relink(call, newCalleeFunction); } -static void dfgRepatchByIdSelfAccess(CodeBlock* codeBlock, StructureStubInfo& stubInfo, Structure* structure, size_t offset, const FunctionPtr &slowPathFunction, bool compact) +static void dfgRepatchByIdSelfAccess(CodeBlock* codeBlock, StructureStubInfo& stubInfo, Structure* structure, PropertyOffset offset, const FunctionPtr &slowPathFunction, bool compact) { RepatchBuffer repatchBuffer(codeBlock); @@ -52,18 +53,19 @@ static void dfgRepatchByIdSelfAccess(CodeBlock* codeBlock, StructureStubInfo& st // Patch the structure check & the offset of the load. repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelPtrAtOffset(-(intptr_t)stubInfo.patch.dfg.deltaCheckImmToCall), structure); + repatchBuffer.setLoadInstructionIsActive(stubInfo.callReturnLocation.convertibleLoadAtOffset(stubInfo.patch.dfg.deltaCallToStorageLoad), isOutOfLineOffset(offset)); #if USE(JSVALUE64) if (compact) - repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.dfg.deltaCallToLoadOrStore), sizeof(JSValue) * offset); + repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.dfg.deltaCallToLoadOrStore), offsetRelativeToPatchedStorage(offset)); else - repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.dfg.deltaCallToLoadOrStore), sizeof(JSValue) * offset); + repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.dfg.deltaCallToLoadOrStore), offsetRelativeToPatchedStorage(offset)); #elif USE(JSVALUE32_64) if (compact) { - repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.dfg.deltaCallToTagLoadOrStore), sizeof(JSValue) * offset + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)); - 
repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.dfg.deltaCallToPayloadLoadOrStore), sizeof(JSValue) * offset + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)); + repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.dfg.deltaCallToTagLoadOrStore), offsetRelativeToPatchedStorage(offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)); + repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabelCompactAtOffset(stubInfo.patch.dfg.deltaCallToPayloadLoadOrStore), offsetRelativeToPatchedStorage(offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)); } else { - repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.dfg.deltaCallToTagLoadOrStore), sizeof(JSValue) * offset + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)); - repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.dfg.deltaCallToPayloadLoadOrStore), sizeof(JSValue) * offset + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)); + repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.dfg.deltaCallToTagLoadOrStore), offsetRelativeToPatchedStorage(offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)); + repatchBuffer.repatch(stubInfo.callReturnLocation.dataLabel32AtOffset(stubInfo.patch.dfg.deltaCallToPayloadLoadOrStore), offsetRelativeToPatchedStorage(offset) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)); } #endif } @@ -105,7 +107,7 @@ static void linkRestoreScratch(LinkBuffer& patchBuffer, bool needToRestoreScratc linkRestoreScratch(patchBuffer, needToRestoreScratch, success, fail, failureCases, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.dfg.deltaCallToDone), stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.dfg.deltaCallToSlowCase)); } -static void generateProtoChainAccessStub(ExecState* exec, StructureStubInfo& stubInfo, StructureChain* chain, size_t count, 
size_t offset, Structure* structure, CodeLocationLabel successLabel, CodeLocationLabel slowCaseLabel, MacroAssemblerCodeRef& stubRoutine) +static void generateProtoChainAccessStub(ExecState* exec, StructureStubInfo& stubInfo, StructureChain* chain, size_t count, PropertyOffset offset, Structure* structure, CodeLocationLabel successLabel, CodeLocationLabel slowCaseLabel, RefPtr<JITStubRoutine>& stubRoutine) { JSGlobalData* globalData = &exec->globalData(); @@ -139,13 +141,23 @@ static void generateProtoChainAccessStub(ExecState* exec, StructureStubInfo& stu currStructure = it->get(); } - stubJit.loadPtr(protoObject->addressOfPropertyStorage(), resultGPR); + if (isInlineOffset(offset)) { #if USE(JSVALUE64) - stubJit.loadPtr(MacroAssembler::Address(resultGPR, offset * sizeof(WriteBarrier<Unknown>)), resultGPR); + stubJit.loadPtr(protoObject->locationForOffset(offset), resultGPR); #elif USE(JSVALUE32_64) - stubJit.load32(MacroAssembler::Address(resultGPR, offset * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR); - stubJit.load32(MacroAssembler::Address(resultGPR, offset * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR); + stubJit.move(MacroAssembler::TrustedImmPtr(protoObject->locationForOffset(offset)), resultGPR); + stubJit.load32(MacroAssembler::Address(resultGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR); + stubJit.load32(MacroAssembler::Address(resultGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR); #endif + } else { + stubJit.loadPtr(protoObject->addressOfOutOfLineStorage(), resultGPR); +#if USE(JSVALUE64) + stubJit.loadPtr(MacroAssembler::Address(resultGPR, offsetInOutOfLineStorage(offset) * sizeof(WriteBarrier<Unknown>)), resultGPR); +#elif USE(JSVALUE32_64) + stubJit.load32(MacroAssembler::Address(resultGPR, offsetInOutOfLineStorage(offset) * sizeof(WriteBarrier<Unknown>) + 
OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR); + stubJit.load32(MacroAssembler::Address(resultGPR, offsetInOutOfLineStorage(offset) * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR); +#endif + } MacroAssembler::Jump success, fail; @@ -155,7 +167,7 @@ static void generateProtoChainAccessStub(ExecState* exec, StructureStubInfo& stu linkRestoreScratch(patchBuffer, needToRestoreScratch, success, fail, failureCases, successLabel, slowCaseLabel); - stubRoutine = FINALIZE_CODE( + stubRoutine = FINALIZE_CODE_FOR_STUB( patchBuffer, ("DFG prototype chain access stub for CodeBlock %p, return point %p", exec->codeBlock(), successLabel.executableAddress())); @@ -209,14 +221,14 @@ static bool tryCacheGetByID(ExecState* exec, JSValue baseValue, const Identifier linkRestoreScratch(patchBuffer, needToRestoreScratch, stubInfo, success, fail, failureCases); - stubInfo.stubRoutine = FINALIZE_CODE( + stubInfo.stubRoutine = FINALIZE_CODE_FOR_STUB( patchBuffer, ("DFG GetById array length stub for CodeBlock %p, return point %p", exec->codeBlock(), stubInfo.callReturnLocation.labelAtOffset( stubInfo.patch.dfg.deltaCallToDone).executableAddress())); RepatchBuffer repatchBuffer(codeBlock); - repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.dfg.deltaCallToStructCheck), CodeLocationLabel(stubInfo.stubRoutine.code())); + repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.dfg.deltaCallToStructCheck), CodeLocationLabel(stubInfo.stubRoutine->code().code())); repatchBuffer.relink(stubInfo.callReturnLocation, operationGetById); return true; @@ -253,7 +265,7 @@ static bool tryCacheGetByID(ExecState* exec, JSValue baseValue, const Identifier if (slot.cachedPropertyType() != PropertySlot::Value) return false; - size_t offset = slot.cachedOffset(); + PropertyOffset offset = slot.cachedOffset(); size_t count = normalizePrototypeChain(exec, baseValue, slot.slotBase(), 
propertyName, offset); if (!count) return false; @@ -265,7 +277,7 @@ static bool tryCacheGetByID(ExecState* exec, JSValue baseValue, const Identifier generateProtoChainAccessStub(exec, stubInfo, prototypeChain, count, offset, structure, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.dfg.deltaCallToDone), stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.dfg.deltaCallToSlowCase), stubInfo.stubRoutine); RepatchBuffer repatchBuffer(codeBlock); - repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.dfg.deltaCallToStructCheck), CodeLocationLabel(stubInfo.stubRoutine.code())); + repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.dfg.deltaCallToStructCheck), CodeLocationLabel(stubInfo.stubRoutine->code().code())); repatchBuffer.relink(stubInfo.callReturnLocation, operationGetByIdProtoBuildList); stubInfo.initGetByIdChain(*globalData, codeBlock->ownerExecutable(), structure, prototypeChain, count, true); @@ -312,7 +324,7 @@ static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identi listIndex = 0; } else if (stubInfo.accessType == access_get_by_id_self) { ASSERT(!stubInfo.stubRoutine); - polymorphicStructureList = new PolymorphicAccessStructureList(*globalData, codeBlock->ownerExecutable(), MacroAssemblerCodeRef::createSelfManagedCodeRef(stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.dfg.deltaCallToSlowCase)), stubInfo.u.getByIdSelf.baseObjectStructure.get(), true); + polymorphicStructureList = new PolymorphicAccessStructureList(*globalData, codeBlock->ownerExecutable(), JITStubRoutine::createSelfManagedRoutine(stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.dfg.deltaCallToSlowCase)), stubInfo.u.getByIdSelf.baseObjectStructure.get(), true); stubInfo.initGetByIdSelfList(polymorphicStructureList, 1); listIndex = 1; } else { @@ -349,12 +361,20 @@ static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identi || slot.cachedPropertyType() == 
PropertySlot::Custom) { if (slot.cachedPropertyType() == PropertySlot::Getter) { ASSERT(baseGPR != scratchGPR); - stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), scratchGPR); + if (isInlineOffset(slot.cachedOffset())) { #if USE(JSVALUE64) - stubJit.loadPtr(MacroAssembler::Address(scratchGPR, slot.cachedOffset() * sizeof(JSValue)), scratchGPR); -#elif USE(JSVALUE32_64) - stubJit.load32(MacroAssembler::Address(scratchGPR, slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), scratchGPR); + stubJit.loadPtr(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR); +#else + stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR); #endif + } else { + stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfOutOfLineStorage()), scratchGPR); +#if USE(JSVALUE64) + stubJit.loadPtr(MacroAssembler::Address(scratchGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR); +#else + stubJit.load32(MacroAssembler::Address(scratchGPR, offsetRelativeToBase(slot.cachedOffset())), scratchGPR); +#endif + } stubJit.setupArgumentsWithExecState(baseGPR, scratchGPR); operationFunction = operationCallGetter; } else { @@ -385,13 +405,27 @@ static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identi handlerCall = stubJit.call(); stubJit.jump(GPRInfo::returnValueGPR2); } else { - stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), resultGPR); + if (isInlineOffset(slot.cachedOffset())) { #if USE(JSVALUE64) - stubJit.loadPtr(MacroAssembler::Address(resultGPR, slot.cachedOffset() * sizeof(JSValue)), resultGPR); -#elif USE(JSVALUE32_64) - stubJit.load32(MacroAssembler::Address(resultGPR, slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR); - stubJit.load32(MacroAssembler::Address(resultGPR, slot.cachedOffset() * 
sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR); + stubJit.loadPtr(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset())), resultGPR); +#else + if (baseGPR == resultTagGPR) { + stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR); + stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR); + } else { + stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR); + stubJit.load32(MacroAssembler::Address(baseGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR); + } +#endif + } else { + stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfOutOfLineStorage()), resultGPR); +#if USE(JSVALUE64) + stubJit.loadPtr(MacroAssembler::Address(resultGPR, offsetRelativeToBase(slot.cachedOffset())), resultGPR); +#else + stubJit.load32(MacroAssembler::Address(resultGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR); + stubJit.load32(MacroAssembler::Address(resultGPR, offsetRelativeToBase(slot.cachedOffset()) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultGPR); #endif + } success = stubJit.jump(); isDirect = true; } @@ -400,7 +434,7 @@ static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identi CodeLocationLabel lastProtoBegin; if (listIndex) - lastProtoBegin = CodeLocationLabel(polymorphicStructureList->list[listIndex - 1].stubRoutine.code()); + lastProtoBegin = CodeLocationLabel(polymorphicStructureList->list[listIndex - 1].stubRoutine->code().code()); else lastProtoBegin = 
stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.dfg.deltaCallToSlowCase); ASSERT(!!lastProtoBegin); @@ -412,17 +446,23 @@ static bool tryBuildGetByIDList(ExecState* exec, JSValue baseValue, const Identi patchBuffer.link(handlerCall, lookupExceptionHandlerInStub); } - MacroAssemblerCodeRef stubRoutine = FINALIZE_CODE( - patchBuffer, - ("DFG GetById polymorphic list access for CodeBlock %p, return point %p", - exec->codeBlock(), stubInfo.callReturnLocation.labelAtOffset( - stubInfo.patch.dfg.deltaCallToDone).executableAddress())); + RefPtr<JITStubRoutine> stubRoutine = + createJITStubRoutine( + FINALIZE_CODE( + patchBuffer, + ("DFG GetById polymorphic list access for CodeBlock %p, return point %p", + exec->codeBlock(), stubInfo.callReturnLocation.labelAtOffset( + stubInfo.patch.dfg.deltaCallToDone).executableAddress())), + *globalData, + codeBlock->ownerExecutable(), + slot.cachedPropertyType() == PropertySlot::Getter + || slot.cachedPropertyType() == PropertySlot::Custom); polymorphicStructureList->list[listIndex].set(*globalData, codeBlock->ownerExecutable(), stubRoutine, structure, isDirect); CodeLocationJump jumpLocation = stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.dfg.deltaCallToStructCheck); RepatchBuffer repatchBuffer(codeBlock); - repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code())); + repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine->code().code())); if (listIndex < (POLYMORPHIC_LIST_CACHE_SIZE - 1)) return true; @@ -450,7 +490,7 @@ static bool tryBuildGetByIDProtoList(ExecState* exec, JSValue baseValue, const I ASSERT(slot.slotBase().isObject()); - size_t offset = slot.cachedOffset(); + PropertyOffset offset = slot.cachedOffset(); size_t count = normalizePrototypeChain(exec, baseValue, slot.slotBase(), propertyName, offset); if (!count) return false; @@ -466,7 +506,7 @@ static bool tryBuildGetByIDProtoList(ExecState* exec, JSValue baseValue, const I if (stubInfo.accessType == access_get_by_id_chain) 
{ ASSERT(!!stubInfo.stubRoutine); polymorphicStructureList = new PolymorphicAccessStructureList(*globalData, codeBlock->ownerExecutable(), stubInfo.stubRoutine, stubInfo.u.getByIdChain.baseObjectStructure.get(), stubInfo.u.getByIdChain.chain.get(), true); - stubInfo.stubRoutine = MacroAssemblerCodeRef(); + stubInfo.stubRoutine.clear(); stubInfo.initGetByIdProtoList(polymorphicStructureList, 1); } else { ASSERT(stubInfo.accessType == access_get_by_id_proto_list); @@ -477,10 +517,10 @@ static bool tryBuildGetByIDProtoList(ExecState* exec, JSValue baseValue, const I if (listIndex < POLYMORPHIC_LIST_CACHE_SIZE) { stubInfo.u.getByIdProtoList.listSize++; - CodeLocationLabel lastProtoBegin = CodeLocationLabel(polymorphicStructureList->list[listIndex - 1].stubRoutine.code()); + CodeLocationLabel lastProtoBegin = CodeLocationLabel(polymorphicStructureList->list[listIndex - 1].stubRoutine->code().code()); ASSERT(!!lastProtoBegin); - MacroAssemblerCodeRef stubRoutine; + RefPtr<JITStubRoutine> stubRoutine; generateProtoChainAccessStub(exec, stubInfo, prototypeChain, count, offset, structure, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.dfg.deltaCallToDone), lastProtoBegin, stubRoutine); @@ -488,7 +528,7 @@ static bool tryBuildGetByIDProtoList(ExecState* exec, JSValue baseValue, const I CodeLocationJump jumpLocation = stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.dfg.deltaCallToStructCheck); RepatchBuffer repatchBuffer(codeBlock); - repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code())); + repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine->code().code())); if (listIndex < (POLYMORPHIC_LIST_CACHE_SIZE - 1)) return true; @@ -548,7 +588,7 @@ static void emitPutReplaceStub( PutKind, Structure* structure, CodeLocationLabel failureLabel, - MacroAssemblerCodeRef& stubRoutine) + RefPtr<JITStubRoutine>& stubRoutine) { JSGlobalData* globalData = &exec->globalData(); GPRReg baseGPR = static_cast<GPRReg>(stubInfo.patch.dfg.baseGPR); 
@@ -567,7 +607,7 @@ static void emitPutReplaceStub( MacroAssembler stubJit; - if (scratchGPR == InvalidGPRReg && (writeBarrierNeeded || !structure->isUsingInlineStorage())) { + if (scratchGPR == InvalidGPRReg && (writeBarrierNeeded || isOutOfLineOffset(slot.cachedOffset()))) { scratchGPR = SpeculativeJIT::selectScratchGPR(baseGPR, valueGPR); needToRestoreScratch = true; stubJit.push(scratchGPR); @@ -586,20 +626,20 @@ static void emitPutReplaceStub( #endif #if USE(JSVALUE64) - if (structure->isUsingInlineStorage()) - stubJit.storePtr(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + slot.cachedOffset() * sizeof(JSValue))); + if (isInlineOffset(slot.cachedOffset())) + stubJit.storePtr(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue))); else { - stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), scratchGPR); - stubJit.storePtr(valueGPR, MacroAssembler::Address(scratchGPR, slot.cachedOffset() * sizeof(JSValue))); + stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfOutOfLineStorage()), scratchGPR); + stubJit.storePtr(valueGPR, MacroAssembler::Address(scratchGPR, offsetInOutOfLineStorage(slot.cachedOffset()) * sizeof(JSValue))); } #elif USE(JSVALUE32_64) - if (structure->isUsingInlineStorage()) { - stubJit.store32(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload))); - stubJit.store32(valueTagGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag))); + if (isInlineOffset(slot.cachedOffset())) { + stubJit.store32(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + 
OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload))); + stubJit.store32(valueTagGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag))); } else { - stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), scratchGPR); - stubJit.store32(valueGPR, MacroAssembler::Address(scratchGPR, slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload))); - stubJit.store32(valueTagGPR, MacroAssembler::Address(scratchGPR, slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag))); + stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfOutOfLineStorage()), scratchGPR); + stubJit.store32(valueGPR, MacroAssembler::Address(scratchGPR, offsetInOutOfLineStorage(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload))); + stubJit.store32(valueTagGPR, MacroAssembler::Address(scratchGPR, offsetInOutOfLineStorage(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag))); } #endif @@ -622,7 +662,7 @@ static void emitPutReplaceStub( patchBuffer.link(success, stubInfo.callReturnLocation.labelAtOffset(stubInfo.patch.dfg.deltaCallToDone)); patchBuffer.link(failure, failureLabel); - stubRoutine = FINALIZE_CODE( + stubRoutine = FINALIZE_CODE_FOR_STUB( patchBuffer, ("DFG PutById replace stub for CodeBlock %p, return point %p", exec->codeBlock(), stubInfo.callReturnLocation.labelAtOffset( @@ -640,7 +680,7 @@ static void emitPutTransitionStub( Structure* oldStructure, StructureChain* prototypeChain, CodeLocationLabel failureLabel, - MacroAssemblerCodeRef& stubRoutine) + RefPtr<JITStubRoutine>& stubRoutine) { JSGlobalData* globalData = &exec->globalData(); @@ -685,20 +725,20 @@ static void emitPutTransitionStub( stubJit.storePtr(MacroAssembler::TrustedImmPtr(structure), 
MacroAssembler::Address(baseGPR, JSCell::structureOffset())); #if USE(JSVALUE64) - if (structure->isUsingInlineStorage()) - stubJit.storePtr(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + slot.cachedOffset() * sizeof(JSValue))); + if (isInlineOffset(slot.cachedOffset())) + stubJit.storePtr(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue))); else { - stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), scratchGPR); - stubJit.storePtr(valueGPR, MacroAssembler::Address(scratchGPR, slot.cachedOffset() * sizeof(JSValue))); + stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfOutOfLineStorage()), scratchGPR); + stubJit.storePtr(valueGPR, MacroAssembler::Address(scratchGPR, offsetInOutOfLineStorage(slot.cachedOffset()) * sizeof(JSValue))); } #elif USE(JSVALUE32_64) - if (structure->isUsingInlineStorage()) { - stubJit.store32(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload))); - stubJit.store32(valueTagGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag))); + if (isInlineOffset(slot.cachedOffset())) { + stubJit.store32(valueGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload))); + stubJit.store32(valueTagGPR, MacroAssembler::Address(baseGPR, JSObject::offsetOfInlineStorage() + offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag))); } else { - stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), scratchGPR); - stubJit.store32(valueGPR, 
MacroAssembler::Address(scratchGPR, slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload))); - stubJit.store32(valueTagGPR, MacroAssembler::Address(scratchGPR, slot.cachedOffset() * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag))); + stubJit.loadPtr(MacroAssembler::Address(baseGPR, JSObject::offsetOfOutOfLineStorage()), scratchGPR); + stubJit.store32(valueGPR, MacroAssembler::Address(scratchGPR, offsetInOutOfLineStorage(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload))); + stubJit.store32(valueTagGPR, MacroAssembler::Address(scratchGPR, offsetInOutOfLineStorage(slot.cachedOffset()) * sizeof(JSValue) + OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag))); } #endif @@ -721,8 +761,8 @@ static void emitPutTransitionStub( patchBuffer.link(failure, failureLabel); else patchBuffer.link(failureCases, failureLabel); - - stubRoutine = FINALIZE_CODE( + + stubRoutine = FINALIZE_CODE_FOR_STUB( patchBuffer, ("DFG PutById transition stub for CodeBlock %p, return point %p", exec->codeBlock(), stubInfo.callReturnLocation.labelAtOffset( @@ -752,7 +792,7 @@ static bool tryCachePutByID(ExecState* exec, JSValue baseValue, const Identifier return false; // skip optimizing the case where we need a realloc - if (oldStructure->propertyStorageCapacity() != structure->propertyStorageCapacity()) + if (oldStructure->outOfLineCapacity() != structure->outOfLineCapacity()) return false; normalizePrototypeChain(exec, baseCell); @@ -766,7 +806,7 @@ static bool tryCachePutByID(ExecState* exec, JSValue baseValue, const Identifier stubInfo.stubRoutine); RepatchBuffer repatchBuffer(codeBlock); - repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.dfg.deltaCallToStructCheck), CodeLocationLabel(stubInfo.stubRoutine.code())); + repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.dfg.deltaCallToStructCheck), 
CodeLocationLabel(stubInfo.stubRoutine->code().code())); repatchBuffer.relink(stubInfo.callReturnLocation, appropriateListBuildingPutByIdFunction(slot, putKind)); stubInfo.initPutByIdTransition(*globalData, codeBlock->ownerExecutable(), oldStructure, structure, prototypeChain, putKind == Direct); @@ -808,14 +848,14 @@ static bool tryBuildPutByIdList(ExecState* exec, JSValue baseValue, const Identi // Optimize self access. if (slot.base() == baseValue) { PolymorphicPutByIdList* list; - MacroAssemblerCodeRef stubRoutine; + RefPtr<JITStubRoutine> stubRoutine; if (slot.type() == PutPropertySlot::NewProperty) { if (structure->isDictionary()) return false; // skip optimizing the case where we need a realloc - if (oldStructure->propertyStorageCapacity() != structure->propertyStorageCapacity()) + if (oldStructure->outOfLineCapacity() != structure->outOfLineCapacity()) return false; normalizePrototypeChain(exec, baseCell); @@ -855,7 +895,7 @@ static bool tryBuildPutByIdList(ExecState* exec, JSValue baseValue, const Identi } RepatchBuffer repatchBuffer(codeBlock); - repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.dfg.deltaCallToStructCheck), CodeLocationLabel(stubRoutine.code())); + repatchBuffer.relink(stubInfo.callReturnLocation.jumpAtOffset(stubInfo.patch.dfg.deltaCallToStructCheck), CodeLocationLabel(stubRoutine->code().code())); if (list->isFull()) repatchBuffer.relink(stubInfo.callReturnLocation, appropriateGenericPutByIdFunction(slot, putKind)); diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp index 0c0f3260f..c6ec62129 100644 --- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp +++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.cpp @@ -45,6 +45,8 @@ SpeculativeJIT::SpeculativeJIT(JITCompiler& jit) , m_variables(jit.graph().m_localVars) , m_lastSetOperand(std::numeric_limits<int>::max()) , m_state(m_jit.graph()) + , m_stream(&jit.codeBlock()->variableEventStream()) + , 
m_minifiedGraph(&jit.codeBlock()->minifiedDFG()) , m_isCheckingArgumentTypes(false) { } @@ -99,7 +101,7 @@ GPRReg SpeculativeJIT::fillStorage(NodeIndex nodeIndex) GPRReg gpr = allocate(); m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr); - info.fillStorage(gpr); + info.fillStorage(*m_stream, gpr); return gpr; } @@ -780,39 +782,6 @@ FPRTemporary::FPRTemporary(SpeculativeJIT* jit, JSValueOperand& op1) } #endif -void ValueSource::dump(FILE* out) const -{ - switch (kind()) { - case SourceNotSet: - fprintf(out, "NotSet"); - break; - case SourceIsDead: - fprintf(out, "IsDead"); - break; - case ValueInRegisterFile: - fprintf(out, "InRegFile"); - break; - case Int32InRegisterFile: - fprintf(out, "Int32"); - break; - case CellInRegisterFile: - fprintf(out, "Cell"); - break; - case BooleanInRegisterFile: - fprintf(out, "Bool"); - break; - case DoubleInRegisterFile: - fprintf(out, "Double"); - break; - case ArgumentsSource: - fprintf(out, "Arguments"); - break; - case HaveNode: - fprintf(out, "Node(%d)", m_nodeIndex); - break; - } -} - void SpeculativeJIT::compilePeepHoleDoubleBranch(Node& node, NodeIndex branchNodeIndex, JITCompiler::DoubleCondition condition) { Node& branchNode = at(branchNodeIndex); @@ -953,12 +922,30 @@ bool SpeculativeJIT::compilePeepHoleBranch(Node& node, MacroAssembler::Relationa return false; } +void SpeculativeJIT::noticeOSRBirth(NodeIndex nodeIndex, Node& node) +{ + if (!node.hasVirtualRegister()) + return; + + VirtualRegister virtualRegister = node.virtualRegister(); + GenerationInfo& info = m_generationInfo[virtualRegister]; + + info.noticeOSRBirth(*m_stream, nodeIndex, virtualRegister); +} + void SpeculativeJIT::compileMovHint(Node& node) { ASSERT(node.op() == SetLocal); - setNodeIndexForOperand(node.child1().index(), node.local()); m_lastSetOperand = node.local(); + + Node& child = at(node.child1()); + noticeOSRBirth(node.child1().index(), child); + + if (child.op() == 
UInt32ToNumber) + noticeOSRBirth(child.child1().index(), at(child.child1())); + + m_stream->appendAndLog(VariableEvent::movHint(node.child1().index(), node.local())); } void SpeculativeJIT::compile(BasicBlock& block) @@ -983,11 +970,20 @@ void SpeculativeJIT::compile(BasicBlock& block) m_jit.breakpoint(); #endif +#if DFG_ENABLE(DEBUG_VERBOSE) + dataLog("Setting up state for block #%u: ", m_block); +#endif + + m_stream->appendAndLog(VariableEvent::reset()); + m_jit.jitAssertHasValidCallFrame(); ASSERT(m_arguments.size() == block.variablesAtHead.numberOfArguments()); - for (size_t i = 0; i < m_arguments.size(); ++i) - m_arguments[i] = ValueSource(ValueInRegisterFile); + for (size_t i = 0; i < m_arguments.size(); ++i) { + ValueSource valueSource = ValueSource(ValueInRegisterFile); + m_arguments[i] = valueSource; + m_stream->appendAndLog(VariableEvent::setLocal(argumentToOperand(i), valueSource.dataFormat())); + } m_state.reset(); m_state.beginBasicBlock(&block); @@ -995,18 +991,21 @@ void SpeculativeJIT::compile(BasicBlock& block) ASSERT(m_variables.size() == block.variablesAtHead.numberOfLocals()); for (size_t i = 0; i < m_variables.size(); ++i) { NodeIndex nodeIndex = block.variablesAtHead.local(i); + ValueSource valueSource; if (nodeIndex == NoNode) - m_variables[i] = ValueSource(SourceIsDead); + valueSource = ValueSource(SourceIsDead); else if (at(nodeIndex).variableAccessData()->isArgumentsAlias()) - m_variables[i] = ValueSource(ArgumentsSource); + valueSource = ValueSource(ArgumentsSource); else if (at(nodeIndex).variableAccessData()->isCaptured()) - m_variables[i] = ValueSource(ValueInRegisterFile); + valueSource = ValueSource(ValueInRegisterFile); else if (!at(nodeIndex).refCount()) - m_variables[i] = ValueSource(SourceIsDead); + valueSource = ValueSource(SourceIsDead); else if (at(nodeIndex).variableAccessData()->shouldUseDoubleFormat()) - m_variables[i] = ValueSource(DoubleInRegisterFile); + valueSource = ValueSource(DoubleInRegisterFile); else - 
m_variables[i] = ValueSource::forSpeculation(at(nodeIndex).variableAccessData()->argumentAwarePrediction()); + valueSource = ValueSource::forSpeculation(at(nodeIndex).variableAccessData()->argumentAwarePrediction()); + m_variables[i] = valueSource; + m_stream->appendAndLog(VariableEvent::setLocal(i, valueSource.dataFormat())); } m_lastSetOperand = std::numeric_limits<int>::max(); @@ -1019,6 +1018,10 @@ void SpeculativeJIT::compile(BasicBlock& block) verificationSucceeded.link(&m_jit); } +#if DFG_ENABLE(DEBUG_VERBOSE) + dataLog("\n"); +#endif + for (m_indexInBlock = 0; m_indexInBlock < block.size(); ++m_indexInBlock) { m_compileIndex = block[m_indexInBlock]; m_jit.setForNode(m_compileIndex); @@ -1029,6 +1032,15 @@ void SpeculativeJIT::compile(BasicBlock& block) dataLog("SpeculativeJIT skipping Node @%d (bc#%u) at JIT offset 0x%x ", (int)m_compileIndex, node.codeOrigin.bytecodeIndex, m_jit.debugOffset()); #endif switch (node.op()) { + case JSConstant: + m_minifiedGraph->append(MinifiedNode::fromNode(m_compileIndex, node)); + break; + + case WeakJSConstant: + m_jit.addWeakReference(node.weakConstant()); + m_minifiedGraph->append(MinifiedNode::fromNode(m_compileIndex, node)); + break; + case SetLocal: compileMovHint(node); break; @@ -1073,11 +1085,9 @@ void SpeculativeJIT::compile(BasicBlock& block) break; } - case WeakJSConstant: - m_jit.addWeakReference(node.weakConstant()); - break; - default: + if (belongsInMinifiedGraph(node.op())) + m_minifiedGraph->append(MinifiedNode::fromNode(m_compileIndex, node)); break; } } else { @@ -1100,6 +1110,11 @@ void SpeculativeJIT::compile(BasicBlock& block) return; } + if (belongsInMinifiedGraph(node.op())) { + m_minifiedGraph->append(MinifiedNode::fromNode(m_compileIndex, node)); + noticeOSRBirth(m_compileIndex, node); + } + #if DFG_ENABLE(DEBUG_VERBOSE) if (node.hasResult()) { GenerationInfo& info = m_generationInfo[node.virtualRegister()]; @@ -1120,16 +1135,6 @@ void SpeculativeJIT::compile(BasicBlock& block) #endif } -#if 
DFG_ENABLE(VERBOSE_VALUE_RECOVERIES) - for (size_t i = 0; i < m_arguments.size(); ++i) - computeValueRecoveryFor(argumentToOperand(i)).dump(stderr); - - dataLog(" : "); - - for (int operand = 0; operand < (int)m_variables.size(); ++operand) - computeValueRecoveryFor(operand).dump(stderr); -#endif - #if DFG_ENABLE(DEBUG_VERBOSE) dataLog("\n"); #endif @@ -1366,154 +1371,14 @@ void SpeculativeJIT::linkOSREntries(LinkBuffer& linkBuffer) ValueRecovery SpeculativeJIT::computeValueRecoveryFor(const ValueSource& valueSource) { - switch (valueSource.kind()) { - case SourceIsDead: - return ValueRecovery::constant(jsUndefined()); - - case ValueInRegisterFile: - return ValueRecovery::alreadyInRegisterFile(); + if (valueSource.isInRegisterFile()) + return valueSource.valueRecovery(); - case Int32InRegisterFile: - return ValueRecovery::alreadyInRegisterFileAsUnboxedInt32(); - - case CellInRegisterFile: - return ValueRecovery::alreadyInRegisterFileAsUnboxedCell(); - - case BooleanInRegisterFile: - return ValueRecovery::alreadyInRegisterFileAsUnboxedBoolean(); - - case DoubleInRegisterFile: - return ValueRecovery::alreadyInRegisterFileAsUnboxedDouble(); - - case ArgumentsSource: - return ValueRecovery::argumentsThatWereNotCreated(); - - case HaveNode: { - Node* nodePtr = &at(valueSource.nodeIndex()); - - if (nodePtr->isPhantomArguments()) - return ValueRecovery::argumentsThatWereNotCreated(); - - if (nodePtr->hasConstant()) - return ValueRecovery::constant(valueOfJSConstant(valueSource.nodeIndex())); - - if (!nodePtr->shouldGenerate()) { - // It's legitimately dead. As in, nobody will ever use this node, or operand, - // ever. Set it to Undefined to make the GC happy after the OSR. 
- return ValueRecovery::constant(jsUndefined()); - } + ASSERT(valueSource.kind() == HaveNode); + if (isConstant(valueSource.nodeIndex())) + return ValueRecovery::constant(valueOfJSConstant(valueSource.nodeIndex())); - GenerationInfo* infoPtr = &m_generationInfo[nodePtr->virtualRegister()]; - if (!infoPtr->alive() || infoPtr->nodeIndex() != valueSource.nodeIndex()) { - // Try to see if there is an alternate node that would contain the value we want. - // There are four possibilities: - // - // Int32ToDouble: We can use this in place of the original node, but - // we'd rather not; so we use it only if it is the only remaining - // live version. - // - // ValueToInt32: If the only remaining live version of the value is - // ValueToInt32, then we can use it. - // - // UInt32ToNumber: If the only live version of the value is a UInt32ToNumber - // then the only remaining uses are ones that want a properly formed number - // rather than a UInt32 intermediate. - // - // The reverse of the above: This node could be a UInt32ToNumber, but its - // alternative is still alive. This means that the only remaining uses of - // the number would be fine with a UInt32 intermediate. - // - // DoubleAsInt32: Same as UInt32ToNumber. 
- // - - bool found = false; - - if (nodePtr->op() == UInt32ToNumber || nodePtr->op() == DoubleAsInt32) { - NodeIndex nodeIndex = nodePtr->child1().index(); - nodePtr = &at(nodeIndex); - infoPtr = &m_generationInfo[nodePtr->virtualRegister()]; - if (infoPtr->alive() && infoPtr->nodeIndex() == nodeIndex) - found = true; - } - - if (!found) { - NodeIndex int32ToDoubleIndex = NoNode; - NodeIndex valueToInt32Index = NoNode; - NodeIndex uint32ToNumberIndex = NoNode; - NodeIndex doubleAsInt32Index = NoNode; - - for (unsigned virtualRegister = 0; virtualRegister < m_generationInfo.size(); ++virtualRegister) { - GenerationInfo& info = m_generationInfo[virtualRegister]; - if (!info.alive()) - continue; - if (info.nodeIndex() == NoNode) - continue; - Node& node = at(info.nodeIndex()); - if (node.child1Unchecked() != valueSource.nodeIndex()) - continue; - switch (node.op()) { - case Int32ToDouble: - int32ToDoubleIndex = info.nodeIndex(); - break; - case ValueToInt32: - valueToInt32Index = info.nodeIndex(); - break; - case UInt32ToNumber: - uint32ToNumberIndex = info.nodeIndex(); - break; - case DoubleAsInt32: - doubleAsInt32Index = info.nodeIndex(); - default: - break; - } - } - - NodeIndex nodeIndexToUse; - if (doubleAsInt32Index != NoNode) - nodeIndexToUse = doubleAsInt32Index; - else if (int32ToDoubleIndex != NoNode) - nodeIndexToUse = int32ToDoubleIndex; - else if (valueToInt32Index != NoNode) - nodeIndexToUse = valueToInt32Index; - else if (uint32ToNumberIndex != NoNode) - nodeIndexToUse = uint32ToNumberIndex; - else - nodeIndexToUse = NoNode; - - if (nodeIndexToUse != NoNode) { - nodePtr = &at(nodeIndexToUse); - infoPtr = &m_generationInfo[nodePtr->virtualRegister()]; - ASSERT(infoPtr->alive() && infoPtr->nodeIndex() == nodeIndexToUse); - found = true; - } - } - - if (!found) - return ValueRecovery::constant(jsUndefined()); - } - - ASSERT(infoPtr->alive()); - - if (infoPtr->registerFormat() != DataFormatNone) { - if (infoPtr->registerFormat() == DataFormatDouble) - 
return ValueRecovery::inFPR(infoPtr->fpr()); -#if USE(JSVALUE32_64) - if (infoPtr->registerFormat() & DataFormatJS) - return ValueRecovery::inPair(infoPtr->tagGPR(), infoPtr->payloadGPR()); -#endif - return ValueRecovery::inGPR(infoPtr->gpr(), infoPtr->registerFormat()); - } - if (infoPtr->spillFormat() != DataFormatNone) - return ValueRecovery::displacedInRegisterFile(static_cast<VirtualRegister>(nodePtr->virtualRegister()), infoPtr->spillFormat()); - - ASSERT_NOT_REACHED(); - return ValueRecovery(); - } - - default: - ASSERT_NOT_REACHED(); - return ValueRecovery(); - } + return ValueRecovery(); } void SpeculativeJIT::compileGetCharCodeAt(Node& node) @@ -1652,10 +1517,11 @@ GeneratedOperandType SpeculativeJIT::checkGeneratedTypeForToInt32(NodeIndex node case DataFormatJSDouble: case DataFormatDouble: return GeneratedOperandDouble; + + default: + ASSERT_NOT_REACHED(); + return GeneratedOperandTypeUnknown; } - - ASSERT_NOT_REACHED(); - return GeneratedOperandTypeUnknown; } void SpeculativeJIT::compileValueToInt32(Node& node) diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h index 67a22b767..57bc84a12 100644 --- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h +++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2011 Apple Inc. All rights reserved. + * Copyright (C) 2011, 2012 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -26,6 +26,8 @@ #ifndef DFGSpeculativeJIT_h #define DFGSpeculativeJIT_h +#include <wtf/Platform.h> + #if ENABLE(DFG_JIT) #include "DFGAbstractState.h" @@ -34,6 +36,7 @@ #include "DFGOSRExit.h" #include "DFGOperations.h" #include "DFGSilentRegisterSavePlan.h" +#include "DFGValueSource.h" #include "MarkedAllocator.h" #include "ValueRecovery.h" @@ -48,87 +51,6 @@ class SpeculateDoubleOperand; class SpeculateCellOperand; class SpeculateBooleanOperand; - -enum ValueSourceKind { - SourceNotSet, - ValueInRegisterFile, - Int32InRegisterFile, - CellInRegisterFile, - BooleanInRegisterFile, - DoubleInRegisterFile, - ArgumentsSource, - SourceIsDead, - HaveNode -}; - -class ValueSource { -public: - ValueSource() - : m_nodeIndex(nodeIndexFromKind(SourceNotSet)) - { - } - - explicit ValueSource(ValueSourceKind valueSourceKind) - : m_nodeIndex(nodeIndexFromKind(valueSourceKind)) - { - ASSERT(kind() != SourceNotSet); - ASSERT(kind() != HaveNode); - } - - explicit ValueSource(NodeIndex nodeIndex) - : m_nodeIndex(nodeIndex) - { - ASSERT(kind() == HaveNode); - } - - static ValueSource forSpeculation(SpeculatedType prediction) - { - if (isInt32Speculation(prediction)) - return ValueSource(Int32InRegisterFile); - if (isArraySpeculation(prediction)) - return ValueSource(CellInRegisterFile); - if (isBooleanSpeculation(prediction)) - return ValueSource(BooleanInRegisterFile); - return ValueSource(ValueInRegisterFile); - } - - bool isSet() const - { - return kindFromNodeIndex(m_nodeIndex) != SourceNotSet; - } - - ValueSourceKind kind() const - { - return kindFromNodeIndex(m_nodeIndex); - } - - NodeIndex nodeIndex() const - { - ASSERT(kind() == HaveNode); - return m_nodeIndex; - } - - void dump(FILE* out) const; - -private: - static NodeIndex nodeIndexFromKind(ValueSourceKind kind) - { - ASSERT(kind >= SourceNotSet && kind < HaveNode); - return NoNode 
- kind; - } - - static ValueSourceKind kindFromNodeIndex(NodeIndex nodeIndex) - { - unsigned kind = static_cast<unsigned>(NoNode - nodeIndex); - if (kind >= static_cast<unsigned>(HaveNode)) - return HaveNode; - return static_cast<ValueSourceKind>(kind); - } - - NodeIndex m_nodeIndex; -}; - - enum GeneratedOperandType { GeneratedOperandTypeUnknown, GeneratedOperandInteger, GeneratedOperandDouble, GeneratedOperandJSValue}; // === SpeculativeJIT === @@ -326,7 +248,7 @@ public: // use() returns true when the value becomes dead, and any // associated resources may be freed. - if (!info.use()) + if (!info.use(*m_stream)) return; // Release the associated machine registers. @@ -376,6 +298,7 @@ public: void runSlowPathGenerators(); void compile(Node&); + void noticeOSRBirth(NodeIndex, Node&); void compileMovHint(Node&); void compile(BasicBlock&); @@ -777,7 +700,7 @@ public: // Check the GenerationInfo to see if this value need writing // to the RegisterFile - if not, mark it as spilled & return. if (!info.needsSpill()) { - info.setSpilled(); + info.setSpilled(*m_stream, spillMe); return; } @@ -787,20 +710,20 @@ public: // This is special, since it's not a JS value - as in it's not visible to JS // code. m_jit.storePtr(info.gpr(), JITCompiler::addressFor(spillMe)); - info.spill(DataFormatStorage); + info.spill(*m_stream, spillMe, DataFormatStorage); return; } case DataFormatInteger: { m_jit.store32(info.gpr(), JITCompiler::payloadFor(spillMe)); - info.spill(DataFormatInteger); + info.spill(*m_stream, spillMe, DataFormatInteger); return; } #if USE(JSVALUE64) case DataFormatDouble: { m_jit.storeDouble(info.fpr(), JITCompiler::addressFor(spillMe)); - info.spill(DataFormatDouble); + info.spill(*m_stream, spillMe, DataFormatDouble); return; } @@ -816,13 +739,13 @@ public: // Spill the value, and record it as spilled in its boxed form. 
m_jit.storePtr(reg, JITCompiler::addressFor(spillMe)); - info.spill((DataFormat)(spillFormat | DataFormatJS)); + info.spill(*m_stream, spillMe, (DataFormat)(spillFormat | DataFormatJS)); return; #elif USE(JSVALUE32_64) case DataFormatCell: case DataFormatBoolean: { m_jit.store32(info.gpr(), JITCompiler::payloadFor(spillMe)); - info.spill(spillFormat); + info.spill(*m_stream, spillMe, spillFormat); return; } @@ -830,7 +753,7 @@ public: case DataFormatJSDouble: { // On JSVALUE32_64 boxing a double is a no-op. m_jit.storeDouble(info.fpr(), JITCompiler::addressFor(spillMe)); - info.spill(DataFormatJSDouble); + info.spill(*m_stream, spillMe, DataFormatJSDouble); return; } @@ -839,7 +762,7 @@ public: ASSERT(spillFormat & DataFormatJS); m_jit.store32(info.tagGPR(), JITCompiler::tagFor(spillMe)); m_jit.store32(info.payloadGPR(), JITCompiler::payloadFor(spillMe)); - info.spill(spillFormat); + info.spill(*m_stream, spillMe, spillFormat); return; #endif } @@ -1800,7 +1723,7 @@ public: } #endif -#if !defined(NDEBUG) && !CPU(ARM_THUMB2) +#if !defined(NDEBUG) && !CPU(ARM) void prepareForExternalCall() { for (unsigned i = 0; i < sizeof(void*) / 4; i++) @@ -2180,8 +2103,7 @@ public: m_jit.storePtr(MacroAssembler::TrustedImmPtr(0), MacroAssembler::Address(resultGPR, JSObject::offsetOfInheritorID())); // Initialize the object's property storage pointer. 
- m_jit.addPtr(MacroAssembler::TrustedImm32(sizeof(JSObject)), resultGPR, scratchGPR); - m_jit.storePtr(scratchGPR, MacroAssembler::Address(resultGPR, ClassType::offsetOfPropertyStorage())); + m_jit.storePtr(MacroAssembler::TrustedImmPtr(0), MacroAssembler::Address(resultGPR, ClassType::offsetOfOutOfLineStorage())); } // It is acceptable to have structure be equal to scratch, so long as you're fine @@ -2204,7 +2126,7 @@ public: if (!m_compileOkay) return; ASSERT(at(m_compileIndex).canExit() || m_isCheckingArgumentTypes); - m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(nodeIndex), jumpToFail, this)); + m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(nodeIndex), jumpToFail, this, m_stream->size())); } void speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail) { @@ -2231,7 +2153,7 @@ public: return; ASSERT(at(m_compileIndex).canExit() || m_isCheckingArgumentTypes); m_jit.codeBlock()->appendSpeculationRecovery(recovery); - m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(nodeIndex), jumpToFail, this, m_jit.codeBlock()->numberOfSpeculationRecoveries())); + m_jit.codeBlock()->appendOSRExit(OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(nodeIndex), jumpToFail, this, m_stream->size(), m_jit.codeBlock()->numberOfSpeculationRecoveries())); } void speculationCheck(ExitKind kind, JSValueSource jsValueSource, Edge nodeUse, MacroAssembler::Jump jumpToFail, const SpeculationRecovery& recovery) { @@ -2252,7 +2174,7 @@ public: m_jit.codeBlock()->appendOSRExit( OSRExit(kind, jsValueSource, m_jit.graph().methodOfGettingAValueProfileFor(nodeIndex), - JITCompiler::Jump(), this))); + JITCompiler::Jump(), this, m_stream->size()))); exit.m_watchpointIndex = m_jit.codeBlock()->appendWatchpoint( Watchpoint(m_jit.watchpointLabel())); 
return &m_jit.codeBlock()->watchpoint(exit.m_watchpointIndex); @@ -2295,7 +2217,8 @@ public: exit.m_codeOrigin = nextNode->codeOrigin; exit.m_lastSetOperand = setLocal->local(); - exit.valueRecoveryForOperand(setLocal->local()) = valueRecovery; + exit.m_valueRecoveryOverride = adoptRef( + new ValueRecoveryOverride(setLocal->local(), valueRecovery)); } void forwardSpeculationCheck(ExitKind kind, JSValueSource jsValueSource, NodeIndex nodeIndex, MacroAssembler::JumpList& jumpsToFail, const ValueRecovery& valueRecovery) { @@ -2362,6 +2285,13 @@ public: return m_variables[operand]; } + void recordSetLocal(int operand, ValueSource valueSource) + { + valueSourceReferenceForOperand(operand) = valueSource; + m_stream->appendAndLog(VariableEvent::setLocal(operand, valueSource.dataFormat())); + } + + // The JIT, while also provides MacroAssembler functionality. JITCompiler& m_jit; // The current node being generated. @@ -2395,6 +2325,9 @@ public: AbstractState m_state; + VariableEventStream* m_stream; + MinifiedGraph* m_minifiedGraph; + bool m_isCheckingArgumentTypes; Vector<SlowPathGenerator*, 8> m_slowPathGenerators; // doesn't use OwnPtr<> because I don't want to include DFGSlowPathGenerator.h diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp index 05609baa8..bbbf3c40c 100644 --- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp +++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT32_64.cpp @@ -62,7 +62,7 @@ GPRReg SpeculativeJIT::fillInteger(NodeIndex nodeIndex, DataFormat& returnFormat m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr); } - info.fillInteger(gpr); + info.fillInteger(*m_stream, gpr); returnFormat = DataFormatInteger; return gpr; } @@ -91,7 +91,7 @@ GPRReg SpeculativeJIT::fillInteger(NodeIndex nodeIndex, DataFormat& returnFormat m_gprs.release(tagGPR); m_gprs.release(payloadGPR); m_gprs.retain(payloadGPR, virtualRegister, SpillOrderInteger); - info.fillInteger(payloadGPR); + 
info.fillInteger(*m_stream, payloadGPR); returnFormat = DataFormatInteger; return payloadGPR; } @@ -103,10 +103,11 @@ GPRReg SpeculativeJIT::fillInteger(NodeIndex nodeIndex, DataFormat& returnFormat returnFormat = DataFormatInteger; return gpr; } - } - ASSERT_NOT_REACHED(); - return InvalidGPRReg; + default: + ASSERT_NOT_REACHED(); + return InvalidGPRReg; + } } FPRReg SpeculativeJIT::fillDouble(NodeIndex nodeIndex) @@ -123,13 +124,13 @@ FPRReg SpeculativeJIT::fillDouble(NodeIndex nodeIndex) GPRReg gpr = allocate(); m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr); m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); - info.fillInteger(gpr); + info.fillInteger(*m_stream, gpr); unlock(gpr); } else if (isNumberConstant(nodeIndex)) { FPRReg fpr = fprAllocate(); m_jit.loadDouble(addressOfDoubleConstant(nodeIndex), fpr); m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); - info.fillDouble(fpr); + info.fillDouble(*m_stream, fpr); return fpr; } else { // FIXME: should not be reachable? 
@@ -142,7 +143,7 @@ FPRReg SpeculativeJIT::fillDouble(NodeIndex nodeIndex) FPRReg fpr = fprAllocate(); m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr); m_fprs.retain(fpr, virtualRegister, SpillOrderSpilled); - info.fillDouble(fpr); + info.fillDouble(*m_stream, fpr); return fpr; } @@ -162,7 +163,7 @@ FPRReg SpeculativeJIT::fillDouble(NodeIndex nodeIndex) hasUnboxedDouble.link(&m_jit); m_fprs.retain(fpr, virtualRegister, SpillOrderSpilled); - info.fillDouble(fpr); + info.fillDouble(*m_stream, fpr); return fpr; } } @@ -207,7 +208,7 @@ FPRReg SpeculativeJIT::fillDouble(NodeIndex nodeIndex) m_gprs.unlock(tagGPR); m_gprs.unlock(payloadGPR); m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); - info.fillDouble(fpr); + info.fillDouble(*m_stream, fpr); info.killSpilled(); return fpr; } @@ -227,10 +228,11 @@ FPRReg SpeculativeJIT::fillDouble(NodeIndex nodeIndex) m_fprs.lock(fpr); return fpr; } - } - ASSERT_NOT_REACHED(); - return InvalidFPRReg; + default: + ASSERT_NOT_REACHED(); + return InvalidFPRReg; + } } bool SpeculativeJIT::fillJSValue(NodeIndex nodeIndex, GPRReg& tagGPR, GPRReg& payloadGPR, FPRReg& fpr) @@ -252,7 +254,7 @@ bool SpeculativeJIT::fillJSValue(NodeIndex nodeIndex, GPRReg& tagGPR, GPRReg& pa m_jit.move(Imm32(valueOfJSConstant(nodeIndex).payload()), payloadGPR); m_gprs.retain(tagGPR, virtualRegister, SpillOrderConstant); m_gprs.retain(payloadGPR, virtualRegister, SpillOrderConstant); - info.fillJSValue(tagGPR, payloadGPR, isInt32Constant(nodeIndex) ? DataFormatJSInteger : DataFormatJS); + info.fillJSValue(*m_stream, tagGPR, payloadGPR, isInt32Constant(nodeIndex) ? 
DataFormatJSInteger : DataFormatJS); } else { DataFormat spillFormat = info.spillFormat(); ASSERT(spillFormat != DataFormatNone && spillFormat != DataFormatStorage); @@ -278,7 +280,7 @@ bool SpeculativeJIT::fillJSValue(NodeIndex nodeIndex, GPRReg& tagGPR, GPRReg& pa m_jit.load32(JITCompiler::payloadFor(virtualRegister), payloadGPR); m_gprs.retain(tagGPR, virtualRegister, SpillOrderSpilled); m_gprs.retain(payloadGPR, virtualRegister, SpillOrderSpilled); - info.fillJSValue(tagGPR, payloadGPR, spillFormat == DataFormatJSDouble ? DataFormatJS : spillFormat); + info.fillJSValue(*m_stream, tagGPR, payloadGPR, spillFormat == DataFormatJSDouble ? DataFormatJS : spillFormat); } return true; @@ -320,7 +322,7 @@ bool SpeculativeJIT::fillJSValue(NodeIndex nodeIndex, GPRReg& tagGPR, GPRReg& pa m_gprs.release(gpr); m_gprs.retain(tagGPR, virtualRegister, SpillOrderJS); m_gprs.retain(payloadGPR, virtualRegister, SpillOrderJS); - info.fillJSValue(tagGPR, payloadGPR, fillFormat); + info.fillJSValue(*m_stream, tagGPR, payloadGPR, fillFormat); return true; } @@ -335,7 +337,7 @@ bool SpeculativeJIT::fillJSValue(NodeIndex nodeIndex, GPRReg& tagGPR, GPRReg& pa m_fprs.release(oldFPR); m_gprs.retain(tagGPR, virtualRegister, SpillOrderJS); m_gprs.retain(payloadGPR, virtualRegister, SpillOrderJS); - info.fillJSValue(tagGPR, payloadGPR, DataFormatJS); + info.fillJSValue(*m_stream, tagGPR, payloadGPR, DataFormatJS); return true; } @@ -353,10 +355,11 @@ bool SpeculativeJIT::fillJSValue(NodeIndex nodeIndex, GPRReg& tagGPR, GPRReg& pa case DataFormatStorage: // this type currently never occurs ASSERT_NOT_REACHED(); - } - ASSERT_NOT_REACHED(); - return true; + default: + ASSERT_NOT_REACHED(); + return true; + } } class ValueToNumberSlowPathGenerator @@ -505,7 +508,7 @@ void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseTagGPROrNon JITCompiler::DataLabelPtr structureToCompare; JITCompiler::PatchableJump structureCheck = m_jit.patchableBranchPtrWithPatch(JITCompiler::NotEqual, 
JITCompiler::Address(basePayloadGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1))); - m_jit.loadPtr(JITCompiler::Address(basePayloadGPR, JSObject::offsetOfPropertyStorage()), resultPayloadGPR); + JITCompiler::ConvertibleLoadLabel propertyStorageLoad = m_jit.convertibleLoadPtr(JITCompiler::Address(basePayloadGPR, JSObject::offsetOfOutOfLineStorage()), resultPayloadGPR); JITCompiler::DataLabelCompact tagLoadWithPatch = m_jit.load32WithCompactAddressOffsetPatch(JITCompiler::Address(resultPayloadGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR); JITCompiler::DataLabelCompact payloadLoadWithPatch = m_jit.load32WithCompactAddressOffsetPatch(JITCompiler::Address(resultPayloadGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayloadGPR); @@ -547,7 +550,7 @@ void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseTagGPROrNon } m_jit.addPropertyAccess( PropertyAccessRecord( - codeOrigin, structureToCompare, structureCheck, + codeOrigin, structureToCompare, structureCheck, propertyStorageLoad, tagLoadWithPatch, payloadLoadWithPatch, slowPath.get(), doneLabel, safeCast<int8_t>(basePayloadGPR), safeCast<int8_t>(resultTagGPR), safeCast<int8_t>(resultPayloadGPR), safeCast<int8_t>(scratchGPR), @@ -562,7 +565,7 @@ void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg basePayloadGPR, writeBarrier(basePayloadGPR, valueTagGPR, valueUse, WriteBarrierForPropertyAccess, scratchGPR); - m_jit.loadPtr(JITCompiler::Address(basePayloadGPR, JSObject::offsetOfPropertyStorage()), scratchGPR); + JITCompiler::ConvertibleLoadLabel propertyStorageLoad = m_jit.convertibleLoadPtr(JITCompiler::Address(basePayloadGPR, JSObject::offsetOfOutOfLineStorage()), scratchGPR); JITCompiler::DataLabel32 tagStoreWithPatch = m_jit.store32WithAddressOffsetPatch(valueTagGPR, JITCompiler::Address(scratchGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag))); JITCompiler::DataLabel32 
payloadStoreWithPatch = m_jit.store32WithAddressOffsetPatch(valuePayloadGPR, JITCompiler::Address(scratchGPR, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload))); @@ -594,7 +597,7 @@ void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg basePayloadGPR, } m_jit.addPropertyAccess( PropertyAccessRecord( - codeOrigin, structureToCompare, structureCheck, + codeOrigin, structureToCompare, structureCheck, propertyStorageLoad, JITCompiler::DataLabelCompact(tagStoreWithPatch.label()), JITCompiler::DataLabelCompact(payloadStoreWithPatch.label()), slowPath.get(), doneLabel, safeCast<int8_t>(basePayloadGPR), @@ -1065,7 +1068,7 @@ GPRReg SpeculativeJIT::fillSpeculateIntInternal(NodeIndex nodeIndex, DataFormat& GPRReg gpr = allocate(); m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr); m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); - info.fillInteger(gpr); + info.fillInteger(*m_stream, gpr); returnFormat = DataFormatInteger; return gpr; } @@ -1080,7 +1083,7 @@ GPRReg SpeculativeJIT::fillSpeculateIntInternal(NodeIndex nodeIndex, DataFormat& GPRReg gpr = allocate(); m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr); m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); - info.fillInteger(gpr); + info.fillInteger(*m_stream, gpr); returnFormat = DataFormatInteger; return gpr; } @@ -1098,7 +1101,7 @@ GPRReg SpeculativeJIT::fillSpeculateIntInternal(NodeIndex nodeIndex, DataFormat& m_gprs.release(tagGPR); m_gprs.release(payloadGPR); m_gprs.retain(payloadGPR, virtualRegister, SpillOrderInteger); - info.fillInteger(payloadGPR); + info.fillInteger(*m_stream, payloadGPR); // If !strict we're done, return. 
returnFormat = DataFormatInteger; return payloadGPR; @@ -1119,10 +1122,11 @@ GPRReg SpeculativeJIT::fillSpeculateIntInternal(NodeIndex nodeIndex, DataFormat& case DataFormatJSBoolean: case DataFormatStorage: ASSERT_NOT_REACHED(); - } - ASSERT_NOT_REACHED(); - return InvalidGPRReg; + default: + ASSERT_NOT_REACHED(); + return InvalidGPRReg; + } } GPRReg SpeculativeJIT::fillSpeculateInt(NodeIndex nodeIndex, DataFormat& returnFormat) @@ -1160,24 +1164,24 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex) GPRReg gpr = allocate(); m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr); m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); - info.fillInteger(gpr); + info.fillInteger(*m_stream, gpr); unlock(gpr); } else if (isNumberConstant(nodeIndex)) { FPRReg fpr = fprAllocate(); m_jit.loadDouble(addressOfDoubleConstant(nodeIndex), fpr); m_fprs.retain(fpr, virtualRegister, SpillOrderConstant); - info.fillDouble(fpr); + info.fillDouble(*m_stream, fpr); return fpr; } else ASSERT_NOT_REACHED(); } else { DataFormat spillFormat = info.spillFormat(); ASSERT((spillFormat & DataFormatJS) || spillFormat == DataFormatInteger); - if (spillFormat == DataFormatJSDouble) { + if (spillFormat == DataFormatJSDouble || spillFormat == DataFormatDouble) { FPRReg fpr = fprAllocate(); m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr); m_fprs.retain(fpr, virtualRegister, SpillOrderSpilled); - info.fillDouble(fpr); + info.fillDouble(*m_stream, fpr); return fpr; } @@ -1200,7 +1204,8 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex) hasUnboxedDouble.link(&m_jit); m_fprs.retain(fpr, virtualRegister, SpillOrderSpilled); - info.fillDouble(fpr); + info.fillDouble(*m_stream, fpr); + info.killSpilled(); return fpr; } } @@ -1237,7 +1242,7 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex) m_gprs.unlock(tagGPR); m_gprs.unlock(payloadGPR); m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); - info.fillDouble(fpr); + 
info.fillDouble(*m_stream, fpr); info.killSpilled(); return fpr; } @@ -1265,10 +1270,11 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex) case DataFormatBoolean: case DataFormatJSBoolean: ASSERT_NOT_REACHED(); - } - ASSERT_NOT_REACHED(); - return InvalidFPRReg; + default: + ASSERT_NOT_REACHED(); + return InvalidFPRReg; + } } GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex) @@ -1295,7 +1301,7 @@ GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex) GPRReg gpr = allocate(); m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); m_jit.move(MacroAssembler::TrustedImmPtr(jsValue.asCell()), gpr); - info.fillCell(gpr); + info.fillCell(*m_stream, gpr); return gpr; } @@ -1305,7 +1311,7 @@ GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex) GPRReg gpr = allocate(); m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr); m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); - info.fillCell(gpr); + info.fillCell(*m_stream, gpr); return gpr; } @@ -1327,7 +1333,7 @@ GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex) m_gprs.release(tagGPR); m_gprs.release(payloadGPR); m_gprs.retain(payloadGPR, virtualRegister, SpillOrderCell); - info.fillCell(payloadGPR); + info.fillCell(*m_stream, payloadGPR); return payloadGPR; } @@ -1339,10 +1345,11 @@ GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex) case DataFormatBoolean: case DataFormatStorage: ASSERT_NOT_REACHED(); - } - ASSERT_NOT_REACHED(); - return InvalidGPRReg; + default: + ASSERT_NOT_REACHED(); + return InvalidGPRReg; + } } GPRReg SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex) @@ -1369,7 +1376,7 @@ GPRReg SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex) GPRReg gpr = allocate(); m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); m_jit.move(MacroAssembler::TrustedImm32(jsValue.asBoolean()), gpr); - info.fillBoolean(gpr); + info.fillBoolean(*m_stream, gpr); return gpr; } @@ -1381,7 +1388,7 @@ GPRReg 
SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex) GPRReg gpr = allocate(); m_jit.load32(JITCompiler::payloadFor(virtualRegister), gpr); m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); - info.fillBoolean(gpr); + info.fillBoolean(*m_stream, gpr); return gpr; } @@ -1404,7 +1411,7 @@ GPRReg SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex) m_gprs.release(tagGPR); m_gprs.release(payloadGPR); m_gprs.retain(payloadGPR, virtualRegister, SpillOrderBoolean); - info.fillBoolean(payloadGPR); + info.fillBoolean(*m_stream, payloadGPR); return payloadGPR; } @@ -1416,10 +1423,11 @@ GPRReg SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex) case DataFormatCell: case DataFormatStorage: ASSERT_NOT_REACHED(); - } - ASSERT_NOT_REACHED(); - return InvalidGPRReg; + default: + ASSERT_NOT_REACHED(); + return InvalidGPRReg; + } } JITCompiler::Jump SpeculativeJIT::convertToDouble(JSValueOperand& op, FPRReg result) @@ -2003,7 +2011,7 @@ void SpeculativeJIT::compile(Node& node) // Indicate that it's no longer necessary to retrieve the value of // this bytecode variable from registers or other locations in the register file, // but that it is stored as a double. 
- valueSourceReferenceForOperand(node.local()) = ValueSource(DoubleInRegisterFile); + recordSetLocal(node.local(), ValueSource(DoubleInRegisterFile)); break; } SpeculatedType predictedType = node.variableAccessData()->argumentAwarePrediction(); @@ -2011,14 +2019,14 @@ void SpeculativeJIT::compile(Node& node) DoubleOperand value(this, node.child1()); m_jit.storeDouble(value.fpr(), JITCompiler::addressFor(node.local())); noResult(m_compileIndex); - valueSourceReferenceForOperand(node.local()) = ValueSource(DoubleInRegisterFile); + recordSetLocal(node.local(), ValueSource(DoubleInRegisterFile)); break; } if (isInt32Speculation(predictedType)) { SpeculateIntegerOperand value(this, node.child1()); m_jit.store32(value.gpr(), JITCompiler::payloadFor(node.local())); noResult(m_compileIndex); - valueSourceReferenceForOperand(node.local()) = ValueSource(Int32InRegisterFile); + recordSetLocal(node.local(), ValueSource(Int32InRegisterFile)); break; } if (isArraySpeculation(predictedType)) { @@ -2028,14 +2036,14 @@ void SpeculativeJIT::compile(Node& node) speculationCheck(BadType, JSValueSource::unboxedCell(cellGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(cellGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info))); m_jit.storePtr(cellGPR, JITCompiler::payloadFor(node.local())); noResult(m_compileIndex); - valueSourceReferenceForOperand(node.local()) = ValueSource(CellInRegisterFile); + recordSetLocal(node.local(), ValueSource(CellInRegisterFile)); break; } if (isBooleanSpeculation(predictedType)) { SpeculateBooleanOperand value(this, node.child1()); m_jit.store32(value.gpr(), JITCompiler::payloadFor(node.local())); noResult(m_compileIndex); - valueSourceReferenceForOperand(node.local()) = ValueSource(BooleanInRegisterFile); + recordSetLocal(node.local(), ValueSource(BooleanInRegisterFile)); break; } } @@ -2043,7 +2051,7 @@ void SpeculativeJIT::compile(Node& node) m_jit.store32(value.payloadGPR(), 
JITCompiler::payloadFor(node.local())); m_jit.store32(value.tagGPR(), JITCompiler::tagFor(node.local())); noResult(m_compileIndex); - valueSourceReferenceForOperand(node.local()) = ValueSource(ValueInRegisterFile); + recordSetLocal(node.local(), ValueSource(ValueInRegisterFile)); break; } @@ -3542,7 +3550,7 @@ void SpeculativeJIT::compile(Node& node) GPRReg baseGPR = base.gpr(); GPRReg resultGPR = result.gpr(); - m_jit.loadPtr(JITCompiler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), resultGPR); + m_jit.loadPtr(JITCompiler::Address(baseGPR, JSObject::offsetOfOutOfLineStorage()), resultGPR); storageResult(resultGPR, m_compileIndex); break; @@ -3883,10 +3891,15 @@ void SpeculativeJIT::compile(Node& node) JITCompiler::Jump structuresNotMatch = m_jit.branchPtr(JITCompiler::NotEqual, resultPayloadGPR, JITCompiler::Address(globalObjectGPR, JSCell::structureOffset())); // Fast case - m_jit.loadPtr(JITCompiler::Address(globalObjectGPR, JSObject::offsetOfPropertyStorage()), resultPayloadGPR); + m_jit.loadPtr(JITCompiler::Address(globalObjectGPR, JSObject::offsetOfOutOfLineStorage()), resultPayloadGPR); m_jit.load32(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(GlobalResolveInfo, offset)), resolveInfoGPR); - m_jit.load32(JITCompiler::BaseIndex(resultPayloadGPR, resolveInfoGPR, JITCompiler::TimesEight, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag)), resultTagGPR); - m_jit.load32(JITCompiler::BaseIndex(resultPayloadGPR, resolveInfoGPR, JITCompiler::TimesEight, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload)), resultPayloadGPR); +#if DFG_ENABLE(JIT_ASSERT) + JITCompiler::Jump isOutOfLine = m_jit.branch32(JITCompiler::GreaterThanOrEqual, resolveInfoGPR, TrustedImm32(inlineStorageCapacity)); + m_jit.breakpoint(); + isOutOfLine.link(&m_jit); +#endif + m_jit.load32(JITCompiler::BaseIndex(resultPayloadGPR, resolveInfoGPR, JITCompiler::TimesEight, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.tag) - inlineStorageCapacity * 
static_cast<ptrdiff_t>(sizeof(JSValue))), resultTagGPR); + m_jit.load32(JITCompiler::BaseIndex(resultPayloadGPR, resolveInfoGPR, JITCompiler::TimesEight, OBJECT_OFFSETOF(EncodedValueDescriptor, asBits.payload) - inlineStorageCapacity * static_cast<ptrdiff_t>(sizeof(JSValue))), resultPayloadGPR); addSlowPathGenerator( slowPathCall( diff --git a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp index 215f8013d..27eb28fa7 100644 --- a/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp +++ b/Source/JavaScriptCore/dfg/DFGSpeculativeJIT64.cpp @@ -48,7 +48,7 @@ GPRReg SpeculativeJIT::fillInteger(NodeIndex nodeIndex, DataFormat& returnFormat m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); if (isInt32Constant(nodeIndex)) { m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr); - info.fillInteger(gpr); + info.fillInteger(*m_stream, gpr); returnFormat = DataFormatInteger; return gpr; } @@ -74,7 +74,7 @@ GPRReg SpeculativeJIT::fillInteger(NodeIndex nodeIndex, DataFormat& returnFormat // Since we statically know that we're filling an integer, and values // in the RegisterFile are boxed, this must be DataFormatJSInteger. // We will check this with a jitAssert below. - info.fillJSValue(gpr, DataFormatJSInteger); + info.fillJSValue(*m_stream, gpr, DataFormatJSInteger); unlock(gpr); } @@ -107,10 +107,11 @@ GPRReg SpeculativeJIT::fillInteger(NodeIndex nodeIndex, DataFormat& returnFormat returnFormat = DataFormatInteger; return gpr; } + + default: + ASSERT_NOT_REACHED(); + return InvalidGPRReg; } - - ASSERT_NOT_REACHED(); - return InvalidGPRReg; } FPRReg SpeculativeJIT::fillDouble(NodeIndex nodeIndex) @@ -127,7 +128,7 @@ FPRReg SpeculativeJIT::fillDouble(NodeIndex nodeIndex) // FIXME: should not be reachable? 
m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr); m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); - info.fillInteger(gpr); + info.fillInteger(*m_stream, gpr); unlock(gpr); } else if (isNumberConstant(nodeIndex)) { FPRReg fpr = fprAllocate(); @@ -136,7 +137,7 @@ FPRReg SpeculativeJIT::fillDouble(NodeIndex nodeIndex) unlock(gpr); m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); - info.fillDouble(fpr); + info.fillDouble(*m_stream, fpr); return fpr; } else { // FIXME: should not be reachable? @@ -144,7 +145,7 @@ FPRReg SpeculativeJIT::fillDouble(NodeIndex nodeIndex) JSValue jsValue = valueOfJSConstant(nodeIndex); m_jit.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsValue)), gpr); m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); - info.fillJSValue(gpr, DataFormatJS); + info.fillJSValue(*m_stream, gpr, DataFormatJS); unlock(gpr); } } else { @@ -154,7 +155,7 @@ FPRReg SpeculativeJIT::fillDouble(NodeIndex nodeIndex) FPRReg fpr = fprAllocate(); m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr); m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); - info.fillDouble(fpr); + info.fillDouble(*m_stream, fpr); return fpr; } @@ -163,7 +164,7 @@ FPRReg SpeculativeJIT::fillDouble(NodeIndex nodeIndex) m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr); - info.fillInteger(gpr); + info.fillInteger(*m_stream, gpr); unlock(gpr); break; } @@ -174,7 +175,7 @@ FPRReg SpeculativeJIT::fillDouble(NodeIndex nodeIndex) ASSERT(spillFormat & DataFormatJS); m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr); - info.fillJSValue(gpr, spillFormat); + info.fillJSValue(*m_stream, gpr, spillFormat); unlock(gpr); break; } @@ -216,7 +217,7 @@ FPRReg SpeculativeJIT::fillDouble(NodeIndex nodeIndex) m_gprs.unlock(jsValueGpr); m_gprs.unlock(tempGpr); m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); - 
info.fillDouble(fpr); + info.fillDouble(*m_stream, fpr); info.killSpilled(); return fpr; } @@ -247,7 +248,7 @@ FPRReg SpeculativeJIT::fillDouble(NodeIndex nodeIndex) m_gprs.release(gpr); m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); - info.fillDouble(fpr); + info.fillDouble(*m_stream, fpr); return fpr; } @@ -256,10 +257,11 @@ FPRReg SpeculativeJIT::fillDouble(NodeIndex nodeIndex) m_fprs.lock(fpr); return fpr; } + + default: + ASSERT_NOT_REACHED(); + return InvalidFPRReg; } - - ASSERT_NOT_REACHED(); - return InvalidFPRReg; } GPRReg SpeculativeJIT::fillJSValue(NodeIndex nodeIndex) @@ -274,18 +276,18 @@ GPRReg SpeculativeJIT::fillJSValue(NodeIndex nodeIndex) if (node.hasConstant()) { if (isInt32Constant(nodeIndex)) { - info.fillJSValue(gpr, DataFormatJSInteger); + info.fillJSValue(*m_stream, gpr, DataFormatJSInteger); JSValue jsValue = jsNumber(valueOfInt32Constant(nodeIndex)); m_jit.move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gpr); } else if (isNumberConstant(nodeIndex)) { - info.fillJSValue(gpr, DataFormatJSDouble); + info.fillJSValue(*m_stream, gpr, DataFormatJSDouble); JSValue jsValue(JSValue::EncodeAsDouble, valueOfNumberConstant(nodeIndex)); m_jit.move(MacroAssembler::ImmPtr(JSValue::encode(jsValue)), gpr); } else { ASSERT(isJSConstant(nodeIndex)); JSValue jsValue = valueOfJSConstant(nodeIndex); m_jit.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsValue)), gpr); - info.fillJSValue(gpr, DataFormatJS); + info.fillJSValue(*m_stream, gpr, DataFormatJS); } m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); @@ -305,7 +307,7 @@ GPRReg SpeculativeJIT::fillJSValue(NodeIndex nodeIndex) } else ASSERT(spillFormat & DataFormatJS); } - info.fillJSValue(gpr, spillFormat); + info.fillJSValue(*m_stream, gpr, spillFormat); } return gpr; } @@ -321,7 +323,7 @@ GPRReg SpeculativeJIT::fillJSValue(NodeIndex nodeIndex) } m_gprs.lock(gpr); m_jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr); - info.fillJSValue(gpr, DataFormatJSInteger); + 
info.fillJSValue(*m_stream, gpr, DataFormatJSInteger); return gpr; } @@ -330,7 +332,7 @@ GPRReg SpeculativeJIT::fillJSValue(NodeIndex nodeIndex) GPRReg gpr = boxDouble(fpr); // Update all info - info.fillJSValue(gpr, DataFormatJSDouble); + info.fillJSValue(*m_stream, gpr, DataFormatJSDouble); m_fprs.release(fpr); m_gprs.retain(gpr, virtualRegister, SpillOrderJS); @@ -353,10 +355,11 @@ GPRReg SpeculativeJIT::fillJSValue(NodeIndex nodeIndex) case DataFormatStorage: // this type currently never occurs ASSERT_NOT_REACHED(); + + default: + ASSERT_NOT_REACHED(); + return InvalidGPRReg; } - - ASSERT_NOT_REACHED(); - return InvalidGPRReg; } class ValueToNumberSlowPathGenerator @@ -494,7 +497,8 @@ void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg JITCompiler::DataLabelPtr structureToCompare; JITCompiler::PatchableJump structureCheck = m_jit.patchableBranchPtrWithPatch(JITCompiler::NotEqual, JITCompiler::Address(baseGPR, JSCell::structureOffset()), structureToCompare, JITCompiler::TrustedImmPtr(reinterpret_cast<void*>(-1))); - m_jit.loadPtr(JITCompiler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), resultGPR); + JITCompiler::ConvertibleLoadLabel propertyStorageLoad = + m_jit.convertibleLoadPtr(JITCompiler::Address(baseGPR, JSObject::offsetOfOutOfLineStorage()), resultGPR); JITCompiler::DataLabelCompact loadWithPatch = m_jit.loadPtrWithCompactAddressOffsetPatch(JITCompiler::Address(resultGPR, 0), resultGPR); JITCompiler::Label doneLabel = m_jit.label(); @@ -514,8 +518,8 @@ void SpeculativeJIT::cachedGetById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg } m_jit.addPropertyAccess( PropertyAccessRecord( - codeOrigin, structureToCompare, structureCheck, loadWithPatch, slowPath.get(), - doneLabel, safeCast<int8_t>(baseGPR), safeCast<int8_t>(resultGPR), + codeOrigin, structureToCompare, structureCheck, propertyStorageLoad, loadWithPatch, + slowPath.get(), doneLabel, safeCast<int8_t>(baseGPR), safeCast<int8_t>(resultGPR), 
safeCast<int8_t>(scratchGPR), spillMode == NeedToSpill ? PropertyAccessRecord::RegistersInUse : PropertyAccessRecord::RegistersFlushed)); addSlowPathGenerator(slowPath.release()); @@ -533,7 +537,8 @@ void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg writeBarrier(baseGPR, valueGPR, valueUse, WriteBarrierForPropertyAccess, scratchGPR); - m_jit.loadPtr(JITCompiler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), scratchGPR); + JITCompiler::ConvertibleLoadLabel propertyStorageLoad = + m_jit.convertibleLoadPtr(JITCompiler::Address(baseGPR, JSObject::offsetOfOutOfLineStorage()), scratchGPR); JITCompiler::DataLabel32 storeWithPatch = m_jit.storePtrWithAddressOffsetPatch(valueGPR, JITCompiler::Address(scratchGPR, 0)); JITCompiler::Label doneLabel = m_jit.label(); @@ -563,7 +568,11 @@ void SpeculativeJIT::cachedPutById(CodeOrigin codeOrigin, GPRReg baseGPR, GPRReg slowCases, this, optimizedCall, NoResult, valueGPR, baseGPR, identifier(identifierNumber)); } - m_jit.addPropertyAccess(PropertyAccessRecord(codeOrigin, structureToCompare, structureCheck, JITCompiler::DataLabelCompact(storeWithPatch.label()), slowPath.get(), doneLabel, safeCast<int8_t>(baseGPR), safeCast<int8_t>(valueGPR), safeCast<int8_t>(scratchGPR))); + m_jit.addPropertyAccess( + PropertyAccessRecord( + codeOrigin, structureToCompare, structureCheck, propertyStorageLoad, + JITCompiler::DataLabelCompact(storeWithPatch.label()), slowPath.get(), doneLabel, + safeCast<int8_t>(baseGPR), safeCast<int8_t>(valueGPR), safeCast<int8_t>(scratchGPR))); addSlowPathGenerator(slowPath.release()); } @@ -1047,7 +1056,7 @@ GPRReg SpeculativeJIT::fillSpeculateIntInternal(NodeIndex nodeIndex, DataFormat& m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); ASSERT(isInt32Constant(nodeIndex)); m_jit.move(MacroAssembler::Imm32(valueOfInt32Constant(nodeIndex)), gpr); - info.fillInteger(gpr); + info.fillInteger(*m_stream, gpr); returnFormat = DataFormatInteger; return gpr; } @@ -1062,7 +1071,7 
@@ GPRReg SpeculativeJIT::fillSpeculateIntInternal(NodeIndex nodeIndex, DataFormat& // If we know this was spilled as an integer we can fill without checking. if (strict) { m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr); - info.fillInteger(gpr); + info.fillInteger(*m_stream, gpr); returnFormat = DataFormatInteger; return gpr; } @@ -1071,14 +1080,14 @@ GPRReg SpeculativeJIT::fillSpeculateIntInternal(NodeIndex nodeIndex, DataFormat& m_jit.orPtr(GPRInfo::tagTypeNumberRegister, gpr); } else m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr); - info.fillJSValue(gpr, DataFormatJSInteger); + info.fillJSValue(*m_stream, gpr, DataFormatJSInteger); returnFormat = DataFormatJSInteger; return gpr; } m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr); // Fill as JSValue, and fall through. - info.fillJSValue(gpr, DataFormatJSInteger); + info.fillJSValue(*m_stream, gpr, DataFormatJSInteger); m_gprs.unlock(gpr); } @@ -1088,7 +1097,7 @@ GPRReg SpeculativeJIT::fillSpeculateIntInternal(NodeIndex nodeIndex, DataFormat& m_gprs.lock(gpr); if (!isInt32Speculation(type)) speculationCheck(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchPtr(MacroAssembler::Below, gpr, GPRInfo::tagTypeNumberRegister)); - info.fillJSValue(gpr, DataFormatJSInteger); + info.fillJSValue(*m_stream, gpr, DataFormatJSInteger); // If !strict we're done, return. 
if (!strict) { returnFormat = DataFormatJSInteger; @@ -1109,7 +1118,7 @@ GPRReg SpeculativeJIT::fillSpeculateIntInternal(NodeIndex nodeIndex, DataFormat& result = allocate(); else { m_gprs.lock(gpr); - info.fillInteger(gpr); + info.fillInteger(*m_stream, gpr); result = gpr; } m_jit.zeroExtend32ToPtr(gpr, result); @@ -1151,10 +1160,11 @@ GPRReg SpeculativeJIT::fillSpeculateIntInternal(NodeIndex nodeIndex, DataFormat& case DataFormatStorage: ASSERT_NOT_REACHED(); + + default: + ASSERT_NOT_REACHED(); + return InvalidGPRReg; } - - ASSERT_NOT_REACHED(); - return InvalidGPRReg; } GPRReg SpeculativeJIT::fillSpeculateInt(NodeIndex nodeIndex, DataFormat& returnFormat) @@ -1191,7 +1201,7 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex) unlock(gpr); m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); - info.fillDouble(fpr); + info.fillDouble(*m_stream, fpr); return fpr; } if (isNumberConstant(nodeIndex)) { @@ -1201,7 +1211,7 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex) unlock(gpr); m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); - info.fillDouble(fpr); + info.fillDouble(*m_stream, fpr); return fpr; } terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode); @@ -1214,7 +1224,7 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex) FPRReg fpr = fprAllocate(); m_jit.loadDouble(JITCompiler::addressFor(virtualRegister), fpr); m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); - info.fillDouble(fpr); + info.fillDouble(*m_stream, fpr); return fpr; } @@ -1223,7 +1233,7 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex) m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); m_jit.load32(JITCompiler::addressFor(virtualRegister), gpr); - info.fillInteger(gpr); + info.fillInteger(*m_stream, gpr); unlock(gpr); break; } @@ -1234,7 +1244,7 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex) ASSERT(spillFormat & DataFormatJS); m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); 
m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr); - info.fillJSValue(gpr, spillFormat); + info.fillJSValue(*m_stream, gpr, spillFormat); unlock(gpr); break; } @@ -1277,7 +1287,7 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex) m_gprs.unlock(jsValueGpr); m_gprs.unlock(tempGpr); m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); - info.fillDouble(fpr); + info.fillDouble(*m_stream, fpr); info.killSpilled(); return fpr; } @@ -1308,7 +1318,7 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex) m_gprs.release(gpr); m_fprs.retain(fpr, virtualRegister, SpillOrderDouble); - info.fillDouble(fpr); + info.fillDouble(*m_stream, fpr); return fpr; } @@ -1317,10 +1327,11 @@ FPRReg SpeculativeJIT::fillSpeculateDouble(NodeIndex nodeIndex) m_fprs.lock(fpr); return fpr; } + + default: + ASSERT_NOT_REACHED(); + return InvalidFPRReg; } - - ASSERT_NOT_REACHED(); - return InvalidFPRReg; } GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex) @@ -1347,7 +1358,7 @@ GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex) if (jsValue.isCell()) { m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); m_jit.move(MacroAssembler::TrustedImmPtr(jsValue.asCell()), gpr); - info.fillJSValue(gpr, DataFormatJSCell); + info.fillJSValue(*m_stream, gpr, DataFormatJSCell); return gpr; } terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode); @@ -1357,10 +1368,10 @@ GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex) m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr); - info.fillJSValue(gpr, DataFormatJS); + info.fillJSValue(*m_stream, gpr, DataFormatJS); if (!isCellSpeculation(type)) speculationCheck(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, gpr, GPRInfo::tagMaskRegister)); - info.fillJSValue(gpr, DataFormatJSCell); + info.fillJSValue(*m_stream, gpr, DataFormatJSCell); return gpr; } @@ -1376,7 +1387,7 @@ GPRReg 
SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex) m_gprs.lock(gpr); if (!isCellSpeculation(type)) speculationCheck(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, gpr, GPRInfo::tagMaskRegister)); - info.fillJSValue(gpr, DataFormatJSCell); + info.fillJSValue(*m_stream, gpr, DataFormatJSCell); return gpr; } @@ -1392,10 +1403,11 @@ GPRReg SpeculativeJIT::fillSpeculateCell(NodeIndex nodeIndex) case DataFormatStorage: ASSERT_NOT_REACHED(); + + default: + ASSERT_NOT_REACHED(); + return InvalidGPRReg; } - - ASSERT_NOT_REACHED(); - return InvalidGPRReg; } GPRReg SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex) @@ -1422,7 +1434,7 @@ GPRReg SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex) if (jsValue.isBoolean()) { m_gprs.retain(gpr, virtualRegister, SpillOrderConstant); m_jit.move(MacroAssembler::TrustedImmPtr(JSValue::encode(jsValue)), gpr); - info.fillJSValue(gpr, DataFormatJSBoolean); + info.fillJSValue(*m_stream, gpr, DataFormatJSBoolean); return gpr; } terminateSpeculativeExecution(Uncountable, JSValueRegs(), NoNode); @@ -1432,13 +1444,13 @@ GPRReg SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex) m_gprs.retain(gpr, virtualRegister, SpillOrderSpilled); m_jit.loadPtr(JITCompiler::addressFor(virtualRegister), gpr); - info.fillJSValue(gpr, DataFormatJS); + info.fillJSValue(*m_stream, gpr, DataFormatJS); if (!isBooleanSpeculation(type)) { m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr); speculationCheck(BadType, JSValueRegs(gpr), nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, gpr, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck, gpr, InvalidGPRReg)); m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr); } - info.fillJSValue(gpr, DataFormatJSBoolean); + info.fillJSValue(*m_stream, gpr, DataFormatJSBoolean); return gpr; } @@ -1457,7 +1469,7 @@ GPRReg SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex) speculationCheck(BadType, 
JSValueRegs(gpr), nodeIndex, m_jit.branchTestPtr(MacroAssembler::NonZero, gpr, TrustedImm32(static_cast<int32_t>(~1))), SpeculationRecovery(BooleanSpeculationCheck, gpr, InvalidGPRReg)); m_jit.xorPtr(TrustedImm32(static_cast<int32_t>(ValueFalse)), gpr); } - info.fillJSValue(gpr, DataFormatJSBoolean); + info.fillJSValue(*m_stream, gpr, DataFormatJSBoolean); return gpr; } @@ -1473,10 +1485,11 @@ GPRReg SpeculativeJIT::fillSpeculateBoolean(NodeIndex nodeIndex) case DataFormatStorage: ASSERT_NOT_REACHED(); + + default: + ASSERT_NOT_REACHED(); + return InvalidGPRReg; } - - ASSERT_NOT_REACHED(); - return InvalidGPRReg; } JITCompiler::Jump SpeculativeJIT::convertToDouble(GPRReg value, FPRReg result, GPRReg tmp) @@ -2055,7 +2068,7 @@ void SpeculativeJIT::compile(Node& node) // Indicate that it's no longer necessary to retrieve the value of // this bytecode variable from registers or other locations in the register file, // but that it is stored as a double. - valueSourceReferenceForOperand(node.local()) = ValueSource(DoubleInRegisterFile); + recordSetLocal(node.local(), ValueSource(DoubleInRegisterFile)); break; } @@ -2064,7 +2077,7 @@ void SpeculativeJIT::compile(Node& node) SpeculateIntegerOperand value(this, node.child1()); m_jit.store32(value.gpr(), JITCompiler::payloadFor(node.local())); noResult(m_compileIndex); - valueSourceReferenceForOperand(node.local()) = ValueSource(Int32InRegisterFile); + recordSetLocal(node.local(), ValueSource(Int32InRegisterFile)); break; } if (isArraySpeculation(predictedType)) { @@ -2074,14 +2087,14 @@ void SpeculativeJIT::compile(Node& node) speculationCheck(BadType, JSValueRegs(cellGPR), node.child1(), m_jit.branchPtr(MacroAssembler::NotEqual, MacroAssembler::Address(cellGPR, JSCell::classInfoOffset()), MacroAssembler::TrustedImmPtr(&JSArray::s_info))); m_jit.storePtr(cellGPR, JITCompiler::addressFor(node.local())); noResult(m_compileIndex); - valueSourceReferenceForOperand(node.local()) = ValueSource(CellInRegisterFile); + 
recordSetLocal(node.local(), ValueSource(CellInRegisterFile)); break; } if (isBooleanSpeculation(predictedType)) { SpeculateBooleanOperand boolean(this, node.child1()); m_jit.storePtr(boolean.gpr(), JITCompiler::addressFor(node.local())); noResult(m_compileIndex); - valueSourceReferenceForOperand(node.local()) = ValueSource(BooleanInRegisterFile); + recordSetLocal(node.local(), ValueSource(BooleanInRegisterFile)); break; } } @@ -2090,7 +2103,7 @@ void SpeculativeJIT::compile(Node& node) m_jit.storePtr(value.gpr(), JITCompiler::addressFor(node.local())); noResult(m_compileIndex); - valueSourceReferenceForOperand(node.local()) = ValueSource(ValueInRegisterFile); + recordSetLocal(node.local(), ValueSource(ValueInRegisterFile)); break; } @@ -3567,7 +3580,7 @@ void SpeculativeJIT::compile(Node& node) GPRReg baseGPR = base.gpr(); GPRReg resultGPR = result.gpr(); - m_jit.loadPtr(JITCompiler::Address(baseGPR, JSObject::offsetOfPropertyStorage()), resultGPR); + m_jit.loadPtr(JITCompiler::Address(baseGPR, JSObject::offsetOfOutOfLineStorage()), resultGPR); storageResult(resultGPR, m_compileIndex); break; @@ -3883,9 +3896,14 @@ void SpeculativeJIT::compile(Node& node) JITCompiler::Jump structuresDontMatch = m_jit.branchPtr(JITCompiler::NotEqual, resultGPR, JITCompiler::Address(globalObjectGPR, JSCell::structureOffset())); // Fast case - m_jit.loadPtr(JITCompiler::Address(globalObjectGPR, JSObject::offsetOfPropertyStorage()), resultGPR); m_jit.load32(JITCompiler::Address(resolveInfoGPR, OBJECT_OFFSETOF(GlobalResolveInfo, offset)), resolveInfoGPR); - m_jit.loadPtr(JITCompiler::BaseIndex(resultGPR, resolveInfoGPR, JITCompiler::ScalePtr), resultGPR); +#if DFG_ENABLE(JIT_ASSERT) + JITCompiler::Jump isOutOfLine = m_jit.branch32(JITCompiler::GreaterThanOrEqual, resolveInfoGPR, TrustedImm32(inlineStorageCapacity)); + m_jit.breakpoint(); + isOutOfLine.link(&m_jit); +#endif + m_jit.loadPtr(JITCompiler::Address(globalObjectGPR, JSObject::offsetOfOutOfLineStorage()), resultGPR); + 
m_jit.loadPtr(JITCompiler::BaseIndex(resultGPR, resolveInfoGPR, JITCompiler::ScalePtr, -inlineStorageCapacity * static_cast<ptrdiff_t>(sizeof(JSValue))), resultGPR); addSlowPathGenerator( slowPathCall( diff --git a/Source/JavaScriptCore/dfg/DFGValueRecoveryOverride.h b/Source/JavaScriptCore/dfg/DFGValueRecoveryOverride.h new file mode 100644 index 000000000..317111aec --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGValueRecoveryOverride.h @@ -0,0 +1,57 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef DFGValueRecoveryOverride_h +#define DFGValueRecoveryOverride_h + +#include <wtf/Platform.h> + +#if ENABLE(DFG_JIT) + +#include "ValueRecovery.h" +#include <wtf/RefCounted.h> + +namespace JSC { namespace DFG { + +class ValueRecoveryOverride : public RefCounted<ValueRecoveryOverride> { +public: + ValueRecoveryOverride() { } + + ValueRecoveryOverride(int operand, const ValueRecovery& recovery) + : operand(operand) + , recovery(recovery) + { + } + + int operand; + ValueRecovery recovery; +}; + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGValueRecoveryOverride_h + diff --git a/Source/JavaScriptCore/dfg/DFGValueSource.cpp b/Source/JavaScriptCore/dfg/DFGValueSource.cpp new file mode 100644 index 000000000..25d43ee6b --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGValueSource.cpp @@ -0,0 +1,69 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "DFGValueSource.h" + +#if ENABLE(DFG_JIT) + +namespace JSC { namespace DFG { + +void ValueSource::dump(FILE* out) const +{ + switch (kind()) { + case SourceNotSet: + fprintf(out, "NotSet"); + break; + case SourceIsDead: + fprintf(out, "IsDead"); + break; + case ValueInRegisterFile: + fprintf(out, "InRegFile"); + break; + case Int32InRegisterFile: + fprintf(out, "Int32"); + break; + case CellInRegisterFile: + fprintf(out, "Cell"); + break; + case BooleanInRegisterFile: + fprintf(out, "Bool"); + break; + case DoubleInRegisterFile: + fprintf(out, "Double"); + break; + case ArgumentsSource: + fprintf(out, "Arguments"); + break; + case HaveNode: + fprintf(out, "Node(%d)", m_nodeIndex); + break; + } +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGValueSource.h b/Source/JavaScriptCore/dfg/DFGValueSource.h new file mode 100644 index 000000000..be4a6e081 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGValueSource.h @@ -0,0 +1,225 @@ +/* + * Copyright (C) 2011 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. 
Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGValueSource_h +#define DFGValueSource_h + +#include <wtf/Platform.h> + +#if ENABLE(DFG_JIT) + +#include "DFGCommon.h" +#include "DataFormat.h" +#include "SpeculatedType.h" +#include "ValueRecovery.h" + +namespace JSC { namespace DFG { + +enum ValueSourceKind { + SourceNotSet, + ValueInRegisterFile, + Int32InRegisterFile, + CellInRegisterFile, + BooleanInRegisterFile, + DoubleInRegisterFile, + ArgumentsSource, + SourceIsDead, + HaveNode +}; + +static inline ValueSourceKind dataFormatToValueSourceKind(DataFormat dataFormat) +{ + switch (dataFormat) { + case DataFormatInteger: + return Int32InRegisterFile; + case DataFormatDouble: + return DoubleInRegisterFile; + case DataFormatBoolean: + return BooleanInRegisterFile; + case DataFormatCell: + return CellInRegisterFile; + case DataFormatDead: + return SourceIsDead; + case DataFormatArguments: + return ArgumentsSource; + default: + ASSERT(dataFormat & DataFormatJS); + return ValueInRegisterFile; + } +} + +static inline DataFormat 
valueSourceKindToDataFormat(ValueSourceKind kind) +{ + switch (kind) { + case ValueInRegisterFile: + return DataFormatJS; + case Int32InRegisterFile: + return DataFormatInteger; + case CellInRegisterFile: + return DataFormatCell; + case BooleanInRegisterFile: + return DataFormatBoolean; + case DoubleInRegisterFile: + return DataFormatDouble; + case ArgumentsSource: + return DataFormatArguments; + case SourceIsDead: + return DataFormatDead; + default: + return DataFormatNone; + } +} + +static inline bool isInRegisterFile(ValueSourceKind kind) +{ + DataFormat format = valueSourceKindToDataFormat(kind); + return format != DataFormatNone && format < DataFormatOSRMarker; +} + +// Can this value be recovered without having to look at register allocation state or +// DFG node liveness? +static inline bool isTriviallyRecoverable(ValueSourceKind kind) +{ + return valueSourceKindToDataFormat(kind) != DataFormatNone; +} + +class ValueSource { +public: + ValueSource() + : m_nodeIndex(nodeIndexFromKind(SourceNotSet)) + { + } + + explicit ValueSource(ValueSourceKind valueSourceKind) + : m_nodeIndex(nodeIndexFromKind(valueSourceKind)) + { + ASSERT(kind() != SourceNotSet); + ASSERT(kind() != HaveNode); + } + + explicit ValueSource(NodeIndex nodeIndex) + : m_nodeIndex(nodeIndex) + { + ASSERT(nodeIndex != NoNode); + ASSERT(kind() == HaveNode); + } + + static ValueSource forSpeculation(SpeculatedType prediction) + { + if (isInt32Speculation(prediction)) + return ValueSource(Int32InRegisterFile); + if (isArraySpeculation(prediction)) + return ValueSource(CellInRegisterFile); + if (isBooleanSpeculation(prediction)) + return ValueSource(BooleanInRegisterFile); + return ValueSource(ValueInRegisterFile); + } + + static ValueSource forDataFormat(DataFormat dataFormat) + { + return ValueSource(dataFormatToValueSourceKind(dataFormat)); + } + + bool isSet() const + { + return kindFromNodeIndex(m_nodeIndex) != SourceNotSet; + } + + ValueSourceKind kind() const + { + return 
kindFromNodeIndex(m_nodeIndex); + } + + bool isInRegisterFile() const { return JSC::DFG::isInRegisterFile(kind()); } + bool isTriviallyRecoverable() const { return JSC::DFG::isTriviallyRecoverable(kind()); } + + DataFormat dataFormat() const + { + return valueSourceKindToDataFormat(kind()); + } + + ValueRecovery valueRecovery() const + { + ASSERT(isTriviallyRecoverable()); + switch (kind()) { + case ValueInRegisterFile: + return ValueRecovery::alreadyInRegisterFile(); + + case Int32InRegisterFile: + return ValueRecovery::alreadyInRegisterFileAsUnboxedInt32(); + + case CellInRegisterFile: + return ValueRecovery::alreadyInRegisterFileAsUnboxedCell(); + + case BooleanInRegisterFile: + return ValueRecovery::alreadyInRegisterFileAsUnboxedBoolean(); + + case DoubleInRegisterFile: + return ValueRecovery::alreadyInRegisterFileAsUnboxedDouble(); + + case SourceIsDead: + return ValueRecovery::constant(jsUndefined()); + + case ArgumentsSource: + return ValueRecovery::argumentsThatWereNotCreated(); + + default: + ASSERT_NOT_REACHED(); + return ValueRecovery(); + } + } + + NodeIndex nodeIndex() const + { + ASSERT(kind() == HaveNode); + return m_nodeIndex; + } + + void dump(FILE* out) const; + +private: + static NodeIndex nodeIndexFromKind(ValueSourceKind kind) + { + ASSERT(kind >= SourceNotSet && kind < HaveNode); + return NoNode - kind; + } + + static ValueSourceKind kindFromNodeIndex(NodeIndex nodeIndex) + { + unsigned kind = static_cast<unsigned>(NoNode - nodeIndex); + if (kind >= static_cast<unsigned>(HaveNode)) + return HaveNode; + return static_cast<ValueSourceKind>(kind); + } + + NodeIndex m_nodeIndex; +}; + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGValueSource_h + diff --git a/Source/JavaScriptCore/dfg/DFGVariableAccessData.h b/Source/JavaScriptCore/dfg/DFGVariableAccessData.h index 382907d27..e734e6387 100644 --- a/Source/JavaScriptCore/dfg/DFGVariableAccessData.h +++ b/Source/JavaScriptCore/dfg/DFGVariableAccessData.h @@ -176,7 +176,7 @@ 
public: // If the variable has been voted to become a double, then make it a // double. - if (doubleVoteRatio() >= Options::doubleVoteRatioForDoubleFormat) + if (doubleVoteRatio() >= Options::doubleVoteRatioForDoubleFormat()) return true; return false; diff --git a/Source/JavaScriptCore/dfg/DFGVariableEvent.cpp b/Source/JavaScriptCore/dfg/DFGVariableEvent.cpp new file mode 100644 index 000000000..3e84a6ba1 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGVariableEvent.cpp @@ -0,0 +1,91 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "DFGVariableEvent.h" + +#if ENABLE(DFG_JIT) + +#include "DFGFPRInfo.h" +#include "DFGGPRInfo.h" + +namespace JSC { namespace DFG { + +void VariableEvent::dump(FILE* out) const +{ + switch (kind()) { + case Reset: + fprintf(out, "Reset"); + break; + case BirthToFill: + dumpFillInfo("BirthToFill", out); + break; + case BirthToSpill: + dumpSpillInfo("BirthToSpill", out); + break; + case Fill: + dumpFillInfo("Fill", out); + break; + case Spill: + dumpSpillInfo("Spill", out); + break; + case Death: + fprintf(out, "Death(@%u)", nodeIndex()); + break; + case MovHint: + fprintf(out, "MovHint(@%u, r%d)", nodeIndex(), operand()); + break; + case SetLocalEvent: + fprintf(out, "SetLocal(r%d, %s)", operand(), dataFormatToString(dataFormat())); + break; + default: + ASSERT_NOT_REACHED(); + break; + } +} + +void VariableEvent::dumpFillInfo(const char* name, FILE* out) const +{ + fprintf(out, "%s(@%u, ", name, nodeIndex()); + if (dataFormat() == DataFormatDouble) + fprintf(out, "%s", FPRInfo::debugName(fpr())); +#if USE(JSVALUE32_64) + else if (dataFormat() & DataFormatJS) + fprintf(out, "%s:%s", GPRInfo::debugName(tagGPR()), GPRInfo::debugName(payloadGPR())); +#endif + else + fprintf(out, "%s", GPRInfo::debugName(gpr())); + fprintf(out, ", %s)", dataFormatToString(dataFormat())); +} + +void VariableEvent::dumpSpillInfo(const char* name, FILE* out) const +{ + fprintf(out, "%s(@%u, r%d, %s)", name, nodeIndex(), virtualRegister(), dataFormatToString(dataFormat())); +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGVariableEvent.h b/Source/JavaScriptCore/dfg/DFGVariableEvent.h new file mode 100644 index 000000000..a491a3ebf --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGVariableEvent.h @@ -0,0 +1,270 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef DFGVariableEvent_h +#define DFGVariableEvent_h + +#include <wtf/Platform.h> + +#if ENABLE(DFG_JIT) + +#include "DFGCommon.h" +#include "DataFormat.h" +#include "MacroAssembler.h" +#include <stdio.h> + +namespace JSC { namespace DFG { + +enum VariableEventKind { + // Marks the beginning of a checkpoint. If you interpret the variable + // events starting at a Reset point then you'll get everything you need. + Reset, + + // Node births. Points in the code where a node becomes relevant for OSR. + // It may be the point where it is actually born (i.e. assigned) or it may + // be a later point, if it's only later in the sequence of instructions + // that we start to care about this node. 
+ BirthToFill, + BirthToSpill, + + // Events related to how a node is represented. + Fill, + Spill, + + // Death of a node - after this we no longer care about this node. + Death, + + // A MovHint means that a node is being associated with a bytecode operand, + // but that it has not been stored into that operand. + MovHint, + + // A SetLocalEvent means that a node's value has actually been stored into the + // bytecode operand that it's associated with. + SetLocalEvent, + + // Used to indicate an uninitialized VariableEvent. Don't use for other + // purposes. + InvalidEventKind +}; + +union VariableRepresentation { + MacroAssembler::RegisterID gpr; + MacroAssembler::FPRegisterID fpr; +#if USE(JSVALUE32_64) + struct { + MacroAssembler::RegisterID tagGPR; + MacroAssembler::RegisterID payloadGPR; + } pair; +#endif + int32_t virtualReg; +}; + +class VariableEvent { +public: + VariableEvent() + : m_kind(InvalidEventKind) + { + } + + static VariableEvent reset() + { + VariableEvent event; + event.m_kind = Reset; + return event; + } + + static VariableEvent fillGPR(VariableEventKind kind, NodeIndex nodeIndex, MacroAssembler::RegisterID gpr, DataFormat dataFormat) + { + ASSERT(kind == BirthToFill || kind == Fill); + ASSERT(dataFormat != DataFormatDouble); +#if USE(JSVALUE32_64) + ASSERT(!(dataFormat & DataFormatJS)); +#endif + VariableEvent event; + event.m_index = nodeIndex; + event.u.gpr = gpr; + event.m_kind = kind; + event.m_dataFormat = dataFormat; + return event; + } + +#if USE(JSVALUE32_64) + static VariableEvent fillPair(VariableEventKind kind, NodeIndex nodeIndex, MacroAssembler::RegisterID tagGPR, MacroAssembler::RegisterID payloadGPR) + { + ASSERT(kind == BirthToFill || kind == Fill); + VariableEvent event; + event.m_index = nodeIndex; + event.u.pair.tagGPR = tagGPR; + event.u.pair.payloadGPR = payloadGPR; + event.m_kind = kind; + event.m_dataFormat = DataFormatJS; + return event; + } +#endif // USE(JSVALUE32_64) + + static VariableEvent 
fillFPR(VariableEventKind kind, NodeIndex nodeIndex, MacroAssembler::FPRegisterID fpr) + { + ASSERT(kind == BirthToFill || kind == Fill); + VariableEvent event; + event.m_index = nodeIndex; + event.u.fpr = fpr; + event.m_kind = kind; + event.m_dataFormat = DataFormatDouble; + return event; + } + + static VariableEvent spill(VariableEventKind kind, NodeIndex nodeIndex, VirtualRegister virtualRegister, DataFormat format) + { + ASSERT(kind == BirthToSpill || kind == Spill); + VariableEvent event; + event.m_index = nodeIndex; + event.u.virtualReg = virtualRegister; + event.m_kind = kind; + event.m_dataFormat = format; + return event; + } + + static VariableEvent death(NodeIndex nodeIndex) + { + VariableEvent event; + event.m_index = nodeIndex; + event.m_kind = Death; + return event; + } + + static VariableEvent setLocal(int operand, DataFormat format) + { + VariableEvent event; + event.u.virtualReg = operand; + event.m_kind = SetLocalEvent; + event.m_dataFormat = format; + return event; + } + + static VariableEvent movHint(NodeIndex nodeIndex, int operand) + { + VariableEvent event; + event.m_index = nodeIndex; + event.u.virtualReg = operand; + event.m_kind = MovHint; + return event; + } + + VariableEventKind kind() const + { + return static_cast<VariableEventKind>(m_kind); + } + + NodeIndex nodeIndex() const + { + ASSERT(m_kind == BirthToFill || m_kind == Fill + || m_kind == BirthToSpill || m_kind == Spill + || m_kind == Death || m_kind == MovHint); + return m_index; + } + + DataFormat dataFormat() const + { + ASSERT(m_kind == BirthToFill || m_kind == Fill + || m_kind == BirthToSpill || m_kind == Spill + || m_kind == SetLocalEvent); + return static_cast<DataFormat>(m_dataFormat); + } + + MacroAssembler::RegisterID gpr() const + { + ASSERT(m_kind == BirthToFill || m_kind == Fill); + ASSERT(m_dataFormat); + ASSERT(m_dataFormat != DataFormatDouble); +#if USE(JSVALUE32_64) + ASSERT(!(m_dataFormat & DataFormatJS)); +#endif + return u.gpr; + } + +#if USE(JSVALUE32_64) + 
MacroAssembler::RegisterID tagGPR() const + { + ASSERT(m_kind == BirthToFill || m_kind == Fill); + ASSERT(m_dataFormat & DataFormatJS); + return u.pair.tagGPR; + } + MacroAssembler::RegisterID payloadGPR() const + { + ASSERT(m_kind == BirthToFill || m_kind == Fill); + ASSERT(m_dataFormat & DataFormatJS); + return u.pair.payloadGPR; + } +#endif // USE(JSVALUE32_64) + + MacroAssembler::FPRegisterID fpr() const + { + ASSERT(m_kind == BirthToFill || m_kind == Fill); + ASSERT(m_dataFormat == DataFormatDouble); + return u.fpr; + } + + VirtualRegister virtualRegister() const + { + ASSERT(m_kind == BirthToSpill || m_kind == Spill); + return static_cast<VirtualRegister>(u.virtualReg); + } + + int operand() const + { + ASSERT(m_kind == SetLocalEvent || m_kind == MovHint); + return u.virtualReg; + } + + const VariableRepresentation& variableRepresentation() const { return u; } + + void dump(FILE*) const; + +private: + void dumpFillInfo(const char* name, FILE*) const; + void dumpSpillInfo(const char* name, FILE*) const; + + NodeIndex m_index; + + // For BirthToFill, Fill: + // - The GPR or FPR, or a GPR pair. + // For BirthToSpill, Spill: + // - The virtual register. + // For MovHint, SetLocalEvent: + // - The bytecode operand. + // For Death: + // - Unused. + VariableRepresentation u; + + int8_t m_kind; + int8_t m_dataFormat; +}; + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGVariableEvent_h + diff --git a/Source/JavaScriptCore/dfg/DFGVariableEventStream.cpp b/Source/JavaScriptCore/dfg/DFGVariableEventStream.cpp new file mode 100644 index 000000000..5d548a755 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGVariableEventStream.cpp @@ -0,0 +1,286 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. 
Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#include "config.h" +#include "DFGVariableEventStream.h" + +#if ENABLE(DFG_JIT) + +#include "CodeBlock.h" +#include "DFGValueSource.h" +#include <wtf/DataLog.h> + +namespace JSC { namespace DFG { + +void VariableEventStream::logEvent(const VariableEvent& event) +{ + dataLog("seq#%u:", static_cast<unsigned>(size())); + event.dump(WTF::dataFile()); + dataLog(" "); +} + +struct MinifiedGenerationInfo { + bool filled; // true -> in gpr/fpr/pair, false -> spilled + VariableRepresentation u; + DataFormat format; + + MinifiedGenerationInfo() + : format(DataFormatNone) + { + } + + void update(const VariableEvent& event) + { + switch (event.kind()) { + case BirthToFill: + case Fill: + filled = true; + break; + case BirthToSpill: + case Spill: + filled = false; + break; + case Death: + format = DataFormatNone; + return; + default: + return; + } + + u = event.variableRepresentation(); + format = event.dataFormat(); + } +}; + +void VariableEventStream::reconstruct( + CodeBlock* codeBlock, CodeOrigin codeOrigin, MinifiedGraph& graph, + unsigned index, Operands<ValueRecovery>& valueRecoveries) const +{ + ASSERT(codeBlock->getJITType() == JITCode::DFGJIT); + CodeBlock* baselineCodeBlock = codeBlock->baselineVersion(); + + unsigned numVariables; + if (codeOrigin.inlineCallFrame) + numVariables = baselineCodeBlockForInlineCallFrame(codeOrigin.inlineCallFrame)->m_numCalleeRegisters + codeOrigin.inlineCallFrame->stackOffset; + else + numVariables = baselineCodeBlock->m_numCalleeRegisters; + + // Crazy special case: if we're at index == 0 then this must be an argument check + // failure, in which case all variables are already set up. The recoveries should + // reflect this. 
+ if (!index) { + valueRecoveries = Operands<ValueRecovery>(codeBlock->numParameters(), numVariables); + for (size_t i = 0; i < valueRecoveries.size(); ++i) + valueRecoveries[i] = ValueRecovery::alreadyInRegisterFile(); + return; + } + + // Step 1: Find the last checkpoint, and figure out the number of virtual registers as we go. + unsigned startIndex = index - 1; + while (at(startIndex).kind() != Reset) + startIndex--; + + // Step 2: Create a mock-up of the DFG's state and execute the events. + Operands<ValueSource> operandSources(codeBlock->numParameters(), numVariables); + Vector<MinifiedGenerationInfo, 32> generationInfos(graph.originalGraphSize()); + for (unsigned i = startIndex; i < index; ++i) { + const VariableEvent& event = at(i); + switch (event.kind()) { + case Reset: + // nothing to do. + break; + case BirthToFill: + case BirthToSpill: + case Fill: + case Spill: + case Death: + generationInfos[event.nodeIndex()].update(event); + break; + case MovHint: + if (operandSources.hasOperand(event.operand())) + operandSources.setOperand(event.operand(), ValueSource(event.nodeIndex())); + break; + case SetLocalEvent: + if (operandSources.hasOperand(event.operand())) + operandSources.setOperand(event.operand(), ValueSource::forDataFormat(event.dataFormat())); + break; + default: + ASSERT_NOT_REACHED(); + break; + } + } + + // Step 3: Record the things that are live, so we can get to them more quickly. + Vector<unsigned, 16> indicesOfLiveThings; + for (unsigned i = 0; i < generationInfos.size(); ++i) { + if (generationInfos[i].format != DataFormatNone) + indicesOfLiveThings.append(i); + } + + // Step 4: Compute value recoveries! 
+ valueRecoveries = Operands<ValueRecovery>(codeBlock->numParameters(), numVariables); + for (unsigned i = 0; i < operandSources.size(); ++i) { + ValueSource& source = operandSources[i]; + if (source.isTriviallyRecoverable()) { + valueRecoveries[i] = source.valueRecovery(); + continue; + } + + ASSERT(source.kind() == HaveNode); + MinifiedNode* node = graph.at(source.nodeIndex()); + if (node) { + if (node->hasConstantNumber()) { + valueRecoveries[i] = ValueRecovery::constant( + codeBlock->constantRegister( + FirstConstantRegisterIndex + node->constantNumber()).get()); + continue; + } + if (node->hasWeakConstant()) { + valueRecoveries[i] = ValueRecovery::constant(node->weakConstant()); + continue; + } + if (node->op() == PhantomArguments) { + valueRecoveries[i] = ValueRecovery::argumentsThatWereNotCreated(); + continue; + } + } + + MinifiedGenerationInfo* info = &generationInfos[source.nodeIndex()]; + if (info->format == DataFormatNone) { + // Try to see if there is an alternate node that would contain the value we want. + // There are four possibilities: + // + // Int32ToDouble: We can use this in place of the original node, but + // we'd rather not; so we use it only if it is the only remaining + // live version. + // + // ValueToInt32: If the only remaining live version of the value is + // ValueToInt32, then we can use it. + // + // UInt32ToNumber: If the only live version of the value is a UInt32ToNumber + // then the only remaining uses are ones that want a properly formed number + // rather than a UInt32 intermediate. + // + // DoubleAsInt32: Same as UInt32ToNumber. + // + // The reverse of the above: This node could be a UInt32ToNumber, but its + // alternative is still alive. This means that the only remaining uses of + // the number would be fine with a UInt32 intermediate. 
+ + bool found = false; + + if (node && node->op() == UInt32ToNumber) { + NodeIndex nodeIndex = node->child1(); + node = graph.at(nodeIndex); + info = &generationInfos[nodeIndex]; + if (info->format != DataFormatNone) + found = true; + } + + if (!found) { + NodeIndex int32ToDoubleIndex = NoNode; + NodeIndex valueToInt32Index = NoNode; + NodeIndex uint32ToNumberIndex = NoNode; + NodeIndex doubleAsInt32Index = NoNode; + + for (unsigned i = 0; i < indicesOfLiveThings.size(); ++i) { + NodeIndex nodeIndex = indicesOfLiveThings[i]; + node = graph.at(nodeIndex); + if (!node) + continue; + if (!node->hasChild1()) + continue; + if (node->child1() != source.nodeIndex()) + continue; + ASSERT(generationInfos[nodeIndex].format != DataFormatNone); + switch (node->op()) { + case Int32ToDouble: + int32ToDoubleIndex = nodeIndex; + break; + case ValueToInt32: + valueToInt32Index = nodeIndex; + break; + case UInt32ToNumber: + uint32ToNumberIndex = nodeIndex; + break; + case DoubleAsInt32: + doubleAsInt32Index = nodeIndex; + break; + default: + break; + } + } + + NodeIndex nodeIndexToUse; + if (doubleAsInt32Index != NoNode) + nodeIndexToUse = doubleAsInt32Index; + else if (int32ToDoubleIndex != NoNode) + nodeIndexToUse = int32ToDoubleIndex; + else if (valueToInt32Index != NoNode) + nodeIndexToUse = valueToInt32Index; + else if (uint32ToNumberIndex != NoNode) + nodeIndexToUse = uint32ToNumberIndex; + else + nodeIndexToUse = NoNode; + + if (nodeIndexToUse != NoNode) { + info = &generationInfos[nodeIndexToUse]; + ASSERT(info->format != DataFormatNone); + found = true; + } + } + + if (!found) { + valueRecoveries[i] = ValueRecovery::constant(jsUndefined()); + continue; + } + } + + ASSERT(info->format != DataFormatNone); + + if (info->filled) { + if (info->format == DataFormatDouble) { + valueRecoveries[i] = ValueRecovery::inFPR(info->u.fpr); + continue; + } +#if USE(JSVALUE32_64) + if (info->format & DataFormatJS) { + valueRecoveries[i] = ValueRecovery::inPair(info->u.pair.tagGPR, 
info->u.pair.payloadGPR); + continue; + } +#endif + valueRecoveries[i] = ValueRecovery::inGPR(info->u.gpr, info->format); + continue; + } + + valueRecoveries[i] = + ValueRecovery::displacedInRegisterFile(static_cast<VirtualRegister>(info->u.virtualReg), info->format); + } +} + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + diff --git a/Source/JavaScriptCore/dfg/DFGVariableEventStream.h b/Source/JavaScriptCore/dfg/DFGVariableEventStream.h new file mode 100644 index 000000000..0d10eb048 --- /dev/null +++ b/Source/JavaScriptCore/dfg/DFGVariableEventStream.h @@ -0,0 +1,64 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef DFGVariableEventStream_h +#define DFGVariableEventStream_h + +#include <wtf/Platform.h> + +#if ENABLE(DFG_JIT) + +#include "DFGCommon.h" +#include "DFGMinifiedGraph.h" +#include "DFGVariableEvent.h" +#include "Operands.h" +#include <wtf/Vector.h> + +namespace JSC { namespace DFG { + +class VariableEventStream : public Vector<VariableEvent> { +public: + void appendAndLog(const VariableEvent& event) + { +#if DFG_ENABLE(DEBUG_VERBOSE) + logEvent(event); +#endif + append(event); + } + + void reconstruct( + CodeBlock*, CodeOrigin, MinifiedGraph&, + unsigned index, Operands<ValueRecovery>&) const; + +private: + void logEvent(const VariableEvent&); +}; + +} } // namespace JSC::DFG + +#endif // ENABLE(DFG_JIT) + +#endif // DFGVariableEventStream_h + diff --git a/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.cpp b/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.cpp index 2d7ce33c9..86b33835d 100644 --- a/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.cpp +++ b/Source/JavaScriptCore/dfg/DFGVirtualRegisterAllocationPhase.cpp @@ -132,6 +132,7 @@ public: bool performVirtualRegisterAllocation(Graph& graph) { + SamplingRegion samplingRegion("DFG Virtual Register Allocation Phase"); return runPhase<VirtualRegisterAllocationPhase>(graph); } diff --git a/Source/JavaScriptCore/disassembler/udis86/udis86_syn-att.c b/Source/JavaScriptCore/disassembler/udis86/udis86_syn-att.c index 132d6ff84..155a34ca2 100644 --- a/Source/JavaScriptCore/disassembler/udis86/udis86_syn-att.c +++ b/Source/JavaScriptCore/disassembler/udis86/udis86_syn-att.c @@ -109,7 +109,7 @@ gen_operand(struct ud* u, struct ud_operand* op) } if ( sext_size < 64 ) sext_mask = ( 1ull << sext_size ) - 1; - mkasm( u, "0x" FMT64 "x", imm & sext_mask ); + mkasm( u, "$0x" FMT64 "x", imm & sext_mask ); break; } diff --git a/Source/JavaScriptCore/gyp/generate-derived-sources.sh b/Source/JavaScriptCore/gyp/generate-derived-sources.sh deleted file mode 100755 index 
a03af7d9f..000000000 --- a/Source/JavaScriptCore/gyp/generate-derived-sources.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/sh - -mkdir -p "${BUILT_PRODUCTS_DIR}/DerivedSources/JavaScriptCore/docs" -cd "${BUILT_PRODUCTS_DIR}/DerivedSources/JavaScriptCore" - -/bin/ln -sfh "${SRCROOT}/.." JavaScriptCore -export JavaScriptCore="JavaScriptCore" - -make --no-builtin-rules -f "JavaScriptCore/DerivedSources.make" -j `/usr/sbin/sysctl -n hw.ncpu` diff --git a/Source/JavaScriptCore/gyp/generate-dtrace-header.sh b/Source/JavaScriptCore/gyp/generate-dtrace-header.sh deleted file mode 100755 index a3c31dd70..000000000 --- a/Source/JavaScriptCore/gyp/generate-dtrace-header.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/sh - -TRACING_D="$1/runtime/Tracing.d"; -TRACING_H="$BUILT_PRODUCTS_DIR/DerivedSources/JavaScriptCore/TracingDtrace.h"; - -if [[ "${HAVE_DTRACE}" = "1" && "${TRACING_D}" -nt "${TRACING_H}" ]]; then - dtrace -h -o "${TRACING_H}" -s "${TRACING_D}"; -fi; - diff --git a/Source/JavaScriptCore/gyp/run-if-exists.sh b/Source/JavaScriptCore/gyp/run-if-exists.sh deleted file mode 100755 index 242ffc81b..000000000 --- a/Source/JavaScriptCore/gyp/run-if-exists.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/sh - -if [ -f $1 ]; then - $1 || exit $?; -fi diff --git a/Source/JavaScriptCore/gyp/update-info-plist.sh b/Source/JavaScriptCore/gyp/update-info-plist.sh deleted file mode 100755 index d02ecce45..000000000 --- a/Source/JavaScriptCore/gyp/update-info-plist.sh +++ /dev/null @@ -1,4 +0,0 @@ -# Touch Info.plist to let Xcode know it needs to copy it into the built product -if [[ "${CONFIGURATION}" != "Production" ]]; then - touch "$1"; -fi diff --git a/Source/JavaScriptCore/heap/ConservativeRoots.cpp b/Source/JavaScriptCore/heap/ConservativeRoots.cpp index d63faebf3..6b9cbef45 100644 --- a/Source/JavaScriptCore/heap/ConservativeRoots.cpp +++ b/Source/JavaScriptCore/heap/ConservativeRoots.cpp @@ -62,11 +62,6 @@ void ConservativeRoots::grow() m_roots = newRoots; } -class DummyMarkHook { -public: 
- void mark(void*) { } -}; - template<typename MarkHook> inline void ConservativeRoots::genericAddPointer(void* p, TinyBloomFilter filter, MarkHook& markHook) { @@ -110,15 +105,48 @@ void ConservativeRoots::genericAddSpan(void* begin, void* end, MarkHook& markHoo genericAddPointer(*it, filter, markHook); } +class DummyMarkHook { +public: + void mark(void*) { } +}; + void ConservativeRoots::add(void* begin, void* end) { - DummyMarkHook hook; - genericAddSpan(begin, end, hook); + DummyMarkHook dummy; + genericAddSpan(begin, end, dummy); } -void ConservativeRoots::add(void* begin, void* end, DFGCodeBlocks& dfgCodeBlocks) +void ConservativeRoots::add(void* begin, void* end, JITStubRoutineSet& jitStubRoutines) +{ + genericAddSpan(begin, end, jitStubRoutines); +} + +template<typename T, typename U> +class CompositeMarkHook { +public: + CompositeMarkHook(T& first, U& second) + : m_first(first) + , m_second(second) + { + } + + void mark(void* address) + { + m_first.mark(address); + m_second.mark(address); + } + +private: + T& m_first; + U& m_second; +}; + +void ConservativeRoots::add( + void* begin, void* end, JITStubRoutineSet& jitStubRoutines, DFGCodeBlocks& dfgCodeBlocks) { - genericAddSpan(begin, end, dfgCodeBlocks); + CompositeMarkHook<JITStubRoutineSet, DFGCodeBlocks> markHook( + jitStubRoutines, dfgCodeBlocks); + genericAddSpan(begin, end, markHook); } } // namespace JSC diff --git a/Source/JavaScriptCore/heap/ConservativeRoots.h b/Source/JavaScriptCore/heap/ConservativeRoots.h index 9d9e9ba0c..219bdcc8e 100644 --- a/Source/JavaScriptCore/heap/ConservativeRoots.h +++ b/Source/JavaScriptCore/heap/ConservativeRoots.h @@ -32,9 +32,10 @@ namespace JSC { -class JSCell; class DFGCodeBlocks; class Heap; +class JITStubRoutineSet; +class JSCell; class ConservativeRoots { public: @@ -42,7 +43,8 @@ public: ~ConservativeRoots(); void add(void* begin, void* end); - void add(void* begin, void* end, DFGCodeBlocks&); + void add(void* begin, void* end, JITStubRoutineSet&); + void 
add(void* begin, void* end, JITStubRoutineSet&, DFGCodeBlocks&); size_t size(); JSCell** roots(); diff --git a/Source/JavaScriptCore/heap/CopiedSpace.cpp b/Source/JavaScriptCore/heap/CopiedSpace.cpp index 631e829ec..9eb70a556 100644 --- a/Source/JavaScriptCore/heap/CopiedSpace.cpp +++ b/Source/JavaScriptCore/heap/CopiedSpace.cpp @@ -66,6 +66,7 @@ CheckedBoolean CopiedSpace::tryAllocateSlowCase(size_t bytes, void** outPtr) if (isOversize(bytes)) return tryAllocateOversize(bytes, outPtr); + ASSERT(m_heap->globalData()->apiLock().currentThreadIsHoldingLock()); m_heap->didAllocate(m_allocator.currentCapacity()); allocateBlock(); diff --git a/Source/JavaScriptCore/heap/Heap.cpp b/Source/JavaScriptCore/heap/Heap.cpp index ef062c9ce..377132765 100644 --- a/Source/JavaScriptCore/heap/Heap.cpp +++ b/Source/JavaScriptCore/heap/Heap.cpp @@ -160,15 +160,9 @@ static inline size_t proportionalHeapSize(size_t heapSize, size_t ramSize) return 1.25 * heapSize; } -static inline bool isValidSharedInstanceThreadState() +static inline bool isValidSharedInstanceThreadState(JSGlobalData* globalData) { - if (!JSLock::lockCount()) - return false; - - if (!JSLock::currentThreadIsHoldingLock()) - return false; - - return true; + return globalData->apiLock().currentThreadIsHoldingLock(); } static inline bool isValidThreadState(JSGlobalData* globalData) @@ -176,7 +170,7 @@ static inline bool isValidThreadState(JSGlobalData* globalData) if (globalData->identifierTable != wtfThreadData().currentIdentifierTable()) return false; - if (globalData->isSharedInstance() && !isValidSharedInstanceThreadState()) + if (globalData->isSharedInstance() && !isValidSharedInstanceThreadState(globalData)) return false; return true; @@ -275,10 +269,6 @@ void Heap::lastChanceToFinalize() ASSERT(!m_globalData->dynamicGlobalObject); ASSERT(m_operationInProgress == NoOperation); - // FIXME: Make this a release-mode crash once we're sure no one's doing this. 
- if (size_t size = m_protectedValues.size()) - WTFLogAlways("ERROR: JavaScriptCore heap deallocated while %ld values were still protected", static_cast<unsigned long>(size)); - m_objectSpace.lastChanceToFinalize(); #if ENABLE(SIMPLE_HEAP_PROFILING) @@ -327,7 +317,7 @@ void Heap::didAbandon(size_t bytes) void Heap::protect(JSValue k) { ASSERT(k); - ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance()); + ASSERT(m_globalData->apiLock().currentThreadIsHoldingLock()); if (!k.isCell()) return; @@ -338,7 +328,7 @@ void Heap::protect(JSValue k) bool Heap::unprotect(JSValue k) { ASSERT(k); - ASSERT(JSLock::currentThreadIsHoldingLock() || !m_globalData->isSharedInstance()); + ASSERT(m_globalData->apiLock().currentThreadIsHoldingLock()); if (!k.isCell()) return false; @@ -430,6 +420,7 @@ void Heap::markRoots(bool fullGC) // We gather conservative roots before clearing mark bits because conservative // gathering uses the mark bits to determine whether a reference is valid. 
ConservativeRoots machineThreadRoots(&m_objectSpace.blocks(), &m_storageSpace); + m_jitStubRoutines.clearMarks(); { GCPHASE(GatherConservativeRoots); m_machineThreads.gatherConservativeRoots(machineThreadRoots, &dummy); @@ -439,7 +430,8 @@ void Heap::markRoots(bool fullGC) m_dfgCodeBlocks.clearMarks(); { GCPHASE(GatherRegisterFileRoots); - registerFile().gatherConservativeRoots(registerFileRoots, m_dfgCodeBlocks); + registerFile().gatherConservativeRoots( + registerFileRoots, m_jitStubRoutines, m_dfgCodeBlocks); } #if ENABLE(DFG_JIT) @@ -464,6 +456,7 @@ void Heap::markRoots(bool fullGC) m_storageSpace.startedCopying(); SlotVisitor& visitor = m_slotVisitor; + visitor.setup(); HeapRootVisitor heapRootVisitor(visitor); { @@ -549,9 +542,10 @@ void Heap::markRoots(bool fullGC) } { - GCPHASE(TraceCodeBlocks); - MARK_LOG_ROOT(visitor, "Trace Code Blocks"); + GCPHASE(TraceCodeBlocksAndJITStubRoutines); + MARK_LOG_ROOT(visitor, "Trace Code Blocks and JIT Stub Routines"); m_dfgCodeBlocks.traceMarkedCodeBlocks(visitor); + m_jitStubRoutines.traceMarkedStubRoutines(visitor); visitor.donateAndDrain(); } @@ -595,12 +589,11 @@ void Heap::markRoots(bool fullGC) #endif visitor.reset(); - m_sharedData.reset(); #if ENABLE(PARALLEL_GC) m_sharedData.resetChildren(); #endif + m_sharedData.reset(); m_storageSpace.doneCopying(); - } size_t Heap::objectCount() @@ -675,6 +668,7 @@ void Heap::deleteUnmarkedCompiledCode() } m_dfgCodeBlocks.deleteUnmarkedJettisonedCodeBlocks(); + m_jitStubRoutines.deleteUnmarkedJettisonedStubRoutines(); } void Heap::collectAllGarbage() @@ -692,6 +686,7 @@ void Heap::collect(SweepToggle sweepToggle) SamplingRegion samplingRegion("Garbage Collection"); GCPHASE(Collect); + ASSERT(globalData()->apiLock().currentThreadIsHoldingLock()); ASSERT(globalData()->identifierTable == wtfThreadData().currentIdentifierTable()); ASSERT(m_isSafeToCollect); JAVASCRIPTCORE_GC_BEGIN(); @@ -777,19 +772,19 @@ void Heap::collect(SweepToggle sweepToggle) JAVASCRIPTCORE_GC_END(); } 
-void Heap::setActivityCallback(PassOwnPtr<GCActivityCallback> activityCallback) +void Heap::setActivityCallback(GCActivityCallback* activityCallback) { m_activityCallback = activityCallback; } GCActivityCallback* Heap::activityCallback() { - return m_activityCallback.get(); + return m_activityCallback; } IncrementalSweeper* Heap::sweeper() { - return m_sweeper.get(); + return m_sweeper; } void Heap::setGarbageCollectionTimerEnabled(bool enable) diff --git a/Source/JavaScriptCore/heap/Heap.h b/Source/JavaScriptCore/heap/Heap.h index 91c3aa58f..a43be3df0 100644 --- a/Source/JavaScriptCore/heap/Heap.h +++ b/Source/JavaScriptCore/heap/Heap.h @@ -26,6 +26,7 @@ #include "DFGCodeBlocks.h" #include "HandleSet.h" #include "HandleStack.h" +#include "JITStubRoutineSet.h" #include "MarkedAllocator.h" #include "MarkedBlock.h" #include "MarkedBlockSet.h" @@ -44,10 +45,12 @@ namespace JSC { class CodeBlock; class ExecutableBase; class GCActivityCallback; + class GCAwareJITStubRoutine; class GlobalCodeBlock; class Heap; class HeapRootVisitor; class IncrementalSweeper; + class JITStubRoutine; class JSCell; class JSGlobalData; class JSValue; @@ -99,10 +102,10 @@ namespace JSC { MachineThreads& machineThreads() { return m_machineThreads; } JS_EXPORT_PRIVATE GCActivityCallback* activityCallback(); - JS_EXPORT_PRIVATE void setActivityCallback(PassOwnPtr<GCActivityCallback>); + JS_EXPORT_PRIVATE void setActivityCallback(GCActivityCallback*); JS_EXPORT_PRIVATE void setGarbageCollectionTimerEnabled(bool); - IncrementalSweeper* sweeper(); + JS_EXPORT_PRIVATE IncrementalSweeper* sweeper(); // true if an allocation or collection is in progress inline bool isBusy(); @@ -168,6 +171,8 @@ namespace JSC { private: friend class CodeBlock; + friend class GCAwareJITStubRoutine; + friend class JITStubRoutine; friend class LLIntOffsetsExtractor; friend class MarkedSpace; friend class MarkedAllocator; @@ -229,6 +234,7 @@ namespace JSC { HandleSet m_handleSet; HandleStack m_handleStack; DFGCodeBlocks 
m_dfgCodeBlocks; + JITStubRoutineSet m_jitStubRoutines; FinalizerOwner m_finalizerOwner; bool m_isSafeToCollect; @@ -237,10 +243,10 @@ namespace JSC { double m_lastGCLength; double m_lastCodeDiscardTime; - OwnPtr<GCActivityCallback> m_activityCallback; - OwnPtr<IncrementalSweeper> m_sweeper; - DoublyLinkedList<ExecutableBase> m_compiledCode; + + GCActivityCallback* m_activityCallback; + IncrementalSweeper* m_sweeper; }; inline bool Heap::shouldCollect() diff --git a/Source/JavaScriptCore/heap/HeapTimer.cpp b/Source/JavaScriptCore/heap/HeapTimer.cpp index b4d928a34..ae66f9e26 100644 --- a/Source/JavaScriptCore/heap/HeapTimer.cpp +++ b/Source/JavaScriptCore/heap/HeapTimer.cpp @@ -26,6 +26,10 @@ #include "config.h" #include "HeapTimer.h" +#include "APIShims.h" +#include "JSObject.h" +#include "JSString.h" +#include "ScopeChain.h" #include <wtf/Threading.h> namespace JSC { @@ -46,7 +50,8 @@ HeapTimer::HeapTimer(JSGlobalData* globalData, CFRunLoopRef runLoop) HeapTimer::~HeapTimer() { - invalidate(); + CFRunLoopRemoveTimer(m_runLoop.get(), m_timer.get(), kCFRunLoopCommonModes); + CFRunLoopTimerInvalidate(m_timer.get()); } void HeapTimer::synchronize() @@ -60,14 +65,37 @@ void HeapTimer::synchronize() void HeapTimer::invalidate() { - CFRunLoopRemoveTimer(m_runLoop.get(), m_timer.get(), kCFRunLoopCommonModes); - CFRunLoopTimerInvalidate(m_timer.get()); + m_globalData = 0; + CFRunLoopTimerSetNextFireDate(m_timer.get(), CFAbsoluteTimeGetCurrent() - s_decade); +} + +void HeapTimer::didStartVMShutdown() +{ + if (CFRunLoopGetCurrent() == m_runLoop.get()) { + invalidate(); + delete this; + return; + } + ASSERT(!m_globalData->apiLock().currentThreadIsHoldingLock()); + MutexLocker locker(m_shutdownMutex); + invalidate(); } void HeapTimer::timerDidFire(CFRunLoopTimerRef, void* info) { HeapTimer* agent = static_cast<HeapTimer*>(info); - agent->doWork(); + agent->m_shutdownMutex.lock(); + if (!agent->m_globalData) { + agent->m_shutdownMutex.unlock(); + delete agent; + return; + } + 
{ + // We don't ref here to prevent us from resurrecting the ref count of a "dead" JSGlobalData. + APIEntryShim shim(agent->m_globalData, APIEntryShimWithoutLock::DontRefGlobalData); + agent->doWork(); + } + agent->m_shutdownMutex.unlock(); } #else @@ -81,6 +109,11 @@ HeapTimer::~HeapTimer() { } +void HeapTimer::didStartVMShutdown() +{ + delete this; +} + void HeapTimer::synchronize() { } @@ -89,7 +122,6 @@ void HeapTimer::invalidate() { } - #endif diff --git a/Source/JavaScriptCore/heap/HeapTimer.h b/Source/JavaScriptCore/heap/HeapTimer.h index fea013975..9255e0648 100644 --- a/Source/JavaScriptCore/heap/HeapTimer.h +++ b/Source/JavaScriptCore/heap/HeapTimer.h @@ -27,6 +27,7 @@ #define HeapTimer_h #include <wtf/RetainPtr.h> +#include <wtf/Threading.h> #if USE(CF) #include <CoreFoundation/CoreFoundation.h> @@ -46,7 +47,8 @@ public: #endif virtual ~HeapTimer(); - + + void didStartVMShutdown(); virtual void synchronize(); virtual void doWork() = 0; @@ -59,6 +61,8 @@ protected: RetainPtr<CFRunLoopTimerRef> m_timer; RetainPtr<CFRunLoopRef> m_runLoop; CFRunLoopTimerContext m_context; + + Mutex m_shutdownMutex; #endif private: diff --git a/Source/JavaScriptCore/heap/IncrementalSweeper.cpp b/Source/JavaScriptCore/heap/IncrementalSweeper.cpp index 848377346..49222c545 100644 --- a/Source/JavaScriptCore/heap/IncrementalSweeper.cpp +++ b/Source/JavaScriptCore/heap/IncrementalSweeper.cpp @@ -45,7 +45,6 @@ static const CFTimeInterval sweepTimeMultiplier = 1.0 / sweepTimeTotal; void IncrementalSweeper::doWork() { - APIEntryShim shim(m_globalData); doSweep(WTF::monotonicallyIncreasingTime()); } @@ -55,9 +54,9 @@ IncrementalSweeper::IncrementalSweeper(Heap* heap, CFRunLoopRef runLoop) { } -PassOwnPtr<IncrementalSweeper> IncrementalSweeper::create(Heap* heap) +IncrementalSweeper* IncrementalSweeper::create(Heap* heap) { - return adoptPtr(new IncrementalSweeper(heap, CFRunLoopGetCurrent())); + return new IncrementalSweeper(heap, CFRunLoopGetCurrent()); } void 
IncrementalSweeper::scheduleTimer() @@ -110,9 +109,9 @@ void IncrementalSweeper::doWork() { } -PassOwnPtr<IncrementalSweeper> IncrementalSweeper::create(Heap* heap) +IncrementalSweeper* IncrementalSweeper::create(Heap* heap) { - return adoptPtr(new IncrementalSweeper(heap->globalData())); + return new IncrementalSweeper(heap->globalData()); } void IncrementalSweeper::startSweeping(const HashSet<MarkedBlock*>&) diff --git a/Source/JavaScriptCore/heap/IncrementalSweeper.h b/Source/JavaScriptCore/heap/IncrementalSweeper.h index 20f4e3ca8..eedfa7f6f 100644 --- a/Source/JavaScriptCore/heap/IncrementalSweeper.h +++ b/Source/JavaScriptCore/heap/IncrementalSweeper.h @@ -39,7 +39,7 @@ class Heap; class IncrementalSweeper : public HeapTimer { public: - static PassOwnPtr<IncrementalSweeper> create(Heap*); + static IncrementalSweeper* create(Heap*); void startSweeping(const HashSet<MarkedBlock*>& blockSnapshot); virtual void doWork(); diff --git a/Source/JavaScriptCore/heap/JITStubRoutineSet.cpp b/Source/JavaScriptCore/heap/JITStubRoutineSet.cpp new file mode 100644 index 000000000..054bf06dd --- /dev/null +++ b/Source/JavaScriptCore/heap/JITStubRoutineSet.cpp @@ -0,0 +1,126 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "JITStubRoutineSet.h" + +#if ENABLE(JIT) + +#include "GCAwareJITStubRoutine.h" +#include "ScopeChain.h" +#include "SlotVisitor.h" + +namespace JSC { + +JITStubRoutineSet::JITStubRoutineSet() { } +JITStubRoutineSet::~JITStubRoutineSet() +{ + for (size_t i = m_listOfRoutines.size(); i--;) { + GCAwareJITStubRoutine* routine = m_listOfRoutines[i]; + + routine->m_mayBeExecuting = false; + + if (!routine->m_isJettisoned) { + // Inform the deref() routine that it should delete this guy as soon + // as the ref count reaches zero. 
+ routine->m_isJettisoned = true; + continue; + } + + routine->deleteFromGC(); + } +} + +void JITStubRoutineSet::add(GCAwareJITStubRoutine* routine) +{ + ASSERT(!routine->m_isJettisoned); + + m_listOfRoutines.append(routine); + + uintptr_t start = routine->startAddress(); + uintptr_t end = routine->endAddress(); + uintptr_t step = JITStubRoutine::addressStep(); + for (uintptr_t iter = start; iter < end; iter += step) { + ASSERT(m_addressToRoutineMap.find(iter) == m_addressToRoutineMap.end()); + m_addressToRoutineMap.add(iter, routine); + } +} + +void JITStubRoutineSet::clearMarks() +{ + for (size_t i = m_listOfRoutines.size(); i--;) + m_listOfRoutines[i]->m_mayBeExecuting = false; +} + +void JITStubRoutineSet::markSlow(uintptr_t address) +{ + HashMap<uintptr_t, GCAwareJITStubRoutine*>::iterator iter = + m_addressToRoutineMap.find(address & ~(JITStubRoutine::addressStep() - 1)); + + if (iter == m_addressToRoutineMap.end()) + return; + + iter->second->m_mayBeExecuting = true; +} + +void JITStubRoutineSet::deleteUnmarkedJettisonedStubRoutines() +{ + for (size_t i = 0; i < m_listOfRoutines.size(); i++) { + GCAwareJITStubRoutine* routine = m_listOfRoutines[i]; + if (!routine->m_isJettisoned || routine->m_mayBeExecuting) + continue; + + uintptr_t start = routine->startAddress(); + uintptr_t end = routine->endAddress(); + uintptr_t step = JITStubRoutine::addressStep(); + for (uintptr_t iter = start; iter < end; iter += step) { + ASSERT(m_addressToRoutineMap.find(iter) != m_addressToRoutineMap.end()); + ASSERT(m_addressToRoutineMap.find(iter)->second == routine); + m_addressToRoutineMap.remove(iter); + } + + routine->deleteFromGC(); + + m_listOfRoutines[i] = m_listOfRoutines.last(); + m_listOfRoutines.removeLast(); + i--; + } +} + +void JITStubRoutineSet::traceMarkedStubRoutines(SlotVisitor& visitor) +{ + for (size_t i = m_listOfRoutines.size(); i--;) { + GCAwareJITStubRoutine* routine = m_listOfRoutines[i]; + if (!routine->m_mayBeExecuting) + continue; + + 
routine->markRequiredObjects(visitor); + } +} + +} // namespace JSC + +#endif // ENABLE(JIT) + diff --git a/Source/JavaScriptCore/heap/JITStubRoutineSet.h b/Source/JavaScriptCore/heap/JITStubRoutineSet.h new file mode 100644 index 000000000..ea120132e --- /dev/null +++ b/Source/JavaScriptCore/heap/JITStubRoutineSet.h @@ -0,0 +1,80 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef JITStubRoutineSet_h +#define JITStubRoutineSet_h + +#include <wtf/Platform.h> + +#if ENABLE(JIT) + +#include "JITStubRoutine.h" +#include <wtf/FastAllocBase.h> +#include <wtf/HashMap.h> +#include <wtf/Vector.h> + +namespace JSC { + +class GCAwareJITStubRoutine; +class SlotVisitor; + +class JITStubRoutineSet { + WTF_MAKE_NONCOPYABLE(JITStubRoutineSet); + WTF_MAKE_FAST_ALLOCATED; + +public: + JITStubRoutineSet(); + ~JITStubRoutineSet(); + + void add(GCAwareJITStubRoutine*); + + void clearMarks(); + + void mark(void* candidateAddress) + { + uintptr_t address = reinterpret_cast<uintptr_t>(candidateAddress); + if (!JITStubRoutine::passesFilter(address)) + return; + + markSlow(address); + } + + void deleteUnmarkedJettisonedStubRoutines(); + + void traceMarkedStubRoutines(SlotVisitor&); + +private: + void markSlow(uintptr_t address); + + HashMap<uintptr_t, GCAwareJITStubRoutine*> m_addressToRoutineMap; + Vector<GCAwareJITStubRoutine*> m_listOfRoutines; +}; + +} // namespace JSC + +#endif // ENABLE(JIT) + +#endif // JITStubRoutineSet_h + diff --git a/Source/JavaScriptCore/heap/MachineStackMarker.cpp b/Source/JavaScriptCore/heap/MachineStackMarker.cpp index 8e0c57b6a..7eb57479b 100644 --- a/Source/JavaScriptCore/heap/MachineStackMarker.cpp +++ b/Source/JavaScriptCore/heap/MachineStackMarker.cpp @@ -141,8 +141,10 @@ MachineThreads::MachineThreads(Heap* heap) MachineThreads::~MachineThreads() { - if (m_threadSpecific) - ThreadSpecificKeyDelete(m_threadSpecific); + if (m_threadSpecific) { + int error = pthread_key_delete(m_threadSpecific); + ASSERT_UNUSED(error, !error); + } MutexLocker registeredThreadsLock(m_registeredThreadsMutex); for (Thread* t = m_registeredThreads; t;) { @@ -179,17 +181,19 @@ void MachineThreads::makeUsableFromMultipleThreads() if (m_threadSpecific) return; - ThreadSpecificKeyCreate(&m_threadSpecific, removeThread); + int error = pthread_key_create(&m_threadSpecific, removeThread); + if (error) + CRASH(); } void 
MachineThreads::addCurrentThread() { ASSERT(!m_heap->globalData()->exclusiveThread || m_heap->globalData()->exclusiveThread == currentThread()); - if (!m_threadSpecific || ThreadSpecificGet(m_threadSpecific)) + if (!m_threadSpecific || pthread_getspecific(m_threadSpecific)) return; - ThreadSpecificSet(m_threadSpecific, this); + pthread_setspecific(m_threadSpecific, this); Thread* thread = new Thread(getCurrentPlatformThread(), wtfThreadData().stack().origin()); MutexLocker lock(m_registeredThreadsMutex); diff --git a/Source/JavaScriptCore/heap/MachineStackMarker.h b/Source/JavaScriptCore/heap/MachineStackMarker.h index 2209f97e9..5c7705fcf 100644 --- a/Source/JavaScriptCore/heap/MachineStackMarker.h +++ b/Source/JavaScriptCore/heap/MachineStackMarker.h @@ -22,14 +22,14 @@ #ifndef MachineThreads_h #define MachineThreads_h +#include <pthread.h> #include <wtf/Noncopyable.h> -#include <wtf/ThreadSpecific.h> #include <wtf/ThreadingPrimitives.h> namespace JSC { - class Heap; class ConservativeRoots; + class Heap; class MachineThreads { WTF_MAKE_NONCOPYABLE(MachineThreads); @@ -55,7 +55,7 @@ namespace JSC { Heap* m_heap; Mutex m_registeredThreadsMutex; Thread* m_registeredThreads; - WTF::ThreadSpecificKey m_threadSpecific; + pthread_key_t m_threadSpecific; }; } // namespace JSC diff --git a/Source/JavaScriptCore/heap/MarkStack.cpp b/Source/JavaScriptCore/heap/MarkStack.cpp index 3eb02c4e8..9d9130026 100644 --- a/Source/JavaScriptCore/heap/MarkStack.cpp +++ b/Source/JavaScriptCore/heap/MarkStack.cpp @@ -38,6 +38,7 @@ #include "Structure.h" #include "UString.h" #include "WriteBarrier.h" +#include <wtf/Atomics.h> #include <wtf/DataLog.h> #include <wtf/MainThread.h> @@ -65,7 +66,7 @@ MarkStackSegment* MarkStackSegmentAllocator::allocate() } } - return static_cast<MarkStackSegment*>(OSAllocator::reserveAndCommit(Options::gcMarkStackSegmentSize)); + return static_cast<MarkStackSegment*>(OSAllocator::reserveAndCommit(Options::gcMarkStackSegmentSize())); } void 
MarkStackSegmentAllocator::release(MarkStackSegment* segment) @@ -86,13 +87,13 @@ void MarkStackSegmentAllocator::shrinkReserve() while (segments) { MarkStackSegment* toFree = segments; segments = segments->m_previous; - OSAllocator::decommitAndRelease(toFree, Options::gcMarkStackSegmentSize); + OSAllocator::decommitAndRelease(toFree, Options::gcMarkStackSegmentSize()); } } MarkStackArray::MarkStackArray(MarkStackSegmentAllocator& allocator) : m_allocator(allocator) - , m_segmentCapacity(MarkStackSegment::capacityFromSize(Options::gcMarkStackSegmentSize)) + , m_segmentCapacity(MarkStackSegment::capacityFromSize(Options::gcMarkStackSegmentSize())) , m_top(0) , m_numberOfPreviousSegments(0) { @@ -225,8 +226,8 @@ void MarkStackArray::stealSomeCellsFrom(MarkStackArray& other, size_t idleThread void MarkStackThreadSharedData::resetChildren() { for (unsigned i = 0; i < m_markingThreadsMarkStack.size(); ++i) - m_markingThreadsMarkStack[i]->reset(); -} + m_markingThreadsMarkStack[i]->reset(); +} size_t MarkStackThreadSharedData::childVisitCount() { @@ -257,12 +258,13 @@ void MarkStackThreadSharedData::markingThreadStartFunc(void* myVisitor) MarkStackThreadSharedData::MarkStackThreadSharedData(JSGlobalData* globalData) : m_globalData(globalData) , m_copiedSpace(&globalData->heap.m_storageSpace) + , m_shouldHashConst(false) , m_sharedMarkStack(m_segmentAllocator) , m_numberOfActiveParallelMarkers(0) , m_parallelMarkersShouldExit(false) { #if ENABLE(PARALLEL_GC) - for (unsigned i = 1; i < Options::numberOfGCMarkers; ++i) { + for (unsigned i = 1; i < Options::numberOfGCMarkers(); ++i) { SlotVisitor* slotVisitor = new SlotVisitor(*this); m_markingThreadsMarkStack.append(slotVisitor); m_markingThreads.append(createThread(markingThreadStartFunc, slotVisitor, "JavaScriptCore::Marking")); @@ -298,6 +300,21 @@ void MarkStackThreadSharedData::reset() ASSERT(m_opaqueRoots.isEmpty()); #endif m_weakReferenceHarvesters.removeAll(); + + if (m_shouldHashConst) { + 
m_globalData->resetNewStringsSinceLastHashConst(); + m_shouldHashConst = false; + } +} + +void MarkStack::setup() +{ + m_shared.m_shouldHashConst = m_shared.m_globalData->haveEnoughNewStringsToHashConst(); + m_shouldHashConst = m_shared.m_shouldHashConst; +#if ENABLE(PARALLEL_GC) + for (unsigned i = 0; i < m_shared.m_markingThreadsMarkStack.size(); ++i) + m_shared.m_markingThreadsMarkStack[i]->m_shouldHashConst = m_shared.m_shouldHashConst; +#endif } void MarkStack::reset() @@ -309,6 +326,10 @@ void MarkStack::reset() #else m_opaqueRoots.clear(); #endif + if (m_shouldHashConst) { + m_uniqueStrings.clear(); + m_shouldHashConst = false; + } } void MarkStack::append(ConservativeRoots& conservativeRoots) @@ -333,7 +354,7 @@ ALWAYS_INLINE static void visitChildren(SlotVisitor& visitor, const JSCell* cell } if (isJSFinalObject(cell)) { - JSObject::visitChildren(const_cast<JSCell*>(cell), visitor); + JSFinalObject::visitChildren(const_cast<JSCell*>(cell), visitor); return; } @@ -368,7 +389,7 @@ void SlotVisitor::donateKnownParallel() // Otherwise, assume that a thread will go idle soon, and donate. 
m_stack.donateSomeCellsTo(m_shared.m_sharedMarkStack); - if (m_shared.m_numberOfActiveParallelMarkers < Options::numberOfGCMarkers) + if (m_shared.m_numberOfActiveParallelMarkers < Options::numberOfGCMarkers()) m_shared.m_markingCondition.broadcast(); } @@ -377,10 +398,10 @@ void SlotVisitor::drain() ASSERT(m_isInParallelMode); #if ENABLE(PARALLEL_GC) - if (Options::numberOfGCMarkers > 1) { + if (Options::numberOfGCMarkers() > 1) { while (!m_stack.isEmpty()) { m_stack.refill(); - for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance; m_stack.canRemoveLast() && countdown--;) + for (unsigned countdown = Options::minimumNumberOfScansBetweenRebalance(); m_stack.canRemoveLast() && countdown--;) visitChildren(*this, m_stack.removeLast()); donateKnownParallel(); } @@ -401,14 +422,14 @@ void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode) { ASSERT(m_isInParallelMode); - ASSERT(Options::numberOfGCMarkers); + ASSERT(Options::numberOfGCMarkers()); bool shouldBeParallel; #if ENABLE(PARALLEL_GC) - shouldBeParallel = Options::numberOfGCMarkers > 1; + shouldBeParallel = Options::numberOfGCMarkers() > 1; #else - ASSERT(Options::numberOfGCMarkers == 1); + ASSERT(Options::numberOfGCMarkers() == 1); shouldBeParallel = false; #endif @@ -469,7 +490,7 @@ void SlotVisitor::drainFromShared(SharedDrainMode sharedDrainMode) } } - size_t idleThreadCount = Options::numberOfGCMarkers - m_shared.m_numberOfActiveParallelMarkers; + size_t idleThreadCount = Options::numberOfGCMarkers() - m_shared.m_numberOfActiveParallelMarkers; m_stack.stealSomeCellsFrom(m_shared.m_sharedMarkStack, idleThreadCount); m_shared.m_numberOfActiveParallelMarkers++; } @@ -521,6 +542,79 @@ void* SlotVisitor::allocateNewSpace(void* ptr, size_t bytes) return CopiedSpace::allocateFromBlock(m_copyBlock, bytes); } +ALWAYS_INLINE bool JSString::tryHashConstLock() +{ +#if ENABLE(PARALLEL_GC) + unsigned currentFlags = m_flags; + + if (currentFlags & HashConstLock) + return false; + + unsigned 
newFlags = currentFlags | HashConstLock; + + if (!WTF::weakCompareAndSwap(&m_flags, currentFlags, newFlags)) + return false; + + WTF::memoryBarrierAfterLock(); + return true; +#else + if (isHashConstSingleton()) + return false; + + m_flags |= HashConstLock; + + return true; +#endif +} + +ALWAYS_INLINE void JSString::releaseHashConstLock() +{ +#if ENABLE(PARALLEL_GC) + WTF::memoryBarrierBeforeUnlock(); +#endif + m_flags &= ~HashConstLock; +} + +ALWAYS_INLINE bool JSString::shouldTryHashConst() +{ + return ((length() > 1) && !isRope() && !isHashConstSingleton()); +} + +ALWAYS_INLINE void MarkStack::internalAppend(JSValue* slot) +{ + // This internalAppend is only intended for visits to object and array backing stores. + // as it can change the JSValue pointed to be the argument when the original JSValue + // is a string that contains the same contents as another string. + + ASSERT(slot); + JSValue value = *slot; + ASSERT(value); + if (!value.isCell()) + return; + + JSCell* cell = value.asCell(); + + if (m_shouldHashConst && cell->isString()) { + JSString* string = jsCast<JSString*>(cell); + if (string->shouldTryHashConst() && string->tryHashConstLock()) { + UniqueStringMap::AddResult addResult = m_uniqueStrings.add(string->string().impl(), value); + if (addResult.isNewEntry) + string->setHashConstSingleton(); + else { + JSValue existingJSValue = addResult.iterator->second; + if (value != existingJSValue) + jsCast<JSString*>(existingJSValue.asCell())->clearHashConstSingleton(); + *slot = existingJSValue; + string->releaseHashConstLock(); + return; + } + string->releaseHashConstLock(); + } + } + + internalAppend(cell); +} + void SlotVisitor::copyAndAppend(void** ptr, size_t bytes, JSValue* values, unsigned length) { void* oldPtr = *ptr; @@ -534,7 +628,7 @@ void SlotVisitor::copyAndAppend(void** ptr, size_t bytes, JSValue* values, unsig newValues[i] = value; if (!value) continue; - internalAppend(value); + internalAppend(&newValues[i]); } memcpy(newPtr, oldPtr, 
jsValuesOffset); diff --git a/Source/JavaScriptCore/heap/MarkStack.h b/Source/JavaScriptCore/heap/MarkStack.h index c3065e7d6..ff25531a4 100644 --- a/Source/JavaScriptCore/heap/MarkStack.h +++ b/Source/JavaScriptCore/heap/MarkStack.h @@ -219,6 +219,8 @@ namespace JSC { MarkStackSegmentAllocator m_segmentAllocator; + bool m_shouldHashConst; + Vector<ThreadIdentifier> m_markingThreads; Vector<MarkStack*> m_markingThreadsMarkStack; @@ -259,6 +261,7 @@ namespace JSC { MarkStackThreadSharedData& sharedData() { return m_shared; } bool isEmpty() { return m_stack.isEmpty(); } + void setup(); void reset(); size_t visitCount() const { return m_visitCount; } @@ -292,6 +295,7 @@ namespace JSC { void internalAppend(JSCell*); void internalAppend(JSValue); + void internalAppend(JSValue*); JS_EXPORT_PRIVATE void mergeOpaqueRoots(); @@ -304,7 +308,7 @@ namespace JSC { void mergeOpaqueRootsIfProfitable() { - if (static_cast<unsigned>(m_opaqueRoots.size()) < Options::opaqueRootMergeThreshold) + if (static_cast<unsigned>(m_opaqueRoots.size()) < Options::opaqueRootMergeThreshold()) return; mergeOpaqueRoots(); } @@ -325,6 +329,10 @@ namespace JSC { MarkStackThreadSharedData& m_shared; + bool m_shouldHashConst; // Local per-thread copy of shared flag for performance reasons + typedef HashMap<StringImpl*, JSValue> UniqueStringMap; + UniqueStringMap m_uniqueStrings; + #if ENABLE(OBJECT_MARK_LOGGING) unsigned m_logChildCount; #endif @@ -339,6 +347,7 @@ namespace JSC { , m_visitCount(0) , m_isInParallelMode(false) , m_shared(shared) + , m_shouldHashConst(false) { } @@ -350,7 +359,7 @@ namespace JSC { inline void MarkStack::addOpaqueRoot(void* root) { #if ENABLE(PARALLEL_GC) - if (Options::numberOfGCMarkers == 1) { + if (Options::numberOfGCMarkers() == 1) { // Put directly into the shared HashSet. 
m_shared.m_opaqueRoots.add(root); return; diff --git a/Source/JavaScriptCore/heap/MarkedAllocator.cpp b/Source/JavaScriptCore/heap/MarkedAllocator.cpp index 9cac906a1..972728637 100644 --- a/Source/JavaScriptCore/heap/MarkedAllocator.cpp +++ b/Source/JavaScriptCore/heap/MarkedAllocator.cpp @@ -3,6 +3,7 @@ #include "GCActivityCallback.h" #include "Heap.h" +#include "JSGlobalData.h" #include <wtf/CurrentTime.h> namespace JSC { @@ -56,6 +57,7 @@ inline void* MarkedAllocator::tryAllocate() void* MarkedAllocator::allocateSlowCase() { + ASSERT(m_heap->globalData()->apiLock().currentThreadIsHoldingLock()); #if COLLECT_ON_EVERY_ALLOCATION m_heap->collectAllGarbage(); ASSERT(m_heap->m_operationInProgress == NoOperation); diff --git a/Source/JavaScriptCore/heap/SlotVisitor.h b/Source/JavaScriptCore/heap/SlotVisitor.h index 715e2008c..70d68bb04 100644 --- a/Source/JavaScriptCore/heap/SlotVisitor.h +++ b/Source/JavaScriptCore/heap/SlotVisitor.h @@ -41,7 +41,7 @@ public: void donate() { ASSERT(m_isInParallelMode); - if (Options::numberOfGCMarkers == 1) + if (Options::numberOfGCMarkers() == 1) return; donateKnownParallel(); diff --git a/Source/JavaScriptCore/heap/WeakBlock.cpp b/Source/JavaScriptCore/heap/WeakBlock.cpp index 8900e73df..05a44ea7e 100644 --- a/Source/JavaScriptCore/heap/WeakBlock.cpp +++ b/Source/JavaScriptCore/heap/WeakBlock.cpp @@ -127,8 +127,10 @@ void WeakBlock::reap() if (weakImpl->state() > WeakImpl::Dead) continue; - if (Heap::isMarked(weakImpl->jsValue().asCell())) + if (Heap::isMarked(weakImpl->jsValue().asCell())) { + ASSERT(weakImpl->state() == WeakImpl::Live); continue; + } weakImpl->setState(WeakImpl::Dead); } diff --git a/Source/JavaScriptCore/interpreter/Interpreter.cpp b/Source/JavaScriptCore/interpreter/Interpreter.cpp index b8610e7bf..b6072a5d6 100644 --- a/Source/JavaScriptCore/interpreter/Interpreter.cpp +++ b/Source/JavaScriptCore/interpreter/Interpreter.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008, 2009, 2010 Apple Inc. All rights reserved. 
+ * Copyright (C) 2008, 2009, 2010, 2012 Apple Inc. All rights reserved. * Copyright (C) 2008 Cameron Zwarich <cwzwarich@uwaterloo.ca> * * Redistribution and use in source and binary forms, with or without @@ -1822,7 +1822,7 @@ NEVER_INLINE void Interpreter::tryCacheGetByID(CallFrame* callFrame, CodeBlock* ASSERT(slot.slotBase().isObject()); JSObject* baseObject = asObject(slot.slotBase()); - size_t offset = slot.cachedOffset(); + PropertyOffset offset = slot.cachedOffset(); // Since we're accessing a prototype in a loop, it's a good bet that it // should not be treated as a dictionary. @@ -1851,7 +1851,7 @@ NEVER_INLINE void Interpreter::tryCacheGetByID(CallFrame* callFrame, CodeBlock* return; } - size_t offset = slot.cachedOffset(); + PropertyOffset offset = slot.cachedOffset(); size_t count = normalizePrototypeChain(callFrame, baseValue, slot.slotBase(), propertyName, offset); if (!count) { vPC[0] = getOpcode(op_get_by_id_generic); @@ -3045,6 +3045,7 @@ JSValue Interpreter::privateExecute(ExecutionFlag flag, RegisterFile* registerFi vPC += OPCODE_LENGTH(op_resolve_with_this); NEXT_INSTRUCTION(); } + DEFINE_OPCODE(op_get_by_id_out_of_line) DEFINE_OPCODE(op_get_by_id) { /* get_by_id dst(r) base(r) property(id) structure(sID) nop(n) nop(n) nop(n) @@ -3527,6 +3528,7 @@ skip_id_custom_self: skip_get_string_length: goto *(&&skip_put_by_id); #endif + DEFINE_OPCODE(op_put_by_id_out_of_line) DEFINE_OPCODE(op_put_by_id) { /* put_by_id base(r) property(id) value(r) nop(n) nop(n) nop(n) nop(n) direct(b) @@ -3565,6 +3567,8 @@ skip_id_custom_self: #endif DEFINE_OPCODE(op_put_by_id_transition_direct) DEFINE_OPCODE(op_put_by_id_transition_normal) + DEFINE_OPCODE(op_put_by_id_transition_direct_out_of_line) + DEFINE_OPCODE(op_put_by_id_transition_normal_out_of_line) DEFINE_OPCODE(op_put_by_id_transition) { /* op_put_by_id_transition base(r) property(id) value(r) oldStructure(sID) newStructure(sID) structureChain(chain) offset(n) direct(b) @@ -3602,10 +3606,10 @@ 
skip_id_custom_self: proto = asObject(proto)->structure()->prototypeForLookup(callFrame); } } - baseObject->transitionTo(*globalData, newStructure); + baseObject->setStructureAndReallocateStorageIfNecessary(*globalData, newStructure); int value = vPC[3].u.operand; - unsigned offset = vPC[7].u.operand; + int offset = vPC[7].u.operand; ASSERT(baseObject->offsetForLocation(baseObject->getDirectLocation(*globalData, codeBlock->identifier(vPC[2].u.operand))) == offset); baseObject->putDirectOffset(callFrame->globalData(), offset, callFrame->r(value).jsValue()); @@ -3639,7 +3643,7 @@ skip_id_custom_self: ASSERT(baseCell->isObject()); JSObject* baseObject = asObject(baseCell); int value = vPC[3].u.operand; - unsigned offset = vPC[5].u.operand; + int offset = vPC[5].u.operand; ASSERT(baseObject->offsetForLocation(baseObject->getDirectLocation(*globalData, codeBlock->identifier(vPC[2].u.operand))) == offset); baseObject->putDirectOffset(callFrame->globalData(), offset, callFrame->r(value).jsValue()); @@ -3717,7 +3721,7 @@ skip_id_custom_self: JSValue expectedSubscript = callFrame->r(expected).jsValue(); int index = callFrame->r(i).i() - 1; JSValue result; - int offset = 0; + PropertyOffset offset = 0; if (subscript == expectedSubscript && baseValue.isCell() && (baseValue.asCell()->structure() == it->cachedStructure()) && it->getOffset(index, offset)) { callFrame->uncheckedR(dst) = JSValue(asObject(baseValue)->getDirectOffset(offset)); vPC += OPCODE_LENGTH(op_get_by_pname); diff --git a/Source/JavaScriptCore/interpreter/Interpreter.h b/Source/JavaScriptCore/interpreter/Interpreter.h index adb23f237..ba2f4fac4 100644 --- a/Source/JavaScriptCore/interpreter/Interpreter.h +++ b/Source/JavaScriptCore/interpreter/Interpreter.h @@ -1,5 +1,6 @@ /* * Copyright (C) 2008 Apple Inc. All rights reserved. + * Copyright (C) 2012 Research In Motion Limited. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -39,6 +40,7 @@ #include "RegisterFile.h" #include <wtf/HashMap.h> +#include <wtf/text/StringBuilder.h> namespace JSC { @@ -80,45 +82,63 @@ namespace JSC { UString sourceURL; UString toString(CallFrame* callFrame) const { - bool hasSourceURLInfo = !sourceURL.isNull() && !sourceURL.isEmpty(); - bool hasLineInfo = line > -1; + StringBuilder traceBuild; + String functionName = friendlyFunctionName(callFrame); + String sourceURL = friendlySourceURL(); + traceBuild.append(functionName); + if (!sourceURL.isEmpty()) { + if (!functionName.isEmpty()) + traceBuild.append('@'); + traceBuild.append(sourceURL); + if (line > -1) { + traceBuild.append(':'); + traceBuild.append(String::number(line)); + } + } + return traceBuild.toString().impl(); + } + String friendlySourceURL() const + { String traceLine; - JSObject* stackFrameCallee = callee.get(); switch (codeType) { case StackFrameEvalCode: - if (hasSourceURLInfo) { - traceLine = hasLineInfo ? String::format("eval code@%s:%d", sourceURL.ascii().data(), line) - : String::format("eval code@%s", sourceURL.ascii().data()); - } else - traceLine = String::format("eval code"); + case StackFrameFunctionCode: + case StackFrameGlobalCode: + if (!sourceURL.isEmpty()) + traceLine = sourceURL.impl(); break; - case StackFrameNativeCode: { - if (callee) { - UString functionName = getCalculatedDisplayName(callFrame, stackFrameCallee); - traceLine = String::format("%s@[native code]", functionName.ascii().data()); - } else - traceLine = "[native code]"; + case StackFrameNativeCode: + traceLine = "[native code]"; break; } - case StackFrameFunctionCode: { - UString functionName = getCalculatedDisplayName(callFrame, stackFrameCallee); - if (hasSourceURLInfo) { - traceLine = hasLineInfo ? 
String::format("%s@%s:%d", functionName.ascii().data(), sourceURL.ascii().data(), line) - : String::format("%s@%s", functionName.ascii().data(), sourceURL.ascii().data()); - } else - traceLine = String::format("%s\n", functionName.ascii().data()); + return traceLine.isNull() ? emptyString() : traceLine; + } + String friendlyFunctionName(CallFrame* callFrame) const + { + String traceLine; + JSObject* stackFrameCallee = callee.get(); + + switch (codeType) { + case StackFrameEvalCode: + traceLine = "eval code"; + break; + case StackFrameNativeCode: + if (callee) + traceLine = getCalculatedDisplayName(callFrame, stackFrameCallee).impl(); + break; + case StackFrameFunctionCode: + traceLine = getCalculatedDisplayName(callFrame, stackFrameCallee).impl(); break; - } case StackFrameGlobalCode: - if (hasSourceURLInfo) { - traceLine = hasLineInfo ? String::format("global code@%s:%d", sourceURL.ascii().data(), line) - : String::format("global code@%s", sourceURL.ascii().data()); - } else - traceLine = String::format("global code"); - + traceLine = "global code"; + break; } - return traceLine.impl(); + return traceLine.isNull() ? emptyString() : traceLine; + } + unsigned friendlyLineNumber() const + { + return line > -1 ? 
line : 0; } }; diff --git a/Source/JavaScriptCore/interpreter/RegisterFile.cpp b/Source/JavaScriptCore/interpreter/RegisterFile.cpp index b72352781..dacb53872 100644 --- a/Source/JavaScriptCore/interpreter/RegisterFile.cpp +++ b/Source/JavaScriptCore/interpreter/RegisterFile.cpp @@ -73,9 +73,9 @@ void RegisterFile::gatherConservativeRoots(ConservativeRoots& conservativeRoots) conservativeRoots.add(begin(), end()); } -void RegisterFile::gatherConservativeRoots(ConservativeRoots& conservativeRoots, DFGCodeBlocks& dfgCodeBlocks) +void RegisterFile::gatherConservativeRoots(ConservativeRoots& conservativeRoots, JITStubRoutineSet& jitStubRoutines, DFGCodeBlocks& dfgCodeBlocks) { - conservativeRoots.add(begin(), end(), dfgCodeBlocks); + conservativeRoots.add(begin(), end(), jitStubRoutines, dfgCodeBlocks); } void RegisterFile::releaseExcessCapacity() diff --git a/Source/JavaScriptCore/interpreter/RegisterFile.h b/Source/JavaScriptCore/interpreter/RegisterFile.h index 21ad7fbae..8fff8208c 100644 --- a/Source/JavaScriptCore/interpreter/RegisterFile.h +++ b/Source/JavaScriptCore/interpreter/RegisterFile.h @@ -39,6 +39,7 @@ namespace JSC { class ConservativeRoots; class DFGCodeBlocks; + class JITStubRoutineSet; class LLIntOffsetsExtractor; class RegisterFile { @@ -64,7 +65,7 @@ namespace JSC { ~RegisterFile(); void gatherConservativeRoots(ConservativeRoots&); - void gatherConservativeRoots(ConservativeRoots&, DFGCodeBlocks&); + void gatherConservativeRoots(ConservativeRoots&, JITStubRoutineSet&, DFGCodeBlocks&); Register* begin() const { return static_cast<Register*>(m_reservation.base()); } Register* end() const { return m_end; } diff --git a/Source/JavaScriptCore/jit/ExecutableAllocator.cpp b/Source/JavaScriptCore/jit/ExecutableAllocator.cpp index 79399196e..e9bb66ce7 100644 --- a/Source/JavaScriptCore/jit/ExecutableAllocator.cpp +++ b/Source/JavaScriptCore/jit/ExecutableAllocator.cpp @@ -55,7 +55,7 @@ namespace JSC { class DemandExecutableAllocator : public MetaAllocator { 
public: DemandExecutableAllocator() - : MetaAllocator(32) // round up all allocations to 32 bytes + : MetaAllocator(jitAllocationGranule) { MutexLocker lock(allocatorsMutex()); allocators().add(this); diff --git a/Source/JavaScriptCore/jit/ExecutableAllocator.h b/Source/JavaScriptCore/jit/ExecutableAllocator.h index 8cd5cba07..85779e6a8 100644 --- a/Source/JavaScriptCore/jit/ExecutableAllocator.h +++ b/Source/JavaScriptCore/jit/ExecutableAllocator.h @@ -77,6 +77,8 @@ namespace JSC { class JSGlobalData; void releaseExecutableMemory(JSGlobalData&); +static const unsigned jitAllocationGranule = 32; + inline size_t roundUpAllocationSize(size_t request, size_t granularity) { if ((std::numeric_limits<size_t>::max() - granularity) <= request) @@ -101,6 +103,18 @@ typedef WTF::MetaAllocatorHandle ExecutableMemoryHandle; class DemandExecutableAllocator; #endif +#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED) +#if CPU(ARM) +static const size_t fixedExecutableMemoryPoolSize = 16 * 1024 * 1024; +#elif CPU(X86_64) +static const size_t fixedExecutableMemoryPoolSize = 1024 * 1024 * 1024; +#else +static const size_t fixedExecutableMemoryPoolSize = 32 * 1024 * 1024; +#endif + +extern uintptr_t startOfFixedExecutableMemoryPool; +#endif + class ExecutableAllocator { enum ProtectionSetting { Writable, Executable }; diff --git a/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp b/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp index 2e08f1205..ad3343d11 100644 --- a/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp +++ b/Source/JavaScriptCore/jit/ExecutableAllocatorFixedVMPool.cpp @@ -45,27 +45,23 @@ using namespace WTF; namespace JSC { -#if CPU(ARM) -static const size_t fixedPoolSize = 16 * 1024 * 1024; -#elif CPU(X86_64) -static const size_t fixedPoolSize = 1024 * 1024 * 1024; -#else -static const size_t fixedPoolSize = 32 * 1024 * 1024; -#endif +uintptr_t startOfFixedExecutableMemoryPool; class FixedVMPoolExecutableAllocator : public MetaAllocator { public: 
FixedVMPoolExecutableAllocator() - : MetaAllocator(32) // round up all allocations to 32 bytes + : MetaAllocator(jitAllocationGranule) // round up all allocations to 32 bytes { - m_reservation = PageReservation::reserveWithGuardPages(fixedPoolSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true); + m_reservation = PageReservation::reserveWithGuardPages(fixedExecutableMemoryPoolSize, OSAllocator::JSJITCodePages, EXECUTABLE_POOL_WRITABLE, true); #if !(ENABLE(CLASSIC_INTERPRETER) || ENABLE(LLINT)) if (!m_reservation) CRASH(); #endif if (m_reservation) { - ASSERT(m_reservation.size() == fixedPoolSize); + ASSERT(m_reservation.size() == fixedExecutableMemoryPoolSize); addFreshFreeSpace(m_reservation.base(), m_reservation.size()); + + startOfFixedExecutableMemoryPool = reinterpret_cast<uintptr_t>(m_reservation.base()); } } diff --git a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp new file mode 100644 index 000000000..7ea61178c --- /dev/null +++ b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.cpp @@ -0,0 +1,127 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "GCAwareJITStubRoutine.h" + +#if ENABLE(JIT) + +#include "Heap.h" +#include "JSGlobalData.h" +#include "ScopeChain.h" +#include "SlotVisitor.h" +#include "Structure.h" + +namespace JSC { + +GCAwareJITStubRoutine::GCAwareJITStubRoutine( + const MacroAssemblerCodeRef& code, JSGlobalData& globalData) + : JITStubRoutine(code) + , m_mayBeExecuting(false) + , m_isJettisoned(false) +{ + globalData.heap.m_jitStubRoutines.add(this); +} + +GCAwareJITStubRoutine::~GCAwareJITStubRoutine() { } + +void GCAwareJITStubRoutine::observeZeroRefCount() +{ + if (m_isJettisoned) { + // This case is needed for when the system shuts down. It may be that + // the JIT stub routine set gets deleted before we get around to deleting + // this guy. In that case the GC informs us that we're jettisoned already + // and that we should delete ourselves as soon as the ref count reaches + // zero. 
+ delete this; + return; + } + + ASSERT(!m_refCount); + + m_isJettisoned = true; +} + +void GCAwareJITStubRoutine::deleteFromGC() +{ + ASSERT(m_isJettisoned); + ASSERT(!m_refCount); + ASSERT(!m_mayBeExecuting); + + delete this; +} + +void GCAwareJITStubRoutine::markRequiredObjectsInternal(SlotVisitor&) +{ +} + +MarkingGCAwareJITStubRoutineWithOneObject::MarkingGCAwareJITStubRoutineWithOneObject( + const MacroAssemblerCodeRef& code, JSGlobalData& globalData, const JSCell* owner, + JSCell* object) + : GCAwareJITStubRoutine(code, globalData) + , m_object(globalData, owner, object) +{ +} + +MarkingGCAwareJITStubRoutineWithOneObject::~MarkingGCAwareJITStubRoutineWithOneObject() +{ +} + +void MarkingGCAwareJITStubRoutineWithOneObject::markRequiredObjectsInternal(SlotVisitor& visitor) +{ + visitor.append(&m_object); +} + +PassRefPtr<JITStubRoutine> createJITStubRoutine( + const MacroAssemblerCodeRef& code, + JSGlobalData& globalData, + const JSCell*, + bool makesCalls) +{ + if (!makesCalls) + return adoptRef(new JITStubRoutine(code)); + + return static_pointer_cast<JITStubRoutine>( + adoptRef(new GCAwareJITStubRoutine(code, globalData))); +} + +PassRefPtr<JITStubRoutine> createJITStubRoutine( + const MacroAssemblerCodeRef& code, + JSGlobalData& globalData, + const JSCell* owner, + bool makesCalls, + JSCell* object) +{ + if (!makesCalls) + return adoptRef(new JITStubRoutine(code)); + + return static_pointer_cast<JITStubRoutine>( + adoptRef(new MarkingGCAwareJITStubRoutineWithOneObject(code, globalData, owner, object))); +} + +} // namespace JSC + +#endif // ENABLE(JIT) + diff --git a/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h new file mode 100644 index 000000000..59bc76beb --- /dev/null +++ b/Source/JavaScriptCore/jit/GCAwareJITStubRoutine.h @@ -0,0 +1,124 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. 
+ * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef GCAwareJITStubRoutine_h +#define GCAwareJITStubRoutine_h + +#include <wtf/Platform.h> + +#if ENABLE(JIT) + +#include "JITStubRoutine.h" +#include "JSObject.h" +#include "JSString.h" +#include "WriteBarrier.h" +#include <wtf/RefCounted.h> +#include <wtf/Vector.h> + +namespace JSC { + +class JITStubRoutineSet; + +// Use this stub routine if you know that your code might be on stack when +// either GC or other kinds of stub deletion happen. Basically, if your stub +// routine makes calls (either to JS code or to C++ code) then you should +// assume that it's possible for that JS or C++ code to do something that +// causes the system to try to delete your routine.
Using this routine type +// ensures that the actual deletion is delayed until the GC proves that the +// routine is no longer running. You can also subclass this routine if you +// want to mark additional objects during GC in those cases where the +// routine is known to be executing, or if you want to force this routine to +// keep other routines alive (for example due to the use of a slow-path +// list which does not get reclaimed all at once). +class GCAwareJITStubRoutine : public JITStubRoutine { +public: + GCAwareJITStubRoutine(const MacroAssemblerCodeRef&, JSGlobalData&); + virtual ~GCAwareJITStubRoutine(); + + void markRequiredObjects(SlotVisitor& visitor) + { + markRequiredObjectsInternal(visitor); + } + + void deleteFromGC(); + +protected: + virtual void observeZeroRefCount(); + + virtual void markRequiredObjectsInternal(SlotVisitor&); + +private: + friend class JITStubRoutineSet; + + bool m_mayBeExecuting; + bool m_isJettisoned; +}; + +// Use this if you want to mark one additional object during GC if your stub +// routine is known to be executing. +class MarkingGCAwareJITStubRoutineWithOneObject : public GCAwareJITStubRoutine { +public: + MarkingGCAwareJITStubRoutineWithOneObject( + const MacroAssemblerCodeRef&, JSGlobalData&, const JSCell* owner, JSCell*); + virtual ~MarkingGCAwareJITStubRoutineWithOneObject(); + +protected: + virtual void markRequiredObjectsInternal(SlotVisitor&); + +private: + WriteBarrier<JSCell> m_object; +}; + +// Helper for easily creating a GC-aware JIT stub routine. For the varargs, +// pass zero or more JSCell*'s. This will either create a JITStubRoutine, a +// GCAwareJITStubRoutine, or an ObjectMarkingGCAwareJITStubRoutine as +// appropriate. Generally you only need to pass pointers that will be used +// after the first call to C++ or JS. 
+// +// PassRefPtr<JITStubRoutine> createJITStubRoutine( +// const MacroAssemblerCodeRef& code, +// JSGlobalData& globalData, +// const JSCell* owner, +// bool makesCalls, +// ...); +// +// Note that we don't actually use C-style varargs because that leads to +// strange type-related problems. For example it would preclude us from using +// our custom of passing '0' as NULL pointer. Besides, when I did try to write +// this function using varargs, I ended up with more code than this simple +// way. + +PassRefPtr<JITStubRoutine> createJITStubRoutine( + const MacroAssemblerCodeRef&, JSGlobalData&, const JSCell* owner, bool makesCalls); +PassRefPtr<JITStubRoutine> createJITStubRoutine( + const MacroAssemblerCodeRef&, JSGlobalData&, const JSCell* owner, bool makesCalls, + JSCell*); + +} // namespace JSC + +#endif // ENABLE(JIT) + +#endif // GCAwareJITStubRoutine_h + diff --git a/Source/JavaScriptCore/jit/JIT.cpp b/Source/JavaScriptCore/jit/JIT.cpp index e1e034b19..285355f1b 100644 --- a/Source/JavaScriptCore/jit/JIT.cpp +++ b/Source/JavaScriptCore/jit/JIT.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008, 2009 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2009, 2012 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -99,7 +99,7 @@ void JIT::emitOptimizationCheck(OptimizationCheckKind kind) if (!canBeOptimized()) return; - Jump skipOptimize = branchAdd32(Signed, TrustedImm32(kind == LoopOptimizationCheck ? Options::executionCounterIncrementForLoop : Options::executionCounterIncrementForReturn), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())); + Jump skipOptimize = branchAdd32(Signed, TrustedImm32(kind == LoopOptimizationCheck ? 
Options::executionCounterIncrementForLoop() : Options::executionCounterIncrementForReturn()), AbsoluteAddress(m_codeBlock->addressOfJITExecuteCounter())); JITStubCall stubCall(this, cti_optimize); stubCall.addArgument(TrustedImm32(m_bytecodeOffset)); if (kind == EnterOptimizationCheck) @@ -255,6 +255,7 @@ void JIT::privateCompileMainPass() DEFINE_OP(op_create_activation) DEFINE_OP(op_eq) DEFINE_OP(op_eq_null) + case op_get_by_id_out_of_line: DEFINE_OP(op_get_by_id) DEFINE_OP(op_get_arguments_length) DEFINE_OP(op_get_by_val) @@ -319,8 +320,11 @@ void JIT::privateCompileMainPass() DEFINE_OP(op_profile_will_call) DEFINE_OP(op_push_new_scope) DEFINE_OP(op_push_scope) + case op_put_by_id_out_of_line: case op_put_by_id_transition_direct: case op_put_by_id_transition_normal: + case op_put_by_id_transition_direct_out_of_line: + case op_put_by_id_transition_normal_out_of_line: DEFINE_OP(op_put_by_id) DEFINE_OP(op_put_by_index) DEFINE_OP(op_put_by_val) @@ -441,6 +445,7 @@ void JIT::privateCompileSlowCases() DEFINE_SLOWCASE_OP(op_create_this) DEFINE_SLOWCASE_OP(op_div) DEFINE_SLOWCASE_OP(op_eq) + case op_get_by_id_out_of_line: DEFINE_SLOWCASE_OP(op_get_by_id) DEFINE_SLOWCASE_OP(op_get_arguments_length) DEFINE_SLOWCASE_OP(op_get_by_val) @@ -472,16 +477,17 @@ void JIT::privateCompileSlowCases() DEFINE_SLOWCASE_OP(op_neq) DEFINE_SLOWCASE_OP(op_new_array) DEFINE_SLOWCASE_OP(op_new_object) - DEFINE_SLOWCASE_OP(op_new_func) - DEFINE_SLOWCASE_OP(op_new_func_exp) DEFINE_SLOWCASE_OP(op_not) DEFINE_SLOWCASE_OP(op_nstricteq) DEFINE_SLOWCASE_OP(op_post_dec) DEFINE_SLOWCASE_OP(op_post_inc) DEFINE_SLOWCASE_OP(op_pre_dec) DEFINE_SLOWCASE_OP(op_pre_inc) + case op_put_by_id_out_of_line: case op_put_by_id_transition_direct: case op_put_by_id_transition_normal: + case op_put_by_id_transition_direct_out_of_line: + case op_put_by_id_transition_normal_out_of_line: DEFINE_SLOWCASE_OP(op_put_by_id) DEFINE_SLOWCASE_OP(op_put_by_val) DEFINE_SLOWCASE_OP(op_put_global_var_check); @@ -539,6 +545,7 @@ 
ALWAYS_INLINE void PropertyStubCompilationInfo::copyToStubInfo(StructureStubInfo CodeLocationLabel hotPathBeginLocation = linkBuffer.locationOf(hotPathBegin); info.patch.baseline.u.get.structureToCompare = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getStructureToCompare)); info.patch.baseline.u.get.structureCheck = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getStructureCheck)); + info.patch.baseline.u.get.propertyStorageLoad = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(propertyStorageLoad)); #if USE(JSVALUE64) info.patch.baseline.u.get.displacementLabel = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(getDisplacementLabel)); #else @@ -552,6 +559,7 @@ ALWAYS_INLINE void PropertyStubCompilationInfo::copyToStubInfo(StructureStubInfo case PutById: CodeLocationLabel hotPathBeginLocation = linkBuffer.locationOf(hotPathBegin); info.patch.baseline.u.put.structureToCompare = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putStructureToCompare)); + info.patch.baseline.u.put.propertyStorageLoad = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(propertyStorageLoad)); #if USE(JSVALUE64) info.patch.baseline.u.put.displacementLabel = MacroAssembler::differenceBetweenCodePtr(hotPathBeginLocation, linkBuffer.locationOf(putDisplacementLabel)); #else @@ -763,7 +771,9 @@ JITCode JIT::privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffo *functionEntryArityCheck = patchBuffer.locationOf(arityCheck); CodeRef result = FINALIZE_CODE( - patchBuffer, ("Baseline JIT code for CodeBlock %p", m_codeBlock)); + patchBuffer, + ("Baseline JIT code for CodeBlock %p, instruction count = %u", + m_codeBlock, m_codeBlock->instructionCount())); m_globalData->machineCodeBytesPerBytecodeWordForBaselineJIT.add( static_cast<double>(result.size()) / diff --git 
a/Source/JavaScriptCore/jit/JIT.h b/Source/JavaScriptCore/jit/JIT.h index 6d4c578c0..987c4a163 100644 --- a/Source/JavaScriptCore/jit/JIT.h +++ b/Source/JavaScriptCore/jit/JIT.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2012 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -157,6 +157,7 @@ namespace JSC { MacroAssembler::Label hotPathBegin; MacroAssembler::DataLabelPtr getStructureToCompare; MacroAssembler::PatchableJump getStructureCheck; + MacroAssembler::ConvertibleLoadLabel propertyStorageLoad; #if USE(JSVALUE64) MacroAssembler::DataLabelCompact getDisplacementLabel; #else @@ -185,17 +186,24 @@ namespace JSC { #endif - PropertyStubCompilationInfo(PropertyStubGetById_T, unsigned bytecodeIndex, MacroAssembler::Label hotPathBegin, + PropertyStubCompilationInfo( + PropertyStubGetById_T, unsigned bytecodeIndex, MacroAssembler::Label hotPathBegin, + MacroAssembler::DataLabelPtr structureToCompare, + MacroAssembler::PatchableJump structureCheck, + MacroAssembler::ConvertibleLoadLabel propertyStorageLoad, #if USE(JSVALUE64) - MacroAssembler::DataLabelPtr structureToCompare, MacroAssembler::PatchableJump structureCheck, MacroAssembler::DataLabelCompact displacementLabel, MacroAssembler::Label putResult) + MacroAssembler::DataLabelCompact displacementLabel, #else - MacroAssembler::DataLabelPtr structureToCompare, MacroAssembler::PatchableJump structureCheck, MacroAssembler::DataLabelCompact displacementLabel1, MacroAssembler::DataLabelCompact displacementLabel2, MacroAssembler::Label putResult) + MacroAssembler::DataLabelCompact displacementLabel1, + MacroAssembler::DataLabelCompact displacementLabel2, #endif + MacroAssembler::Label putResult) : m_type(GetById) , bytecodeIndex(bytecodeIndex) , hotPathBegin(hotPathBegin) , getStructureToCompare(structureToCompare) , 
getStructureCheck(structureCheck) + , propertyStorageLoad(propertyStorageLoad) #if USE(JSVALUE64) , getDisplacementLabel(displacementLabel) #else @@ -206,15 +214,21 @@ namespace JSC { { } - PropertyStubCompilationInfo(PropertyStubPutById_T, unsigned bytecodeIndex, MacroAssembler::Label hotPathBegin, + PropertyStubCompilationInfo( + PropertyStubPutById_T, unsigned bytecodeIndex, MacroAssembler::Label hotPathBegin, + MacroAssembler::DataLabelPtr structureToCompare, + MacroAssembler::ConvertibleLoadLabel propertyStorageLoad, #if USE(JSVALUE64) - MacroAssembler::DataLabelPtr structureToCompare, MacroAssembler::DataLabel32 displacementLabel) + MacroAssembler::DataLabel32 displacementLabel #else - MacroAssembler::DataLabelPtr structureToCompare, MacroAssembler::DataLabel32 displacementLabel1, MacroAssembler::DataLabel32 displacementLabel2) + MacroAssembler::DataLabel32 displacementLabel1, + MacroAssembler::DataLabel32 displacementLabel2 #endif + ) : m_type(PutById) , bytecodeIndex(bytecodeIndex) , hotPathBegin(hotPathBegin) + , propertyStorageLoad(propertyStorageLoad) , putStructureToCompare(structureToCompare) #if USE(JSVALUE64) , putDisplacementLabel(displacementLabel) @@ -295,40 +309,40 @@ namespace JSC { return JIT(globalData, codeBlock).privateCompile(functionEntryArityCheck, effort); } - static void compileGetByIdProto(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress) + static void compileGetByIdProto(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress) { JIT jit(globalData, codeBlock); jit.m_bytecodeOffset = stubInfo->bytecodeIndex; 
jit.privateCompileGetByIdProto(stubInfo, structure, prototypeStructure, ident, slot, cachedOffset, returnAddress, callFrame); } - static void compileGetByIdSelfList(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset) + static void compileGetByIdSelfList(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset) { JIT jit(globalData, codeBlock); jit.m_bytecodeOffset = stubInfo->bytecodeIndex; jit.privateCompileGetByIdSelfList(stubInfo, polymorphicStructures, currentIndex, structure, ident, slot, cachedOffset); } - static void compileGetByIdProtoList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset) + static void compileGetByIdProtoList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset) { JIT jit(globalData, codeBlock); jit.m_bytecodeOffset = stubInfo->bytecodeIndex; jit.privateCompileGetByIdProtoList(stubInfo, prototypeStructureList, currentIndex, structure, prototypeStructure, ident, slot, cachedOffset, callFrame); } - static void compileGetByIdChainList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* 
prototypeStructureList, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset) + static void compileGetByIdChainList(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructureList, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset) { JIT jit(globalData, codeBlock); jit.m_bytecodeOffset = stubInfo->bytecodeIndex; jit.privateCompileGetByIdChainList(stubInfo, prototypeStructureList, currentIndex, structure, chain, count, ident, slot, cachedOffset, callFrame); } - static void compileGetByIdChain(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress) + static void compileGetByIdChain(JSGlobalData* globalData, CallFrame* callFrame, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress) { JIT jit(globalData, codeBlock); jit.m_bytecodeOffset = stubInfo->bytecodeIndex; jit.privateCompileGetByIdChain(stubInfo, structure, chain, count, ident, slot, cachedOffset, returnAddress, callFrame); } - static void compilePutByIdTransition(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct) + static void compilePutByIdTransition(JSGlobalData* globalData, CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, PropertyOffset 
cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct) { JIT jit(globalData, codeBlock); jit.m_bytecodeOffset = stubInfo->bytecodeIndex; @@ -358,9 +372,9 @@ namespace JSC { static void resetPatchGetById(RepatchBuffer&, StructureStubInfo*); static void resetPatchPutById(RepatchBuffer&, StructureStubInfo*); - static void patchGetByIdSelf(CodeBlock* codeblock, StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress); - static void patchPutByIdReplace(CodeBlock* codeblock, StructureStubInfo*, Structure*, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct); - static void patchMethodCallProto(JSGlobalData&, CodeBlock* codeblock, MethodCallLinkInfo&, StructureStubInfo&, JSObject*, Structure*, JSObject*, ReturnAddressPtr); + static void patchGetByIdSelf(CodeBlock*, StructureStubInfo*, Structure*, PropertyOffset cachedOffset, ReturnAddressPtr); + static void patchPutByIdReplace(CodeBlock*, StructureStubInfo*, Structure*, PropertyOffset cachedOffset, ReturnAddressPtr, bool direct); + static void patchMethodCallProto(JSGlobalData&, CodeBlock*, MethodCallLinkInfo&, StructureStubInfo&, JSObject*, Structure*, JSObject*, ReturnAddressPtr); static void compilePatchGetArrayLength(JSGlobalData* globalData, CodeBlock* codeBlock, ReturnAddressPtr returnAddress) { @@ -377,12 +391,12 @@ namespace JSC { void privateCompileLinkPass(); void privateCompileSlowCases(); JITCode privateCompile(CodePtr* functionEntryArityCheck, JITCompilationEffort); - void privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame); - void privateCompileGetByIdSelfList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, const Identifier&, const PropertySlot&, size_t cachedOffset); - void privateCompileGetByIdProtoList(StructureStubInfo*, PolymorphicAccessStructureList*, int, 
Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, size_t cachedOffset, CallFrame* callFrame); - void privateCompileGetByIdChainList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, StructureChain* chain, size_t count, const Identifier&, const PropertySlot&, size_t cachedOffset, CallFrame* callFrame); - void privateCompileGetByIdChain(StructureStubInfo*, Structure*, StructureChain*, size_t count, const Identifier&, const PropertySlot&, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame); - void privateCompilePutByIdTransition(StructureStubInfo*, Structure*, Structure*, size_t cachedOffset, StructureChain*, ReturnAddressPtr returnAddress, bool direct); + void privateCompileGetByIdProto(StructureStubInfo*, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, PropertyOffset cachedOffset, ReturnAddressPtr, CallFrame*); + void privateCompileGetByIdSelfList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, const Identifier&, const PropertySlot&, PropertyOffset cachedOffset); + void privateCompileGetByIdProtoList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, Structure* prototypeStructure, const Identifier&, const PropertySlot&, PropertyOffset cachedOffset, CallFrame*); + void privateCompileGetByIdChainList(StructureStubInfo*, PolymorphicAccessStructureList*, int, Structure*, StructureChain*, size_t count, const Identifier&, const PropertySlot&, PropertyOffset cachedOffset, CallFrame*); + void privateCompileGetByIdChain(StructureStubInfo*, Structure*, StructureChain*, size_t count, const Identifier&, const PropertySlot&, PropertyOffset cachedOffset, ReturnAddressPtr, CallFrame*); + void privateCompilePutByIdTransition(StructureStubInfo*, Structure*, Structure*, PropertyOffset cachedOffset, StructureChain*, ReturnAddressPtr, bool direct); PassRefPtr<ExecutableMemoryHandle> privateCompileCTIMachineTrampolines(JSGlobalData*, 
TrampolineStructure*); Label privateCompileCTINativeCall(JSGlobalData*, bool isConstruct = false); @@ -423,7 +437,6 @@ namespace JSC { template<typename ClassType, bool destructor, typename StructureType> void emitAllocateBasicJSObject(StructureType, RegisterID result, RegisterID storagePtr); void emitAllocateBasicStorage(size_t, RegisterID result, RegisterID storagePtr); template<typename T> void emitAllocateJSFinalObject(T structure, RegisterID result, RegisterID storagePtr); - void emitAllocateJSFunction(FunctionExecutable*, RegisterID scopeChain, RegisterID result, RegisterID storagePtr); void emitAllocateJSArray(unsigned valuesRegister, unsigned length, RegisterID cellResult, RegisterID storageResult, RegisterID storagePtr); #if ENABLE(VALUE_PROFILER) @@ -437,6 +450,8 @@ namespace JSC { void emitValueProfilingSite() { } #endif + enum FinalObjectMode { MayBeFinal, KnownNotFinal }; + #if USE(JSVALUE32_64) bool getOperandConstantImmediateInt(unsigned op1, unsigned op2, unsigned& op, int32_t& constant); @@ -469,10 +484,10 @@ namespace JSC { void compileGetByIdHotPath(); void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck = false); - void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset); - void compileGetDirectOffset(JSObject* base, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset); - void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID offset); - void compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, size_t cachedOffset); + void compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, PropertyOffset cachedOffset); + void compileGetDirectOffset(JSObject* base, RegisterID resultTag, RegisterID resultPayload, PropertyOffset cachedOffset); + void compileGetDirectOffset(RegisterID base, 
RegisterID resultTag, RegisterID resultPayload, RegisterID offset, FinalObjectMode = MayBeFinal); + void compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, PropertyOffset cachedOffset); // Arithmetic opcode helpers void emitAdd32Constant(unsigned dst, unsigned op, int32_t constant, ResultType opType); @@ -548,10 +563,10 @@ namespace JSC { void compileGetByIdHotPath(int baseVReg, Identifier*); void compileGetByIdSlowCase(int resultVReg, int baseVReg, Identifier* ident, Vector<SlowCaseEntry>::iterator& iter, bool isMethodCheck = false); - void compileGetDirectOffset(RegisterID base, RegisterID result, size_t cachedOffset); - void compileGetDirectOffset(JSObject* base, RegisterID result, size_t cachedOffset); - void compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch); - void compilePutDirectOffset(RegisterID base, RegisterID value, size_t cachedOffset); + void compileGetDirectOffset(RegisterID base, RegisterID result, PropertyOffset cachedOffset); + void compileGetDirectOffset(JSObject* base, RegisterID result, PropertyOffset cachedOffset); + void compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch, FinalObjectMode = MayBeFinal); + void compilePutDirectOffset(RegisterID base, RegisterID value, PropertyOffset cachedOffset); #endif // USE(JSVALUE32_64) @@ -750,8 +765,6 @@ namespace JSC { void emitSlow_op_to_jsnumber(Instruction*, Vector<SlowCaseEntry>::iterator&); void emitSlow_op_to_primitive(Instruction*, Vector<SlowCaseEntry>::iterator&); void emitSlow_op_urshift(Instruction*, Vector<SlowCaseEntry>::iterator&); - void emitSlow_op_new_func(Instruction*, Vector<SlowCaseEntry>::iterator&); - void emitSlow_op_new_func_exp(Instruction*, Vector<SlowCaseEntry>::iterator&); void emitSlow_op_new_array(Instruction*, Vector<SlowCaseEntry>::iterator&); void emitRightShift(Instruction*, bool isUnsigned); diff --git 
a/Source/JavaScriptCore/jit/JITInlineMethods.h b/Source/JavaScriptCore/jit/JITInlineMethods.h index 40985ac90..d1cee7ef7 100644 --- a/Source/JavaScriptCore/jit/JITInlineMethods.h +++ b/Source/JavaScriptCore/jit/JITInlineMethods.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2012 Apple Inc. All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -429,8 +429,7 @@ template <typename ClassType, bool destructor, typename StructureType> inline vo storePtr(TrustedImmPtr(0), Address(result, JSObject::offsetOfInheritorID())); // initialize the object's property storage pointer - addPtr(TrustedImm32(sizeof(JSObject)), result, storagePtr); - storePtr(storagePtr, Address(result, ClassType::offsetOfPropertyStorage())); + storePtr(TrustedImmPtr(0), Address(result, ClassType::offsetOfOutOfLineStorage())); } template <typename T> inline void JIT::emitAllocateJSFinalObject(T structure, RegisterID result, RegisterID scratch) @@ -438,28 +437,6 @@ template <typename T> inline void JIT::emitAllocateJSFinalObject(T structure, Re emitAllocateBasicJSObject<JSFinalObject, false, T>(structure, result, scratch); } -inline void JIT::emitAllocateJSFunction(FunctionExecutable* executable, RegisterID scopeChain, RegisterID result, RegisterID storagePtr) -{ - emitAllocateBasicJSObject<JSFunction, true>(TrustedImmPtr(m_codeBlock->globalObject()->namedFunctionStructure()), result, storagePtr); - - // store the function's scope chain - storePtr(scopeChain, Address(result, JSFunction::offsetOfScopeChain())); - - // store the function's executable member - storePtr(TrustedImmPtr(executable), Address(result, JSFunction::offsetOfExecutable())); - - // clear the function's inheritorID - storePtr(TrustedImmPtr(0), Address(result, JSFunction::offsetOfCachedInheritorID())); - - // store the function's name - ASSERT(executable->nameValue()); - int 
functionNameOffset = sizeof(JSValue) * m_codeBlock->globalObject()->functionNameOffset(); - storePtr(TrustedImmPtr(executable->nameValue()), Address(regT1, functionNameOffset + OBJECT_OFFSETOF(JSValue, u.asBits.payload))); -#if USE(JSVALUE32_64) - store32(TrustedImm32(JSValue::CellTag), Address(regT1, functionNameOffset + OBJECT_OFFSETOF(JSValue, u.asBits.tag))); -#endif -} - inline void JIT::emitAllocateBasicStorage(size_t size, RegisterID result, RegisterID storagePtr) { CopiedAllocator* allocator = &m_globalData->heap.storageAllocator(); diff --git a/Source/JavaScriptCore/jit/JITOpcodes.cpp b/Source/JavaScriptCore/jit/JITOpcodes.cpp index 2e448dd52..c0af6f9e9 100644 --- a/Source/JavaScriptCore/jit/JITOpcodes.cpp +++ b/Source/JavaScriptCore/jit/JITOpcodes.cpp @@ -1,5 +1,5 @@ /* - * Copyright (C) 2009 Apple Inc. All rights reserved. + * Copyright (C) 2009, 2012 Apple Inc. All rights reserved. * Copyright (C) 2010 Patrick Gansterer <paroga@paroga.com> * * Redistribution and use in source and binary forms, with or without @@ -701,9 +701,8 @@ void JIT::emit_op_resolve_global(Instruction* currentInstruction, bool) // Load cached property // Assume that the global object always uses external storage. 
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSGlobalObject, m_propertyStorage)), regT0); load32(Address(regT2, OBJECT_OFFSETOF(GlobalResolveInfo, offset)), regT1); - loadPtr(BaseIndex(regT0, regT1, ScalePtr), regT0); + compileGetDirectOffset(regT0, regT0, regT1, regT0, KnownNotFinal); emitValueProfilingSite(); emitPutVirtualRegister(currentInstruction[1].u.operand); } @@ -1618,11 +1617,9 @@ void JIT::emit_op_new_func(Instruction* currentInstruction) #endif } - FunctionExecutable* executable = m_codeBlock->functionDecl(currentInstruction[2].u.operand); - emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2); - emitAllocateJSFunction(executable, regT2, regT0, regT1); - - emitStoreCell(dst, regT0); + JITStubCall stubCall(this, cti_op_new_func); + stubCall.addArgument(TrustedImmPtr(m_codeBlock->functionDecl(currentInstruction[2].u.operand))); + stubCall.call(dst); if (currentInstruction[3].u.operand) { #if USE(JSVALUE32_64) @@ -1634,44 +1631,13 @@ void JIT::emit_op_new_func(Instruction* currentInstruction) } } -void JIT::emitSlow_op_new_func(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) -{ - linkSlowCase(iter); - JITStubCall stubCall(this, cti_op_new_func); - stubCall.addArgument(TrustedImmPtr(m_codeBlock->functionDecl(currentInstruction[2].u.operand))); - stubCall.call(currentInstruction[1].u.operand); -} - void JIT::emit_op_new_func_exp(Instruction* currentInstruction) { - FunctionExecutable* executable = m_codeBlock->functionExpr(currentInstruction[2].u.operand); - - // We only inline the allocation of a anonymous function expressions - // If we want to be able to allocate a named function expression, we would - // need to be able to do inline allocation of a JSStaticScopeObject. 
- if (executable->name().isNull()) { - emitGetFromCallFrameHeaderPtr(RegisterFile::ScopeChain, regT2); - emitAllocateJSFunction(executable, regT2, regT0, regT1); - emitStoreCell(currentInstruction[1].u.operand, regT0); - return; - } - JITStubCall stubCall(this, cti_op_new_func_exp); stubCall.addArgument(TrustedImmPtr(m_codeBlock->functionExpr(currentInstruction[2].u.operand))); stubCall.call(currentInstruction[1].u.operand); } -void JIT::emitSlow_op_new_func_exp(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) -{ - FunctionExecutable* executable = m_codeBlock->functionExpr(currentInstruction[2].u.operand); - if (!executable->name().isNull()) - return; - linkSlowCase(iter); - JITStubCall stubCall(this, cti_op_new_func_exp); - stubCall.addArgument(TrustedImmPtr(executable)); - stubCall.call(currentInstruction[1].u.operand); -} - void JIT::emit_op_new_array(Instruction* currentInstruction) { int length = currentInstruction[3].u.operand; diff --git a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp index 4f8589557..095ea57d3 100644 --- a/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp +++ b/Source/JavaScriptCore/jit/JITOpcodes32_64.cpp @@ -794,16 +794,14 @@ void JIT::emit_op_resolve_global(Instruction* currentInstruction, bool dynamic) // Verify structure. - move(TrustedImmPtr(globalObject), regT0); + move(TrustedImmPtr(globalObject), regT2); move(TrustedImmPtr(resolveInfoAddress), regT3); loadPtr(Address(regT3, OBJECT_OFFSETOF(GlobalResolveInfo, structure)), regT1); - addSlowCase(branchPtr(NotEqual, regT1, Address(regT0, JSCell::structureOffset()))); + addSlowCase(branchPtr(NotEqual, regT1, Address(regT2, JSCell::structureOffset()))); // Load property. 
- loadPtr(Address(regT0, OBJECT_OFFSETOF(JSGlobalObject, m_propertyStorage)), regT2); load32(Address(regT3, OBJECT_OFFSETOF(GlobalResolveInfo, offset)), regT3); - load32(BaseIndex(regT2, regT3, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), regT0); // payload - load32(BaseIndex(regT2, regT3, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), regT1); // tag + compileGetDirectOffset(regT2, regT1, regT0, regT3, KnownNotFinal); emitValueProfilingSite(); emitStore(dst, regT1, regT0); map(m_bytecodeOffset + (dynamic ? OPCODE_LENGTH(op_resolve_global_dynamic) : OPCODE_LENGTH(op_resolve_global)), dst, regT1, regT0); diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp index 7478f9184..466cff7db 100644 --- a/Source/JavaScriptCore/jit/JITPropertyAccess.cpp +++ b/Source/JavaScriptCore/jit/JITPropertyAccess.cpp @@ -29,6 +29,7 @@ #include "JIT.h" #include "CodeBlock.h" +#include "GCAwareJITStubRoutine.h" #include "GetterSetter.h" #include "Interpreter.h" #include "JITInlineMethods.h" @@ -151,10 +152,26 @@ void JIT::emitSlow_op_get_by_val(Instruction* currentInstruction, Vector<SlowCas emitValueProfilingSite(); } -void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch) +void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, RegisterID offset, RegisterID scratch, FinalObjectMode finalObjectMode) { - loadPtr(Address(base, JSObject::offsetOfPropertyStorage()), scratch); - loadPtr(BaseIndex(scratch, offset, ScalePtr, 0), result); + ASSERT(sizeof(JSValue) == 8); + + if (finalObjectMode == MayBeFinal) { + Jump isInline = branch32(LessThan, offset, TrustedImm32(inlineStorageCapacity)); + loadPtr(Address(base, JSObject::offsetOfOutOfLineStorage()), scratch); + Jump done = jump(); + isInline.link(this); + addPtr(TrustedImm32(JSObject::offsetOfInlineStorage() + inlineStorageCapacity * sizeof(EncodedJSValue)), base, scratch); + done.link(this); + } else { 
+#if !ASSERT_DISABLED + Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(inlineStorageCapacity)); + breakpoint(); + isOutOfLine.link(this); +#endif + loadPtr(Address(base, JSObject::offsetOfOutOfLineStorage()), scratch); + } + loadPtr(BaseIndex(scratch, offset, ScalePtr, -inlineStorageCapacity * static_cast<ptrdiff_t>(sizeof(JSValue))), result); } void JIT::emit_op_get_by_pname(Instruction* currentInstruction) @@ -177,6 +194,7 @@ void JIT::emit_op_get_by_pname(Instruction* currentInstruction) load32(addressFor(i), regT3); sub32(TrustedImm32(1), regT3); addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots)))); + add32(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_offsetBase)), regT3); compileGetDirectOffset(regT0, regT0, regT3, regT1); emitPutVirtualRegister(dst, regT0); @@ -283,7 +301,8 @@ void JIT::emit_op_del_by_id(Instruction* currentInstruction) void JIT::emit_op_method_check(Instruction* currentInstruction) { // Assert that the following instruction is a get_by_id. 
- ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id); + ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id + || m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id_out_of_line); currentInstruction += OPCODE_LENGTH(op_method_check); unsigned resultVReg = currentInstruction[1].u.operand; @@ -373,14 +392,14 @@ void JIT::compileGetByIdHotPath(int baseVReg, Identifier*) PatchableJump structureCheck = patchableBranchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))); addSlowCase(structureCheck); - loadPtr(Address(regT0, JSObject::offsetOfPropertyStorage()), regT0); + ConvertibleLoadLabel propertyStorageLoad = convertibleLoadPtr(Address(regT0, JSObject::offsetOfOutOfLineStorage()), regT0); DataLabelCompact displacementLabel = loadPtrWithCompactAddressOffsetPatch(Address(regT0, patchGetByIdDefaultOffset), regT0); Label putResult(this); END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath); - m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubGetById, m_bytecodeOffset, hotPathBegin, structureToCompare, structureCheck, displacementLabel, putResult)); + m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubGetById, m_bytecodeOffset, hotPathBegin, structureToCompare, structureCheck, propertyStorageLoad, displacementLabel, putResult)); } void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) @@ -440,14 +459,14 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction) DataLabelPtr structureToCompare; addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)))); - 
loadPtr(Address(regT0, JSObject::offsetOfPropertyStorage()), regT2); + ConvertibleLoadLabel propertyStorageLoad = convertibleLoadPtr(Address(regT0, JSObject::offsetOfOutOfLineStorage()), regT2); DataLabel32 displacementLabel = storePtrWithAddressOffsetPatch(regT1, Address(regT2, patchPutByIdDefaultOffset)); END_UNINTERRUPTED_SEQUENCE(sequencePutById); emitWriteBarrier(regT0, regT1, regT2, regT3, ShouldFilterImmediates, WriteBarrierForPropertyAccess); - m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubPutById, m_bytecodeOffset, hotPathBegin, structureToCompare, displacementLabel)); + m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubPutById, m_bytecodeOffset, hotPathBegin, structureToCompare, propertyStorageLoad, displacementLabel)); } void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) @@ -471,28 +490,41 @@ void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCase // Compile a store into an object's property storage. May overwrite the // value in objectReg. -void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, size_t cachedOffset) +void JIT::compilePutDirectOffset(RegisterID base, RegisterID value, PropertyOffset cachedOffset) { - int offset = cachedOffset * sizeof(JSValue); - loadPtr(Address(base, JSObject::offsetOfPropertyStorage()), base); - storePtr(value, Address(base, offset)); + if (isInlineOffset(cachedOffset)) { + storePtr(value, Address(base, JSObject::offsetOfInlineStorage() + sizeof(JSValue) * offsetInInlineStorage(cachedOffset))); + return; + } + + loadPtr(Address(base, JSObject::offsetOfOutOfLineStorage()), base); + storePtr(value, Address(base, sizeof(JSValue) * offsetInOutOfLineStorage(cachedOffset))); } // Compile a load from an object's property storage. May overwrite base. 
-void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, size_t cachedOffset) +void JIT::compileGetDirectOffset(RegisterID base, RegisterID result, PropertyOffset cachedOffset) { - int offset = cachedOffset * sizeof(JSValue); - loadPtr(Address(base, JSObject::offsetOfPropertyStorage()), result); - loadPtr(Address(result, offset), result); + if (isInlineOffset(cachedOffset)) { + loadPtr(Address(base, JSObject::offsetOfInlineStorage() + sizeof(JSValue) * offsetInInlineStorage(cachedOffset)), result); + return; + } + + loadPtr(Address(base, JSObject::offsetOfOutOfLineStorage()), result); + loadPtr(Address(result, sizeof(JSValue) * offsetInOutOfLineStorage(cachedOffset)), result); } -void JIT::compileGetDirectOffset(JSObject* base, RegisterID result, size_t cachedOffset) +void JIT::compileGetDirectOffset(JSObject* base, RegisterID result, PropertyOffset cachedOffset) { - loadPtr(base->addressOfPropertyStorage(), result); - loadPtr(Address(result, cachedOffset * sizeof(WriteBarrier<Unknown>)), result); + if (isInlineOffset(cachedOffset)) { + loadPtr(base->locationForOffset(cachedOffset), result); + return; + } + + loadPtr(base->addressOfOutOfLineStorage(), result); + loadPtr(Address(result, offsetInOutOfLineStorage(cachedOffset) * sizeof(WriteBarrier<Unknown>)), result); } -void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct) +void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, PropertyOffset cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct) { JumpList failureCases; // Check eax is an object of the right Structure. 
@@ -522,7 +554,7 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure #endif // emit a call only if storage realloc is needed - bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity(); + bool willNeedStorageRealloc = oldStructure->outOfLineCapacity() != newStructure->outOfLineCapacity(); if (willNeedStorageRealloc) { // This trampoline was called to like a JIT stub; before we can can call again we need to // remove the return address from the stack, to prevent the stack from becoming misaligned. @@ -532,7 +564,7 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure stubCall.skipArgument(); // base stubCall.skipArgument(); // ident stubCall.skipArgument(); // value - stubCall.addArgument(TrustedImm32(oldStructure->propertyStorageCapacity())); + stubCall.addArgument(TrustedImm32(oldStructure->outOfLineCapacity())); stubCall.addArgument(TrustedImmPtr(newStructure)); stubCall.call(regT0); emitGetJITStubArg(2, regT1); @@ -564,15 +596,20 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc)); } - stubInfo->stubRoutine = FINALIZE_CODE( - patchBuffer, - ("Baseline put_by_id transition for CodeBlock %p, return point %p", - m_codeBlock, returnAddress.value())); + stubInfo->stubRoutine = createJITStubRoutine( + FINALIZE_CODE( + patchBuffer, + ("Baseline put_by_id transition for CodeBlock %p, return point %p", + m_codeBlock, returnAddress.value())), + *m_globalData, + m_codeBlock->ownerExecutable(), + willNeedStorageRealloc, + newStructure); RepatchBuffer repatchBuffer(m_codeBlock); - repatchBuffer.relinkCallerToTrampoline(returnAddress, CodeLocationLabel(stubInfo->stubRoutine.code())); + repatchBuffer.relinkCallerToTrampoline(returnAddress, CodeLocationLabel(stubInfo->stubRoutine->code().code())); } -void JIT::patchGetByIdSelf(CodeBlock* codeBlock, 
StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress) +void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress) { RepatchBuffer repatchBuffer(codeBlock); @@ -580,14 +617,13 @@ void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, St // Should probably go to cti_op_get_by_id_fail, but that doesn't do anything interesting right now. repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail)); - int offset = sizeof(JSValue) * cachedOffset; - // Patch the offset into the propoerty map to load from, then patch the Structure to look for. repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.get.structureToCompare), structure); - repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel), offset); + repatchBuffer.setLoadInstructionIsActive(stubInfo->hotPathBegin.convertibleLoadAtOffset(stubInfo->patch.baseline.u.get.propertyStorageLoad), isOutOfLineOffset(cachedOffset)); + repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel), offsetRelativeToPatchedStorage(cachedOffset)); } -void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct) +void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress, bool direct) { RepatchBuffer repatchBuffer(codeBlock); @@ -595,11 +631,10 @@ void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now. 
repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic)); - int offset = sizeof(JSValue) * cachedOffset; - // Patch the offset into the propoerty map to load from, then patch the Structure to look for. repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.put.structureToCompare), structure); - repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel), offset); + repatchBuffer.setLoadInstructionIsActive(stubInfo->hotPathBegin.convertibleLoadAtOffset(stubInfo->patch.baseline.u.put.propertyStorageLoad), isOutOfLineOffset(cachedOffset)); + repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel), offsetRelativeToPatchedStorage(cachedOffset)); } void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress) @@ -628,7 +663,7 @@ void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress) patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult)); // Track the stub we have created so that it will be deleted later. - stubInfo->stubRoutine = FINALIZE_CODE( + stubInfo->stubRoutine = FINALIZE_CODE_FOR_STUB( patchBuffer, ("Basline JIT get_by_id array length stub for CodeBlock %p, return point %p", m_codeBlock, stubInfo->hotPathBegin.labelAtOffset( @@ -637,13 +672,13 @@ void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress) // Finally patch the jump to slow case back in the hot path to jump here instead. 
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck); RepatchBuffer repatchBuffer(m_codeBlock); - repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine.code())); + repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine->code().code())); // We don't want to patch more than once - in future go to cti_op_put_by_id_generic. repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail)); } -void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame) +void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame) { // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is // referencing the prototype object - let's speculatively load it's table nice and early!) @@ -695,22 +730,26 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str } } // Track the stub we have created so that it will be deleted later. 
- stubInfo->stubRoutine = FINALIZE_CODE( - patchBuffer, - ("Baseline JIT get_by_id proto stub for CodeBlock %p, return point %p", - m_codeBlock, stubInfo->hotPathBegin.labelAtOffset( - stubInfo->patch.baseline.u.get.putResult).executableAddress())); + stubInfo->stubRoutine = createJITStubRoutine( + FINALIZE_CODE( + patchBuffer, + ("Baseline JIT get_by_id proto stub for CodeBlock %p, return point %p", + m_codeBlock, stubInfo->hotPathBegin.labelAtOffset( + stubInfo->patch.baseline.u.get.putResult).executableAddress())), + *m_globalData, + m_codeBlock->ownerExecutable(), + needsStubLink); // Finally patch the jump to slow case back in the hot path to jump here instead. CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck); RepatchBuffer repatchBuffer(m_codeBlock); - repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine.code())); + repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine->code().code())); // We don't want to patch more than once - in future go to cti_op_put_by_id_generic. repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list)); } -void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset) +void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset) { Jump failureCase = checkStructure(regT0, structure); bool needsStubLink = false; @@ -747,7 +786,7 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic } // Use the patch information to link the failure cases back to the original slow case routine. 
- CodeLocationLabel lastProtoBegin = CodeLocationLabel(polymorphicStructures->list[currentIndex - 1].stubRoutine.code()); + CodeLocationLabel lastProtoBegin = CodeLocationLabel(JITStubRoutine::asCodePtr(polymorphicStructures->list[currentIndex - 1].stubRoutine)); if (!lastProtoBegin) lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin); @@ -756,21 +795,25 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic // On success return back to the hot patch code, at a point it will perform the store to dest for us. patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult)); - MacroAssemblerCodeRef stubCode = FINALIZE_CODE( - patchBuffer, - ("Baseline JIT get_by_id list stub for CodeBlock %p, return point %p", - m_codeBlock, stubInfo->hotPathBegin.labelAtOffset( - stubInfo->patch.baseline.u.get.putResult).executableAddress())); + RefPtr<JITStubRoutine> stubCode = createJITStubRoutine( + FINALIZE_CODE( + patchBuffer, + ("Baseline JIT get_by_id list stub for CodeBlock %p, return point %p", + m_codeBlock, stubInfo->hotPathBegin.labelAtOffset( + stubInfo->patch.baseline.u.get.putResult).executableAddress())), + *m_globalData, + m_codeBlock->ownerExecutable(), + needsStubLink); polymorphicStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), stubCode, structure, isDirect); // Finally patch the jump to slow case back in the hot path to jump here instead. 
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck); RepatchBuffer repatchBuffer(m_codeBlock); - repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubCode.code())); + repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubCode->code().code())); } -void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame) +void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, CallFrame* callFrame) { // The prototype object definitely exists (if this stub exists the CodeBlock is referencing a Structure that is // referencing the prototype object - let's speculatively load it's table nice and early!) @@ -819,27 +862,31 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi } // Use the patch information to link the failure cases back to the original slow case routine. - CodeLocationLabel lastProtoBegin = CodeLocationLabel(prototypeStructures->list[currentIndex - 1].stubRoutine.code()); + CodeLocationLabel lastProtoBegin = CodeLocationLabel(JITStubRoutine::asCodePtr(prototypeStructures->list[currentIndex - 1].stubRoutine)); patchBuffer.link(failureCases1, lastProtoBegin); patchBuffer.link(failureCases2, lastProtoBegin); // On success return back to the hot patch code, at a point it will perform the store to dest for us. 
patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult)); - MacroAssemblerCodeRef stubCode = FINALIZE_CODE( - patchBuffer, - ("Baseline JIT get_by_id proto list stub for CodeBlock %p, return point %p", - m_codeBlock, stubInfo->hotPathBegin.labelAtOffset( - stubInfo->patch.baseline.u.get.putResult).executableAddress())); + RefPtr<JITStubRoutine> stubCode = createJITStubRoutine( + FINALIZE_CODE( + patchBuffer, + ("Baseline JIT get_by_id proto list stub for CodeBlock %p, return point %p", + m_codeBlock, stubInfo->hotPathBegin.labelAtOffset( + stubInfo->patch.baseline.u.get.putResult).executableAddress())), + *m_globalData, + m_codeBlock->ownerExecutable(), + needsStubLink); prototypeStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), stubCode, structure, prototypeStructure, isDirect); // Finally patch the jump to slow case back in the hot path to jump here instead. CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck); RepatchBuffer repatchBuffer(m_codeBlock); - repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubCode.code())); + repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubCode->code().code())); } -void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame) +void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, CallFrame* callFrame) { ASSERT(count); JumpList bucketsOfFail; @@ -892,18 +939,22 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* 
stubInfo, Polymorphi } // Use the patch information to link the failure cases back to the original slow case routine. - CodeLocationLabel lastProtoBegin = CodeLocationLabel(prototypeStructures->list[currentIndex - 1].stubRoutine.code()); + CodeLocationLabel lastProtoBegin = CodeLocationLabel(JITStubRoutine::asCodePtr(prototypeStructures->list[currentIndex - 1].stubRoutine)); patchBuffer.link(bucketsOfFail, lastProtoBegin); // On success return back to the hot patch code, at a point it will perform the store to dest for us. patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult)); - CodeRef stubRoutine = FINALIZE_CODE( - patchBuffer, - ("Baseline JIT get_by_id chain list stub for CodeBlock %p, return point %p", - m_codeBlock, stubInfo->hotPathBegin.labelAtOffset( - stubInfo->patch.baseline.u.get.putResult).executableAddress())); + RefPtr<JITStubRoutine> stubRoutine = createJITStubRoutine( + FINALIZE_CODE( + patchBuffer, + ("Baseline JIT get_by_id chain list stub for CodeBlock %p, return point %p", + m_codeBlock, stubInfo->hotPathBegin.labelAtOffset( + stubInfo->patch.baseline.u.get.putResult).executableAddress())), + *m_globalData, + m_codeBlock->ownerExecutable(), + needsStubLink); // Track the stub we have created so that it will be deleted later. prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), stubRoutine, structure, chain, isDirect); @@ -911,10 +962,10 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi // Finally patch the jump to slow case back in the hot path to jump here instead. 
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck); RepatchBuffer repatchBuffer(m_codeBlock); - repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code())); + repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine->code().code())); } -void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame) +void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame) { ASSERT(count); @@ -970,17 +1021,21 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult)); // Track the stub we have created so that it will be deleted later. - CodeRef stubRoutine = FINALIZE_CODE( - patchBuffer, - ("Baseline JIT get_by_id chain stub for CodeBlock %p, return point %p", - m_codeBlock, stubInfo->hotPathBegin.labelAtOffset( - stubInfo->patch.baseline.u.get.putResult).executableAddress())); + RefPtr<JITStubRoutine> stubRoutine = createJITStubRoutine( + FINALIZE_CODE( + patchBuffer, + ("Baseline JIT get_by_id chain stub for CodeBlock %p, return point %p", + m_codeBlock, stubInfo->hotPathBegin.labelAtOffset( + stubInfo->patch.baseline.u.get.putResult).executableAddress())), + *m_globalData, + m_codeBlock->ownerExecutable(), + needsStubLink); stubInfo->stubRoutine = stubRoutine; // Finally patch the jump to slow case back in the hot path to jump here instead. 
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck); RepatchBuffer repatchBuffer(m_codeBlock); - repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code())); + repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine->code().code())); // We don't want to patch more than once - in future go to cti_op_put_by_id_generic. repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list)); diff --git a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp index a44c576c5..84996d9f0 100644 --- a/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp +++ b/Source/JavaScriptCore/jit/JITPropertyAccess32_64.cpp @@ -30,6 +30,7 @@ #include "JIT.h" #include "CodeBlock.h" +#include "GCAwareJITStubRoutine.h" #include "Interpreter.h" #include "JITInlineMethods.h" #include "JITStubCall.h" @@ -93,7 +94,8 @@ void JIT::emit_op_del_by_id(Instruction* currentInstruction) void JIT::emit_op_method_check(Instruction* currentInstruction) { // Assert that the following instruction is a get_by_id. 
- ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id); + ASSERT(m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id + || m_interpreter->getOpcodeID((currentInstruction + OPCODE_LENGTH(op_method_check))->u.opcode) == op_get_by_id_out_of_line); currentInstruction += OPCODE_LENGTH(op_method_check); @@ -333,7 +335,7 @@ void JIT::compileGetByIdHotPath() PatchableJump structureCheck = patchableBranchPtrWithPatch(NotEqual, Address(regT0, JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure))); addSlowCase(structureCheck); - loadPtr(Address(regT0, JSObject::offsetOfPropertyStorage()), regT2); + ConvertibleLoadLabel propertyStorageLoad = convertibleLoadPtr(Address(regT0, JSObject::offsetOfOutOfLineStorage()), regT2); DataLabelCompact displacementLabel1 = loadPtrWithCompactAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT0); // payload DataLabelCompact displacementLabel2 = loadPtrWithCompactAddressOffsetPatch(Address(regT2, patchGetByIdDefaultOffset), regT1); // tag @@ -341,7 +343,7 @@ void JIT::compileGetByIdHotPath() END_UNINTERRUPTED_SEQUENCE(sequenceGetByIdHotPath); - m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubGetById, m_bytecodeOffset, hotPathBegin, structureToCompare, structureCheck, displacementLabel1, displacementLabel2, putResult)); + m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubGetById, m_bytecodeOffset, hotPathBegin, structureToCompare, structureCheck, propertyStorageLoad, displacementLabel1, displacementLabel2, putResult)); } void JIT::emitSlow_op_get_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) @@ -399,7 +401,7 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction) DataLabelPtr structureToCompare; addSlowCase(branchPtrWithPatch(NotEqual, Address(regT0, 
JSCell::structureOffset()), structureToCompare, TrustedImmPtr(reinterpret_cast<void*>(patchGetByIdDefaultStructure)))); - loadPtr(Address(regT0, JSObject::offsetOfPropertyStorage()), regT1); + ConvertibleLoadLabel propertyStorageLoad = convertibleLoadPtr(Address(regT0, JSObject::offsetOfOutOfLineStorage()), regT1); DataLabel32 displacementLabel1 = storePtrWithAddressOffsetPatch(regT2, Address(regT1, patchPutByIdDefaultOffset)); // payload DataLabel32 displacementLabel2 = storePtrWithAddressOffsetPatch(regT3, Address(regT1, patchPutByIdDefaultOffset)); // tag @@ -407,7 +409,7 @@ void JIT::emit_op_put_by_id(Instruction* currentInstruction) emitWriteBarrier(regT0, regT2, regT1, regT2, ShouldFilterImmediates, WriteBarrierForPropertyAccess); - m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubPutById, m_bytecodeOffset, hotPathBegin, structureToCompare, displacementLabel1, displacementLabel2)); + m_propertyAccessCompilationInfo.append(PropertyStubCompilationInfo(PropertyStubPutById, m_bytecodeOffset, hotPathBegin, structureToCompare, propertyStorageLoad, displacementLabel1, displacementLabel2)); } void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCaseEntry>::iterator& iter) @@ -430,30 +432,41 @@ void JIT::emitSlow_op_put_by_id(Instruction* currentInstruction, Vector<SlowCase } // Compile a store into an object's property storage. May overwrite base. 
-void JIT::compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, size_t cachedOffset) +void JIT::compilePutDirectOffset(RegisterID base, RegisterID valueTag, RegisterID valuePayload, PropertyOffset cachedOffset) { - int offset = cachedOffset; - loadPtr(Address(base, JSObject::offsetOfPropertyStorage()), base); - emitStore(offset, valueTag, valuePayload, base); + if (isOutOfLineOffset(cachedOffset)) + loadPtr(Address(base, JSObject::offsetOfOutOfLineStorage()), base); + emitStore(indexRelativeToBase(cachedOffset), valueTag, valuePayload, base); } // Compile a load from an object's property storage. May overwrite base. -void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset) +void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, PropertyOffset cachedOffset) { - int offset = cachedOffset; + if (isInlineOffset(cachedOffset)) { + emitLoad(indexRelativeToBase(cachedOffset), resultTag, resultPayload, base); + return; + } + RegisterID temp = resultPayload; - loadPtr(Address(base, JSObject::offsetOfPropertyStorage()), temp); - emitLoad(offset, resultTag, resultPayload, temp); + loadPtr(Address(base, JSObject::offsetOfOutOfLineStorage()), temp); + emitLoad(indexRelativeToBase(cachedOffset), resultTag, resultPayload, temp); } -void JIT::compileGetDirectOffset(JSObject* base, RegisterID resultTag, RegisterID resultPayload, size_t cachedOffset) +void JIT::compileGetDirectOffset(JSObject* base, RegisterID resultTag, RegisterID resultPayload, PropertyOffset cachedOffset) { - loadPtr(base->addressOfPropertyStorage(), resultTag); - load32(Address(resultTag, cachedOffset * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload); - load32(Address(resultTag, cachedOffset * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag); + if (isInlineOffset(cachedOffset)) { + 
move(TrustedImmPtr(base->locationForOffset(cachedOffset)), resultTag); + load32(Address(resultTag, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload); + load32(Address(resultTag, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag); + return; + } + + loadPtr(base->addressOfOutOfLineStorage(), resultTag); + load32(Address(resultTag, offsetInOutOfLineStorage(cachedOffset) * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload); + load32(Address(resultTag, offsetInOutOfLineStorage(cachedOffset) * sizeof(WriteBarrier<Unknown>) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag); } -void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, size_t cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct) +void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure* oldStructure, Structure* newStructure, PropertyOffset cachedOffset, StructureChain* chain, ReturnAddressPtr returnAddress, bool direct) { // The code below assumes that regT0 contains the basePayload and regT1 contains the baseTag. Restore them from the stack. #if CPU(MIPS) || CPU(SH4) || CPU(ARM) @@ -489,7 +502,7 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure // Reallocate property storage if needed. Call callTarget; - bool willNeedStorageRealloc = oldStructure->propertyStorageCapacity() != newStructure->propertyStorageCapacity(); + bool willNeedStorageRealloc = oldStructure->outOfLineCapacity() != newStructure->outOfLineCapacity(); if (willNeedStorageRealloc) { // This trampoline was called to like a JIT stub; before we can can call again we need to // remove the return address from the stack, to prevent the stack from becoming misaligned. 
@@ -499,7 +512,7 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure stubCall.skipArgument(); // base stubCall.skipArgument(); // ident stubCall.skipArgument(); // value - stubCall.addArgument(TrustedImm32(oldStructure->propertyStorageCapacity())); + stubCall.addArgument(TrustedImm32(oldStructure->outOfLineCapacity())); stubCall.addArgument(TrustedImmPtr(newStructure)); stubCall.call(regT0); @@ -545,15 +558,20 @@ void JIT::privateCompilePutByIdTransition(StructureStubInfo* stubInfo, Structure patchBuffer.link(m_calls[0].from, FunctionPtr(cti_op_put_by_id_transition_realloc)); } - stubInfo->stubRoutine = FINALIZE_CODE( - patchBuffer, - ("Baseline put_by_id transition stub for CodeBlock %p, return point %p", - m_codeBlock, returnAddress.value())); + stubInfo->stubRoutine = createJITStubRoutine( + FINALIZE_CODE( + patchBuffer, + ("Baseline put_by_id transition stub for CodeBlock %p, return point %p", + m_codeBlock, returnAddress.value())), + *m_globalData, + m_codeBlock->ownerExecutable(), + willNeedStorageRealloc, + newStructure); RepatchBuffer repatchBuffer(m_codeBlock); - repatchBuffer.relinkCallerToTrampoline(returnAddress, CodeLocationLabel(stubInfo->stubRoutine.code())); + repatchBuffer.relinkCallerToTrampoline(returnAddress, CodeLocationLabel(stubInfo->stubRoutine->code().code())); } -void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress) +void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress) { RepatchBuffer repatchBuffer(codeBlock); @@ -561,15 +579,14 @@ void JIT::patchGetByIdSelf(CodeBlock* codeBlock, StructureStubInfo* stubInfo, St // Should probably go to JITStubs::cti_op_get_by_id_fail, but that doesn't do anything interesting right now. 
repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_self_fail)); - int offset = sizeof(JSValue) * cachedOffset; - // Patch the offset into the propoerty map to load from, then patch the Structure to look for. repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.get.structureToCompare), structure); - repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel1), offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload - repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel2), offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag + repatchBuffer.setLoadInstructionIsActive(stubInfo->hotPathBegin.convertibleLoadAtOffset(stubInfo->patch.baseline.u.get.propertyStorageLoad), isOutOfLineOffset(cachedOffset)); + repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel1), offsetRelativeToPatchedStorage(cachedOffset) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload + repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelCompactAtOffset(stubInfo->patch.baseline.u.get.displacementLabel2), offsetRelativeToPatchedStorage(cachedOffset) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag } -void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, size_t cachedOffset, ReturnAddressPtr returnAddress, bool direct) +void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, Structure* structure, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress, bool direct) { RepatchBuffer repatchBuffer(codeBlock); @@ -577,12 +594,11 @@ void JIT::patchPutByIdReplace(CodeBlock* codeBlock, StructureStubInfo* stubInfo, // Should probably go to cti_op_put_by_id_fail, but that doesn't do anything interesting right now. 
repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(direct ? cti_op_put_by_id_direct_generic : cti_op_put_by_id_generic)); - int offset = sizeof(JSValue) * cachedOffset; - // Patch the offset into the propoerty map to load from, then patch the Structure to look for. repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabelPtrAtOffset(stubInfo->patch.baseline.u.put.structureToCompare), structure); - repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel1), offset + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload - repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel2), offset + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag + repatchBuffer.setLoadInstructionIsActive(stubInfo->hotPathBegin.convertibleLoadAtOffset(stubInfo->patch.baseline.u.put.propertyStorageLoad), isOutOfLineOffset(cachedOffset)); + repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel1), offsetRelativeToPatchedStorage(cachedOffset) + OBJECT_OFFSETOF(JSValue, u.asBits.payload)); // payload + repatchBuffer.repatch(stubInfo->hotPathBegin.dataLabel32AtOffset(stubInfo->patch.baseline.u.put.displacementLabel2), offsetRelativeToPatchedStorage(cachedOffset) + OBJECT_OFFSETOF(JSValue, u.asBits.tag)); // tag } void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress) @@ -614,7 +630,7 @@ void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress) patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult)); // Track the stub we have created so that it will be deleted later. 
- stubInfo->stubRoutine = FINALIZE_CODE( + stubInfo->stubRoutine = FINALIZE_CODE_FOR_STUB( patchBuffer, ("Baseline get_by_id array length stub for CodeBlock %p, return point %p", m_codeBlock, stubInfo->hotPathBegin.labelAtOffset( @@ -623,13 +639,13 @@ void JIT::privateCompilePatchGetArrayLength(ReturnAddressPtr returnAddress) // Finally patch the jump to slow case back in the hot path to jump here instead. CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck); RepatchBuffer repatchBuffer(m_codeBlock); - repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine.code())); + repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine->code().code())); // We don't want to patch more than once - in future go to cti_op_put_by_id_generic. repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_array_fail)); } -void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame) +void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame) { // regT0 holds a JSCell* @@ -684,23 +700,27 @@ void JIT::privateCompileGetByIdProto(StructureStubInfo* stubInfo, Structure* str } // Track the stub we have created so that it will be deleted later. 
- stubInfo->stubRoutine = FINALIZE_CODE( - patchBuffer, - ("Baseline get_by_id proto stub for CodeBlock %p, return point %p", - m_codeBlock, stubInfo->hotPathBegin.labelAtOffset( - stubInfo->patch.baseline.u.get.putResult).executableAddress())); + stubInfo->stubRoutine = createJITStubRoutine( + FINALIZE_CODE( + patchBuffer, + ("Baseline get_by_id proto stub for CodeBlock %p, return point %p", + m_codeBlock, stubInfo->hotPathBegin.labelAtOffset( + stubInfo->patch.baseline.u.get.putResult).executableAddress())), + *m_globalData, + m_codeBlock->ownerExecutable(), + needsStubLink); // Finally patch the jump to slow case back in the hot path to jump here instead. CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck); RepatchBuffer repatchBuffer(m_codeBlock); - repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine.code())); + repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubInfo->stubRoutine->code().code())); // We don't want to patch more than once - in future go to cti_op_put_by_id_generic. repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list)); } -void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset) +void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* polymorphicStructures, int currentIndex, Structure* structure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset) { // regT0 holds a JSCell* Jump failureCase = checkStructure(regT0, structure); @@ -737,7 +757,7 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic } } // Use the patch information to link the failure cases back to the original slow case routine. 
- CodeLocationLabel lastProtoBegin = CodeLocationLabel(polymorphicStructures->list[currentIndex - 1].stubRoutine.code()); + CodeLocationLabel lastProtoBegin = CodeLocationLabel(JITStubRoutine::asCodePtr(polymorphicStructures->list[currentIndex - 1].stubRoutine)); if (!lastProtoBegin) lastProtoBegin = stubInfo->callReturnLocation.labelAtOffset(-stubInfo->patch.baseline.u.get.coldPathBegin); @@ -746,21 +766,25 @@ void JIT::privateCompileGetByIdSelfList(StructureStubInfo* stubInfo, Polymorphic // On success return back to the hot patch code, at a point it will perform the store to dest for us. patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult)); - MacroAssemblerCodeRef stubRoutine = FINALIZE_CODE( - patchBuffer, - ("Baseline get_by_id self list stub for CodeBlock %p, return point %p", - m_codeBlock, stubInfo->hotPathBegin.labelAtOffset( - stubInfo->patch.baseline.u.get.putResult).executableAddress())); + RefPtr<JITStubRoutine> stubRoutine = createJITStubRoutine( + FINALIZE_CODE( + patchBuffer, + ("Baseline get_by_id self list stub for CodeBlock %p, return point %p", + m_codeBlock, stubInfo->hotPathBegin.labelAtOffset( + stubInfo->patch.baseline.u.get.putResult).executableAddress())), + *m_globalData, + m_codeBlock->ownerExecutable(), + needsStubLink); polymorphicStructures->list[currentIndex].set(*m_globalData, m_codeBlock->ownerExecutable(), stubRoutine, structure, isDirect); // Finally patch the jump to slow case back in the hot path to jump here instead. 
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck); RepatchBuffer repatchBuffer(m_codeBlock); - repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code())); + repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine->code().code())); } -void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame) +void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, Structure* prototypeStructure, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, CallFrame* callFrame) { // regT0 holds a JSCell* @@ -808,28 +832,32 @@ void JIT::privateCompileGetByIdProtoList(StructureStubInfo* stubInfo, Polymorphi } } // Use the patch information to link the failure cases back to the original slow case routine. - CodeLocationLabel lastProtoBegin = CodeLocationLabel(prototypeStructures->list[currentIndex - 1].stubRoutine.code()); + CodeLocationLabel lastProtoBegin = CodeLocationLabel(JITStubRoutine::asCodePtr(prototypeStructures->list[currentIndex - 1].stubRoutine)); patchBuffer.link(failureCases1, lastProtoBegin); patchBuffer.link(failureCases2, lastProtoBegin); // On success return back to the hot patch code, at a point it will perform the store to dest for us. 
patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult)); - MacroAssemblerCodeRef stubRoutine = FINALIZE_CODE( - patchBuffer, - ("Baseline get_by_id proto list stub for CodeBlock %p, return point %p", - m_codeBlock, stubInfo->hotPathBegin.labelAtOffset( - stubInfo->patch.baseline.u.get.putResult).executableAddress())); + RefPtr<JITStubRoutine> stubRoutine = createJITStubRoutine( + FINALIZE_CODE( + patchBuffer, + ("Baseline get_by_id proto list stub for CodeBlock %p, return point %p", + m_codeBlock, stubInfo->hotPathBegin.labelAtOffset( + stubInfo->patch.baseline.u.get.putResult).executableAddress())), + *m_globalData, + m_codeBlock->ownerExecutable(), + needsStubLink); prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), stubRoutine, structure, prototypeStructure, isDirect); // Finally patch the jump to slow case back in the hot path to jump here instead. CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck); RepatchBuffer repatchBuffer(m_codeBlock); - repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code())); + repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine->code().code())); } -void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, CallFrame* callFrame) +void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, PolymorphicAccessStructureList* prototypeStructures, int currentIndex, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, CallFrame* callFrame) { // regT0 holds a JSCell* ASSERT(count); @@ -882,18 +910,22 @@ void 
JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi } } // Use the patch information to link the failure cases back to the original slow case routine. - CodeLocationLabel lastProtoBegin = CodeLocationLabel(prototypeStructures->list[currentIndex - 1].stubRoutine.code()); + CodeLocationLabel lastProtoBegin = CodeLocationLabel(JITStubRoutine::asCodePtr(prototypeStructures->list[currentIndex - 1].stubRoutine)); patchBuffer.link(bucketsOfFail, lastProtoBegin); // On success return back to the hot patch code, at a point it will perform the store to dest for us. patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult)); - MacroAssemblerCodeRef stubRoutine = FINALIZE_CODE( - patchBuffer, - ("Baseline get_by_id chain list stub for CodeBlock %p, return point %p", - m_codeBlock, stubInfo->hotPathBegin.labelAtOffset( - stubInfo->patch.baseline.u.get.putResult).executableAddress())); + RefPtr<JITStubRoutine> stubRoutine = createJITStubRoutine( + FINALIZE_CODE( + patchBuffer, + ("Baseline get_by_id chain list stub for CodeBlock %p, return point %p", + m_codeBlock, stubInfo->hotPathBegin.labelAtOffset( + stubInfo->patch.baseline.u.get.putResult).executableAddress())), + *m_globalData, + m_codeBlock->ownerExecutable(), + needsStubLink); // Track the stub we have created so that it will be deleted later. prototypeStructures->list[currentIndex].set(callFrame->globalData(), m_codeBlock->ownerExecutable(), stubRoutine, structure, chain, isDirect); @@ -901,10 +933,10 @@ void JIT::privateCompileGetByIdChainList(StructureStubInfo* stubInfo, Polymorphi // Finally patch the jump to slow case back in the hot path to jump here instead. 
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck); RepatchBuffer repatchBuffer(m_codeBlock); - repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code())); + repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine->code().code())); } -void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, size_t cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame) +void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* structure, StructureChain* chain, size_t count, const Identifier& ident, const PropertySlot& slot, PropertyOffset cachedOffset, ReturnAddressPtr returnAddress, CallFrame* callFrame) { // regT0 holds a JSCell* ASSERT(count); @@ -959,29 +991,47 @@ void JIT::privateCompileGetByIdChain(StructureStubInfo* stubInfo, Structure* str patchBuffer.link(success, stubInfo->hotPathBegin.labelAtOffset(stubInfo->patch.baseline.u.get.putResult)); // Track the stub we have created so that it will be deleted later. - MacroAssemblerCodeRef stubRoutine = FINALIZE_CODE( - patchBuffer, - ("Baseline get_by_id chain stub for CodeBlock %p, return point %p", - m_codeBlock, stubInfo->hotPathBegin.labelAtOffset( - stubInfo->patch.baseline.u.get.putResult).executableAddress())); + RefPtr<JITStubRoutine> stubRoutine = createJITStubRoutine( + FINALIZE_CODE( + patchBuffer, + ("Baseline get_by_id chain stub for CodeBlock %p, return point %p", + m_codeBlock, stubInfo->hotPathBegin.labelAtOffset( + stubInfo->patch.baseline.u.get.putResult).executableAddress())), + *m_globalData, + m_codeBlock->ownerExecutable(), + needsStubLink); stubInfo->stubRoutine = stubRoutine; // Finally patch the jump to slow case back in the hot path to jump here instead. 
CodeLocationJump jumpLocation = stubInfo->hotPathBegin.jumpAtOffset(stubInfo->patch.baseline.u.get.structureCheck); RepatchBuffer repatchBuffer(m_codeBlock); - repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine.code())); + repatchBuffer.relink(jumpLocation, CodeLocationLabel(stubRoutine->code().code())); // We don't want to patch more than once - in future go to cti_op_put_by_id_generic. repatchBuffer.relinkCallerToFunction(returnAddress, FunctionPtr(cti_op_get_by_id_proto_list)); } -void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID offset) +void JIT::compileGetDirectOffset(RegisterID base, RegisterID resultTag, RegisterID resultPayload, RegisterID offset, FinalObjectMode finalObjectMode) { ASSERT(sizeof(JSValue) == 8); - loadPtr(Address(base, JSObject::offsetOfPropertyStorage()), base); - loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload)), resultPayload); - loadPtr(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag)), resultTag); + if (finalObjectMode == MayBeFinal) { + Jump isInline = branch32(LessThan, offset, TrustedImm32(inlineStorageCapacity)); + loadPtr(Address(base, JSObject::offsetOfOutOfLineStorage()), base); + Jump done = jump(); + isInline.link(this); + addPtr(TrustedImmPtr(JSObject::offsetOfInlineStorage() + inlineStorageCapacity * sizeof(EncodedJSValue)), base); + done.link(this); + } else { +#if !ASSERT_DISABLED + Jump isOutOfLine = branch32(GreaterThanOrEqual, offset, TrustedImm32(inlineStorageCapacity)); + breakpoint(); + isOutOfLine.link(this); +#endif + loadPtr(Address(base, JSObject::offsetOfOutOfLineStorage()), base); + } + load32(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.payload) - inlineStorageCapacity * sizeof(EncodedJSValue)), resultPayload); + load32(BaseIndex(base, offset, TimesEight, OBJECT_OFFSETOF(JSValue, u.asBits.tag) - inlineStorageCapacity * sizeof(EncodedJSValue)), 
resultTag); } void JIT::emit_op_get_by_pname(Instruction* currentInstruction) @@ -1006,6 +1056,7 @@ void JIT::emit_op_get_by_pname(Instruction* currentInstruction) load32(addressFor(i), regT3); sub32(TrustedImm32(1), regT3); addSlowCase(branch32(AboveOrEqual, regT3, Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_numCacheableSlots)))); + add32(Address(regT1, OBJECT_OFFSETOF(JSPropertyNameIterator, m_offsetBase)), regT3); compileGetDirectOffset(regT2, regT1, regT0, regT3); emitStore(dst, regT1, regT0); diff --git a/Source/JavaScriptCore/jit/JITStubRoutine.cpp b/Source/JavaScriptCore/jit/JITStubRoutine.cpp new file mode 100644 index 000000000..951665318 --- /dev/null +++ b/Source/JavaScriptCore/jit/JITStubRoutine.cpp @@ -0,0 +1,48 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#include "config.h" +#include "JITStubRoutine.h" + +#if ENABLE(JIT) + +#include "JSObject.h" +#include "ScopeChain.h" +#include "SlotVisitor.h" + +namespace JSC { + +JITStubRoutine::~JITStubRoutine() { } + +void JITStubRoutine::observeZeroRefCount() +{ + ASSERT(!m_refCount); + delete this; +} + +} // namespace JSC + +#endif // ENABLE(JIT) + diff --git a/Source/JavaScriptCore/jit/JITStubRoutine.h b/Source/JavaScriptCore/jit/JITStubRoutine.h new file mode 100644 index 000000000..4400589ff --- /dev/null +++ b/Source/JavaScriptCore/jit/JITStubRoutine.h @@ -0,0 +1,161 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. 
OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + */ + +#ifndef JITStubRoutine_h +#define JITStubRoutine_h + +#include <wtf/Platform.h> + +#if ENABLE(JIT) + +#include "ExecutableAllocator.h" +#include "MacroAssemblerCodeRef.h" +#include <wtf/RefCounted.h> +#include <wtf/Vector.h> + +namespace JSC { + +class JITStubRoutineSet; + +// This is a base-class for JIT stub routines, and also the class you want +// to instantiate directly if you have a routine that does not need any +// help from the GC. If in doubt, use one of the other stub routines. But +// if you know for sure that the stub routine cannot be on the stack while +// someone triggers a stub routine reset, then using this will speed up +// memory reclamation. One case where a stub routine satisfies this +// condition is if it doesn't make any calls, to either C++ or JS code. In +// such a routine you know that it cannot be on the stack when anything +// interesting happens. +// See GCAwareJITStubRoutine.h for the other stub routines. +class JITStubRoutine { + WTF_MAKE_NONCOPYABLE(JITStubRoutine); + WTF_MAKE_FAST_ALLOCATED; +public: + JITStubRoutine(const MacroAssemblerCodeRef& code) + : m_code(code) + , m_refCount(1) + { + } + + // Use this if you want to pass a CodePtr to someone who insists on taking + // a RefPtr<JITStubRoutine>. 
+ static PassRefPtr<JITStubRoutine> createSelfManagedRoutine( + MacroAssemblerCodePtr rawCodePointer) + { + return adoptRef(new JITStubRoutine(MacroAssemblerCodeRef::createSelfManagedCodeRef(rawCodePointer))); + } + + virtual ~JITStubRoutine(); + + // MacroAssemblerCodeRef is copyable, but at the cost of reference + // counting churn. Returning a reference is a good way of reducing + // the churn. + const MacroAssemblerCodeRef& code() const { return m_code; } + + static MacroAssemblerCodePtr asCodePtr(PassRefPtr<JITStubRoutine> stubRoutine) + { + if (!stubRoutine) + return MacroAssemblerCodePtr(); + + MacroAssemblerCodePtr result = stubRoutine->code().code(); + ASSERT(!!result); + return result; + } + + void ref() + { + m_refCount++; + } + + void deref() + { + if (--m_refCount) + return; + observeZeroRefCount(); + } + + // Helpers for the GC to determine how to deal with marking JIT stub + // routines. + uintptr_t startAddress() const { return m_code.executableMemory()->startAsInteger(); } + uintptr_t endAddress() const { return m_code.executableMemory()->endAsInteger(); } + static uintptr_t addressStep() { return jitAllocationGranule; } + + static bool canPerformRangeFilter() + { +#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED) + return true; +#else + return false; +#endif + } + static uintptr_t filteringStartAddress() + { +#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED) + return startOfFixedExecutableMemoryPool; +#else + UNREACHABLE_FOR_PLATFORM(); + return 0; +#endif + } + static size_t filteringExtentSize() + { +#if ENABLE(EXECUTABLE_ALLOCATOR_FIXED) + return fixedExecutableMemoryPoolSize; +#else + UNREACHABLE_FOR_PLATFORM(); + return 0; +#endif + } + static bool passesFilter(uintptr_t address) + { + if (!canPerformRangeFilter()) { + // Just check that the address doesn't use any special values that would make + // our hashtables upset. 
+ return address >= jitAllocationGranule && address != std::numeric_limits<uintptr_t>::max(); + } + + if (address - filteringStartAddress() >= filteringExtentSize()) + return false; + + return true; + } + +protected: + virtual void observeZeroRefCount(); + + MacroAssemblerCodeRef m_code; + unsigned m_refCount; +}; + +// Helper for the creation of simple stub routines that need no help from the GC. +#define FINALIZE_CODE_FOR_STUB(patchBuffer, dataLogArguments) \ + (adoptRef(new JITStubRoutine(FINALIZE_CODE((patchBuffer), dataLogArguments)))) + +} // namespace JSC + +#endif // ENABLE(JIT) + +#endif // JITStubRoutine_h + diff --git a/Source/JavaScriptCore/jit/JITStubs.cpp b/Source/JavaScriptCore/jit/JITStubs.cpp index 6b8082886..2273f0f38 100644 --- a/Source/JavaScriptCore/jit/JITStubs.cpp +++ b/Source/JavaScriptCore/jit/JITStubs.cpp @@ -622,38 +622,46 @@ SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n" #elif COMPILER(GCC) && CPU(ARM_TRADITIONAL) asm ( +".text" "\n" ".globl " SYMBOL_STRING(ctiTrampoline) "\n" HIDE_SYMBOL(ctiTrampoline) "\n" +INLINE_ARM_FUNCTION(ctiTrampoline) SYMBOL_STRING(ctiTrampoline) ":" "\n" "stmdb sp!, {r1-r3}" "\n" - "stmdb sp!, {r4-r8, lr}" "\n" + "stmdb sp!, {r4-r6, r8-r11, lr}" "\n" "sub sp, sp, #" STRINGIZE_VALUE_OF(PRESERVEDR4_OFFSET) "\n" - "mov r4, r2" "\n" - "mov r5, #512" "\n" + "mov r5, r2" "\n" + "mov r6, #512" "\n" // r0 contains the code - "mov lr, pc" "\n" - "mov pc, r0" "\n" + "blx r0" "\n" "add sp, sp, #" STRINGIZE_VALUE_OF(PRESERVEDR4_OFFSET) "\n" - "ldmia sp!, {r4-r8, lr}" "\n" + "ldmia sp!, {r4-r6, r8-r11, lr}" "\n" "add sp, sp, #12" "\n" - "mov pc, lr" "\n" + "bx lr" "\n" +".globl " SYMBOL_STRING(ctiTrampolineEnd) "\n" +HIDE_SYMBOL(ctiTrampolineEnd) "\n" +SYMBOL_STRING(ctiTrampolineEnd) ":" "\n" ); asm ( +".text" "\n" ".globl " SYMBOL_STRING(ctiVMThrowTrampoline) "\n" HIDE_SYMBOL(ctiVMThrowTrampoline) "\n" +INLINE_ARM_FUNCTION(ctiVMThrowTrampoline) SYMBOL_STRING(ctiVMThrowTrampoline) ":" "\n" "mov r0, sp" "\n" "bl " 
SYMBOL_STRING(cti_vm_throw) "\n" // Both has the same return sequence +".text" "\n" ".globl " SYMBOL_STRING(ctiOpThrowNotCaught) "\n" HIDE_SYMBOL(ctiOpThrowNotCaught) "\n" +INLINE_ARM_FUNCTION(ctiOpThrowNotCaught) SYMBOL_STRING(ctiOpThrowNotCaught) ":" "\n" "add sp, sp, #" STRINGIZE_VALUE_OF(PRESERVEDR4_OFFSET) "\n" - "ldmia sp!, {r4-r8, lr}" "\n" + "ldmia sp!, {r4-r6, r8-r11, lr}" "\n" "add sp, sp, #12" "\n" - "mov pc, lr" "\n" + "bx lr" "\n" ); #elif COMPILER(RVCT) && CPU(ARM_THUMB2) @@ -954,7 +962,7 @@ NEVER_INLINE void JITThunks::tryCacheGetByID(CallFrame* callFrame, CodeBlock* co return; } - size_t offset = slot.cachedOffset(); + PropertyOffset offset = slot.cachedOffset(); size_t count = normalizePrototypeChain(callFrame, baseValue, slot.slotBase(), propertyName, offset); if (!count) { stubInfo->accessType = access_get_by_id_generic; @@ -1156,11 +1164,12 @@ template<typename T> static T throwExceptionFromOpCall(JITStackFrame& jitStackFr }; \ asm ( \ ".globl " SYMBOL_STRING(cti_##op) "\n" \ + INLINE_ARM_FUNCTION(cti_##op) \ SYMBOL_STRING(cti_##op) ":" "\n" \ "str lr, [sp, #" STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "]" "\n" \ "bl " SYMBOL_STRING(JITStubThunked_##op) "\n" \ "ldr lr, [sp, #" STRINGIZE_VALUE_OF(THUNK_RETURN_ADDRESS_OFFSET) "]" "\n" \ - "mov pc, lr" "\n" \ + "bx lr" "\n" \ ); \ rtype JITStubThunked_##op(STUB_ARGS_DECLARATION) @@ -1486,13 +1495,16 @@ DEFINE_STUB_FUNCTION(JSObject*, op_put_by_id_transition_realloc) JSValue baseValue = stackFrame.args[0].jsValue(); int32_t oldSize = stackFrame.args[3].int32(); Structure* newStructure = stackFrame.args[4].structure(); - int32_t newSize = newStructure->propertyStorageCapacity(); + int32_t newSize = newStructure->outOfLineCapacity(); + + ASSERT(oldSize >= 0); + ASSERT(newSize > oldSize); ASSERT(baseValue.isObject()); JSObject* base = asObject(baseValue); JSGlobalData& globalData = *stackFrame.globalData; - PropertyStorage newStorage = base->growPropertyStorage(globalData, oldSize, newSize); - 
base->setPropertyStorage(globalData, newStorage, newStructure); + PropertyStorage newStorage = base->growOutOfLineStorage(globalData, oldSize, newSize); + base->setOutOfLineStorage(globalData, newStorage, newStructure); return base; } @@ -1710,7 +1722,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_self_fail) if (stubInfo->accessType == access_get_by_id_self) { ASSERT(!stubInfo->stubRoutine); - polymorphicStructureList = new PolymorphicAccessStructureList(callFrame->globalData(), codeBlock->ownerExecutable(), MacroAssemblerCodeRef(), stubInfo->u.getByIdSelf.baseObjectStructure.get(), true); + polymorphicStructureList = new PolymorphicAccessStructureList(callFrame->globalData(), codeBlock->ownerExecutable(), 0, stubInfo->u.getByIdSelf.baseObjectStructure.get(), true); stubInfo->initGetByIdSelfList(polymorphicStructureList, 1); } else { polymorphicStructureList = stubInfo->u.getByIdSelfList.structureList; @@ -1736,12 +1748,12 @@ static PolymorphicAccessStructureList* getPolymorphicAccessStructureListSlot(JSG switch (stubInfo->accessType) { case access_get_by_id_proto: prototypeStructureList = new PolymorphicAccessStructureList(globalData, owner, stubInfo->stubRoutine, stubInfo->u.getByIdProto.baseObjectStructure.get(), stubInfo->u.getByIdProto.prototypeStructure.get(), true); - stubInfo->stubRoutine = MacroAssemblerCodeRef(); + stubInfo->stubRoutine.clear(); stubInfo->initGetByIdProtoList(prototypeStructureList, 2); break; case access_get_by_id_chain: prototypeStructureList = new PolymorphicAccessStructureList(globalData, owner, stubInfo->stubRoutine, stubInfo->u.getByIdChain.baseObjectStructure.get(), stubInfo->u.getByIdChain.chain.get(), true); - stubInfo->stubRoutine = MacroAssemblerCodeRef(); + stubInfo->stubRoutine.clear(); stubInfo->initGetByIdProtoList(prototypeStructureList, 2); break; case access_get_by_id_proto_list: @@ -1814,7 +1826,7 @@ DEFINE_STUB_FUNCTION(EncodedJSValue, op_get_by_id_proto_list) ASSERT(slot.slotBase().isObject()); JSObject* 
slotBaseObject = asObject(slot.slotBase()); - size_t offset = slot.cachedOffset(); + PropertyOffset offset = slot.cachedOffset(); if (slot.slotBase() == baseValue) ctiPatchCallByReturnAddress(codeBlock, STUB_RETURN_ADDRESS, FunctionPtr(cti_op_get_by_id_proto_fail)); @@ -1928,7 +1940,12 @@ DEFINE_STUB_FUNCTION(void, optimize) unsigned bytecodeIndex = stackFrame.args[0].int32(); #if ENABLE(JIT_VERBOSE_OSR) - dataLog("%p: Entered optimize with bytecodeIndex = %u, executeCounter = %s, reoptimizationRetryCounter = %u, optimizationDelayCounter = %u\n", codeBlock, bytecodeIndex, codeBlock->jitExecuteCounter().status(), codeBlock->reoptimizationRetryCounter(), codeBlock->optimizationDelayCounter()); + dataLog("%p: Entered optimize with bytecodeIndex = %u, executeCounter = %s, reoptimizationRetryCounter = %u, optimizationDelayCounter = %u, exitCounter = ", codeBlock, bytecodeIndex, codeBlock->jitExecuteCounter().status(), codeBlock->reoptimizationRetryCounter(), codeBlock->optimizationDelayCounter()); + if (codeBlock->hasOptimizedReplacement()) + dataLog("%u", codeBlock->replacement()->osrExitCounter()); + else + dataLog("N/A"); + dataLog("\n"); #endif if (!codeBlock->checkIfOptimizationThresholdReached()) { @@ -1938,8 +1955,21 @@ DEFINE_STUB_FUNCTION(void, optimize) if (codeBlock->hasOptimizedReplacement()) { #if ENABLE(JIT_VERBOSE_OSR) - dataLog("Considering OSR into %p(%p) with success/fail %u/%u.\n", codeBlock, codeBlock->replacement(), codeBlock->replacement()->speculativeSuccessCounter(), codeBlock->replacement()->speculativeFailCounter()); + dataLog("Considering OSR into %p(%p).\n", codeBlock, codeBlock->replacement()); #endif + // If we have an optimized replacement, then it must be the case that we entered + // cti_optimize from a loop. That's because is there's an optimized replacement, + // then all calls to this function will be relinked to the replacement and so + // the prologue OSR will never fire. + + // This is an interesting threshold check. 
Consider that a function OSR exits + // in the middle of a loop, while having a relatively low exit count. The exit + // will reset the execution counter to some target threshold, meaning that this + // code won't be reached until that loop heats up for >=1000 executions. But then + // we do a second check here, to see if we should either reoptimize, or just + // attempt OSR entry. Hence it might even be correct for + // shouldReoptimizeFromLoopNow() to always return true. But we make it do some + // additional checking anyway, to reduce the amount of recompilation thrashing. if (codeBlock->replacement()->shouldReoptimizeFromLoopNow()) { #if ENABLE(JIT_VERBOSE_OSR) dataLog("Triggering reoptimization of %p(%p) (in loop).\n", codeBlock, codeBlock->replacement()); @@ -1985,7 +2015,6 @@ DEFINE_STUB_FUNCTION(void, optimize) #endif codeBlock->optimizeSoon(); - optimizedCodeBlock->countSpeculationSuccess(); STUB_SET_RETURN_ADDRESS(address); return; } @@ -1996,10 +2025,10 @@ DEFINE_STUB_FUNCTION(void, optimize) // Count the OSR failure as a speculation failure. If this happens a lot, then // reoptimize. 
- optimizedCodeBlock->countSpeculationFailure(); + optimizedCodeBlock->countOSRExit(); #if ENABLE(JIT_VERBOSE_OSR) - dataLog("Encountered OSR failure into %p(%p) with success/fail %u/%u.\n", codeBlock, codeBlock->replacement(), codeBlock->replacement()->speculativeSuccessCounter(), codeBlock->replacement()->speculativeFailCounter()); + dataLog("Encountered OSR failure into %p(%p).\n", codeBlock, codeBlock->replacement()); #endif // We are a lot more conservative about triggering reoptimization after OSR failure than diff --git a/Source/JavaScriptCore/jit/JITStubs.h b/Source/JavaScriptCore/jit/JITStubs.h index d2bc15e64..22a1dd773 100644 --- a/Source/JavaScriptCore/jit/JITStubs.h +++ b/Source/JavaScriptCore/jit/JITStubs.h @@ -190,8 +190,10 @@ namespace JSC { void* preservedR4; void* preservedR5; void* preservedR6; - void* preservedR7; void* preservedR8; + void* preservedR9; + void* preservedR10; + void* preservedR11; void* preservedLink; RegisterFile* registerFile; diff --git a/Source/JavaScriptCore/jit/JSInterfaceJIT.h b/Source/JavaScriptCore/jit/JSInterfaceJIT.h index 05d1ce5ad..6b7dd2184 100644 --- a/Source/JavaScriptCore/jit/JSInterfaceJIT.h +++ b/Source/JavaScriptCore/jit/JSInterfaceJIT.h @@ -94,7 +94,7 @@ namespace JSC { static const FPRegisterID fpRegT1 = X86Registers::xmm1; static const FPRegisterID fpRegT2 = X86Registers::xmm2; static const FPRegisterID fpRegT3 = X86Registers::xmm3; -#elif CPU(ARM_THUMB2) +#elif CPU(ARM) static const RegisterID returnValueRegister = ARMRegisters::r0; static const RegisterID cachedResultRegister = ARMRegisters::r0; static const RegisterID firstArgumentRegister = ARMRegisters::r0; @@ -107,35 +107,11 @@ namespace JSC { static const RegisterID regT1 = ARMRegisters::r1; static const RegisterID regT2 = ARMRegisters::r2; static const RegisterID regT3 = ARMRegisters::r4; - + + // Update ctiTrampoline in JITStubs.cpp if these values are changed! 
static const RegisterID callFrameRegister = ARMRegisters::r5; static const RegisterID timeoutCheckRegister = ARMRegisters::r6; - - static const FPRegisterID fpRegT0 = ARMRegisters::d0; - static const FPRegisterID fpRegT1 = ARMRegisters::d1; - static const FPRegisterID fpRegT2 = ARMRegisters::d2; - static const FPRegisterID fpRegT3 = ARMRegisters::d3; -#elif CPU(ARM_TRADITIONAL) - static const RegisterID returnValueRegister = ARMRegisters::r0; - static const RegisterID cachedResultRegister = ARMRegisters::r0; - static const RegisterID firstArgumentRegister = ARMRegisters::r0; - - static const RegisterID timeoutCheckRegister = ARMRegisters::r5; - static const RegisterID callFrameRegister = ARMRegisters::r4; - - static const RegisterID regT0 = ARMRegisters::r0; - static const RegisterID regT1 = ARMRegisters::r1; - static const RegisterID regT2 = ARMRegisters::r2; - // Callee preserved - static const RegisterID regT3 = ARMRegisters::r7; - - static const RegisterID regS0 = ARMRegisters::S0; - // Callee preserved - static const RegisterID regS1 = ARMRegisters::S1; - - static const RegisterID regStackPtr = ARMRegisters::sp; - static const RegisterID regLink = ARMRegisters::lr; - + static const FPRegisterID fpRegT0 = ARMRegisters::d0; static const FPRegisterID fpRegT1 = ARMRegisters::d1; static const FPRegisterID fpRegT2 = ARMRegisters::d2; diff --git a/Source/JavaScriptCore/jit/SpecializedThunkJIT.h b/Source/JavaScriptCore/jit/SpecializedThunkJIT.h index c98e57d12..e17b45d94 100644 --- a/Source/JavaScriptCore/jit/SpecializedThunkJIT.h +++ b/Source/JavaScriptCore/jit/SpecializedThunkJIT.h @@ -37,9 +37,7 @@ namespace JSC { class SpecializedThunkJIT : public JSInterfaceJIT { public: static const int ThisArgument = -1; - SpecializedThunkJIT(int expectedArgCount, JSGlobalData* globalData) - : m_expectedArgCount(expectedArgCount) - , m_globalData(globalData) + SpecializedThunkJIT(int expectedArgCount) { // Check that we have the expected number of arguments 
m_failures.append(branch32(NotEqual, payloadFor(RegisterFile::ArgumentCount), TrustedImm32(expectedArgCount + 1))); @@ -166,8 +164,6 @@ namespace JSC { #endif } - int m_expectedArgCount; - JSGlobalData* m_globalData; MacroAssembler::JumpList m_failures; Vector<std::pair<Call, FunctionPtr> > m_calls; }; diff --git a/Source/JavaScriptCore/jit/ThunkGenerators.cpp b/Source/JavaScriptCore/jit/ThunkGenerators.cpp index c440b5157..c6431c22d 100644 --- a/Source/JavaScriptCore/jit/ThunkGenerators.cpp +++ b/Source/JavaScriptCore/jit/ThunkGenerators.cpp @@ -75,7 +75,7 @@ static void charToString(SpecializedThunkJIT& jit, JSGlobalData* globalData, Mac MacroAssemblerCodeRef charCodeAtThunkGenerator(JSGlobalData* globalData) { - SpecializedThunkJIT jit(1, globalData); + SpecializedThunkJIT jit(1); stringCharLoad(jit); jit.returnInt32(SpecializedThunkJIT::regT0); return jit.finalize(*globalData, globalData->jitStubs->ctiNativeCall(), "charCodeAt"); @@ -83,7 +83,7 @@ MacroAssemblerCodeRef charCodeAtThunkGenerator(JSGlobalData* globalData) MacroAssemblerCodeRef charAtThunkGenerator(JSGlobalData* globalData) { - SpecializedThunkJIT jit(1, globalData); + SpecializedThunkJIT jit(1); stringCharLoad(jit); charToString(jit, globalData, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1); jit.returnJSCell(SpecializedThunkJIT::regT0); @@ -92,7 +92,7 @@ MacroAssemblerCodeRef charAtThunkGenerator(JSGlobalData* globalData) MacroAssemblerCodeRef fromCharCodeThunkGenerator(JSGlobalData* globalData) { - SpecializedThunkJIT jit(1, globalData); + SpecializedThunkJIT jit(1); // load char code jit.loadInt32Argument(0, SpecializedThunkJIT::regT0); charToString(jit, globalData, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT0, SpecializedThunkJIT::regT1); @@ -102,7 +102,7 @@ MacroAssemblerCodeRef fromCharCodeThunkGenerator(JSGlobalData* globalData) MacroAssemblerCodeRef sqrtThunkGenerator(JSGlobalData* globalData) { - SpecializedThunkJIT jit(1, globalData); + 
SpecializedThunkJIT jit(1); if (!jit.supportsFloatingPointSqrt()) return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall()); @@ -182,7 +182,7 @@ static const double halfConstant = 0.5; MacroAssemblerCodeRef floorThunkGenerator(JSGlobalData* globalData) { - SpecializedThunkJIT jit(1, globalData); + SpecializedThunkJIT jit(1); MacroAssembler::Jump nonIntJump; if (!UnaryDoubleOpWrapper(floor) || !jit.supportsFloatingPoint()) return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall()); @@ -214,7 +214,7 @@ MacroAssemblerCodeRef floorThunkGenerator(JSGlobalData* globalData) MacroAssemblerCodeRef ceilThunkGenerator(JSGlobalData* globalData) { - SpecializedThunkJIT jit(1, globalData); + SpecializedThunkJIT jit(1); if (!UnaryDoubleOpWrapper(ceil) || !jit.supportsFloatingPoint()) return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall()); MacroAssembler::Jump nonIntJump; @@ -233,7 +233,7 @@ MacroAssemblerCodeRef ceilThunkGenerator(JSGlobalData* globalData) MacroAssemblerCodeRef roundThunkGenerator(JSGlobalData* globalData) { - SpecializedThunkJIT jit(1, globalData); + SpecializedThunkJIT jit(1); if (!UnaryDoubleOpWrapper(jsRound) || !jit.supportsFloatingPoint()) return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall()); MacroAssembler::Jump nonIntJump; @@ -269,7 +269,7 @@ MacroAssemblerCodeRef expThunkGenerator(JSGlobalData* globalData) { if (!UnaryDoubleOpWrapper(exp)) return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall()); - SpecializedThunkJIT jit(1, globalData); + SpecializedThunkJIT jit(1); if (!jit.supportsFloatingPoint()) return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall()); jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0); @@ -282,7 +282,7 @@ MacroAssemblerCodeRef logThunkGenerator(JSGlobalData* globalData) { if 
(!UnaryDoubleOpWrapper(log)) return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall()); - SpecializedThunkJIT jit(1, globalData); + SpecializedThunkJIT jit(1); if (!jit.supportsFloatingPoint()) return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall()); jit.loadDoubleArgument(0, SpecializedThunkJIT::fpRegT0, SpecializedThunkJIT::regT0); @@ -293,7 +293,7 @@ MacroAssemblerCodeRef logThunkGenerator(JSGlobalData* globalData) MacroAssemblerCodeRef absThunkGenerator(JSGlobalData* globalData) { - SpecializedThunkJIT jit(1, globalData); + SpecializedThunkJIT jit(1); if (!jit.supportsFloatingPointAbs()) return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall()); MacroAssembler::Jump nonIntJump; @@ -313,7 +313,7 @@ MacroAssemblerCodeRef absThunkGenerator(JSGlobalData* globalData) MacroAssemblerCodeRef powThunkGenerator(JSGlobalData* globalData) { - SpecializedThunkJIT jit(2, globalData); + SpecializedThunkJIT jit(2); if (!jit.supportsFloatingPoint()) return MacroAssemblerCodeRef::createSelfManagedCodeRef(globalData->jitStubs->ctiNativeCall()); diff --git a/Source/JavaScriptCore/jsc.cpp b/Source/JavaScriptCore/jsc.cpp index f796333ca..43337e4ee 100644 --- a/Source/JavaScriptCore/jsc.cpp +++ b/Source/JavaScriptCore/jsc.cpp @@ -117,11 +117,13 @@ struct CommandLine { CommandLine() : interactive(false) , dump(false) + , exitCode(false) { } bool interactive; bool dump; + bool exitCode; Vector<Script> scripts; Vector<UString> arguments; }; @@ -299,7 +301,7 @@ EncodedJSValue JSC_HOST_CALL functionJSCStack(ExecState* exec) EncodedJSValue JSC_HOST_CALL functionGC(ExecState* exec) { - JSLock lock(SilenceAssertionsOnly); + JSLockHolder lock(exec); exec->heap()->collectAllGarbage(); return JSValue::encode(jsUndefined()); } @@ -307,7 +309,7 @@ EncodedJSValue JSC_HOST_CALL functionGC(ExecState* exec) #ifndef NDEBUG EncodedJSValue JSC_HOST_CALL functionReleaseExecutableMemory(ExecState* 
exec) { - JSLock lock(SilenceAssertionsOnly); + JSLockHolder lock(exec); exec->globalData().releaseExecutableMemory(); return JSValue::encode(jsUndefined()); } @@ -611,6 +613,12 @@ static NO_RETURN void printUsageStatement(bool help = false) #if HAVE(SIGNAL_H) fprintf(stderr, " -s Installs signal handlers that exit on a crash (Unix platforms only)\n"); #endif + fprintf(stderr, " -x Output exit code before terminating\n"); + fprintf(stderr, "\n"); + fprintf(stderr, " --options Dumps all JSC VM options and exits\n"); + fprintf(stderr, " --dumpOptions Dumps all JSC VM options before continuing\n"); + fprintf(stderr, " --<jsc VM option>=<value> Sets the specified JSC VM option\n"); + fprintf(stderr, "\n"); exit(help ? EXIT_SUCCESS : EXIT_FAILURE); } @@ -618,6 +626,9 @@ static NO_RETURN void printUsageStatement(bool help = false) static void parseArguments(int argc, char** argv, CommandLine& options) { int i = 1; + bool needToDumpOptions = false; + bool needToExit = false; + for (; i < argc; ++i) { const char* arg = argv[i]; if (!strcmp(arg, "-f")) { @@ -649,12 +660,36 @@ static void parseArguments(int argc, char** argv, CommandLine& options) #endif continue; } + if (!strcmp(arg, "-x")) { + options.exitCode = true; + continue; + } if (!strcmp(arg, "--")) { ++i; break; } if (!strcmp(arg, "-h") || !strcmp(arg, "--help")) printUsageStatement(true); + + if (!strcmp(arg, "--options")) { + needToDumpOptions = true; + needToExit = true; + continue; + } + if (!strcmp(arg, "--dumpOptions")) { + needToDumpOptions = true; + continue; + } + + // See if the -- option is a JSC VM option. + // NOTE: At this point, we know that the arg starts with "--". Skip it. + if (JSC::Options::setOption(&arg[2])) { + // The arg was recognized as a VM option and has been parsed. + continue; // Just continue with the next arg. + } + + // This arg is not recognized by the VM nor by jsc. Pass it on to the + // script. 
options.scripts.append(Script(true, argv[i])); } @@ -663,13 +698,18 @@ static void parseArguments(int argc, char** argv, CommandLine& options) for (; i < argc; ++i) options.arguments.append(argv[i]); + + if (needToDumpOptions) + JSC::Options::dumpAllOptions(stderr); + if (needToExit) + exit(EXIT_SUCCESS); } int jscmain(int argc, char** argv) { - JSLock lock(SilenceAssertionsOnly); - RefPtr<JSGlobalData> globalData = JSGlobalData::create(ThreadStackTypeLarge, LargeHeap); + JSLockHolder lock(globalData.get()); + int result; CommandLine options; parseArguments(argc, argv, options); @@ -679,7 +719,12 @@ int jscmain(int argc, char** argv) if (options.interactive && success) runInteractive(globalObject); - return success ? 0 : 3; + result = success ? 0 : 3; + + if (options.exitCode) + printf("jsc exiting %d\n", result); + + return result; } static bool fillBufferWithContentsOfFile(const UString& fileName, Vector<char>& buffer) diff --git a/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp index a7698be37..fbc0146b8 100644 --- a/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp +++ b/Source/JavaScriptCore/llint/LLIntSlowPaths.cpp @@ -895,7 +895,13 @@ LLINT_SLOW_PATH_DECL(slow_path_get_by_id) && !structure->typeInfo().prohibitsPropertyCaching()) { pc[4].u.structure.set( globalData, codeBlock->ownerExecutable(), structure); - pc[5].u.operand = slot.cachedOffset() * sizeof(JSValue); + if (isInlineOffset(slot.cachedOffset())) { + pc[0].u.opcode = bitwise_cast<void*>(&llint_op_get_by_id); + pc[5].u.operand = offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + JSObject::offsetOfInlineStorage(); + } else { + pc[0].u.opcode = bitwise_cast<void*>(&llint_op_get_by_id_out_of_line); + pc[5].u.operand = offsetInOutOfLineStorage(slot.cachedOffset()) * sizeof(JSValue); + } } } @@ -940,7 +946,7 @@ LLINT_SLOW_PATH_DECL(slow_path_put_by_id) && baseCell == slot.base()) { if (slot.type() == PutPropertySlot::NewProperty) { - if 
(!structure->isDictionary() && structure->previousID()->propertyStorageCapacity() == structure->propertyStorageCapacity()) { + if (!structure->isDictionary() && structure->previousID()->outOfLineCapacity() == structure->outOfLineCapacity()) { ASSERT(structure->previousID()->transitionWatchpointSetHasBeenInvalidated()); // This is needed because some of the methods we call @@ -952,7 +958,10 @@ LLINT_SLOW_PATH_DECL(slow_path_put_by_id) ASSERT(structure->previousID()->isObject()); pc[4].u.structure.set( globalData, codeBlock->ownerExecutable(), structure->previousID()); - pc[5].u.operand = slot.cachedOffset() * sizeof(JSValue); + if (isInlineOffset(slot.cachedOffset())) + pc[5].u.operand = offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + JSObject::offsetOfInlineStorage(); + else + pc[5].u.operand = offsetInOutOfLineStorage(slot.cachedOffset()) * sizeof(JSValue); pc[6].u.structure.set( globalData, codeBlock->ownerExecutable(), structure); StructureChain* chain = structure->prototypeChain(exec); @@ -960,16 +969,28 @@ LLINT_SLOW_PATH_DECL(slow_path_put_by_id) pc[7].u.structureChain.set( globalData, codeBlock->ownerExecutable(), chain); - if (pc[8].u.operand) - pc[0].u.opcode = bitwise_cast<void*>(&llint_op_put_by_id_transition_direct); - else - pc[0].u.opcode = bitwise_cast<void*>(&llint_op_put_by_id_transition_normal); + if (pc[8].u.operand) { + if (isInlineOffset(slot.cachedOffset())) + pc[0].u.opcode = bitwise_cast<void*>(&llint_op_put_by_id_transition_direct); + else + pc[0].u.opcode = bitwise_cast<void*>(&llint_op_put_by_id_transition_direct_out_of_line); + } else { + if (isInlineOffset(slot.cachedOffset())) + pc[0].u.opcode = bitwise_cast<void*>(&llint_op_put_by_id_transition_normal); + else + pc[0].u.opcode = bitwise_cast<void*>(&llint_op_put_by_id_transition_normal_out_of_line); + } } } else { - pc[0].u.opcode = bitwise_cast<void*>(&llint_op_put_by_id); pc[4].u.structure.set( globalData, codeBlock->ownerExecutable(), structure); - pc[5].u.operand = 
slot.cachedOffset() * sizeof(JSValue); + if (isInlineOffset(slot.cachedOffset())) { + pc[0].u.opcode = bitwise_cast<void*>(&llint_op_put_by_id); + pc[5].u.operand = offsetInInlineStorage(slot.cachedOffset()) * sizeof(JSValue) + JSObject::offsetOfInlineStorage(); + } else { + pc[0].u.opcode = bitwise_cast<void*>(&llint_op_put_by_id_out_of_line); + pc[5].u.operand = offsetInOutOfLineStorage(slot.cachedOffset()) * sizeof(JSValue); + } } } } diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm index e59ddeba4..492535bb2 100644 --- a/Source/JavaScriptCore/llint/LowLevelInterpreter.asm +++ b/Source/JavaScriptCore/llint/LowLevelInterpreter.asm @@ -84,6 +84,13 @@ const LLIntReturnPC = ArgumentCount + TagOffset # String flags. const HashFlags8BitBuffer = 64 +# Property storage constants +if JSVALUE64 + const InlineStorageCapacity = 4 +else + const InlineStorageCapacity = 6 +end + # Allocation constants if JSVALUE64 const JSFinalObjectSizeClassIndex = 1 @@ -312,8 +319,7 @@ macro allocateBasicJSObject(sizeClassIndex, classInfoOffset, structure, result, storep scratch2, [result] storep structure, JSCell::m_structure[result] storep 0, JSObject::m_inheritorID[result] - addp sizeof JSObject, result, scratch1 - storep scratch1, JSObject::m_propertyStorage[result] + storep 0, JSObject::m_outOfLineStorage[result] end end @@ -481,6 +487,21 @@ _llint_op_resolve_with_this: dispatch(5) +macro withInlineStorage(object, propertyStorage, continuation) + # Indicate that the object is the property storage, and that the + # property storage register is unused. + continuation(object, propertyStorage) +end + +macro withOutOfLineStorage(object, propertyStorage, continuation) + loadp JSObject::m_outOfLineStorage[object], propertyStorage + # Indicate that the propertyStorage register now points to the + # property storage, and that the object register may be reused + # if the object pointer is not needed anymore. 
+ continuation(propertyStorage, object) +end + + _llint_op_del_by_id: traceExecution() callSlowPath(_llint_slow_path_del_by_id) diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm index d27fd8229..9d6304de7 100644 --- a/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm +++ b/Source/JavaScriptCore/llint/LowLevelInterpreter32_64.asm @@ -919,6 +919,24 @@ _llint_op_is_string: dispatch(3) +macro loadPropertyAtVariableOffsetKnownNotFinal(propertyOffset, objectAndStorage, tag, payload) + assert(macro (ok) bigteq propertyOffset, InlineStorageCapacity, ok end) + loadp JSObject::m_outOfLineStorage[objectAndStorage], objectAndStorage + loadi TagOffset - 8 * InlineStorageCapacity[objectAndStorage, propertyOffset, 8], tag + loadi PayloadOffset - 8 * InlineStorageCapacity[objectAndStorage, propertyOffset, 8], payload +end + +macro loadPropertyAtVariableOffset(propertyOffset, objectAndStorage, tag, payload) + bilt propertyOffset, InlineStorageCapacity, .isInline + loadp JSObject::m_outOfLineStorage[objectAndStorage], objectAndStorage + jmp .ready +.isInline: + addp JSFinalObject::m_inlineStorage + InlineStorageCapacity * 8, objectAndStorage +.ready: + loadi TagOffset - 8 * InlineStorageCapacity[objectAndStorage, propertyOffset, 8], tag + loadi PayloadOffset - 8 * InlineStorageCapacity[objectAndStorage, propertyOffset, 8], payload +end + macro resolveGlobal(size, slow) # Operands are as follows: # 4[PC] Destination for the load. 
@@ -930,9 +948,7 @@ macro resolveGlobal(size, slow) loadp JSCell::m_structure[t0], t1 bpneq t1, 12[PC], slow loadi 16[PC], t1 - loadp JSObject::m_propertyStorage[t0], t0 - loadi TagOffset[t0, t1, 8], t2 - loadi PayloadOffset[t0, t1, 8], t3 + loadPropertyAtVariableOffsetKnownNotFinal(t1, t0, t2, t3) loadi 4[PC], t0 storei t2, TagOffset[cfr, t0, 8] storei t3, PayloadOffset[cfr, t0, 8] @@ -1087,31 +1103,44 @@ _llint_op_put_global_var_check: dispatch(5) -_llint_op_get_by_id: +# We only do monomorphic get_by_id caching for now, and we do not modify the +# opcode. We do, however, allow for the cache to change anytime if fails, since +# ping-ponging is free. At best we get lucky and the get_by_id will continue +# to take fast path on the new cache. At worst we take slow path, which is what +# we would have been doing anyway. + +macro getById(getPropertyStorage) traceExecution() - # We only do monomorphic get_by_id caching for now, and we do not modify the - # opcode. We do, however, allow for the cache to change anytime if fails, since - # ping-ponging is free. At best we get lucky and the get_by_id will continue - # to take fast path on the new cache. At worst we take slow path, which is what - # we would have been doing anyway. 
loadi 8[PC], t0 loadi 16[PC], t1 loadConstantOrVariablePayload(t0, CellTag, t3, .opGetByIdSlow) loadi 20[PC], t2 - loadp JSObject::m_propertyStorage[t3], t0 - bpneq JSCell::m_structure[t3], t1, .opGetByIdSlow - loadi 4[PC], t1 - loadi TagOffset[t0, t2], t3 - loadi PayloadOffset[t0, t2], t2 - storei t3, TagOffset[cfr, t1, 8] - storei t2, PayloadOffset[cfr, t1, 8] - loadi 32[PC], t1 - valueProfile(t3, t2, t1) - dispatch(9) + getPropertyStorage( + t3, + t0, + macro (propertyStorage, scratch) + bpneq JSCell::m_structure[t3], t1, .opGetByIdSlow + loadi 4[PC], t1 + loadi TagOffset[propertyStorage, t2], scratch + loadi PayloadOffset[propertyStorage, t2], t2 + storei scratch, TagOffset[cfr, t1, 8] + storei t2, PayloadOffset[cfr, t1, 8] + loadi 32[PC], t1 + valueProfile(scratch, t2, t1) + dispatch(9) + end) + + .opGetByIdSlow: + callSlowPath(_llint_slow_path_get_by_id) + dispatch(9) +end + +_llint_op_get_by_id: + getById(withInlineStorage) -.opGetByIdSlow: - callSlowPath(_llint_slow_path_get_by_id) - dispatch(9) + +_llint_op_get_by_id_out_of_line: + getById(withOutOfLineStorage) _llint_op_get_arguments_length: @@ -1130,68 +1159,96 @@ _llint_op_get_arguments_length: dispatch(4) -_llint_op_put_by_id: +macro putById(getPropertyStorage) traceExecution() loadi 4[PC], t3 loadi 16[PC], t1 loadConstantOrVariablePayload(t3, CellTag, t0, .opPutByIdSlow) loadi 12[PC], t2 - loadp JSObject::m_propertyStorage[t0], t3 - bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow - loadi 20[PC], t1 - loadConstantOrVariable2Reg(t2, t0, t2) - writeBarrier(t0, t2) - storei t0, TagOffset[t3, t1] - storei t2, PayloadOffset[t3, t1] - dispatch(9) + getPropertyStorage( + t0, + t3, + macro (propertyStorage, scratch) + bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow + loadi 20[PC], t1 + loadConstantOrVariable2Reg(t2, scratch, t2) + writeBarrier(scratch, t2) + storei scratch, TagOffset[propertyStorage, t1] + storei t2, PayloadOffset[propertyStorage, t1] + dispatch(9) + end) +end + +_llint_op_put_by_id: + 
putById(withInlineStorage) .opPutByIdSlow: callSlowPath(_llint_slow_path_put_by_id) dispatch(9) -macro putByIdTransition(additionalChecks) +_llint_op_put_by_id_out_of_line: + putById(withOutOfLineStorage) + + +macro putByIdTransition(additionalChecks, getPropertyStorage) traceExecution() loadi 4[PC], t3 loadi 16[PC], t1 loadConstantOrVariablePayload(t3, CellTag, t0, .opPutByIdSlow) loadi 12[PC], t2 bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow - additionalChecks(t1, t3, .opPutByIdSlow) + additionalChecks(t1, t3) loadi 20[PC], t1 - loadp JSObject::m_propertyStorage[t0], t3 - addp t1, t3 - loadConstantOrVariable2Reg(t2, t1, t2) - writeBarrier(t1, t2) - storei t1, TagOffset[t3] - loadi 24[PC], t1 - storei t2, PayloadOffset[t3] - storep t1, JSCell::m_structure[t0] - dispatch(9) + getPropertyStorage( + t0, + t3, + macro (propertyStorage, scratch) + addp t1, propertyStorage, t3 + loadConstantOrVariable2Reg(t2, t1, t2) + writeBarrier(t1, t2) + storei t1, TagOffset[t3] + loadi 24[PC], t1 + storei t2, PayloadOffset[t3] + storep t1, JSCell::m_structure[t0] + dispatch(9) + end) +end + +macro noAdditionalChecks(oldStructure, scratch) +end + +macro structureChainChecks(oldStructure, scratch) + const protoCell = oldStructure # Reusing the oldStructure register for the proto + + loadp 28[PC], scratch + assert(macro (ok) btpnz scratch, ok end) + loadp StructureChain::m_vector[scratch], scratch + assert(macro (ok) btpnz scratch, ok end) + bieq Structure::m_prototype + TagOffset[oldStructure], NullTag, .done +.loop: + loadi Structure::m_prototype + PayloadOffset[oldStructure], protoCell + loadp JSCell::m_structure[protoCell], oldStructure + bpneq oldStructure, [scratch], .opPutByIdSlow + addp 4, scratch + bineq Structure::m_prototype + TagOffset[oldStructure], NullTag, .loop +.done: end _llint_op_put_by_id_transition_direct: - putByIdTransition(macro (oldStructure, scratch, slow) end) + putByIdTransition(noAdditionalChecks, withInlineStorage) + + 
+_llint_op_put_by_id_transition_direct_out_of_line: + putByIdTransition(noAdditionalChecks, withOutOfLineStorage) _llint_op_put_by_id_transition_normal: - putByIdTransition( - macro (oldStructure, scratch, slow) - const protoCell = oldStructure # Reusing the oldStructure register for the proto - - loadp 28[PC], scratch - assert(macro (ok) btpnz scratch, ok end) - loadp StructureChain::m_vector[scratch], scratch - assert(macro (ok) btpnz scratch, ok end) - bieq Structure::m_prototype + TagOffset[oldStructure], NullTag, .done - .loop: - loadi Structure::m_prototype + PayloadOffset[oldStructure], protoCell - loadp JSCell::m_structure[protoCell], oldStructure - bpneq oldStructure, [scratch], slow - addp 4, scratch - bineq Structure::m_prototype + TagOffset[oldStructure], NullTag, .loop - .done: - end) + putByIdTransition(structureChainChecks, withInlineStorage) + + +_llint_op_put_by_id_transition_normal_out_of_line: + putByIdTransition(structureChainChecks, withOutOfLineStorage) _llint_op_get_by_val: @@ -1261,9 +1318,8 @@ _llint_op_get_by_pname: loadi [cfr, t0, 8], t0 subi 1, t0 biaeq t0, JSPropertyNameIterator::m_numCacheableSlots[t3], .opGetByPnameSlow - loadp JSObject::m_propertyStorage[t2], t2 - loadi TagOffset[t2, t0, 8], t1 - loadi PayloadOffset[t2, t0, 8], t3 + addi JSPropertyNameIterator::m_offsetBase[t3], t0 + loadPropertyAtVariableOffset(t0, t2, t1, t3) loadi 4[PC], t0 storei t1, TagOffset[cfr, t0, 8] storei t3, PayloadOffset[cfr, t0, 8] diff --git a/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm b/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm index a153586f4..a7a2ce88f 100644 --- a/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm +++ b/Source/JavaScriptCore/llint/LowLevelInterpreter64.asm @@ -778,6 +778,22 @@ _llint_op_is_string: dispatch(3) +macro loadPropertyAtVariableOffsetKnownNotFinal(propertyOffset, objectAndStorage, value) + assert(macro (ok) bigteq propertyOffset, InlineStorageCapacity, ok end) + loadp 
JSObject::m_outOfLineStorage[objectAndStorage], objectAndStorage + loadp -8 * InlineStorageCapacity[objectAndStorage, propertyOffset, 8], value +end + +macro loadPropertyAtVariableOffset(propertyOffset, objectAndStorage, value) + bilt propertyOffset, InlineStorageCapacity, .isInline + loadp JSObject::m_outOfLineStorage[objectAndStorage], objectAndStorage + jmp .ready +.isInline: + addp JSFinalObject::m_inlineStorage + InlineStorageCapacity * 8, objectAndStorage +.ready: + loadp -8 * InlineStorageCapacity[objectAndStorage, propertyOffset, 8], value +end + macro resolveGlobal(size, slow) # Operands are as follows: # 8[PB, PC, 8] Destination for the load. @@ -789,8 +805,7 @@ macro resolveGlobal(size, slow) loadp JSCell::m_structure[t0], t1 bpneq t1, 24[PB, PC, 8], slow loadis 32[PB, PC, 8], t1 - loadp JSObject::m_propertyStorage[t0], t0 - loadp [t0, t1, 8], t2 + loadPropertyAtVariableOffset(t1, t0, t2) loadis 8[PB, PC, 8], t0 storep t2, [cfr, t0, 8] loadp (size - 1) * 8[PB, PC, 8], t0 @@ -937,7 +952,7 @@ _llint_op_put_global_var_check: dispatch(5) -_llint_op_get_by_id: +macro getById(getPropertyStorage) traceExecution() # We only do monomorphic get_by_id caching for now, and we do not modify the # opcode. 
We do, however, allow for the cache to change anytime if fails, since @@ -948,18 +963,30 @@ _llint_op_get_by_id: loadp 32[PB, PC, 8], t1 loadConstantOrVariableCell(t0, t3, .opGetByIdSlow) loadis 40[PB, PC, 8], t2 - loadp JSObject::m_propertyStorage[t3], t0 - bpneq JSCell::m_structure[t3], t1, .opGetByIdSlow - loadis 8[PB, PC, 8], t1 - loadp [t0, t2], t3 - storep t3, [cfr, t1, 8] - loadp 64[PB, PC, 8], t1 - valueProfile(t3, t1) - dispatch(9) + getPropertyStorage( + t3, + t0, + macro (propertyStorage, scratch) + bpneq JSCell::m_structure[t3], t1, .opGetByIdSlow + loadis 8[PB, PC, 8], t1 + loadp [propertyStorage, t2], scratch + storep scratch, [cfr, t1, 8] + loadp 64[PB, PC, 8], t1 + valueProfile(scratch, t1) + dispatch(9) + end) + + .opGetByIdSlow: + callSlowPath(_llint_slow_path_get_by_id) + dispatch(9) +end + +_llint_op_get_by_id: + getById(withInlineStorage) -.opGetByIdSlow: - callSlowPath(_llint_slow_path_get_by_id) - dispatch(9) + +_llint_op_get_by_id_out_of_line: + getById(withOutOfLineStorage) _llint_op_get_arguments_length: @@ -978,65 +1005,93 @@ _llint_op_get_arguments_length: dispatch(4) -_llint_op_put_by_id: +macro putById(getPropertyStorage) traceExecution() loadis 8[PB, PC, 8], t3 loadp 32[PB, PC, 8], t1 loadConstantOrVariableCell(t3, t0, .opPutByIdSlow) loadis 24[PB, PC, 8], t2 - loadp JSObject::m_propertyStorage[t0], t3 - bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow - loadis 40[PB, PC, 8], t1 - loadConstantOrVariable(t2, t0) - writeBarrier(t0) - storep t0, [t3, t1] - dispatch(9) + getPropertyStorage( + t0, + t3, + macro (propertyStorage, scratch) + bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow + loadis 40[PB, PC, 8], t1 + loadConstantOrVariable(t2, scratch) + writeBarrier(t0) + storep scratch, [propertyStorage, t1] + dispatch(9) + end) +end + +_llint_op_put_by_id: + putById(withInlineStorage) .opPutByIdSlow: callSlowPath(_llint_slow_path_put_by_id) dispatch(9) -macro putByIdTransition(additionalChecks) +_llint_op_put_by_id_out_of_line: + 
putById(withOutOfLineStorage) + + +macro putByIdTransition(additionalChecks, getPropertyStorage) traceExecution() loadis 8[PB, PC, 8], t3 loadp 32[PB, PC, 8], t1 loadConstantOrVariableCell(t3, t0, .opPutByIdSlow) loadis 24[PB, PC, 8], t2 bpneq JSCell::m_structure[t0], t1, .opPutByIdSlow - additionalChecks(t1, t3, .opPutByIdSlow) + additionalChecks(t1, t3) loadis 40[PB, PC, 8], t1 - loadp JSObject::m_propertyStorage[t0], t3 - addp t1, t3 - loadConstantOrVariable(t2, t1) - writeBarrier(t1) - storep t1, [t3] - loadp 48[PB, PC, 8], t1 - storep t1, JSCell::m_structure[t0] - dispatch(9) + getPropertyStorage( + t0, + t3, + macro (propertyStorage, scratch) + addp t1, propertyStorage, t3 + loadConstantOrVariable(t2, t1) + writeBarrier(t1) + storep t1, [t3] + loadp 48[PB, PC, 8], t1 + storep t1, JSCell::m_structure[t0] + dispatch(9) + end) +end + +macro noAdditionalChecks(oldStructure, scratch) +end + +macro structureChainChecks(oldStructure, scratch) + const protoCell = oldStructure # Reusing the oldStructure register for the proto + loadp 56[PB, PC, 8], scratch + assert(macro (ok) btpnz scratch, ok end) + loadp StructureChain::m_vector[scratch], scratch + assert(macro (ok) btpnz scratch, ok end) + bpeq Structure::m_prototype[oldStructure], ValueNull, .done +.loop: + loadp Structure::m_prototype[oldStructure], protoCell + loadp JSCell::m_structure[protoCell], oldStructure + bpneq oldStructure, [scratch], .opPutByIdSlow + addp 8, scratch + bpneq Structure::m_prototype[oldStructure], ValueNull, .loop +.done: end _llint_op_put_by_id_transition_direct: - putByIdTransition(macro (oldStructure, scratch, slow) end) + putByIdTransition(noAdditionalChecks, withInlineStorage) + + +_llint_op_put_by_id_transition_direct_out_of_line: + putByIdTransition(noAdditionalChecks, withOutOfLineStorage) _llint_op_put_by_id_transition_normal: - putByIdTransition( - macro (oldStructure, scratch, slow) - const protoCell = oldStructure # Reusing the oldStructure register for the proto - loadp 56[PB, 
PC, 8], scratch - assert(macro (ok) btpnz scratch, ok end) - loadp StructureChain::m_vector[scratch], scratch - assert(macro (ok) btpnz scratch, ok end) - bpeq Structure::m_prototype[oldStructure], ValueNull, .done - .loop: - loadp Structure::m_prototype[oldStructure], protoCell - loadp JSCell::m_structure[protoCell], oldStructure - bpneq oldStructure, [scratch], slow - addp 8, scratch - bpneq Structure::m_prototype[oldStructure], ValueNull, .loop - .done: - end) + putByIdTransition(structureChainChecks, withInlineStorage) + + +_llint_op_put_by_id_transition_normal_out_of_line: + putByIdTransition(structureChainChecks, withOutOfLineStorage) _llint_op_get_by_val: @@ -1106,8 +1161,8 @@ _llint_op_get_by_pname: loadi PayloadOffset[cfr, t3, 8], t3 subi 1, t3 biaeq t3, JSPropertyNameIterator::m_numCacheableSlots[t1], .opGetByPnameSlow - loadp JSObject::m_propertyStorage[t0], t0 - loadp [t0, t3, 8], t0 + addi JSPropertyNameIterator::m_offsetBase[t1], t3 + loadPropertyAtVariableOffset(t3, t0, t0) loadis 8[PB, PC, 8], t1 storep t0, [cfr, t1, 8] dispatch(7) diff --git a/Source/JavaScriptCore/offlineasm/x86.rb b/Source/JavaScriptCore/offlineasm/x86.rb index e6a5c92ca..383526b25 100644 --- a/Source/JavaScriptCore/offlineasm/x86.rb +++ b/Source/JavaScriptCore/offlineasm/x86.rb @@ -555,7 +555,11 @@ class Instruction end def handleX86Add(kind) - if operands.size == 3 and operands[0].is_a? Immediate + if operands.size == 3 and operands[1] == operands[2] + unless Immediate.new(nil, 0) == operands[0] + $asm.puts "add#{x86Suffix(kind)} #{operands[0].x86Operand(kind)}, #{operands[2].x86Operand(kind)}" + end + elsif operands.size == 3 and operands[0].is_a? Immediate raise unless operands[1].is_a? RegisterID raise unless operands[2].is_a? RegisterID if operands[0].value == 0 @@ -568,7 +572,11 @@ class Instruction elsif operands.size == 3 and operands[0].is_a? RegisterID raise unless operands[1].is_a? RegisterID raise unless operands[2].is_a? 
RegisterID - $asm.puts "lea#{x86Suffix(kind)} (#{operands[0].x86Operand(kind)}, #{operands[1].x86Operand(kind)}), #{operands[2].x86Operand(kind)}" + if operands[0] == operands[2] + $asm.puts "add#{x86Suffix(kind)} #{operands[1].x86Operand(kind)}, #{operands[2].x86Operand(kind)}" + else + $asm.puts "lea#{x86Suffix(kind)} (#{operands[0].x86Operand(kind)}, #{operands[1].x86Operand(kind)}), #{operands[2].x86Operand(kind)}" + end else unless Immediate.new(nil, 0) == operands[0] $asm.puts "add#{x86Suffix(kind)} #{x86Operands(kind, kind)}" diff --git a/Source/JavaScriptCore/parser/ASTBuilder.h b/Source/JavaScriptCore/parser/ASTBuilder.h index 0eb60cf89..d4c170617 100644 --- a/Source/JavaScriptCore/parser/ASTBuilder.h +++ b/Source/JavaScriptCore/parser/ASTBuilder.h @@ -247,11 +247,11 @@ public: return new (m_globalData) ConditionalNode(lineNumber, condition, lhs, rhs); } - ExpressionNode* createAssignResolve(int lineNumber, const Identifier& ident, ExpressionNode* rhs, bool rhsHasAssignment, int start, int divot, int end) + ExpressionNode* createAssignResolve(int lineNumber, const Identifier& ident, ExpressionNode* rhs, int start, int divot, int end) { if (rhs->isFuncExprNode()) static_cast<FuncExprNode*>(rhs)->body()->setInferredName(ident); - AssignResolveNode* node = new (m_globalData) AssignResolveNode(lineNumber, ident, rhs, rhsHasAssignment); + AssignResolveNode* node = new (m_globalData) AssignResolveNode(lineNumber, ident, rhs); setExceptionLocation(node, start, divot, end); return node; } @@ -347,9 +347,9 @@ public: return result; } - StatementNode* createForLoop(int lineNumber, ExpressionNode* initializer, ExpressionNode* condition, ExpressionNode* iter, StatementNode* statements, bool b, int start, int end) + StatementNode* createForLoop(int lineNumber, ExpressionNode* initializer, ExpressionNode* condition, ExpressionNode* iter, StatementNode* statements, int start, int end) { - ForNode* result = new (m_globalData) ForNode(lineNumber, initializer, condition, 
iter, statements, b); + ForNode* result = new (m_globalData) ForNode(lineNumber, initializer, condition, iter, statements); result->setLoc(start, end); return result; } @@ -364,7 +364,7 @@ public: StatementNode* createForInLoop(int lineNumber, ExpressionNode* lhs, ExpressionNode* iter, StatementNode* statements, int eStart, int eDivot, int eEnd, int start, int end) { - ForInNode* result = new (m_globalData) ForInNode(m_globalData, lineNumber, lhs, iter, statements); + ForInNode* result = new (m_globalData) ForInNode(lineNumber, lhs, iter, statements); result->setLoc(start, end); setExceptionLocation(result, eStart, eDivot, eEnd); return result; @@ -907,14 +907,14 @@ ExpressionNode* ASTBuilder::makeBinaryNode(int lineNumber, int token, pair<Expre ExpressionNode* ASTBuilder::makeAssignNode(int lineNumber, ExpressionNode* loc, Operator op, ExpressionNode* expr, bool locHasAssignments, bool exprHasAssignments, int start, int divot, int end) { if (!loc->isLocation()) - return new (m_globalData) AssignErrorNode(lineNumber, loc, op, expr, divot, divot - start, end - divot); + return new (m_globalData) AssignErrorNode(lineNumber, divot, divot - start, end - divot); if (loc->isResolveNode()) { ResolveNode* resolve = static_cast<ResolveNode*>(loc); if (op == OpEqual) { if (expr->isFuncExprNode()) static_cast<FuncExprNode*>(expr)->body()->setInferredName(resolve->identifier()); - AssignResolveNode* node = new (m_globalData) AssignResolveNode(lineNumber, resolve->identifier(), expr, exprHasAssignments); + AssignResolveNode* node = new (m_globalData) AssignResolveNode(lineNumber, resolve->identifier(), expr); setExceptionLocation(node, start, divot, end); return node; } @@ -944,7 +944,7 @@ ExpressionNode* ASTBuilder::makeAssignNode(int lineNumber, ExpressionNode* loc, ExpressionNode* ASTBuilder::makePrefixNode(int lineNumber, ExpressionNode* expr, Operator op, int start, int divot, int end) { if (!expr->isLocation()) - return new (m_globalData) PrefixErrorNode(lineNumber, expr, 
op, divot, divot - start, end - divot); + return new (m_globalData) PrefixErrorNode(lineNumber, op, divot, divot - start, end - divot); if (expr->isResolveNode()) { ResolveNode* resolve = static_cast<ResolveNode*>(expr); @@ -966,7 +966,7 @@ ExpressionNode* ASTBuilder::makePrefixNode(int lineNumber, ExpressionNode* expr, ExpressionNode* ASTBuilder::makePostfixNode(int lineNumber, ExpressionNode* expr, Operator op, int start, int divot, int end) { if (!expr->isLocation()) - return new (m_globalData) PostfixErrorNode(lineNumber, expr, op, divot, divot - start, end - divot); + return new (m_globalData) PostfixErrorNode(lineNumber, op, divot, divot - start, end - divot); if (expr->isResolveNode()) { ResolveNode* resolve = static_cast<ResolveNode*>(expr); diff --git a/Source/JavaScriptCore/parser/NodeConstructors.h b/Source/JavaScriptCore/parser/NodeConstructors.h index e496d2342..be50eeafe 100644 --- a/Source/JavaScriptCore/parser/NodeConstructors.h +++ b/Source/JavaScriptCore/parser/NodeConstructors.h @@ -317,10 +317,9 @@ namespace JSC { { } - inline PostfixErrorNode::PostfixErrorNode(int lineNumber, ExpressionNode* expr, Operator oper, unsigned divot, unsigned startOffset, unsigned endOffset) + inline PostfixErrorNode::PostfixErrorNode(int lineNumber, Operator oper, unsigned divot, unsigned startOffset, unsigned endOffset) : ExpressionNode(lineNumber) , ThrowableSubExpressionData(divot, startOffset, endOffset) - , m_expr(expr) , m_operator(oper) { } @@ -396,10 +395,9 @@ namespace JSC { { } - inline PrefixErrorNode::PrefixErrorNode(int lineNumber, ExpressionNode* expr, Operator oper, unsigned divot, unsigned startOffset, unsigned endOffset) + inline PrefixErrorNode::PrefixErrorNode(int lineNumber, Operator oper, unsigned divot, unsigned startOffset, unsigned endOffset) : ExpressionNode(lineNumber) , ThrowableExpressionData(divot, startOffset, endOffset) - , m_expr(expr) , m_operator(oper) { } @@ -592,11 +590,10 @@ namespace JSC { { } - inline 
AssignResolveNode::AssignResolveNode(int lineNumber, const Identifier& ident, ExpressionNode* right, bool rightHasAssignments) + inline AssignResolveNode::AssignResolveNode(int lineNumber, const Identifier& ident, ExpressionNode* right) : ExpressionNode(lineNumber) , m_ident(ident) , m_right(right) - , m_rightHasAssignments(rightHasAssignments) { } @@ -644,12 +641,9 @@ namespace JSC { { } - inline AssignErrorNode::AssignErrorNode(int lineNumber, ExpressionNode* left, Operator oper, ExpressionNode* right, unsigned divot, unsigned startOffset, unsigned endOffset) + inline AssignErrorNode::AssignErrorNode(int lineNumber, unsigned divot, unsigned startOffset, unsigned endOffset) : ExpressionNode(lineNumber) , ThrowableExpressionData(divot, startOffset, endOffset) - , m_left(left) - , m_operator(oper) - , m_right(right) { } @@ -719,13 +713,12 @@ namespace JSC { { } - inline ForNode::ForNode(int lineNumber, ExpressionNode* expr1, ExpressionNode* expr2, ExpressionNode* expr3, StatementNode* statement, bool expr1WasVarDecl) + inline ForNode::ForNode(int lineNumber, ExpressionNode* expr1, ExpressionNode* expr2, ExpressionNode* expr3, StatementNode* statement) : StatementNode(lineNumber) , m_expr1(expr1) , m_expr2(expr2) , m_expr3(expr3) , m_statement(statement) - , m_expr1WasVarDecl(expr1 && expr1WasVarDecl) { ASSERT(statement); } @@ -865,9 +858,8 @@ namespace JSC { { } - inline ForInNode::ForInNode(JSGlobalData* globalData, int lineNumber, ExpressionNode* l, ExpressionNode* expr, StatementNode* statement) + inline ForInNode::ForInNode(int lineNumber, ExpressionNode* l, ExpressionNode* expr, StatementNode* statement) : StatementNode(lineNumber) - , m_ident(globalData->propertyNames->nullIdentifier) , m_init(0) , m_lexpr(l) , m_expr(expr) @@ -878,7 +870,6 @@ namespace JSC { inline ForInNode::ForInNode(JSGlobalData* globalData, int lineNumber, const Identifier& ident, ExpressionNode* in, ExpressionNode* expr, StatementNode* statement, int divot, int startOffset, int 
endOffset) : StatementNode(lineNumber) - , m_ident(ident) , m_init(0) , m_lexpr(new (globalData) ResolveNode(lineNumber, ident, divot - startOffset)) , m_expr(expr) @@ -886,7 +877,7 @@ namespace JSC { , m_identIsVarDecl(true) { if (in) { - AssignResolveNode* node = new (globalData) AssignResolveNode(lineNumber, ident, in, true); + AssignResolveNode* node = new (globalData) AssignResolveNode(lineNumber, ident, in); node->setExceptionSourceCode(divot, divot - startOffset, endOffset - divot); m_init = node; } diff --git a/Source/JavaScriptCore/parser/Nodes.h b/Source/JavaScriptCore/parser/Nodes.h index 5c90bb9fe..5b15be44c 100644 --- a/Source/JavaScriptCore/parser/Nodes.h +++ b/Source/JavaScriptCore/parser/Nodes.h @@ -546,8 +546,6 @@ namespace JSC { const Identifier& m_ident; ArgumentsNode* m_args; - size_t m_index; // Used by LocalVarFunctionCallNode. - size_t m_scopeDepth; // Used by ScopedVarFunctionCallNode and NonLocalVarFunctionCallNode }; class FunctionCallBracketNode : public ExpressionNode, public ThrowableSubExpressionData { @@ -635,12 +633,11 @@ namespace JSC { class PostfixErrorNode : public ExpressionNode, public ThrowableSubExpressionData { public: - PostfixErrorNode(int, ExpressionNode*, Operator, unsigned divot, unsigned startOffset, unsigned endOffset); + PostfixErrorNode(int, Operator, unsigned divot, unsigned startOffset, unsigned endOffset); private: virtual RegisterID* emitBytecode(BytecodeGenerator&, RegisterID* = 0); - ExpressionNode* m_expr; Operator m_operator; }; @@ -754,12 +751,11 @@ namespace JSC { class PrefixErrorNode : public ExpressionNode, public ThrowableExpressionData { public: - PrefixErrorNode(int, ExpressionNode*, Operator, unsigned divot, unsigned startOffset, unsigned endOffset); + PrefixErrorNode(int, Operator, unsigned divot, unsigned startOffset, unsigned endOffset); private: virtual RegisterID* emitBytecode(BytecodeGenerator&, RegisterID* = 0); - ExpressionNode* m_expr; Operator m_operator; }; @@ -1004,22 +1000,19 @@ 
namespace JSC { const Identifier& m_ident; ExpressionNode* m_right; - size_t m_index; // Used by ReadModifyLocalVarNode. Operator m_operator; bool m_rightHasAssignments; }; class AssignResolveNode : public ExpressionNode, public ThrowableExpressionData { public: - AssignResolveNode(int, const Identifier&, ExpressionNode* right, bool rightHasAssignments); + AssignResolveNode(int, const Identifier&, ExpressionNode* right); private: virtual RegisterID* emitBytecode(BytecodeGenerator&, RegisterID* = 0); const Identifier& m_ident; ExpressionNode* m_right; - size_t m_index; // Used by ReadModifyLocalVarNode. - bool m_rightHasAssignments; }; class ReadModifyBracketNode : public ExpressionNode, public ThrowableSubExpressionData { @@ -1080,14 +1073,10 @@ namespace JSC { class AssignErrorNode : public ExpressionNode, public ThrowableExpressionData { public: - AssignErrorNode(int, ExpressionNode* left, Operator, ExpressionNode* right, unsigned divot, unsigned startOffset, unsigned endOffset); + AssignErrorNode(int, unsigned divot, unsigned startOffset, unsigned endOffset); private: virtual RegisterID* emitBytecode(BytecodeGenerator&, RegisterID* = 0); - - ExpressionNode* m_left; - Operator m_operator; - ExpressionNode* m_right; }; typedef Vector<ExpressionNode*, 8> ExpressionVector; @@ -1254,7 +1243,7 @@ namespace JSC { class ForNode : public StatementNode { public: - ForNode(int, ExpressionNode* expr1, ExpressionNode* expr2, ExpressionNode* expr3, StatementNode*, bool expr1WasVarDecl); + ForNode(int, ExpressionNode* expr1, ExpressionNode* expr2, ExpressionNode* expr3, StatementNode*); private: virtual RegisterID* emitBytecode(BytecodeGenerator&, RegisterID* = 0); @@ -1263,18 +1252,16 @@ namespace JSC { ExpressionNode* m_expr2; ExpressionNode* m_expr3; StatementNode* m_statement; - bool m_expr1WasVarDecl; }; class ForInNode : public StatementNode, public ThrowableExpressionData { public: - ForInNode(JSGlobalData*, int, ExpressionNode*, ExpressionNode*, StatementNode*); + 
ForInNode(int, ExpressionNode*, ExpressionNode*, StatementNode*); ForInNode(JSGlobalData*, int, const Identifier&, ExpressionNode*, ExpressionNode*, StatementNode*, int divot, int startOffset, int endOffset); private: virtual RegisterID* emitBytecode(BytecodeGenerator&, RegisterID* = 0); - const Identifier& m_ident; ExpressionNode* m_init; ExpressionNode* m_lexpr; ExpressionNode* m_expr; diff --git a/Source/JavaScriptCore/parser/Parser.cpp b/Source/JavaScriptCore/parser/Parser.cpp index d88a9a8b7..558f50e5f 100644 --- a/Source/JavaScriptCore/parser/Parser.cpp +++ b/Source/JavaScriptCore/parser/Parser.cpp @@ -260,13 +260,12 @@ template <class TreeBuilder> TreeExpression Parser<LexerType>::parseVarDeclarati int varDivot = tokenStart() + 1; initStart = tokenStart(); next(TreeBuilder::DontBuildStrings); // consume '=' - int initialAssignments = m_assignmentCount; TreeExpression initializer = parseAssignmentExpression(context); initEnd = lastTokenEnd(); lastInitializer = initializer; failIfFalse(initializer); - TreeExpression node = context.createAssignResolve(m_lexer->lastLineNumber(), *name, initializer, initialAssignments != m_assignmentCount, varStart, varDivot, lastTokenEnd()); + TreeExpression node = context.createAssignResolve(m_lexer->lastLineNumber(), *name, initializer, varStart, varDivot, lastTokenEnd()); if (!varDecls) varDecls = node; else @@ -314,14 +313,12 @@ template <class TreeBuilder> TreeStatement Parser<LexerType>::parseForStatement( int declsStart = 0; int declsEnd = 0; TreeExpression decls = 0; - bool hasDeclaration = false; if (match(VAR)) { /* for (var IDENT in expression) statement for (var IDENT = expression in expression) statement for (var varDeclarationList; expressionOpt; expressionOpt) */ - hasDeclaration = true; const Identifier* forInTarget = 0; TreeExpression forInInitializer = 0; m_allowsIn = false; @@ -391,7 +388,7 @@ template <class TreeBuilder> TreeStatement Parser<LexerType>::parseForStatement( TreeStatement statement = 
parseStatement(context, unused); endLoop(); failIfFalse(statement); - return context.createForLoop(m_lexer->lastLineNumber(), decls, condition, increment, statement, hasDeclaration, startLine, endLine); + return context.createForLoop(m_lexer->lastLineNumber(), decls, condition, increment, statement, startLine, endLine); } // For-in loop diff --git a/Source/JavaScriptCore/parser/SyntaxChecker.h b/Source/JavaScriptCore/parser/SyntaxChecker.h index c2c93756d..fe3ce714f 100644 --- a/Source/JavaScriptCore/parser/SyntaxChecker.h +++ b/Source/JavaScriptCore/parser/SyntaxChecker.h @@ -148,7 +148,7 @@ public: ExpressionType createNewExpr(int, ExpressionType, int, int, int, int) { return NewExpr; } ExpressionType createNewExpr(int, ExpressionType, int, int) { return NewExpr; } ExpressionType createConditionalExpr(int, ExpressionType, ExpressionType, ExpressionType) { return ConditionalExpr; } - ExpressionType createAssignResolve(int, const Identifier&, ExpressionType, bool, int, int, int) { return AssignmentExpr; } + ExpressionType createAssignResolve(int, const Identifier&, ExpressionType, int, int, int) { return AssignmentExpr; } ExpressionType createFunctionExpr(int, const Identifier*, int, int, int, int, int, int) { return FunctionExpr; } int createFunctionBody(int, bool) { return 1; } int createArguments() { return 1; } @@ -183,7 +183,7 @@ public: int createExprStatement(int, int, int, int) { return 1; } int createIfStatement(int, int, int, int, int) { return 1; } int createIfStatement(int, int, int, int, int, int) { return 1; } - int createForLoop(int, int, int, int, int, bool, int, int) { return 1; } + int createForLoop(int, int, int, int, int, int, int) { return 1; } int createForInLoop(int, const Identifier*, int, int, int, int, int, int, int, int, int, int) { return 1; } int createForInLoop(int, int, int, int, int, int, int, int, int) { return 1; } int createEmptyStatement(int) { return 1; } diff --git a/Source/JavaScriptCore/runtime/Completion.cpp 
b/Source/JavaScriptCore/runtime/Completion.cpp index 311d660a0..1c35b9626 100644 --- a/Source/JavaScriptCore/runtime/Completion.cpp +++ b/Source/JavaScriptCore/runtime/Completion.cpp @@ -37,7 +37,7 @@ namespace JSC { bool checkSyntax(ExecState* exec, const SourceCode& source, JSValue* returnedException) { - JSLock lock(exec); + JSLockHolder lock(exec); ASSERT(exec->globalData().identifierTable == wtfThreadData().currentIdentifierTable()); ProgramExecutable* program = ProgramExecutable::create(exec, source); @@ -53,7 +53,7 @@ bool checkSyntax(ExecState* exec, const SourceCode& source, JSValue* returnedExc JSValue evaluate(ExecState* exec, ScopeChainNode* scopeChain, const SourceCode& source, JSValue thisValue, JSValue* returnedException) { - JSLock lock(exec); + JSLockHolder lock(exec); ASSERT(exec->globalData().identifierTable == wtfThreadData().currentIdentifierTable()); if (exec->globalData().isCollectorBusy()) CRASH(); diff --git a/Source/JavaScriptCore/runtime/GCActivityCallback.h b/Source/JavaScriptCore/runtime/GCActivityCallback.h index 18bbd31e0..67ee17420 100644 --- a/Source/JavaScriptCore/runtime/GCActivityCallback.h +++ b/Source/JavaScriptCore/runtime/GCActivityCallback.h @@ -69,7 +69,7 @@ protected: class DefaultGCActivityCallback : public GCActivityCallback { public: - static PassOwnPtr<DefaultGCActivityCallback> create(Heap*); + static DefaultGCActivityCallback* create(Heap*); DefaultGCActivityCallback(Heap*); @@ -91,9 +91,9 @@ private: #endif }; -inline PassOwnPtr<DefaultGCActivityCallback> DefaultGCActivityCallback::create(Heap* heap) +inline DefaultGCActivityCallback* DefaultGCActivityCallback::create(Heap* heap) { - return adoptPtr(new DefaultGCActivityCallback(heap)); + return new DefaultGCActivityCallback(heap); } } diff --git a/Source/JavaScriptCore/runtime/InitializeThreading.cpp b/Source/JavaScriptCore/runtime/InitializeThreading.cpp index 4c0e123a4..c7fbd332e 100644 --- a/Source/JavaScriptCore/runtime/InitializeThreading.cpp +++ 
b/Source/JavaScriptCore/runtime/InitializeThreading.cpp @@ -53,7 +53,7 @@ static void initializeThreadingOnce() { WTF::double_conversion::initialize(); WTF::initializeThreading(); - Options::initializeOptions(); + Options::initialize(); #if ENABLE(WRITE_BARRIER_PROFILING) WriteBarrierCounters::initialize(); #endif diff --git a/Source/JavaScriptCore/runtime/JSArray.cpp b/Source/JavaScriptCore/runtime/JSArray.cpp index 96cc44780..7218604d1 100644 --- a/Source/JavaScriptCore/runtime/JSArray.cpp +++ b/Source/JavaScriptCore/runtime/JSArray.cpp @@ -1258,7 +1258,10 @@ JSValue JSArray::pop(ExecState* exec) if (exec->hadException()) return jsUndefined(); // Call the [[Delete]] internal method of O with arguments indx and true. - deletePropertyByIndex(this, exec, index); + if (!deletePropertyByIndex(this, exec, index)) { + throwTypeError(exec, "Unable to delete property."); + return jsUndefined(); + } // Call the [[Put]] internal method of O with arguments "length", indx, and true. setLength(exec, index, true); // Return element. diff --git a/Source/JavaScriptCore/runtime/JSFunction.cpp b/Source/JavaScriptCore/runtime/JSFunction.cpp index f2d9c81e2..0a65deee2 100644 --- a/Source/JavaScriptCore/runtime/JSFunction.cpp +++ b/Source/JavaScriptCore/runtime/JSFunction.cpp @@ -103,13 +103,16 @@ void JSFunction::finishCreation(ExecState* exec, NativeExecutable* executable, i void JSFunction::finishCreation(ExecState* exec, FunctionExecutable* executable, ScopeChainNode* scopeChainNode) { - Base::finishCreation(exec->globalData()); + JSGlobalData& globalData = exec->globalData(); + Base::finishCreation(globalData); ASSERT(inherits(&s_info)); // Switching the structure here is only safe if we currently have the function structure! 
ASSERT(structure() == scopeChainNode->globalObject->functionStructure()); - setStructure(exec->globalData(), scopeChainNode->globalObject->namedFunctionStructure()); - putDirectOffset(exec->globalData(), scopeChainNode->globalObject->functionNameOffset(), executable->nameValue()); + setStructureAndReallocateStorageIfNecessary( + globalData, + scopeChainNode->globalObject->namedFunctionStructure()); + putDirectOffset(globalData, scopeChainNode->globalObject->functionNameOffset(), executable->nameValue()); } Structure* JSFunction::cacheInheritorID(ExecState* exec) diff --git a/Source/JavaScriptCore/runtime/JSGlobalData.cpp b/Source/JavaScriptCore/runtime/JSGlobalData.cpp index 1fb90df40..dd05005c7 100644 --- a/Source/JavaScriptCore/runtime/JSGlobalData.cpp +++ b/Source/JavaScriptCore/runtime/JSGlobalData.cpp @@ -34,8 +34,10 @@ #include "CommonIdentifiers.h" #include "DebuggerActivation.h" #include "FunctionConstructor.h" +#include "GCActivityCallback.h" #include "GetterSetter.h" #include "HostCallReturnValue.h" +#include "IncrementalSweeper.h" #include "Interpreter.h" #include "JSActivation.h" #include "JSAPIValueWrapper.h" @@ -97,7 +99,7 @@ extern const HashTable stringConstructorTable; #if ENABLE(ASSEMBLER) && (ENABLE(CLASSIC_INTERPRETER) || ENABLE(LLINT)) static bool enableAssembler(ExecutableAllocator& executableAllocator) { - if (!executableAllocator.isValid() || !Options::useJIT) + if (!executableAllocator.isValid() || !Options::useJIT()) return false; #if USE(CF) @@ -168,6 +170,7 @@ JSGlobalData::JSGlobalData(GlobalDataType globalDataType, ThreadStackType thread #if CPU(X86) && ENABLE(JIT) , m_timeoutCount(512) #endif + , m_newStringsSinceLastHashConst(0) #if ENABLE(ASSEMBLER) && (ENABLE(CLASSIC_INTERPRETER) || ENABLE(LLINT)) , m_canUseAssembler(enableAssembler(executableAllocator)) #endif @@ -178,12 +181,9 @@ JSGlobalData::JSGlobalData(GlobalDataType globalDataType, ThreadStackType thread { interpreter = new Interpreter; - if (isSharedInstance()) - 
turnOffVerifier(); - // Need to be careful to keep everything consistent here + JSLockHolder lock(this); IdentifierTable* existingEntryIdentifierTable = wtfThreadData().setCurrentIdentifierTable(identifierTable); - JSLock lock(SilenceAssertionsOnly); structureStructure.set(*this, Structure::createStructure(*this)); debuggerActivationStructure.set(*this, DebuggerActivation::createStructure(*this, 0, jsNull())); activationStructure.set(*this, JSActivation::createStructure(*this, 0, jsNull())); @@ -222,6 +222,9 @@ JSGlobalData::JSGlobalData(GlobalDataType globalDataType, ThreadStackType thread JSGlobalData::~JSGlobalData() { + ASSERT(!m_apiLock.currentThreadIsHoldingLock()); + heap.activityCallback()->didStartVMShutdown(); + heap.sweeper()->didStartVMShutdown(); heap.lastChanceToFinalize(); delete interpreter; @@ -311,6 +314,7 @@ bool JSGlobalData::sharedInstanceExists() JSGlobalData& JSGlobalData::sharedInstance() { + GlobalJSLock globalLock; JSGlobalData*& instance = sharedInstanceInternal(); if (!instance) { instance = adoptRef(new JSGlobalData(APIShared, ThreadStackTypeSmall, SmallHeap)).leakRef(); @@ -321,7 +325,6 @@ JSGlobalData& JSGlobalData::sharedInstance() JSGlobalData*& JSGlobalData::sharedInstanceInternal() { - ASSERT(JSLock::currentThreadIsHoldingLock()); static JSGlobalData* sharedInstance; return sharedInstance; } diff --git a/Source/JavaScriptCore/runtime/JSGlobalData.h b/Source/JavaScriptCore/runtime/JSGlobalData.h index f8833104a..90925778b 100644 --- a/Source/JavaScriptCore/runtime/JSGlobalData.h +++ b/Source/JavaScriptCore/runtime/JSGlobalData.h @@ -35,6 +35,7 @@ #include "Heap.h" #include "Intrinsic.h" #include "JITStubs.h" +#include "JSLock.h" #include "JSValue.h" #include "LLIntData.h" #include "NumericStrings.h" @@ -46,8 +47,8 @@ #include <wtf/BumpPointerAllocator.h> #include <wtf/Forward.h> #include <wtf/HashMap.h> -#include <wtf/RefCounted.h> #include <wtf/SimpleStats.h> +#include <wtf/ThreadSafeRefCounted.h> #include <wtf/ThreadSpecific.h> 
#include <wtf/WTFThreadData.h> #if ENABLE(REGEXP_TRACING) @@ -152,7 +153,7 @@ namespace JSC { }; #endif - class JSGlobalData : public RefCounted<JSGlobalData> { + class JSGlobalData : public ThreadSafeRefCounted<JSGlobalData> { public: // WebCore has a one-to-one mapping of threads to JSGlobalDatas; // either create() or createLeaked() should only be called once @@ -180,6 +181,10 @@ namespace JSC { void makeUsableFromMultipleThreads() { heap.machineThreads().makeUsableFromMultipleThreads(); } + private: + JSLock m_apiLock; + + public: Heap heap; // The heap is our first data member to ensure that it's destructed after all the objects that reference it. GlobalDataType globalDataType; @@ -390,6 +395,13 @@ namespace JSC { unsigned m_timeoutCount; #endif + unsigned m_newStringsSinceLastHashConst; + + static const unsigned s_minNumberOfNewStringsToHashConst = 100; + + bool haveEnoughNewStringsToHashConst() { return m_newStringsSinceLastHashConst > s_minNumberOfNewStringsToHashConst; } + void resetNewStringsSinceLastHashConst() { m_newStringsSinceLastHashConst = 0; } + #define registerTypedArrayFunction(type, capitalizedType) \ void registerTypedArrayDescriptor(const capitalizedType##Array*, const TypedArrayDescriptor& descriptor) \ { \ @@ -409,6 +421,8 @@ namespace JSC { registerTypedArrayFunction(float64, Float64); #undef registerTypedArrayFunction + JSLock& apiLock() { return m_apiLock; } + private: friend class LLIntOffsetsExtractor; diff --git a/Source/JavaScriptCore/runtime/JSGlobalObject.cpp b/Source/JavaScriptCore/runtime/JSGlobalObject.cpp index d19db4fd8..0edc0a8a9 100644 --- a/Source/JavaScriptCore/runtime/JSGlobalObject.cpp +++ b/Source/JavaScriptCore/runtime/JSGlobalObject.cpp @@ -115,7 +115,7 @@ template <typename T> static inline void visitIfNeeded(SlotVisitor& visitor, Wri JSGlobalObject::JSGlobalObject(JSGlobalData& globalData, Structure* structure, const GlobalObjectMethodTable* globalObjectMethodTable) : JSSegmentedVariableObject(globalData, structure, 
&m_symbolTable) , m_globalScopeChain() - , m_weakRandom(Options::forceWeakRandomSeed ? Options::forcedWeakRandomSeed : static_cast<unsigned>(randomNumber() * (std::numeric_limits<unsigned>::max() + 1.0))) + , m_weakRandom(Options::forceWeakRandomSeed() ? Options::forcedWeakRandomSeed() : static_cast<unsigned>(randomNumber() * (std::numeric_limits<unsigned>::max() + 1.0))) , m_evalEnabled(true) , m_globalObjectMethodTable(globalObjectMethodTable ? globalObjectMethodTable : &s_globalObjectMethodTable) { @@ -123,8 +123,6 @@ JSGlobalObject::JSGlobalObject(JSGlobalData& globalData, Structure* structure, c JSGlobalObject::~JSGlobalObject() { - ASSERT(JSLock::currentThreadIsHoldingLock()); - if (m_debugger) m_debugger->detach(this); @@ -139,7 +137,7 @@ void JSGlobalObject::destroy(JSCell* cell) void JSGlobalObject::init(JSObject* thisValue) { - ASSERT(JSLock::currentThreadIsHoldingLock()); + ASSERT(globalData().apiLock().currentThreadIsHoldingLock()); m_globalScopeChain.set(globalData(), this, ScopeChainNode::create(0, this, &globalData(), this, thisValue)); diff --git a/Source/JavaScriptCore/runtime/JSGlobalObject.h b/Source/JavaScriptCore/runtime/JSGlobalObject.h index 1dcfc63cc..af03f32e6 100644 --- a/Source/JavaScriptCore/runtime/JSGlobalObject.h +++ b/Source/JavaScriptCore/runtime/JSGlobalObject.h @@ -132,7 +132,7 @@ namespace JSC { WriteBarrier<Structure> m_functionStructure; WriteBarrier<Structure> m_boundFunctionStructure; WriteBarrier<Structure> m_namedFunctionStructure; - size_t m_functionNameOffset; + PropertyOffset m_functionNameOffset; WriteBarrier<Structure> m_numberObjectStructure; WriteBarrier<Structure> m_privateNameStructure; WriteBarrier<Structure> m_regExpMatchesArrayStructure; @@ -262,7 +262,7 @@ namespace JSC { Structure* functionStructure() const { return m_functionStructure.get(); } Structure* boundFunctionStructure() const { return m_boundFunctionStructure.get(); } Structure* namedFunctionStructure() const { return m_namedFunctionStructure.get(); 
} - size_t functionNameOffset() const { return m_functionNameOffset; } + PropertyOffset functionNameOffset() const { return m_functionNameOffset; } Structure* numberObjectStructure() const { return m_numberObjectStructure.get(); } Structure* privateNameStructure() const { return m_privateNameStructure.get(); } Structure* internalFunctionStructure() const { return m_internalFunctionStructure.get(); } diff --git a/Source/JavaScriptCore/runtime/JSLock.cpp b/Source/JavaScriptCore/runtime/JSLock.cpp index 90e2f5d2a..be30c0c9c 100644 --- a/Source/JavaScriptCore/runtime/JSLock.cpp +++ b/Source/JavaScriptCore/runtime/JSLock.cpp @@ -23,6 +23,7 @@ #include "Heap.h" #include "CallFrame.h" +#include "JSGlobalObject.h" #include "JSObject.h" #include "ScopeChain.h" @@ -37,95 +38,96 @@ namespace JSC { // So it's safe to disable it on non-mac platforms where we don't have native pthreads. #if (OS(DARWIN) || USE(PTHREADS)) -// Acquire this mutex before accessing lock-related data. -static pthread_mutex_t JSMutex = PTHREAD_MUTEX_INITIALIZER; +static pthread_mutex_t sharedInstanceLock = PTHREAD_MUTEX_INITIALIZER; -// Thread-specific key that tells whether a thread holds the JSMutex, and how many times it was taken recursively. -pthread_key_t JSLockCount; +GlobalJSLock::GlobalJSLock() +{ + pthread_mutex_lock(&sharedInstanceLock); +} -static void createJSLockCount() +GlobalJSLock::~GlobalJSLock() { - pthread_key_create(&JSLockCount, 0); + pthread_mutex_unlock(&sharedInstanceLock); } -pthread_once_t createJSLockCountOnce = PTHREAD_ONCE_INIT; +JSLockHolder::JSLockHolder(ExecState* exec) + : m_globalData(&exec->globalData()) +{ + m_globalData->apiLock().lock(); +} -// Lock nesting count. 
-intptr_t JSLock::lockCount() +JSLockHolder::JSLockHolder(JSGlobalData* globalData) + : m_globalData(globalData) { - pthread_once(&createJSLockCountOnce, createJSLockCount); + m_globalData->apiLock().lock(); +} - return reinterpret_cast<intptr_t>(pthread_getspecific(JSLockCount)); +JSLockHolder::JSLockHolder(JSGlobalData& globalData) + : m_globalData(&globalData) +{ + m_globalData->apiLock().lock(); } -static void setLockCount(intptr_t count) +JSLockHolder::~JSLockHolder() { - ASSERT(count >= 0); - pthread_setspecific(JSLockCount, reinterpret_cast<void*>(count)); + m_globalData->apiLock().unlock(); } -JSLock::JSLock(ExecState* exec) - : m_lockBehavior(exec->globalData().isSharedInstance() ? LockForReal : SilenceAssertionsOnly) +JSLock::JSLock() + : m_lockCount(0) { - lock(m_lockBehavior); + m_spinLock.Init(); } -JSLock::JSLock(JSGlobalData* globalData) - : m_lockBehavior(globalData->isSharedInstance() ? LockForReal : SilenceAssertionsOnly) +JSLock::~JSLock() { - lock(m_lockBehavior); } -void JSLock::lock(JSLockBehavior lockBehavior) +void JSLock::lock() { -#ifdef NDEBUG - // Locking "not for real" is a debug-only feature. 
- if (lockBehavior == SilenceAssertionsOnly) - return; -#endif + ThreadIdentifier currentThread = WTF::currentThread(); + { + SpinLockHolder holder(&m_spinLock); + if (m_ownerThread == currentThread && m_lockCount) { + m_lockCount++; + return; + } + } - pthread_once(&createJSLockCountOnce, createJSLockCount); + m_lock.lock(); - intptr_t currentLockCount = lockCount(); - if (!currentLockCount && lockBehavior == LockForReal) { - int result = pthread_mutex_lock(&JSMutex); - ASSERT_UNUSED(result, !result); + { + SpinLockHolder holder(&m_spinLock); + m_ownerThread = currentThread; + ASSERT(!m_lockCount); + m_lockCount = 1; } - setLockCount(currentLockCount + 1); } -void JSLock::unlock(JSLockBehavior lockBehavior) +void JSLock::unlock() { - ASSERT(lockCount()); + ASSERT(currentThreadIsHoldingLock()); -#ifdef NDEBUG - // Locking "not for real" is a debug-only feature. - if (lockBehavior == SilenceAssertionsOnly) - return; -#endif + SpinLockHolder holder(&m_spinLock); + m_lockCount--; - intptr_t newLockCount = lockCount() - 1; - setLockCount(newLockCount); - if (!newLockCount && lockBehavior == LockForReal) { - int result = pthread_mutex_unlock(&JSMutex); - ASSERT_UNUSED(result, !result); - } + if (!m_lockCount) + m_lock.unlock(); } void JSLock::lock(ExecState* exec) { - lock(exec->globalData().isSharedInstance() ? LockForReal : SilenceAssertionsOnly); + exec->globalData().apiLock().lock(); } void JSLock::unlock(ExecState* exec) { - unlock(exec->globalData().isSharedInstance() ? LockForReal : SilenceAssertionsOnly); + exec->globalData().apiLock().unlock(); } bool JSLock::currentThreadIsHoldingLock() { - pthread_once(&createJSLockCountOnce, createJSLockCount); - return !!pthread_getspecific(JSLockCount); + return m_lockCount && m_ownerThread == WTF::currentThread(); } // This is fairly nasty. 
We allow multiple threads to run on the same @@ -149,7 +151,7 @@ bool JSLock::currentThreadIsHoldingLock() // this to happen, and were its stack to grow further, then it may potentially // write over the second thread's call frames. // -// In avoid JS stack corruption we enforce a policy of only ever allowing two +// To avoid JS stack corruption we enforce a policy of only ever allowing two // threads to use a JS context concurrently, and only allowing the second of // these threads to execute until it has completed and fully returned from its // outermost call into JSC. We enforce this policy using 'lockDropDepth'. The @@ -158,7 +160,7 @@ bool JSLock::currentThreadIsHoldingLock() // same thread again, enter JSC (through evaluate script or call function), and exit // again through a callback, then the locks will not be dropped when DropAllLocks // is called (since lockDropDepth is non-zero). Since this thread is still holding -// the locks, only it will re able to re-enter JSC (either be returning from the +// the locks, only it will be able to re-enter JSC (either be returning from the // callback, or by re-entering through another call to evaulate script or call // function). // @@ -168,61 +170,84 @@ bool JSLock::currentThreadIsHoldingLock() // order in which they were made - though implementing the less restrictive policy // would likely increase complexity and overhead. // -static unsigned lockDropDepth = 0; -JSLock::DropAllLocks::DropAllLocks(ExecState* exec) - : m_lockBehavior(exec->globalData().isSharedInstance() ? LockForReal : SilenceAssertionsOnly) +// This function returns the number of locks that were dropped. 
+unsigned JSLock::dropAllLocks() { - pthread_once(&createJSLockCountOnce, createJSLockCount); + if (m_lockDropDepth++) + return 0; - if (lockDropDepth++) { - m_lockCount = 0; - return; - } + return dropAllLocksUnconditionally(); +} + +unsigned JSLock::dropAllLocksUnconditionally() +{ + unsigned lockCount = m_lockCount; + for (unsigned i = 0; i < lockCount; i++) + unlock(); - m_lockCount = JSLock::lockCount(); - for (intptr_t i = 0; i < m_lockCount; i++) - JSLock::unlock(m_lockBehavior); + return lockCount; } -JSLock::DropAllLocks::DropAllLocks(JSLockBehavior JSLockBehavior) - : m_lockBehavior(JSLockBehavior) +void JSLock::grabAllLocks(unsigned lockCount) { - pthread_once(&createJSLockCountOnce, createJSLockCount); + for (unsigned i = 0; i < lockCount; i++) + lock(); - if (lockDropDepth++) { - m_lockCount = 0; - return; - } + m_lockDropDepth--; +} - // It is necessary to drop even "unreal" locks, because having a non-zero lock count - // will prevent a real lock from being taken. +JSLock::DropAllLocks::DropAllLocks(ExecState* exec) + : m_lockCount(0) + , m_globalData(&exec->globalData()) +{ + m_lockCount = m_globalData->apiLock().dropAllLocks(); +} - m_lockCount = JSLock::lockCount(); - for (intptr_t i = 0; i < m_lockCount; i++) - JSLock::unlock(m_lockBehavior); +JSLock::DropAllLocks::DropAllLocks(JSGlobalData* globalData) + : m_lockCount(0) + , m_globalData(globalData) +{ + m_lockCount = m_globalData->apiLock().dropAllLocks(); } JSLock::DropAllLocks::~DropAllLocks() { - for (intptr_t i = 0; i < m_lockCount; i++) - JSLock::lock(m_lockBehavior); - - --lockDropDepth; + m_globalData->apiLock().grabAllLocks(m_lockCount); } #else // (OS(DARWIN) || USE(PTHREADS)) -JSLock::JSLock(ExecState*) - : m_lockBehavior(SilenceAssertionsOnly) +GlobalJSLock::GlobalJSLock() +{ +} + +GlobalJSLock::~GlobalJSLock() +{ +} + +JSLockHolder::JSLockHolder(JSGlobalData*) { } -// If threading support is off, set the lock count to a constant value of 1 so ssertions -// that the lock is held 
don't fail -intptr_t JSLock::lockCount() +JSLockHolder::JSLockHolder(JSGlobalData&) +{ +} + +JSLockHolder::JSLockHolder(ExecState*) +{ +} + +JSLockHolder::~JSLockHolder() +{ +} + +JSLock::JSLock() +{ +} + +JSLock::~JSLock() { - return 1; } bool JSLock::currentThreadIsHoldingLock() @@ -230,11 +255,11 @@ bool JSLock::currentThreadIsHoldingLock() return true; } -void JSLock::lock(JSLockBehavior) +void JSLock::lock() { } -void JSLock::unlock(JSLockBehavior) +void JSLock::unlock() { } @@ -246,11 +271,33 @@ void JSLock::unlock(ExecState*) { } +void JSLock::lock(JSGlobalData&) +{ +} + +void JSLock::unlock(JSGlobalData&) +{ +} + +unsigned JSLock::dropAllLocks() +{ + return 0; +} + +unsigned JSLock::dropAllLocksUnconditionally() +{ + return 0; +} + +void JSLock::grabAllLocks(unsigned) +{ +} + JSLock::DropAllLocks::DropAllLocks(ExecState*) { } -JSLock::DropAllLocks::DropAllLocks(JSLockBehavior) +JSLock::DropAllLocks::DropAllLocks(JSGlobalData*) { } diff --git a/Source/JavaScriptCore/runtime/JSLock.h b/Source/JavaScriptCore/runtime/JSLock.h index a0eb96975..94108d013 100644 --- a/Source/JavaScriptCore/runtime/JSLock.h +++ b/Source/JavaScriptCore/runtime/JSLock.h @@ -23,6 +23,9 @@ #include <wtf/Assertions.h> #include <wtf/Noncopyable.h> +#include <wtf/RefPtr.h> +#include <wtf/TCSpinLock.h> +#include <wtf/Threading.h> namespace JSC { @@ -30,8 +33,9 @@ namespace JSC { // important to lock before doing anything that allocates a // JavaScript data structure or that interacts with shared state // such as the protect count hash table. The simplest way to lock - // is to create a local JSLock object in the scope where the lock - // must be held. The lock is recursive so nesting is ok. The JSLock + // is to create a local JSLockHolder object in the scope where the lock + // must be held and pass it the context that requires protection. + // The lock is recursive so nesting is ok. 
The JSLock // object also acts as a convenience short-hand for running important // initialization routines. @@ -44,62 +48,65 @@ namespace JSC { // DropAllLocks object takes care to release the JSLock only if your // thread acquired it to begin with. - // For contexts other than the single shared one, implicit locking is not done, - // but we still need to perform all the counting in order to keep debug - // assertions working, so that clients that use the shared context don't break. - class ExecState; class JSGlobalData; - enum JSLockBehavior { SilenceAssertionsOnly, LockForReal }; + // This class is used to protect the initialization of the legacy single + // shared JSGlobalData. + class GlobalJSLock { + WTF_MAKE_NONCOPYABLE(GlobalJSLock); + public: + JS_EXPORT_PRIVATE GlobalJSLock(); + JS_EXPORT_PRIVATE ~GlobalJSLock(); + }; + + class JSLockHolder { + public: + JS_EXPORT_PRIVATE JSLockHolder(JSGlobalData*); + JS_EXPORT_PRIVATE JSLockHolder(JSGlobalData&); + JS_EXPORT_PRIVATE JSLockHolder(ExecState*); + + JS_EXPORT_PRIVATE ~JSLockHolder(); + private: + RefPtr<JSGlobalData> m_globalData; + }; class JSLock { WTF_MAKE_NONCOPYABLE(JSLock); public: - JS_EXPORT_PRIVATE JSLock(ExecState*); - JSLock(JSGlobalData*); - - JSLock(JSLockBehavior lockBehavior) - : m_lockBehavior(lockBehavior) - { -#ifdef NDEBUG - // Locking "not for real" is a debug-only feature. - if (lockBehavior == SilenceAssertionsOnly) - return; -#endif - lock(lockBehavior); - } - - ~JSLock() - { -#ifdef NDEBUG - // Locking "not for real" is a debug-only feature. 
- if (m_lockBehavior == SilenceAssertionsOnly) - return; -#endif - unlock(m_lockBehavior); - } - - JS_EXPORT_PRIVATE static void lock(JSLockBehavior); - JS_EXPORT_PRIVATE static void unlock(JSLockBehavior); + JSLock(); + JS_EXPORT_PRIVATE ~JSLock(); + + JS_EXPORT_PRIVATE void lock(); + JS_EXPORT_PRIVATE void unlock(); + static void lock(ExecState*); static void unlock(ExecState*); + static void lock(JSGlobalData&); + static void unlock(JSGlobalData&); + + JS_EXPORT_PRIVATE bool currentThreadIsHoldingLock(); - JS_EXPORT_PRIVATE static intptr_t lockCount(); - JS_EXPORT_PRIVATE static bool currentThreadIsHoldingLock(); + unsigned dropAllLocks(); + unsigned dropAllLocksUnconditionally(); + void grabAllLocks(unsigned lockCount); - JSLockBehavior m_lockBehavior; + SpinLock m_spinLock; + Mutex m_lock; + ThreadIdentifier m_ownerThread; + intptr_t m_lockCount; + unsigned m_lockDropDepth; class DropAllLocks { WTF_MAKE_NONCOPYABLE(DropAllLocks); public: JS_EXPORT_PRIVATE DropAllLocks(ExecState* exec); - JS_EXPORT_PRIVATE DropAllLocks(JSLockBehavior); + JS_EXPORT_PRIVATE DropAllLocks(JSGlobalData*); JS_EXPORT_PRIVATE ~DropAllLocks(); private: intptr_t m_lockCount; - JSLockBehavior m_lockBehavior; + RefPtr<JSGlobalData> m_globalData; }; }; diff --git a/Source/JavaScriptCore/runtime/JSObject.cpp b/Source/JavaScriptCore/runtime/JSObject.cpp index 66cc89809..ccc49fd5c 100644 --- a/Source/JavaScriptCore/runtime/JSObject.cpp +++ b/Source/JavaScriptCore/runtime/JSObject.cpp @@ -93,24 +93,54 @@ void JSObject::visitChildren(JSCell* cell, SlotVisitor& visitor) bool wasCheckingForDefaultMarkViolation = visitor.m_isCheckingForDefaultMarkViolation; visitor.m_isCheckingForDefaultMarkViolation = false; #endif + + JSCell::visitChildren(thisObject, visitor); + + PropertyStorage storage = thisObject->outOfLineStorage(); + if (storage) { + size_t storageSize = thisObject->structure()->outOfLineSizeForKnownNonFinalObject(); + // We have this extra temp here to slake GCC's thirst for the blood of 
those who dereference type-punned pointers. + void* temp = storage; + visitor.copyAndAppend(&temp, thisObject->structure()->outOfLineCapacity() * sizeof(WriteBarrierBase<Unknown>), storage->slot(), storageSize); + storage = static_cast<PropertyStorage>(temp); + thisObject->m_outOfLineStorage.set(storage, StorageBarrier::Unchecked); + } + + if (thisObject->m_inheritorID) + visitor.append(&thisObject->m_inheritorID); + +#if !ASSERT_DISABLED + visitor.m_isCheckingForDefaultMarkViolation = wasCheckingForDefaultMarkViolation; +#endif +} +void JSFinalObject::visitChildren(JSCell* cell, SlotVisitor& visitor) +{ + JSFinalObject* thisObject = jsCast<JSFinalObject*>(cell); + ASSERT_GC_OBJECT_INHERITS(thisObject, &s_info); +#if !ASSERT_DISABLED + bool wasCheckingForDefaultMarkViolation = visitor.m_isCheckingForDefaultMarkViolation; + visitor.m_isCheckingForDefaultMarkViolation = false; +#endif + JSCell::visitChildren(thisObject, visitor); - PropertyStorage storage = thisObject->propertyStorage(); - size_t storageSize = thisObject->structure()->propertyStorageSize(); - if (thisObject->isUsingInlineStorage()) - visitor.appendValues(storage, storageSize); - else { + PropertyStorage storage = thisObject->outOfLineStorage(); + if (storage) { + size_t storageSize = thisObject->structure()->outOfLineSizeForKnownFinalObject(); // We have this extra temp here to slake GCC's thirst for the blood of those who dereference type-punned pointers. 
void* temp = storage; - visitor.copyAndAppend(&temp, thisObject->structure()->propertyStorageCapacity() * sizeof(WriteBarrierBase<Unknown>), storage->slot(), storageSize); + visitor.copyAndAppend(&temp, thisObject->structure()->outOfLineCapacity() * sizeof(WriteBarrierBase<Unknown>), storage->slot(), storageSize); storage = static_cast<PropertyStorage>(temp); - thisObject->m_propertyStorage.set(storage, StorageBarrier::Unchecked); + thisObject->m_outOfLineStorage.set(storage, StorageBarrier::Unchecked); } if (thisObject->m_inheritorID) visitor.append(&thisObject->m_inheritorID); + size_t storageSize = thisObject->structure()->inlineSizeForKnownFinalObject(); + visitor.appendValues(thisObject->inlineStorage(), storageSize); + #if !ASSERT_DISABLED visitor.m_isCheckingForDefaultMarkViolation = wasCheckingForDefaultMarkViolation; #endif @@ -153,8 +183,8 @@ void JSObject::put(JSCell* cell, ExecState* exec, PropertyName propertyName, JSV for (JSObject* obj = thisObject; ; obj = asObject(prototype)) { unsigned attributes; JSCell* specificValue; - size_t offset = obj->structure()->get(globalData, propertyName, attributes, specificValue); - if (offset != WTF::notFound) { + PropertyOffset offset = obj->structure()->get(globalData, propertyName, attributes, specificValue); + if (offset != invalidOffset) { if (attributes & ReadOnly) { if (slot.isStrictMode()) throwError(exec, createTypeError(exec, StrictModeReadonlyPropertyWriteError)); @@ -272,7 +302,7 @@ bool JSObject::deleteProperty(JSCell* cell, ExecState* exec, PropertyName proper unsigned attributes; JSCell* specificValue; - if (thisObject->structure()->get(exec->globalData(), propertyName, attributes, specificValue) != WTF::notFound) { + if (isValidOffset(thisObject->structure()->get(exec->globalData(), propertyName, attributes, specificValue))) { if (attributes & DontDelete && !exec->globalData().isInDefineOwnProperty()) return false; thisObject->removeDirect(exec->globalData(), propertyName); @@ -394,7 +424,7 @@ bool 
JSObject::propertyIsEnumerable(ExecState* exec, const Identifier& propertyN bool JSObject::getPropertySpecificValue(ExecState* exec, PropertyName propertyName, JSCell*& specificValue) const { unsigned attributes; - if (structure()->get(exec->globalData(), propertyName, attributes, specificValue) != WTF::notFound) + if (isValidOffset(structure()->get(exec->globalData(), propertyName, attributes, specificValue))) return true; // This could be a function within the static table? - should probably @@ -516,20 +546,20 @@ void JSObject::reifyStaticFunctionsForDelete(ExecState* exec) bool JSObject::removeDirect(JSGlobalData& globalData, PropertyName propertyName) { - if (structure()->get(globalData, propertyName) == WTF::notFound) + if (!isValidOffset(structure()->get(globalData, propertyName))) return false; - size_t offset; + PropertyOffset offset; if (structure()->isUncacheableDictionary()) { offset = structure()->removePropertyWithoutTransition(globalData, propertyName); - if (offset == WTF::notFound) + if (offset == invalidOffset) return false; putUndefinedAtDirectOffset(offset); return true; } setStructure(globalData, Structure::removePropertyTransition(globalData, structure(), propertyName, offset)); - if (offset == WTF::notFound) + if (offset == invalidOffset) return false; putUndefinedAtDirectOffset(offset); return true; @@ -559,25 +589,22 @@ Structure* JSObject::createInheritorID(JSGlobalData& globalData) return m_inheritorID.get(); } -PropertyStorage JSObject::growPropertyStorage(JSGlobalData& globalData, size_t oldSize, size_t newSize) +PropertyStorage JSObject::growOutOfLineStorage(JSGlobalData& globalData, size_t oldSize, size_t newSize) { ASSERT(newSize > oldSize); // It's important that this function not rely on structure(), since // we might be in the middle of a transition. 
- PropertyStorage oldPropertyStorage = m_propertyStorage.get(); + PropertyStorage oldPropertyStorage = m_outOfLineStorage.get(); PropertyStorage newPropertyStorage = 0; - if (isUsingInlineStorage()) { + if (!oldPropertyStorage) { // We have this extra temp here to slake GCC's thirst for the blood of those who dereference type-punned pointers. void* temp = newPropertyStorage; if (!globalData.heap.tryAllocateStorage(sizeof(WriteBarrierBase<Unknown>) * newSize, &temp)) CRASH(); newPropertyStorage = static_cast<PropertyStorage>(temp); - - for (unsigned i = 0; i < oldSize; ++i) - newPropertyStorage[i] = oldPropertyStorage[i]; } else { // We have this extra temp here to slake GCC's thirst for the blood of those who dereference type-punned pointers. void* temp = oldPropertyStorage; @@ -594,8 +621,8 @@ bool JSObject::getOwnPropertyDescriptor(JSObject* object, ExecState* exec, Prope { unsigned attributes = 0; JSCell* cell = 0; - size_t offset = object->structure()->get(exec->globalData(), propertyName, attributes, cell); - if (offset == WTF::notFound) + PropertyOffset offset = object->structure()->get(exec->globalData(), propertyName, attributes, cell); + if (offset == invalidOffset) return false; descriptor.setDescriptor(object->getDirectOffset(offset), attributes); return true; diff --git a/Source/JavaScriptCore/runtime/JSObject.h b/Source/JavaScriptCore/runtime/JSObject.h index fdb708dd9..9972d6077 100644 --- a/Source/JavaScriptCore/runtime/JSObject.h +++ b/Source/JavaScriptCore/runtime/JSObject.h @@ -79,10 +79,13 @@ namespace JSC { Accessor = 1 << 5, // property is a getter/setter }; + class JSFinalObject; + class JSObject : public JSCell { friend class BatchedTransitionOptimizer; friend class JIT; friend class JSCell; + friend class JSFinalObject; friend class MarkedBlock; JS_EXPORT_PRIVATE friend bool setUpStaticFunctionSlot(ExecState*, const HashEntry*, JSObject*, PropertyName, PropertySlot&); @@ -161,26 +164,72 @@ namespace JSC { // This get function only looks at 
the property map. JSValue getDirect(JSGlobalData& globalData, PropertyName propertyName) const { - size_t offset = structure()->get(globalData, propertyName); - return offset != WTF::notFound ? getDirectOffset(offset) : JSValue(); + PropertyOffset offset = structure()->get(globalData, propertyName); + checkOffset(offset, structure()->typeInfo().type()); + return offset != invalidOffset ? getDirectOffset(offset) : JSValue(); } WriteBarrierBase<Unknown>* getDirectLocation(JSGlobalData& globalData, PropertyName propertyName) { - size_t offset = structure()->get(globalData, propertyName); - return offset != WTF::notFound ? locationForOffset(offset) : 0; + PropertyOffset offset = structure()->get(globalData, propertyName); + checkOffset(offset, structure()->typeInfo().type()); + return offset != invalidOffset ? locationForOffset(offset) : 0; } WriteBarrierBase<Unknown>* getDirectLocation(JSGlobalData& globalData, PropertyName propertyName, unsigned& attributes) { JSCell* specificFunction; - size_t offset = structure()->get(globalData, propertyName, attributes, specificFunction); - return offset != WTF::notFound ? locationForOffset(offset) : 0; + PropertyOffset offset = structure()->get(globalData, propertyName, attributes, specificFunction); + return offset != invalidOffset ? 
locationForOffset(offset) : 0; } - size_t offsetForLocation(WriteBarrierBase<Unknown>* location) const + bool hasInlineStorage() const { return structure()->hasInlineStorage(); } + ConstPropertyStorage inlineStorageUnsafe() const + { + return bitwise_cast<ConstPropertyStorage>(this + 1); + } + PropertyStorage inlineStorageUnsafe() + { + return bitwise_cast<PropertyStorage>(this + 1); + } + ConstPropertyStorage inlineStorage() const + { + ASSERT(hasInlineStorage()); + return inlineStorageUnsafe(); + } + PropertyStorage inlineStorage() { - return location - propertyStorage(); + ASSERT(hasInlineStorage()); + return inlineStorageUnsafe(); + } + + ConstPropertyStorage outOfLineStorage() const { return m_outOfLineStorage.get(); } + PropertyStorage outOfLineStorage() { return m_outOfLineStorage.get(); } + + const WriteBarrierBase<Unknown>* locationForOffset(PropertyOffset offset) const + { + if (isInlineOffset(offset)) + return &inlineStorage()[offsetInInlineStorage(offset)]; + return &outOfLineStorage()[offsetInOutOfLineStorage(offset)]; + } + + WriteBarrierBase<Unknown>* locationForOffset(PropertyOffset offset) + { + if (isInlineOffset(offset)) + return &inlineStorage()[offsetInInlineStorage(offset)]; + return &outOfLineStorage()[offsetInOutOfLineStorage(offset)]; + } + + PropertyOffset offsetForLocation(WriteBarrierBase<Unknown>* location) const + { + PropertyOffset result; + size_t offsetInInlineStorage = location - inlineStorageUnsafe(); + if (offsetInInlineStorage < static_cast<size_t>(inlineStorageCapacity)) + result = offsetInInlineStorage; + else + result = location - outOfLineStorage() + firstOutOfLineOffset; + validateOffset(result, structure()->typeInfo().type()); + return result; } void transitionTo(JSGlobalData&, Structure*); @@ -197,9 +246,9 @@ namespace JSC { bool putOwnDataProperty(JSGlobalData&, PropertyName, JSValue, PutPropertySlot&); // Fast access to known property offsets. 
- JSValue getDirectOffset(size_t offset) const { return propertyStorage()[offset].get(); } - void putDirectOffset(JSGlobalData& globalData, size_t offset, JSValue value) { propertyStorage()[offset].set(globalData, this, value); } - void putUndefinedAtDirectOffset(size_t offset) { propertyStorage()[offset].setUndefined(); } + JSValue getDirectOffset(PropertyOffset offset) const { return locationForOffset(offset)->get(); } + void putDirectOffset(JSGlobalData& globalData, PropertyOffset offset, JSValue value) { locationForOffset(offset)->set(globalData, this, value); } + void putUndefinedAtDirectOffset(PropertyOffset offset) { locationForOffset(offset)->setUndefined(); } JS_EXPORT_PRIVATE static bool defineOwnProperty(JSObject*, ExecState*, PropertyName, PropertyDescriptor&, bool shouldThrow); @@ -220,17 +269,18 @@ namespace JSC { bool staticFunctionsReified() { return structure()->staticFunctionsReified(); } void reifyStaticFunctionsForDelete(ExecState* exec); - JS_EXPORT_PRIVATE PropertyStorage growPropertyStorage(JSGlobalData&, size_t oldSize, size_t newSize); - bool isUsingInlineStorage() const { return static_cast<const void*>(m_propertyStorage.get()) == static_cast<const void*>(this + 1); } - void setPropertyStorage(JSGlobalData&, PropertyStorage, Structure*); + JS_EXPORT_PRIVATE PropertyStorage growOutOfLineStorage(JSGlobalData&, size_t oldSize, size_t newSize); + void setOutOfLineStorage(JSGlobalData&, PropertyStorage, Structure*); + + bool reallocateStorageIfNecessary(JSGlobalData&, unsigned oldCapacity, Structure*); + void setStructureAndReallocateStorageIfNecessary(JSGlobalData&, unsigned oldCapacity, Structure*); + void setStructureAndReallocateStorageIfNecessary(JSGlobalData&, Structure*); - void* addressOfPropertyStorage() + void* addressOfOutOfLineStorage() { - return &m_propertyStorage; + return &m_outOfLineStorage; } - static const unsigned baseExternalStorageCapacity = 16; - void flattenDictionaryObject(JSGlobalData& globalData) { 
structure()->flattenDictionaryStructure(globalData, this); @@ -244,20 +294,19 @@ namespace JSC { } static size_t offsetOfInlineStorage(); - static size_t offsetOfPropertyStorage(); + static size_t offsetOfOutOfLineStorage(); static size_t offsetOfInheritorID(); static JS_EXPORTDATA const ClassInfo s_info; protected: - void finishCreation(JSGlobalData& globalData, PropertyStorage inlineStorage) + void finishCreation(JSGlobalData& globalData) { Base::finishCreation(globalData); ASSERT(inherits(&s_info)); - ASSERT(structure()->propertyStorageCapacity() < baseExternalStorageCapacity); + ASSERT(!structure()->outOfLineCapacity()); ASSERT(structure()->isEmpty()); ASSERT(prototype().isNull() || Heap::heap(this) == Heap::heap(prototype())); - ASSERT_UNUSED(inlineStorage, static_cast<void*>(inlineStorage) == static_cast<void*>(this + 1)); ASSERT(structure()->isObject()); ASSERT(classInfo()); } @@ -271,7 +320,7 @@ namespace JSC { // To instantiate objects you likely want JSFinalObject, below. // To create derived types you likely want JSNonFinalObject, below. 
- JSObject(JSGlobalData&, Structure*, PropertyStorage inlineStorage); + JSObject(JSGlobalData&, Structure*); void resetInheritorID() { @@ -289,19 +338,6 @@ namespace JSC { void isObject(); void isString(); - ConstPropertyStorage propertyStorage() const { return m_propertyStorage.get(); } - PropertyStorage propertyStorage() { return m_propertyStorage.get(); } - - const WriteBarrierBase<Unknown>* locationForOffset(size_t offset) const - { - return &propertyStorage()[offset]; - } - - WriteBarrierBase<Unknown>* locationForOffset(size_t offset) - { - return &propertyStorage()[offset]; - } - template<PutMode> bool putDirectInternal(JSGlobalData&, PropertyName, JSValue, unsigned attr, PutPropertySlot&, JSCell*); @@ -311,21 +347,11 @@ namespace JSC { const HashEntry* findPropertyHashEntry(ExecState*, PropertyName) const; Structure* createInheritorID(JSGlobalData&); - StorageBarrier m_propertyStorage; + StorageBarrier m_outOfLineStorage; WriteBarrier<Structure> m_inheritorID; }; -#if USE(JSVALUE32_64) -#define JSNonFinalObject_inlineStorageCapacity 4 -#define JSFinalObject_inlineStorageCapacity 6 -#else -#define JSNonFinalObject_inlineStorageCapacity 2 -#define JSFinalObject_inlineStorageCapacity 4 -#endif - -COMPILE_ASSERT((JSFinalObject_inlineStorageCapacity >= JSNonFinalObject_inlineStorageCapacity), final_storage_is_at_least_as_large_as_non_final); - // JSNonFinalObject is a type of JSObject that has some internal storage, // but also preserves some space in the collector cell for additional // data members in derived types. 
@@ -340,22 +366,23 @@ COMPILE_ASSERT((JSFinalObject_inlineStorageCapacity >= JSNonFinalObject_inlineSt return Structure::create(globalData, globalObject, prototype, TypeInfo(ObjectType, StructureFlags), &s_info); } + static bool hasInlineStorage() + { + return false; + } + protected: explicit JSNonFinalObject(JSGlobalData& globalData, Structure* structure) - : JSObject(globalData, structure, m_inlineStorage) + : JSObject(globalData, structure) { } void finishCreation(JSGlobalData& globalData) { - Base::finishCreation(globalData, m_inlineStorage); - ASSERT(!(OBJECT_OFFSETOF(JSNonFinalObject, m_inlineStorage) % sizeof(double))); - ASSERT(this->structure()->propertyStorageCapacity() == JSNonFinalObject_inlineStorageCapacity); + Base::finishCreation(globalData); + ASSERT(!this->structure()->totalStorageCapacity()); ASSERT(classInfo()); } - - private: - WriteBarrier<Unknown> m_inlineStorage[JSNonFinalObject_inlineStorageCapacity]; }; class JSFinalObject; @@ -374,14 +401,23 @@ COMPILE_ASSERT((JSFinalObject_inlineStorageCapacity >= JSNonFinalObject_inlineSt return Structure::create(globalData, globalObject, prototype, TypeInfo(FinalObjectType, StructureFlags), &s_info); } + JS_EXPORT_PRIVATE static void visitChildren(JSCell*, SlotVisitor&); + static JS_EXPORTDATA const ClassInfo s_info; + static bool hasInlineStorage() + { + return true; + } protected: + void visitChildrenCommon(SlotVisitor&); + void finishCreation(JSGlobalData& globalData) { - Base::finishCreation(globalData, m_inlineStorage); + Base::finishCreation(globalData); ASSERT(!(OBJECT_OFFSETOF(JSFinalObject, m_inlineStorage) % sizeof(double))); - ASSERT(this->structure()->propertyStorageCapacity() == JSFinalObject_inlineStorageCapacity); + ASSERT(this->structure()->inlineCapacity() == static_cast<unsigned>(inlineStorageCapacity)); + ASSERT(this->structure()->totalStorageCapacity() == static_cast<unsigned>(inlineStorageCapacity)); ASSERT(classInfo()); } @@ -389,13 +425,13 @@ 
COMPILE_ASSERT((JSFinalObject_inlineStorageCapacity >= JSNonFinalObject_inlineSt friend class LLIntOffsetsExtractor; explicit JSFinalObject(JSGlobalData& globalData, Structure* structure) - : JSObject(globalData, structure, m_inlineStorage) + : JSObject(globalData, structure) { } static const unsigned StructureFlags = JSObject::StructureFlags; - WriteBarrierBase<Unknown> m_inlineStorage[JSFinalObject_inlineStorageCapacity]; + WriteBarrierBase<Unknown> m_inlineStorage[INLINE_STORAGE_CAPACITY]; }; inline JSFinalObject* JSFinalObject::create(ExecState* exec, Structure* structure) @@ -417,13 +453,12 @@ inline bool isJSFinalObject(JSValue value) inline size_t JSObject::offsetOfInlineStorage() { - ASSERT(OBJECT_OFFSETOF(JSFinalObject, m_inlineStorage) == OBJECT_OFFSETOF(JSNonFinalObject, m_inlineStorage)); return OBJECT_OFFSETOF(JSFinalObject, m_inlineStorage); } -inline size_t JSObject::offsetOfPropertyStorage() +inline size_t JSObject::offsetOfOutOfLineStorage() { - return OBJECT_OFFSETOF(JSObject, m_propertyStorage); + return OBJECT_OFFSETOF(JSObject, m_outOfLineStorage); } inline size_t JSObject::offsetOfInheritorID() @@ -461,12 +496,18 @@ inline bool JSObject::isGlobalThis() const return structure()->typeInfo().type() == GlobalThisType; } -inline void JSObject::setPropertyStorage(JSGlobalData& globalData, PropertyStorage storage, Structure* structure) +inline void JSObject::setOutOfLineStorage(JSGlobalData& globalData, PropertyStorage storage, Structure* structure) { - ASSERT(storage); ASSERT(structure); + if (!storage) { + ASSERT(!structure->outOfLineCapacity()); + ASSERT(!structure->outOfLineSize()); + } else { + ASSERT(structure->outOfLineCapacity()); + ASSERT(structure->outOfLineSize()); + } setStructure(globalData, structure); - m_propertyStorage.set(globalData, this, storage); + m_outOfLineStorage.set(globalData, this, storage); } inline JSObject* constructEmptyObject(ExecState* exec, Structure* structure) @@ -504,9 +545,9 @@ inline JSObject* asObject(JSValue 
value) return asObject(value.asCell()); } -inline JSObject::JSObject(JSGlobalData& globalData, Structure* structure, PropertyStorage inlineStorage) +inline JSObject::JSObject(JSGlobalData& globalData, Structure* structure) : JSCell(globalData, structure) - , m_propertyStorage(globalData, this, inlineStorage) + , m_outOfLineStorage(globalData, this, 0) { } @@ -530,11 +571,6 @@ inline Structure* JSObject::inheritorID(JSGlobalData& globalData) return createInheritorID(globalData); } -inline bool Structure::isUsingInlineStorage() const -{ - return propertyStorageCapacity() < JSObject::baseExternalStorageCapacity; -} - inline bool JSCell::inherits(const ClassInfo* info) const { return classInfo()->isSubClassOf(info); @@ -591,10 +627,10 @@ ALWAYS_INLINE bool JSCell::fastGetOwnPropertySlot(ExecState* exec, PropertyName ALWAYS_INLINE JSValue JSCell::fastGetOwnProperty(ExecState* exec, const UString& name) { if (!structure()->typeInfo().overridesGetOwnPropertySlot() && !structure()->hasGetterSetterProperties()) { - size_t offset = name.impl()->hasHash() + PropertyOffset offset = name.impl()->hasHash() ? structure()->get(exec->globalData(), Identifier(exec, name)) : structure()->get(exec->globalData(), name); - if (offset != WTF::notFound) + if (offset != invalidOffset) return asObject(this)->locationForOffset(offset)->get(); } return JSValue(); @@ -656,8 +692,8 @@ inline bool JSObject::putDirectInternal(JSGlobalData& globalData, PropertyName p if (structure()->isDictionary()) { unsigned currentAttributes; JSCell* currentSpecificFunction; - size_t offset = structure()->get(globalData, propertyName, currentAttributes, currentSpecificFunction); - if (offset != WTF::notFound) { + PropertyOffset offset = structure()->get(globalData, propertyName, currentAttributes, currentSpecificFunction); + if (offset != invalidOffset) { // If there is currently a specific function, and there now either isn't, // or the new value is different, then despecify. 
if (currentSpecificFunction && (specificFunction != currentSpecificFunction)) @@ -680,13 +716,14 @@ inline bool JSObject::putDirectInternal(JSGlobalData& globalData, PropertyName p if ((mode == PutModePut) && !isExtensible()) return false; - PropertyStorage newStorage = propertyStorage(); - if (structure()->shouldGrowPropertyStorage()) - newStorage = growPropertyStorage(globalData, structure()->propertyStorageCapacity(), structure()->suggestedNewPropertyStorageSize()); + PropertyStorage newStorage = outOfLineStorage(); + if (structure()->putWillGrowOutOfLineStorage()) + newStorage = growOutOfLineStorage(globalData, structure()->outOfLineCapacity(), structure()->suggestedNewOutOfLineStorageCapacity()); offset = structure()->addPropertyWithoutTransition(globalData, propertyName, attributes, specificFunction); - setPropertyStorage(globalData, newStorage, structure()); + setOutOfLineStorage(globalData, newStorage, structure()); - ASSERT(offset < structure()->propertyStorageCapacity()); + validateOffset(offset); + ASSERT(structure()->isValidOffset(offset)); putDirectOffset(globalData, offset, value); // See comment on setNewProperty call below. 
if (!specificFunction) @@ -694,15 +731,16 @@ inline bool JSObject::putDirectInternal(JSGlobalData& globalData, PropertyName p return true; } - size_t offset; - size_t currentCapacity = structure()->propertyStorageCapacity(); + PropertyOffset offset; + size_t currentCapacity = structure()->outOfLineCapacity(); if (Structure* structure = Structure::addPropertyTransitionToExistingStructure(this->structure(), propertyName, attributes, specificFunction, offset)) { - PropertyStorage newStorage = propertyStorage(); - if (currentCapacity != structure->propertyStorageCapacity()) - newStorage = growPropertyStorage(globalData, currentCapacity, structure->propertyStorageCapacity()); + PropertyStorage newStorage = outOfLineStorage(); + if (currentCapacity != structure->outOfLineCapacity()) + newStorage = growOutOfLineStorage(globalData, currentCapacity, structure->outOfLineCapacity()); - ASSERT(offset < structure->propertyStorageCapacity()); - setPropertyStorage(globalData, newStorage, structure); + validateOffset(offset); + ASSERT(structure->isValidOffset(offset)); + setOutOfLineStorage(globalData, newStorage, structure); putDirectOffset(globalData, offset, value); // This is a new property; transitions with specific values are not currently cachable, // so leave the slot in an uncachable state. 
@@ -714,7 +752,7 @@ inline bool JSObject::putDirectInternal(JSGlobalData& globalData, PropertyName p unsigned currentAttributes; JSCell* currentSpecificFunction; offset = structure()->get(globalData, propertyName, currentAttributes, currentSpecificFunction); - if (offset != WTF::notFound) { + if (offset != invalidOffset) { if ((mode == PutModePut) && currentAttributes & ReadOnly) return false; @@ -746,14 +784,12 @@ inline bool JSObject::putDirectInternal(JSGlobalData& globalData, PropertyName p if ((mode == PutModePut) && !isExtensible()) return false; - PropertyStorage newStorage = propertyStorage(); - if (structure()->shouldGrowPropertyStorage()) - newStorage = growPropertyStorage(globalData, structure()->propertyStorageCapacity(), structure()->suggestedNewPropertyStorageSize()); - Structure* structure = Structure::addPropertyTransition(globalData, this->structure(), propertyName, attributes, specificFunction, offset); + + validateOffset(offset); + ASSERT(structure->isValidOffset(offset)); + setStructureAndReallocateStorageIfNecessary(globalData, structure); - ASSERT(offset < structure->propertyStorageCapacity()); - setPropertyStorage(globalData, newStorage, structure); putDirectOffset(globalData, offset, value); // This is a new property; transitions with specific values are not currently cachable, // so leave the slot in an uncachable state. 
@@ -762,6 +798,26 @@ inline bool JSObject::putDirectInternal(JSGlobalData& globalData, PropertyName p return true; } +inline void JSObject::setStructureAndReallocateStorageIfNecessary(JSGlobalData& globalData, unsigned oldCapacity, Structure* newStructure) +{ + ASSERT(oldCapacity <= newStructure->outOfLineCapacity()); + + if (oldCapacity == newStructure->outOfLineCapacity()) { + setStructure(globalData, newStructure); + return; + } + + PropertyStorage newStorage = growOutOfLineStorage( + globalData, oldCapacity, newStructure->outOfLineCapacity()); + setOutOfLineStorage(globalData, newStorage, newStructure); +} + +inline void JSObject::setStructureAndReallocateStorageIfNecessary(JSGlobalData& globalData, Structure* newStructure) +{ + setStructureAndReallocateStorageIfNecessary( + globalData, structure()->outOfLineCapacity(), newStructure); +} + inline bool JSObject::putOwnDataProperty(JSGlobalData& globalData, PropertyName propertyName, JSValue value, PutPropertySlot& slot) { ASSERT(value); @@ -787,22 +843,14 @@ inline void JSObject::putDirect(JSGlobalData& globalData, PropertyName propertyN inline void JSObject::putDirectWithoutTransition(JSGlobalData& globalData, PropertyName propertyName, JSValue value, unsigned attributes) { ASSERT(!value.isGetterSetter() && !(attributes & Accessor)); - PropertyStorage newStorage = propertyStorage(); - if (structure()->shouldGrowPropertyStorage()) - newStorage = growPropertyStorage(globalData, structure()->propertyStorageCapacity(), structure()->suggestedNewPropertyStorageSize()); - size_t offset = structure()->addPropertyWithoutTransition(globalData, propertyName, attributes, getCallableObject(value)); - setPropertyStorage(globalData, newStorage, structure()); + PropertyStorage newStorage = outOfLineStorage(); + if (structure()->putWillGrowOutOfLineStorage()) + newStorage = growOutOfLineStorage(globalData, structure()->outOfLineCapacity(), structure()->suggestedNewOutOfLineStorageCapacity()); + PropertyOffset offset = 
structure()->addPropertyWithoutTransition(globalData, propertyName, attributes, getCallableObject(value)); + setOutOfLineStorage(globalData, newStorage, structure()); putDirectOffset(globalData, offset, value); } -inline void JSObject::transitionTo(JSGlobalData& globalData, Structure* newStructure) -{ - PropertyStorage newStorage = propertyStorage(); - if (structure()->propertyStorageCapacity() != newStructure->propertyStorageCapacity()) - newStorage = growPropertyStorage(globalData, structure()->propertyStorageCapacity(), newStructure->propertyStorageCapacity()); - setPropertyStorage(globalData, newStorage, newStructure); -} - inline JSValue JSObject::toPrimitive(ExecState* exec, PreferredPrimitiveType preferredType) const { return methodTable()->defaultValue(this, exec, preferredType); @@ -877,8 +925,6 @@ inline void JSValue::putByIndex(ExecState* exec, unsigned propertyName, JSValue asCell()->methodTable()->putByIndex(asCell(), exec, propertyName, value, shouldThrow); } -// --- JSValue inlines ---------------------------- - ALWAYS_INLINE JSObject* Register::function() const { if (!jsValue()) @@ -893,6 +939,32 @@ ALWAYS_INLINE Register Register::withCallee(JSObject* callee) return r; } +// This is a helper for patching code where you want to emit a load or store and +// the base is: +// For inline offsets: a pointer to the out-of-line storage pointer. +// For out-of-line offsets: the base of the out-of-line storage. 
+inline size_t offsetRelativeToPatchedStorage(PropertyOffset offset) +{ + if (isOutOfLineOffset(offset)) + return sizeof(EncodedJSValue) * offsetInOutOfLineStorage(offset); + return JSObject::offsetOfInlineStorage() - JSObject::offsetOfOutOfLineStorage() + sizeof(EncodedJSValue) * offsetInInlineStorage(offset); +} + +inline int indexRelativeToBase(PropertyOffset offset) +{ + if (isOutOfLineOffset(offset)) + return offsetInOutOfLineStorage(offset); + ASSERT(!(JSObject::offsetOfInlineStorage() % sizeof(EncodedJSValue))); + return JSObject::offsetOfInlineStorage() / sizeof(EncodedJSValue) + offsetInInlineStorage(offset); +} + +inline int offsetRelativeToBase(PropertyOffset offset) +{ + if (isOutOfLineOffset(offset)) + return offsetInOutOfLineStorage(offset) * sizeof(EncodedJSValue); + return JSObject::offsetOfInlineStorage() + offsetInInlineStorage(offset) * sizeof(EncodedJSValue); +} + } // namespace JSC #endif // JSObject_h diff --git a/Source/JavaScriptCore/runtime/JSPropertyNameIterator.cpp b/Source/JavaScriptCore/runtime/JSPropertyNameIterator.cpp index 6ceb3c411..aaf946d3d 100644 --- a/Source/JavaScriptCore/runtime/JSPropertyNameIterator.cpp +++ b/Source/JavaScriptCore/runtime/JSPropertyNameIterator.cpp @@ -56,10 +56,10 @@ JSPropertyNameIterator* JSPropertyNameIterator::create(ExecState* exec, JSObject size_t numCacheableSlots = 0; if (!o->structure()->hasNonEnumerableProperties() && !o->structure()->hasGetterSetterProperties() && !o->structure()->isUncacheableDictionary() && !o->structure()->typeInfo().overridesGetPropertyNames()) - numCacheableSlots = o->structure()->propertyStorageSize(); + numCacheableSlots = o->structure()->totalStorageSize(); JSPropertyNameIterator* jsPropertyNameIterator = new (NotNull, allocateCell<JSPropertyNameIterator>(*exec->heap())) JSPropertyNameIterator(exec, propertyNames.data(), numCacheableSlots); - jsPropertyNameIterator->finishCreation(exec, propertyNames.data()); + jsPropertyNameIterator->finishCreation(exec, 
propertyNames.data(), o); if (o->structure()->isDictionary()) return jsPropertyNameIterator; diff --git a/Source/JavaScriptCore/runtime/JSPropertyNameIterator.h b/Source/JavaScriptCore/runtime/JSPropertyNameIterator.h index 5b65e59f2..653ee0463 100644 --- a/Source/JavaScriptCore/runtime/JSPropertyNameIterator.h +++ b/Source/JavaScriptCore/runtime/JSPropertyNameIterator.h @@ -47,12 +47,6 @@ namespace JSC { typedef JSCell Base; static JSPropertyNameIterator* create(ExecState*, JSObject*); - static JSPropertyNameIterator* create(ExecState* exec, PropertyNameArrayData* propertyNameArrayData, size_t numCacheableSlot) - { - JSPropertyNameIterator* iterator = new (NotNull, allocateCell<JSPropertyNameIterator>(*exec->heap())) JSPropertyNameIterator(exec, propertyNameArrayData, numCacheableSlot); - iterator->finishCreation(exec, propertyNameArrayData); - return iterator; - } static void destroy(JSCell*); @@ -63,11 +57,11 @@ namespace JSC { static void visitChildren(JSCell*, SlotVisitor&); - bool getOffset(size_t i, int& offset) + bool getOffset(size_t i, PropertyOffset& offset) { if (i >= m_numCacheableSlots) return false; - offset = i; + offset = i + m_offsetBase; return true; } @@ -88,12 +82,13 @@ namespace JSC { static const ClassInfo s_info; protected: - void finishCreation(ExecState* exec, PropertyNameArrayData* propertyNameArrayData) + void finishCreation(ExecState* exec, PropertyNameArrayData* propertyNameArrayData, JSObject* object) { Base::finishCreation(exec->globalData()); PropertyNameArrayData::PropertyNameVector& propertyNameVector = propertyNameArrayData->propertyNameVector(); for (size_t i = 0; i < m_jsStringsSize; ++i) m_jsStrings[i].set(exec->globalData(), this, jsOwnedString(exec, propertyNameVector[i].ustring())); + m_offsetBase = object->structure()->firstValidOffset(); } private: @@ -105,6 +100,7 @@ namespace JSC { WriteBarrier<StructureChain> m_cachedPrototypeChain; uint32_t m_numCacheableSlots; uint32_t m_jsStringsSize; + PropertyOffset m_offsetBase; 
OwnArrayPtr<WriteBarrier<Unknown> > m_jsStrings; }; diff --git a/Source/JavaScriptCore/runtime/JSString.h b/Source/JavaScriptCore/runtime/JSString.h index 4fb157c8b..d6fc4c2a1 100644 --- a/Source/JavaScriptCore/runtime/JSString.h +++ b/Source/JavaScriptCore/runtime/JSString.h @@ -67,6 +67,7 @@ namespace JSC { friend class JSGlobalData; friend class SpecializedThunkJIT; friend class JSRopeString; + friend class MarkStack; friend class SlotVisitor; friend struct ThunkHelpers; @@ -77,12 +78,14 @@ namespace JSC { private: JSString(JSGlobalData& globalData, PassRefPtr<StringImpl> value) : JSCell(globalData, globalData.stringStructure.get()) + , m_flags(0) , m_value(value) { } JSString(JSGlobalData& globalData) : JSCell(globalData, globalData.stringStructure.get()) + , m_flags(0) { } @@ -91,7 +94,8 @@ namespace JSC { ASSERT(!m_value.isNull()); Base::finishCreation(globalData); m_length = length; - m_is8Bit = m_value.impl()->is8Bit(); + setIs8Bit(m_value.impl()->is8Bit()); + globalData.m_newStringsSinceLastHashConst++; } void finishCreation(JSGlobalData& globalData, size_t length, size_t cost) @@ -99,8 +103,9 @@ namespace JSC { ASSERT(!m_value.isNull()); Base::finishCreation(globalData); m_length = length; - m_is8Bit = m_value.impl()->is8Bit(); + setIs8Bit(m_value.impl()->is8Bit()); Heap::heap(this)->reportExtraMemoryCost(cost); + globalData.m_newStringsSinceLastHashConst++; } protected: @@ -108,7 +113,8 @@ namespace JSC { { Base::finishCreation(globalData); m_length = 0; - m_is8Bit = true; + setIs8Bit(true); + globalData.m_newStringsSinceLastHashConst++; } public: @@ -161,10 +167,30 @@ namespace JSC { protected: bool isRope() const { return m_value.isNull(); } - bool is8Bit() const { return m_is8Bit; } + bool is8Bit() const { return m_flags & Is8Bit; } + void setIs8Bit(bool flag) + { + if (flag) + m_flags |= Is8Bit; + else + m_flags &= ~Is8Bit; + } + bool shouldTryHashConst(); + bool isHashConstSingleton() const { return m_flags & IsHashConstSingleton; } + void 
clearHashConstSingleton() { m_flags &= ~IsHashConstSingleton; } + void setHashConstSingleton() { m_flags |= IsHashConstSingleton; } + bool tryHashConstLock(); + void releaseHashConstLock(); + + unsigned m_flags; + + enum { + HashConstLock = 1u << 2, + IsHashConstSingleton = 1u << 1, + Is8Bit = 1u + }; // A string is represented either by a UString or a rope of fibers. - bool m_is8Bit : 1; unsigned m_length; mutable UString m_value; @@ -231,7 +257,7 @@ namespace JSC { { Base::finishCreation(globalData); m_length = s1->length() + s2->length(); - m_is8Bit = (s1->is8Bit() && s2->is8Bit()); + setIs8Bit(s1->is8Bit() && s2->is8Bit()); m_fibers[0].set(globalData, this, s1); m_fibers[1].set(globalData, this, s2); } @@ -240,7 +266,7 @@ namespace JSC { { Base::finishCreation(globalData); m_length = s1->length() + s2->length() + s3->length(); - m_is8Bit = (s1->is8Bit() && s2->is8Bit() && s3->is8Bit()); + setIs8Bit(s1->is8Bit() && s2->is8Bit() && s3->is8Bit()); m_fibers[0].set(globalData, this, s1); m_fibers[1].set(globalData, this, s2); m_fibers[2].set(globalData, this, s3); @@ -255,7 +281,7 @@ namespace JSC { { m_fibers[index].set(globalData, this, jsString); m_length += jsString->m_length; - m_is8Bit = m_is8Bit && jsString->m_is8Bit; + setIs8Bit(is8Bit() && jsString->is8Bit()); } static JSRopeString* createNull(JSGlobalData& globalData) diff --git a/Source/JavaScriptCore/runtime/JSValue.cpp b/Source/JavaScriptCore/runtime/JSValue.cpp index e10867176..c34431178 100644 --- a/Source/JavaScriptCore/runtime/JSValue.cpp +++ b/Source/JavaScriptCore/runtime/JSValue.cpp @@ -130,8 +130,8 @@ void JSValue::putToPrimitive(ExecState* exec, PropertyName propertyName, JSValue for (; ; obj = asObject(prototype)) { unsigned attributes; JSCell* specificValue; - size_t offset = obj->structure()->get(globalData, propertyName, attributes, specificValue); - if (offset != WTF::notFound) { + PropertyOffset offset = obj->structure()->get(globalData, propertyName, attributes, specificValue); + if 
(offset != invalidOffset) { if (attributes & ReadOnly) { if (slot.isStrictMode()) throwError(exec, createTypeError(exec, StrictModeReadonlyPropertyWriteError)); diff --git a/Source/JavaScriptCore/runtime/Operations.h b/Source/JavaScriptCore/runtime/Operations.h index b2081f3dd..497b19d82 100644 --- a/Source/JavaScriptCore/runtime/Operations.h +++ b/Source/JavaScriptCore/runtime/Operations.h @@ -297,7 +297,7 @@ namespace JSC { return jsAddSlowCase(callFrame, v1, v2); } - inline size_t normalizePrototypeChain(CallFrame* callFrame, JSValue base, JSValue slotBase, const Identifier& propertyName, size_t& slotOffset) + inline size_t normalizePrototypeChain(CallFrame* callFrame, JSValue base, JSValue slotBase, const Identifier& propertyName, PropertyOffset& slotOffset) { JSCell* cell = base.asCell(); size_t count = 0; diff --git a/Source/JavaScriptCore/runtime/Options.cpp b/Source/JavaScriptCore/runtime/Options.cpp index 894ca8cc0..b5ce39c0d 100644 --- a/Source/JavaScriptCore/runtime/Options.cpp +++ b/Source/JavaScriptCore/runtime/Options.cpp @@ -26,9 +26,15 @@ #include "config.h" #include "Options.h" +#include <algorithm> #include <limits> +#include <stdio.h> +#include <stdlib.h> +#include <string.h> #include <wtf/NumberOfCores.h> #include <wtf/PageBlock.h> +#include <wtf/StdLibExtras.h> +#include <wtf/StringExtras.h> #if OS(DARWIN) && ENABLE(PARALLEL_GC) #include <sys/sysctl.h> @@ -37,71 +43,9 @@ // Set to 1 to control the heuristics using environment variables. 
#define ENABLE_RUN_TIME_HEURISTICS 0 -#if ENABLE(RUN_TIME_HEURISTICS) -#include <stdio.h> -#include <stdlib.h> -#include <wtf/StdLibExtras.h> -#endif - -namespace JSC { namespace Options { - -bool useJIT; - -bool showDisassembly; -bool showDFGDisassembly; - -unsigned maximumOptimizationCandidateInstructionCount; - -unsigned maximumFunctionForCallInlineCandidateInstructionCount; -unsigned maximumFunctionForConstructInlineCandidateInstructionCount; - -unsigned maximumInliningDepth; - -int32_t thresholdForJITAfterWarmUp; -int32_t thresholdForJITSoon; - -int32_t thresholdForOptimizeAfterWarmUp; -int32_t thresholdForOptimizeAfterLongWarmUp; -int32_t thresholdForOptimizeSoon; -int32_t executionCounterIncrementForLoop; -int32_t executionCounterIncrementForReturn; +namespace JSC { -bool randomizeExecutionCountsBetweenCheckpoints; -int32_t maximumExecutionCountsBetweenCheckpoints; - -unsigned desiredSpeculativeSuccessFailRatio; - -double likelyToTakeSlowCaseThreshold; -double couldTakeSlowCaseThreshold; -unsigned likelyToTakeSlowCaseMinimumCount; -unsigned couldTakeSlowCaseMinimumCount; - -double osrExitProminenceForFrequentExitSite; - -unsigned largeFailCountThresholdBase; -unsigned largeFailCountThresholdBaseForLoop; -unsigned forcedOSRExitCountForReoptimization; - -unsigned reoptimizationRetryCounterMax; -unsigned reoptimizationRetryCounterStep; - -unsigned minimumOptimizationDelay; -unsigned maximumOptimizationDelay; -double desiredProfileLivenessRate; -double desiredProfileFullnessRate; - -double doubleVoteRatioForDoubleFormat; - -unsigned minimumNumberOfScansBetweenRebalance; -unsigned gcMarkStackSegmentSize; -unsigned numberOfGCMarkers; -unsigned opaqueRootMergeThreshold; - -bool forceWeakRandomSeed; -unsigned forcedWeakRandomSeed; - -#if ENABLE(RUN_TIME_HEURISTICS) static bool parse(const char* string, bool& value) { if (!strcasecmp(string, "true") || !strcasecmp(string, "yes") || !strcmp(string, "1")) { @@ -130,27 +74,22 @@ static bool parse(const char* string, 
double& value) return sscanf(string, "%lf", &value) == 1; } -template<typename T, typename U> -void setHeuristic(T& variable, const char* name, U value) +#if ENABLE(RUN_TIME_HEURISTICS) +template<typename T> +void overrideOptionWithHeuristic(T& variable, const char* name) { const char* stringValue = getenv(name); - if (!stringValue) { - variable = safeCast<T>(value); + if (!stringValue) return; - } if (parse(stringValue, variable)) return; fprintf(stderr, "WARNING: failed to parse %s=%s\n", name, stringValue); - variable = safeCast<T>(value); } - -#define SET(variable, value) setHeuristic(variable, "JSC_" #variable, value) -#else -#define SET(variable, value) variable = value #endif + static unsigned computeNumberOfGCMarkers(int maxNumberOfGCMarkers) { int cpusToUse = 1; @@ -167,80 +106,116 @@ static unsigned computeNumberOfGCMarkers(int maxNumberOfGCMarkers) return cpusToUse; } -void initializeOptions() -{ - SET(useJIT, true); - - SET(showDisassembly, false); - SET(showDFGDisassembly, false); - - SET(maximumOptimizationCandidateInstructionCount, 10000); - - SET(maximumFunctionForCallInlineCandidateInstructionCount, 180); - SET(maximumFunctionForConstructInlineCandidateInstructionCount, 100); - - SET(maximumInliningDepth, 5); +Options::Entry Options::s_options[Options::numberOfOptions]; - SET(thresholdForJITAfterWarmUp, 100); - SET(thresholdForJITSoon, 100); +// Realize the names for each of the options: +const Options::EntryInfo Options::s_optionsInfo[Options::numberOfOptions] = { +#define FOR_EACH_OPTION(type_, name_, defaultValue_) \ + { #name_, Options::type_##Type }, + JSC_OPTIONS(FOR_EACH_OPTION) +#undef FOR_EACH_OPTION +}; - SET(thresholdForOptimizeAfterWarmUp, 1000); - SET(thresholdForOptimizeAfterLongWarmUp, 5000); - SET(thresholdForOptimizeSoon, 1000); - - SET(executionCounterIncrementForLoop, 1); - SET(executionCounterIncrementForReturn, 15); - - SET(randomizeExecutionCountsBetweenCheckpoints, false); - SET(maximumExecutionCountsBetweenCheckpoints, 
1000); - - SET(desiredSpeculativeSuccessFailRatio, 6); - - SET(likelyToTakeSlowCaseThreshold, 0.15); - SET(couldTakeSlowCaseThreshold, 0.05); // Shouldn't be zero because some ops will spuriously take slow case, for example for linking or caching. - SET(likelyToTakeSlowCaseMinimumCount, 100); - SET(couldTakeSlowCaseMinimumCount, 10); +void Options::initialize() +{ + // Initialize each of the options with their default values: +#define FOR_EACH_OPTION(type_, name_, defaultValue_) \ + name_() = defaultValue_; + JSC_OPTIONS(FOR_EACH_OPTION) +#undef FOR_EACH_OPTION + + // Allow environment vars to override options if applicable. + // The evn var should be the name of the option prefixed with + // "JSC_". +#if ENABLE(RUN_TIME_HEURISTICS) +#define FOR_EACH_OPTION(type_, name_, defaultValue_) \ + overrideOptionWithHeuristic(name_(), "JSC_" #name_); + JSC_OPTIONS(FOR_EACH_OPTION) +#undef FOR_EACH_OPTION +#endif // RUN_TIME_HEURISTICS + +#if 0 + ; // Deconfuse editors that do auto indentation +#endif - SET(osrExitProminenceForFrequentExitSite, 0.3); - - SET(largeFailCountThresholdBase, 20); - SET(largeFailCountThresholdBaseForLoop, 1); - SET(forcedOSRExitCountForReoptimization, 250); + // Do range checks where needed and make corrections to the options: + ASSERT(thresholdForOptimizeAfterLongWarmUp() >= thresholdForOptimizeAfterWarmUp()); + ASSERT(thresholdForOptimizeAfterWarmUp() >= thresholdForOptimizeSoon()); + ASSERT(thresholdForOptimizeAfterWarmUp() >= 0); - SET(reoptimizationRetryCounterStep, 1); - - SET(minimumOptimizationDelay, 1); - SET(maximumOptimizationDelay, 5); - SET(desiredProfileLivenessRate, 0.75); - SET(desiredProfileFullnessRate, 0.35); - - SET(doubleVoteRatioForDoubleFormat, 2); - - SET(minimumNumberOfScansBetweenRebalance, 100); - SET(gcMarkStackSegmentSize, pageSize()); - SET(opaqueRootMergeThreshold, 1000); - SET(numberOfGCMarkers, computeNumberOfGCMarkers(7)); // We don't scale so well beyond 7. 
- - ASSERT(thresholdForOptimizeAfterLongWarmUp >= thresholdForOptimizeAfterWarmUp); - ASSERT(thresholdForOptimizeAfterWarmUp >= thresholdForOptimizeSoon); - ASSERT(thresholdForOptimizeAfterWarmUp >= 0); - // Compute the maximum value of the reoptimization retry counter. This is simply // the largest value at which we don't overflow the execute counter, when using it // to left-shift the execution counter by this amount. Currently the value ends // up being 18, so this loop is not so terrible; it probably takes up ~100 cycles // total on a 32-bit processor. - reoptimizationRetryCounterMax = 0; - while ((static_cast<int64_t>(thresholdForOptimizeAfterLongWarmUp) << (reoptimizationRetryCounterMax + 1)) <= static_cast<int64_t>(std::numeric_limits<int32_t>::max())) - reoptimizationRetryCounterMax++; - - ASSERT((static_cast<int64_t>(thresholdForOptimizeAfterLongWarmUp) << reoptimizationRetryCounterMax) > 0); - ASSERT((static_cast<int64_t>(thresholdForOptimizeAfterLongWarmUp) << reoptimizationRetryCounterMax) <= static_cast<int64_t>(std::numeric_limits<int32_t>::max())); - - SET(forceWeakRandomSeed, false); - SET(forcedWeakRandomSeed, 0); + reoptimizationRetryCounterMax() = 0; + while ((static_cast<int64_t>(thresholdForOptimizeAfterLongWarmUp()) << (reoptimizationRetryCounterMax() + 1)) <= static_cast<int64_t>(std::numeric_limits<int32>::max())) + reoptimizationRetryCounterMax()++; + + ASSERT((static_cast<int64_t>(thresholdForOptimizeAfterLongWarmUp()) << reoptimizationRetryCounterMax()) > 0); + ASSERT((static_cast<int64_t>(thresholdForOptimizeAfterLongWarmUp()) << reoptimizationRetryCounterMax()) <= static_cast<int64_t>(std::numeric_limits<int32>::max())); } -} } // namespace JSC::Options +// Parses a single command line option in the format "<optionName>=<value>" +// (no spaces allowed) and set the specified option if appropriate. 
+bool Options::setOption(const char* arg) +{ + // arg should look like this: + // <jscOptionName>=<appropriate value> + const char* equalStr = strchr(arg, '='); + if (!equalStr) + return false; + + const char* valueStr = equalStr + 1; + + // For each option, check if the specify arg is a match. If so, set the arg + // if the value makes sense. Otherwise, move on to checking the next option. +#define FOR_EACH_OPTION(type_, name_, defaultValue_) \ + if (!strncmp(arg, #name_, equalStr - arg)) { \ + type_ value; \ + bool success = parse(valueStr, value); \ + if (success) { \ + name_() = value; \ + return true; \ + } \ + return false; \ + } + + JSC_OPTIONS(FOR_EACH_OPTION) +#undef FOR_EACH_OPTION + + return false; // No option matched. +} + +void Options::dumpAllOptions(FILE* stream) +{ + fprintf(stream, "JSC runtime options:\n"); + for (int id = 0; id < numberOfOptions; id++) + dumpOption(static_cast<OptionID>(id), stream, " ", "\n"); +} + +void Options::dumpOption(OptionID id, FILE* stream, const char* header, const char* footer) +{ + if (id >= numberOfOptions) + return; // Illegal option. 
+ + fprintf(stream, "%s%s: ", header, s_optionsInfo[id].name); + switch (s_optionsInfo[id].type) { + case boolType: + fprintf(stream, "%s", s_options[id].u.boolVal?"true":"false"); + break; + case unsignedType: + fprintf(stream, "%u", s_options[id].u.unsignedVal); + break; + case doubleType: + fprintf(stream, "%lf", s_options[id].u.doubleVal); + break; + case int32Type: + fprintf(stream, "%d", s_options[id].u.int32Val); + break; + } + fprintf(stream, "%s", footer); +} +} // namespace JSC diff --git a/Source/JavaScriptCore/runtime/Options.h b/Source/JavaScriptCore/runtime/Options.h index 1bce5b944..0a55bda6b 100644 --- a/Source/JavaScriptCore/runtime/Options.h +++ b/Source/JavaScriptCore/runtime/Options.h @@ -26,70 +26,167 @@ #ifndef Options_h #define Options_h +#include "JSExportMacros.h" #include <stdint.h> - -namespace JSC { namespace Options { - -extern bool useJIT; - -extern bool showDisassembly; -extern bool showDFGDisassembly; // showDisassembly implies showDFGDisassembly. - -extern unsigned maximumOptimizationCandidateInstructionCount; - -extern unsigned maximumFunctionForCallInlineCandidateInstructionCount; -extern unsigned maximumFunctionForConstructInlineCandidateInstructionCount; - -extern unsigned maximumInliningDepth; // Depth of inline stack, so 1 = no inlining, 2 = one level, etc. 
- -extern int32_t thresholdForJITAfterWarmUp; -extern int32_t thresholdForJITSoon; - -extern int32_t thresholdForOptimizeAfterWarmUp; -extern int32_t thresholdForOptimizeAfterLongWarmUp; -extern int32_t thresholdForOptimizeSoon; -extern int32_t thresholdForOptimizeNextInvocation; - -extern int32_t executionCounterIncrementForLoop; -extern int32_t executionCounterIncrementForReturn; - -extern bool randomizeExecutionCountsBetweenCheckpoints; -extern int32_t maximumExecutionCountsBetweenCheckpoints; - -extern unsigned desiredSpeculativeSuccessFailRatio; - -extern double likelyToTakeSlowCaseThreshold; -extern double couldTakeSlowCaseThreshold; -extern unsigned likelyToTakeSlowCaseMinimumCount; -extern unsigned couldTakeSlowCaseMinimumCount; - -extern double osrExitProminenceForFrequentExitSite; - -extern unsigned largeFailCountThresholdBase; -extern unsigned largeFailCountThresholdBaseForLoop; -extern unsigned forcedOSRExitCountForReoptimization; - -extern unsigned reoptimizationRetryCounterMax; -extern unsigned reoptimizationRetryCounterStep; - -extern unsigned minimumOptimizationDelay; -extern unsigned maximumOptimizationDelay; -extern double desiredProfileLivenessRate; -extern double desiredProfileFullnessRate; - -extern double doubleVoteRatioForDoubleFormat; - -extern unsigned minimumNumberOfScansBetweenRebalance; -extern unsigned gcMarkStackSegmentSize; -JS_EXPORTDATA extern unsigned numberOfGCMarkers; -JS_EXPORTDATA extern unsigned opaqueRootMergeThreshold; - -extern bool forceWeakRandomSeed; -extern unsigned forcedWeakRandomSeed; - -void initializeOptions(); - -} } // namespace JSC::Options +#include <stdio.h> + +namespace JSC { + +// How do JSC VM options work? +// =========================== +// The JSC_OPTIONS() macro below defines a list of all JSC options in use, +// along with their types and default values. The options values are actually +// realized as an array of Options::Entry elements. 
+// +// Options::initialize() will initialize the array of options values with +// the defaults specified in JSC_OPTIONS() below. After that, the values can +// be programmatically read and written to using an accessor method with the +// same name as the option. For example, the option "useJIT" can be read and +// set like so: +// +// bool jitIsOn = Options::useJIT(); // Get the option value. +// Options::useJIT() = false; // Sets the option value. +// +// If you want to tweak any of these values programmatically for testing +// purposes, you can do so in Options::initialize() after the default values +// are set. +// +// Alternatively, you can enable RUN_TIME_HEURISTICS which will allow you +// to override the default values by specifying environment variables of the +// form: JSC_<name of JSC option>. +// +// Note: Options::initialize() tries to ensure some sanity on the option values +// which are set by doing some range checks, and value corrections. These +// checks are done after the option values are set. If you alter the option +// values after the sanity checks (for your own testing), then you're liable to +// ensure that the new values set are sane and reasonable for your own run. + + +#define JSC_OPTIONS(v) \ + v(bool, useJIT, true) \ + v(bool, useDFGJIT, true) \ + \ + /* showDisassembly implies showDFGDisassembly. */ \ + v(bool, showDisassembly, false) \ + v(bool, showDFGDisassembly, false) \ + \ + v(unsigned, maximumOptimizationCandidateInstructionCount, 10000) \ + \ + v(unsigned, maximumFunctionForCallInlineCandidateInstructionCount, 180) \ + v(unsigned, maximumFunctionForConstructInlineCandidateInstructionCount, 100) \ + \ + /* Depth of inline stack, so 1 = no inlining, 2 = one level, etc. 
*/ \ + v(unsigned, maximumInliningDepth, 5) \ + \ + v(int32, thresholdForJITAfterWarmUp, 100) \ + v(int32, thresholdForJITSoon, 100) \ + \ + v(int32, thresholdForOptimizeAfterWarmUp, 1000) \ + v(int32, thresholdForOptimizeAfterLongWarmUp, 5000) \ + v(int32, thresholdForOptimizeSoon, 1000) \ + \ + v(int32, executionCounterIncrementForLoop, 1) \ + v(int32, executionCounterIncrementForReturn, 15) \ + \ + v(bool, randomizeExecutionCountsBetweenCheckpoints, false) \ + v(int32, maximumExecutionCountsBetweenCheckpoints, 1000) \ + \ + v(double, likelyToTakeSlowCaseThreshold, 0.15) \ + v(double, couldTakeSlowCaseThreshold, 0.05) \ + v(unsigned, likelyToTakeSlowCaseMinimumCount, 100) \ + v(unsigned, couldTakeSlowCaseMinimumCount, 10) \ + \ + v(double, osrExitProminenceForFrequentExitSite, 0.3) \ + v(unsigned, osrExitCountForReoptimization, 100) \ + v(unsigned, osrExitCountForReoptimizationFromLoop, 5) \ + \ + v(unsigned, reoptimizationRetryCounterMax, 0) \ + v(unsigned, reoptimizationRetryCounterStep, 1) \ + \ + v(unsigned, minimumOptimizationDelay, 1) \ + v(unsigned, maximumOptimizationDelay, 5) \ + v(double, desiredProfileLivenessRate, 0.75) \ + v(double, desiredProfileFullnessRate, 0.35) \ + \ + v(double, doubleVoteRatioForDoubleFormat, 2) \ + \ + v(unsigned, minimumNumberOfScansBetweenRebalance, 100) \ + v(unsigned, gcMarkStackSegmentSize, pageSize()) \ + v(unsigned, numberOfGCMarkers, computeNumberOfGCMarkers(7)) \ + v(unsigned, opaqueRootMergeThreshold, 1000) \ + \ + v(bool, forceWeakRandomSeed, false) \ + v(unsigned, forcedWeakRandomSeed, 0) + + +class Options { +public: + // This typedef is to allow us to eliminate the '_' in the field name in + // union inside Entry. This is needed to keep the style checker happy. 
+ typedef int32_t int32; + + // Declare the option IDs: + enum OptionID { +#define FOR_EACH_OPTION(type_, name_, defaultValue_) \ + OPT_##name_, + JSC_OPTIONS(FOR_EACH_OPTION) +#undef FOR_EACH_OPTION + numberOfOptions + }; + + + static void initialize(); + + // Parses a single command line option in the format "<optionName>=<value>" + // (no spaces allowed) and set the specified option if appropriate. + JS_EXPORT_PRIVATE static bool setOption(const char* arg); + JS_EXPORT_PRIVATE static void dumpAllOptions(FILE* stream = stdout); + static void dumpOption(OptionID id, FILE* stream = stdout, const char* header = "", const char* footer = ""); + + // Declare accessors for each option: +#define FOR_EACH_OPTION(type_, name_, defaultValue_) \ + ALWAYS_INLINE static type_& name_() { return s_options[OPT_##name_].u.type_##Val; } + + JSC_OPTIONS(FOR_EACH_OPTION) +#undef FOR_EACH_OPTION + +private: + enum EntryType { + boolType, + unsignedType, + doubleType, + int32Type + }; + + // For storing for an option value: + struct Entry { + union { + bool boolVal; + unsigned unsignedVal; + double doubleVal; + int32 int32Val; + } u; + }; + + // For storing constant meta data about each option: + struct EntryInfo { + const char* name; + EntryType type; + }; + + Options(); + + // Declare the options: +#define FOR_EACH_OPTION(type_, name_, defaultValue_) \ + type_ m_##name_; + JSC_OPTIONS(FOR_EACH_OPTION) +#undef FOR_EACH_OPTION + + // Declare the singleton instance of the options store: + JS_EXPORTDATA static Entry s_options[numberOfOptions]; + static const EntryInfo s_optionsInfo[numberOfOptions]; +}; + +} // namespace JSC #endif // Options_h - diff --git a/Source/JavaScriptCore/runtime/PropertyMapHashTable.h b/Source/JavaScriptCore/runtime/PropertyMapHashTable.h index c47f3476a..5953f5e87 100644 --- a/Source/JavaScriptCore/runtime/PropertyMapHashTable.h +++ b/Source/JavaScriptCore/runtime/PropertyMapHashTable.h @@ -21,6 +21,7 @@ #ifndef PropertyMapHashTable_h #define 
PropertyMapHashTable_h +#include "PropertyOffset.h" #include "UString.h" #include "WriteBarrier.h" #include <wtf/HashTable.h> @@ -72,11 +73,11 @@ inline unsigned nextPowerOf2(unsigned v) struct PropertyMapEntry { StringImpl* key; - unsigned offset; + PropertyOffset offset; unsigned attributes; WriteBarrier<JSCell> specificValue; - PropertyMapEntry(JSGlobalData& globalData, JSCell* owner, StringImpl* key, unsigned offset, unsigned attributes, JSCell* specificValue) + PropertyMapEntry(JSGlobalData& globalData, JSCell* owner, StringImpl* key, PropertyOffset offset, unsigned attributes, JSCell* specificValue) : key(key) , offset(offset) , attributes(attributes) @@ -174,8 +175,10 @@ public: // Used to maintain a list of unused entries in the property storage. void clearDeletedOffsets(); bool hasDeletedOffset(); - unsigned getDeletedOffset(); - void addDeletedOffset(unsigned offset); + PropertyOffset getDeletedOffset(); + void addDeletedOffset(PropertyOffset); + + PropertyOffset nextOffset(JSType); // Copy this PropertyTable, ensuring the copy has at least the capacity provided. PassOwnPtr<PropertyTable> copy(JSGlobalData&, JSCell* owner, unsigned newCapacity); @@ -230,7 +233,7 @@ private: unsigned* m_index; unsigned m_keyCount; unsigned m_deletedCount; - OwnPtr< Vector<unsigned> > m_deletedOffsets; + OwnPtr< Vector<PropertyOffset> > m_deletedOffsets; static const unsigned MinimumTableSize = 16; static const unsigned EmptyEntryIndex = 0; @@ -264,9 +267,9 @@ inline PropertyTable::PropertyTable(JSGlobalData&, JSCell* owner, const Property } // Copy the m_deletedOffsets vector. 
- Vector<unsigned>* otherDeletedOffsets = other.m_deletedOffsets.get(); + Vector<PropertyOffset>* otherDeletedOffsets = other.m_deletedOffsets.get(); if (otherDeletedOffsets) - m_deletedOffsets = adoptPtr(new Vector<unsigned>(*otherDeletedOffsets)); + m_deletedOffsets = adoptPtr(new Vector<PropertyOffset>(*otherDeletedOffsets)); } inline PropertyTable::PropertyTable(JSGlobalData&, JSCell* owner, unsigned initialCapacity, const PropertyTable& other) @@ -288,9 +291,9 @@ inline PropertyTable::PropertyTable(JSGlobalData&, JSCell* owner, unsigned initi } // Copy the m_deletedOffsets vector. - Vector<unsigned>* otherDeletedOffsets = other.m_deletedOffsets.get(); + Vector<PropertyOffset>* otherDeletedOffsets = other.m_deletedOffsets.get(); if (otherDeletedOffsets) - m_deletedOffsets = adoptPtr(new Vector<unsigned>(*otherDeletedOffsets)); + m_deletedOffsets = adoptPtr(new Vector<PropertyOffset>(*otherDeletedOffsets)); } inline PropertyTable::~PropertyTable() @@ -469,20 +472,31 @@ inline bool PropertyTable::hasDeletedOffset() return m_deletedOffsets && !m_deletedOffsets->isEmpty(); } -inline unsigned PropertyTable::getDeletedOffset() +inline PropertyOffset PropertyTable::getDeletedOffset() { - unsigned offset = m_deletedOffsets->last(); + PropertyOffset offset = m_deletedOffsets->last(); m_deletedOffsets->removeLast(); return offset; } -inline void PropertyTable::addDeletedOffset(unsigned offset) +inline void PropertyTable::addDeletedOffset(PropertyOffset offset) { if (!m_deletedOffsets) - m_deletedOffsets = adoptPtr(new Vector<unsigned>); + m_deletedOffsets = adoptPtr(new Vector<PropertyOffset>); m_deletedOffsets->append(offset); } +inline PropertyOffset PropertyTable::nextOffset(JSType type) +{ + if (hasDeletedOffset()) + return getDeletedOffset(); + + if (type == FinalObjectType) + return size(); + + return size() + firstOutOfLineOffset; +} + inline PassOwnPtr<PropertyTable> PropertyTable::copy(JSGlobalData& globalData, JSCell* owner, unsigned newCapacity) { 
ASSERT(newCapacity >= m_keyCount); @@ -499,7 +513,7 @@ inline size_t PropertyTable::sizeInMemory() { size_t result = sizeof(PropertyTable) + dataSize(); if (m_deletedOffsets) - result += (m_deletedOffsets->capacity() * sizeof(unsigned)); + result += (m_deletedOffsets->capacity() * sizeof(PropertyOffset)); return result; } #endif diff --git a/Source/JavaScriptCore/runtime/PropertyOffset.h b/Source/JavaScriptCore/runtime/PropertyOffset.h new file mode 100644 index 000000000..c0d1316c4 --- /dev/null +++ b/Source/JavaScriptCore/runtime/PropertyOffset.h @@ -0,0 +1,163 @@ +/* + * Copyright (C) 2012 Apple Inc. All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * 1. Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * 2. Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in the + * documentation and/or other materials provided with the distribution. + * + * THIS SOFTWARE IS PROVIDED BY APPLE INC. ``AS IS'' AND ANY + * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE + * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR + * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR + * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, + * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, + * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR + * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY + * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ */ + +#ifndef PropertyOffset_h +#define PropertyOffset_h + +#include "JSType.h" +#include <wtf/Platform.h> +#include <wtf/StdLibExtras.h> +#include <wtf/UnusedParam.h> + +namespace JSC { + +#if USE(JSVALUE32_64) +#define INLINE_STORAGE_CAPACITY 6 +#else +#define INLINE_STORAGE_CAPACITY 4 +#endif + +typedef int PropertyOffset; + +static const PropertyOffset invalidOffset = -1; +static const PropertyOffset inlineStorageCapacity = INLINE_STORAGE_CAPACITY; +static const PropertyOffset firstOutOfLineOffset = inlineStorageCapacity; + +// Declare all of the functions because they tend to do forward calls. +inline void checkOffset(PropertyOffset); +inline void checkOffset(PropertyOffset, JSType); +inline void validateOffset(PropertyOffset); +inline void validateOffset(PropertyOffset, JSType); +inline bool isValidOffset(PropertyOffset); +inline bool isInlineOffset(PropertyOffset); +inline bool isOutOfLineOffset(PropertyOffset); +inline size_t offsetInInlineStorage(PropertyOffset); +inline size_t offsetInOutOfLineStorage(PropertyOffset); +inline size_t offsetInRespectiveStorage(PropertyOffset); +inline size_t numberOfOutOfLineSlotsForLastOffset(PropertyOffset); +inline size_t numberOfSlotsForLastOffset(PropertyOffset, JSType); +inline PropertyOffset nextPropertyOffsetFor(PropertyOffset, JSType); +inline PropertyOffset firstPropertyOffsetFor(JSType); + +inline void checkOffset(PropertyOffset offset) +{ + UNUSED_PARAM(offset); + ASSERT(offset >= invalidOffset); +} + +inline void checkOffset(PropertyOffset offset, JSType type) +{ + UNUSED_PARAM(offset); + UNUSED_PARAM(type); + ASSERT(offset >= invalidOffset); + ASSERT(offset == invalidOffset + || type == FinalObjectType + || isOutOfLineOffset(offset)); +} + +inline void validateOffset(PropertyOffset offset) +{ + checkOffset(offset); + ASSERT(isValidOffset(offset)); +} + +inline void validateOffset(PropertyOffset offset, JSType type) +{ + checkOffset(offset, type); + ASSERT(isValidOffset(offset)); +} + +inline bool 
isValidOffset(PropertyOffset offset) +{ + checkOffset(offset); + return offset != invalidOffset; +} + +inline bool isInlineOffset(PropertyOffset offset) +{ + checkOffset(offset); + return offset < inlineStorageCapacity; +} + +inline bool isOutOfLineOffset(PropertyOffset offset) +{ + checkOffset(offset); + return !isInlineOffset(offset); +} + +inline size_t offsetInInlineStorage(PropertyOffset offset) +{ + validateOffset(offset); + ASSERT(isInlineOffset(offset)); + return offset; +} + +inline size_t offsetInOutOfLineStorage(PropertyOffset offset) +{ + validateOffset(offset); + ASSERT(isOutOfLineOffset(offset)); + return offset - firstOutOfLineOffset; +} + +inline size_t offsetInRespectiveStorage(PropertyOffset offset) +{ + if (isInlineOffset(offset)) + return offsetInInlineStorage(offset); + return offsetInOutOfLineStorage(offset); +} + +inline size_t numberOfOutOfLineSlotsForLastOffset(PropertyOffset offset) +{ + checkOffset(offset); + if (offset < firstOutOfLineOffset) + return 0; + return offset - firstOutOfLineOffset + 1; +} + +inline size_t numberOfSlotsForLastOffset(PropertyOffset offset, JSType type) +{ + checkOffset(offset, type); + if (type == FinalObjectType) + return offset + 1; + return numberOfOutOfLineSlotsForLastOffset(offset); +} + +inline PropertyOffset nextPropertyOffsetFor(PropertyOffset offset, JSType type) +{ + checkOffset(offset, type); + if (type != FinalObjectType && offset == invalidOffset) + return firstOutOfLineOffset; + return offset + 1; +} + +inline PropertyOffset firstPropertyOffsetFor(JSType type) +{ + return nextPropertyOffsetFor(invalidOffset, type); +} + +} // namespace JSC + +#endif // PropertyOffset_h + diff --git a/Source/JavaScriptCore/runtime/PropertySlot.h b/Source/JavaScriptCore/runtime/PropertySlot.h index 131cf7a92..c673eaa50 100644 --- a/Source/JavaScriptCore/runtime/PropertySlot.h +++ b/Source/JavaScriptCore/runtime/PropertySlot.h @@ -23,6 +23,7 @@ #include "JSValue.h" #include "PropertyName.h" +#include 
"PropertyOffset.h" #include "Register.h" #include <wtf/Assertions.h> #include <wtf/NotFound.h> @@ -89,7 +90,7 @@ namespace JSC { CachedPropertyType cachedPropertyType() const { return m_cachedPropertyType; } bool isCacheable() const { return m_cachedPropertyType != Uncacheable; } bool isCacheableValue() const { return m_cachedPropertyType == Value; } - size_t cachedOffset() const + PropertyOffset cachedOffset() const { ASSERT(isCacheable()); return m_offset; @@ -104,7 +105,7 @@ namespace JSC { m_value = value; } - void setValue(JSValue slotBase, JSValue value, size_t offset) + void setValue(JSValue slotBase, JSValue value, PropertyOffset offset) { ASSERT(value); m_getValue = JSC_VALUE_MARKER; @@ -160,7 +161,7 @@ namespace JSC { m_data.getterFunc = getterFunc; } - void setCacheableGetterSlot(JSValue slotBase, JSObject* getterFunc, unsigned offset) + void setCacheableGetterSlot(JSValue slotBase, JSObject* getterFunc, PropertyOffset offset) { ASSERT(getterFunc); m_getValue = GETTER_FUNCTION_MARKER; @@ -206,7 +207,7 @@ namespace JSC { { // Clear offset even in release builds, in case this PropertySlot has been used before. // (For other data members, we don't need to clear anything because reuse would meaningfully overwrite them.) 
- m_offset = 0; + m_offset = invalidOffset; m_cachedPropertyType = Uncacheable; } @@ -232,7 +233,7 @@ namespace JSC { JSValue m_value; JSValue m_thisValue; - size_t m_offset; + PropertyOffset m_offset; CachedPropertyType m_cachedPropertyType; }; diff --git a/Source/JavaScriptCore/runtime/PutPropertySlot.h b/Source/JavaScriptCore/runtime/PutPropertySlot.h index 69d1f8bd2..0f694e33b 100644 --- a/Source/JavaScriptCore/runtime/PutPropertySlot.h +++ b/Source/JavaScriptCore/runtime/PutPropertySlot.h @@ -45,14 +45,14 @@ namespace JSC { { } - void setExistingProperty(JSObject* base, size_t offset) + void setExistingProperty(JSObject* base, PropertyOffset offset) { m_type = ExistingProperty; m_base = base; m_offset = offset; } - void setNewProperty(JSObject* base, size_t offset) + void setNewProperty(JSObject* base, PropertyOffset offset) { m_type = NewProperty; m_base = base; @@ -64,7 +64,8 @@ namespace JSC { bool isStrictMode() const { return m_isStrictMode; } bool isCacheable() const { return m_type != Uncachable; } - size_t cachedOffset() const { + PropertyOffset cachedOffset() const + { ASSERT(isCacheable()); return m_offset; } @@ -72,7 +73,7 @@ namespace JSC { private: Type m_type; JSObject* m_base; - size_t m_offset; + PropertyOffset m_offset; bool m_isStrictMode; }; diff --git a/Source/JavaScriptCore/runtime/Structure.cpp b/Source/JavaScriptCore/runtime/Structure.cpp index 569126147..509ff3d45 100644 --- a/Source/JavaScriptCore/runtime/Structure.cpp +++ b/Source/JavaScriptCore/runtime/Structure.cpp @@ -156,8 +156,8 @@ Structure::Structure(JSGlobalData& globalData, JSGlobalObject* globalObject, JSV , m_prototype(globalData, this, prototype) , m_classInfo(classInfo) , m_transitionWatchpointSet(InitializedWatching) - , m_propertyStorageCapacity(typeInfo.isFinalObject() ? 
JSFinalObject_inlineStorageCapacity : JSNonFinalObject_inlineStorageCapacity) - , m_offset(noOffset) + , m_outOfLineCapacity(0) + , m_offset(invalidOffset) , m_dictionaryKind(NoneDictionaryKind) , m_isPinnedPropertyTable(false) , m_hasGetterSetterProperties(false) @@ -179,8 +179,8 @@ Structure::Structure(JSGlobalData& globalData) , m_prototype(globalData, this, jsNull()) , m_classInfo(&s_info) , m_transitionWatchpointSet(InitializedWatching) - , m_propertyStorageCapacity(0) - , m_offset(noOffset) + , m_outOfLineCapacity(0) + , m_offset(invalidOffset) , m_dictionaryKind(NoneDictionaryKind) , m_isPinnedPropertyTable(false) , m_hasGetterSetterProperties(false) @@ -200,8 +200,8 @@ Structure::Structure(JSGlobalData& globalData, const Structure* previous) , m_prototype(globalData, this, previous->storedPrototype()) , m_classInfo(previous->m_classInfo) , m_transitionWatchpointSet(InitializedWatching) - , m_propertyStorageCapacity(previous->m_propertyStorageCapacity) - , m_offset(noOffset) + , m_outOfLineCapacity(previous->m_outOfLineCapacity) + , m_offset(invalidOffset) , m_dictionaryKind(previous->m_dictionaryKind) , m_isPinnedPropertyTable(false) , m_hasGetterSetterProperties(previous->m_hasGetterSetterProperties) @@ -239,7 +239,7 @@ void Structure::materializePropertyMap(JSGlobalData& globalData) ASSERT(structure->m_propertyTable); ASSERT(!structure->m_previous); - m_propertyTable = structure->m_propertyTable->copy(globalData, 0, m_offset + 1); + m_propertyTable = structure->m_propertyTable->copy(globalData, 0, numberOfSlotsForLastOffset(m_offset, m_typeInfo.type())); break; } @@ -247,7 +247,7 @@ void Structure::materializePropertyMap(JSGlobalData& globalData) } if (!m_propertyTable) - createPropertyMap(m_offset + 1); + createPropertyMap(numberOfSlotsForLastOffset(m_offset, m_typeInfo.type())); for (ptrdiff_t i = structures.size() - 2; i >= 0; --i) { structure = structures[i]; @@ -256,19 +256,21 @@ void Structure::materializePropertyMap(JSGlobalData& globalData) } } 
-void Structure::growPropertyStorageCapacity() +inline size_t nextOutOfLineStorageCapacity(size_t currentCapacity) { - if (isUsingInlineStorage()) - m_propertyStorageCapacity = JSObject::baseExternalStorageCapacity; - else - m_propertyStorageCapacity *= 2; + if (!currentCapacity) + return 4; + return currentCapacity * 2; } -size_t Structure::suggestedNewPropertyStorageSize() +void Structure::growOutOfLineCapacity() { - if (isUsingInlineStorage()) - return JSObject::baseExternalStorageCapacity; - return m_propertyStorageCapacity * 2; + m_outOfLineCapacity = nextOutOfLineStorageCapacity(m_outOfLineCapacity); +} + +size_t Structure::suggestedNewOutOfLineStorageCapacity() +{ + return nextOutOfLineStorageCapacity(m_outOfLineCapacity); } void Structure::despecifyDictionaryFunction(JSGlobalData& globalData, PropertyName propertyName) @@ -285,7 +287,7 @@ void Structure::despecifyDictionaryFunction(JSGlobalData& globalData, PropertyNa entry->specificValue.clear(); } -Structure* Structure::addPropertyTransitionToExistingStructure(Structure* structure, PropertyName propertyName, unsigned attributes, JSCell* specificValue, size_t& offset) +Structure* Structure::addPropertyTransitionToExistingStructure(Structure* structure, PropertyName propertyName, unsigned attributes, JSCell* specificValue, PropertyOffset& offset) { ASSERT(!structure->isDictionary()); ASSERT(structure->isObject()); @@ -294,7 +296,7 @@ Structure* Structure::addPropertyTransitionToExistingStructure(Structure* struct JSCell* specificValueInPrevious = existingTransition->m_specificValueInPrevious.get(); if (specificValueInPrevious && specificValueInPrevious != specificValue) return 0; - ASSERT(existingTransition->m_offset != noOffset); + validateOffset(existingTransition->m_offset, structure->m_typeInfo.type()); offset = existingTransition->m_offset; return existingTransition; } @@ -302,7 +304,7 @@ Structure* Structure::addPropertyTransitionToExistingStructure(Structure* struct return 0; } -Structure* 
Structure::addPropertyTransition(JSGlobalData& globalData, Structure* structure, PropertyName propertyName, unsigned attributes, JSCell* specificValue, size_t& offset) +Structure* Structure::addPropertyTransition(JSGlobalData& globalData, Structure* structure, PropertyName propertyName, unsigned attributes, JSCell* specificValue, PropertyOffset& offset) { // If we have a specific function, we may have got to this point if there is // already a transition with the correct property name and attributes, but @@ -325,8 +327,8 @@ Structure* Structure::addPropertyTransition(JSGlobalData& globalData, Structure* Structure* transition = toCacheableDictionaryTransition(globalData, structure); ASSERT(structure != transition); offset = transition->putSpecificValue(globalData, propertyName, attributes, specificValue); - if (transition->propertyStorageSize() > transition->propertyStorageCapacity()) - transition->growPropertyStorageCapacity(); + if (transition->outOfLineSize() > transition->outOfLineCapacity()) + transition->growOutOfLineCapacity(); return transition; } @@ -351,15 +353,15 @@ Structure* Structure::addPropertyTransition(JSGlobalData& globalData, Structure* } offset = transition->putSpecificValue(globalData, propertyName, attributes, specificValue); - if (transition->propertyStorageSize() > transition->propertyStorageCapacity()) - transition->growPropertyStorageCapacity(); + if (transition->outOfLineSize() > transition->outOfLineCapacity()) + transition->growOutOfLineCapacity(); transition->m_offset = offset; structure->m_transitionTable.add(globalData, transition); return transition; } -Structure* Structure::removePropertyTransition(JSGlobalData& globalData, Structure* structure, PropertyName propertyName, size_t& offset) +Structure* Structure::removePropertyTransition(JSGlobalData& globalData, Structure* structure, PropertyName propertyName, PropertyOffset& offset) { ASSERT(!structure->isUncacheableDictionary()); @@ -546,18 +548,19 @@ Structure* 
Structure::flattenDictionaryStructure(JSGlobalData& globalData, JSObj size_t propertyCount = m_propertyTable->size(); Vector<JSValue> values(propertyCount); - + unsigned i = 0; + PropertyOffset firstOffset = firstPropertyOffsetFor(m_typeInfo.type()); PropertyTable::iterator end = m_propertyTable->end(); for (PropertyTable::iterator iter = m_propertyTable->begin(); iter != end; ++iter, ++i) { values[i] = object->getDirectOffset(iter->offset); // Update property table to have the new property offsets - iter->offset = i; + iter->offset = i + firstOffset; } // Copy the original property values into their final locations for (unsigned i = 0; i < propertyCount; i++) - object->putDirectOffset(globalData, i, values[i]); + object->putDirectOffset(globalData, firstOffset + i, values[i]); m_propertyTable->clearDeletedOffsets(); } @@ -566,7 +569,7 @@ Structure* Structure::flattenDictionaryStructure(JSGlobalData& globalData, JSObj return this; } -size_t Structure::addPropertyWithoutTransition(JSGlobalData& globalData, PropertyName propertyName, unsigned attributes, JSCell* specificValue) +PropertyOffset Structure::addPropertyWithoutTransition(JSGlobalData& globalData, PropertyName propertyName, unsigned attributes, JSCell* specificValue) { ASSERT(!m_enumerationCache); @@ -577,13 +580,13 @@ size_t Structure::addPropertyWithoutTransition(JSGlobalData& globalData, Propert pin(); - size_t offset = putSpecificValue(globalData, propertyName, attributes, specificValue); - if (propertyStorageSize() > propertyStorageCapacity()) - growPropertyStorageCapacity(); + PropertyOffset offset = putSpecificValue(globalData, propertyName, attributes, specificValue); + if (outOfLineSize() > outOfLineCapacity()) + growOutOfLineCapacity(); return offset; } -size_t Structure::removePropertyWithoutTransition(JSGlobalData& globalData, PropertyName propertyName) +PropertyOffset Structure::removePropertyWithoutTransition(JSGlobalData& globalData, PropertyName propertyName) { 
ASSERT(isUncacheableDictionary()); ASSERT(!m_enumerationCache); @@ -591,8 +594,7 @@ size_t Structure::removePropertyWithoutTransition(JSGlobalData& globalData, Prop materializePropertyMapIfNecessaryForPinning(globalData); pin(); - size_t offset = remove(propertyName); - return offset; + return remove(propertyName); } void Structure::pin() @@ -637,20 +639,20 @@ PassOwnPtr<PropertyTable> Structure::copyPropertyTable(JSGlobalData& globalData, PassOwnPtr<PropertyTable> Structure::copyPropertyTableForPinning(JSGlobalData& globalData, Structure* owner) { - return adoptPtr(m_propertyTable ? new PropertyTable(globalData, owner, *m_propertyTable) : new PropertyTable(m_offset == noOffset ? 0 : m_offset)); + return adoptPtr(m_propertyTable ? new PropertyTable(globalData, owner, *m_propertyTable) : new PropertyTable(numberOfSlotsForLastOffset(m_offset, m_typeInfo.type()))); } -size_t Structure::get(JSGlobalData& globalData, PropertyName propertyName, unsigned& attributes, JSCell*& specificValue) +PropertyOffset Structure::get(JSGlobalData& globalData, PropertyName propertyName, unsigned& attributes, JSCell*& specificValue) { ASSERT(structure()->classInfo() == &s_info); materializePropertyMapIfNecessary(globalData); if (!m_propertyTable) - return WTF::notFound; + return invalidOffset; PropertyMapEntry* entry = m_propertyTable->find(propertyName.uid()).first; if (!entry) - return WTF::notFound; + return invalidOffset; attributes = entry->attributes; specificValue = entry->specificValue.get(); @@ -683,9 +685,9 @@ void Structure::despecifyAllFunctions(JSGlobalData& globalData) iter->specificValue.clear(); } -size_t Structure::putSpecificValue(JSGlobalData& globalData, PropertyName propertyName, unsigned attributes, JSCell* specificValue) +PropertyOffset Structure::putSpecificValue(JSGlobalData& globalData, PropertyName propertyName, unsigned attributes, JSCell* specificValue) { - ASSERT(get(globalData, propertyName) == notFound); + ASSERT(!JSC::isValidOffset(get(globalData, 
propertyName))); checkConsistency(); if (attributes & DontEnum) @@ -696,12 +698,7 @@ size_t Structure::putSpecificValue(JSGlobalData& globalData, PropertyName proper if (!m_propertyTable) createPropertyMap(); - unsigned newOffset; - - if (m_propertyTable->hasDeletedOffset()) - newOffset = m_propertyTable->getDeletedOffset(); - else - newOffset = m_propertyTable->size(); + PropertyOffset newOffset = m_propertyTable->nextOffset(m_typeInfo.type()); m_propertyTable->add(PropertyMapEntry(globalData, this, rep, newOffset, attributes, specificValue)); @@ -709,20 +706,20 @@ size_t Structure::putSpecificValue(JSGlobalData& globalData, PropertyName proper return newOffset; } -size_t Structure::remove(PropertyName propertyName) +PropertyOffset Structure::remove(PropertyName propertyName) { checkConsistency(); StringImpl* rep = propertyName.uid(); if (!m_propertyTable) - return notFound; + return invalidOffset; PropertyTable::find_iterator position = m_propertyTable->find(rep); if (!position.first) - return notFound; + return invalidOffset; - size_t offset = position.first->offset; + PropertyOffset offset = position.first->offset; m_propertyTable->remove(position); m_propertyTable->addDeletedOffset(offset); diff --git a/Source/JavaScriptCore/runtime/Structure.h b/Source/JavaScriptCore/runtime/Structure.h index 448a81c27..d2d025b98 100644 --- a/Source/JavaScriptCore/runtime/Structure.h +++ b/Source/JavaScriptCore/runtime/Structure.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2008, 2009 Apple Inc. All rights reserved. + * Copyright (C) 2008, 2009, 2012 Apple Inc. All rights reserved. 
* * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions @@ -87,9 +87,9 @@ namespace JSC { public: static void dumpStatistics(); - JS_EXPORT_PRIVATE static Structure* addPropertyTransition(JSGlobalData&, Structure*, PropertyName, unsigned attributes, JSCell* specificValue, size_t& offset); - JS_EXPORT_PRIVATE static Structure* addPropertyTransitionToExistingStructure(Structure*, PropertyName, unsigned attributes, JSCell* specificValue, size_t& offset); - static Structure* removePropertyTransition(JSGlobalData&, Structure*, PropertyName, size_t& offset); + JS_EXPORT_PRIVATE static Structure* addPropertyTransition(JSGlobalData&, Structure*, PropertyName, unsigned attributes, JSCell* specificValue, PropertyOffset&); + JS_EXPORT_PRIVATE static Structure* addPropertyTransitionToExistingStructure(Structure*, PropertyName, unsigned attributes, JSCell* specificValue, PropertyOffset&); + static Structure* removePropertyTransition(JSGlobalData&, Structure*, PropertyName, PropertyOffset&); JS_EXPORT_PRIVATE static Structure* changePrototypeTransition(JSGlobalData&, Structure*, JSValue prototype); JS_EXPORT_PRIVATE static Structure* despecifyFunctionTransition(JSGlobalData&, Structure*, PropertyName); static Structure* attributeChangeTransition(JSGlobalData&, Structure*, PropertyName, unsigned attributes); @@ -103,16 +103,32 @@ namespace JSC { bool isFrozen(JSGlobalData&); bool isExtensible() const { return !m_preventExtensions; } bool didTransition() const { return m_didTransition; } - bool shouldGrowPropertyStorage() { return propertyStorageCapacity() == propertyStorageSize(); } - JS_EXPORT_PRIVATE size_t suggestedNewPropertyStorageSize(); + bool putWillGrowOutOfLineStorage() + { + ASSERT(outOfLineCapacity() >= outOfLineSize()); + + if (!m_propertyTable) { + unsigned currentSize = numberOfOutOfLineSlotsForLastOffset(m_offset); + ASSERT(outOfLineCapacity() >= currentSize); + return currentSize 
== outOfLineCapacity(); + } + + ASSERT(totalStorageCapacity() >= m_propertyTable->propertyStorageSize()); + if (m_propertyTable->hasDeletedOffset()) + return false; + + ASSERT(totalStorageCapacity() >= m_propertyTable->size()); + return m_propertyTable->size() == totalStorageCapacity(); + } + JS_EXPORT_PRIVATE size_t suggestedNewOutOfLineStorageCapacity(); Structure* flattenDictionaryStructure(JSGlobalData&, JSObject*); static void destroy(JSCell*); // These should be used with caution. - JS_EXPORT_PRIVATE size_t addPropertyWithoutTransition(JSGlobalData&, PropertyName, unsigned attributes, JSCell* specificValue); - size_t removePropertyWithoutTransition(JSGlobalData&, PropertyName); + JS_EXPORT_PRIVATE PropertyOffset addPropertyWithoutTransition(JSGlobalData&, PropertyName, unsigned attributes, JSCell* specificValue); + PropertyOffset removePropertyWithoutTransition(JSGlobalData&, PropertyName); void setPrototypeWithoutTransition(JSGlobalData& globalData, JSValue prototype) { m_prototype.set(globalData, this, prototype); } bool isDictionary() const { return m_dictionaryKind != NoneDictionaryKind; } @@ -133,17 +149,114 @@ namespace JSC { StructureChain* prototypeChain(ExecState*) const; static void visitChildren(JSCell*, SlotVisitor&); - Structure* previousID() const { ASSERT(structure()->classInfo() == &s_info); return m_previous.get(); } + Structure* previousID() const + { + ASSERT(structure()->classInfo() == &s_info); + return m_previous.get(); + } bool transitivelyTransitionedFrom(Structure* structureToFind); - void growPropertyStorageCapacity(); - unsigned propertyStorageCapacity() const { ASSERT(structure()->classInfo() == &s_info); return m_propertyStorageCapacity; } - unsigned propertyStorageSize() const { ASSERT(structure()->classInfo() == &s_info); return (m_propertyTable ? 
m_propertyTable->propertyStorageSize() : static_cast<unsigned>(m_offset + 1)); } - bool isUsingInlineStorage() const; + void growOutOfLineCapacity(); + unsigned outOfLineCapacity() const + { + ASSERT(structure()->classInfo() == &s_info); + return m_outOfLineCapacity; + } + unsigned outOfLineSizeForKnownFinalObject() const + { + ASSERT(m_typeInfo.type() == FinalObjectType); + if (m_propertyTable) { + unsigned totalSize = m_propertyTable->propertyStorageSize(); + if (totalSize < static_cast<unsigned>(inlineStorageCapacity)) + return 0; + return totalSize - inlineStorageCapacity; + } + return numberOfOutOfLineSlotsForLastOffset(m_offset); + } + unsigned outOfLineSizeForKnownNonFinalObject() const + { + ASSERT(m_typeInfo.type() != FinalObjectType); + if (m_propertyTable) + return m_propertyTable->propertyStorageSize(); + return numberOfOutOfLineSlotsForLastOffset(m_offset); + } + unsigned outOfLineSize() const + { + ASSERT(structure()->classInfo() == &s_info); + if (m_propertyTable) { + unsigned totalSize = m_propertyTable->propertyStorageSize(); + unsigned inlineCapacity = this->inlineCapacity(); + if (totalSize < inlineCapacity) + return 0; + return totalSize - inlineCapacity; + } + return numberOfOutOfLineSlotsForLastOffset(m_offset); + } + bool hasInlineStorage() const + { + return m_typeInfo.type() == FinalObjectType; + } + unsigned inlineCapacity() const + { + if (hasInlineStorage()) + return inlineStorageCapacity; + return 0; + } + unsigned inlineSizeForKnownFinalObject() const + { + ASSERT(m_typeInfo.type() == FinalObjectType); + unsigned result; + if (m_propertyTable) + result = m_propertyTable->propertyStorageSize(); + else + result = m_offset + 1; + if (result > static_cast<unsigned>(inlineStorageCapacity)) + return inlineStorageCapacity; + return result; + } + unsigned inlineSize() const + { + if (!hasInlineStorage()) + return 0; + return inlineSizeForKnownFinalObject(); + } + unsigned totalStorageSize() const + { + if (m_propertyTable) + return 
m_propertyTable->propertyStorageSize(); + return numberOfSlotsForLastOffset(m_offset, m_typeInfo.type()); + } + unsigned totalStorageCapacity() const + { + ASSERT(structure()->classInfo() == &s_info); + return m_outOfLineCapacity + inlineCapacity(); + } + + PropertyOffset firstValidOffset() const + { + if (hasInlineStorage()) + return 0; + return inlineStorageCapacity; + } + PropertyOffset lastValidOffset() const + { + if (m_propertyTable) { + PropertyOffset size = m_propertyTable->propertyStorageSize(); + if (!hasInlineStorage()) + size += inlineStorageCapacity; + return size - 1; + } + return m_offset; + } + bool isValidOffset(PropertyOffset offset) const + { + return offset >= firstValidOffset() + && offset <= lastValidOffset(); + } - size_t get(JSGlobalData&, PropertyName); - size_t get(JSGlobalData&, const UString& name); - JS_EXPORT_PRIVATE size_t get(JSGlobalData&, PropertyName, unsigned& attributes, JSCell*& specificValue); + PropertyOffset get(JSGlobalData&, PropertyName); + PropertyOffset get(JSGlobalData&, const UString& name); + JS_EXPORT_PRIVATE PropertyOffset get(JSGlobalData&, PropertyName, unsigned& attributes, JSCell*& specificValue); bool hasGetterSetterProperties() const { return m_hasGetterSetterProperties; } bool hasReadOnlyOrGetterSetterPropertiesExcludingProto() const { return m_hasReadOnlyOrGetterSetterPropertiesExcludingProto; } @@ -160,7 +273,12 @@ namespace JSC { bool hasNonEnumerableProperties() const { return m_hasNonEnumerableProperties; } - bool isEmpty() const { return m_propertyTable ? 
m_propertyTable->isEmpty() : m_offset == noOffset; } + bool isEmpty() const + { + if (m_propertyTable) + return m_propertyTable->isEmpty(); + return !JSC::isValidOffset(m_offset); + } JS_EXPORT_PRIVATE void despecifyDictionaryFunction(JSGlobalData&, PropertyName); void disableSpecificFunctionTracking() { m_specificFunctionThrashCount = maxSpecificFunctionThrashCount; } @@ -256,8 +374,8 @@ namespace JSC { } DictionaryKind; static Structure* toDictionaryTransition(JSGlobalData&, Structure*, DictionaryKind); - size_t putSpecificValue(JSGlobalData&, PropertyName, unsigned attributes, JSCell* specificValue); - size_t remove(PropertyName); + PropertyOffset putSpecificValue(JSGlobalData&, PropertyName, unsigned attributes, JSCell* specificValue); + PropertyOffset remove(PropertyName); void createPropertyMap(unsigned keyCount = 0); void checkConsistency(); @@ -284,7 +402,7 @@ namespace JSC { int transitionCount() const { // Since the number of transitions is always the same as m_offset, we keep the size of Structure down by not storing both. - return m_offset == noOffset ? 
0 : m_offset + 1; + return numberOfSlotsForLastOffset(m_offset, m_typeInfo.type()); } bool isValid(ExecState*, StructureChain* cachedPrototypeChain) const; @@ -293,8 +411,6 @@ namespace JSC { static const int s_maxTransitionLength = 64; - static const int noOffset = -1; - static const unsigned maxSpecificFunctionThrashCount = 3; TypeInfo m_typeInfo; @@ -319,10 +435,10 @@ namespace JSC { mutable InlineWatchpointSet m_transitionWatchpointSet; - uint32_t m_propertyStorageCapacity; + uint32_t m_outOfLineCapacity; // m_offset does not account for anonymous slots - int m_offset; + PropertyOffset m_offset; unsigned m_dictionaryKind : 2; bool m_isPinnedPropertyTable : 1; @@ -336,26 +452,26 @@ namespace JSC { unsigned m_staticFunctionReified; }; - inline size_t Structure::get(JSGlobalData& globalData, PropertyName propertyName) + inline PropertyOffset Structure::get(JSGlobalData& globalData, PropertyName propertyName) { ASSERT(structure()->classInfo() == &s_info); materializePropertyMapIfNecessary(globalData); if (!m_propertyTable) - return notFound; + return invalidOffset; PropertyMapEntry* entry = m_propertyTable->find(propertyName.uid()).first; - return entry ? entry->offset : notFound; + return entry ? entry->offset : invalidOffset; } - inline size_t Structure::get(JSGlobalData& globalData, const UString& name) + inline PropertyOffset Structure::get(JSGlobalData& globalData, const UString& name) { ASSERT(structure()->classInfo() == &s_info); materializePropertyMapIfNecessary(globalData); if (!m_propertyTable) - return notFound; + return invalidOffset; PropertyMapEntry* entry = m_propertyTable->findWithString(name.impl()).first; - return entry ? entry->offset : notFound; + return entry ? 
entry->offset : invalidOffset; } inline JSValue JSValue::structureOrUndefined() const diff --git a/Source/JavaScriptCore/runtime/WeakGCMap.h b/Source/JavaScriptCore/runtime/WeakGCMap.h index 9e8db4d60..6926165a7 100644 --- a/Source/JavaScriptCore/runtime/WeakGCMap.h +++ b/Source/JavaScriptCore/runtime/WeakGCMap.h @@ -75,8 +75,9 @@ public: return HandleTypes<MappedType>::getFromSlot(const_cast<JSValue*>(&impl->jsValue())); } - void set(JSGlobalData&, const KeyType& key, ExternalType value) + void set(JSGlobalData& globalData, const KeyType& key, ExternalType value) { + ASSERT_UNUSED(globalData, globalData.apiLock().currentThreadIsHoldingLock()); typename MapType::AddResult result = m_map.add(key, 0); if (!result.isNewEntry) WeakSet::deallocate(result.iterator->second); diff --git a/Source/JavaScriptCore/testRegExp.cpp b/Source/JavaScriptCore/testRegExp.cpp index 6899ac284..61c21c31b 100644 --- a/Source/JavaScriptCore/testRegExp.cpp +++ b/Source/JavaScriptCore/testRegExp.cpp @@ -495,9 +495,8 @@ static void parseArguments(int argc, char** argv, CommandLine& options) int realMain(int argc, char** argv) { - JSLock lock(SilenceAssertionsOnly); - RefPtr<JSGlobalData> globalData = JSGlobalData::create(ThreadStackTypeLarge, LargeHeap); + JSLockHolder lock(globalData.get()); CommandLine options; parseArguments(argc, argv, options); diff --git a/Source/JavaScriptCore/tests/mozilla/jsDriver.pl b/Source/JavaScriptCore/tests/mozilla/jsDriver.pl index d1c18ce66..4a774cefd 100644 --- a/Source/JavaScriptCore/tests/mozilla/jsDriver.pl +++ b/Source/JavaScriptCore/tests/mozilla/jsDriver.pl @@ -164,6 +164,7 @@ sub execute_tests { my $failure_lines; my $bug_number; my $status_lines; + my @jsc_exit_code; # user selected [Q]uit from ^C handler. if ($user_exit) { @@ -177,7 +178,20 @@ sub execute_tests { $shell_command = $opt_arch . " "; $shell_command .= &xp_path($engine_command) . 
" -s "; - + +# FIXME: <https://bugs.webkit.org/show_bug.cgi?id=90119> +# Sporadically on Windows, the exit code returned after close() in $? +# is 126 (after the appropraite shifting, even though jsc exits with +# 0 or 3). To work around this, a -x option was added to jsc that will +# output the exit value right before exiting. We parse that value and +# remove it from the output stream before comparing the actual and expected +# outputs. When that bug is found and fixed, the code for processing of +# "jsc exiting [\d]" and use of @jsc_exit_code can be removed along with +# the -x option in jsc.cpp + if ($os_type eq "WIN") { + $shell_command .= " -x "; + } + $path = &xp_path($opt_suite_path . $suite . "/shell.js"); if (-f $path) { $shell_command .= $file_param . $path; @@ -202,10 +216,19 @@ sub execute_tests { $redirect_command . " |"); @output = <OUTPUT>; close (OUTPUT); - - @output = grep (!/js\>/, @output); - - if ($opt_exit_munge == 1) { + + @jsc_exit_code = grep (/jsc exiting [\d]/, @output); + @output = grep (!/js\>|jsc exiting [\d]/, @output); + + if (($#jsc_exit_code == 0) && ($jsc_exit_code[0] =~ /jsc exiting ([\d])\W*/)) { +# return value from jsc output to work around windows bug + $got_exit = $1; + if ($opt_exit_munge == 1) { + $exit_signal = ($? & 255); + } else { + $exit_signal = 0; + } + } elsif ($opt_exit_munge == 1) { # signal information in the lower 8 bits, exit code above that $got_exit = ($? >> 8); $exit_signal = ($? & 255); diff --git a/Source/JavaScriptCore/wscript b/Source/JavaScriptCore/wscript index 58696d9c5..4afb4d26a 100644 --- a/Source/JavaScriptCore/wscript +++ b/Source/JavaScriptCore/wscript @@ -66,7 +66,7 @@ def build(bld): features = 'cc cxx cshlib', includes = '. .. assembler ../WTF ' + ' '.join(includes), source = sources, - defines = ['BUILDING_JavaScriptCore', 'STATICALLY_LINKED_WITH_WTF'], + defines = ['BUILDING_JavaScriptCore'], target = 'jscore', uselib = 'WX ICU ' + get_config(), uselib_local = '', |